author    | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-12-15 14:52:16 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-12-15 14:52:16 -0300
commit    | 8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be
tree      | e9891aa6c295060d065adffd610c4f49ecf884f3 /include/asm-generic/qrwlock.h
parent    | a71852147516bc1cb5b0b3cbd13639bfd4022dc8
Linux-libre 4.3.2-gnu
Diffstat (limited to 'include/asm-generic/qrwlock.h')
-rw-r--r--  include/asm-generic/qrwlock.h | 78
1 file changed, 35 insertions(+), 43 deletions(-)
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 6383d54bf..54a8e65e1 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -36,39 +36,39 @@
 /*
  * External function declarations
  */
-extern void queue_read_lock_slowpath(struct qrwlock *lock);
-extern void queue_write_lock_slowpath(struct qrwlock *lock);
+extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
+extern void queued_write_lock_slowpath(struct qrwlock *lock);
 
 /**
- * queue_read_can_lock- would read_trylock() succeed?
+ * queued_read_can_lock- would read_trylock() succeed?
  * @lock: Pointer to queue rwlock structure
  */
-static inline int queue_read_can_lock(struct qrwlock *lock)
+static inline int queued_read_can_lock(struct qrwlock *lock)
 {
 	return !(atomic_read(&lock->cnts) & _QW_WMASK);
 }
 
 /**
- * queue_write_can_lock- would write_trylock() succeed?
+ * queued_write_can_lock- would write_trylock() succeed?
  * @lock: Pointer to queue rwlock structure
  */
-static inline int queue_write_can_lock(struct qrwlock *lock)
+static inline int queued_write_can_lock(struct qrwlock *lock)
 {
 	return !atomic_read(&lock->cnts);
 }
 
 /**
- * queue_read_trylock - try to acquire read lock of a queue rwlock
+ * queued_read_trylock - try to acquire read lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
-static inline int queue_read_trylock(struct qrwlock *lock)
+static inline int queued_read_trylock(struct qrwlock *lock)
 {
 	u32 cnts;
 
 	cnts = atomic_read(&lock->cnts);
 	if (likely(!(cnts & _QW_WMASK))) {
-		cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 		if (likely(!(cnts & _QW_WMASK)))
 			return 1;
 		atomic_sub(_QR_BIAS, &lock->cnts);
@@ -77,11 +77,11 @@ static inline int queue_read_trylock(struct qrwlock *lock)
 }
 
 /**
- * queue_write_trylock - try to acquire write lock of a queue rwlock
+ * queued_write_trylock - try to acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
-static inline int queue_write_trylock(struct qrwlock *lock)
+static inline int queued_write_trylock(struct qrwlock *lock)
 {
 	u32 cnts;
 
@@ -89,78 +89,70 @@ static inline int queue_write_trylock(struct qrwlock *lock)
 	if (unlikely(cnts))
 		return 0;
 
-	return likely(atomic_cmpxchg(&lock->cnts,
-				     cnts, cnts | _QW_LOCKED) == cnts);
+	return likely(atomic_cmpxchg_acquire(&lock->cnts,
+					     cnts, cnts | _QW_LOCKED) == cnts);
 }
 
 /**
- * queue_read_lock - acquire read lock of a queue rwlock
+ * queued_read_lock - acquire read lock of a queue rwlock
  * @lock: Pointer to queue rwlock structure
  */
-static inline void queue_read_lock(struct qrwlock *lock)
+static inline void queued_read_lock(struct qrwlock *lock)
 {
 	u32 cnts;
 
-	cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 	if (likely(!(cnts & _QW_WMASK)))
 		return;
 
 	/* The slowpath will decrement the reader count, if necessary. */
-	queue_read_lock_slowpath(lock);
+	queued_read_lock_slowpath(lock, cnts);
 }
 
 /**
- * queue_write_lock - acquire write lock of a queue rwlock
+ * queued_write_lock - acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-static inline void queue_write_lock(struct qrwlock *lock)
+static inline void queued_write_lock(struct qrwlock *lock)
 {
 	/* Optimize for the unfair lock case where the fair flag is 0. */
-	if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0)
+	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 		return;
 
-	queue_write_lock_slowpath(lock);
+	queued_write_lock_slowpath(lock);
 }
 
 /**
- * queue_read_unlock - release read lock of a queue rwlock
+ * queued_read_unlock - release read lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
 */
-static inline void queue_read_unlock(struct qrwlock *lock)
+static inline void queued_read_unlock(struct qrwlock *lock)
 {
 	/*
 	 * Atomically decrement the reader count
 	 */
-	smp_mb__before_atomic();
-	atomic_sub(_QR_BIAS, &lock->cnts);
+	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
 }
 
-#ifndef queue_write_unlock
 /**
- * queue_write_unlock - release write lock of a queue rwlock
+ * queued_write_unlock - release write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
 */
-static inline void queue_write_unlock(struct qrwlock *lock)
+static inline void queued_write_unlock(struct qrwlock *lock)
 {
-	/*
-	 * If the writer field is atomic, it can be cleared directly.
-	 * Otherwise, an atomic subtraction will be used to clear it.
-	 */
-	smp_mb__before_atomic();
-	atomic_sub(_QW_LOCKED, &lock->cnts);
+	smp_store_release((u8 *)&lock->cnts, 0);
 }
-#endif
 
 /*
  * Remapping rwlock architecture specific functions to the corresponding
  * queue rwlock functions.
 */
-#define arch_read_can_lock(l)	queue_read_can_lock(l)
-#define arch_write_can_lock(l)	queue_write_can_lock(l)
-#define arch_read_lock(l)	queue_read_lock(l)
-#define arch_write_lock(l)	queue_write_lock(l)
-#define arch_read_trylock(l)	queue_read_trylock(l)
-#define arch_write_trylock(l)	queue_write_trylock(l)
-#define arch_read_unlock(l)	queue_read_unlock(l)
-#define arch_write_unlock(l)	queue_write_unlock(l)
+#define arch_read_can_lock(l)	queued_read_can_lock(l)
+#define arch_write_can_lock(l)	queued_write_can_lock(l)
+#define arch_read_lock(l)	queued_read_lock(l)
+#define arch_write_lock(l)	queued_write_lock(l)
+#define arch_read_trylock(l)	queued_read_trylock(l)
+#define arch_write_trylock(l)	queued_write_trylock(l)
+#define arch_read_unlock(l)	queued_read_unlock(l)
+#define arch_write_unlock(l)	queued_write_unlock(l)
 
 #endif /* __ASM_GENERIC_QRWLOCK_H */
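Beyond the queue_* to queued_* renames, the substantive change in this diff is dropping the full-barrier sequences (smp_mb__before_atomic() followed by a plain atomic op) in favor of acquire/release-ordered atomics: atomic_add_return_acquire(), atomic_cmpxchg_acquire(), atomic_sub_return_release(), and smp_store_release(). Below is a minimal userspace sketch of the same fast paths written with C11 atomics; it is an illustrative analogue, not the kernel implementation, and the toy_* names and constants are invented for the sketch (no writer-waiting bit, and the kernel's real _QW_WMASK/_QR_BIAS layout differs).

/*
 * Userspace analogue of the qrwlock fast paths above, using C11
 * atomics. Constants are assumptions for illustration only.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_QW_LOCKED 0x0ffU	/* low byte: a writer holds the lock */
#define TOY_QR_BIAS   0x100U	/* one reader's worth of count */

struct toy_qrwlock {
	_Atomic uint32_t cnts;
};

/* Like queued_read_trylock(): speculatively add a reader with acquire
 * ordering, then back out if a writer was already present. */
static int toy_read_trylock(struct toy_qrwlock *lock)
{
	uint32_t cnts = atomic_load_explicit(&lock->cnts,
					     memory_order_relaxed);

	if (!(cnts & TOY_QW_LOCKED)) {
		/* acquire pairs with the release in toy_write_unlock() */
		cnts = atomic_fetch_add_explicit(&lock->cnts, TOY_QR_BIAS,
						 memory_order_acquire)
		       + TOY_QR_BIAS;	/* fetch_add returns the old value */
		if (!(cnts & TOY_QW_LOCKED))
			return 1;
		atomic_fetch_sub_explicit(&lock->cnts, TOY_QR_BIAS,
					  memory_order_relaxed);
	}
	return 0;
}

/* Like queued_read_unlock(): one release-ordered subtraction replaces
 * the old smp_mb__before_atomic() + plain atomic_sub() pair. */
static void toy_read_unlock(struct toy_qrwlock *lock)
{
	atomic_fetch_sub_explicit(&lock->cnts, TOY_QR_BIAS,
				  memory_order_release);
}

/* Like queued_write_trylock(): acquire-ordered cmpxchg from 0. */
static int toy_write_trylock(struct toy_qrwlock *lock)
{
	uint32_t expected = 0;

	return atomic_compare_exchange_strong_explicit(&lock->cnts,
			&expected, TOY_QW_LOCKED,
			memory_order_acquire, memory_order_relaxed);
}

/* The kernel's queued_write_unlock() release-stores a zero byte over
 * just the writer field; portable C11 has no sub-word view of an
 * _Atomic uint32_t, so this sketch releases the whole word instead. */
static void toy_write_unlock(struct toy_qrwlock *lock)
{
	atomic_store_explicit(&lock->cnts, 0, memory_order_release);
}

int main(void)
{
	struct toy_qrwlock lock = { 0 };

	if (toy_write_trylock(&lock)) {
		puts("write lock taken");
		toy_write_unlock(&lock);
	}
	if (toy_read_trylock(&lock)) {
		puts("read lock taken");
		toy_read_unlock(&lock);
	}
	return 0;
}

The point of the ordering split is that an acquire on the lock path and a release on the unlock path are exactly what is needed to keep the critical section from floating past either end, without the full memory barrier the old smp_mb__before_atomic() sequences implied.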