From 57f0f512b273f60d52568b8c6b77e17f5636edc0 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Wed, 5 Aug 2015 17:04:01 -0300
Subject: Initial import
---
 include/linux/lockref.h | 51 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)
 create mode 100644 include/linux/lockref.h

diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 000000000..b10b122dd
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,51 @@
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them. In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+#include <generated/bounds.h>
+
+#define USE_CMPXCHG_LOCKREF \
+	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
+	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
+
+struct lockref {
+	union {
+#if USE_CMPXCHG_LOCKREF
+		aligned_u64 lock_count;
+#endif
+		struct {
+			spinlock_t lock;
+			int count;
+		};
+	};
+};
+
+extern void lockref_get(struct lockref *);
+extern int lockref_put_return(struct lockref *);
+extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_get_or_lock(struct lockref *);
+extern int lockref_put_or_lock(struct lockref *);
+
+extern void lockref_mark_dead(struct lockref *);
+extern int lockref_get_not_dead(struct lockref *);
+
+/* Must be called under spinlock for reliable results */
+static inline int __lockref_is_dead(const struct lockref *l)
+{
+	return ((int)l->count < 0);
+}
+
+#endif /* __LINUX_LOCKREF_H */
--
cgit v1.2.3-54-g00ecf
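
A minimal usage sketch, not taken from this patch, showing the intended pattern for the API declared above, loosely modelled on how fs/dcache.c drives its d_lockref. The my_obj structure and the my_obj_* helpers are hypothetical names for illustration only, and kfree() stands in for whatever teardown the real object needs.

#include <linux/lockref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical object embedding a lockref; the payload is protected by ref.lock. */
struct my_obj {
	struct lockref ref;
	/* ... payload ... */
};

static void my_obj_init(struct my_obj *obj)
{
	spin_lock_init(&obj->ref.lock);
	obj->ref.count = 1;		/* initial reference held by the creator */
}

/* Take a reference unless the object has already been marked dead. */
static bool my_obj_tryget(struct my_obj *obj)
{
	return lockref_get_not_dead(&obj->ref) != 0;
}

/* Drop a reference; free the object when the last one goes away. */
static void my_obj_put(struct my_obj *obj)
{
	if (lockref_put_or_lock(&obj->ref))
		return;			/* fast path: count stayed above zero */

	/*
	 * Slow path: the count would have hit zero, so lockref_put_or_lock()
	 * returned 0 with ref.lock held and the count still at 1.
	 */
	lockref_mark_dead(&obj->ref);	/* reject further lockref_get_not_dead() */
	spin_unlock(&obj->ref.lock);
	kfree(obj);
}

The point of this split is that on USE_CMPXCHG_LOCKREF configurations the common get/put never touches ref.lock at all; the spinlock is only taken for the final reference, where the caller must decide whether to mark the object dead and tear it down.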