File: /Users/paulross/dev/linux/linux-3.13/include/linux/lockref.h

#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
    (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
     IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

struct lockref {
    union {
#if USE_CMPXCHG_LOCKREF
        aligned_u64 lock_count;
#endif
        struct {
            spinlock_t lock;
            unsigned int count;
        };
    };
};

extern void lockref_get(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);

extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);

/* Must be called under spinlock for reliable results */
static inline int __lockref_is_dead(const struct lockref *l)
{
    return ((int)l->count < 0);
}

#endif /* __LINUX_LOCKREF_H */
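
The prototypes above are implemented in lib/lockref.c. The fragment below is a
minimal usage sketch, not kernel code: struct myobj, myobj_init(), myobj_get()
and myobj_put() are hypothetical names chosen for illustration. It shows the
intended pattern: references are taken and dropped on the lock-free fast path,
and only the caller that sees the count about to reach zero ends up holding the
spinlock to perform teardown. This mirrors how the dcache, which lockref was
introduced for, uses lockref_put_or_lock() in dput().

#include <linux/lockref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical refcounted object whose final teardown must run locked. */
struct myobj {
    struct lockref ref;
    /* ... payload ... */
};

static void myobj_init(struct myobj *o)
{
    spin_lock_init(&o->ref.lock);
    o->ref.count = 1;                       /* initial reference */
}

/* Take a reference unless the object is already being torn down. */
static int myobj_get(struct myobj *o)
{
    return lockref_get_not_dead(&o->ref);   /* 1 on success, 0 if dead */
}

/* Drop a reference; tear the object down when the last one goes away. */
static void myobj_put(struct myobj *o)
{
    if (lockref_put_or_lock(&o->ref))
        return;                             /* fast path: count decremented,
                                               other references remain */

    /*
     * Slow path: the count was about to hit zero, so lockref_put_or_lock()
     * left it untouched and returned with the spinlock held.  Mark the
     * lockref dead so concurrent lockref_get_not_dead() callers fail,
     * then release the lock and free the object.
     */
    lockref_mark_dead(&o->ref);
    spin_unlock(&o->ref.lock);
    kfree(o);
}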
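
When USE_CMPXCHG_LOCKREF is enabled, the union overlays the spinlock and the
count in a single aligned 64-bit word (aligned_u64 lock_count); that only works
when the spinlock fits in 4 bytes, which is why the macro checks SPINLOCK_SIZE
from generated/bounds.h. The sketch below is a simplified illustration of the
resulting lock-free fast path, loosely modeled on the CMPXCHG_LOOP pattern in
lib/lockref.c. The function name lockref_inc_fastpath is made up for this
example and is not part of the kernel API; it assumes a configuration where
USE_CMPXCHG_LOCKREF evaluates to 1.

#include <linux/lockref.h>
#include <linux/spinlock.h>

/*
 * Illustrative only: try to increment the count without touching the
 * spinlock, as long as nobody currently holds it.  Returns 1 on success,
 * 0 if the caller should fall back to spin_lock()-protected code.
 */
static int lockref_inc_fastpath(struct lockref *lockref)
{
    struct lockref old, new;

    old.lock_count = ACCESS_ONCE(lockref->lock_count);
    while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
        new.lock_count = old.lock_count;    /* copies lock word and count */
        new.count++;                        /* the semantic operation */

        /* Publish lock word and count together in one 64-bit cmpxchg. */
        if (cmpxchg64(&lockref->lock_count, old.lock_count,
                      new.lock_count) == old.lock_count)
            return 1;                       /* updated, lock never taken */

        /* Lost a race: reload and retry while the lock stays free. */
        old.lock_count = ACCESS_ONCE(lockref->lock_count);
    }
    return 0;                               /* lock is held: take the slow path */
}

Because the cmpxchg covers both halves of the union, the update succeeds only
if the lock word was still in its unlocked state at the moment of the swap,
which is what makes the count change "semantically as if the spinlock had been
taken", as the header comment puts it. As soon as the lock is observed held,
the helpers in lib/lockref.c give up on the fast path and fall back to taking
the spinlock and updating the count directly.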