File: /Users/paulross/dev/linux/linux-3.13/include/linux/kref.h

/*
 * kref.h - library routines for handling generic reference counted objects
 *
 * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2004 IBM Corp.
 *
 * based on kobject.h which was:
 * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (C) 2002-2003 Open Source Development Labs
 *
 * This file is released under the GPLv2.
 *
 */

#ifndef _KREF_H_
#define _KREF_H_

#include <linux/bug.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct kref {
    atomic_t refcount;
};

/**
 * kref_init - initialize object.
 * @kref: object in question.
 */
static inline void kref_init(struct kref *kref)
{
    atomic_set(&kref->refcount, 1);
}
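
/*
 * Illustrative sketch, not part of the original header: a refcounted
 * object typically embeds a struct kref and initializes it on creation.
 * 'struct my_obj', 'my_obj_create' and its fields are hypothetical;
 * kzalloc() and kfree() require <linux/slab.h>, struct list_head
 * requires <linux/list.h>.
 */
struct my_obj {
    struct kref refcount;
    struct list_head node;      /* linkage into a hypothetical lookup list */
    int payload;
};

static struct my_obj *my_obj_create(void)
{
    struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

    if (!obj)
        return NULL;
    kref_init(&obj->refcount);  /* the caller holds the first reference */
    return obj;
}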

/**
 * kref_get - increment refcount for object.
 * @kref: object.
 */
static inline void kref_get(struct kref *kref)
{
    /* If the refcount was 0 before the increment, we have a race
     * condition: this kref is being freed by some other thread right now.
     * In that case one should use kref_get_unless_zero().
     */
    WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
}
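
/*
 * Illustrative sketch, not part of the original header: take an extra
 * reference before handing the object to another user; that user later
 * drops it with kref_put().  'my_obj_share' is hypothetical.
 */
static void my_obj_share(struct my_obj *obj)
{
    kref_get(&obj->refcount);   /* safe: the caller already holds a reference */
}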

/**
 * kref_sub - subtract a number of refcounts for object.
 * @kref: object.
 * @count: Number of refcounts to subtract.
 * @release: pointer to the function that will clean up the object when the
 *         last reference to the object is released.
 *         This pointer is required, and it is not acceptable to pass kfree
 *         in as this function.  If the caller does pass kfree to this
 *         function, you will be publicly mocked mercilessly by the kref
 *         maintainer, and anyone else who happens to notice it.  You have
 *         been warned.
 *
 * Subtract @count from the refcount, and if it reaches 0, call release().
 * Return 1 if the object was removed, otherwise return 0.  Beware: if this
 * function returns 0, you still cannot count on the kref remaining in
 * memory.  Only use the return value to learn that the kref is now gone,
 * never to conclude that it is still present.
 */
static inline int kref_sub(struct kref *kref, unsigned int count,
         void (*release)(struct kref *kref))
{
    WARN_ON(release == NULL);

    if (atomic_sub_and_test((int) count, &kref->refcount)) {
        release(kref);
        return 1;
    }
    return 0;
}
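
/*
 * Illustrative sketch, not part of the original header: a proper release
 * function recovers the enclosing object from the embedded kref with
 * container_of() and frees it.  This is why passing kfree() directly is
 * unacceptable - the kref is generally not at offset 0 in the object.
 */
static void my_obj_release(struct kref *kref)
{
    struct my_obj *obj = container_of(kref, struct my_obj, refcount);

    kfree(obj);
}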

/**
 * kref_put - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *         last reference to the object is released.
 *         This pointer is required, and it is not acceptable to pass kfree
 *         in as this function.  If the caller does pass kfree to this
 *         function, you will be publicly mocked mercilessly by the kref
 *         maintainer, and anyone else who happens to notice it.  You have
 *         been warned.
 *
 * Decrement the refcount, and if it reaches 0, call release().
 * Return 1 if the object was removed, otherwise return 0.  Beware: if this
 * function returns 0, you still cannot count on the kref remaining in
 * memory.  Only use the return value to learn that the kref is now gone,
 * never to conclude that it is still present.
 */
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
    return kref_sub(kref, 1, release);
}
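
/*
 * Illustrative sketch, not part of the original header: dropping a
 * reference with the release function defined above.  Once kref_put()
 * has been called, 'obj' must not be dereferenced again - the object
 * may already have been freed.
 */
static void my_obj_unref(struct my_obj *obj)
{
    kref_put(&obj->refcount, my_obj_release);
}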

/**
 * kref_put_spinlock_irqsave - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *         last reference to the object is released.
 *         This pointer is required, and it is not acceptable to pass kfree
 *         in as this function.
 * @lock: lock to take in release case
 *
 * Behaves identically to kref_put() with one exception.  If the reference
 * count drops to zero, the lock will be taken atomically with respect to
 * dropping the reference count.  The release function has to call
 * spin_unlock() without _irqrestore; this helper restores the saved
 * interrupt flags afterwards.
 */
static inline int kref_put_spinlock_irqsave(struct kref *kref,
        void (*release)(struct kref *kref),
        spinlock_t *lock)
{
    unsigned long flags;

    WARN_ON(release == NULL);
    if (atomic_add_unless(&kref->refcount, -1, 1))
        return 0;
    spin_lock_irqsave(lock, flags);
    if (atomic_dec_and_test(&kref->refcount)) {
        release(kref);
        local_irq_restore(flags);
        return 1;
    }
    spin_unlock_irqrestore(lock, flags);
    return 0;
}
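
/*
 * Illustrative sketch, not part of the original header: a release
 * function for kref_put_spinlock_irqsave().  It is entered with the
 * caller's spinlock held and must drop it with plain spin_unlock();
 * kref_put_spinlock_irqsave() restores the saved interrupt flags
 * afterwards.  'my_table_lock' is a hypothetical lock protecting the
 * lookup list.
 */
static DEFINE_SPINLOCK(my_table_lock);

static void my_obj_release_locked(struct kref *kref)
{
    struct my_obj *obj = container_of(kref, struct my_obj, refcount);

    list_del(&obj->node);           /* still under my_table_lock */
    spin_unlock(&my_table_lock);    /* no _irqrestore, per the contract above */
    kfree(obj);
}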

/**
 * kref_put_mutex - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *         last reference to the object is released.
 *         This pointer is required, and it is not acceptable to pass kfree
 *         in as this function.
 * @lock: mutex to take in release case
 *
 * Behaves identically to kref_put() with one exception.  If the reference
 * count drops to zero, @lock will be taken atomically with respect to
 * dropping the reference count.  @release is then called with @lock held,
 * and this function returns 1 without dropping @lock, so either @release
 * or the caller must unlock it.
 */
static inline int kref_put_mutex(struct kref *kref,
                 void (*release)(struct kref *kref),
                 struct mutex *lock)
{
    WARN_ON(release == NULL);
    if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
        mutex_lock(lock);
        if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
            mutex_unlock(lock);
            return 0;
        }
        release(kref);
        return 1;
    }
    return 0;
}
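
/*
 * Illustrative sketch, not part of the original header: a release
 * function for kref_put_mutex().  It is entered with the caller's mutex
 * held and unlocks it itself.  'my_table_mutex' is hypothetical.
 */
static DEFINE_MUTEX(my_table_mutex);

static void my_obj_release_mutex(struct kref *kref)
{
    struct my_obj *obj = container_of(kref, struct my_obj, refcount);

    list_del(&obj->node);           /* still under my_table_mutex */
    mutex_unlock(&my_table_mutex);
    kfree(obj);
}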

/**
 * kref_get_unless_zero - Increment refcount for object unless it is zero.
 * @kref: object.
 *
 * Return non-zero if the increment succeeded.  Otherwise return 0.
 *
 * This function is intended to simplify locking around refcounting for
 * objects that can be looked up from a lookup structure, and which are
 * removed from that lookup structure in the object destructor.
 * Operations on such objects require at least a read lock around
 * lookup + kref_get, and a write lock around kref_put + remove from lookup
 * structure.  Furthermore, RCU implementations become extremely tricky.
 * With a lookup followed by a kref_get_unless_zero *with return value check*,
 * locking in the kref_put path can be deferred to the actual removal from
 * the lookup structure, and RCU lookups become trivial.
 */
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
    return atomic_add_unless(&kref->refcount, 1, 0);
}
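
/*
 * Illustrative sketch, not part of the original header: the RCU lookup
 * pattern described above, assuming the destructor frees the object
 * only after an RCU grace period.  Under rcu_read_lock() the memory
 * cannot be reused, but the refcount may already have hit zero, so the
 * return value of kref_get_unless_zero() must be checked.
 * 'my_obj_find' and 'my_hash_lookup' are hypothetical; rcu_read_lock()
 * requires <linux/rcupdate.h>.
 */
static struct my_obj *my_obj_find(int key)
{
    struct my_obj *obj;

    rcu_read_lock();
    obj = my_hash_lookup(key);  /* hypothetical RCU-safe lookup */
    if (obj && !kref_get_unless_zero(&obj->refcount))
        obj = NULL;             /* raced with the destructor */
    rcu_read_unlock();
    return obj;
}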
#endif /* _KREF_H_ */