/*
 * kref.h - library routines for handling generic reference counted objects
 *
 * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2004 IBM Corp.
 *
 * based on kobject.h which was:
 * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (C) 2002-2003 Open Source Development Labs
 *
 * This file is released under the GPLv2.
 *
 */

#ifndef _KREF_H_
#define _KREF_H_

#include <linux/bug.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

struct kref {
	atomic_t refcount;
};

/**
 * kref_init - initialize object.
 * @kref: object in question.
 *
 * The object's refcount is set to 1.
 */
static inline void kref_init(struct kref *kref)
{
	atomic_set(&kref->refcount, 1);
}

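/*
 * Example (illustrative sketch, not part of this header's API): a
 * hypothetical refcounted object embedding a struct kref, together with
 * the release function used by the kref_put() examples further below.
 *
 *	struct my_data {
 *		struct kref refcount;
 *		int payload;
 *	};
 *
 *	static void my_data_release(struct kref *kref)
 *	{
 *		struct my_data *data =
 *			container_of(kref, struct my_data, refcount);
 *
 *		kfree(data);
 *	}
 *
 *	struct my_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *	if (!data)
 *		return -ENOMEM;
 *	kref_init(&data->refcount);
 */
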
/**
 * kref_get - increment refcount for object.
 * @kref: object.
 */
static inline void kref_get(struct kref *kref)
{
	/* If the refcount was 0 before incrementing, then we have a race:
	 * this kref is already being freed by some other thread. In that
	 * case one should use kref_get_unless_zero() instead.
	 */
	WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
}

/**
 * kref_sub - subtract a number of refcounts for object.
 * @kref: object.
 * @count: number of refcounts to subtract.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function. If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it. You have
 *	     been warned.
 *
 * Subtract @count from the refcount, and if it drops to 0, call release().
 * Return 1 if the object was removed, otherwise return 0. Beware: even if
 * this function returns 0, you cannot count on the kref remaining in
 * memory, since another holder may drop the last reference at any time.
 * Only use the return value to learn that the kref is now gone, never
 * that it is still present.
 */
static inline int kref_sub(struct kref *kref, unsigned int count,
			   void (*release)(struct kref *kref))
{
	WARN_ON(release == NULL);

	if (atomic_sub_and_test((int) count, &kref->refcount)) {
		release(kref);
		return 1;
	}
	return 0;
}

/**
 * kref_put - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function. If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it. You have
 *	     been warned.
 *
 * Decrement the refcount, and if it drops to 0, call release().
 * Return 1 if the object was removed, otherwise return 0. Beware: even if
 * this function returns 0, you cannot count on the kref remaining in
 * memory, since another holder may drop the last reference at any time.
 * Only use the return value to learn that the kref is now gone, never
 * that it is still present.
 */
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
	return kref_sub(kref, 1, release);
}

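/*
 * Example (illustrative, reusing the hypothetical struct my_data and
 * my_data_release() sketched after kref_init() above; do_something() is
 * likewise a stand-in): a typical get/put pairing around a temporary
 * user of the object.
 *
 *	kref_get(&data->refcount);
 *	do_something(data);
 *	kref_put(&data->refcount, my_data_release);
 *
 * After the kref_put() call the caller must not touch *data again: if
 * that was the last reference, my_data_release() has already freed it.
 */
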
/**
 * kref_put_mutex - decrement refcount for object, taking a mutex on last put.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 * @lock: mutex that guards removal of the object from its lookup structure.
 *
 * Like kref_put(), but if the refcount may drop to 0, @lock is acquired
 * before the final decrement and release() is then called with @lock held.
 * Note that @lock is not dropped after release() returns; the release
 * function itself must unlock it. Return 1 if the object was removed,
 * otherwise return 0.
 */
static inline int kref_put_mutex(struct kref *kref,
				 void (*release)(struct kref *kref),
				 struct mutex *lock)
{
	WARN_ON(release == NULL);
	/* Fast path: drop one reference unless it would be the last one. */
	if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
		mutex_lock(lock);
		/* Recheck under the lock: a new reference may have appeared. */
		if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
			mutex_unlock(lock);
			return 0;
		}
		release(kref);
		return 1;
	}
	return 0;
}
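
/*
 * Example (illustrative, continuing the hypothetical struct my_data from
 * above; "entries", "entry_lock" and the "link" list member are likewise
 * made up): a release function for kref_put_mutex() unlinks the object
 * under the mutex and is responsible for unlocking it.
 *
 *	static void my_data_release_locked(struct kref *kref)
 *	{
 *		struct my_data *entry =
 *			container_of(kref, struct my_data, refcount);
 *
 *		list_del(&entry->link);
 *		mutex_unlock(&entry_lock);
 *		kfree(entry);
 *	}
 *
 *	kref_put_mutex(&entry->refcount, my_data_release_locked, &entry_lock);
 */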

/**
 * kref_get_unless_zero - increment refcount for object unless it is zero.
 * @kref: object.
 *
 * Return non-zero if the increment succeeded. Otherwise return 0.
 *
 * This function is intended to simplify locking around refcounting for
 * objects that can be looked up from a lookup structure, and which are
 * removed from that lookup structure in the object destructor.
 * Operations on such objects require at least a read lock around
 * lookup + kref_get, and a write lock around kref_put + remove from lookup
 * structure. Furthermore, RCU implementations become extremely tricky.
 * With a lookup followed by a kref_get_unless_zero *with return value check*,
 * locking in the kref_put path can be deferred to the actual removal from
 * the lookup structure, and RCU lookups become trivial.
 */
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
	return atomic_add_unless(&kref->refcount, 1, 0);
}
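
/*
 * Example (illustrative sketch; "entries", "entry_lock" and struct my_data
 * with its "link" member are hypothetical, as above): safely taking a
 * reference to an object found on a mutex-protected list, where the race
 * with a concurrent final kref_put_mutex() is resolved by checking the
 * return value of kref_get_unless_zero().
 *
 *	static struct my_data *get_entry(void)
 *	{
 *		struct my_data *entry = NULL;
 *
 *		mutex_lock(&entry_lock);
 *		if (!list_empty(&entries)) {
 *			entry = list_first_entry(&entries, struct my_data,
 *						 link);
 *			if (!kref_get_unless_zero(&entry->refcount))
 *				entry = NULL;
 *		}
 *		mutex_unlock(&entry_lock);
 *		return entry;
 *	}
 */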
#endif /* _KREF_H_ */