/*
 * kref.h - library routines for handling generic reference counted objects
 *
 * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2004 IBM Corp.
 *
 * based on kobject.h which was:
 * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (C) 2002-2003 Open Source Development Labs
 *
 * This file is released under the GPLv2.
 *
 */

#ifndef _KREF_H_
#define _KREF_H_

#include <linux/bug.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

struct kref {
	atomic_t refcount;
};

/**
 * kref_init - initialize object.
 * @kref: object in question.
 */
static inline void kref_init(struct kref *kref)
{
	atomic_set(&kref->refcount, 1);
}
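
/*
 * Example (a minimal sketch, not part of the original header; struct
 * my_obj and my_obj_alloc are hypothetical): the kref is embedded in the
 * object it protects and starts at 1, so the creator owns the first
 * reference.
 *
 *	struct my_obj {
 *		struct kref refcount;
 *		struct list_head node;
 *		int data;
 *	};
 *
 *	static struct my_obj *my_obj_alloc(void)
 *	{
 *		struct my_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *		if (!obj)
 *			return NULL;
 *		kref_init(&obj->refcount);
 *		return obj;
 *	}
 */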

/**
 * kref_get - increment refcount for object.
 * @kref: object.
 */
static inline void kref_get(struct kref *kref)
{
	WARN_ON(!atomic_read(&kref->refcount));
	atomic_inc(&kref->refcount);
}
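
/*
 * Example (a sketch with hypothetical names): because of the WARN_ON
 * above, kref_get() may only be called while the caller already holds a
 * valid reference, e.g. when handing the object to another execution
 * context:
 *
 *	kref_get(&obj->refcount);
 *	queue_work(my_wq, &obj->work);
 *
 * The worker now owns that extra reference and must drop it with
 * kref_put() when it is done. If the count could already have reached
 * zero, use kref_get_unless_zero() below instead.
 */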

/**
 * kref_sub - subtract a number of refcounts for object.
 * @kref: object.
 * @count: number of refcounts to subtract.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function. If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it. You have
 *	     been warned.
 *
 * Subtract @count from the refcount, and if it reaches 0, call release().
 * Return 1 if the object was removed, otherwise return 0. Beware: even if
 * this function returns 0, you cannot count on the kref still being in
 * memory, since another holder may drop the last reference at any time.
 * Only use the return value to learn that the kref is now gone, never that
 * it is still present.
 */
static inline int kref_sub(struct kref *kref, unsigned int count,
			   void (*release)(struct kref *kref))
{
	WARN_ON(release == NULL);

	if (atomic_sub_and_test((int) count, &kref->refcount)) {
		release(kref);
		return 1;
	}
	return 0;
}

/**
 * kref_put - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function. If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it. You have
 *	     been warned.
 *
 * Decrement the refcount, and if it reaches 0, call release().
 * Return 1 if the object was removed, otherwise return 0. Beware: even if
 * this function returns 0, you cannot count on the kref still being in
 * memory; only use the return value to learn that the kref is now gone.
 */
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
	return kref_sub(kref, 1, release);
}
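
/*
 * Example release function (a sketch with hypothetical names, continuing
 * the struct my_obj example above): the release callback receives the
 * embedded kref and recovers the enclosing object with container_of().
 *
 *	static void my_obj_release(struct kref *kref)
 *	{
 *		struct my_obj *obj = container_of(kref, struct my_obj,
 *						  refcount);
 *
 *		kfree(obj);
 *	}
 *
 *	void my_obj_put(struct my_obj *obj)
 *	{
 *		kref_put(&obj->refcount, my_obj_release);
 *	}
 *
 * Wrapping kfree() in a named release function like this is what the
 * comment above asks for; the cleanup must never be done by passing
 * kfree directly.
 */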

/**
 * kref_put_mutex - decrement refcount for object, taking @lock on final put.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 * @lock: mutex to take on the final put.
 *
 * Like kref_put(), but if the refcount is about to drop to 0, @lock is
 * acquired before the final decrement, so release() is called with @lock
 * held; it is up to release() to drop the lock. Return 1 if the object was
 * removed, otherwise return 0.
 */
static inline int kref_put_mutex(struct kref *kref,
				 void (*release)(struct kref *kref),
				 struct mutex *lock)
{
	WARN_ON(release == NULL);
	if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
		mutex_lock(lock);
		if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
			mutex_unlock(lock);
			return 0;
		}
		release(kref);
		return 1;
	}
	return 0;
}
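
/*
 * Example (a sketch with hypothetical names): kref_put_mutex() suits the
 * case where the final put must also remove the object from a
 * mutex-protected lookup structure. The release function runs with the
 * mutex held and must drop it itself:
 *
 *	static void my_obj_release_locked(struct kref *kref)
 *	{
 *		struct my_obj *obj = container_of(kref, struct my_obj,
 *						  refcount);
 *
 *		list_del(&obj->node);
 *		mutex_unlock(&my_obj_lock);
 *		kfree(obj);
 *	}
 *
 *	kref_put_mutex(&obj->refcount, my_obj_release_locked, &my_obj_lock);
 */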

/**
 * kref_get_unless_zero - Increment refcount for object unless it is zero.
 * @kref: object.
 *
 * Return non-zero if the increment succeeded. Otherwise return 0.
 *
 * This function is intended to simplify locking around refcounting for
 * objects that can be looked up from a lookup structure, and which are
 * removed from that lookup structure in the object destructor.
 * Operations on such objects require at least a read lock around
 * lookup + kref_get, and a write lock around kref_put + remove from lookup
 * structure. Furthermore, RCU implementations become extremely tricky.
 * With a lookup followed by a kref_get_unless_zero *with return value
 * check*, locking in the kref_put path can be deferred to the actual
 * removal from the lookup structure, and RCU lookups become trivial.
 */
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
	return atomic_add_unless(&kref->refcount, 1, 0);
}
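
/*
 * Example (a sketch with hypothetical names, assuming the destructor
 * frees the object only after an RCU grace period, e.g. via kfree_rcu()):
 * an RCU-protected lookup that only returns the object if a reference
 * could still be taken. my_obj_find() stands in for the actual table
 * walk:
 *
 *	struct my_obj *my_obj_lookup(unsigned long id)
 *	{
 *		struct my_obj *obj;
 *
 *		rcu_read_lock();
 *		obj = my_obj_find(id);
 *		if (obj && !kref_get_unless_zero(&obj->refcount))
 *			obj = NULL;
 *		rcu_read_unlock();
 *		return obj;
 *	}
 */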

#endif /* _KREF_H_ */