KEYS: Move the unreferenced key reaper to the keys garbage collector file
[deliverable/linux.git] / security / keys / gc.c
1 /* Key garbage collector
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/security.h>
15 #include <keys/keyring-type.h>
16 #include "internal.h"
17
/*
 * Delay between key revocation/expiry and the actual reaping of dead links
 * to it, in seconds.
 */
unsigned key_gc_delay = 5 * 60;

/*
 * Reaper for unused keys.  Scheduled by key_put() when a key's usage count
 * hits zero; non-static so that other keys code can schedule it.
 */
static void key_gc_unused_keys(struct work_struct *work);
DECLARE_WORK(key_gc_unused_work, key_gc_unused_keys);

/*
 * Reaper for links from keyrings to dead keys.
 */
static void key_gc_timer_func(unsigned long);
static void key_gc_dead_links(struct work_struct *);
/* timer that kicks key_gc_work when a deferred GC run falls due */
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
static DECLARE_WORK(key_gc_work, key_gc_dead_links);
static key_serial_t key_gc_cursor; /* the last key the gc considered */
static bool key_gc_again;		/* set if a rescan is needed after this pass */
static unsigned long key_gc_executing;	/* bit 0 set whilst a GC pass is running */
static time_t key_gc_next_run = LONG_MAX; /* time of next scheduled run */
static time_t key_gc_new_timer;		/* earliest expiry seen by the scan */
41
42 /*
43 * Schedule a garbage collection run.
44 * - time precision isn't particularly important
45 */
46 void key_schedule_gc(time_t gc_at)
47 {
48 unsigned long expires;
49 time_t now = current_kernel_time().tv_sec;
50
51 kenter("%ld", gc_at - now);
52
53 if (gc_at <= now) {
54 schedule_work(&key_gc_work);
55 } else if (gc_at < key_gc_next_run) {
56 expires = jiffies + (gc_at - now) * HZ;
57 mod_timer(&key_gc_timer, expires);
58 }
59 }
60
/*
 * The garbage collector timer kicked off: hand the actual scanning work off
 * to process context via the workqueue.
 */
static void key_gc_timer_func(unsigned long data)
{
	kenter("");
	/* reset the next-run marker before scheduling the work so that
	 * key_schedule_gc() can arm the timer for a fresh deadline */
	key_gc_next_run = LONG_MAX;
	schedule_work(&key_gc_work);
}
70
/*
 * Garbage collect pointers from a keyring.
 *
 * Called with key_serial_lock held; if any dead or overstayed key is found
 * in the keyring, the lock is dropped (hence the __releases annotation) so
 * that keyring_gc() can modify the keyring, and the caller must restart its
 * tree walk.
 *
 * Return true if we altered the keyring (and released the lock).
 */
static bool key_gc_keyring(struct key *keyring, time_t limit)
	__releases(key_serial_lock)
{
	struct keyring_list *klist;
	struct key *key;
	int loop;

	kenter("%x", key_serial(keyring));

	/* skip revoked keyrings entirely */
	if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
		goto dont_gc;

	/* scan the keyring looking for dead keys: ones marked dead or whose
	 * expiry time has passed the grace-period limit */
	rcu_read_lock();
	klist = rcu_dereference(keyring->payload.subscriptions);
	if (!klist)
		goto unlock_dont_gc;

	for (loop = klist->nkeys - 1; loop >= 0; loop--) {
		key = klist->keys[loop];
		if (test_bit(KEY_FLAG_DEAD, &key->flags) ||
		    (key->expiry > 0 && key->expiry <= limit))
			goto do_gc;
	}

unlock_dont_gc:
	rcu_read_unlock();
dont_gc:
	kleave(" = false");
	return false;

do_gc:
	rcu_read_unlock();
	/* note where the caller should resume its scan, and pin the keyring
	 * so it can't go away whilst we work on it outside the lock */
	key_gc_cursor = keyring->serial;
	key_get(keyring);
	spin_unlock(&key_serial_lock);
	keyring_gc(keyring, limit);
	key_put(keyring);
	kleave(" = true");
	return true;
}
117
/*
 * Garbage collector for links to dead keys.
 *
 * This involves scanning the keyrings for dead, expired and revoked keys
 * that have overstayed their welcome.  The scan walks the key serial tree,
 * resuming from key_gc_cursor if a previous pass had to drop the lock.
 */
static void key_gc_dead_links(struct work_struct *work)
{
	struct rb_node *rb;
	key_serial_t cursor;
	struct key *key, *xkey;
	time_t new_timer = LONG_MAX, limit, now;

	now = current_kernel_time().tv_sec;
	kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now);

	/* only one pass may run at a time; if one is already in progress,
	 * defer ourselves for a second */
	if (test_and_set_bit(0, &key_gc_executing)) {
		key_schedule_gc(current_kernel_time().tv_sec + 1);
		kleave(" [busy; deferring]");
		return;
	}

	/* keys that expired at or before this limit have exceeded the grace
	 * period (key_gc_delay) and may have their links reaped */
	limit = now;
	if (limit > key_gc_delay)
		limit -= key_gc_delay;
	else
		limit = key_gc_delay;

	spin_lock(&key_serial_lock);

	if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) {
		spin_unlock(&key_serial_lock);
		clear_bit(0, &key_gc_executing);
		return;
	}

	/* a nonzero cursor means we're resuming a scan that dropped the
	 * lock part way through; keep its accumulated timer state */
	cursor = key_gc_cursor;
	if (cursor < 0)
		cursor = 0;
	if (cursor > 0)
		new_timer = key_gc_new_timer;
	else
		key_gc_again = false;

	/* find the first key above the cursor */
	key = NULL;
	rb = key_serial_tree.rb_node;
	while (rb) {
		xkey = rb_entry(rb, struct key, serial_node);
		if (cursor < xkey->serial) {
			key = xkey;
			rb = rb->rb_left;
		} else if (cursor > xkey->serial) {
			rb = rb->rb_right;
		} else {
			/* exact match on the cursor: resume at the key
			 * immediately after it */
			rb = rb_next(rb);
			if (!rb)
				goto reached_the_end;
			key = rb_entry(rb, struct key, serial_node);
			break;
		}
	}

	if (!key)
		goto reached_the_end;

	/* trawl through the keys looking for keyrings */
	for (;;) {
		/* track the earliest not-yet-reapable expiry so we know when
		 * to schedule the next run */
		if (key->expiry > limit && key->expiry < new_timer) {
			kdebug("will expire %x in %ld",
			       key_serial(key), key->expiry - limit);
			new_timer = key->expiry;
		}

		if (key->type == &key_type_keyring &&
		    key_gc_keyring(key, limit))
			/* the gc had to release our lock so that the keyring
			 * could be modified, so we have to get it again */
			goto gc_released_our_lock;

		rb = rb_next(&key->serial_node);
		if (!rb)
			goto reached_the_end;
		key = rb_entry(rb, struct key, serial_node);
	}

gc_released_our_lock:
	/* key_gc_keyring() dropped key_serial_lock and left key_gc_cursor
	 * pointing at the keyring it processed; reschedule ourselves to
	 * continue the scan from there */
	kdebug("gc_released_our_lock");
	key_gc_new_timer = new_timer;
	key_gc_again = true;
	clear_bit(0, &key_gc_executing);
	schedule_work(&key_gc_work);
	kleave(" [continue]");
	return;

	/* when we reach the end of the run, we set the timer for the next one */
reached_the_end:
	kdebug("reached_the_end");
	spin_unlock(&key_serial_lock);
	key_gc_new_timer = new_timer;
	key_gc_cursor = 0;
	clear_bit(0, &key_gc_executing);

	if (key_gc_again) {
		/* there may have been a key that expired whilst we were
		 * scanning, so if we discarded any links we should do another
		 * scan */
		new_timer = now + 1;
		key_schedule_gc(new_timer);
	} else if (new_timer < LONG_MAX) {
		new_timer += key_gc_delay;
		key_schedule_gc(new_timer);
	}
	kleave(" [end]");
}
233
234 /*
235 * Garbage collector for unused keys.
236 *
237 * This is done in process context so that we don't have to disable interrupts
238 * all over the place. key_put() schedules this rather than trying to do the
239 * cleanup itself, which means key_put() doesn't have to sleep.
240 */
241 static void key_gc_unused_keys(struct work_struct *work)
242 {
243 struct rb_node *_n;
244 struct key *key;
245
246 go_again:
247 /* look for a dead key in the tree */
248 spin_lock(&key_serial_lock);
249
250 for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
251 key = rb_entry(_n, struct key, serial_node);
252
253 if (atomic_read(&key->usage) == 0)
254 goto found_dead_key;
255 }
256
257 spin_unlock(&key_serial_lock);
258 return;
259
260 found_dead_key:
261 /* we found a dead key - once we've removed it from the tree, we can
262 * drop the lock */
263 rb_erase(&key->serial_node, &key_serial_tree);
264 spin_unlock(&key_serial_lock);
265
266 key_check(key);
267
268 security_key_free(key);
269
270 /* deal with the user's key tracking and quota */
271 if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
272 spin_lock(&key->user->lock);
273 key->user->qnkeys--;
274 key->user->qnbytes -= key->quotalen;
275 spin_unlock(&key->user->lock);
276 }
277
278 atomic_dec(&key->user->nkeys);
279 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
280 atomic_dec(&key->user->nikeys);
281
282 key_user_put(key->user);
283
284 /* now throw away the key memory */
285 if (key->type->destroy)
286 key->type->destroy(key);
287
288 kfree(key->description);
289
290 #ifdef KEY_DEBUGGING
291 key->magic = KEY_DEBUG_MAGIC_X;
292 #endif
293 kmem_cache_free(key_jar, key);
294
295 /* there may, of course, be more than one key to destroy */
296 goto go_again;
297 }
This page took 0.038273 seconds and 5 git commands to generate.