/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
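
/*
 * Note: __uidhashfn() folds the high bits of the UID onto the low bits
 * before masking, so both consecutive UIDs and UIDs spaced exactly
 * UIDHASH_SZ apart land in different buckets instead of colliding.
 */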

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
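
/*
 * Root is special-cased: UID 0 gets this statically allocated
 * user_struct, available before the slab cache exists.
 * uid_cache_init() hashes it into the table at boot, so lookups of
 * UID 0 in the initial namespace find it instead of allocating.
 */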
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.mq_bytes	= 0,
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_FAIR_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up,
		struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static inline struct user_struct *uid_hash_find(uid_t uid,
		struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
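
/*
 * Note that uid_hash_find() takes its reference while uidhash_lock is
 * held. free_uid() drops references via atomic_dec_and_lock() on the
 * same lock, so a count can only reach zero with the lock held and a
 * concurrent lookup can never resurrect a user_struct that is in the
 * middle of being freed.
 */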

#ifdef CONFIG_FAIR_USER_SCHED

static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
static DEFINE_MUTEX(uids_mutex);

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group();
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}
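
/*
 * The wrappers above hook each user_struct into the group scheduler:
 * with CONFIG_FAIR_USER_SCHED every UID owns a task group, so CPU time
 * is first divided fairly between users and only then between each
 * user's tasks.
 */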

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* return cpu shares held by the user */
ssize_t cpu_shares_show(struct kset *kset, char *buffer)
{
	struct user_struct *up = container_of(kset, struct user_struct, kset);

	return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
}

/* modify cpu shares held by the user */
ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
{
	struct user_struct *up = container_of(kset, struct user_struct, kset);
	unsigned long shares;
	int rc;

	sscanf(buffer, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}
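
/*
 * Illustrative use from userspace (the default share is 1024):
 *	# cat /sys/kernel/uids/1000/cpu_share
 *	1024
 *	# echo 2048 > /sys/kernel/uids/1000/cpu_share
 * Doubling a user's cpu_share roughly doubles the CPU bandwidth that
 * user's tasks receive relative to other users under contention.
 */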

static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
{
	sa->attr.name = name;
	sa->attr.mode = mode;
	sa->show = cpu_shares_show;
	sa->store = cpu_shares_store;
}

/* Create "/sys/kernel/uids/<uid>" directory and
 * "/sys/kernel/uids/<uid>/cpu_share" file for this user.
 */
static int user_kobject_create(struct user_struct *up)
{
	struct kset *kset = &up->kset;
	struct kobject *kobj = &kset->kobj;
	int error;

	memset(kset, 0, sizeof(struct kset));
	kobj->parent = &uids_kobject;	/* create under /sys/kernel/uids dir */
	kobject_set_name(kobj, "%d", up->uid);
	kset_init(kset);
	user_attr_init(&up->user_attr, "cpu_share", 0644);

	error = kobject_add(kobj);
	if (error)
		goto done;

	error = sysfs_create_file(kobj, &up->user_attr.attr);
	if (error) {
		kobject_del(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);

done:
	return error;
}

/* create these in sysfs filesystem:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_kobject_init(void)
{
	int error;

	/* create under /sys/kernel dir */
	uids_kobject.parent = &kernel_subsys.kobj;
	uids_kobject.kset = &kernel_subsys;
	kobject_set_name(&uids_kobject, "uids");
	kobject_init(&uids_kobject);

	error = kobject_add(&uids_kobject);
	if (!error)
		error = user_kobject_create(&root_user);

	return error;
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	struct kobject *kobj = &up->kset.kobj;
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	sysfs_remove_file(kobj, &up->user_attr.attr);
	kobject_uevent(kobj, KOBJ_REMOVE);
	kobject_del(kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 *
 * The actual teardown is deferred to a workqueue because sysfs and
 * kobject removal can sleep, which is not allowed here: free_user()
 * runs with interrupts disabled. The reference is temporarily restored
 * so that the work function can drop it again under uidhash_lock.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore back the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}

#else	/* CONFIG_FAIR_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }
static inline int user_kobject_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif	/* CONFIG_FAIR_USER_SCHED */

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
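
/*
 * Illustrative caller (hypothetical, not from this file): look up a
 * user, read a counter, then drop the reference find_user() took:
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		printk(KERN_INFO "uid %u has %d processes\n",
 *			u->uid, atomic_read(&u->processes));
 *		free_uid(u);
 *	}
 */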

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up;

	/* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new) {
			uids_mutex_unlock();
			return NULL;
		}
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif

		new->mq_bytes = 0;
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0) {
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		if (sched_create_user(new) < 0) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		if (user_kobject_create(new)) {
			sched_destroy_user(new);
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_FAIR_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);

	}

	uids_mutex_unlock();

	return up;
}
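
/*
 * Note the pattern above: the new user_struct is allocated and fully
 * initialized outside uidhash_lock (the allocation may sleep), then the
 * hash is re-checked under the lock and the loser of any race frees its
 * copy. Without CONFIG_FAIR_USER_SCHED the uids_mutex calls are no-ops
 * and this re-check is what provides correctness.
 */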

void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit? We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it. -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}

void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * collapse the chains so that the user_struct-s will
	 * still be alive, but not in the hashes. Subsequent free_uid()
	 * will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);