Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * lib/kernel_lock.c | |
3 | * | |
4 | * This is the traditional BKL - big kernel lock. Largely | |
5895df96 | 5 | * relegated to obsolescence, but used by various less |
1da177e4 LT |
6 | * important (or lazy) subsystems. |
7 | */ | |
8 | #include <linux/smp_lock.h> | |
9 | #include <linux/module.h> | |
10 | #include <linux/kallsyms.h> | |
11 | ||
/*
 * The 'big kernel semaphore'
 *
 * This mutex is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Note: code locked by this semaphore will only be serialized against
 * other code using the same locking facility. The code guarantees that
 * the task remains on the same CPU.
 *
 * Don't use in new code.
 *
 * NOTE(review): despite the name, DECLARE_MUTEX declares a counting
 * semaphore initialized to 1 (historical kernel naming) — it is
 * manipulated below with down()/up(), not mutex_lock()/mutex_unlock().
 */
static DECLARE_MUTEX(kernel_sem);
27 | ||
/*
 * Re-acquire the kernel semaphore.
 *
 * This function is called with preemption off.
 *
 * We are executing in schedule() so the code must be extremely careful
 * about recursion, both due to the down() and due to the enabling of
 * preemption. schedule() will re-check the preemption flag after
 * reacquiring the semaphore.
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	struct task_struct *task = current;
	int saved_lock_depth = task->lock_depth;

	/* Only reached when the task actually holds the BKL. */
	BUG_ON(saved_lock_depth < 0);

	/*
	 * Hide the BKL recursion state while we sleep: with lock_depth
	 * at -1, a nested schedule() will not try to drop the lock
	 * again, and lock_kernel() would do a real down().
	 */
	task->lock_depth = -1;
	/*
	 * Re-enable preemption without triggering an immediate
	 * reschedule — we are about to block in down() anyway, and
	 * schedule() re-checks need_resched after we return.
	 */
	preempt_enable_no_resched();

	down(&kernel_sem);

	/* Back under the lock: restore the caller's recursion count. */
	preempt_disable();
	task->lock_depth = saved_lock_depth;

	return 0;
}
55 | ||
/*
 * Release the kernel semaphore on behalf of schedule().
 *
 * current->lock_depth is deliberately left untouched: the scheduler
 * preserves it across the block and __reacquire_kernel_lock() restores
 * the lock itself afterwards.
 */
void __lockfunc __release_kernel_lock(void)
{
	up(&kernel_sem);
}
60 | ||
61 | /* | |
62 | * Getting the big kernel semaphore. | |
63 | */ | |
64 | void __lockfunc lock_kernel(void) | |
65 | { | |
66 | struct task_struct *task = current; | |
67 | int depth = task->lock_depth + 1; | |
68 | ||
69 | if (likely(!depth)) | |
70 | /* | |
71 | * No recursion worries - we set up lock_depth _after_ | |
72 | */ | |
73 | down(&kernel_sem); | |
74 | ||
75 | task->lock_depth = depth; | |
76 | } | |
77 | ||
78 | void __lockfunc unlock_kernel(void) | |
79 | { | |
80 | struct task_struct *task = current; | |
81 | ||
82 | BUG_ON(task->lock_depth < 0); | |
83 | ||
84 | if (likely(--task->lock_depth < 0)) | |
85 | up(&kernel_sem); | |
86 | } | |
87 | ||
/*
 * Exported for the many legacy modules still using the BKL.
 * __release_kernel_lock()/__reacquire_kernel_lock() are scheduler
 * internals and deliberately not exported.
 */
EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
90 |