/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <asm/semaphore.h>

/* This protects CPUs going up and down... */
DECLARE_MUTEX(cpucontrol);

static struct notifier_block *cpu_chain;

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	if ((ret = down_interruptible(&cpucontrol)) != 0)
		return ret;
	ret = notifier_chain_register(&cpu_chain, nb);
	up(&cpucontrol);
	return ret;
}
EXPORT_SYMBOL(register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	down(&cpucontrol);
	notifier_chain_unregister(&cpu_chain, nb);
	up(&cpucontrol);
}
EXPORT_SYMBOL(unregister_cpu_notifier);
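
/*
 * Illustrative sketch, not part of the original file: a minimal client of
 * register_cpu_notifier().  The callback and notifier_block names below are
 * hypothetical; CPU_ONLINE, CPU_DEAD and NOTIFY_OK come from
 * <linux/notifier.h>.  The cpu number arrives encoded in the hcpu pointer,
 * matching how this file invokes notifier_call_chain().
 */
#if 0
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		printk(KERN_INFO "example: cpu %u is up\n", cpu);
		break;
	case CPU_DEAD:
		printk(KERN_INFO "example: cpu %u is down\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};

/* From module init: register_cpu_notifier(&example_cpu_nb); */
#endif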

#ifdef CONFIG_HOTPLUG_CPU
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
			       "(state = %ld, flags = %lx)\n",
			       p->comm, p->pid, cpu, p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

/*
 * Take this CPU down.  Runs on the dying CPU itself, invoked via
 * __stop_machine_run() with the rest of the machine quiesced.
 */
static int take_cpu_down(void *unused)
{
	int err;

	/* Take offline: makes arch_cpu_down somewhat easier. */
	cpu_clear(smp_processor_id(), cpu_online_map);

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		cpu_set(smp_processor_id(), cpu_online_map);
	else
		/* Force idle task to run as soon as we yield: it should
		   immediately notice cpu is offline and die quickly. */
		sched_idle_next();

	return err;
}

int cpu_down(unsigned int cpu)
{
	int err;
	struct task_struct *p;
	cpumask_t old_allowed, tmp;

	if ((err = lock_cpu_hotplug_interruptible()) != 0)
		return err;

	if (num_online_cpus() == 1) {
		err = -EBUSY;
		goto out;
	}

	if (!cpu_online(cpu)) {
		err = -EINVAL;
		goto out;
	}

	err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
				  (void *)(long)cpu);
	if (err == NOTIFY_BAD) {
		printk(KERN_ERR "%s: attempt to take down CPU %u failed\n",
		       __FUNCTION__, cpu);
		err = -EINVAL;
		goto out;
	}

	/* Ensure that we are not runnable on the dying cpu. */
	old_allowed = current->cpus_allowed;
	tmp = CPU_MASK_ALL;
	cpu_clear(cpu, tmp);
	set_cpus_allowed(current, tmp);

	p = __stop_machine_run(take_cpu_down, NULL, cpu);
	if (IS_ERR(p)) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
					(void *)(long)cpu) == NOTIFY_BAD)
			BUG();

		err = PTR_ERR(p);
		goto out_allowed;
	}

	if (cpu_online(cpu))
		goto out_thread;

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* Move the stop_machine thread here so it can run. */
	kthread_bind(p, get_cpu());
	put_cpu();

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu)
	    == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_thread:
	err = kthread_stop(p);
out_allowed:
	set_cpus_allowed(current, old_allowed);
out:
	unlock_cpu_hotplug();
	return err;
}
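
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * of cpu_down().  In the tree the usual entry point is the sysfs "online"
 * attribute handled in drivers/base/cpu.c.
 */
#if 0
static int example_offline_cpu(unsigned int cpu)
{
	int err = cpu_down(cpu);	/* may sleep; -EBUSY if last online CPU */

	if (err)
		printk(KERN_ERR "example: cpu_down(%u) failed: %d\n",
		       cpu, err);
	return err;
}
#endif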
#endif /* CONFIG_HOTPLUG_CPU */

int __devinit cpu_up(unsigned int cpu)
{
	int ret;
	void *hcpu = (void *)(long)cpu;

	if ((ret = down_interruptible(&cpucontrol)) != 0)
		return ret;

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
	if (ret == NOTIFY_BAD) {
		printk(KERN_ERR "%s: attempt to bring up CPU %u failed\n",
		       __FUNCTION__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	if (!cpu_online(cpu))
		BUG();

	/* Now the CPU is up and running: tell everyone. */
	notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);

out_notify:
	if (ret != 0)
		notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out:
	up(&cpucontrol);
	return ret;
}
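
/*
 * Illustrative sketch, not part of the original file: at boot, smp_init()
 * in init/main.c brings up the secondary CPUs with a loop along these
 * lines (simplified; the real code also honours max_cpus).
 */
#if 0
static void example_bring_up_secondaries(void)
{
	unsigned int cpu;

	for_each_present_cpu(cpu) {
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}
}
#endif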