x86: Set CONFIG_NR_CPUS even on UP
kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

/*
 * Represents all CPUs present in the system.
 * In systems capable of hotplug, this map may grow dynamically as new
 * CPUs are detected by a platform-specific method, e.g. via ACPI.
 */
cpumask_t cpu_present_map __read_mostly;
EXPORT_SYMBOL(cpu_present_map);

/*
 * Represents all CPUs that are currently online.
 */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

#ifdef CONFIG_INIT_ALL_POSSIBLE
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
#else
cpumask_t cpu_possible_map __read_mostly;
#endif
EXPORT_SYMBOL(cpu_possible_map);

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_map, cpu_present_map */
static DEFINE_MUTEX(cpu_add_remove_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up() and cpu_down() will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

static struct {
	struct task_struct *active_writer;
	struct mutex lock;	/*
				 * Synchronizes accesses to refcount and
				 * also blocks new readers during an
				 * ongoing cpu hotplug operation.
				 */
	int refcount;
} cpu_hotplug;

void __init cpu_hotplug_init(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_init(&cpu_hotplug.lock);
	cpu_hotplug.refcount = 0;
}
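
/*
 * Annotation (not part of the original file): cpu_hotplug_init() is
 * expected to run exactly once during early boot, from start_kernel(),
 * before any secondary cpu can be brought up.
 */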

/* Represents all CPUs usable by the scheduler, e.g. for load-balancing */
cpumask_t cpu_active_map;

#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
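
/*
 * Illustrative reader-side usage (an annotation, not code from this
 * file): a section that must see a stable set of online cpus while it
 * may sleep is bracketed by the two calls above; do_something() is a
 * hypothetical helper.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);	(no cpu can go offline in here)
 *	put_online_cpus();
 */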

#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_map and cpu_present_map.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

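/*
 * Illustrative notifier usage (an annotation; my_cpu_callback and
 * my_cpu_notifier are hypothetical names, not code from this file):
 *
 *	static int __cpuinit my_cpu_callback(struct notifier_block *nb,
 *					     unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			(set up per-cpu state for cpu)
 *			break;
 *		case CPU_DEAD:
 *			(tear it down again)
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier __cpuinitdata = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 * followed by register_cpu_notifier(&my_cpu_notifier) at init time.
 */
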
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
			       "(state = %ld, flags = %x)\n",
			       p->comm, task_pid_nr(p), cpu,
			       p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
				param->hcpu);

	/*
	 * Force the idle task to run as soon as we yield: it should
	 * immediately notice that the cpu is offline and die quickly.
	 */
	sched_idle_next();
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	cpumask_t old_allowed, tmp;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		nr_calls--;
		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					  hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
		       __func__, cpu);
		err = -EINVAL;
		goto out_release;
	}

	/* Ensure that we are not runnable on the dying cpu */
	old_allowed = current->cpus_allowed;
	cpus_setall(tmp);
	cpu_clear(cpu, tmp);
	set_cpus_allowed_ptr(current, &tmp);
	tmp = cpumask_of_cpu(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();

		goto out_allowed;
	}
	BUG_ON(cpu_online(cpu));

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
				    hcpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_allowed:
	set_cpus_allowed_ptr(current, &old_allowed);
out_release:
	cpu_hotplug_done();
	if (!err) {
		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();
	}
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err = 0;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	cpu_clear(cpu, cpu_active_map);

	/*
	 * Make sure all cpus have rescheduled and are no longer
	 * using a stale version of cpu_active_map.
	 * This is not strictly necessary because the stop_machine()
	 * that we run down the line already provides the required
	 * synchronization.  But it's really a side effect and we do not
	 * want to depend on the innards of stop_machine here.
	 */
	synchronize_sched();

	err = _cpu_down(cpu, 0);

	if (cpu_online(cpu))
		cpu_set(cpu, cpu_active_map);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
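
/*
 * Annotation (not part of the original file): cpu_down() and cpu_up()
 * are normally reached from userspace through the sysfs "online"
 * attribute, e.g. "echo 0 > /sys/devices/system/cpu/cpu1/online",
 * whose store hook lives in drivers/base/cpu.c.
 */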
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
					-1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		nr_calls--;
		printk("%s: attempt to bring up CPU %u failed\n",
		       __func__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	cpu_set(cpu, cpu_active_map);

	/* Now tell the notifiers that the cpu is up and running. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__raw_notifier_call_chain(&cpu_chain,
				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_isset(cpu, cpu_possible_map)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
		printk(KERN_ERR "please check additional_cpus= boot "
			"parameter\n");
#endif
		return -EINVAL;
	}

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = first_cpu(cpu_online_map);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpus_clear(frozen_cpus);
	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error) {
			cpu_set(cpu, frozen_cpus);
			printk("CPU%d is down\n", cpu);
		} else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
			       cpu, error);
			break;
		}
	}
	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpus_empty(frozen_cpus))
		goto out;

	printk("Enabling non-boot CPUs ...\n");
	for_each_cpu_mask_nr(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}
	cpus_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
#endif /* CONFIG_PM_SLEEP_SMP */
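
/*
 * Annotation (not part of the original file): disable_nonboot_cpus() and
 * enable_nonboot_cpus() are called by the suspend/hibernate core in
 * kernel/power/ around the low-power transition, after user tasks have
 * been frozen; that is why they pass tasks_frozen = 1 to
 * _cpu_down()/_cpu_up().
 */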

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (cpu_isset(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
}
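
/*
 * Annotation (not part of the original file): on x86, for instance, the
 * secondary-cpu start-up path calls notify_cpu_starting() from its early
 * bring-up code, before local interrupts are enabled, as required by the
 * kernel-doc comment above.
 */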

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr < NR_CPUS, the binary value 1<<nr.
 *
 * It is used by cpumask_of_cpu() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
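
/*
 * How the "back into row 0" trick is used (an annotation sketching the
 * lookup, in the spirit of the get_cpu_mask() helper in linux/cpumask.h):
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return (const cpumask_t *)p;
 *
 * Row 1 + cpu % BITS_PER_LONG has bit cpu % BITS_PER_LONG set in its
 * first word.  Backing the pointer up by cpu / BITS_PER_LONG words (into
 * the all-zero row 0) places that word at index cpu / BITS_PER_LONG of
 * the returned mask, so exactly bit 'cpu' is set.
 */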

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);