/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

struct tick_device tick_broadcast_device;
static cpumask_t tick_broadcast_mask;
static DEFINE_SPINLOCK(tick_broadcast_lock);

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

cpumask_t *tick_get_broadcast_mask(void)
{
	return &tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as a broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	if (tick_broadcast_device.evtdev ||
	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(NULL, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpus_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	return 1;
}
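
/*
 * Illustrative sketch (not part of this file): a device can only be
 * picked as broadcast device when it keeps running in deep C states,
 * i.e. it does not have CLOCK_EVT_FEAT_C3STOP set. A PIT style
 * driver might register such a device roughly like this, where
 * pit_set_mode/pit_set_next_event are hypothetical helpers:
 *
 *	static struct clock_event_device pit_ce = {
 *		.name		= "pit",
 *		.features	= CLOCK_EVT_FEAT_PERIODIC |
 *				  CLOCK_EVT_FEAT_ONESHOT,
 *		.set_mode	= pit_set_mode,
 *		.set_next_event	= pit_set_next_event,
 *	};
 *
 *	clockevents_register_device(&pit_ce);
 *
 * Registration ends up in tick_check_new_device(), which calls
 * tick_check_broadcast_device() above.
 */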

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Check if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpu_set(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask,
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();

			cpu_clear(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		}
	}
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
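
/*
 * Illustrative note: tick_device_is_functional() (tick-internal.h)
 * just checks the CLOCK_EVT_FEAT_DUMMY flag, so the placeholder for
 * a stopped per cpu timer amounts to a registration along the lines
 * of (sketch, the name is made up):
 *
 *	static struct clock_event_device dummy_ce = {
 *		.name		= "dummy",
 *		.features	= CLOCK_EVT_FEAT_DUMMY,
 *	};
 *
 * Such a device is then driven from the broadcast device via the
 * tick_handle_periodic handler installed above.
 */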

/*
 * Broadcast the event to the cpus which are set in the mask
 */
int tick_do_broadcast(cpumask_t mask)
{
	int ret = 0, cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check if the current cpu is in the mask
	 */
	if (cpu_isset(cpu, mask)) {
		cpu_clear(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
		ret = 1;
	}

	if (!cpus_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use that
		 * of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic).
		 */
		cpu = first_cpu(mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->broadcast(mask);
		ret = 1;
	}
	return ret;
}
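
/*
 * Illustrative sketch (an assumption about the x86 side, not defined
 * here): the per device ->broadcast() callback typically kicks the
 * remote cpus with an IPI whose handler runs the cpu local
 * event_handler, e.g. something along the lines of:
 *
 *	static void lapic_timer_broadcast(cpumask_t mask)
 *	{
 *		send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 *	}
 */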

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	cpumask_t mask;

	spin_lock(&tick_broadcast_lock);

	cpus_and(mask, cpu_online_map, tick_broadcast_mask);
	tick_do_broadcast(mask);

	spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Set up the next period for devices which do not have
	 * periodic mode:
	 */
	for (;;) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_do_periodic_broadcast();
	}
}
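
/*
 * Worked example for the loop above (numbers are illustrative): with
 * HZ=250 tick_period is 4ms. When the event fired at t=100ms, the
 * next one is programmed for t=104ms. If the handler ran so late that
 * t=104ms is already in the past, programming fails, the broadcast is
 * run once more and the following period is tried, until a programmed
 * event lies in the future.
 */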

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(void *why)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags, *reason = why;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device in broadcast mode forever or is it not
	 * affected by the powerstate?
	 */
	if (!dev || !tick_device_is_functional(dev) ||
	    !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_ON) {
		if (!cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_set(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				clockevents_set_mode(dev,
						     CLOCK_EVT_MODE_SHUTDOWN);
		}
	} else {
		if (cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_clear(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
	}

	if (cpus_empty(tick_broadcast_mask))
		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	else {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	int cpu = get_cpu();

	if (!cpu_isset(*oncpu, cpu_online_map)) {
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	} else {
		if (cpu == *oncpu)
			tick_do_broadcast_on_off(&reason);
		else
			smp_call_function_single(*oncpu,
						 tick_do_broadcast_on_off,
						 &reason, 1, 1);
	}
	put_cpu();
}
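
/*
 * Usage sketch (an assumption about the call path): callers do not
 * invoke this directly, but go through the clockevents notification
 * interface, e.g.:
 *
 *	int cpu = smp_processor_id();
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
 *
 * The notifier forwards reason and cpu pointer to
 * tick_broadcast_on_off() above.
 */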

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpu_clear(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpus_empty(tick_broadcast_mask))
			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpus_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpu_isset(smp_processor_id(),
					      tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}
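
/*
 * Usage sketch (an assumption about the resume path): the power
 * management code is expected to run something like
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
 *
 * which ends up here and restarts the broadcast device in the mode
 * it had before suspend.
 */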

#ifdef CONFIG_TICK_ONESHOT

static cpumask_t tick_broadcast_oneshot_mask;

/*
 * Debugging: see timer_list.c
 */
cpumask_t *tick_get_broadcast_oneshot_mask(void)
{
	return &tick_broadcast_oneshot_mask;
}

static int tick_broadcast_set_event(ktime_t expires, int force)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	ktime_t now = ktime_get();
	int res;

	for (;;) {
		res = clockevents_program_event(bc, expires, now);
		if (!res || !force)
			return res;
		now = ktime_get();
		expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
	}
}
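
/*
 * Note on the force loop above: when programming 'expires' fails
 * because that time is already in the past, the expiry is moved
 * bc->min_delta_ns beyond the current ktime_get() reading and
 * retried. E.g. for a (hypothetical) device with min_delta_ns of
 * 30000, the retry is scheduled 30us after 'now'.
 */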

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Reprogram the broadcast device:
 *
 * Called with tick_broadcast_lock held and interrupts disabled.
 */
static int tick_broadcast_reprogram(void)
{
	ktime_t expires = { .tv64 = KTIME_MAX };
	struct tick_device *td;
	int cpu;

	/*
	 * Find the event which expires next:
	 */
	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 < expires.tv64)
			expires = td->evtdev->next_event;
	}

	if (expires.tv64 == KTIME_MAX)
		return 0;

	return tick_broadcast_set_event(expires, 0);
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	cpumask_t mask;
	ktime_t now;
	int cpu;

	spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	mask = CPU_MASK_NONE;
	now = ktime_get();
	/* Find all expired events */
	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64)
			cpu_set(cpu, mask);
	}

	/*
	 * Wake up the cpus which have an expired event. The broadcast
	 * device is reprogrammed in the return from idle code.
	 */
	if (!tick_do_broadcast(mask)) {
		/*
		 * The global event did not expire any CPU local
		 * events. This happens in dyntick mode, as the
		 * maximum PIT delta is quite small.
		 */
		if (tick_broadcast_reprogram())
			goto again;
	}
	spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		goto out;

	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
			cpu_set(cpu, tick_broadcast_oneshot_mask);
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(dev->next_event, 1);
		}
	} else {
		if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
			cpu_clear(cpu, tick_broadcast_oneshot_mask);
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 != KTIME_MAX)
				tick_program_event(dev->next_event, 1);
		}
	}

out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
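
/*
 * Usage sketch (an assumption about the idle path): an ACPI style
 * idle routine brackets a C3 entry with
 *
 *	int cpu = smp_processor_id();
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 *	... enter the C state ...
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 *
 * so the broadcast device covers the cpu while its local apic timer
 * is stopped, and the local device is reprogrammed on exit.
 */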

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpu_clear(cpu, tick_broadcast_oneshot_mask);
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device for oneshot
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	if (bc->mode != CLOCK_EVT_MODE_ONESHOT) {
		bc->event_handler = tick_handle_oneshot_broadcast;
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
		bc->next_event.tv64 = KTIME_MAX;
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpu_clear(cpu, tick_broadcast_oneshot_mask);

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#endif