/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

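/*
 * tick_broadcast_device is the tick_device wrapper around the current
 * broadcast clock event device. tick_broadcast_mask holds the cpus whose
 * local device is served by the periodic broadcast. tmpmask is scratch
 * space used under tick_broadcast_lock. tick_broadcast_force records a
 * CLOCK_EVT_NOTIFY_BROADCAST_FORCE request, which keeps broadcasting
 * enabled even when BROADCAST_OFF is requested.
 */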
static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
        return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
        return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
        if (bc)
                tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
                                        struct clock_event_device *newdev)
{
        if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
            (newdev->features & CLOCK_EVT_FEAT_C3STOP))
                return false;

        if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
            !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
                return false;

        return !curdev || newdev->rating > curdev->rating;
}

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
        struct clock_event_device *cur = tick_broadcast_device.evtdev;

        if (!tick_check_broadcast_device(cur, dev))
                return;

        if (!try_module_get(dev->owner))
                return;

        clockevents_exchange_device(cur, dev);
        if (cur)
                cur->event_handler = clockevents_handle_noop;
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_broadcast_mask))
                tick_broadcast_start_periodic(dev);
        /*
         * Inform all cpus about this. We might be in a situation
         * where we did not switch to oneshot mode because the per cpu
         * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
         * of a oneshot capable broadcast device. Without that
         * notification the system stays stuck in periodic mode
         * forever.
         */
        if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_clock_notify();
}

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
        return (dev && tick_broadcast_device.evtdev == dev);
}

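/*
 * Fallback broadcast function: warn once instead of silently dropping
 * the tick when no real broadcast mechanism is available.
 */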
static void err_broadcast(const struct cpumask *mask)
{
        pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

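/*
 * Make sure the device has a broadcast function. If the architecture
 * does not provide one, fall back to err_broadcast.
 */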
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
        if (!dev->broadcast)
                dev->broadcast = tick_broadcast;
        if (!dev->broadcast) {
                pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
                             dev->name);
                dev->broadcast = err_broadcast;
        }
}

/*
 * Check if the device is dysfunctional and a placeholder which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Devices might be registered with both periodic and oneshot
         * mode disabled. This signals that the device needs to be
         * operated from the broadcast device and is a placeholder for
         * the cpu local device.
         */
        if (!tick_device_is_functional(dev)) {
                dev->event_handler = tick_handle_periodic;
                tick_device_setup_broadcast_func(dev);
                cpumask_set_cpu(cpu, tick_broadcast_mask);
                tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
                ret = 1;
        } else {
                /*
                 * When the new device is not affected by the stop
                 * feature and the cpu is marked in the broadcast mask
                 * then clear the broadcast bit.
                 */
                if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
                        int cpu = smp_processor_id();
                        cpumask_clear_cpu(cpu, tick_broadcast_mask);
                        tick_broadcast_clear_oneshot(cpu);
                } else {
                        tick_device_setup_broadcast_func(dev);
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
        return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
        struct clock_event_device *evt = td->evtdev;

        if (!evt)
                return -ENODEV;

        if (!evt->event_handler)
                return -EINVAL;

        evt->event_handler(evt);
        return 0;
}
#endif

/*
 * Broadcast the event to the cpus which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
        int cpu = smp_processor_id();
        struct tick_device *td;

        /*
         * Check if the current cpu is in the mask
         */
        if (cpumask_test_cpu(cpu, mask)) {
                cpumask_clear_cpu(cpu, mask);
                td = &per_cpu(tick_cpu_device, cpu);
                td->evtdev->event_handler(td->evtdev);
        }

        if (!cpumask_empty(mask)) {
                /*
                 * It might be necessary to actually check whether the devices
                 * have different broadcast functions. For now, just use the
                 * broadcast function of the first device. This works as long
                 * as we have this misfeature only on x86 (lapic).
                 */
                td = &per_cpu(tick_cpu_device, cpumask_first(mask));
                td->evtdev->broadcast(mask);
        }
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
        raw_spin_lock(&tick_broadcast_lock);

        cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
        tick_do_broadcast(tmpmask);

        raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
        ktime_t next;

        tick_do_periodic_broadcast();

        /*
         * The device is in periodic mode. No reprogramming necessary:
         */
        if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
                return;

        /*
         * Set up the next period for devices which do not have
         * periodic mode. We read dev->next_event first and add to it
         * when the event has already expired. clockevents_program_event()
         * sets dev->next_event only when the event is really
         * programmed to the device.
         */
        for (next = dev->next_event; ;) {
                next = ktime_add(next, tick_period);

                if (!clockevents_program_event(dev, next, false))
                        return;
                tick_do_periodic_broadcast();
        }
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags;
        int cpu, bc_stopped;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        cpu = smp_processor_id();
        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;
        bc = tick_broadcast_device.evtdev;

        /*
         * Is the device not affected by the powerstate?
         */
        if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
                goto out;

        if (!tick_device_is_functional(dev))
                goto out;

        bc_stopped = cpumask_empty(tick_broadcast_mask);

        switch (*reason) {
        case CLOCK_EVT_NOTIFY_BROADCAST_ON:
        case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
                if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
                        if (tick_broadcast_device.mode ==
                            TICKDEV_MODE_PERIODIC)
                                clockevents_shutdown(dev);
                }
                if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
                        tick_broadcast_force = 1;
                break;
        case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
                if (!tick_broadcast_force &&
                    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
                        if (tick_broadcast_device.mode ==
                            TICKDEV_MODE_PERIODIC)
                                tick_setup_periodic(dev, 0);
                }
                break;
        }

        if (cpumask_empty(tick_broadcast_mask)) {
                if (!bc_stopped)
                        clockevents_shutdown(bc);
        } else if (bc_stopped) {
                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                        tick_broadcast_start_periodic(bc);
                else
                        tick_broadcast_setup_oneshot(bc);
        }
out:
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
        if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
                printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
                       "offline CPU #%d\n", *oncpu);
        else
                tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
        if (!broadcast)
                dev->event_handler = tick_handle_periodic;
        else
                dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
        struct clock_event_device *bc;
        unsigned long flags;
        unsigned int cpu = *cpup;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        cpumask_clear_cpu(cpu, tick_broadcast_mask);

        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                if (bc && cpumask_empty(tick_broadcast_mask))
                        clockevents_shutdown(bc);
        }

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

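/*
 * Shut down the broadcast device on system suspend. It is restarted by
 * tick_resume_broadcast() on resume.
 */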
void tick_suspend_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        if (bc)
                clockevents_shutdown(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

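/*
 * Restore the broadcast device on system resume and restart it in the
 * mode it was using before suspend. The return value tells the caller
 * whether the tick of this cpu is currently handled by the broadcast
 * device.
 */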
int tick_resume_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;
        int broadcast = 0;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;

        if (bc) {
                clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

                switch (tick_broadcast_device.mode) {
                case TICKDEV_MODE_PERIODIC:
                        if (!cpumask_empty(tick_broadcast_mask))
                                tick_broadcast_start_periodic(bc);
                        broadcast = cpumask_test_cpu(smp_processor_id(),
                                                     tick_broadcast_mask);
                        break;
                case TICKDEV_MODE_ONESHOT:
                        if (!cpumask_empty(tick_broadcast_mask))
                                broadcast = tick_resume_broadcast_oneshot(bc);
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

        return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT

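/*
 * tick_broadcast_oneshot_mask: cpus whose local device is stopped and
 *      which wait for a wakeup from the oneshot broadcast device.
 * tick_broadcast_pending_mask: cpus whose expired event is handled by
 *      the broadcast IPI, so they must not reprogram their stale local
 *      timer on idle exit.
 * tick_broadcast_force_mask: cpus whose local event already expired and
 *      which therefore get the next broadcast unconditionally; checked
 *      via tick_check_broadcast_expired() before entering deep idle.
 */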
static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
        return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
        return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
                                        const struct cpumask *cpumask)
{
        if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
                return;

        if (cpumask_equal(bc->cpumask, cpumask))
                return;

        bc->cpumask = cpumask;
        irq_set_affinity(bc->irq, bc->cpumask);
}

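/*
 * Program the broadcast device for the given expiry time on behalf of
 * @cpu and, if supported, steer the broadcast interrupt to that cpu.
 */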
static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
                                    ktime_t expires, int force)
{
        int ret;

        if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
                clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

        ret = clockevents_program_event(bc, expires, force);
        if (!ret)
                tick_broadcast_set_affinity(bc, cpumask_of(cpu));
        return ret;
}

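/*
 * Put the broadcast device back into oneshot mode on resume. It is
 * reprogrammed again when the next cpu enters idle and hands its event
 * over to the broadcast device.
 */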
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
        return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
        if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
                struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

                clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
        }
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
        struct tick_device *td;
        ktime_t now, next_event;
        int cpu, next_cpu = 0;

        raw_spin_lock(&tick_broadcast_lock);
again:
        dev->next_event.tv64 = KTIME_MAX;
        next_event.tv64 = KTIME_MAX;
        cpumask_clear(tmpmask);
        now = ktime_get();
        /* Find all expired events */
        for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event.tv64 <= now.tv64) {
                        cpumask_set_cpu(cpu, tmpmask);
                        /*
                         * Mark the remote cpu in the pending mask, so
                         * it can avoid reprogramming the cpu local
                         * timer in tick_broadcast_oneshot_control().
                         */
                        cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
                } else if (td->evtdev->next_event.tv64 < next_event.tv64) {
                        next_event.tv64 = td->evtdev->next_event.tv64;
                        next_cpu = cpu;
                }
        }

        /* Take care of enforced broadcast requests */
        cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
        cpumask_clear(tick_broadcast_force_mask);

        /*
         * Wakeup the cpus which have an expired event.
         */
        tick_do_broadcast(tmpmask);

        /*
         * Two reasons for reprogramming:
         *
         * - The global event did not expire any CPU local
         * events. This happens in dyntick mode, as the maximum PIT
         * delta is quite small.
         *
         * - There are pending events on sleeping CPUs which were not
         * in the event mask
         */
        if (next_event.tv64 != KTIME_MAX) {
                /*
                 * Rearm the broadcast device. If event expired,
                 * repeat the above
                 */
                if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
                        goto again;
        }
        raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags;
        ktime_t now;
        int cpu;

        /*
         * Periodic mode does not care about the enter/exit of power
         * states
         */
        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                return;

        /*
         * We are called with preemption disabled from the depth of the
         * idle code, so we can't be moved away.
         */
        cpu = smp_processor_id();
        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;

        if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
                return;

        bc = tick_broadcast_device.evtdev;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
        if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
                WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
                if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
                        /*
                         * We only reprogram the broadcast timer if we
                         * did not mark ourselves in the force mask and
                         * if the cpu local event is earlier than the
                         * broadcast event. If the current CPU is in
                         * the force mask, then we are going to be
                         * woken by the IPI right away.
                         */
                        if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
                            dev->next_event.tv64 < bc->next_event.tv64)
                                tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
                }
        } else {
                if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
                        if (dev->next_event.tv64 == KTIME_MAX)
                                goto out;
                        /*
                         * The cpu which was handling the broadcast
                         * timer marked this cpu in the broadcast
                         * pending mask and fired the broadcast
                         * IPI. So we are going to handle the expired
                         * event anyway via the broadcast IPI
                         * handler. No need to reprogram the timer
                         * with an already expired event.
                         */
                        if (cpumask_test_and_clear_cpu(cpu,
                                               tick_broadcast_pending_mask))
                                goto out;

                        /*
                         * If the pending bit is not set, then we are
                         * either the CPU handling the broadcast
                         * interrupt or we got woken by something else.
                         *
                         * We are no longer in the broadcast mask, so
                         * if the cpu local expiry time is already
                         * reached, we would reprogram the cpu local
                         * timer with an already expired event.
                         *
                         * This can lead to a ping-pong when we return
                         * to idle and therefore rearm the broadcast
                         * timer before the cpu local timer was able
                         * to fire. This happens because the forced
                         * reprogramming makes sure that the event
                         * will happen in the future and depending on
                         * the min_delta setting this might be far
                         * enough out that the ping-pong starts.
                         *
                         * If the cpu local next_event has expired
                         * then we know that the broadcast timer
                         * next_event has expired as well and
                         * broadcast is about to be handled. So we
                         * avoid reprogramming and enforce that the
                         * broadcast handler, which did not run yet,
                         * will invoke the cpu local handler.
                         *
                         * We cannot call the handler directly from
                         * here, because we might be in a NOHZ phase
                         * and we did not go through the irq_enter()
                         * nohz fixups.
                         */
                        now = ktime_get();
                        if (dev->next_event.tv64 <= now.tv64) {
                                cpumask_set_cpu(cpu, tick_broadcast_force_mask);
                                goto out;
                        }
                        /*
                         * We got woken by something else. Reprogram
                         * the cpu local timer device.
                         */
                        tick_program_event(dev->next_event, 1);
                }
        }
out:
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
        cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
}

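/*
 * Initialize the next_event of the per-cpu devices in @mask to @expires,
 * so cpus which were waiting for the periodic broadcast have a valid
 * expiry time when the oneshot broadcast takes over.
 */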
static void tick_broadcast_init_next_event(struct cpumask *mask,
                                           ktime_t expires)
{
        struct tick_device *td;
        int cpu;

        for_each_cpu(cpu, mask) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev)
                        td->evtdev->next_event = expires;
        }
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
        int cpu = smp_processor_id();

        /* Set it up only once! */
        if (bc->event_handler != tick_handle_oneshot_broadcast) {
                int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

                bc->event_handler = tick_handle_oneshot_broadcast;

                /* Take the do_timer update */
                if (!tick_nohz_full_cpu(cpu))
                        tick_do_timer_cpu = cpu;

                /*
                 * We must be careful here. There might be other CPUs
                 * waiting for periodic broadcast. We need to set the
                 * oneshot_mask bits for those and program the
                 * broadcast device to fire.
                 */
                cpumask_copy(tmpmask, tick_broadcast_mask);
                cpumask_clear_cpu(cpu, tmpmask);
                cpumask_or(tick_broadcast_oneshot_mask,
                           tick_broadcast_oneshot_mask, tmpmask);

                if (was_periodic && !cpumask_empty(tmpmask)) {
                        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
                        tick_broadcast_init_next_event(tmpmask,
                                                       tick_next_period);
                        tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
                } else
                        bc->next_event.tv64 = KTIME_MAX;
        } else {
                /*
                 * The first cpu which switches to oneshot mode sets
                 * the bit for all other cpus which are in the general
                 * (periodic) broadcast mask. So the bit is set and
                 * would prevent the first broadcast enter after this
                 * from programming the bc device.
                 */
                tick_broadcast_clear_oneshot(cpu);
        }
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
        bc = tick_broadcast_device.evtdev;
        if (bc)
                tick_broadcast_setup_oneshot(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
        unsigned long flags;
        unsigned int cpu = *cpup;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Clear the broadcast mask flag for the dead cpu, but do not
         * stop the broadcast device!
         */
        cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
        return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

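/*
 * Allocate the cpumasks used by the broadcast code. Called early in the
 * boot process from tick_init().
 */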
void __init tick_broadcast_init(void)
{
        zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
        zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
        zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
        zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
        zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}