/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
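/*
 * tick_broadcast_mask tracks the CPUs whose per-cpu tick device is
 * currently handled by the broadcast device; tmpmask is scratch space.
 * Both bitmaps are protected by tick_broadcast_lock.
 */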
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
static DECLARE_BITMAP(tmpmask, NR_CPUS);
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
        return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
        return to_cpumask(tick_broadcast_mask);
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
        if (bc)
                tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as the broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
        struct clock_event_device *cur = tick_broadcast_device.evtdev;

        if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
            (tick_broadcast_device.evtdev &&
             tick_broadcast_device.evtdev->rating >= dev->rating) ||
             (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;

        clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
        if (cur)
                cur->event_handler = clockevents_handle_noop;
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_get_broadcast_mask()))
                tick_broadcast_start_periodic(dev);
        return 1;
}

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
        return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Check if the device is dysfunctional and a placeholder which needs
 * to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Devices might be registered with both periodic and oneshot
         * mode disabled. This signals that the device needs to be
         * operated from the broadcast device and is a placeholder for
         * the cpu local device.
         */
        if (!tick_device_is_functional(dev)) {
                dev->event_handler = tick_handle_periodic;
                cpumask_set_cpu(cpu, tick_get_broadcast_mask());
                tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
                ret = 1;
        } else {
                /*
                 * When the new device is not affected by the stop
                 * feature and the cpu is marked in the broadcast mask
                 * then clear the broadcast bit.
                 */
                if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
                        int cpu = smp_processor_id();

                        cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
                        tick_broadcast_clear_oneshot(cpu);
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

        return ret;
}

/*
 * Broadcast the event to the cpus which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
        int cpu = smp_processor_id();
        struct tick_device *td;

        /*
         * Check if the current cpu is in the mask
         */
        if (cpumask_test_cpu(cpu, mask)) {
                cpumask_clear_cpu(cpu, mask);
                td = &per_cpu(tick_cpu_device, cpu);
                td->evtdev->event_handler(td->evtdev);
        }

        if (!cpumask_empty(mask)) {
                /*
                 * It might be necessary to actually check whether the devices
                 * have different broadcast functions. For now, just use the
                 * one of the first device. This works as long as we have this
                 * misfeature only on x86 (lapic).
                 */
                td = &per_cpu(tick_cpu_device, cpumask_first(mask));
                td->evtdev->broadcast(mask);
        }
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
        raw_spin_lock(&tick_broadcast_lock);

        cpumask_and(to_cpumask(tmpmask),
                    cpu_online_mask, tick_get_broadcast_mask());
        tick_do_broadcast(to_cpumask(tmpmask));

        raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
        ktime_t next;

        tick_do_periodic_broadcast();

        /*
         * The device is in periodic mode. No reprogramming necessary:
         */
        if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
                return;

        /*
         * Set up the next period for devices which do not have
         * periodic mode. We read dev->next_event first and add to it
         * when the event already expired. clockevents_program_event()
         * sets dev->next_event only when the event is really
         * programmed to the device.
         */
        for (next = dev->next_event; ;) {
                next = ktime_add(next, tick_period);

                if (!clockevents_program_event(dev, next, false))
                        return;
                tick_do_periodic_broadcast();
        }
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags;
        int cpu, bc_stopped;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        cpu = smp_processor_id();
        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;
        bc = tick_broadcast_device.evtdev;

        /*
         * Is the device not affected by the powerstate?
         */
        if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
                goto out;

        if (!tick_device_is_functional(dev))
                goto out;

        bc_stopped = cpumask_empty(tick_get_broadcast_mask());

        switch (*reason) {
        case CLOCK_EVT_NOTIFY_BROADCAST_ON:
        case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
                if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
                        cpumask_set_cpu(cpu, tick_get_broadcast_mask());
                        if (tick_broadcast_device.mode ==
                            TICKDEV_MODE_PERIODIC)
                                clockevents_shutdown(dev);
                }
                if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
                        tick_broadcast_force = 1;
                break;
        case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
                if (!tick_broadcast_force &&
                    cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
                        cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
                        if (tick_broadcast_device.mode ==
                            TICKDEV_MODE_PERIODIC)
                                tick_setup_periodic(dev, 0);
                }
                break;
        }

        if (cpumask_empty(tick_get_broadcast_mask())) {
                if (!bc_stopped)
                        clockevents_shutdown(bc);
        } else if (bc_stopped) {
                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                        tick_broadcast_start_periodic(bc);
                else
                        tick_broadcast_setup_oneshot(bc);
        }
out:
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
        if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
                printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
                       "offline CPU #%d\n", *oncpu);
        else
                tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
        if (!broadcast)
                dev->event_handler = tick_handle_periodic;
        else
                dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
        struct clock_event_device *bc;
        unsigned long flags;
        unsigned int cpu = *cpup;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        cpumask_clear_cpu(cpu, tick_get_broadcast_mask());

        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                if (bc && cpumask_empty(tick_get_broadcast_mask()))
                        clockevents_shutdown(bc);
        }

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

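/*
 * Shut the broadcast device down on system suspend; it is started
 * again from tick_resume_broadcast() on resume.
 */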
void tick_suspend_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        if (bc)
                clockevents_shutdown(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

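/*
 * Restart the broadcast device after resume. Returns nonzero when the
 * tick of the calling CPU is handled by the broadcast device.
 */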
int tick_resume_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;
        int broadcast = 0;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;

        if (bc) {
                clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

                switch (tick_broadcast_device.mode) {
                case TICKDEV_MODE_PERIODIC:
                        if (!cpumask_empty(tick_get_broadcast_mask()))
                                tick_broadcast_start_periodic(bc);
                        broadcast = cpumask_test_cpu(smp_processor_id(),
                                                     tick_get_broadcast_mask());
                        break;
                case TICKDEV_MODE_ONESHOT:
                        broadcast = tick_resume_broadcast_oneshot(bc);
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

        return broadcast;
}

#ifdef CONFIG_TICK_ONESHOT

/* FIXME: use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
        return to_cpumask(tick_broadcast_oneshot_mask);
}

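/*
 * Program the broadcast device to fire at the given expiry time.
 */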
static int tick_broadcast_set_event(ktime_t expires, int force)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        return clockevents_program_event(bc, expires, force);
}

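/*
 * Called from tick_resume_broadcast() to switch the broadcast device
 * back to oneshot mode after resume.
 */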
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
        return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
        if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
                struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

                clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
        }
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
        struct tick_device *td;
        ktime_t now, next_event;
        int cpu;

        raw_spin_lock(&tick_broadcast_lock);
again:
        dev->next_event.tv64 = KTIME_MAX;
        next_event.tv64 = KTIME_MAX;
        cpumask_clear(to_cpumask(tmpmask));
        now = ktime_get();
        /* Find all expired events */
        for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event.tv64 <= now.tv64)
                        cpumask_set_cpu(cpu, to_cpumask(tmpmask));
                else if (td->evtdev->next_event.tv64 < next_event.tv64)
                        next_event.tv64 = td->evtdev->next_event.tv64;
        }

        /*
         * Wake up the cpus which have an expired event.
         */
        tick_do_broadcast(to_cpumask(tmpmask));

        /*
         * Two reasons for reprogramming:
         *
         * - The global event did not expire any CPU local
         *   events. This happens in dyntick mode, as the maximum PIT
         *   delta is quite small.
         *
         * - There are pending events on sleeping CPUs which were not
         *   in the event mask.
         */
        if (next_event.tv64 != KTIME_MAX) {
                /*
                 * Rearm the broadcast device. If the event expired,
                 * repeat the above.
                 */
                if (tick_broadcast_set_event(next_event, 0))
                        goto again;
        }
        raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags;
        int cpu;

        /*
         * Periodic mode does not care about the enter/exit of power
         * states
         */
        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                return;

        /*
         * We are called with preemption disabled from the depth of the
         * idle code, so we can't be moved away.
         */
        cpu = smp_processor_id();
        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;

        if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
                return;

        bc = tick_broadcast_device.evtdev;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
        if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
                if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
                        cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
                        if (dev->next_event.tv64 < bc->next_event.tv64)
                                tick_broadcast_set_event(dev->next_event, 1);
                }
        } else {
                if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
                        cpumask_clear_cpu(cpu,
                                          tick_get_broadcast_oneshot_mask());
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
                        if (dev->next_event.tv64 != KTIME_MAX)
                                tick_program_event(dev->next_event, 1);
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
        cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}

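/*
 * Preset the next_event of the per-cpu tick devices of all CPUs in
 * @mask to @expires.
 */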
static void tick_broadcast_init_next_event(struct cpumask *mask,
                                           ktime_t expires)
{
        struct tick_device *td;
        int cpu;

        for_each_cpu(cpu, mask) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev)
                        td->evtdev->next_event = expires;
        }
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
        int cpu = smp_processor_id();

        /* Set it up only once! */
        if (bc->event_handler != tick_handle_oneshot_broadcast) {
                int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

                bc->event_handler = tick_handle_oneshot_broadcast;
                clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

                /* Take the do_timer update */
                tick_do_timer_cpu = cpu;

                /*
                 * We must be careful here. There might be other CPUs
                 * waiting for periodic broadcast. We need to set the
                 * oneshot_mask bits for those and program the
                 * broadcast device to fire.
                 */
                cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
                cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
                cpumask_or(tick_get_broadcast_oneshot_mask(),
                           tick_get_broadcast_oneshot_mask(),
                           to_cpumask(tmpmask));

                if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
                        tick_broadcast_init_next_event(to_cpumask(tmpmask),
                                                       tick_next_period);
                        tick_broadcast_set_event(tick_next_period, 1);
                } else
                        bc->next_event.tv64 = KTIME_MAX;
        } else {
                /*
                 * The first cpu which switches to oneshot mode sets
                 * the bit for all other cpus which are in the general
                 * (periodic) broadcast mask. So the bit is set and
                 * would prevent the first broadcast enter after this
                 * from programming the bc device.
                 */
                tick_broadcast_clear_oneshot(cpu);
        }
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
        bc = tick_broadcast_device.evtdev;
        if (bc)
                tick_broadcast_setup_oneshot(bc);
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
        unsigned long flags;
        unsigned int cpu = *cpup;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Clear the broadcast mask flag for the dead cpu, but do not
         * stop the broadcast device!
         */
        cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
        return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif