/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
static DECLARE_BITMAP(tmpmask, NR_CPUS);
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

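/*
 * Rough picture of the mechanism implemented below: a CPU about to
 * enter a deep C-state sets its bit in tick_broadcast_mask (periodic
 * mode) or tick_broadcast_oneshot_mask (oneshot mode) and shuts down
 * its local clock event device. The broadcast device, which keeps
 * running in deep C-states, fires instead, and tick_do_broadcast()
 * invokes the local event handlers on behalf of the sleeping cpus.
 */
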
#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return to_cpumask(tick_broadcast_mask);
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	     (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_get_broadcast_mask()))
		tick_broadcast_start_periodic(dev);
	return 1;
}

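/*
 * Illustrative example of the decision above (device names and
 * ratings are hypothetical): given an installed broadcast device
 * with rating 100, a newly registered device with rating 50 is
 * rejected, a dummy or C3STOP-afflicted device is rejected
 * regardless of rating, and a functional rating-200 device replaces
 * the current one and is started immediately when cpus are already
 * waiting in the broadcast mask.
 */
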
/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpumask_set_cpu(cpu, tick_get_broadcast_mask());
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();

			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			tick_broadcast_clear_oneshot(cpu);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return ret;
}

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic).
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

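/*
 * "Mangled" in the comment above means the mask is modified in
 * place: the current cpu is removed from it and serviced by a direct
 * event_handler call, so no IPI is sent to ourselves. E.g. with
 * cpus 0, 2 and 3 set and cpu 0 running this code, only cpus 2 and 3
 * are left for the ->broadcast() IPI. Callers therefore pass a
 * scratch copy (tmpmask), never a mask they still need.
 */
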
/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(to_cpumask(tmpmask),
		    cpu_online_mask, tick_get_broadcast_mask());
	tick_do_broadcast(to_cpumask(tmpmask));

	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}

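/*
 * Worked example for the loop above, with illustrative numbers and a
 * 10ms tick_period: if dev->next_event was 100ms but the event is
 * handled late at 125ms, programming 110ms and 120ms fails as both
 * are in the past, so the periodic broadcast is run by hand for the
 * missed periods and the device is finally armed for 130ms.
 */
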
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_get_broadcast_mask());

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
			cpumask_set_cpu(cpu, tick_get_broadcast_mask());
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_get_broadcast_mask())) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

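/*
 * Sketch of the expected call path (the caller side lives outside
 * this file): an idle driver about to enter a lapic-stopping C-state
 * typically issues
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
 *
 * which reaches tick_broadcast_on_off() through the clockevents
 * notification code, with a matching _OFF notification when the
 * state is left.
 */
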
/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_get_broadcast_mask());

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_get_broadcast_mask()))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_get_broadcast_mask()))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_get_broadcast_mask());
			break;
		case TICKDEV_MODE_ONESHOT:
			broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}

#ifdef CONFIG_TICK_ONESHOT

/* FIXME: use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return to_cpumask(tick_broadcast_oneshot_mask);
}

static int tick_broadcast_set_event(ktime_t expires, int force)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return clockevents_program_event(bc, expires, force);
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(to_cpumask(tmpmask));
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64)
			cpumask_set_cpu(cpu, to_cpumask(tmpmask));
		else if (td->evtdev->next_event.tv64 < next_event.tv64)
			next_event.tv64 = td->evtdev->next_event.tv64;
	}

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(to_cpumask(tmpmask));

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask.
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above.
		 */
		if (tick_broadcast_set_event(next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

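/*
 * Illustrative scenario for the handler above (numbers made up):
 * cpus 1 and 2 sleep with local events due at 50us and 80us. The
 * broadcast device fires at 50us; the scan finds cpu 1 expired and
 * wakes it via tick_do_broadcast(), while cpu 2 is still pending, so
 * next_event becomes 80us and the device is rearmed for that. If
 * 80us has already passed by the time we reprogram, we loop via
 * "again".
 */
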
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
			cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(dev->next_event, 1);
		}
	} else {
		if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
			cpumask_clear_cpu(cpu,
					  tick_get_broadcast_oneshot_mask());
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 != KTIME_MAX)
				tick_program_event(dev->next_event, 1);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

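/*
 * Sketch of the idle-path usage (callers are outside this file):
 * before entering a timer-stopping state the idle code typically
 * does
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 *
 * and sends CLOCK_EVT_NOTIFY_BROADCAST_EXIT on wakeup; both end up
 * here with the respective reason.
 */
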
/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

		/* Take the do_timer update */
		tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
		cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
		cpumask_or(tick_get_broadcast_oneshot_mask(),
			   tick_get_broadcast_oneshot_mask(),
			   to_cpumask(tmpmask));

		if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
			tick_broadcast_init_next_event(to_cpumask(tmpmask),
						       tick_next_period);
			tick_broadcast_set_event(tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * from programming the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

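/*
 * Note on the migration above: cpus parked in the periodic broadcast
 * mask are moved into the oneshot mask with next_event set to
 * tick_next_period, so they get one more tick at the old cadence and
 * then reprogram themselves from the oneshot broadcast handler.
 */
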
/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif