#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
+#include <linux/debugobjects.h>
#include <asm/uaccess.h>
ktime_add(xtim, tomono);
}
-/*
- * Helper function to check, whether the timer is running the callback
- * function
- */
-static inline int hrtimer_callback_running(struct hrtimer *timer)
-{
- return timer->state & HRTIMER_STATE_CALLBACK;
-}
-
/*
* Functions and macros which are different for UP/SMP systems are kept in a
* single place
*/
u64 ktime_divns(const ktime_t kt, s64 div)
{
- u64 dclc, inc, dns;
+ u64 dclc;
int sft = 0;
- dclc = dns = ktime_to_ns(kt);
- inc = div;
+ dclc = ktime_to_ns(kt);
/* Make sure the divisor is less than 2^32: */
while (div >> 32) {
sft++;
return res;
}
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+
+static struct debug_obj_descr hrtimer_debug_descr;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
+{
+ struct hrtimer *timer = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ hrtimer_cancel(timer);
+ debug_object_init(timer, &hrtimer_debug_descr);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
+{
+ switch (state) {
+
+ case ODEBUG_STATE_NOTAVAILABLE:
+ WARN_ON_ONCE(1);
+ return 0;
+
+ case ODEBUG_STATE_ACTIVE:
+ WARN_ON(1);
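+		/* fall through */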
+
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
+{
+ struct hrtimer *timer = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ hrtimer_cancel(timer);
+ debug_object_free(timer, &hrtimer_debug_descr);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
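+/*
+ * Descriptor telling the debugobjects core how to recognize an hrtimer
+ * and how to fix up init, activation and free of an object that is
+ * still active.
+ */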
+static struct debug_obj_descr hrtimer_debug_descr = {
+ .name = "hrtimer",
+ .fixup_init = hrtimer_fixup_init,
+ .fixup_activate = hrtimer_fixup_activate,
+ .fixup_free = hrtimer_fixup_free,
+};
+
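+/*
+ * Thin wrappers around the debugobjects calls; the empty stubs in the
+ * #else branch below let the call sites compile away when
+ * CONFIG_DEBUG_OBJECTS_TIMERS is disabled.
+ */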
+static inline void debug_hrtimer_init(struct hrtimer *timer)
+{
+ debug_object_init(timer, &hrtimer_debug_descr);
+}
+
+static inline void debug_hrtimer_activate(struct hrtimer *timer)
+{
+ debug_object_activate(timer, &hrtimer_debug_descr);
+}
+
+static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
+{
+ debug_object_deactivate(timer, &hrtimer_debug_descr);
+}
+
+static inline void debug_hrtimer_free(struct hrtimer *timer)
+{
+ debug_object_free(timer, &hrtimer_debug_descr);
+}
+
+static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode);
+
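+/**
+ * hrtimer_init_on_stack - initialize an on-stack timer to the given clock
+ * @timer: the timer to be initialized
+ * @clock_id: the clock to be used
+ * @mode: timer mode abs/rel
+ *
+ * Like hrtimer_init(), but registers the timer as an on-stack debug
+ * object, so debugobjects can catch a timer that is still active when
+ * its stack frame is released.
+ */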
+void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode)
+{
+ debug_object_init_on_stack(timer, &hrtimer_debug_descr);
+ __hrtimer_init(timer, clock_id, mode);
+}
+
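+/**
+ * destroy_hrtimer_on_stack - tear down a timer set up with hrtimer_init_on_stack()
+ * @timer: the on-stack timer to be removed from debug object tracking
+ *
+ * Must be called before the stack memory holding the timer is reused,
+ * otherwise debugobjects keeps a stale reference to the stack address.
+ */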
+void destroy_hrtimer_on_stack(struct hrtimer *timer)
+{
+ debug_object_free(timer, &hrtimer_debug_descr);
+}
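+
+/*
+ * Typical on-stack usage (sketch):
+ *
+ *	struct hrtimer_sleeper t;
+ *
+ *	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ *	...
+ *	destroy_hrtimer_on_stack(&t.timer);
+ *
+ * Every hrtimer_init_on_stack() must be paired with a
+ * destroy_hrtimer_on_stack() before the frame is left; the nanosleep
+ * paths below follow this pattern.
+ */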
+
+#else
+static inline void debug_hrtimer_init(struct hrtimer *timer) { }
+static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
+static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
+#endif
+
/*
* Check, whether the timer is on the callback pending list
*/
void clock_was_set(void)
{
/* Retrigger the CPU local events everywhere */
- on_each_cpu(retrigger_next_event, NULL, 0, 1);
+ on_each_cpu(retrigger_next_event, NULL, 1);
}
/*
*/
void hres_timers_resume(void)
{
- WARN_ON_ONCE(num_online_cpus() > 1);
-
/* Retrigger the CPU local events: */
retrigger_next_event(NULL);
}
/* Timer is expired, act upon the callback mode */
switch(timer->cb_mode) {
case HRTIMER_CB_IRQSAFE_NO_RESTART:
+ debug_hrtimer_deactivate(timer);
/*
* We can call the callback from here. No restart
* happens, so no danger of recursion
*/
BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
return 1;
- case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
+ case HRTIMER_CB_IRQSAFE_PERCPU:
+ case HRTIMER_CB_IRQSAFE_UNLOCKED:
/*
* This is solely for the sched tick emulation with
* dynamic tick support to ensure that we do not
* restart the tick right on the edge and end up with
* the tick timer in the softirq ! The calling site
- * takes care of this.
+	 * takes care of this. Also used for the hrtimer sleeper!
*/
+ debug_hrtimer_deactivate(timer);
return 1;
case HRTIMER_CB_IRQSAFE:
case HRTIMER_CB_SOFTIRQ:
list_add_tail(&timer->cb_entry,
&base->cpu_base->cb_pending);
timer->state = HRTIMER_STATE_PENDING;
- raise_softirq(HRTIMER_SOFTIRQ);
return 1;
default:
BUG();
return 1;
}
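+
+/*
+ * Raise HRTIMER_SOFTIRQ outside of the timer base lock. Wrapped in a
+ * helper so the !CONFIG_HIGH_RES_TIMERS build can stub it out below.
+ */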
+static inline void hrtimer_raise_softirq(void)
+{
+ raise_softirq(HRTIMER_SOFTIRQ);
+}
+
#else
static inline int hrtimer_hres_active(void) { return 0; }
{
return 0;
}
+static inline void hrtimer_raise_softirq(void) { }
#endif /* CONFIG_HIGH_RES_TIMERS */
struct hrtimer *entry;
int leftmost = 1;
+ debug_hrtimer_activate(timer);
+
/*
* Find the right place in the rbtree:
*/
* reprogramming happens in the interrupt handler. This is a
* rare case and less expensive than a smp call.
*/
+ debug_hrtimer_deactivate(timer);
timer_stats_hrtimer_clear_start_info(timer);
reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
{
struct hrtimer_clock_base *base, *new_base;
unsigned long flags;
- int ret;
+ int ret, raise;
base = lock_hrtimer_base(timer, &flags);
tim = ktime_add_safe(tim, base->resolution);
#endif
}
+
timer->expires = tim;
timer_stats_hrtimer_set_start_info(timer);
enqueue_hrtimer(timer, new_base,
new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
+ /*
+	 * The timer may have expired and been moved to the cb_pending
+	 * list. We cannot raise the softirq with the base lock held due
+	 * to a possible deadlock with the runqueue lock.
+ */
+ raise = timer->state == HRTIMER_STATE_PENDING;
+
+ /*
+ * We use preempt_disable to prevent this task from migrating after
+	 * setting up the softirq and raising it. Otherwise, if we migrate,
+ * we will raise the softirq on the wrong CPU.
+ */
+ preempt_disable();
+
unlock_hrtimer_base(timer, &flags);
+ if (raise)
+ hrtimer_raise_softirq();
+ preempt_enable();
+
return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start);
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
-#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
+#ifdef CONFIG_NO_HZ
/**
* hrtimer_get_next_event - get the time until next expiry event
*
}
#endif
-/**
- * hrtimer_init - initialize a timer to the given clock
- * @timer: the timer to be initialized
- * @clock_id: the clock to be used
- * @mode: timer mode abs/rel
- */
-void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
- enum hrtimer_mode mode)
+static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode)
{
struct hrtimer_cpu_base *cpu_base;
memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
+
+/**
+ * hrtimer_init - initialize a timer to the given clock
+ * @timer: the timer to be initialized
+ * @clock_id: the clock to be used
+ * @mode: timer mode abs/rel
+ */
+void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode)
+{
+ debug_hrtimer_init(timer);
+ __hrtimer_init(timer, clock_id, mode);
+}
EXPORT_SYMBOL_GPL(hrtimer_init);
/**
timer = list_entry(cpu_base->cb_pending.next,
struct hrtimer, cb_entry);
+ debug_hrtimer_deactivate(timer);
timer_stats_account_hrtimer(timer);
fn = timer->function;
* If the timer was rearmed on another CPU, reprogram
* the event device.
*/
- if (timer->base->first == &timer->node)
- hrtimer_reprogram(timer, timer->base);
+ struct hrtimer_clock_base *base = timer->base;
+
+ if (base->first == &timer->node &&
+ hrtimer_reprogram(timer, base)) {
+ /*
+ * Timer is expired. Thus move it from tree to
+ * pending list again.
+ */
+ __remove_hrtimer(timer, base,
+ HRTIMER_STATE_PENDING, 0);
+ list_add_tail(&timer->cb_entry,
+ &base->cpu_base->cb_pending);
+ }
}
}
spin_unlock_irq(&cpu_base->lock);
enum hrtimer_restart (*fn)(struct hrtimer *);
int restart;
+ debug_hrtimer_deactivate(timer);
__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
timer_stats_account_hrtimer(timer);
fn = timer->function;
- if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) {
+ if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
+ timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
/*
* Used for scheduler timers, avoid lock inversion with
* rq->lock and tasklist_lock.
sl->timer.function = hrtimer_wakeup;
sl->task = task;
#ifdef CONFIG_HIGH_RES_TIMERS
- sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+ sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
#endif
}
{
struct hrtimer_sleeper t;
struct timespec __user *rmtp;
+ int ret = 0;
- hrtimer_init(&t.timer, restart->nanosleep.index, HRTIMER_MODE_ABS);
+ hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
+ HRTIMER_MODE_ABS);
t.timer.expires.tv64 = restart->nanosleep.expires;
if (do_nanosleep(&t, HRTIMER_MODE_ABS))
- return 0;
+ goto out;
rmtp = restart->nanosleep.rmtp;
if (rmtp) {
- int ret = update_rmtp(&t.timer, rmtp);
+ ret = update_rmtp(&t.timer, rmtp);
if (ret <= 0)
- return ret;
+ goto out;
}
/* The other values in restart are already filled in */
- return -ERESTART_RESTARTBLOCK;
+ ret = -ERESTART_RESTARTBLOCK;
+out:
+ destroy_hrtimer_on_stack(&t.timer);
+ return ret;
}
long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
{
struct restart_block *restart;
struct hrtimer_sleeper t;
+ int ret = 0;
- hrtimer_init(&t.timer, clockid, mode);
+ hrtimer_init_on_stack(&t.timer, clockid, mode);
t.timer.expires = timespec_to_ktime(*rqtp);
if (do_nanosleep(&t, mode))
- return 0;
+ goto out;
/* Absolute timers do not update the rmtp value and restart: */
- if (mode == HRTIMER_MODE_ABS)
- return -ERESTARTNOHAND;
+ if (mode == HRTIMER_MODE_ABS) {
+ ret = -ERESTARTNOHAND;
+ goto out;
+ }
if (rmtp) {
- int ret = update_rmtp(&t.timer, rmtp);
+ ret = update_rmtp(&t.timer, rmtp);
if (ret <= 0)
- return ret;
+ goto out;
}
	restart = &current_thread_info()->restart_block;
restart->nanosleep.rmtp = rmtp;
restart->nanosleep.expires = t.timer.expires.tv64;
- return -ERESTART_RESTARTBLOCK;
+ ret = -ERESTART_RESTARTBLOCK;
+out:
+ destroy_hrtimer_on_stack(&t.timer);
+ return ret;
}
asmlinkage long
#ifdef CONFIG_HOTPLUG_CPU
-static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
- struct hrtimer_clock_base *new_base)
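+/*
+ * Move all timers from the dead CPU's clock base over to the new
+ * base. @dcpu is the dead CPU, used only for the warning printout.
+ * Returns 1 when an already expired timer had to be moved to the
+ * cb_pending list, in which case the caller must raise HRTIMER_SOFTIRQ.
+ */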
+static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+ struct hrtimer_clock_base *new_base, int dcpu)
{
struct hrtimer *timer;
struct rb_node *node;
+ int raise = 0;
while ((node = rb_first(&old_base->active))) {
timer = rb_entry(node, struct hrtimer, node);
BUG_ON(hrtimer_callback_running(timer));
- __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
+ debug_hrtimer_deactivate(timer);
+
+ /*
+		 * Should not happen. Per-CPU timers should be
+		 * canceled _before_ the migration code is called.
+ */
+ if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
+ __remove_hrtimer(timer, old_base,
+ HRTIMER_STATE_INACTIVE, 0);
+			WARN(1, "hrtimer (%p %p) active but cpu %d dead\n",
+ timer, timer->function, dcpu);
+ continue;
+ }
+
+ /*
+		 * Mark it as STATE_MIGRATE, not INACTIVE, otherwise the
+		 * timer could be seen as !active and just vanish away
+		 * under us on another CPU.
+ */
+ __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
timer->base = new_base;
/*
* Enqueue the timer. Allow reprogramming of the event device
*/
enqueue_hrtimer(timer, new_base, 1);
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+ /*
+		 * Happens with high res enabled when the timer has
+		 * already expired and the callback mode is
+		 * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
+		 * enqueue code does not move expired timers to the
+		 * softirq pending list for performance/latency reasons,
+		 * but during migration we need to do that, otherwise
+		 * we end up with a stale timer.
+ */
+ if (timer->state == HRTIMER_STATE_MIGRATE) {
+ timer->state = HRTIMER_STATE_PENDING;
+ list_add_tail(&timer->cb_entry,
+ &new_base->cpu_base->cb_pending);
+ raise = 1;
+ }
+#endif
+ /* Clear the migration state bit */
+ timer->state &= ~HRTIMER_STATE_MIGRATE;
}
+ return raise;
}
+#ifdef CONFIG_HIGH_RES_TIMERS
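+
+/*
+ * Move the timers that are already pending callback execution from the
+ * dead CPU's cb_pending list over to the new base. Returns 1 when the
+ * caller has to raise HRTIMER_SOFTIRQ on the new CPU.
+ */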
+static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
+ struct hrtimer_cpu_base *new_base)
+{
+ struct hrtimer *timer;
+ int raise = 0;
+
+ while (!list_empty(&old_base->cb_pending)) {
+ timer = list_entry(old_base->cb_pending.next,
+ struct hrtimer, cb_entry);
+
+ __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
+ timer->base = &new_base->clock_base[timer->base->index];
+ list_add_tail(&timer->cb_entry, &new_base->cb_pending);
+ raise = 1;
+ }
+ return raise;
+}
+#else
+static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
+ struct hrtimer_cpu_base *new_base)
+{
+ return 0;
+}
+#endif
+
static void migrate_hrtimers(int cpu)
{
struct hrtimer_cpu_base *old_base, *new_base;
- int i;
+ int i, raise = 0;
BUG_ON(cpu_online(cpu));
old_base = &per_cpu(hrtimer_bases, cpu);
spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- migrate_hrtimer_list(&old_base->clock_base[i],
- &new_base->clock_base[i]);
+ if (migrate_hrtimer_list(&old_base->clock_base[i],
+ &new_base->clock_base[i], cpu))
+ raise = 1;
}
+ if (migrate_hrtimer_pending(old_base, new_base))
+ raise = 1;
+
spin_unlock(&old_base->lock);
spin_unlock(&new_base->lock);
local_irq_enable();
put_cpu_var(hrtimer_bases);
+
+ if (raise)
+ hrtimer_raise_softirq();
}
#endif /* CONFIG_HOTPLUG_CPU */
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
- open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq, NULL);
+ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
#endif
}