* Credits:
* based on kernel/timer.c
*
+ * Help, testing, suggestions, bugfixes, improvements were
+ * provided by:
+ *
+ * George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
+ * et al.
+ *
* For licencing details see kernel-base/COPYING
*/
/*
* The timer bases:
+ *
+ * Note: If we want to add new timer bases, we have to skip the two
+ * clock ids captured by the cpu-timers. We do this by holding empty
+ * entries rather than doing math adjustment of the clock ids.
+ * This ensures that we capture erroneous accesses to these clock ids
+ * rather than moving them into the range of valid clock ids.
*/
#define MAX_HRTIMER_BASES 2
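/*
 * Editorial sketch (not part of this patch): the clock ids index the
 * per-cpu base array directly, which is why empty guard entries are
 * preferred over arithmetic remapping. A hypothetical future layout
 * would look like this, with the cpu-timer clock ids
 * (CLOCK_PROCESS_CPUTIME_ID == 2, CLOCK_THREAD_CPUTIME_ID == 3)
 * skipped by empty slots:
 *
 *	bases[0]  CLOCK_REALTIME	valid base
 *	bases[1]  CLOCK_MONOTONIC	valid base
 *	bases[2]  (cpu-timer id)	empty, traps erroneous accesses
 *	bases[3]  (cpu-timer id)	empty, traps erroneous accesses
 *	bases[4]  new clock id		hypothetical new base
 */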
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec);
}
+EXPORT_SYMBOL_GPL(ktime_get_ts);
/*
* Functions and macros which are different for UP/SMP systems are kept in a
* @interval: the interval to forward
*
* Forward the timer expiry so it will expire in the future.
- * The number of overruns is added to the overrun field.
+ * Returns the number of overruns.
*/
unsigned long
-hrtimer_forward(struct hrtimer *timer, const ktime_t interval)
+hrtimer_forward(struct hrtimer *timer, ktime_t interval)
{
unsigned long orun = 1;
ktime_t delta, now;
if (delta.tv64 < 0)
return 0;
+ if (interval.tv64 < timer->base->resolution.tv64)
+ interval.tv64 = timer->base->resolution.tv64;
+
if (unlikely(delta.tv64 >= interval.tv64)) {
nsec_t incr = ktime_to_ns(interval);
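/*
 * Usage sketch (editorial; my_state, my_periodic_fn and its fields are
 * hypothetical names, not from this patch): a periodic callback pushes
 * its expiry into the future in whole intervals and accounts for
 * skipped periods via the returned overrun count.
 */
struct my_state {				/* hypothetical container */
	struct hrtimer timer;
	ktime_t period;
	unsigned long missed;
};

static int my_periodic_fn(void *data)
{
	struct my_state *s = data;
	unsigned long orun = hrtimer_forward(&s->timer, s->period);

	if (orun > 1)
		s->missed += orun - 1;	/* whole intervals skipped */

	return HRTIMER_RESTART;		/* re-enqueue at the new expiry */
}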
static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
struct rb_node **link = &base->active.rb_node;
- struct list_head *prev = &base->pending;
struct rb_node *parent = NULL;
struct hrtimer *entry;
*/
if (timer->expires.tv64 < entry->expires.tv64)
link = &(*link)->rb_left;
- else {
+ else
link = &(*link)->rb_right;
- prev = &entry->list;
- }
}
/*
- * Insert the timer to the rbtree and to the sorted list:
+ * Insert the timer to the rbtree and check whether it
+ * replaces the first pending timer
*/
rb_link_node(&timer->node, parent, link);
rb_insert_color(&timer->node, &base->active);
- list_add(&timer->list, prev);
timer->state = HRTIMER_PENDING;
-}
+ if (!base->first || timer->expires.tv64 <
+ rb_entry(base->first, struct hrtimer, node)->expires.tv64)
+ base->first = &timer->node;
+}
/*
* __remove_hrtimer - internal function to remove a timer
static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
/*
- * Remove the timer from the sorted list and from the rbtree:
+ * Remove the timer from the rbtree and replace the
+ * first entry pointer if necessary.
*/
- list_del(&timer->list);
+ if (base->first == &timer->node)
+ base->first = rb_next(&timer->node);
rb_erase(&timer->node, &base->active);
}
/* Switch the timer base, if necessary: */
new_base = switch_hrtimer_base(timer, base);
- if (mode == HRTIMER_REL)
+ if (mode == HRTIMER_REL) {
tim = ktime_add(tim, new_base->get_time());
+ /*
+ * CONFIG_TIME_LOW_RES is a temporary way for architectures
+ * to signal that they simply return xtime in
+ * do_gettimeoffset(). In this case we want to round up by
+ * resolution when starting a relative timer, to avoid short
+ * timeouts. This will go away with the GTOD framework.
+ */
+#ifdef CONFIG_TIME_LOW_RES
+ tim = ktime_add(tim, base->resolution);
+#endif
+ }
timer->expires = tim;
enqueue_hrtimer(timer, new_base);
return rem;
}
+#ifdef CONFIG_NO_IDLE_HZ
/**
- * hrtimer_rebase - rebase an initialized hrtimer to a different base
+ * hrtimer_get_next_event - get the time until next expiry event
*
- * @timer: the timer to be rebased
- * @clock_id: the clock to be used
+ * Returns the delta to the next expiry event or KTIME_MAX if no timer
+ * is pending.
*/
-void hrtimer_rebase(struct hrtimer *timer, const clockid_t clock_id)
+ktime_t hrtimer_get_next_event(void)
{
- struct hrtimer_base *bases;
+ struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
+ ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
+ unsigned long flags;
+ int i;
- bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
- timer->base = &bases[clock_id];
+ for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) {
+ struct hrtimer *timer;
+
+ spin_lock_irqsave(&base->lock, flags);
+ if (!base->first) {
+ spin_unlock_irqrestore(&base->lock, flags);
+ continue;
+ }
+ timer = rb_entry(base->first, struct hrtimer, node);
+ delta.tv64 = timer->expires.tv64;
+ spin_unlock_irqrestore(&base->lock, flags);
+ delta = ktime_sub(delta, base->get_time());
+ if (delta.tv64 < mindelta.tv64)
+ mindelta.tv64 = delta.tv64;
+ }
+ if (mindelta.tv64 < 0)
+ mindelta.tv64 = 0;
+ return mindelta;
}
+#endif
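/*
 * Usage sketch (editorial, assuming a hypothetical dyntick idle path;
 * next_idle_sleep_len and arch_deadline are illustrative): pick the
 * earlier of the next hrtimer expiry and an architecture-supplied
 * deadline, both relative deltas, when programming the sleep length.
 */
static ktime_t next_idle_sleep_len(ktime_t arch_deadline)
{
	ktime_t hr = hrtimer_get_next_event();	/* KTIME_MAX if none */

	return hr.tv64 < arch_deadline.tv64 ? hr : arch_deadline;
}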
/**
* hrtimer_init - initialize a timer to the given clock
*
* @timer: the timer to be initialized
* @clock_id: the clock to be used
+ * @mode: timer mode abs/rel
*/
-void hrtimer_init(struct hrtimer *timer, const clockid_t clock_id)
+void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode)
{
+ struct hrtimer_base *bases;
+
memset(timer, 0, sizeof(struct hrtimer));
- hrtimer_rebase(timer, clock_id);
+
+ bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
+
+ if (clock_id == CLOCK_REALTIME && mode != HRTIMER_ABS)
+ clock_id = CLOCK_MONOTONIC;
+
+ timer->base = &bases[clock_id];
}
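/*
 * Usage sketch (editorial; start_my_timeout, my_ctx and my_timeout_fn
 * are hypothetical): initialize the timer on a clock, then arm it
 * relative to now. Note that hrtimer_init() silently switches a
 * relative CLOCK_REALTIME timer to CLOCK_MONOTONIC, as shown above.
 */
static void start_my_timeout(struct my_ctx *ctx)
{
	hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_REL);
	ctx->timer.function = my_timeout_fn;
	ctx->timer.data = ctx;
	hrtimer_start(&ctx->timer, ktime_set(0, NSEC_PER_MSEC), HRTIMER_REL);
}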
/**
{
struct hrtimer_base *bases;
- tp->tv_sec = 0;
bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
- tp->tv_nsec = bases[which_clock].resolution;
+ *tp = ktime_to_timespec(bases[which_clock].resolution);
return 0;
}
static inline void run_hrtimer_queue(struct hrtimer_base *base)
{
ktime_t now = base->get_time();
+ struct rb_node *node;
spin_lock_irq(&base->lock);
- while (!list_empty(&base->pending)) {
+ while ((node = base->first)) {
struct hrtimer *timer;
int (*fn)(void *);
int restart;
void *data;
- timer = list_entry(base->pending.next, struct hrtimer, list);
+ timer = rb_entry(node, struct hrtimer, node);
if (now.tv64 <= timer->expires.tv64)
break;
fn = timer->function;
data = timer->data;
set_curr_timer(base, timer);
+ timer->state = HRTIMER_RUNNING;
__remove_hrtimer(timer, base);
spin_unlock_irq(&base->lock);
spin_lock_irq(&base->lock);
+ /* Another CPU has added back the timer */
+ if (timer->state != HRTIMER_RUNNING)
+ continue;
+
if (restart == HRTIMER_RESTART)
enqueue_hrtimer(timer, base);
else
run_hrtimer_queue(&base[i]);
}
+/*
+ * Sleep related functions:
+ */
+
+/**
+ * schedule_hrtimer - sleep until timeout
+ *
+ * @timer: hrtimer variable initialized with the correct clock base
+ * @mode: timeout value is abs/rel
+ *
+ * Make the current task sleep until the expiry time set in
+ * @timer has elapsed.
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least the requested timeout is
+ * guaranteed to pass before the routine returns. The routine
+ * will return 0.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task. In this case the remaining time
+ * will be returned.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ */
+static ktime_t __sched
+schedule_hrtimer(struct hrtimer *timer, const enum hrtimer_mode mode)
+{
+ /* fn stays NULL, meaning single-shot wakeup: */
+ timer->data = current;
+
+ hrtimer_start(timer, timer->expires, mode);
+
+ schedule();
+ hrtimer_cancel(timer);
+
+ /* Return the remaining time: */
+ if (timer->state != HRTIMER_EXPIRED)
+ return ktime_sub(timer->expires, timer->base->get_time());
+ else
+ return (ktime_t) { .tv64 = 0 };
+}
+
+static inline ktime_t __sched
+schedule_hrtimer_interruptible(struct hrtimer *timer,
+ const enum hrtimer_mode mode)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ return schedule_hrtimer(timer, mode);
+}
+
+static long __sched nanosleep_restart(struct restart_block *restart)
+{
+ struct timespec __user *rmtp;
+ struct timespec tu;
+ void *rfn_save = restart->fn;
+ struct hrtimer timer;
+ ktime_t rem;
+
+ restart->fn = do_no_restart_syscall;
+
+ hrtimer_init(&timer, (clockid_t) restart->arg3, HRTIMER_ABS);
+
+ timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0;
+
+ rem = schedule_hrtimer_interruptible(&timer, HRTIMER_ABS);
+
+ if (rem.tv64 <= 0)
+ return 0;
+
+ rmtp = (struct timespec __user *) restart->arg2;
+ tu = ktime_to_timespec(rem);
+ if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu)))
+ return -EFAULT;
+
+ restart->fn = rfn_save;
+
+ /* The other values in restart are already filled in */
+ return -ERESTART_RESTARTBLOCK;
+}
+
+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+ const enum hrtimer_mode mode, const clockid_t clockid)
+{
+ struct restart_block *restart;
+ struct hrtimer timer;
+ struct timespec tu;
+ ktime_t rem;
+
+ hrtimer_init(&timer, clockid, mode);
+
+ timer.expires = timespec_to_ktime(*rqtp);
+
+ rem = schedule_hrtimer_interruptible(&timer, mode);
+ if (rem.tv64 <= 0)
+ return 0;
+
+ /* Absolute timers do not update the rmtp value and restart: */
+ if (mode == HRTIMER_ABS)
+ return -ERESTARTNOHAND;
+
+ tu = ktime_to_timespec(rem);
+
+ if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu)))
+ return -EFAULT;
+
+ restart = &current_thread_info()->restart_block;
+ restart->fn = nanosleep_restart;
+ restart->arg0 = timer.expires.tv64 & 0xFFFFFFFF;
+ restart->arg1 = timer.expires.tv64 >> 32;
+ restart->arg2 = (unsigned long) rmtp;
+ restart->arg3 = (unsigned long) timer.base->index;
+
+ return -ERESTART_RESTARTBLOCK;
+}
+
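/*
 * Worked example (editorial): the restart_block args are unsigned
 * long, possibly only 32 bits wide, so the 64-bit expiry is split
 * across two args on save and rejoined in nanosleep_restart():
 *
 *	save:    arg0 = expires.tv64 & 0xFFFFFFFF; arg1 = expires.tv64 >> 32;
 *	restore: expires.tv64 = ((u64)arg1 << 32) | (u64)arg0;
 */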
+asmlinkage long
+sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
+{
+ struct timespec tu;
+
+ if (copy_from_user(&tu, rqtp, sizeof(tu)))
+ return -EFAULT;
+
+ if (!timespec_valid(&tu))
+ return -EINVAL;
+
+ return hrtimer_nanosleep(&tu, rmtp, HRTIMER_REL, CLOCK_MONOTONIC);
+}
+
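/*
 * Userspace view (editorial sketch; sleep_full is hypothetical): when
 * a handled signal interrupts the sleep, nanosleep() fails with EINTR
 * and the remaining time is written to rem, so the sleep can be
 * resumed with exactly what is left.
 */
#include <time.h>
#include <errno.h>

static void sleep_full(struct timespec req)
{
	struct timespec rem;

	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
		req = rem;		/* restart with the remainder */
}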
/*
* Functions related to boot-time initialization:
*/
struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu);
int i;
- for (i = 0; i < MAX_HRTIMER_BASES; i++) {
+ for (i = 0; i < MAX_HRTIMER_BASES; i++, base++)
spin_lock_init(&base->lock);
- INIT_LIST_HEAD(&base->pending);
- base++;
- }
}
#ifdef CONFIG_HOTPLUG_CPU