/*
 * Implement CPU time clocks for the POSIX clock interface.
 */
#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(unsigned long rlim_new)
{
	cputime_t cputime = secs_to_cputime(rlim_new);

	spin_lock_irq(&current->sighand->siglock);
	set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
	spin_unlock_irq(&current->sighand->siglock);
}
static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;
	if (pid == 0)
		return 0;

	read_lock(&tasklist_lock);
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		    same_thread_group(p, current) : thread_group_leader(p))) {
		error = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return error;
}

static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
	union cpu_time_count ret;
	ret.sched = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret.cpu = timespec_to_cputime(tp);
	}
	return ret;
}
static void sample_to_timespec(const clockid_t which_clock,
			       union cpu_time_count cpu,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
		*tp = ns_to_timespec(cpu.sched);
	else
		cputime_to_timespec(cpu.cpu, tp);
}

static inline int cpu_time_before(const clockid_t which_clock,
				  union cpu_time_count now,
				  union cpu_time_count then)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		return now.sched < then.sched;
	} else {
		return cputime_lt(now.cpu, then.cpu);
	}
}

static inline void cpu_time_add(const clockid_t which_clock,
				union cpu_time_count *acc,
				union cpu_time_count val)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		acc->sched += val.sched;
	} else {
		acc->cpu = cputime_add(acc->cpu, val.cpu);
	}
}

static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
						union cpu_time_count a,
						union cpu_time_count b)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		a.sched -= b.sched;
	} else {
		a.cpu = cputime_sub(a.cpu, b.cpu);
	}
	return a;
}
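
/*
 * Note on the cpu_time_count convention used by the helpers above: the
 * union holds either a cputime_t in .cpu (PROF/VIRT clocks) or a u64
 * nanosecond count in .sched (SCHED clock).  timespec_to_sample() zeroes
 * .sched before filling .cpu, so the high bits are always clear and a
 * test of expires.sched == 0 can safely mean "not armed" for every clock
 * type; the rest of this file relies on that.
 */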
/*
 * Divide and limit the result to res >= 1
 *
 * This is necessary to prevent signal delivery starvation, when the result of
 * the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
	cputime_t res = cputime_div(time, div);

	return max_t(cputime_t, res, 1);
}
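
/*
 * For example, dividing a 5-tick expiry by div == 10 would round down to
 * 0 and the rescaled timer would never advance; clamping the quotient to
 * one tick keeps it moving.
 */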
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
			   union cpu_time_count now)
{
	int i;

	if (timer->it.cpu.incr.sched == 0)
		return;

	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		unsigned long long delta, incr;

		if (now.sched < timer->it.cpu.expires.sched)
			return;
		incr = timer->it.cpu.incr.sched;
		delta = now.sched + incr - timer->it.cpu.expires.sched;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr = incr << 1;
		for (; i >= 0; incr >>= 1, i--) {
			if (delta < incr)
				continue;
			timer->it.cpu.expires.sched += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	} else {
		cputime_t delta, incr;

		if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
			return;
		incr = timer->it.cpu.incr.cpu;
		delta = cputime_sub(cputime_add(now.cpu, incr),
				    timer->it.cpu.expires.cpu);
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
			incr = cputime_add(incr, incr);
		for (; i >= 0; incr = cputime_halve(incr), i--) {
			if (cputime_lt(delta, incr))
				continue;
			timer->it.cpu.expires.cpu =
				cputime_add(timer->it.cpu.expires.cpu, incr);
			timer->it_overrun += 1 << i;
			delta = cputime_sub(delta, incr);
		}
	}
}
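
/*
 * Worked example of the doubling/halving loops above: with expires == 100,
 * incr == 30 and now == 250, delta = 250 + 30 - 100 = 180.  The first loop
 * doubles incr to 120 (i == 2); the second loop then subtracts 120
 * (overrun += 4) and 60 (overrun += 2), skips 30, and leaves
 * expires == 280 > now.  Six elapsed periods are accounted in O(log n)
 * steps instead of one loop iteration per period.
 */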
static inline cputime_t prof_ticks(struct task_struct *p)
{
	return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
	return p->utime;
}

int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}
int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    union cpu_time_count *cpu)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = task_sched_runtime(p);
		break;
	}
	return 0;
}
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct sighand_struct *sighand;
	struct signal_struct *sig;
	struct task_struct *t;

	*times = INIT_CPUTIME;

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	if (!sighand)
		goto out;

	sig = tsk->signal;

	t = tsk;
	do {
		times->utime = cputime_add(times->utime, t->utime);
		times->stime = cputime_add(times->stime, t->stime);
		times->sum_exec_runtime += t->se.sum_exec_runtime;

		t = next_thread(t);
	} while (t != tsk);

	times->utime = cputime_add(times->utime, sig->utime);
	times->stime = cputime_add(times->stime, sig->stime);
	times->sum_exec_runtime += sig->sum_sched_runtime;
out:
	rcu_read_unlock();
}
static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
	if (cputime_gt(b->utime, a->utime))
		a->utime = b->utime;

	if (cputime_gt(b->stime, a->stime))
		a->stime = b->stime;

	if (b->sum_exec_runtime > a->sum_exec_runtime)
		a->sum_exec_runtime = b->sum_exec_runtime;
}
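
/*
 * update_gt_cputime() only ever moves each cached field forward, so when
 * the group timer is resynchronized in thread_group_cputimer() below, the
 * cached clock can never be observed to run backwards even if the fresh
 * summation races with concurrent accounting updates.
 */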
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;
	unsigned long flags;

	spin_lock_irqsave(&cputimer->lock, flags);
	if (!cputimer->running) {
		cputimer->running = 1;
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start
		 * it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime, &sum);
	}
	*times = cputimer->cputime;
	spin_unlock_irqrestore(&cputimer->lock, flags);
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = thread_group_sched_runtime(p);
		break;
	}
	return 0;
}
int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int error = -EINVAL;
	union cpu_time_count rtn;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		if (CPUCLOCK_PERTHREAD(which_clock)) {
			/*
			 * Sampling just ourselves we can do with no locking.
			 */
			error = cpu_clock_sample(which_clock,
						 current, &rtn);
		} else {
			read_lock(&tasklist_lock);
			error = cpu_clock_sample_group(which_clock,
						       current, &rtn);
			read_unlock(&tasklist_lock);
		}
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p) {
			if (CPUCLOCK_PERTHREAD(which_clock)) {
				if (same_thread_group(p, current)) {
					error = cpu_clock_sample(which_clock,
								 p, &rtn);
				}
			} else {
				read_lock(&tasklist_lock);
				if (thread_group_leader(p) && p->signal) {
					error =
					    cpu_clock_sample_group(which_clock,
								   p, &rtn);
				}
				read_unlock(&tasklist_lock);
			}
		}
		rcu_read_unlock();
	}

	if (error)
		return error;
	sample_to_timespec(which_clock, rtn, tp);
	return 0;
}
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	read_lock(&tasklist_lock);
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !thread_group_leader(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	int ret = 0;

	if (likely(p != NULL)) {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * We raced with the reaping of the task.
			 * The deletion should have cleared us off the list.
			 */
			BUG_ON(!list_empty(&timer->it.cpu.entry));
		} else {
			spin_lock(&p->sighand->siglock);
			if (timer->it.cpu.firing)
				ret = TIMER_RETRY;
			else
				list_del(&timer->it.cpu.entry);
			spin_unlock(&p->sighand->siglock);
		}
		read_unlock(&tasklist_lock);

		if (!ret)
			put_task_struct(p);
	}

	return ret;
}
/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
			   cputime_t utime, cputime_t stime,
			   unsigned long long sum_exec_runtime)
{
	struct cpu_timer_list *timer, *next;
	cputime_t ptime = cputime_add(utime, stime);

	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, ptime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 ptime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, utime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 utime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.sched < sum_exec_runtime) {
			timer->expires.sched = 0;
		} else {
			timer->expires.sched -= sum_exec_runtime;
		}
	}
}
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers,
		       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	struct signal_struct *const sig = tsk->signal;

	cleanup_timers(tsk->signal->cpu_timers,
		       cputime_add(tsk->utime, sig->utime),
		       cputime_add(tsk->stime, sig->stime),
		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
	/*
	 * That's all for this thread or process.
	 * We leave our residual in expires to be reported.
	 */
	put_task_struct(timer->it.cpu.task);
	timer->it.cpu.task = NULL;
	timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
					     timer->it.cpu.expires,
					     now);
}

static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
	return cputime_eq(expires, cputime_zero) ||
	       cputime_gt(expires, new_exp);
}
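
/*
 * In the expiration caches, cputime_zero doubles as "no expiry cached",
 * so expires_gt() treats an unset slot as later than any real expiry and
 * a newly armed timer always installs itself there.
 */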
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	BUG_ON(!irqs_disabled());
	spin_lock(&p->sighand->siglock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		union cpu_time_count *exp = &nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */
		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp->cpu))
				cputime_expires->prof_exp = exp->cpu;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp->cpu))
				cputime_expires->virt_exp = exp->cpu;
			break;
		case CPUCLOCK_SCHED:
			if (cputime_expires->sched_exp == 0 ||
			    cputime_expires->sched_exp > exp->sched)
				cputime_expires->sched_exp = exp->sched;
			break;
		}
	}

	spin_unlock(&p->sighand->siglock);
}
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires.sched = 0;
	} else if (timer->it.cpu.incr.sched == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires.sched = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}
/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
		break;
	}
	return 0;
}
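
/*
 * Note the split between the two group samplers: cpu_clock_sample_group()
 * sums the group clock from scratch for clock_gettime(), while
 * cpu_timer_sample_group() reads the cached thread_group_cputimer (plus
 * the current task's pending delta for the SCHED case), keeping timer
 * operations off the O(nr_threads) summation path.
 */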
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
			struct itimerspec *new, struct itimerspec *old)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count old_expires, new_expires, old_incr, val;
	int ret;

	if (unlikely(p == NULL)) {
		/*
		 * Timer refers to a dead task's clock.
		 */
		return -ESRCH;
	}

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	read_lock(&tasklist_lock);
	/*
	 * We need the tasklist_lock to protect against reaping that
	 * clears p->signal.  If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(p->signal == NULL)) {
		read_unlock(&tasklist_lock);
		put_task_struct(p);
		timer->it.cpu.task = NULL;
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	BUG_ON(!irqs_disabled());

	ret = 0;
	old_incr = timer->it.cpu.incr;
	spin_lock(&p->sighand->siglock);
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);
	spin_unlock(&p->sighand->siglock);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires.sched == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (cpu_time_before(timer->it_clock, val,
					    timer->it.cpu.expires)) {
				old_expires = cpu_time_sub(
					timer->it_clock,
					timer->it.cpu.expires, val);
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		read_unlock(&tasklist_lock);
		goto out;
	}

	if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
		cpu_time_add(timer->it_clock, &new_expires, val);
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    cpu_time_before(timer->it_clock, val, new_expires)) {
		arm_timer(timer);
	}

	read_unlock(&tasklist_lock);

	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    !cpu_time_before(timer->it_clock, val, new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
out:
	if (old) {
		sample_to_timespec(timer->it_clock,
				   old_incr, &old->it_interval);
	}
	return ret;
}
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	union cpu_time_count now;
	struct task_struct *p = timer->it.cpu.task;
	int clear_dead;

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	if (unlikely(p == NULL)) {
		/*
		 * This task already died and the timer will never fire.
		 * In this case, expires is actually the dead value.
		 */
	dead:
		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
				   &itp->it_value);
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		clear_dead = p->exit_state;
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			put_task_struct(p);
			timer->it.cpu.task = NULL;
			timer->it.cpu.expires.sched = 0;
			read_unlock(&tasklist_lock);
			goto dead;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			clear_dead = (unlikely(p->exit_state) &&
				      thread_group_empty(p));
		}
		read_unlock(&tasklist_lock);
	}

	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		if (timer->it.cpu.incr.sched == 0 &&
		    cpu_time_before(timer->it_clock,
				    timer->it.cpu.expires, now)) {
			/*
			 * Do-nothing timer expired and has no reload,
			 * so it's as if it was never set.
			 */
			timer->it.cpu.expires.sched = 0;
			itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
			return;
		}
		/*
		 * Account for any expirations and reloads that should
		 * have happened.
		 */
		bump_cpu_timer(timer, now);
	}

	if (unlikely(clear_dead)) {
		/*
		 * We've noticed that the thread is dead, but
		 * not yet reaped.  Take this opportunity to
		 * drop our task ref.
		 */
		clear_dead_task(timer, now);
		goto dead;
	}

	if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
		sample_to_timespec(timer->it_clock,
				   cpu_time_sub(timer->it_clock,
						timer->it.cpu.expires, now),
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	int maxfire;
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;
	unsigned long soft;

	maxfire = 20;
	tsk->cputime_expires.prof_exp = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
			tsk->cputime_expires.prof_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.virt_exp = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
			tsk->cputime_expires.virt_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.sched_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
			tsk->cputime_expires.sched_exp = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case thread timers.
	 */
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
			}
			printk(KERN_INFO
			       "RT Watchdog Timeout: %s[%d]\n",
			       tsk->comm, task_pid_nr(tsk));
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
}
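
/*
 * Unit check for the RLIMIT_RTTIME comparisons above: tsk->rt.timeout
 * counts scheduler ticks while the rlimit values are in microseconds, so
 * the limits are scaled by USEC_PER_SEC/HZ microseconds per tick.  With
 * HZ == 1000, a soft limit of 2000000us becomes
 * DIV_ROUND_UP(2000000, 1000) == 2000 ticks.
 */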
static void stop_process_timers(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	unsigned long flags;

	if (!cputimer->running)
		return;

	spin_lock_irqsave(&cputimer->lock, flags);
	cputimer->running = 0;
	spin_unlock_irqrestore(&cputimer->lock, flags);
}
static u32 onecputick;

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     cputime_t *expires, cputime_t cur_time, int signo)
{
	if (cputime_eq(it->expires, cputime_zero))
		return;

	if (cputime_ge(cur_time, it->expires)) {
		if (!cputime_eq(it->incr, cputime_zero)) {
			it->expires = cputime_add(it->expires, it->incr);
			it->error += it->incr_error;
			if (it->error >= onecputick) {
				it->expires = cputime_sub(it->expires,
							  cputime_one_jiffy);
				it->error -= onecputick;
			}
		} else {
			it->expires = cputime_zero;
		}

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    tsk->signal->leader_pid, cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (!cputime_eq(it->expires, cputime_zero) &&
	    (cputime_eq(*expires, cputime_zero) ||
	     cputime_lt(it->expires, *expires))) {
		*expires = it->expires;
	}
}
/*
 * Check for any per-thread CPU timers that have fired and move them
 * off the tsk->*_timers list onto the firing list.  Per-thread timers
 * have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	int maxfire;
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, ptime, virt_expires, prof_expires;
	unsigned long long sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * Don't sample the current process CPU clocks if there are no timers.
	 */
	if (list_empty(&timers[CPUCLOCK_PROF]) &&
	    cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
	    list_empty(&timers[CPUCLOCK_VIRT]) &&
	    cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
	    list_empty(&timers[CPUCLOCK_SCHED])) {
		stop_process_timers(tsk);
		return;
	}

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = cputime_add(utime, cputime.stime);
	sum_sched_runtime = cputime.sum_exec_runtime;

	maxfire = 20;
	prof_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
			prof_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	virt_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
			virt_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
			sched_expires = tl->expires.sched;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
		cputime_t x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = secs_to_cputime(soft);
		if (cputime_eq(prof_expires, cputime_zero) ||
		    cputime_lt(x, prof_expires)) {
			prof_expires = x;
		}
	}

	if (!cputime_eq(prof_expires, cputime_zero) &&
	    (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
	     cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
		sig->cputime_expires.prof_exp = prof_expires;
	if (!cputime_eq(virt_expires, cputime_zero) &&
	    (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
	     cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
		sig->cputime_expires.virt_exp = virt_expires;
	if (sched_expires != 0 &&
	    (sig->cputime_expires.sched_exp == 0 ||
	     sig->cputime_expires.sched_exp > sched_expires))
		sig->cputime_expires.sched_exp = sched_expires;
}
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count now;

	if (unlikely(p == NULL))
		/*
		 * The task was cleaned up already, no future firings.
		 */
		goto out;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state)) {
			clear_dead_task(timer, now);
			goto out;
		}
		read_lock(&tasklist_lock); /* arm_timer needs it.  */
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			put_task_struct(p);
			timer->it.cpu.task = p = NULL;
			timer->it.cpu.expires.sched = 0;
			goto out_unlock;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/*
			 * We've noticed that the thread is dead, but
			 * not yet reaped.  Take this opportunity to
			 * drop our task ref.
			 */
			clear_dead_task(timer, now);
			goto out_unlock;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the tasklist_lock locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer);

out_unlock:
	read_unlock(&tasklist_lock);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (cputime_eq(cputime->utime, cputime_zero) &&
	    cputime_eq(cputime->stime, cputime_zero) &&
	    cputime->sum_exec_runtime == 0)
		return 1;
	return 0;
}
/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (!cputime_eq(expires->utime, cputime_zero) &&
	    cputime_ge(sample->utime, expires->utime))
		return 1;
	if (!cputime_eq(expires->stime, cputime_zero) &&
	    cputime_ge(cputime_add(sample->utime, sample->stime),
		       expires->stime))
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}
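
/*
 * The asymmetry above is intentional: when a task_cputime is used as an
 * expiration cache, utime holds the virtual expiry, stime the profiling
 * expiry, and sum_exec_runtime the sched expiry (cf. the virt_exp/
 * prof_exp/sched_exp field aliases in <linux/sched.h>), which is why
 * expires->stime is compared against utime + stime, the profiling clock.
 */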
/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	/* tsk == current, ensure it is safe to use ->signal/sighand */
	if (unlikely(tsk->exit_state))
		return 0;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample = {
			.utime = tsk->utime,
			.stime = tsk->stime,
			.sum_exec_runtime = tsk->se.sum_exec_runtime
		};

		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	if (!task_cputime_zero(&sig->cputime_expires)) {
		struct task_cputime group_sample;

		thread_group_cputimer(tsk, &group_sample);
		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return 0;
}
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;

	BUG_ON(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	spin_lock(&tsk->sighand->siglock);
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);
	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	spin_unlock(&tsk->sighand->siglock);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}
/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	union cpu_time_count now;

	BUG_ON(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		/*
		 * We are setting itimer. The *oldval is absolute and we update
		 * it to be relative, *newval argument is relative and we update
		 * it to be absolute.
		 */
		if (!cputime_eq(*oldval, cputime_zero)) {
			if (cputime_le(*oldval, now.cpu)) {
				/* Just about to fire. */
				*oldval = cputime_one_jiffy;
			} else {
				*oldval = cputime_sub(*oldval, now.cpu);
			}
		}

		if (cputime_eq(*newval, cputime_zero))
			return;
		*newval = cputime_add(*newval, now.cpu);
	}

	/*
	 * Update expiration cache if we are the earliest timer, or eventually
	 * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}
}
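
/*
 * Example of the conversion above for an ITIMER_PROF caller: with the
 * group clock now at 30 ticks, an armed *oldval of 100 (absolute) is
 * reported back as 70 (relative), and a new request of 50 (relative) is
 * stored as the absolute expiry 80.
 */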
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct itimerspec *it)
{
	struct k_itimer timer;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec zero_it;

		memset(it, 0, sizeof *it);
		it->it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires.sched == 0) {
				/*
				 * Our timer fired and was reset.
				 */
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		posix_cpu_timer_set(&timer, 0, &zero_it, it);
		spin_unlock_irq(&timer.it_lock);

		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}
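
/*
 * The expires.sched == 0 test in the wait loop above is the wakeup
 * handshake: for the sigq == NULL nanosleep timer, cpu_timer_fire() both
 * wakes the sleeper and zeroes expires, so observing zero here means the
 * sleep completed rather than a spurious wakeup.
 */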
int posix_cpu_nsleep(const clockid_t which_clock, int flags,
		     struct timespec *rqtp, struct timespec __user *rmtp)
{
	struct restart_block *restart_block =
		&current_thread_info()->restart_block;
	struct itimerspec it;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->arg0 = which_clock;
		restart_block->arg1 = (unsigned long) rmtp;
		restart_block->arg2 = rqtp->tv_sec;
		restart_block->arg3 = rqtp->tv_nsec;
	}
	return error;
}
long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->arg0;
	struct timespec __user *rmtp;
	struct timespec t;
	struct itimerspec it;
	int error;

	rmtp = (struct timespec __user *) restart_block->arg1;
	t.tv_sec = restart_block->arg2;
	t.tv_nsec = restart_block->arg3;

	restart_block->fn = do_no_restart_syscall;
	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

	if (error == -ERESTART_RESTARTBLOCK) {
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->arg0 = which_clock;
		restart_block->arg1 = (unsigned long) rmtp;
		restart_block->arg2 = t.tv_sec;
		restart_block->arg3 = t.tv_nsec;
	}
	return error;
}
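
/*
 * Note that the restart always uses TIMER_ABSTIME: when do_cpu_nanosleep()
 * is interrupted it rewrites *rqtp with the sampled absolute expiration,
 * and posix_cpu_nsleep() stores that absolute value in the restart block,
 * so a stream of signals cannot stretch the total sleep time.
 */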
#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
			     struct timespec *rqtp, struct timespec __user *rmtp)
{
	return -EINVAL;
}
static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres = process_cpu_clock_getres,
		.clock_get = process_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = process_cpu_timer_create,
		.nsleep = process_cpu_nsleep,
		.nsleep_restart = process_cpu_nsleep_restart,
	};
	struct k_clock thread = {
		.clock_getres = thread_cpu_clock_getres,
		.clock_get = thread_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = thread_cpu_timer_create,
		.nsleep = thread_cpu_nsleep,
		.nsleep_restart = thread_cpu_nsleep_restart,
	};
	struct timespec ts;

	register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	cputime_to_timespec(cputime_one_jiffy, &ts);
	onecputick = ts.tv_nsec;
	WARN_ON(ts.tv_sec != 0);

	return 0;
}
__initcall(init_posix_cpu_timers);