/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>

static struct timekeeper timekeeper;

/* Flag for whether timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* Flag for whether there is a persistent clock on this platform */
bool __read_mostly persistent_clock_exist = false;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
        while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
                tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
                tk->xtime_sec++;
        }
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
        tk->xtime_sec = ts->tv_sec;
        tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
        tk->xtime_sec += ts->tv_sec;
        tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
        tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
{
        struct timespec tmp;

        /*
         * Verify consistency of: offs_real = -wall_to_monotonic
         * before modifying anything
         */
        set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
                                        -tk->wall_to_monotonic.tv_nsec);
        WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
        tk->wall_to_monotonic = wtm;
        set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
        tk->offs_real = timespec_to_ktime(tmp);
        /* TAI runs ahead of UTC, so the TAI offset is offs_real + tai_offset */
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
{
        /* Verify consistency before modifying */
        WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);

        tk->total_sleep_time = t;
        tk->offs_boot = timespec_to_ktime(t);
}

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
        cycle_t interval;
        u64 tmp, ntpinterval;
        struct clocksource *old_clock;

        old_clock = tk->clock;
        tk->clock = clock;
        clock->cycle_last = clock->read(clock);

        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        tmp <<= clock->shift;
        ntpinterval = tmp;
        tmp += clock->mult/2;
        do_div(tmp, clock->mult);
        if (tmp == 0)
                tmp = 1;

        interval = (cycle_t) tmp;
        tk->cycle_interval = interval;

        /* Go back from cycles -> shifted ns */
        tk->xtime_interval = (u64) interval * clock->mult;
        tk->xtime_remainder = ntpinterval - tk->xtime_interval;
        tk->raw_interval =
                ((u64) interval * clock->mult) >> clock->shift;

        /* if changing clocks, convert xtime_nsec shift units */
        if (old_clock) {
                int shift_change = clock->shift - old_clock->shift;
                if (shift_change < 0)
                        tk->xtime_nsec >>= -shift_change;
                else
                        tk->xtime_nsec <<= shift_change;
        }
        tk->shift = clock->shift;

        tk->ntp_error = 0;
        tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

        /*
         * The timekeeper keeps its own mult values for the currently
         * active clocksource. These values will be adjusted via NTP
         * to counteract clock drifting.
         */
        tk->mult = clock->mult;
}

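/*
 * Worked example (editor's illustration, not part of the original file):
 * assume a 1 MHz clocksource with shift = 0 and mult = 1000 (1000 ns per
 * cycle), and an NTP interval of 10 ms (HZ = 100):
 *
 *	tmp = 10000000 << 0;		ten ms in shifted nanoseconds
 *	tmp += 1000 / 2;		round to the nearest cycle
 *	do_div(tmp, 1000);		cycle_interval = 10000 cycles
 *	xtime_interval = 10000 * 1000;	back to 10 ms in shifted ns
 *
 * xtime_remainder carries any sub-cycle rounding error (zero in this case)
 * so NTP can account for it instead of letting it accumulate as drift.
 */
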
/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
u32 (*arch_gettimeoffset)(void);

u32 get_arch_timeoffset(void)
{
        if (likely(arch_gettimeoffset))
                return arch_gettimeoffset();
        return 0;
}
#else
static inline u32 get_arch_timeoffset(void) { return 0; }
#endif

static inline s64 timekeeping_get_ns(struct timekeeper *tk)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        /* read clocksource: */
        clock = tk->clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        nsec = cycle_delta * tk->mult + tk->xtime_nsec;
        nsec >>= tk->shift;

        /* If arch requires, add in get_arch_timeoffset() */
        return nsec + get_arch_timeoffset();
}

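/*
 * Editor's note: the conversion above is the classic clocksource formula
 * ns = (cycles * mult) >> shift, applied on top of the shifted-nanosecond
 * accumulator. For an assumed mult = 4194304 (2^22) and shift = 22, one
 * cycle works out to exactly (1 * 4194304) >> 22 = 1 ns; keeping
 * xtime_nsec left-shifted by tk->shift preserves the sub-nanosecond
 * remainder between calls.
 */
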
static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        /* read clocksource: */
        clock = tk->clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        /* convert delta to nanoseconds. */
        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);

        /* If arch requires, add in get_arch_timeoffset() */
        return nsec + get_arch_timeoffset();
}

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk)
{
        raw_notifier_call_chain(&pvclock_gtod_chain, 0, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        int ret;

        write_seqlock_irqsave(&tk->lock, flags);
        ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
        /* update timekeeping data */
        update_pvclock_gtod(tk);
        write_sequnlock_irqrestore(&tk->lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

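/*
 * Example (editor's sketch, not part of the original file): a paravirt
 * clock driver could subscribe to timekeeping updates like this.
 * "my_gtod_update" and "my_nb" are hypothetical names; the chain passes
 * the timekeeper as the third argument.
 *
 *	static int my_gtod_update(struct notifier_block *nb,
 *				  unsigned long unused, void *priv)
 *	{
 *		struct timekeeper *tk = priv;
 *
 *		... publish tk->xtime_sec etc. to guest-visible memory ...
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_gtod_update,
 *	};
 *
 *	pvclock_gtod_register_notifier(&my_nb);
 */
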
/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        int ret;

        write_seqlock_irqsave(&tk->lock, flags);
        ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
        write_sequnlock_irqrestore(&tk->lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/* Must hold write on timekeeper.lock */
static void timekeeping_update(struct timekeeper *tk, bool clearntp)
{
        if (clearntp) {
                tk->ntp_error = 0;
                ntp_clear();
        }
        update_vsyscall(tk);
        update_pvclock_gtod(tk);
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        clock = tk->clock;
        cycle_now = clock->read(clock);
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
        clock->cycle_last = cycle_now;

        tk->xtime_nsec += cycle_delta * tk->mult;

        /* If arch requires, add in get_arch_timeoffset() */
        tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift;

        tk_normalize_xtime(tk);

        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
        timespec_add_ns(&tk->raw_time, nsec);
}

/**
 * __getnstimeofday - Returns the time of day in a timespec.
 * @ts:		pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or a negative value when suspended (the timespec
 * will be undefined).
 */
int __getnstimeofday(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        s64 nsecs = 0;

        do {
                seq = read_seqbegin(&tk->lock);

                ts->tv_sec = tk->xtime_sec;
                nsecs = timekeeping_get_ns(tk);

        } while (read_seqretry(&tk->lock, seq));

        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsecs);

        /*
         * Do not bail out early, in case there were callers still using
         * the value, even in the face of the WARN_ON.
         */
        if (unlikely(timekeeping_suspended))
                return -EAGAIN;
        return 0;
}
EXPORT_SYMBOL(__getnstimeofday);

/**
 * getnstimeofday - Returns the time of day in a timespec.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec (WARN if suspended).
 */
void getnstimeofday(struct timespec *ts)
{
        WARN_ON(__getnstimeofday(ts));
}
EXPORT_SYMBOL(getnstimeofday);

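/*
 * Usage sketch (editor's illustration): a caller that needs a wall-clock
 * timestamp and can tolerate the WARN when called while suspended:
 *
 *	struct timespec ts;
 *
 *	getnstimeofday(&ts);
 *	pr_info("wall time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
 */
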
ktime_t ktime_get(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned int seq;
        s64 secs, nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&tk->lock);
                secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
                nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;

        } while (read_seqretry(&tk->lock, seq));
        /*
         * Use ktime_set/ktime_add_ns to create a proper ktime on
         * 32-bit architectures without CONFIG_KTIME_SCALAR.
         */
        return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

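/*
 * Example (editor's illustration): the monotonic clock is the right base
 * for measuring durations, since do_settimeofday() cannot make it jump.
 * "do_something()" is a stand-in for the code being timed:
 *
 *	ktime_t start, delta;
 *
 *	start = ktime_get();
 *	do_something();
 *	delta = ktime_sub(ktime_get(), start);
 *	pr_info("took %lld ns\n", (long long)ktime_to_ns(delta));
 */
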
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec tomono;
        s64 nsec;
        unsigned int seq;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&tk->lock);
                ts->tv_sec = tk->xtime_sec;
                nsec = timekeeping_get_ns(tk);
                tomono = tk->wall_to_monotonic;

        } while (read_seqretry(&tk->lock, seq));

        ts->tv_sec += tomono.tv_sec;
        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/**
 * timekeeping_clocktai - Returns the TAI time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void timekeeping_clocktai(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        u64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&tk->lock);

                ts->tv_sec = tk->xtime_sec + tk->tai_offset;
                nsecs = timekeeping_get_ns(tk);

        } while (read_seqretry(&tk->lock, seq));

        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(timekeeping_clocktai);

/**
 * ktime_get_clocktai - Returns the TAI time of day in a ktime
 *
 * Returns the time of day in a ktime.
 */
ktime_t ktime_get_clocktai(void)
{
        struct timespec ts;

        timekeeping_clocktai(&ts);
        return timespec_to_ktime(ts);
}
EXPORT_SYMBOL(ktime_get_clocktai);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        s64 nsecs_raw, nsecs_real;

        WARN_ON_ONCE(timekeeping_suspended);

        do {
                seq = read_seqbegin(&tk->lock);

                *ts_raw = tk->raw_time;
                ts_real->tv_sec = tk->xtime_sec;
                ts_real->tv_nsec = 0;

                nsecs_raw = timekeeping_get_ns_raw(tk);
                nsecs_real = timekeeping_get_ns(tk);

        } while (read_seqretry(&tk->lock, seq));

        timespec_add_ns(ts_raw, nsecs_raw);
        timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec now;

        getnstimeofday(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec ts_delta, xt;
        unsigned long flags;

        if (!timespec_valid_strict(tv))
                return -EINVAL;

        write_seqlock_irqsave(&tk->lock, flags);

        timekeeping_forward_now(tk);

        xt = tk_xtime(tk);
        ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
        ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

        tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));

        tk_set_xtime(tk, tv);

        timekeeping_update(tk, true);

        write_sequnlock_irqrestore(&tk->lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}
EXPORT_SYMBOL(do_settimeofday);

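/*
 * Example (editor's sketch): setting the wall clock to a known value, as
 * an RTC driver might after reading the hardware clock at boot; the
 * timestamp below is arbitrary:
 *
 *	struct timespec ts = { .tv_sec = 1000000000, .tv_nsec = 0 };
 *	int err = do_settimeofday(&ts);
 *
 *	if (err)
 *		pr_err("do_settimeofday failed: %d\n", err);
 */
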
/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        struct timespec tmp;
        int ret = 0;

        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&tk->lock, flags);

        timekeeping_forward_now(tk);

        /* Make sure the proposed value is valid */
        tmp = timespec_add(tk_xtime(tk), *ts);
        if (!timespec_valid_strict(&tmp)) {
                ret = -EINVAL;
                goto error;
        }

        tk_xtime_add(tk, ts);
        tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
        timekeeping_update(tk, true);

        write_sequnlock_irqrestore(&tk->lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

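/*
 * Example (editor's illustration): adjtimex(ADJ_SETOFFSET) funnels into
 * this helper. Stepping the clock forward by half a second:
 *
 *	struct timespec delta = { .tv_sec = 0, .tv_nsec = 500000000 };
 *
 *	if (timekeeping_inject_offset(&delta))
 *		pr_warn("offset rejected\n");
 *
 * tv_nsec must stay in [0, NSEC_PER_SEC); a backwards step is expressed
 * as a negative tv_sec plus a positive tv_nsec.
 */
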
/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 *
 */
s32 timekeeping_get_tai_offset(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned int seq;
        s32 ret;

        do {
                seq = read_seqbegin(&tk->lock);
                ret = tk->tai_offset;
        } while (read_seqretry(&tk->lock, seq));

        return ret;
}

/**
 * __timekeeping_set_tai_offset - Lock free worker function
 *
 */
void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
        tk->tai_offset = tai_offset;
        /* keep offs_tai consistent with timekeeping_clocktai(): real + tai */
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 *
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;

        write_seqlock_irqsave(&tk->lock, flags);
        __timekeeping_set_tai_offset(tk, tai_offset);
        write_sequnlock_irqrestore(&tk->lock, flags);
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
        struct timekeeper *tk = &timekeeper;
        struct clocksource *new, *old;
        unsigned long flags;

        new = (struct clocksource *) data;

        write_seqlock_irqsave(&tk->lock, flags);

        timekeeping_forward_now(tk);
        if (!new->enable || new->enable(new) == 0) {
                old = tk->clock;
                tk_setup_internals(tk, new);
                if (old->disable)
                        old->disable(old);
        }
        timekeeping_update(tk, true);

        write_sequnlock_irqrestore(&tk->lock, flags);

        return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:	pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
        struct timekeeper *tk = &timekeeper;

        if (tk->clock == clock)
                return;
        stop_machine(change_clocksource, clock, NULL);
        tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
        struct timespec now;

        getnstimeofday(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        s64 nsecs;

        do {
                seq = read_seqbegin(&tk->lock);
                nsecs = timekeeping_get_ns_raw(tk);
                *ts = tk->raw_time;

        } while (read_seqretry(&tk->lock, seq));

        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);

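/*
 * Usage note (editor's illustration): the raw clock suits code comparing
 * the local oscillator against an external reference (e.g. PPS), since
 * NTP frequency corrections never steer it:
 *
 *	struct timespec raw;
 *
 *	getrawmonotonic(&raw);
 */
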
/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        int ret;

        do {
                seq = read_seqbegin(&tk->lock);

                ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqretry(&tk->lock, seq));

        return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        u64 ret;

        do {
                seq = read_seqbegin(&tk->lock);

                ret = tk->clock->max_idle_ns;

        } while (read_seqretry(&tk->lock, seq));

        return ret;
}

/**
 * read_persistent_clock - Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/**
 * read_boot_clock - Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        struct timekeeper *tk = &timekeeper;
        struct clocksource *clock;
        unsigned long flags;
        struct timespec now, boot, tmp;

        read_persistent_clock(&now);

        if (!timespec_valid_strict(&now)) {
                pr_warn("WARNING: Persistent clock returned invalid value!\n"
                        "         Check your CMOS/BIOS settings.\n");
                now.tv_sec = 0;
                now.tv_nsec = 0;
        } else if (now.tv_sec || now.tv_nsec)
                persistent_clock_exist = true;

        read_boot_clock(&boot);
        if (!timespec_valid_strict(&boot)) {
                pr_warn("WARNING: Boot clock returned invalid value!\n"
                        "         Check your CMOS/BIOS settings.\n");
                boot.tv_sec = 0;
                boot.tv_nsec = 0;
        }

        seqlock_init(&tk->lock);

        ntp_init();

        write_seqlock_irqsave(&tk->lock, flags);
        clock = clocksource_default_clock();
        if (clock->enable)
                clock->enable(clock);
        tk_setup_internals(tk, clock);

        tk_set_xtime(tk, &now);
        tk->raw_time.tv_sec = 0;
        tk->raw_time.tv_nsec = 0;
        if (boot.tv_sec == 0 && boot.tv_nsec == 0)
                boot = tk_xtime(tk);

        set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
        tk_set_wall_to_mono(tk, tmp);

        tmp.tv_sec = 0;
        tmp.tv_nsec = 0;
        tk_set_sleep_time(tk, tmp);

        write_sequnlock_irqrestore(&tk->lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
                                                struct timespec *delta)
{
        if (!timespec_valid_strict(delta)) {
                printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
                                        "sleep delta value!\n");
                return;
        }
        tk_xtime_add(tk, delta);
        tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
        tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
}

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;

        /*
         * Make sure we don't set the clock twice, as timekeeping_resume()
         * already did it
         */
        if (has_persistent_clock())
                return;

        write_seqlock_irqsave(&tk->lock, flags);

        timekeeping_forward_now(tk);

        __timekeeping_inject_sleeptime(tk, delta);

        timekeeping_update(tk, true);

        write_sequnlock_irqrestore(&tk->lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();
}

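/*
 * Example (editor's sketch): an RTC driver whose hardware is only readable
 * with interrupts enabled could account for suspend time from its resume
 * callback. "secs_asleep" is a hypothetical value computed from the RTC:
 *
 *	struct timespec sleep_len = { .tv_sec = secs_asleep, .tv_nsec = 0 };
 *
 *	timekeeping_inject_sleeptime(&sleep_len);
 */
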
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
        struct timekeeper *tk = &timekeeper;
        struct clocksource *clock = tk->clock;
        unsigned long flags;
        struct timespec ts_new, ts_delta;
        cycle_t cycle_now, cycle_delta;
        bool suspendtime_found = false;

        read_persistent_clock(&ts_new);

        clockevents_resume();
        clocksource_resume();

        write_seqlock_irqsave(&tk->lock, flags);

        /*
         * After the system resumes, we need to calculate the suspended
         * time and compensate the OS time for it. There are 3 sources
         * that could be used: the nonstop clocksource during suspend,
         * the persistent clock and the rtc device.
         *
         * One specific platform may have 1 or 2 or all of them, and the
         * preference will be:
         *	suspend-nonstop clocksource -> persistent clock -> rtc
         * The less preferred source will only be tried if there is no better
         * usable source. The rtc part is handled separately in rtc core code.
         */
        cycle_now = clock->read(clock);
        if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
                cycle_now > clock->cycle_last) {
                u64 num, max = ULLONG_MAX;
                u32 mult = clock->mult;
                u32 shift = clock->shift;
                s64 nsec = 0;

                cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

                /*
                 * "cycle_delta * mult" may overflow 64 bits if the
                 * suspended time is too long. In that case we need to do
                 * the 64-bit math carefully.
                 */
                do_div(max, mult);
                if (cycle_delta > max) {
                        num = div64_u64(cycle_delta, max);
                        nsec = (((u64) max * mult) >> shift) * num;
                        cycle_delta -= num * max;
                }
                nsec += ((u64) cycle_delta * mult) >> shift;

                ts_delta = ns_to_timespec(nsec);
                suspendtime_found = true;
        } else if (timespec_compare(&ts_new, &timekeeping_suspend_time) > 0) {
                ts_delta = timespec_sub(ts_new, timekeeping_suspend_time);
                suspendtime_found = true;
        }

        if (suspendtime_found)
                __timekeeping_inject_sleeptime(tk, &ts_delta);

        /* Re-base the last cycle value */
        clock->cycle_last = cycle_now;
        tk->ntp_error = 0;
        timekeeping_suspended = 0;
        timekeeping_update(tk, false);
        write_sequnlock_irqrestore(&tk->lock, flags);

        touch_softlockup_watchdog();

        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

        /* Resume hrtimers */
        hrtimers_resume();
}

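/*
 * Worked example (editor's illustration): with an assumed mult = 2^24 and
 * shift = 24 (1 ns per cycle), max becomes ULLONG_MAX / 2^24 = 2^40 cycles.
 * A suspend longer than 2^40 ns (roughly 18 minutes) would overflow
 * "cycle_delta * mult", so the delta is split into "num" chunks of at most
 * "max" cycles, each converted separately and then summed.
 */
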
static int timekeeping_suspend(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        struct timespec delta, delta_delta;
        static struct timespec old_delta;

        read_persistent_clock(&timekeeping_suspend_time);

        write_seqlock_irqsave(&tk->lock, flags);
        timekeeping_forward_now(tk);
        timekeeping_suspended = 1;

        /*
         * To avoid drift caused by repeated suspend/resumes,
         * which each can add ~1 second drift error,
         * try to compensate so the difference in system time
         * and persistent_clock time stays close to constant.
         */
        delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
        delta_delta = timespec_sub(delta, old_delta);
        if (abs(delta_delta.tv_sec) >= 2) {
                /*
                 * if delta_delta is too large, assume time correction
                 * has occurred and set old_delta to the current delta.
                 */
                old_delta = delta;
        } else {
                /* Otherwise try to adjust old_system to compensate */
                timekeeping_suspend_time =
                        timespec_add(timekeeping_suspend_time, delta_delta);
        }
        write_sequnlock_irqrestore(&tk->lock, flags);

        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
        clocksource_suspend();
        clockevents_suspend();

        return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
        .resume		= timekeeping_resume,
        .suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
        register_syscore_ops(&timekeeping_syscore_ops);
        return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
                                                 s64 error, s64 *interval,
                                                 s64 *offset)
{
        s64 tick_error, i;
        u32 look_ahead, adj;
        s32 error2, mult;

        /*
         * Use the current error value to determine how much to look ahead.
         * The larger the error the slower we adjust for it to avoid problems
         * with losing too many ticks, otherwise we would overadjust and
         * produce an even larger error. The smaller the adjustment the
         * faster we try to adjust for it, as lost ticks can do less harm
         * here. This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
        error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;

        /*
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
        tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
        tick_error -= tk->xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;

        /* Finally calculate the adjustment shift value. */
        i = *interval;
        mult = 1;
        if (error < 0) {
                error = -error;
                *interval = -*interval;
                *offset = -*offset;
                mult = -1;
        }
        for (adj = 0; error > i; adj++)
                error >>= 1;

        *interval <<= adj;
        *offset <<= adj;
        return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
        s64 error, interval = tk->cycle_interval;
        int adj;

        /*
         * The point of this is to check if the error is greater than half
         * an interval.
         *
         * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
         *
         * Note we subtract one in the shift, so that error is really error*2.
         * This "saves" dividing (shifting) interval twice, but keeps the
         * (error > interval) comparison as still measuring if error is
         * larger than half an interval.
         *
         * Note: It does not "save" on aggravation when reading the code.
         */
        error = tk->ntp_error >> (tk->ntp_error_shift - 1);
        if (error > interval) {
                /*
                 * We now divide error by 4 (via shift), which checks if
                 * the error is greater than twice the interval.
                 * If it is greater, we need a bigadjust; if it's smaller,
                 * we can adjust by 1.
                 */
                error >>= 2;
                /*
                 * XXX - In update_wall_time, we round up to the next
                 * nanosecond, and store the amount rounded up into
                 * the error. This causes the likely below to be unlikely.
                 *
                 * The proper fix is to avoid rounding up by using
                 * the high precision tk->xtime_nsec instead of
                 * xtime.tv_nsec everywhere. Fixing this will take some
                 * time.
                 */
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = timekeeping_bigadjust(tk, error, &interval, &offset);
        } else if (error < -interval) {
                /* See comment above, this is just switched for the negative */
                error >>= 2;
                if (likely(error >= -interval)) {
                        adj = -1;
                        interval = -interval;
                        offset = -offset;
                } else {
                        adj = timekeeping_bigadjust(tk, error, &interval, &offset);
                }
        } else {
                goto out_adjust;
        }

        if (unlikely(tk->clock->maxadj &&
                (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
                printk_once(KERN_WARNING
                        "Adjusting %s more than 11%% (%ld vs %ld)\n",
                        tk->clock->name, (long)tk->mult + adj,
                        (long)tk->clock->mult + tk->clock->maxadj);
        }
        /*
         * So the following can be confusing.
         *
         * To keep things simple, let's assume adj == 1 for now.
         *
         * When adj != 1, remember that the interval and offset values
         * have been appropriately scaled so the math is the same.
         *
         * The basic idea here is that we're increasing the multiplier
         * by one, this causes the xtime_interval to be incremented by
         * one cycle_interval. This is because:
         *	xtime_interval = cycle_interval * mult
         * So if mult is being incremented by one:
         *	xtime_interval = cycle_interval * (mult + 1)
         * It's the same as:
         *	xtime_interval = (cycle_interval * mult) + cycle_interval
         * Which can be shortened to:
         *	xtime_interval += cycle_interval
         *
         * So offset stores the non-accumulated cycles. Thus the current
         * time (in shifted nanoseconds) is:
         *	now = (offset * adj) + xtime_nsec
         * Now, even though we're adjusting the clock frequency, we have
         * to keep time consistent. In other words, we can't jump back
         * in time, and we also want to avoid jumping forward in time.
         *
         * So given the same offset value, we need the time to be the same
         * both before and after the freq adjustment.
         *	now = (offset * adj_1) + xtime_nsec_1
         *	now = (offset * adj_2) + xtime_nsec_2
         * So:
         *	(offset * adj_1) + xtime_nsec_1 =
         *		(offset * adj_2) + xtime_nsec_2
         * And we know:
         *	adj_2 = adj_1 + 1
         * So:
         *	(offset * adj_1) + xtime_nsec_1 =
         *		(offset * (adj_1+1)) + xtime_nsec_2
         *	(offset * adj_1) + xtime_nsec_1 =
         *		(offset * adj_1) + offset + xtime_nsec_2
         * Canceling the sides:
         *	xtime_nsec_1 = offset + xtime_nsec_2
         * Which gives us:
         *	xtime_nsec_2 = xtime_nsec_1 - offset
         * Which simplifies to:
         *	xtime_nsec -= offset
         *
         * XXX - TODO: Doc ntp_error calculation.
         */
        tk->mult += adj;
        tk->xtime_interval += interval;
        tk->xtime_nsec -= offset;
        tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;

out_adjust:
        /*
         * It may be possible that when we entered this function, xtime_nsec
         * was very small. Further, if we're slightly speeding the clocksource
         * in the code above, it's possible the required corrective factor to
         * xtime_nsec could cause it to underflow.
         *
         * Now, since we already accumulated the second, we cannot simply roll
         * the accumulated second back, since the NTP subsystem has been
         * notified via second_overflow. So instead we push xtime_nsec forward
         * by the amount we underflowed, and add that amount into the error.
         *
         * We'll correct this error next time through this function, when
         * xtime_nsec is not as small.
         */
        if (unlikely((s64)tk->xtime_nsec < 0)) {
                s64 neg = -(s64)tk->xtime_nsec;
                tk->xtime_nsec = 0;
                tk->ntp_error += neg << tk->ntp_error_shift;
        }
}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_sec field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
{
        u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;

        while (tk->xtime_nsec >= nsecps) {
                int leap;

                tk->xtime_nsec -= nsecps;
                tk->xtime_sec++;

                /* Figure out if it's a leap sec and apply if needed */
                leap = second_overflow(tk->xtime_sec);
                if (unlikely(leap)) {
                        struct timespec ts;

                        tk->xtime_sec += leap;

                        ts.tv_sec = leap;
                        ts.tv_nsec = 0;
                        tk_set_wall_to_mono(tk,
                                timespec_sub(tk->wall_to_monotonic, ts));

                        __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

                        clock_was_set_delayed();
                }
        }
}

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
                                                u32 shift)
{
        cycle_t interval = tk->cycle_interval << shift;
        u64 raw_nsecs;

        /* If the offset is smaller than a shifted interval, do nothing */
        if (offset < interval)
                return offset;

        /* Accumulate one shifted interval */
        offset -= interval;
        tk->clock->cycle_last += interval;

        tk->xtime_nsec += tk->xtime_interval << shift;
        accumulate_nsecs_to_secs(tk);

        /* Accumulate raw time */
        raw_nsecs = (u64)tk->raw_interval << shift;
        raw_nsecs += tk->raw_time.tv_nsec;
        if (raw_nsecs >= NSEC_PER_SEC) {
                u64 raw_secs = raw_nsecs;
                raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
                tk->raw_time.tv_sec += raw_secs;
        }
        tk->raw_time.tv_nsec = raw_nsecs;

        /* Accumulate error between NTP and clock interval */
        tk->ntp_error += ntp_tick_length() << shift;
        tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
                                                (tk->ntp_error_shift + shift);

        return offset;
}

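/*
 * Worked example (editor's illustration): if roughly 1000 tick intervals
 * are pending after a long NO_HZ idle period, update_wall_time() picks
 * shift ~= ilog2(1000) = 9, so the first call here consumes 512 intervals
 * in one step, the next at most 256, and so on; about log2(N) calls
 * replace N per-tick accumulations.
 */
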
#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
        s64 remainder;

        /*
         * Store only full nanoseconds into xtime_nsec after rounding
         * it up and add the remainder to the error difference.
         * XXX - This is necessary to avoid small 1ns inconsistencies caused
         * by truncating the remainder in vsyscalls. However, it causes
         * additional work to be done in timekeeping_adjust(). Once
         * the vsyscall implementations are converted to use xtime_nsec
         * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
         * users are removed, this can be killed.
         */
        remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
        tk->xtime_nsec -= remainder;
        tk->xtime_nsec += 1ULL << tk->shift;
        tk->ntp_error += remainder << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
static void update_wall_time(void)
{
        struct clocksource *clock;
        struct timekeeper *tk = &timekeeper;
        cycle_t offset;
        int shift = 0, maxshift;
        unsigned long flags;

        write_seqlock_irqsave(&tk->lock, flags);

        /* Make sure we're fully resumed: */
        if (unlikely(timekeeping_suspended))
                goto out;

        clock = tk->clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
        offset = tk->cycle_interval;
#else
        offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif

        /* Check if there's really nothing to do */
        if (offset < tk->cycle_interval)
                goto out;

        /*
         * With NO_HZ we may have to accumulate many cycle_intervals
         * (think "ticks") worth of time at once. To do this efficiently,
         * we calculate the largest doubling multiple of cycle_intervals
         * that is smaller than the offset. We then accumulate that
         * chunk in one go, and then try to consume the next smaller
         * doubled multiple.
         */
        shift = ilog2(offset) - ilog2(tk->cycle_interval);
        shift = max(0, shift);
        /* Bound shift to one less than what overflows tick_length */
        maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
        shift = min(shift, maxshift);
        while (offset >= tk->cycle_interval) {
                offset = logarithmic_accumulation(tk, offset, shift);
                if (offset < tk->cycle_interval<<shift)
                        shift--;
        }

        /* correct the clock when NTP error is too big */
        timekeeping_adjust(tk, offset);

        /*
         * XXX This can be killed once everyone converts
         * to the new update_vsyscall.
         */
        old_vsyscall_fixup(tk);

        /*
         * Finally, make sure that after the rounding
         * xtime_nsec isn't larger than NSEC_PER_SEC
         */
        accumulate_nsecs_to_secs(tk);

        timekeeping_update(tk, false);

out:
        write_sequnlock_irqrestore(&tk->lock, flags);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec boottime = {
                .tv_sec = tk->wall_to_monotonic.tv_sec +
                                tk->total_sleep_time.tv_sec,
                .tv_nsec = tk->wall_to_monotonic.tv_nsec +
                                tk->total_sleep_time.tv_nsec
        };

        set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);

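/*
 * Identity check (editor's note): wall_to_monotonic = mono - wall, and the
 * real time elapsed since boot is mono + total_sleep_time, so the wall time
 * at boot is wall - (mono + sleep) = -(wall_to_monotonic + total_sleep_time),
 * which is exactly the negated sum normalized above.
 */
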
/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:		pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec tomono, sleep;
        s64 nsec;
        unsigned int seq;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&tk->lock);
                ts->tv_sec = tk->xtime_sec;
                nsec = timekeeping_get_ns(tk);
                tomono = tk->wall_to_monotonic;
                sleep = tk->total_sleep_time;

        } while (read_seqretry(&tk->lock, seq));

        ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);

/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
        struct timespec ts;

        get_monotonic_boottime(&ts);
        return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;

        *ts = timespec_add(*ts, tk->total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
        struct timekeeper *tk = &timekeeper;

        return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
        struct timekeeper *tk = &timekeeper;

        return tk_xtime(tk);
}

struct timespec current_kernel_time(void)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec now;
        unsigned long seq;

        do {
                seq = read_seqbegin(&tk->lock);

                now = tk_xtime(tk);
        } while (read_seqretry(&tk->lock, seq));

        return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec now, mono;
        unsigned long seq;

        do {
                seq = read_seqbegin(&tk->lock);

                now = tk_xtime(tk);
                mono = tk->wall_to_monotonic;
        } while (read_seqretry(&tk->lock, seq));

        set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
                                now.tv_nsec + mono.tv_nsec);
        return now;
}

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
        jiffies_64 += ticks;
        update_wall_time();
        calc_global_load(ticks);
}

/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:	pointer to timespec to be set with xtime
 * @wtom:	pointer to timespec to be set with wall_to_monotonic
 * @sleep:	pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
                                struct timespec *wtom, struct timespec *sleep)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;

        do {
                seq = read_seqbegin(&tk->lock);
                *xtim = tk_xtime(tk);
                *wtom = tk->wall_to_monotonic;
                *sleep = tk->total_sleep_time;
        } while (read_seqretry(&tk->lock, seq));
}

#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
                                                        ktime_t *offs_tai)
{
        struct timekeeper *tk = &timekeeper;
        ktime_t now;
        unsigned int seq;
        u64 secs, nsecs;

        do {
                seq = read_seqbegin(&tk->lock);

                secs = tk->xtime_sec;
                nsecs = timekeeping_get_ns(tk);

                *offs_real = tk->offs_real;
                *offs_boot = tk->offs_boot;
                *offs_tai = tk->offs_tai;
        } while (read_seqretry(&tk->lock, seq));

        now = ktime_add_ns(ktime_set(secs, 0), nsecs);
        now = ktime_sub(now, *offs_real);
        return now;
}
#endif

/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        struct timespec wtom;

        do {
                seq = read_seqbegin(&tk->lock);
                wtom = tk->wall_to_monotonic;
        } while (read_seqretry(&tk->lock, seq));

        return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
        write_seqlock(&jiffies_lock);
        do_timer(ticks);
        write_sequnlock(&jiffies_lock);
}