/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
/* Structure holding internal timekeeping values. */
struct timekeeper {
	/* Current clocksource used for timekeeping. */
	struct clocksource *clock;
	/* The shift value of the current clocksource. */
	int	shift;

	/* Number of clock cycles in one NTP interval. */
	cycle_t cycle_interval;
	/* Number of clock shifted nano seconds in one NTP interval. */
	u64	xtime_interval;
	/* shifted nano seconds left over when rounding cycle_interval */
	s64	xtime_remainder;
	/* Raw nano seconds accumulated per NTP interval. */
	u32	raw_interval;

	/* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */
	u64	xtime_nsec;
	/* Difference between accumulated time and NTP time in ntp
	 * shifted nano seconds. */
	s64	ntp_error;
	/* Shift conversion between clock shifted nano seconds and
	 * ntp shifted nano seconds. */
	int	ntp_error_shift;
	/* NTP adjusted clock multiplier */
	u32	mult;
};

static struct timekeeper timekeeper;
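
/*
 * A note on the "shifted nano seconds" units above: time deltas are kept
 * as (nanoseconds << shift) so that sub-nanosecond precision survives
 * repeated small NTP adjustments. As an illustration, with a hypothetical
 * shift of 24, one unit is 1/2^24 ns and a 10 ms tick is stored as
 * 10000000 << 24.
 */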
/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:		Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;

	timekeeper.clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	timekeeper.cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	timekeeper.xtime_interval = (u64) interval * clock->mult;
	timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
	timekeeper.raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	timekeeper.xtime_nsec = 0;
	timekeeper.shift = clock->shift;

	timekeeper.ntp_error = 0;
	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	timekeeper.mult = clock->mult;
}
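
/*
 * Worked example of the setup above, assuming a hypothetical 10 MHz
 * clocksource with shift = 24 (so mult = 100 << 24, i.e. 100 ns per
 * cycle) and HZ = 100 (NTP_INTERVAL_LENGTH = 10000000 ns):
 *
 *	tmp            = (10000000 << 24) + mult/2	(round to nearest)
 *	cycle_interval = tmp / mult = 100000 cycles per tick
 *	xtime_interval = 100000 * mult = 10000000 << 24 shifted ns
 *	raw_interval   = xtime_interval >> 24 = 10000000 ns
 */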
/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert delta to nanoseconds using the NTP-adjusted mult: */
	return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);
}
static inline s64 timekeeping_get_ns_raw(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert delta to nanoseconds using the unadjusted clock mult: */
	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}
/*
 * This seqlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
/*
 * The current time
 *
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffy times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the monotonic
 * time not to jump. We need to add total_sleep_time to wall_to_monotonic
 * to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time, getboottime must be
 *   used instead.
 */
static struct timespec xtime __attribute__ ((aligned (16)));
static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static struct timespec total_sleep_time;

/*
 * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
 */
static struct timespec raw_time;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = timekeeper.clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);

	/* If arch requires, add in gettimeoffset() */
	nsec += arch_gettimeoffset();

	timespec_add_ns(&xtime, nsec);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&raw_time, nsec);
}
/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);
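
/*
 * Usage sketch (hypothetical caller, not part of this file): the seqlock
 * retry loop above is entirely internal, so a driver simply does
 *
 *	struct timespec ts;
 *
 *	getnstimeofday(&ts);
 *	pr_info("wall time: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 *
 * and gets a self-consistent snapshot of the time of day.
 */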
/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
		nsecs += timekeeping_get_ns();
		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;
		nsecs = timekeeping_get_ns();
		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		u32 arch_offset;

		seq = read_seqbegin(&xtime_lock);

		*ts_raw = raw_time;
		*ts_real = xtime;

		nsecs_raw = timekeeping_get_ns_raw();
		nsecs_real = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		arch_offset = arch_gettimeoffset();
		nsecs_raw += arch_offset;
		nsecs_real += arch_offset;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */
/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	timekeeping_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;

	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
				timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts: pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	unsigned long flags;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	timekeeping_forward_now();

	xtime = timespec_add(xtime, *ts);
	wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);

	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
				timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(timekeeping_inject_offset);
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct clocksource *new, *old;

	new = (struct clocksource *) data;

	timekeeping_forward_now();
	if (!new->enable || new->enable(new) == 0) {
		old = timekeeper.clock;
		timekeeper_setup_internals(new);
		if (old->disable)
			old->disable(old);
	}
	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	if (timekeeper.clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}
/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);
/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);
		nsecs = timekeeping_get_ns_raw();
		*ts = raw_time;
	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);
		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}
/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 *
 * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
 * ensure that the clocksource does not change!
 */
u64 timekeeping_max_deferment(void)
{
	return timekeeper.clock->max_idle_ns;
}
/**
 * read_persistent_clock - Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}
/**
 * read_boot_clock - Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot;

	read_persistent_clock(&now);
	read_boot_clock(&boot);

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	timekeeper_setup_internals(clock);

	xtime.tv_sec = now.tv_sec;
	xtime.tv_nsec = now.tv_nsec;
	raw_time.tv_sec = 0;
	raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
		boot.tv_sec = xtime.tv_sec;
		boot.tv_nsec = xtime.tv_nsec;
	}
	set_normalized_timespec(&wall_to_monotonic,
				-boot.tv_sec, -boot.tv_nsec);
	total_sleep_time.tv_sec = 0;
	total_sleep_time.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}
/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timespec *delta)
{
	if (!timespec_valid(delta)) {
		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
					"sleep delta value!\n");
		return;
	}

	xtime = timespec_add(xtime, *delta);
	wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
	total_sleep_time = timespec_add(total_sleep_time, *delta);
}
/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
	unsigned long flags;
	struct timespec ts;

	/* Make sure we don't set the clock twice */
	read_persistent_clock(&ts);
	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
		return;

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_forward_now();

	__timekeeping_inject_sleeptime(delta);

	timekeeper.ntp_error = 0;
	ntp_clear();
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
				timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		__timekeeping_inject_sleeptime(&ts);
	}
	/* re-base the last cycle value */
	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
	timekeeper.ntp_error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hrtimers_resume();
}
static int timekeeping_suspend(void)
{
	unsigned long flags;
	struct timespec		delta, delta_delta;
	static struct timespec	old_delta;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_forward_now();
	timekeeping_suspended = 1;

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * which each can add ~1 second drift error,
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
	delta = timespec_sub(xtime, timekeeping_suspend_time);
	delta_delta = timespec_sub(delta, old_delta);
	if (abs(delta_delta.tv_sec) >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise shift the suspend reference to compensate */
		timekeeping_suspend_time =
			timespec_add(timekeeping_suspend_time, delta_delta);
	}
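
	/*
	 * Illustrative numbers: if xtime reads 100.7s when the persistent
	 * clock reads 100s, delta is 0.7s. If a later suspend sees delta
	 * drift to 1.2s (delta_delta = 0.5s), that 0.5s is folded into
	 * timekeeping_suspend_time above, so the sleep interval injected
	 * at resume keeps the system/persistent offset near constant.
	 */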
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();

	return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}

device_initcall(timekeeping_init_ops);
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error. The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here. This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
	tick_error -= timekeeper.xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}
/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(s64 offset)
{
	s64 error, interval = timekeeper.cycle_interval;
	int adj;

	/*
	 * The point of this is to check if the error is greater than half
	 * an interval.
	 *
	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
	 *
	 * Note we subtract one in the shift, so that error is really error*2.
	 * This "saves" dividing(shifting) interval twice, but keeps the
	 * (error > interval) comparison as still measuring if error is
	 * larger than half an interval.
	 *
	 * Note: It does not "save" on aggravation when reading the code.
	 */
	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
	if (error > interval) {
		/*
		 * We now divide error by 4 (via shift), which checks if
		 * the error is greater than twice the interval.
		 * If it is greater, we need a bigadjust, if it's smaller,
		 * we can adjust by 1.
		 */
		error >>= 2;
		/*
		 * XXX - In update_wall_time, we round up to the next
		 * nanosecond, and store the amount rounded up into
		 * the error. This causes the likely below to be unlikely.
		 *
		 * The proper fix is to avoid rounding up by using
		 * the high precision timekeeper.xtime_nsec instead of
		 * xtime.tv_nsec everywhere. Fixing this will take some
		 * time.
		 */
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		/* See comment above, this is just switched for the negative */
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else /* No adjustment needed */
		return;
	WARN_ONCE(timekeeper.clock->maxadj &&
			(timekeeper.mult + adj > timekeeper.clock->mult +
						timekeeper.clock->maxadj),
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			timekeeper.clock->name, (long)timekeeper.mult + adj,
			(long)timekeeper.clock->mult +
				timekeeper.clock->maxadj);
	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, let's assume adj == 1 for now.
	 *
	 * When adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, this causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * It's the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
	timekeeper.mult += adj;
	timekeeper.xtime_interval += interval;
	timekeeper.xtime_nsec -= offset;
	timekeeper.ntp_error -= (interval - offset) <<
				timekeeper.ntp_error_shift;
}
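
/*
 * To gauge the step size above: a single adj of +/-1 changes the
 * effective clock rate by a factor of (mult +/- 1)/mult. For a
 * hypothetical mult around 100 << 24, one step is roughly 0.6 parts
 * per billion, so the multiplier is steered very gently.
 */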
/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
{
	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < timekeeper.cycle_interval<<shift)
		return offset;

	/* Accumulate one shifted interval */
	offset -= timekeeper.cycle_interval << shift;
	timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;

	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
	while (timekeeper.xtime_nsec >= nsecps) {
		int leap;
		timekeeper.xtime_nsec -= nsecps;
		xtime.tv_sec++;
		leap = second_overflow(xtime.tv_sec);
		xtime.tv_sec += leap;
		wall_to_monotonic.tv_sec -= leap;
	}

	/* Accumulate raw time */
	raw_nsecs = timekeeper.raw_interval << shift;
	raw_nsecs += raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		raw_time.tv_sec += raw_secs;
	}
	raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	timekeeper.ntp_error += tick_length << shift;
	timekeeper.ntp_error -=
		(timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
				(timekeeper.ntp_error_shift + shift);

	return offset;
}
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
static void update_wall_time(void)
{
	struct clocksource *clock;
	cycle_t offset;
	int shift = 0, maxshift;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

	clock = timekeeper.clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = timekeeper.cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset. We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
	shift = min(shift, maxshift);
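	/*
	 * Illustration: if roughly 100 intervals are pending, shift starts
	 * near ilog2(100) = 6, so the loop below consumes 64 intervals in
	 * one step, drops to shift 5 for the next 32, and works down to
	 * the remainder, accumulating in O(log(offset)) steps rather than
	 * one interval at a time.
	 */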
	while (offset >= timekeeper.cycle_interval) {
		offset = logarithmic_accumulation(offset, shift);
		if (offset < timekeeper.cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(offset);
	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, it's possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in timekeeping_adjust(),
	 * it's possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
		s64 neg = -(s64)timekeeper.xtime_nsec;
		timekeeper.xtime_nsec = 0;
		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
	}
	/*
	 * Store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
	timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
	timekeeper.ntp_error += timekeeper.xtime_nsec <<
				timekeeper.ntp_error_shift;
	/*
	 * Finally, make sure that after the rounding
	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
	 */
	if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
		int leap;
		xtime.tv_nsec -= NSEC_PER_SEC;
		xtime.tv_sec++;
		leap = second_overflow(xtime.tv_sec);
		xtime.tv_sec += leap;
		wall_to_monotonic.tv_sec -= leap;
	}
	/* update the vsyscall copy of the time */
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
				timekeeper.mult);
}
/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timespec boottime = {
		.tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec,
		.tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);
/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:		pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
	struct timespec tomono, sleep;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;
		sleep = total_sleep_time;
		nsecs = timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
			ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);
/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
	struct timespec ts;

	get_monotonic_boottime(&ts);
	return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);
/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	*ts = timespec_add(*ts, total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
unsigned long get_seconds(void)
{
	return xtime.tv_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	return xtime;
}
struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);
struct timespec get_monotonic_coarse(void)
{
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
		mono = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}
/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_wall_time();
	calc_global_load(ticks);
}
/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:	pointer to timespec to be set with xtime
 * @wtom:	pointer to timespec to be set with wall_to_monotonic
 * @sleep:	pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
				struct timespec *wtom, struct timespec *sleep)
{
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		*xtim = xtime;
		*wtom = wall_to_monotonic;
		*sleep = total_sleep_time;
	} while (read_seqretry(&xtime_lock, seq));
}
/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
	unsigned long seq;
	struct timespec wtom;

	do {
		seq = read_seqbegin(&xtime_lock);
		wtom = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));
	return timespec_to_ktime(wtom);
}
/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&xtime_lock);
	do_timer(ticks);
	write_sequnlock(&xtime_lock);
}