/*
 * sched_clock for unstable cpu clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Updates and enhancements:
 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - jiffies
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as the base and add the unstable clock deltas. The deltas are
 * filtered, making the clock monotonic and keeping it within an expected
 * window. This window is set up using jiffies.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than 1 jiffy difference).
 */
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}
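
/*
 * Set once the clock infrastructure has been initialized; until then
 * sched_clock_cpu() simply returns 0.
 */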
static __read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK

struct sched_clock_data {
	/*
	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code so we don't want to do any
	 * instrumentation ourselves.
	 */
	raw_spinlock_t		lock;

	unsigned long		tick_jiffies;	/* jiffies at the last tick */
	u64			tick_raw;	/* sched_clock() at the last tick */
	u64			tick_gtod;	/* ktime_get() (ns) at the last tick */
	u64			clock;		/* last filtered clock value */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}
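
/*
 * Seed every cpu's clock data with the current gtod time;
 * sched_clock_cpu() returns 0 until this has run.
 */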
void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	unsigned long now_jiffies = jiffies;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		scd->tick_jiffies = now_jiffies;
		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use jiffies to generate a min,max window to clip the raw values
 */
static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
{
	unsigned long now_jiffies = jiffies;
	long delta_jiffies = now_jiffies - scd->tick_jiffies;
	u64 clock = scd->clock;
	u64 min_clock, max_clock;
	s64 delta = now - scd->tick_raw;

	WARN_ON_ONCE(!irqs_disabled());
	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;

	if (unlikely(delta < 0)) {
		clock++;
		goto out;
	}

	max_clock = min_clock + TICK_NSEC;
	if (unlikely(clock + delta > max_clock)) {
		if (clock < max_clock)
			clock = max_clock;
		else
			clock++;
	} else {
		clock += delta;
	}
out:
	if (unlikely(clock < min_clock))
		clock = min_clock;

	scd->tick_jiffies = now_jiffies;
	scd->clock = clock;
	return clock;
}
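
/*
 * Take both per-cpu clock locks in a fixed (address) order so that two
 * cpus updating each other's clock cannot deadlock.
 */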
static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}
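
/*
 * Return the filtered clock for @cpu. For a remote cpu both clocks are
 * updated under their locks and coupled to the larger value, which keeps
 * the result monotonic across cpus.
 */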
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock, this_clock, remote_clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		this_clock = __update_sched_clock(my_scd, now);
		remote_clock = scd->clock;

		/*
		 * Use the opportunity that we have both locks
		 * taken to couple the two clocks: we take the
		 * larger time as the latest time for both
		 * runqueues. (this creates monotonic movement)
		 */
		if (likely(remote_clock < this_clock)) {
			clock = this_clock;
			scd->clock = clock;
		} else {
			/*
			 * Should be rare, but possible:
			 */
			clock = remote_clock;
			my_scd->clock = remote_clock;
		}

		__raw_spin_unlock(&my_scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		clock = __update_sched_clock(scd, now);
	}

	__raw_spin_unlock(&scd->lock);

	return clock;
}
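
/*
 * Called from the scheduler tick with irqs disabled: resynchronize the
 * raw sched_clock() and gtod baselines used by the filter window.
 */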
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now);
	/*
	 * update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod to that would
	 * increase the clock by 2 jiffies.
	 */
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__raw_spin_unlock(&scd->lock);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled @delta_ns nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	struct sched_clock_data *scd = this_scd();

	/*
	 * Override the previous timestamp and ignore all
	 * sched_clock() deltas that occurred while we idled,
	 * and use the PM-provided delta_ns to advance the
	 * clock:
	 */
	__raw_spin_lock(&scd->lock);
	scd->clock += delta_ns;
	__raw_spin_unlock(&scd->lock);

	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;
	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
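
/*
 * The external interface, usable from any context: it disables interrupts
 * around sched_clock_cpu() itself.
 */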
unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);