#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/percpu.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"
extern int tsc_unstable;
extern int tsc_disabled;
/* Accelerators for sched_clock()
 * convert from cycles (64 bits) => nanoseconds (64 bits)
 *  basic equation:
 *	ns = cycles / (freq / ns_per_sec)
 *	ns = cycles * (ns_per_sec / freq)
 *	ns = cycles * (10^9 / (cpu_khz * 10^3))
 *	ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *	ns = cycles * (10^6 * SC / cpu_khz) / SC
 *	ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift.
 *
 * We can use a khz divisor instead of mhz to keep a better precision,
 * since cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *	-johnstul@us.ibm.com "math is hard, let's go shopping!"
 */
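/*
 * Illustrative example (not from the original source), assuming
 * CYC2NS_SCALE_FACTOR == 10 (i.e. SC == 2^10) as defined in asm/timer.h,
 * for a hypothetical 2 GHz CPU:
 *
 *	cpu_khz      = 2000000
 *	cyc2ns_scale = 10^6 * 2^10 / 2000000 = 512
 *	ns           = cycles * 512 >> 10 = cycles / 2
 *
 * i.e. each TSC cycle accounts for 0.5 ns, as expected at 2 GHz.
 */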
DEFINE_PER_CPU(unsigned long, cyc2ns);
static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	unsigned long flags, *scale;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	scale = &per_cpu(cyc2ns, cpu);

	rdtscll(tsc_now);
	ns_now = __cycles_2_ns(tsc_now);

	if (cpu_khz)
		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;

	/*
	 * Start smoothly with the new frequency:
	 */
	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}
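/*
 * For reference, a sketch of the consumer side (it lives in asm/timer.h,
 * shown here only to make the scaling round-trip visible):
 *
 *	static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
 *	{
 *		cyc *= per_cpu(cyc2ns, smp_processor_id());
 *		return cyc >> CYC2NS_SCALE_FACTOR;
 *	}
 */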
unsigned long native_calculate_cpu_khz(void)
{
	unsigned long long start, end;
	unsigned long count;
	u64 delta64 = (u64)ULLONG_MAX;
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/* run 3 times to ensure the cache is warm and to get an accurate reading */
	for (i = 0; i < 3; i++) {
		mach_prepare_counter();
		rdtscll(start);
		mach_countup(&count);
		rdtscll(end);

		/*
		 * Error: ECTCNEVERSET
		 * The CTC wasn't reliable: we got a hit on the very first read,
		 * or the CPU was so fast/slow that the quotient wouldn't fit in
		 * 32 bits.
		 */
		if (count <= 1)
			continue;

		/* cpu freq too slow: */
		if ((end - start) <= CALIBRATE_TIME_MSEC)
			goto err;

		/*
		 * We want the minimum time of all runs in case one of them
		 * is inaccurate due to SMI or other delay
		 */
		delta64 = min(delta64, (end - start));
	}

	/* cpu freq too fast (or every run was bad): */
	if (delta64 > (1ULL << 32))
		goto err;

	delta64 += CALIBRATE_TIME_MSEC / 2;	/* round for do_div */
	do_div(delta64, CALIBRATE_TIME_MSEC);

	local_irq_restore(flags);
	return (unsigned long)delta64;
err:
	local_irq_restore(flags);
	return 0;
}
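/*
 * Illustrative example: with CALIBRATE_TIME_MSEC == 30 (from mach_timer.h),
 * a hypothetical 2 GHz CPU accumulates roughly 60,000,000 TSC cycles while
 * the PIT counts down those 30 ms, so the division above yields
 *
 *	delta64 / CALIBRATE_TIME_MSEC = 60000000 / 30 = 2000000 kHz
 *
 * which is the expected cpu_khz for that part.
 */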
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
				      cpu_khz_old, cpu_khz);
		return 0;
	}
	return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);
#ifdef CONFIG_CPU_FREQ

/*
 * If the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long cpu_khz_ref;
static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	if (!ref_freq) {
		if (!freq->old) {
			ref_freq = freq->new;
			return 0;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
		cpu_khz_ref = cpu_khz;
	}

	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data(freq->cpu).loops_per_jiffy =
				cpufreq_scale(loops_per_jiffy_ref,
					      ref_freq, freq->new);

		if (cpu_khz) {
			if (num_online_cpus() == 1)
				cpu_khz = cpufreq_scale(cpu_khz_ref,
							ref_freq, freq->new);
			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
				tsc_khz = cpu_khz;
				set_cyc2ns_scale(cpu_khz, freq->cpu);
				/*
				 * TSC based sched_clock turns
				 * to junk w/ cpufreq
				 */
				mark_tsc_unstable("cpufreq changes");
			}
		}
	}

	return 0;
}
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	return cpufreq_register_notifier(&time_cpufreq_notifier_block,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif
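/*
 * Illustrative example: cpufreq_scale(old, ref, new) rescales a value
 * proportionally to a frequency change, roughly old * new / ref. If the
 * reference frequency was 2000000 kHz with loops_per_jiffy_ref == 8000000
 * and the CPU drops to 1000000 kHz, loops_per_jiffy becomes ~4000000,
 * keeping TSC-based udelay() calibrated across the transition.
 */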
/* clock source code */

static struct clocksource clocksource_tsc;
/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp issue. This can be observed in
 * a very small window right after one CPU updated cycle_last under
 * xtime lock and the other CPU reads a TSC value which is smaller
 * than the cycle_last reference value due to a TSC which is slightly
 * behind. This delta is nowhere else observable, but in that case it
 * results in a forward time jump in the range of hours due to the
 * unsigned delta calculation of the time keeping core code, which is
 * necessary to support wrapping clocksources like pm timer.
 */
static cycle_t read_tsc(void)
{
	cycle_t ret;

	rdtscll(ret);

	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}
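/*
 * Illustrative example of the clamp above: if one CPU has just stored
 * cycle_last == 1000 and another CPU's slightly-behind TSC reads 998,
 * returning the raw value would make the timekeeping core compute the
 * unsigned delta 998 - 1000 == 2^64 - 2 and jump time forward by hours;
 * clamping to cycle_last turns this into a harmless zero-length interval.
 */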
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.mult			= 0, /* to be set */
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
};
void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk("Marking TSC unstable due to: %s.\n", reason);
		/* Can be called before registration */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
	       d->ident);
	tsc_unstable = 1;
	return 0;
}
/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}
};
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

	/* Anything with constant TSC should be synchronized */
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}

	return tsc_unstable;
}
/*
 * Geode_LX - the OLPC CPU possibly has a very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long res_low, res_high;

	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	/* if the TSC keeps running across suspend, no watchdog is needed */
	if (res_low & RTSC_SUSP)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif
void __init tsc_init(void)
{
	int cpu;
	u64 lpj;

	if (!cpu_has_tsc || tsc_disabled > 0)
		return;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		return;
	}

	/* derive a fine-grained loops-per-jiffy from the TSC rate */
	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	/* now allow native_sched_clock() to use rdtsc */
	tsc_disabled = 0;

	printk("Detected %lu.%03lu MHz processor.\n",
	       (unsigned long)cpu_khz / 1000,
	       (unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(cpu_khz, cpu);

	use_tsc_delay();

	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	unsynchronized_tsc();
	check_geode_tsc_reliable();
	clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
						    clocksource_tsc.shift);
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}
	clocksource_register(&clocksource_tsc);
}
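/*
 * Illustrative example: clocksource_khz2mult() chooses mult so that
 * ns = (cycles * mult) >> shift. With the shift of 22 set above and a
 * hypothetical tsc_khz of 2000000:
 *
 *	mult = 10^6 * 2^22 / 2000000 = 2^21 = 2097152
 *	ns   = cycles * 2097152 >> 22 = cycles / 2
 *
 * matching the 0.5 ns/cycle of a 2 GHz TSC, with more fractional
 * precision than the per-cpu cyc2ns scale.
 */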