2 * "High Precision Event Timer" based timekeeping.
4 * Copyright (c) 1991,1992,1995 Linus Torvalds
5 * Copyright (c) 1994 Alan Modra
6 * Copyright (c) 1995 Markus Kuhn
7 * Copyright (c) 1996 Ingo Molnar
8 * Copyright (c) 1998 Andrea Arcangeli
9 * Copyright (c) 2002,2006 Vojtech Pavlik
10 * Copyright (c) 2003 Andi Kleen
11 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/time.h>
#include <asm/i8253.h>
#include <asm/hpet.h>
#include <asm/nmi.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/timer.h>
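/*
 * Note: __section_jiffies places __jiffies in its own section so that it
 * can be mapped into the fixed-address vsyscall page and read cheaply
 * from user space.
 */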
volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;

unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or a copy
	   of eflags from PUSHF.
	   Eflags always has bits 22 and up cleared unlike kernel addresses. */
	if (!user_mode(regs) && in_lock_functions(pc)) {
		unsigned long *sp = (unsigned long *)regs->sp;
		if (sp[0] >> 22)
			return sp[0];
		if (sp[1] >> 22)
			return sp[1];
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);

static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
{
	add_pda(irq0_irqs, 1);

	global_clock_event->event_handler(global_clock_event);

	return IRQ_HANDLED;
}

/* native_calculate_cpu_khz is used on systems with fixed rate TSCs to
 * determine the actual processor frequency */
#define TICK_COUNT 100000000
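/*
 * Note: this relies on an AMD K7/K8-style performance counter: event
 * 0x76 (CPU_CLK_UNHALTED) counts actual core clocks, so the ratio of
 * core clocks to TSC ticks over a TICK_COUNT window, scaled by tsc_khz,
 * yields the real core frequency even when the TSC ticks at a fixed rate.
 */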
unsigned long __init native_calculate_cpu_khz(void)
{
	int tsc_start, tsc_now;
	int i, no_ctr_free;
	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
	unsigned long flags;
	for (i = 0; i < 4; i++)
		if (avail_to_resrv_perfctr_nmi_bit(i))
			break;
	no_ctr_free = (i == 4);
	if (no_ctr_free) {
		/* No free counter: borrow counter 3, saving its state. */
		i = 3;
		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		rdmsrl(MSR_K7_PERFCTR3, pmc3);
	} else {
		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
	local_irq_save(flags);
	/* start measuring cycles, incrementing from 0 */
	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
	/* enable (bit 22), count in user and kernel mode (3 << 16), event 0x76 */
	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
	rdtscl(tsc_start);
	do {
		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
		tsc_now = get_cycles();
	} while ((tsc_now - tsc_start) < TICK_COUNT);
	local_irq_restore(flags);

	if (no_ctr_free) {
		/* Restore the borrowed counter's previous state. */
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		wrmsrl(MSR_K7_PERFCTR3, pmc3);
		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
	} else {
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
	return pmc_now * tsc_khz / (tsc_now - tsc_start);
}

static struct irqaction irq0 = {
	.handler	= timer_event_interrupt,
	.flags		= IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
	.mask		= CPU_MASK_NONE,
	.name		= "timer"
};
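
/* Default timer setup: use the HPET if it initializes, else the PIT. */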
void __init hpet_time_init(void)
{
	if (!hpet_enable())
		setup_pit_timer();

	setup_irq(0, &irq0);
}

void __init time_init(void)
{
	tsc_calibrate();

	cpu_khz = tsc_khz;
	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
		(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
		cpu_khz = calculate_cpu_khz();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
		vgetcpu_mode = VGETCPU_RDTSCP;
	else
		vgetcpu_mode = VGETCPU_LSL;

	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
		cpu_khz / 1000, cpu_khz % 1000);
	init_tsc_clocksource();
	late_time_init = choose_time_init();
}