Blackfin: SMP: make core timers per-cpu clock events for HRT
[pandora-kernel.git] arch/blackfin/kernel/time-ts.c
/*
 * Based on arm clockevents implementation and old bfin time tick.
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *                2008 GeoTechnologies
 *                     Vitja Makarov
 *
 * Licensed under the GPL-2
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpufreq.h>

#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/gptimers.h>

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use khz divisor instead of mhz to keep a better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
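
/*
 * Worked example of the scaling above (illustrative only; the 500 MHz
 * core clock is an assumed figure, not a value read from the hardware):
 *
 *      cpu_khz      = 500000                  (assumed 500 MHz CCLK)
 *      SC           = 2^CYC2NS_SCALE_FACTOR = 1024
 *      cyc2ns_scale = 10^6 * 1024 / 500000 = 2048
 *      ns           = (cycles * 2048) >> 10 = cycles * 2
 *
 * i.e. 2 ns per cycle, as expected for a 500 MHz clock.
 */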

#if defined(CONFIG_CYCLES_CLOCKSOURCE)

static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
{
        return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
}

static struct clocksource bfin_cs_cycles = {
        .name           = "bfin_cs_cycles",
        .rating         = 400,
        .read           = bfin_read_cycles,
        .mask           = CLOCKSOURCE_MASK(64),
        .shift          = CYC2NS_SCALE_FACTOR,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline unsigned long long bfin_cs_cycles_sched_clock(void)
{
        return clocksource_cyc2ns(bfin_read_cycles(&bfin_cs_cycles),
                bfin_cs_cycles.mult, bfin_cs_cycles.shift);
}

static int __init bfin_cs_cycles_init(void)
{
        bfin_cs_cycles.mult = \
                clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);

        if (clocksource_register(&bfin_cs_cycles))
                panic("failed to register clocksource");

        return 0;
}
#else
# define bfin_cs_cycles_init()
#endif

#ifdef CONFIG_GPTMR0_CLOCKSOURCE

void __init setup_gptimer0(void)
{
        disable_gptimers(TIMER0bit);

        set_gptimer_config(TIMER0_id, \
                TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
        set_gptimer_period(TIMER0_id, -1);
        set_gptimer_pwidth(TIMER0_id, -2);
        SSYNC();
        enable_gptimers(TIMER0bit);
}

static cycle_t bfin_read_gptimer0(struct clocksource *cs)
{
        return bfin_read_TIMER0_COUNTER();
}

static struct clocksource bfin_cs_gptimer0 = {
        .name           = "bfin_cs_gptimer0",
        .rating         = 350,
        .read           = bfin_read_gptimer0,
        .mask           = CLOCKSOURCE_MASK(32),
        .shift          = CYC2NS_SCALE_FACTOR,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
{
        return clocksource_cyc2ns(bfin_read_TIMER0_COUNTER(),
                bfin_cs_gptimer0.mult, bfin_cs_gptimer0.shift);
}

static int __init bfin_cs_gptimer0_init(void)
{
        setup_gptimer0();

        bfin_cs_gptimer0.mult = \
                clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift);

        if (clocksource_register(&bfin_cs_gptimer0))
                panic("failed to register clocksource");

        return 0;
}
#else
# define bfin_cs_gptimer0_init()
#endif

#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
/* prefer to use cycles since it has higher rating */
notrace unsigned long long sched_clock(void)
{
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
        return bfin_cs_cycles_sched_clock();
#else
        return bfin_cs_gptimer0_sched_clock();
#endif
}
#endif

#if defined(CONFIG_TICKSOURCE_GPTMR0)
static int bfin_gptmr0_set_next_event(unsigned long cycles,
                                     struct clock_event_device *evt)
{
        disable_gptimers(TIMER0bit);

        /* it starts counting three SCLK cycles after the TIMENx bit is set */
        set_gptimer_pwidth(TIMER0_id, cycles - 3);
        enable_gptimers(TIMER0bit);
        return 0;
}

static void bfin_gptmr0_set_mode(enum clock_event_mode mode,
                                struct clock_event_device *evt)
{
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC: {
                set_gptimer_config(TIMER0_id, \
                        TIMER_OUT_DIS | TIMER_IRQ_ENA | \
                        TIMER_PERIOD_CNT | TIMER_MODE_PWM);
                set_gptimer_period(TIMER0_id, get_sclk() / HZ);
                set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
                enable_gptimers(TIMER0bit);
                break;
        }
        case CLOCK_EVT_MODE_ONESHOT:
                disable_gptimers(TIMER0bit);
                set_gptimer_config(TIMER0_id, \
                        TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
                set_gptimer_period(TIMER0_id, 0);
                break;
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                disable_gptimers(TIMER0bit);
                break;
        case CLOCK_EVT_MODE_RESUME:
                break;
        }
}

static void bfin_gptmr0_ack(void)
{
        set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
}

static void __init bfin_gptmr0_init(void)
{
        disable_gptimers(TIMER0bit);
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;
        smp_mb();
        evt->event_handler(evt);
        bfin_gptmr0_ack();
        return IRQ_HANDLED;
}

static struct irqaction gptmr0_irq = {
        .name           = "Blackfin GPTimer0",
        .flags          = IRQF_DISABLED | IRQF_TIMER | \
                          IRQF_IRQPOLL | IRQF_PERCPU,
        .handler        = bfin_gptmr0_interrupt,
};

static struct clock_event_device clockevent_gptmr0 = {
        .name           = "bfin_gptimer0",
        .rating         = 300,
        .irq            = IRQ_TIMER0,
        .shift          = 32,
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .set_next_event = bfin_gptmr0_set_next_event,
        .set_mode       = bfin_gptmr0_set_mode,
};

static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
{
        unsigned long clock_tick;

        clock_tick = get_sclk();
        evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
        evt->max_delta_ns = clockevent_delta2ns(-1, evt);
        evt->min_delta_ns = clockevent_delta2ns(100, evt);

        evt->cpumask = cpumask_of(0);

        clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_GPTMR0 */

#if defined(CONFIG_TICKSOURCE_CORETMR)
/* per-cpu local core timer */
static DEFINE_PER_CPU(struct clock_event_device, coretmr_events);

static int bfin_coretmr_set_next_event(unsigned long cycles,
                                struct clock_event_device *evt)
{
        bfin_write_TCNTL(TMPWR);
        CSYNC();
        bfin_write_TCOUNT(cycles);
        CSYNC();
        bfin_write_TCNTL(TMPWR | TMREN);
        return 0;
}

static void bfin_coretmr_set_mode(enum clock_event_mode mode,
                                struct clock_event_device *evt)
{
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC: {
                unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
                bfin_write_TCNTL(TMPWR);
                CSYNC();
                bfin_write_TSCALE(TIME_SCALE - 1);
                bfin_write_TPERIOD(tcount);
                bfin_write_TCOUNT(tcount);
                CSYNC();
                bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
                break;
        }
        case CLOCK_EVT_MODE_ONESHOT:
                bfin_write_TCNTL(TMPWR);
                CSYNC();
                bfin_write_TSCALE(TIME_SCALE - 1);
                bfin_write_TPERIOD(0);
                bfin_write_TCOUNT(0);
                break;
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                bfin_write_TCNTL(0);
                CSYNC();
                break;
        case CLOCK_EVT_MODE_RESUME:
                break;
        }
}

void bfin_coretmr_init(void)
{
        /* power up the timer, but don't enable it just yet */
        bfin_write_TCNTL(TMPWR);
        CSYNC();

        /* the TSCALE prescaler counter. */
        bfin_write_TSCALE(TIME_SCALE - 1);
        bfin_write_TPERIOD(0);
        bfin_write_TCOUNT(0);

        CSYNC();
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
{
        int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

        smp_mb();
        evt->event_handler(evt);
        return IRQ_HANDLED;
}

static struct irqaction coretmr_irq = {
        .name           = "Blackfin CoreTimer",
        .flags          = IRQF_DISABLED | IRQF_TIMER | \
                          IRQF_IRQPOLL | IRQF_PERCPU,
        .handler        = bfin_coretmr_interrupt,
};

void bfin_coretmr_clockevent_init(void)
{
        unsigned long clock_tick;
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

        evt->name = "bfin_core_timer";
        evt->rating = 350;
        evt->irq = -1;
        evt->shift = 32;
        evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
        evt->set_next_event = bfin_coretmr_set_next_event;
        evt->set_mode = bfin_coretmr_set_mode;

        clock_tick = get_cclk() / TIME_SCALE;
        evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
        evt->max_delta_ns = clockevent_delta2ns(-1, evt);
        evt->min_delta_ns = clockevent_delta2ns(100, evt);

        evt->cpumask = cpumask_of(cpu);

        clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_CORETMR */


void __init time_init(void)
{
        time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */
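        /*
         * Worked out for illustration: the 37 years from 1970 to 2007 span
         * 9 leap days, so (365 * 37 + 9) = 13514 days, and 13514 * 86400 =
         * 1167609600 seconds, the Unix time of 00:00:00 UTC on 1 Jan 2007.
         */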

#ifdef CONFIG_RTC_DRV_BFIN
        /* [#2663] hack to filter junk RTC values that would cause
         * userspace to have to deal with time values greater than
         * 2^31 seconds (which uClibc cannot cope with yet)
         */
        if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
                printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
                bfin_write_RTC_STAT(0);
        }
#endif

        /* Initialize xtime. From now on, xtime is updated with timer interrupts */
        xtime.tv_sec = secs_since_1970;
        xtime.tv_nsec = 0;
        set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);

        bfin_cs_cycles_init();
        bfin_cs_gptimer0_init();

#if defined(CONFIG_TICKSOURCE_CORETMR)
        bfin_coretmr_init();
        setup_irq(IRQ_CORETMR, &coretmr_irq);
        bfin_coretmr_clockevent_init();
#endif

#if defined(CONFIG_TICKSOURCE_GPTMR0)
        bfin_gptmr0_init();
        setup_irq(IRQ_TIMER0, &gptmr0_irq);
        gptmr0_irq.dev_id = &clockevent_gptmr0;
        bfin_gptmr0_clockevent_init(&clockevent_gptmr0);
#endif

#if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0)
# error at least one clock event device is required
#endif
}