/*
 *  CPU frequency scaling for OMAP using OPP information
 *
 *  Copyright (C) 2005 Nokia Corporation
 *  Written by Tony Lindgren <tony@atomide.com>
 *
 *  Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
 *
 * Copyright (C) 2007-2011 Texas Instruments, Inc.
 * - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/opp.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>

#include <asm/system.h>
#include <asm/smp_plat.h>
#include <asm/cpu.h>

#include <plat/clock.h>
#include <plat/omap-pm.h>
#include <plat/common.h>
#include <plat/omap_device.h>

#include <mach/hardware.h>

/* OPP tolerance in percentage */
#define OPP_TOLERANCE   4

#ifdef CONFIG_SMP
struct lpj_info {
        unsigned long   ref;
        unsigned int    freq;
};

static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
static struct lpj_info global_lpj_ref;
#endif

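/*
 * Driver-global state: the OPP-derived cpufreq table (refcounted through
 * freq_table_users), the MPU clock and its supply regulator, and the
 * highest OPP frequency/voltage seen so far, as maintained by
 * check_max_freq() below.
 */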
static struct cpufreq_frequency_table *freq_table;
static atomic_t freq_table_users = ATOMIC_INIT(0);
static struct clk *mpu_clk;
static char *mpu_clk_name;
static struct device *mpu_dev;
static struct regulator *mpu_reg;
static unsigned long freq_max, volt_max;

static int omap_verify_speed(struct cpufreq_policy *policy)
{
        if (!freq_table)
                return -EINVAL;
        return cpufreq_frequency_table_verify(policy, freq_table);
}

static unsigned int omap_getspeed(unsigned int cpu)
{
        unsigned long rate;

        if (cpu >= NR_CPUS)
                return 0;

        rate = clk_get_rate(mpu_clk) / 1000;
        return rate;
}

static int omap_target(struct cpufreq_policy *policy,
                       unsigned int target_freq,
                       unsigned int relation)
{
        unsigned int i;
        int r, ret = 0;
        struct cpufreq_freqs freqs;
        struct opp *opp;
        unsigned long freq, volt = 0, volt_old = 0;

        if (!freq_table) {
                dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
                                policy->cpu);
                return -EINVAL;
        }

        ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
                        relation, &i);
        if (ret) {
                dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
                        __func__, policy->cpu, target_freq, ret);
                return ret;
        }
        freqs.new = freq_table[i].frequency;
        if (!freqs.new) {
                dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__,
                        policy->cpu, target_freq);
                return -EINVAL;
        }

        freqs.old = omap_getspeed(policy->cpu);
        freqs.cpu = policy->cpu;

        if (freqs.old == freqs.new && policy->cur == freqs.new)
                return ret;

        /* notifiers */
        for_each_cpu(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }

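        /* cpufreq works in kHz; the clock framework and OPP layer use Hz */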
        freq = freqs.new * 1000;

        if (mpu_reg) {
                opp = opp_find_freq_ceil(mpu_dev, &freq);
                if (IS_ERR(opp)) {
                        dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
                                __func__, freqs.new);
                        return -EINVAL;
                }
                volt = opp_get_voltage(opp);
                volt_old = regulator_get_voltage(mpu_reg);
        }

        dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n",
                freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
                freqs.new / 1000, volt ? volt / 1000 : -1);

        /* scaling up?  scale voltage before frequency */
        if (mpu_reg && (freqs.new > freqs.old)) {
                r = regulator_set_voltage(mpu_reg, volt, volt_max);
                if (r < 0) {
                        dev_warn(mpu_dev, "%s: unable to scale voltage up.\n",
                                 __func__);
                        freqs.new = freqs.old;
                        goto done;
                }
        }

        ret = clk_set_rate(mpu_clk, freqs.new * 1000);

        /* scaling down?  scale voltage after frequency */
        if (mpu_reg && (freqs.new < freqs.old)) {
                r = regulator_set_voltage(mpu_reg, volt, volt_max);
                if (r < 0) {
                        dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
                                 __func__);
                        ret = clk_set_rate(mpu_clk, freqs.old * 1000);
                        freqs.new = freqs.old;
                        goto done;
                }
        }

        freqs.new = omap_getspeed(policy->cpu);
#ifdef CONFIG_SMP
        /*
         * On SMP systems the cpufreq core does not update loops_per_jiffy,
         * so rescale the per-CPU value here on every frequency transition.
         * All dependent CPUs must be updated.
         */
        for_each_cpu(i, policy->cpus) {
                struct lpj_info *lpj = &per_cpu(lpj_ref, i);
                if (!lpj->freq) {
                        lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
                        lpj->freq = freqs.old;
                }

                per_cpu(cpu_data, i).loops_per_jiffy =
                        cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
        }

        /* And don't forget to adjust the global one */
        if (!global_lpj_ref.freq) {
                global_lpj_ref.ref = loops_per_jiffy;
                global_lpj_ref.freq = freqs.old;
        }
        loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
                                        freqs.new);
#endif

done:
        /* notifiers */
        for_each_cpu(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }

        return ret;
}

static inline void freq_table_free(void)
{
        if (atomic_dec_and_test(&freq_table_users))
                opp_free_cpufreq_table(mpu_dev, &freq_table);
}

/*
 * "Force-update" hack: an OPP notifier that rebuilds the cpufreq frequency
 * table and refreshes the policy limits whenever the OPP list changes at
 * runtime.
 */
static struct notifier_block omap_freq_nb;
static struct cpufreq_policy *omap_freq_policy;

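/*
 * Remember the highest OPP voltage seen (padded by OPP_TOLERANCE percent);
 * it is used as the upper bound for regulator_set_voltage() in omap_target().
 */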
static void check_max_freq(unsigned long freq)
{
        unsigned long volt;
        struct opp *opp;

        freq *= 1000;

        if (freq <= freq_max)
                return;

        opp = opp_find_freq_ceil(mpu_dev, &freq);
        if (IS_ERR(opp)) {
                dev_err(mpu_dev, "%s: unable to find MPU OPP for %ld\n",
                                __func__, freq);
                return;
        }

        volt = opp_get_voltage(opp);
        volt += volt * OPP_TOLERANCE / 100;

        if (volt > volt_max) {
                volt_max = volt;
                freq_max = freq;
        }
}

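/*
 * OPP notifier callback: build a fresh cpufreq table from the current OPP
 * list, swap it in for the old one, and refresh the policy's cpuinfo and
 * user_policy limits so newly enabled OPPs become usable immediately.
 */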
static int freq_notifier_call(struct notifier_block *nb, unsigned long type,
                              void *devp)
{
        static DEFINE_SPINLOCK(lock);
        struct cpufreq_frequency_table *new_freq_table, *old_freq_table;
        unsigned long flags;
        int ret;

        ret = opp_init_cpufreq_table(mpu_dev, &new_freq_table);
        if (ret) {
                dev_err(mpu_dev, "%s: failed to create cpufreq_table: %d\n",
                        __func__, ret);
                return ret;
        }

        /* FIXME: use proper locks instead of these hacks */
        spin_lock_irqsave(&lock, flags);
        old_freq_table = freq_table;
        freq_table = new_freq_table;
        spin_unlock_irqrestore(&lock, flags);
        msleep(1);
        opp_free_cpufreq_table(mpu_dev, &old_freq_table);

        if (omap_freq_policy == NULL) {
                dev_err(mpu_dev, "%s: omap_freq_policy is NULL\n", __func__);
                return -EINVAL;
        }

        cpufreq_frequency_table_get_attr(freq_table, omap_freq_policy->cpu);

        ret = cpufreq_frequency_table_cpuinfo(omap_freq_policy, freq_table);
        if (ret)
                dev_err(mpu_dev, "%s: cpufreq_frequency_table_cpuinfo: %d\n",
                        __func__, ret);
        omap_freq_policy->user_policy.min = omap_freq_policy->cpuinfo.min_freq;
        omap_freq_policy->user_policy.max = omap_freq_policy->cpuinfo.max_freq;

        check_max_freq(omap_freq_policy->cpuinfo.max_freq);

        return ret;
}

static void freq_register_opp_notifier(struct device *dev,
                                       struct cpufreq_policy *policy)
{
        struct srcu_notifier_head *nh = opp_get_notifier(dev);
        int ret;

        omap_freq_policy = policy;

        if (IS_ERR(nh)) {
                ret = PTR_ERR(nh);
                goto out;
        }
        omap_freq_nb.notifier_call = freq_notifier_call;
        ret = srcu_notifier_chain_register(nh, &omap_freq_nb);
out:
        if (ret != 0)
                dev_err(mpu_dev, "%s: failed to register notifier: %d\n",
                                __func__, ret);
}

static void freq_unregister_opp_notifier(struct device *dev)
{
        struct srcu_notifier_head *nh = opp_get_notifier(dev);

        if (IS_ERR(nh))
                return;
        srcu_notifier_chain_unregister(nh, &omap_freq_nb);
}

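/*
 * Per-policy init: take a reference on the MPU clock, build the OPP-based
 * frequency table on first use (refcounted via freq_table_users), and on SMP
 * mark all CPUs as sharing one clock/voltage domain so they scale together.
 */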
static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
{
        int result = 0;

        mpu_clk = clk_get(NULL, mpu_clk_name);
        if (IS_ERR(mpu_clk))
                return PTR_ERR(mpu_clk);

        if (policy->cpu >= NR_CPUS) {
                result = -EINVAL;
                goto fail_ck;
        }

        policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);

        if (atomic_inc_return(&freq_table_users) == 1)
                result = opp_init_cpufreq_table(mpu_dev, &freq_table);

        if (result) {
                dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
                                __func__, policy->cpu, result);
                goto fail_ck;
        }

        result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
        if (result)
                goto fail_table;

        cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

        policy->min = policy->cpuinfo.min_freq;
        policy->max = policy->cpuinfo.max_freq;
        policy->cur = omap_getspeed(policy->cpu);

        check_max_freq(policy->cpuinfo.max_freq);

        /*
         * On an OMAP SMP configuration, both processors share the voltage
         * and clock, so both CPUs need to be scaled together and hence need
         * software co-ordination. Use the cpufreq affected_cpus interface
         * to handle this scenario. The additional is_smp() check is to keep
         * SMP_ON_UP builds working.
         */
        if (is_smp()) {
                policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
                cpumask_setall(policy->cpus);
        }

        /* FIXME: what's the actual transition time? */
        policy->cpuinfo.transition_latency = 300 * 1000;

        freq_register_opp_notifier(mpu_dev, policy);

        return 0;

fail_table:
        freq_table_free();
fail_ck:
        clk_put(mpu_clk);
        return result;
}

static int omap_cpu_exit(struct cpufreq_policy *policy)
{
        freq_unregister_opp_notifier(mpu_dev);
        freq_table_free();
        clk_put(mpu_clk);
        return 0;
}

static struct freq_attr *omap_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

static struct cpufreq_driver omap_driver = {
        .flags          = CPUFREQ_STICKY,
        .verify         = omap_verify_speed,
        .target         = omap_target,
        .get            = omap_getspeed,
        .init           = omap_cpu_init,
        .exit           = omap_cpu_exit,
        .name           = "omap",
        .attr           = omap_cpufreq_attr,
};

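/*
 * Module init: pick the MPU clock name for the SoC in use, look up the MPU
 * omap_device and its "vcc" supply (falling back to frequency-only scaling
 * when only a dummy regulator is present), then register the cpufreq driver.
 */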
static int __init omap_cpufreq_init(void)
{
        if (cpu_is_omap24xx())
                mpu_clk_name = "virt_prcm_set";
        else if (cpu_is_omap34xx())
                mpu_clk_name = "dpll1_ck";
        else if (cpu_is_omap44xx())
                mpu_clk_name = "dpll_mpu_ck";

        if (!mpu_clk_name) {
                pr_err("%s: unsupported Silicon?\n", __func__);
                return -EINVAL;
        }

        mpu_dev = omap_device_get_by_hwmod_name("mpu");
        if (!mpu_dev) {
                pr_warning("%s: unable to get the mpu device\n", __func__);
                return -EINVAL;
        }

        mpu_reg = regulator_get(mpu_dev, "vcc");
        if (IS_ERR(mpu_reg)) {
                pr_warning("%s: unable to get MPU regulator\n", __func__);
                mpu_reg = NULL;
        } else {
                /*
                 * Ensure physical regulator is present.
                 * (e.g. could be dummy regulator.)
                 */
                if (regulator_get_voltage(mpu_reg) < 0) {
                        pr_warn("%s: physical regulator not present for MPU\n",
                                __func__);
                        regulator_put(mpu_reg);
                        mpu_reg = NULL;
                }
        }

        return cpufreq_register_driver(&omap_driver);
}

static void __exit omap_cpufreq_exit(void)
{
        cpufreq_unregister_driver(&omap_driver);
}

MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
MODULE_LICENSE("GPL");
module_init(omap_cpufreq_init);
module_exit(omap_cpufreq_exit);