Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
[pandora-kernel.git] / arch / arm / plat-omap / clock.c
1 /*
2  *  linux/arch/arm/plat-omap/clock.c
3  *
4  *  Copyright (C) 2004 - 2008 Nokia corporation
5  *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
6  *
7  *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 #include <linux/version.h>
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/list.h>
18 #include <linux/errno.h>
19 #include <linux/err.h>
20 #include <linux/string.h>
21 #include <linux/clk.h>
22 #include <linux/mutex.h>
23 #include <linux/platform_device.h>
24 #include <linux/cpufreq.h>
25 #include <linux/debugfs.h>
26
27 #include <asm/io.h>
28
29 #include <asm/arch/clock.h>
30
/* List of all registered clocks; guarded by clocks_mutex for list changes. */
static LIST_HEAD(clocks);
/* Serializes clock registration/lookup (sleepable contexts only). */
static DEFINE_MUTEX(clocks_mutex);
/* Protects clock hardware state; taken with IRQs disabled in the hot paths. */
static DEFINE_SPINLOCK(clockfw_lock);

/* Arch-specific (omap1/omap2) clock operations, set once by clk_init(). */
static struct clk_functions *arch_clock;
36
37 /*-------------------------------------------------------------------------
38  * Standard clock functions defined in include/linux/clk.h
39  *-------------------------------------------------------------------------*/
40
41 /*
42  * Returns a clock. Note that we first try to use device id on the bus
43  * and clock name. If this fails, we try to use clock name only.
44  */
45 struct clk * clk_get(struct device *dev, const char *id)
46 {
47         struct clk *p, *clk = ERR_PTR(-ENOENT);
48         int idno;
49
50         if (dev == NULL || dev->bus != &platform_bus_type)
51                 idno = -1;
52         else
53                 idno = to_platform_device(dev)->id;
54
55         mutex_lock(&clocks_mutex);
56
57         list_for_each_entry(p, &clocks, node) {
58                 if (p->id == idno &&
59                     strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
60                         clk = p;
61                         goto found;
62                 }
63         }
64
65         list_for_each_entry(p, &clocks, node) {
66                 if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
67                         clk = p;
68                         break;
69                 }
70         }
71
72 found:
73         mutex_unlock(&clocks_mutex);
74
75         return clk;
76 }
77 EXPORT_SYMBOL(clk_get);
78
79 int clk_enable(struct clk *clk)
80 {
81         unsigned long flags;
82         int ret = 0;
83
84         if (clk == NULL || IS_ERR(clk))
85                 return -EINVAL;
86
87         spin_lock_irqsave(&clockfw_lock, flags);
88         if (arch_clock->clk_enable)
89                 ret = arch_clock->clk_enable(clk);
90         spin_unlock_irqrestore(&clockfw_lock, flags);
91
92         return ret;
93 }
94 EXPORT_SYMBOL(clk_enable);
95
96 void clk_disable(struct clk *clk)
97 {
98         unsigned long flags;
99
100         if (clk == NULL || IS_ERR(clk))
101                 return;
102
103         spin_lock_irqsave(&clockfw_lock, flags);
104         if (clk->usecount == 0) {
105                 printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
106                        clk->name);
107                 WARN_ON(1);
108                 goto out;
109         }
110
111         if (arch_clock->clk_disable)
112                 arch_clock->clk_disable(clk);
113
114 out:
115         spin_unlock_irqrestore(&clockfw_lock, flags);
116 }
117 EXPORT_SYMBOL(clk_disable);
118
119 int clk_get_usecount(struct clk *clk)
120 {
121         unsigned long flags;
122         int ret = 0;
123
124         if (clk == NULL || IS_ERR(clk))
125                 return 0;
126
127         spin_lock_irqsave(&clockfw_lock, flags);
128         ret = clk->usecount;
129         spin_unlock_irqrestore(&clockfw_lock, flags);
130
131         return ret;
132 }
133 EXPORT_SYMBOL(clk_get_usecount);
134
135 unsigned long clk_get_rate(struct clk *clk)
136 {
137         unsigned long flags;
138         unsigned long ret = 0;
139
140         if (clk == NULL || IS_ERR(clk))
141                 return 0;
142
143         spin_lock_irqsave(&clockfw_lock, flags);
144         ret = clk->rate;
145         spin_unlock_irqrestore(&clockfw_lock, flags);
146
147         return ret;
148 }
149 EXPORT_SYMBOL(clk_get_rate);
150
151 void clk_put(struct clk *clk)
152 {
153         if (clk && !IS_ERR(clk))
154                 module_put(clk->owner);
155 }
156 EXPORT_SYMBOL(clk_put);
157
158 /*-------------------------------------------------------------------------
159  * Optional clock functions defined in include/linux/clk.h
160  *-------------------------------------------------------------------------*/
161
162 long clk_round_rate(struct clk *clk, unsigned long rate)
163 {
164         unsigned long flags;
165         long ret = 0;
166
167         if (clk == NULL || IS_ERR(clk))
168                 return ret;
169
170         spin_lock_irqsave(&clockfw_lock, flags);
171         if (arch_clock->clk_round_rate)
172                 ret = arch_clock->clk_round_rate(clk, rate);
173         spin_unlock_irqrestore(&clockfw_lock, flags);
174
175         return ret;
176 }
177 EXPORT_SYMBOL(clk_round_rate);
178
179 int clk_set_rate(struct clk *clk, unsigned long rate)
180 {
181         unsigned long flags;
182         int ret = -EINVAL;
183
184         if (clk == NULL || IS_ERR(clk))
185                 return ret;
186
187         spin_lock_irqsave(&clockfw_lock, flags);
188         if (arch_clock->clk_set_rate)
189                 ret = arch_clock->clk_set_rate(clk, rate);
190         spin_unlock_irqrestore(&clockfw_lock, flags);
191
192         return ret;
193 }
194 EXPORT_SYMBOL(clk_set_rate);
195
196 int clk_set_parent(struct clk *clk, struct clk *parent)
197 {
198         unsigned long flags;
199         int ret = -EINVAL;
200
201         if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
202                 return ret;
203
204         spin_lock_irqsave(&clockfw_lock, flags);
205         if (arch_clock->clk_set_parent)
206                 ret =  arch_clock->clk_set_parent(clk, parent);
207         spin_unlock_irqrestore(&clockfw_lock, flags);
208
209         return ret;
210 }
211 EXPORT_SYMBOL(clk_set_parent);
212
213 struct clk *clk_get_parent(struct clk *clk)
214 {
215         unsigned long flags;
216         struct clk * ret = NULL;
217
218         if (clk == NULL || IS_ERR(clk))
219                 return ret;
220
221         spin_lock_irqsave(&clockfw_lock, flags);
222         if (arch_clock->clk_get_parent)
223                 ret = arch_clock->clk_get_parent(clk);
224         spin_unlock_irqrestore(&clockfw_lock, flags);
225
226         return ret;
227 }
228 EXPORT_SYMBOL(clk_get_parent);
229
230 /*-------------------------------------------------------------------------
231  * OMAP specific clock functions shared between omap1 and omap2
232  *-------------------------------------------------------------------------*/
233
234 unsigned int __initdata mpurate;
235
236 /*
237  * By default we use the rate set by the bootloader.
238  * You can override this with mpurate= cmdline option.
239  */
240 static int __init omap_clk_setup(char *str)
241 {
242         get_option(&str, &mpurate);
243
244         if (!mpurate)
245                 return 1;
246
247         if (mpurate < 1000)
248                 mpurate *= 1000000;
249
250         return 1;
251 }
252 __setup("mpurate=", omap_clk_setup);
253
254 /* Used for clocks that always have same value as the parent clock */
255 void followparent_recalc(struct clk *clk)
256 {
257         if (clk == NULL || IS_ERR(clk))
258                 return;
259
260         clk->rate = clk->parent->rate;
261         if (unlikely(clk->flags & RATE_PROPAGATES))
262                 propagate_rate(clk);
263 }
264
265 /* Propagate rate to children */
266 void propagate_rate(struct clk * tclk)
267 {
268         struct clk *clkp;
269
270         if (tclk == NULL || IS_ERR(tclk))
271                 return;
272
273         list_for_each_entry(clkp, &clocks, node) {
274                 if (likely(clkp->parent != tclk))
275                         continue;
276                 if (likely((u32)clkp->recalc))
277                         clkp->recalc(clkp);
278         }
279 }
280
281 /**
282  * recalculate_root_clocks - recalculate and propagate all root clocks
283  *
284  * Recalculates all root clocks (clocks with no parent), which if the
285  * clock's .recalc is set correctly, should also propagate their rates.
286  * Called at init.
287  */
288 void recalculate_root_clocks(void)
289 {
290         struct clk *clkp;
291
292         list_for_each_entry(clkp, &clocks, node) {
293                 if (unlikely(!clkp->parent) && likely((u32)clkp->recalc))
294                         clkp->recalc(clkp);
295         }
296 }
297
298 int clk_register(struct clk *clk)
299 {
300         if (clk == NULL || IS_ERR(clk))
301                 return -EINVAL;
302
303         mutex_lock(&clocks_mutex);
304         list_add(&clk->node, &clocks);
305         if (clk->init)
306                 clk->init(clk);
307         mutex_unlock(&clocks_mutex);
308
309         return 0;
310 }
311 EXPORT_SYMBOL(clk_register);
312
313 void clk_unregister(struct clk *clk)
314 {
315         if (clk == NULL || IS_ERR(clk))
316                 return;
317
318         mutex_lock(&clocks_mutex);
319         list_del(&clk->node);
320         mutex_unlock(&clocks_mutex);
321 }
322 EXPORT_SYMBOL(clk_unregister);
323
324 void clk_deny_idle(struct clk *clk)
325 {
326         unsigned long flags;
327
328         if (clk == NULL || IS_ERR(clk))
329                 return;
330
331         spin_lock_irqsave(&clockfw_lock, flags);
332         if (arch_clock->clk_deny_idle)
333                 arch_clock->clk_deny_idle(clk);
334         spin_unlock_irqrestore(&clockfw_lock, flags);
335 }
336 EXPORT_SYMBOL(clk_deny_idle);
337
338 void clk_allow_idle(struct clk *clk)
339 {
340         unsigned long flags;
341
342         if (clk == NULL || IS_ERR(clk))
343                 return;
344
345         spin_lock_irqsave(&clockfw_lock, flags);
346         if (arch_clock->clk_allow_idle)
347                 arch_clock->clk_allow_idle(clk);
348         spin_unlock_irqrestore(&clockfw_lock, flags);
349 }
350 EXPORT_SYMBOL(clk_allow_idle);
351
352 void clk_enable_init_clocks(void)
353 {
354         struct clk *clkp;
355
356         list_for_each_entry(clkp, &clocks, node) {
357                 if (clkp->flags & ENABLE_ON_INIT)
358                         clk_enable(clkp);
359         }
360 }
361 EXPORT_SYMBOL(clk_enable_init_clocks);
362
363 #ifdef CONFIG_CPU_FREQ
364 void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
365 {
366         unsigned long flags;
367
368         spin_lock_irqsave(&clockfw_lock, flags);
369         if (arch_clock->clk_init_cpufreq_table)
370                 arch_clock->clk_init_cpufreq_table(table);
371         spin_unlock_irqrestore(&clockfw_lock, flags);
372 }
373 EXPORT_SYMBOL(clk_init_cpufreq_table);
374 #endif
375
376 /*-------------------------------------------------------------------------*/
377
378 #ifdef CONFIG_OMAP_RESET_CLOCKS
379 /*
380  * Disable any unused clocks left on by the bootloader
381  */
382 static int __init clk_disable_unused(void)
383 {
384         struct clk *ck;
385         unsigned long flags;
386
387         list_for_each_entry(ck, &clocks, node) {
388                 if (ck->usecount > 0 || (ck->flags & ALWAYS_ENABLED) ||
389                         ck->enable_reg == 0)
390                         continue;
391
392                 spin_lock_irqsave(&clockfw_lock, flags);
393                 if (arch_clock->clk_disable_unused)
394                         arch_clock->clk_disable_unused(ck);
395                 spin_unlock_irqrestore(&clockfw_lock, flags);
396         }
397
398         return 0;
399 }
400 late_initcall(clk_disable_unused);
401 #endif
402
403 int __init clk_init(struct clk_functions * custom_clocks)
404 {
405         if (!custom_clocks) {
406                 printk(KERN_ERR "No custom clock functions registered\n");
407                 BUG();
408         }
409
410         arch_clock = custom_clocks;
411
412         return 0;
413 }
414
415 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
416 /*
417  *      debugfs support to trace clock tree hierarchy and attributes
418  */
419 static struct dentry *clk_debugfs_root;
420
/*
 * Create the debugfs directory and attribute files for one clock.
 *
 * The directory is named "<name>" or "<name>:<id>" and is created under
 * the parent clock's directory when one exists, else under the root
 * "clock" directory. Exposes read-only "usecount", "rate" and "flags"
 * files. On any failure the partially-created entries are removed and
 * the debugfs error code is returned; returns 0 on success.
 */
static int clk_debugfs_register_one(struct clk *c)
{
        int err;
        struct dentry *d, *child;
        struct clk *pa = c->parent;
        char s[255];
        char *p = s;

        /* Build "name" or "name:id" in s; p tracks the append point. */
        p += sprintf(p, "%s", c->name);
        if (c->id != 0)
                sprintf(p, ":%d", c->id);
        d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
        if (IS_ERR(d))
                return PTR_ERR(d);
        c->dent = d;

        /* NOTE(review): casts assume usecount fits in u8 and rate/flags
         * in u32 — matches struct clk's layout at this kernel version;
         * confirm if the struct changes. */
        d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
        if (IS_ERR(d)) {
                err = PTR_ERR(d);
                goto err_out;
        }
        d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
        if (IS_ERR(d)) {
                err = PTR_ERR(d);
                goto err_out;
        }
        d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
        if (IS_ERR(d)) {
                err = PTR_ERR(d);
                goto err_out;
        }
        return 0;

err_out:
        /* Tear down any files already created, then the directory itself.
         * Walks the dentry's children directly via dcache internals. */
        d = c->dent;
        list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
                debugfs_remove(child);
        debugfs_remove(c->dent);
        return err;
}
461
462 static int clk_debugfs_register(struct clk *c)
463 {
464         int err;
465         struct clk *pa = c->parent;
466
467         if (pa && !pa->dent) {
468                 err = clk_debugfs_register(pa);
469                 if (err)
470                         return err;
471         }
472
473         if (!c->dent) {
474                 err = clk_debugfs_register_one(c);
475                 if (err)
476                         return err;
477         }
478         return 0;
479 }
480
481 static int __init clk_debugfs_init(void)
482 {
483         struct clk *c;
484         struct dentry *d;
485         int err;
486
487         d = debugfs_create_dir("clock", NULL);
488         if (IS_ERR(d))
489                 return PTR_ERR(d);
490         clk_debugfs_root = d;
491
492         list_for_each_entry(c, &clocks, node) {
493                 err = clk_debugfs_register(c);
494                 if (err)
495                         goto err_out;
496         }
497         return 0;
498 err_out:
499         debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
500         return err;
501 }
502 late_initcall(clk_debugfs_init);
503
504 #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */