/*
 *  linux/arch/arm/plat-omap/clock.c
 *
 *  Copyright (C) 2004 - 2008 Nokia corporation
 *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/debugfs.h>

#include <asm/io.h>

#include <mach/clock.h>

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

static struct clk_functions *arch_clock;

/*-------------------------------------------------------------------------
 * Standard clock functions defined in include/linux/clk.h
 *-------------------------------------------------------------------------*/

/*
 * Returns a clock. We first try to match both the platform device id
 * and the clock name; if that fails, we fall back to matching the
 * clock name alone.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
        struct clk *p, *clk = ERR_PTR(-ENOENT);
        int idno;

        if (dev == NULL || dev->bus != &platform_bus_type)
                idno = -1;
        else
                idno = to_platform_device(dev)->id;

        mutex_lock(&clocks_mutex);

        list_for_each_entry(p, &clocks, node) {
                if (p->id == idno &&
                    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        goto found;
                }
        }

        list_for_each_entry(p, &clocks, node) {
                if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        break;
                }
        }

found:
        mutex_unlock(&clocks_mutex);

        return clk;
}
EXPORT_SYMBOL(clk_get);
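
/*
 * Example (hypothetical driver code, not part of this file): a platform
 * driver would typically look up its clock by name, check the result
 * with IS_ERR(), and balance the lookup with clk_put():
 *
 *      struct clk *ck = clk_get(&pdev->dev, "uart1_ck");
 *      if (IS_ERR(ck))
 *              return PTR_ERR(ck);
 *      ...
 *      clk_put(ck);
 *
 * The "uart1_ck" name is only an illustration; real names come from
 * the omap1/omap2 clock tables registered below with clk_register().
 */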

int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret = 0;

        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_enable)
                ret = arch_clock->clk_enable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (clk->usecount == 0) {
                printk(KERN_ERR "Trying to disable clock %s with 0 usecount\n",
                       clk->name);
                WARN_ON(1);
                goto out;
        }

        if (arch_clock->clk_disable)
                arch_clock->clk_disable(clk);

out:
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

int clk_get_usecount(struct clk *clk)
{
        unsigned long flags;
        int ret = 0;

        if (clk == NULL || IS_ERR(clk))
                return 0;

        spin_lock_irqsave(&clockfw_lock, flags);
        ret = clk->usecount;
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_get_usecount);

unsigned long clk_get_rate(struct clk *clk)
{
        unsigned long flags;
        unsigned long ret = 0;

        if (clk == NULL || IS_ERR(clk))
                return 0;

        spin_lock_irqsave(&clockfw_lock, flags);
        ret = clk->rate;
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_get_rate);

void clk_put(struct clk *clk)
{
        if (clk && !IS_ERR(clk))
                module_put(clk->owner);
}
EXPORT_SYMBOL(clk_put);
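
/*
 * Example (hypothetical, for illustration only): the usual call
 * sequence a driver follows with this API.  Enables and disables nest
 * via the usecount, so each clk_enable() must be balanced by exactly
 * one clk_disable():
 *
 *      clk = clk_get(&pdev->dev, "mmc_ck");    // hypothetical name
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 *      ret = clk_enable(clk);                  // usecount 0 -> 1
 *      ...                                     // device is clocked
 *      clk_disable(clk);                       // usecount 1 -> 0
 *      clk_put(clk);                           // drop module reference
 */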

/*-------------------------------------------------------------------------
 * Optional clock functions defined in include/linux/clk.h
 *-------------------------------------------------------------------------*/

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;
        long ret = 0;

        if (clk == NULL || IS_ERR(clk))
                return ret;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_round_rate)
                ret = arch_clock->clk_round_rate(clk, rate);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (clk == NULL || IS_ERR(clk))
                return ret;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_set_rate)
                ret = arch_clock->clk_set_rate(clk, rate);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_set_rate);
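
/*
 * Example (hypothetical): clk_set_rate() may reject a rate the
 * hardware cannot produce, so callers typically ask for the closest
 * supported rate first and then set that:
 *
 *      long rounded = clk_round_rate(clk, 48000000);
 *      if (rounded > 0)
 *              ret = clk_set_rate(clk, rounded);
 */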

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
                return ret;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_set_parent)
                ret = arch_clock->clk_set_parent(clk, parent);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
        unsigned long flags;
        struct clk *ret = NULL;

        if (clk == NULL || IS_ERR(clk))
                return ret;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_get_parent)
                ret = arch_clock->clk_get_parent(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_get_parent);

/*-------------------------------------------------------------------------
 * OMAP specific clock functions shared between omap1 and omap2
 *-------------------------------------------------------------------------*/

unsigned int __initdata mpurate;

/*
 * By default we use the rate set by the bootloader.
 * You can override this with the mpurate= cmdline option.
 */
static int __init omap_clk_setup(char *str)
{
        get_option(&str, &mpurate);

        if (!mpurate)
                return 1;

        if (mpurate < 1000)
                mpurate *= 1000000;

        return 1;
}
__setup("mpurate=", omap_clk_setup);
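
/*
 * For example, booting with "mpurate=600" requests a 600 MHz MPU rate
 * (values below 1000 are taken as MHz and scaled by 1000000), while
 * "mpurate=600000000" passes the rate in Hz directly.
 */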

/* Used for clocks that always have the same value as the parent clock */
void followparent_recalc(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return;

        clk->rate = clk->parent->rate;
        if (unlikely(clk->flags & RATE_PROPAGATES))
                propagate_rate(clk);
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
        struct clk *clkp;

        if (tclk == NULL || IS_ERR(tclk))
                return;

        list_for_each_entry(clkp, &clocks, node) {
                if (likely(clkp->parent != tclk))
                        continue;
                if (likely(clkp->recalc))
                        clkp->recalc(clkp);
        }
}
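
/*
 * A minimal sketch (not part of this file) of how a ->recalc hook
 * plugs into the propagation above: a hypothetical fixed divide-by-2
 * clock would recompute its rate from the parent and pass the change
 * on to its own children, just like followparent_recalc() does:
 *
 *      static void div2_recalc(struct clk *clk)
 *      {
 *              clk->rate = clk->parent->rate / 2;
 *              if (clk->flags & RATE_PROPAGATES)
 *                      propagate_rate(clk);
 *      }
 */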

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent); if a clock's
 * .recalc hook is set correctly, this also propagates the new rates
 * down the tree.  Called at init.
 */
void recalculate_root_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clocks, node) {
                if (unlikely(!clkp->parent) && likely(clkp->recalc))
                        clkp->recalc(clkp);
        }
}

int clk_register(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        mutex_lock(&clocks_mutex);
        list_add(&clk->node, &clocks);
        if (clk->init)
                clk->init(clk);
        mutex_unlock(&clocks_mutex);

        return 0;
}
EXPORT_SYMBOL(clk_register);
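
/*
 * Example (hypothetical machine-setup code): SoC clock init typically
 * walks its clock table and registers each entry, roughly:
 *
 *      struct clk **clkp;
 *
 *      for (clkp = onchip_clks;
 *           clkp < onchip_clks + ARRAY_SIZE(onchip_clks); clkp++)
 *              clk_register(*clkp);
 *
 * "onchip_clks" is an illustrative name for the SoC's clock table.
 */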

void clk_unregister(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return;

        mutex_lock(&clocks_mutex);
        list_del(&clk->node);
        mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_deny_idle(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_deny_idle)
                arch_clock->clk_deny_idle(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_deny_idle);

void clk_allow_idle(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_allow_idle)
                arch_clock->clk_allow_idle(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_allow_idle);

void clk_enable_init_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clocks, node) {
                if (clkp->flags & ENABLE_ON_INIT)
                        clk_enable(clkp);
        }
}
EXPORT_SYMBOL(clk_enable_init_clocks);

#ifdef CONFIG_CPU_FREQ
void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
{
        unsigned long flags;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (arch_clock->clk_init_cpufreq_table)
                arch_clock->clk_init_cpufreq_table(table);
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_init_cpufreq_table);
#endif

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
        struct clk *ck;
        unsigned long flags;

        list_for_each_entry(ck, &clocks, node) {
                if (ck->usecount > 0 ||
                    (ck->flags & (ALWAYS_ENABLED | PARENT_CONTROLS_CLOCK)))
                        continue;

                if (cpu_class_is_omap1() && !ck->enable_reg)
                        continue;

                spin_lock_irqsave(&clockfw_lock, flags);
                if (arch_clock->clk_disable_unused)
                        arch_clock->clk_disable_unused(ck);
                spin_unlock_irqrestore(&clockfw_lock, flags);
        }

        return 0;
}
late_initcall(clk_disable_unused);
#endif

int __init clk_init(struct clk_functions *custom_clocks)
{
        if (!custom_clocks) {
                printk(KERN_ERR "No custom clock functions registered\n");
                BUG();
        }

        arch_clock = custom_clocks;

        return 0;
}
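
/*
 * Example (hypothetical arch-setup code): each OMAP generation fills in
 * a struct clk_functions with its low-level hooks and hands it to
 * clk_init() early in boot, along the lines of:
 *
 *      static struct clk_functions omap1_clk_functions = {
 *              .clk_enable     = omap1_clk_enable,
 *              .clk_disable    = omap1_clk_disable,
 *              .clk_round_rate = omap1_clk_round_rate,
 *              .clk_set_rate   = omap1_clk_set_rate,
 *      };
 *      ...
 *      clk_init(&omap1_clk_functions);
 *
 * The omap1_* names are illustrative; the real tables live in the
 * mach-omap1/mach-omap2 clock code.
 */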

#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 * debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
{
        int err;
        struct dentry *d, *child;
        struct clk *pa = c->parent;
        char s[255];
        char *p = s;

        p += sprintf(p, "%s", c->name);
        if (c->id != 0)
                sprintf(p, ":%d", c->id);
        d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
        if (IS_ERR(d))
                return PTR_ERR(d);
        c->dent = d;

        d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
        if (IS_ERR(d)) {
                err = PTR_ERR(d);
                goto err_out;
        }
        d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
        if (IS_ERR(d)) {
                err = PTR_ERR(d);
                goto err_out;
        }
        d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
        if (IS_ERR(d)) {
                err = PTR_ERR(d);
                goto err_out;
        }
        return 0;

err_out:
        d = c->dent;
        list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
                debugfs_remove(child);
        debugfs_remove(c->dent);
        return err;
}

static int clk_debugfs_register(struct clk *c)
{
        int err;
        struct clk *pa = c->parent;

        if (pa && !pa->dent) {
                err = clk_debugfs_register(pa);
                if (err)
                        return err;
        }

        if (!c->dent) {
                err = clk_debugfs_register_one(c);
                if (err)
                        return err;
        }
        return 0;
}

static int __init clk_debugfs_init(void)
{
        struct clk *c;
        struct dentry *d;
        int err;

        d = debugfs_create_dir("clock", NULL);
        if (IS_ERR(d))
                return PTR_ERR(d);
        clk_debugfs_root = d;

        list_for_each_entry(c, &clocks, node) {
                err = clk_debugfs_register(c);
                if (err)
                        goto err_out;
        }
        return 0;
err_out:
        debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
        return err;
}
late_initcall(clk_debugfs_init);
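
/*
 * With this enabled, the clock tree shows up under debugfs as nested
 * directories mirroring the parent/child hierarchy, e.g. (illustrative
 * paths, assuming debugfs is mounted at /sys/kernel/debug):
 *
 *      /sys/kernel/debug/clock/ck_ref/ck_dpll1/usecount
 *      /sys/kernel/debug/clock/ck_ref/ck_dpll1/rate
 *      /sys/kernel/debug/clock/ck_ref/ck_dpll1/flags
 *
 * Clocks with a nonzero id get a ":<id>" suffix on the directory name.
 */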

#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */