/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 *  Copyright (C) 2005 - 2009  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *      Copyright (C) 2004 - 2008 Nokia Corporation
 *      Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 *  With clkdev bits:
 *
 *      Copyright (C) 2008 Russell King.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>
#include <asm/machvec.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
        return clk->parent->rate;
}

int clk_reparent(struct clk *child, struct clk *parent)
{
        list_del_init(&child->sibling);
        if (parent)
                list_add(&child->sibling, &parent->children);
        child->parent = parent;

        /* now do the debugfs renaming to reattach the child
           to the proper parent */

        return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &tclk->children, sibling) {
                if (clkp->ops && clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);
                if (clkp->ops && clkp->ops->build_rate_table)
                        clkp->ops->build_rate_table(clkp);

                propagate_rate(clkp);
        }
}

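/*
 * Example (hypothetical clock, for illustration only): propagate_rate()
 * leans on each child's ->recalc op to derive the child rate from the
 * parent.  A fixed divide-by-4 child clock would typically provide
 * something like:
 *
 *      static unsigned long div4_recalc(struct clk *clk)
 *      {
 *              return clk->parent->rate / 4;
 *      }
 *
 *      static struct clk_ops div4_clk_ops = {
 *              .recalc = div4_recalc,
 *      };
 */
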
static void __clk_disable(struct clk *clk)
{
        if (clk->usecount == 0) {
                printk(KERN_ERR "Trying to disable clock %s with 0 usecount\n",
                       clk->name);
                WARN_ON(1);
                return;
        }

        if (!(--clk->usecount)) {
                if (likely(clk->ops && clk->ops->disable))
                        clk->ops->disable(clk);
                if (likely(clk->parent))
                        __clk_disable(clk->parent);
        }
}

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (!clk)
                return;

        spin_lock_irqsave(&clock_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
        int ret = 0;

        if (clk->usecount++ == 0) {
                if (clk->parent) {
                        ret = __clk_enable(clk->parent);
                        if (unlikely(ret))
                                goto err;
                }

                if (clk->ops && clk->ops->enable) {
                        ret = clk->ops->enable(clk);
                        if (ret) {
                                if (clk->parent)
                                        __clk_disable(clk->parent);
                                goto err;
                        }
                }
        }

        return ret;
err:
        clk->usecount--;
        return ret;
}

int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        if (!clk)
                return -EINVAL;

        spin_lock_irqsave(&clock_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

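/*
 * Example (for illustration only): clk_enable()/clk_disable() are
 * reference counted, and enabling a clock also enables its parent chain.
 * Calls are expected to balance:
 *
 *      clk_enable(clk);        // usecount 0 -> 1, ops->enable() runs
 *      clk_enable(clk);        // usecount 1 -> 2, no hardware access
 *      clk_disable(clk);       // usecount 2 -> 1
 *      clk_disable(clk);       // usecount 1 -> 0, ops->disable() runs
 */
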
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which, if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &root_clks, sibling) {
                if (clkp->ops && clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);
                propagate_rate(clkp);
        }
}

int clk_register(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        /*
         * trap out already registered clocks
         */
        if (clk->node.next || clk->node.prev)
                return 0;

        mutex_lock(&clock_list_sem);

        INIT_LIST_HEAD(&clk->children);
        clk->usecount = 0;

        if (clk->parent)
                list_add(&clk->sibling, &clk->parent->children);
        else
                list_add(&clk->sibling, &root_clks);

        list_add(&clk->node, &clock_list);
        if (clk->ops && clk->ops->init)
                clk->ops->init(clk);
        mutex_unlock(&clock_list_sem);

        return 0;
}
EXPORT_SYMBOL_GPL(clk_register);

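/*
 * Example (hypothetical clock and ops, for illustration only): a
 * CPU-specific setup file would typically define its clocks statically
 * and hand them to clk_register():
 *
 *      static struct clk bus_clk = {
 *              .name   = "bus_clk",
 *              .parent = &master_clk,
 *              .ops    = &div4_clk_ops,
 *      };
 *
 *      clk_register(&bus_clk);
 *
 * clk_register() links the clock into clock_list, attaches it to its
 * parent's children list (or to root_clks when it has no parent), and
 * calls ops->init() if one is provided.
 */
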
void clk_unregister(struct clk *clk)
{
        mutex_lock(&clock_list_sem);
        list_del(&clk->sibling);
        list_del(&clk->node);
        mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

static void clk_enable_init_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clock_list, node)
                if (clkp->flags & CLK_ENABLE_ON_INIT)
                        clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
        return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
        int ret = -EOPNOTSUPP;
        unsigned long flags;

        spin_lock_irqsave(&clock_lock, flags);

        if (likely(clk->ops && clk->ops->set_rate)) {
                ret = clk->ops->set_rate(clk, rate, algo_id);
                if (ret != 0)
                        goto out_unlock;
        } else {
                clk->rate = rate;
                ret = 0;
        }

        if (clk->ops && clk->ops->recalc)
                clk->rate = clk->ops->recalc(clk);

        propagate_rate(clk);

out_unlock:
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (!parent || !clk)
                return ret;
        if (clk->parent == parent)
                return 0;

        spin_lock_irqsave(&clock_lock, flags);
        if (clk->usecount == 0) {
                if (clk->ops->set_parent)
                        ret = clk->ops->set_parent(clk, parent);
                else
                        ret = clk_reparent(clk, parent);

                if (ret == 0) {
                        pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
                                 clk->name, clk->parent->name, clk->rate);
                        if (clk->ops->recalc)
                                clk->rate = clk->ops->recalc(clk);
                        propagate_rate(clk);
                }
        } else
                ret = -EBUSY;
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
        return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (likely(clk->ops && clk->ops->round_rate)) {
                unsigned long flags, rounded;

                spin_lock_irqsave(&clock_lock, flags);
                rounded = clk->ops->round_rate(clk, rate);
                spin_unlock_irqrestore(&clock_lock, flags);

                return rounded;
        }

        return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

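/*
 * Example (hypothetical target rate, for illustration only): callers that
 * need a rate the hardware can actually generate usually round first and
 * then set:
 *
 *      long rounded = clk_round_rate(clk, 33333333);
 *
 *      if (rounded > 0)
 *              clk_set_rate(clk, rounded);
 *
 * Clocks without a ->round_rate op simply report their current rate.
 */
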
/*
 * Find the correct struct clk for the device and connection ID.
 * We do slightly fuzzy matching here:
 *  An entry with a NULL ID is assumed to be a wildcard.
 *  If an entry has a device ID, it must match.
 *  If an entry has a connection ID, it must match.
 * Then we take the most specific entry - with the following
 * order of precedence: dev+con > dev only > con only.
 */
static struct clk *clk_find(const char *dev_id, const char *con_id)
{
        struct clk_lookup *p;
        struct clk *clk = NULL;
        int match, best = 0;

        list_for_each_entry(p, &clock_list, node) {
                match = 0;
                if (p->dev_id) {
                        if (!dev_id || strcmp(p->dev_id, dev_id))
                                continue;
                        match += 2;
                }
                if (p->con_id) {
                        if (!con_id || strcmp(p->con_id, con_id))
                                continue;
                        match += 1;
                }
                if (match == 0)
                        continue;

                if (match > best) {
                        clk = p->clk;
                        best = match;
                }
        }
        return clk;
}

struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
        struct clk *clk;

        mutex_lock(&clock_list_sem);
        clk = clk_find(dev_id, con_id);
        mutex_unlock(&clock_list_sem);

        return clk ? clk : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(clk_get_sys);

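/*
 * Example (hypothetical lookup entries, for illustration only): given
 *
 *      { .dev_id = "sh-sci.0", .con_id = "sci_fck", .clk = &clkA }   // dev+con, score 3
 *      { .dev_id = "sh-sci.0",                      .clk = &clkB }   // dev only, score 2
 *      { .con_id = "sci_fck",                       .clk = &clkC }   // con only, score 1
 *
 * clk_get_sys("sh-sci.0", "sci_fck") returns clkA, the most specific
 * match, while clk_get_sys("sh-sci.0", NULL) falls back to clkB.
 */
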
/*
 * Returns a clock. Note that we first try to match on both the device ID
 * and the clock name. If this fails, we try to match on the clock name
 * alone.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
        const char *dev_id = dev ? dev_name(dev) : NULL;
        struct clk *p, *clk = ERR_PTR(-ENOENT);
        int idno;

        clk = clk_get_sys(dev_id, id);
        if (clk && !IS_ERR(clk))
                return clk;

        if (dev == NULL || dev->bus != &platform_bus_type)
                idno = -1;
        else
                idno = to_platform_device(dev)->id;

        mutex_lock(&clock_list_sem);
        list_for_each_entry(p, &clock_list, node) {
                if (p->id == idno &&
                    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        goto found;
                }
        }

        list_for_each_entry(p, &clock_list, node) {
                if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        break;
                }
        }

found:
        mutex_unlock(&clock_list_sem);

        return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
        if (clk && !IS_ERR(clk))
                module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);

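/*
 * Example (hypothetical clock name, for illustration only): the usual
 * driver-side sequence against this API is
 *
 *      struct clk *clk = clk_get(&pdev->dev, "peripheral_clk");
 *
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 *
 *      clk_enable(clk);
 *      rate = clk_get_rate(clk);
 *      ...
 *      clk_disable(clk);
 *      clk_put(clk);
 */
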
static int show_clocks(char *buf, char **start, off_t off,
                       int len, int *eof, void *data)
{
        struct clk *clk;
        char *p = buf;

        list_for_each_entry_reverse(clk, &clock_list, node) {
                unsigned long rate = clk_get_rate(clk);

                p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
                             rate / 1000000, (rate % 1000000) / 10000,
                             (clk->usecount > 0) ? "enabled" : "disabled");
        }

        return p - buf;
}

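/*
 * show_clocks() backs the /proc/clocks entry registered in clk_proc_init()
 * below.  Example output (hypothetical clocks and rates, for illustration
 * only):
 *
 *      peripheral_clk  : 33.33MHz      enabled
 *      bus_clk         : 166.66MHz     enabled
 *      cpu_clk         : 600.00MHz     enabled
 */
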
#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
        static pm_message_t prev_state;
        struct clk *clkp;

        switch (state.event) {
        case PM_EVENT_ON:
                /* Resuming from hibernation */
                if (prev_state.event != PM_EVENT_FREEZE)
                        break;

                list_for_each_entry(clkp, &clock_list, node) {
                        if (likely(clkp->ops)) {
                                unsigned long rate = clkp->rate;

                                if (likely(clkp->ops->set_parent))
                                        clkp->ops->set_parent(clkp,
                                                clkp->parent);
                                if (likely(clkp->ops->set_rate))
                                        clkp->ops->set_rate(clkp,
                                                rate, NO_CHANGE);
                                else if (likely(clkp->ops->recalc))
                                        clkp->rate = clkp->ops->recalc(clkp);
                        }
                }
                break;
        case PM_EVENT_FREEZE:
                break;
        case PM_EVENT_SUSPEND:
                break;
        }

        prev_state = state;
        return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
        return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
        .name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
        .suspend = clks_sysdev_suspend,
        .resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
        .cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
        sysdev_class_register(&clks_sysdev_class);
        sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
        sysdev_register(&clks_sysdev_dev);

        return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

int __init clk_init(void)
{
        int ret;

        ret = arch_clk_init();
        if (unlikely(ret)) {
                pr_err("%s: CPU clock registration failed.\n", __func__);
                return ret;
        }

        if (sh_mv.mv_clk_init) {
                ret = sh_mv.mv_clk_init();
                if (unlikely(ret)) {
                        pr_err("%s: machvec clock initialization failed.\n",
                               __func__);
                        return ret;
                }
        }

        /* Kick the child clocks.. */
        recalculate_root_clocks();

        /* Enable the necessary init clocks */
        clk_enable_init_clocks();

        return ret;
}

static int __init clk_proc_init(void)
{
        struct proc_dir_entry *p;
        p = create_proc_read_entry("clocks", S_IRUSR, NULL,
                                   show_clocks, NULL);
        if (unlikely(!p))
                return -EINVAL;

        return 0;
}
subsys_initcall(clk_proc_init);