/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 *  Copyright (C) 2005 - 2009  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *      Copyright (C) 2004 - 2008 Nokia Corporation
 *      Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 *  With clkdev bits:
 *
 *      Copyright (C) 2008 Russell King.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <asm/clock.h>
#include <asm/machvec.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
        return clk->parent->rate;
}

int clk_reparent(struct clk *child, struct clk *parent)
{
        list_del_init(&child->sibling);
        if (parent)
                list_add(&child->sibling, &parent->children);
        child->parent = parent;

        /* now do the debugfs renaming to reattach the child
           to the proper parent */

        return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &tclk->children, sibling) {
                if (clkp->ops && clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);
                if (clkp->ops && clkp->ops->build_rate_table)
                        clkp->ops->build_rate_table(clkp);

                propagate_rate(clkp);
        }
}

static void __clk_disable(struct clk *clk)
{
        if (clk->usecount == 0) {
                printk(KERN_ERR "Trying to disable clock %s with 0 usecount\n",
                       clk->name);
                WARN_ON(1);
                return;
        }

        if (!(--clk->usecount)) {
                if (likely(clk->ops && clk->ops->disable))
                        clk->ops->disable(clk);
                if (likely(clk->parent))
                        __clk_disable(clk->parent);
        }
}

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (!clk)
                return;

        spin_lock_irqsave(&clock_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
        int ret = 0;

        if (clk->usecount++ == 0) {
                if (clk->parent) {
                        ret = __clk_enable(clk->parent);
                        if (unlikely(ret))
                                goto err;
                }

                if (clk->ops && clk->ops->enable) {
                        ret = clk->ops->enable(clk);
                        if (ret) {
                                if (clk->parent)
                                        __clk_disable(clk->parent);
                                goto err;
                        }
                }
        }

        return ret;
err:
        clk->usecount--;
        return ret;
}

int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        if (!clk)
                return -EINVAL;

        spin_lock_irqsave(&clock_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
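
/*
 * Usage sketch (illustrative only, not part of this file's API): a driver
 * typically brackets hardware access with an enable/disable pair on a
 * clock obtained from clk_get(), defined further down. The connection id
 * "peripheral_clk" below is a hypothetical name.
 *
 *      struct clk *clk = clk_get(dev, "peripheral_clk");
 *
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 *      if (clk_enable(clk) == 0) {
 *              ...access the hardware...
 *              clk_disable(clk);
 *      }
 *      clk_put(clk);
 */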

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which, if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &root_clks, sibling) {
                if (clkp->ops && clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);
                propagate_rate(clkp);
        }
}

int clk_register(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        /*
         * trap out already registered clocks
         */
        if (clk->node.next || clk->node.prev)
                return 0;

        mutex_lock(&clock_list_sem);

        INIT_LIST_HEAD(&clk->children);
        clk->usecount = 0;

        if (clk->parent)
                list_add(&clk->sibling, &clk->parent->children);
        else
                list_add(&clk->sibling, &root_clks);

        list_add(&clk->node, &clock_list);
        if (clk->ops && clk->ops->init)
                clk->ops->init(clk);
        mutex_unlock(&clock_list_sem);

        return 0;
}
EXPORT_SYMBOL_GPL(clk_register);
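
/*
 * Registration sketch (illustrative only): CPU- or board-specific setup
 * code declares its clocks statically and hands them to clk_register().
 * The names, the parent and the ops below are hypothetical; real tables
 * live elsewhere under arch/sh/kernel/cpu/.
 *
 *      static struct clk_ops peripheral_clk_ops = {
 *              .recalc = followparent_recalc,
 *      };
 *
 *      static struct clk peripheral_clk = {
 *              .name   = "peripheral_clk",
 *              .id     = -1,
 *              .parent = &bus_clk,     (hypothetical parent clock)
 *              .ops    = &peripheral_clk_ops,
 *              .flags  = CLK_ENABLE_ON_INIT,
 *      };
 *
 *      clk_register(&peripheral_clk);
 */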

void clk_unregister(struct clk *clk)
{
        mutex_lock(&clock_list_sem);
        list_del(&clk->sibling);
        list_del(&clk->node);
        mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

static void clk_enable_init_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clock_list, node)
                if (clkp->flags & CLK_ENABLE_ON_INIT)
                        clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
        return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
        int ret = -EOPNOTSUPP;
        unsigned long flags;

        spin_lock_irqsave(&clock_lock, flags);

        if (likely(clk->ops && clk->ops->set_rate)) {
                ret = clk->ops->set_rate(clk, rate, algo_id);
                if (ret != 0)
                        goto out_unlock;
        } else {
                clk->rate = rate;
                ret = 0;
        }

        if (clk->ops && clk->ops->recalc)
                clk->rate = clk->ops->recalc(clk);

        propagate_rate(clk);

out_unlock:
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (!parent || !clk)
                return ret;
        if (clk->parent == parent)
                return 0;

        spin_lock_irqsave(&clock_lock, flags);
        if (clk->usecount == 0) {
                if (clk->ops->set_parent)
                        ret = clk->ops->set_parent(clk, parent);
                else
                        ret = clk_reparent(clk, parent);

                if (ret == 0) {
                        pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
                                 clk->name, clk->parent->name, clk->rate);
                        if (clk->ops->recalc)
                                clk->rate = clk->ops->recalc(clk);
                        propagate_rate(clk);
                }
        } else
                ret = -EBUSY;
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
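
/*
 * Reparenting sketch (illustrative only): a clock can only be moved to a
 * new parent while its usecount is zero, otherwise clk_set_parent()
 * returns -EBUSY. "new_parent" is a hypothetical clock.
 *
 *      clk_disable(clk);
 *      if (clk_set_parent(clk, new_parent) == 0)
 *              clk_enable(clk);
 */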

struct clk *clk_get_parent(struct clk *clk)
{
        return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (likely(clk->ops && clk->ops->round_rate)) {
                unsigned long flags, rounded;

                spin_lock_irqsave(&clock_lock, flags);
                rounded = clk->ops->round_rate(clk, rate);
                spin_unlock_irqrestore(&clock_lock, flags);

                return rounded;
        }

        return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
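
/*
 * Rate change sketch (illustrative only): round first, then set, so the
 * request is one the divisor hardware can actually produce. The target
 * rate here is arbitrary.
 *
 *      long rounded = clk_round_rate(clk, 33333333);
 *
 *      if (rounded > 0)
 *              clk_set_rate(clk, rounded);
 */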

/*
 * Find the correct struct clk for the device and connection ID.
 * We do slightly fuzzy matching here:
 *  An entry with a NULL ID is assumed to be a wildcard.
 *  If an entry has a device ID, it must match.
 *  If an entry has a connection ID, it must match.
 * Then we take the most specific entry - with the following
 * order of precedence: dev+con > dev only > con only.
 */
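/*
 * For example, with hypothetical lookup entries, a request for
 * ("sh-sci.0", "sci_fck") scores a dev_id+con_id entry at 3, a
 * dev_id-only entry at 2 and a con_id-only entry at 1, so the most
 * specific entry wins.
 */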
static struct clk *clk_find(const char *dev_id, const char *con_id)
{
        struct clk_lookup *p;
        struct clk *clk = NULL;
        int match, best = 0;

        list_for_each_entry(p, &clock_list, node) {
                match = 0;
                if (p->dev_id) {
                        if (!dev_id || strcmp(p->dev_id, dev_id))
                                continue;
                        match += 2;
                }
                if (p->con_id) {
                        if (!con_id || strcmp(p->con_id, con_id))
                                continue;
                        match += 1;
                }
                if (match == 0)
                        continue;

                if (match > best) {
                        clk = p->clk;
                        best = match;
                }
        }
        return clk;
}

struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
        struct clk *clk;

        mutex_lock(&clock_list_sem);
        clk = clk_find(dev_id, con_id);
        mutex_unlock(&clock_list_sem);

        return clk ? clk : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(clk_get_sys);

/*
 * Returns a clock. Note that we first try to use device id on the bus
 * and clock name. If this fails, we try to use clock name only.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
        const char *dev_id = dev ? dev_name(dev) : NULL;
        struct clk *p, *clk = ERR_PTR(-ENOENT);
        int idno;

        clk = clk_get_sys(dev_id, id);
        if (clk && !IS_ERR(clk))
                return clk;

        if (dev == NULL || dev->bus != &platform_bus_type)
                idno = -1;
        else
                idno = to_platform_device(dev)->id;

        mutex_lock(&clock_list_sem);
        list_for_each_entry(p, &clock_list, node) {
                if (p->id == idno &&
                    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        goto found;
                }
        }

        list_for_each_entry(p, &clock_list, node) {
                if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        break;
                }
        }

found:
        mutex_unlock(&clock_list_sem);

        return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
        if (clk && !IS_ERR(clk))
                module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);

#ifdef CONFIG_PM
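/*
 * Hibernation handling: the restore path shows up here as PM_EVENT_FREEZE
 * followed by PM_EVENT_ON. At that point the hardware holds whatever state
 * the boot kernel left behind, so every clock's parent and rate are
 * reprogrammed from the cached software state.
 */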
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
        static pm_message_t prev_state;
        struct clk *clkp;

        switch (state.event) {
        case PM_EVENT_ON:
                /* Resuming from hibernation */
                if (prev_state.event != PM_EVENT_FREEZE)
                        break;

                list_for_each_entry(clkp, &clock_list, node) {
                        if (likely(clkp->ops)) {
                                unsigned long rate = clkp->rate;

                                if (likely(clkp->ops->set_parent))
                                        clkp->ops->set_parent(clkp,
                                                clkp->parent);
                                if (likely(clkp->ops->set_rate))
                                        clkp->ops->set_rate(clkp,
                                                rate, NO_CHANGE);
                                else if (likely(clkp->ops->recalc))
                                        clkp->rate = clkp->ops->recalc(clkp);
                        }
                }
                break;
        case PM_EVENT_FREEZE:
                break;
        case PM_EVENT_SUSPEND:
                break;
        }

        prev_state = state;
        return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
        return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
        .name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
        .suspend = clks_sysdev_suspend,
        .resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
        .cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
        sysdev_class_register(&clks_sysdev_class);
        sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
        sysdev_register(&clks_sysdev_dev);

        return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

int __init clk_init(void)
{
        int ret;

        ret = arch_clk_init();
        if (unlikely(ret)) {
                pr_err("%s: CPU clock registration failed.\n", __func__);
                return ret;
        }

        if (sh_mv.mv_clk_init) {
                ret = sh_mv.mv_clk_init();
                if (unlikely(ret)) {
                        pr_err("%s: machvec clock initialization failed.\n",
                               __func__);
                        return ret;
                }
        }

        /* Kick the child clocks.. */
        recalculate_root_clocks();

        /* Enable the necessary init clocks */
        clk_enable_init_clocks();

        return ret;
}

/*
 *      debugfs support to trace clock tree hierarchy and attributes
 */
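/*
 * The directory layout mirrors the parent/child topology, e.g. with
 * hypothetical clock names:
 *
 *      /sys/kernel/debug/clock/master_clk/
 *      /sys/kernel/debug/clock/master_clk/module_clk/
 *
 * where each directory exposes read-only usecount, rate and flags files.
 */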
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
{
        int err;
        struct dentry *d, *child;
        struct clk *pa = c->parent;
        char s[255];
        char *p = s;

        p += sprintf(p, "%s", c->name);
        if (c->id > 0)
                sprintf(p, ":%d", c->id);
        d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
        if (!d)
                return -ENOMEM;
        c->dentry = d;

        d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
        d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
        d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
        return 0;

err_out:
        d = c->dentry;
        list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
                debugfs_remove(child);
        debugfs_remove(c->dentry);
        return err;
}

static int clk_debugfs_register(struct clk *c)
{
        int err;
        struct clk *pa = c->parent;

        if (pa && !pa->dentry) {
                err = clk_debugfs_register(pa);
                if (err)
                        return err;
        }

        if (!c->dentry) {
                err = clk_debugfs_register_one(c);
                if (err)
                        return err;
        }
        return 0;
}

static int __init clk_debugfs_init(void)
{
        struct clk *c;
        struct dentry *d;
        int err;

        d = debugfs_create_dir("clock", NULL);
        if (!d)
                return -ENOMEM;
        clk_debugfs_root = d;

        list_for_each_entry(c, &clock_list, node) {
                err = clk_debugfs_register(c);
                if (err)
                        goto err_out;
        }
        return 0;
err_out:
        debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
        return err;
}
late_initcall(clk_debugfs_init);