aa0fd0893585bb545b5ac68d58c83c7740446bff
[pandora-kernel.git] / arch / sh / kernel / cpu / clock.c
1 /*
2  * arch/sh/kernel/cpu/clock.c - SuperH clock framework
3  *
4  *  Copyright (C) 2005 - 2009  Paul Mundt
5  *
6  * This clock framework is derived from the OMAP version by:
7  *
8  *      Copyright (C) 2004 - 2008 Nokia Corporation
9  *      Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
10  *
11  *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
12  *
13  *  With clkdev bits:
14  *
15  *      Copyright (C) 2008 Russell King.
16  *
17  * This file is subject to the terms and conditions of the GNU General Public
18  * License.  See the file "COPYING" in the main directory of this archive
19  * for more details.
20  */
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/module.h>
24 #include <linux/mutex.h>
25 #include <linux/list.h>
26 #include <linux/kobject.h>
27 #include <linux/sysdev.h>
28 #include <linux/seq_file.h>
29 #include <linux/err.h>
30 #include <linux/platform_device.h>
31 #include <linux/debugfs.h>
32 #include <linux/cpufreq.h>
33 #include <asm/clock.h>
34 #include <asm/machvec.h>
35
36 static LIST_HEAD(clock_list);
37 static DEFINE_SPINLOCK(clock_lock);
38 static DEFINE_MUTEX(clock_list_sem);
39
40 void clk_rate_table_build(struct clk *clk,
41                           struct cpufreq_frequency_table *freq_table,
42                           int nr_freqs,
43                           struct clk_div_mult_table *src_table,
44                           unsigned long *bitmap)
45 {
46         unsigned long mult, div;
47         unsigned long freq;
48         int i;
49
50         for (i = 0; i < nr_freqs; i++) {
51                 div = 1;
52                 mult = 1;
53
54                 if (src_table->divisors && i < src_table->nr_divisors)
55                         div = src_table->divisors[i];
56
57                 if (src_table->multipliers && i < src_table->nr_multipliers)
58                         mult = src_table->multipliers[i];
59
60                 if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
61                         freq = CPUFREQ_ENTRY_INVALID;
62                 else
63                         freq = clk->parent->rate * mult / div;
64
65                 freq_table[i].index = i;
66                 freq_table[i].frequency = freq;
67         }
68
69         /* Termination entry */
70         freq_table[i].index = i;
71         freq_table[i].frequency = CPUFREQ_TABLE_END;
72 }
73
74 long clk_rate_table_round(struct clk *clk,
75                           struct cpufreq_frequency_table *freq_table,
76                           unsigned long rate)
77 {
78         unsigned long rate_error, rate_error_prev = ~0UL;
79         unsigned long rate_best_fit = rate;
80         unsigned long highest, lowest;
81         int i;
82
83         highest = lowest = 0;
84
85         for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
86                 unsigned long freq = freq_table[i].frequency;
87
88                 if (freq == CPUFREQ_ENTRY_INVALID)
89                         continue;
90
91                 if (freq > highest)
92                         highest = freq;
93                 if (freq < lowest)
94                         lowest = freq;
95
96                 rate_error = abs(freq - rate);
97                 if (rate_error < rate_error_prev) {
98                         rate_best_fit = freq;
99                         rate_error_prev = rate_error;
100                 }
101
102                 if (rate_error == 0)
103                         break;
104         }
105
106         if (rate >= highest)
107                 rate_best_fit = highest;
108         if (rate <= lowest)
109                 rate_best_fit = lowest;
110
111         return rate_best_fit;
112 }
113
/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	/* A clock with no parent reports a rate of 0. */
	return clk->parent ? clk->parent->rate : 0;
}
119
/*
 * clk_reparent - move @child under @parent in the clock topology
 *
 * Unlinks @child from its current sibling list and, when @parent is
 * non-NULL, links it into @parent's children.  A NULL @parent leaves
 * the child on no list (it is NOT moved to the root clock list here).
 * Always returns 0.  Callers are expected to hold clock_lock.
 */
int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */

	return 0;
}
132
/*
 * Propagate rate to children.
 *
 * Depth-first walk of @tclk's subtree: each child with a ->recalc op
 * has its cached rate refreshed; children without one keep their old
 * rate but their own descendants are still visited.
 */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}
145
146 static void __clk_disable(struct clk *clk)
147 {
148         if (clk->usecount == 0) {
149                 printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
150                        clk->name);
151                 WARN_ON(1);
152                 return;
153         }
154
155         if (!(--clk->usecount)) {
156                 if (likely(clk->ops && clk->ops->disable))
157                         clk->ops->disable(clk);
158                 if (likely(clk->parent))
159                         __clk_disable(clk->parent);
160         }
161 }
162
163 void clk_disable(struct clk *clk)
164 {
165         unsigned long flags;
166
167         if (!clk)
168                 return;
169
170         spin_lock_irqsave(&clock_lock, flags);
171         __clk_disable(clk);
172         spin_unlock_irqrestore(&clock_lock, flags);
173 }
174 EXPORT_SYMBOL_GPL(clk_disable);
175
176 static int __clk_enable(struct clk *clk)
177 {
178         int ret = 0;
179
180         if (clk->usecount++ == 0) {
181                 if (clk->parent) {
182                         ret = __clk_enable(clk->parent);
183                         if (unlikely(ret))
184                                 goto err;
185                 }
186
187                 if (clk->ops && clk->ops->enable) {
188                         ret = clk->ops->enable(clk);
189                         if (ret) {
190                                 if (clk->parent)
191                                         __clk_disable(clk->parent);
192                                 goto err;
193                         }
194                 }
195         }
196
197         return ret;
198 err:
199         clk->usecount--;
200         return ret;
201 }
202
203 int clk_enable(struct clk *clk)
204 {
205         unsigned long flags;
206         int ret;
207
208         if (!clk)
209                 return -EINVAL;
210
211         spin_lock_irqsave(&clock_lock, flags);
212         ret = __clk_enable(clk);
213         spin_unlock_irqrestore(&clock_lock, flags);
214
215         return ret;
216 }
217 EXPORT_SYMBOL_GPL(clk_enable);
218
219 static LIST_HEAD(root_clks);
220
221 /**
222  * recalculate_root_clocks - recalculate and propagate all root clocks
223  *
224  * Recalculates all root clocks (clocks with no parent), which if the
225  * clock's .recalc is set correctly, should also propagate their rates.
226  * Called at init.
227  */
228 void recalculate_root_clocks(void)
229 {
230         struct clk *clkp;
231
232         list_for_each_entry(clkp, &root_clks, sibling) {
233                 if (clkp->ops && clkp->ops->recalc)
234                         clkp->rate = clkp->ops->recalc(clkp);
235                 propagate_rate(clkp);
236         }
237 }
238
239 int clk_register(struct clk *clk)
240 {
241         if (clk == NULL || IS_ERR(clk))
242                 return -EINVAL;
243
244         /*
245          * trap out already registered clocks
246          */
247         if (clk->node.next || clk->node.prev)
248                 return 0;
249
250         mutex_lock(&clock_list_sem);
251
252         INIT_LIST_HEAD(&clk->children);
253         clk->usecount = 0;
254
255         if (clk->parent)
256                 list_add(&clk->sibling, &clk->parent->children);
257         else
258                 list_add(&clk->sibling, &root_clks);
259
260         list_add(&clk->node, &clock_list);
261         if (clk->ops && clk->ops->init)
262                 clk->ops->init(clk);
263         mutex_unlock(&clock_list_sem);
264
265         return 0;
266 }
267 EXPORT_SYMBOL_GPL(clk_register);
268
269 void clk_unregister(struct clk *clk)
270 {
271         mutex_lock(&clock_list_sem);
272         list_del(&clk->sibling);
273         list_del(&clk->node);
274         mutex_unlock(&clock_list_sem);
275 }
276 EXPORT_SYMBOL_GPL(clk_unregister);
277
278 static void clk_enable_init_clocks(void)
279 {
280         struct clk *clkp;
281
282         list_for_each_entry(clkp, &clock_list, node)
283                 if (clkp->flags & CLK_ENABLE_ON_INIT)
284                         clk_enable(clkp);
285 }
286
/* Return the cached rate of @clk; no hardware access, no locking. */
unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
292
/* Set @clk's rate using the default algorithm (algo_id 0). */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);
298
299 int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
300 {
301         int ret = -EOPNOTSUPP;
302         unsigned long flags;
303
304         spin_lock_irqsave(&clock_lock, flags);
305
306         if (likely(clk->ops && clk->ops->set_rate)) {
307                 ret = clk->ops->set_rate(clk, rate, algo_id);
308                 if (ret != 0)
309                         goto out_unlock;
310         } else {
311                 clk->rate = rate;
312                 ret = 0;
313         }
314
315         if (clk->ops && clk->ops->recalc)
316                 clk->rate = clk->ops->recalc(clk);
317
318         propagate_rate(clk);
319
320 out_unlock:
321         spin_unlock_irqrestore(&clock_lock, flags);
322
323         return ret;
324 }
325 EXPORT_SYMBOL_GPL(clk_set_rate_ex);
326
327 int clk_set_parent(struct clk *clk, struct clk *parent)
328 {
329         unsigned long flags;
330         int ret = -EINVAL;
331
332         if (!parent || !clk)
333                 return ret;
334         if (clk->parent == parent)
335                 return 0;
336
337         spin_lock_irqsave(&clock_lock, flags);
338         if (clk->usecount == 0) {
339                 if (clk->ops->set_parent)
340                         ret = clk->ops->set_parent(clk, parent);
341                 else
342                         ret = clk_reparent(clk, parent);
343
344                 if (ret == 0) {
345                         pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
346                                  clk->name, clk->parent->name, clk->rate);
347                         if (clk->ops->recalc)
348                                 clk->rate = clk->ops->recalc(clk);
349                         propagate_rate(clk);
350                 }
351         } else
352                 ret = -EBUSY;
353         spin_unlock_irqrestore(&clock_lock, flags);
354
355         return ret;
356 }
357 EXPORT_SYMBOL_GPL(clk_set_parent);
358
/* Return @clk's current parent (NULL for a root clock). */
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
364
365 long clk_round_rate(struct clk *clk, unsigned long rate)
366 {
367         if (likely(clk->ops && clk->ops->round_rate)) {
368                 unsigned long flags, rounded;
369
370                 spin_lock_irqsave(&clock_lock, flags);
371                 rounded = clk->ops->round_rate(clk, rate);
372                 spin_unlock_irqrestore(&clock_lock, flags);
373
374                 return rounded;
375         }
376
377         return clk_get_rate(clk);
378 }
379 EXPORT_SYMBOL_GPL(clk_round_rate);
380
/*
 * Find the correct struct clk for the device and connection ID.
 * We do slightly fuzzy matching here:
 *  An entry with a NULL ID is assumed to be a wildcard.
 *  If an entry has a device ID, it must match
 *  If an entry has a connection ID, it must match
 * Then we take the most specific entry - with the following
 * order of precedence: dev+con > dev only > con only.
 */
static struct clk *clk_find(const char *dev_id, const char *con_id)
{
	/*
	 * NOTE(review): clock_list is populated with struct clk nodes by
	 * clk_register(), yet it is walked here as struct clk_lookup.
	 * This relies on the two structs sharing a compatible layout for
	 * ->node, ->dev_id, ->con_id and ->clk - confirm against
	 * asm/clock.h and the clkdev definitions in this tree.
	 */
	struct clk_lookup *p;
	struct clk *clk = NULL;
	int match, best = 0;

	list_for_each_entry(p, &clock_list, node) {
		match = 0;
		if (p->dev_id) {
			/* Entry demands a device ID: must match exactly. */
			if (!dev_id || strcmp(p->dev_id, dev_id))
				continue;
			match += 2;	/* device match outranks connection match */
		}
		if (p->con_id) {
			if (!con_id || strcmp(p->con_id, con_id))
				continue;
			match += 1;
		}
		if (match == 0)
			continue;

		/* Keep the most specific match seen so far. */
		if (match > best) {
			clk = p->clk;
			best = match;
		}
	}
	return clk;
}
418
419 struct clk *clk_get_sys(const char *dev_id, const char *con_id)
420 {
421         struct clk *clk;
422
423         mutex_lock(&clock_list_sem);
424         clk = clk_find(dev_id, con_id);
425         mutex_unlock(&clock_list_sem);
426
427         return clk ? clk : ERR_PTR(-ENOENT);
428 }
429 EXPORT_SYMBOL_GPL(clk_get_sys);
430
431 /*
432  * Returns a clock. Note that we first try to use device id on the bus
433  * and clock name. If this fails, we try to use clock name only.
434  */
435 struct clk *clk_get(struct device *dev, const char *id)
436 {
437         const char *dev_id = dev ? dev_name(dev) : NULL;
438         struct clk *p, *clk = ERR_PTR(-ENOENT);
439         int idno;
440
441         clk = clk_get_sys(dev_id, id);
442         if (clk && !IS_ERR(clk))
443                 return clk;
444
445         if (dev == NULL || dev->bus != &platform_bus_type)
446                 idno = -1;
447         else
448                 idno = to_platform_device(dev)->id;
449
450         mutex_lock(&clock_list_sem);
451         list_for_each_entry(p, &clock_list, node) {
452                 if (p->id == idno &&
453                     strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
454                         clk = p;
455                         goto found;
456                 }
457         }
458
459         list_for_each_entry(p, &clock_list, node) {
460                 if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
461                         clk = p;
462                         break;
463                 }
464         }
465
466 found:
467         mutex_unlock(&clock_list_sem);
468
469         return clk;
470 }
471 EXPORT_SYMBOL_GPL(clk_get);
472
/* Release the module reference taken by clk_get(); NULL/ERR_PTR safe. */
void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);
479
480 #ifdef CONFIG_PM
/*
 * sysdev suspend/resume hook.  Called with PMSG_ON from the resume
 * path (see clks_sysdev_resume below); when the previous event was a
 * hibernation freeze, every clock's parent and rate are reprogrammed
 * since the hardware state was lost.
 */
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	/* Event from the previous call; detects hibernation resume. */
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				/* NOTE(review): NO_CHANGE presumably tells
				 * set_rate to keep the current algorithm -
				 * confirm against asm/clock.h. */
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}
516
/* Resume is modelled as a PMSG_ON "suspend" event; see above. */
static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}
521
/* sysdev glue: class + driver + device so the PM hooks above fire. */
static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};
534
/* Register the sysdev class/driver/device so PM callbacks are delivered. */
static int __init clk_sysdev_init(void)
{
	/* NOTE(review): the register calls' return values are ignored;
	 * a failure here silently loses the hibernation-restore hooks. */
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
544 #endif
545
546 int __init clk_init(void)
547 {
548         int ret;
549
550         ret = arch_clk_init();
551         if (unlikely(ret)) {
552                 pr_err("%s: CPU clock registration failed.\n", __func__);
553                 return ret;
554         }
555
556         if (sh_mv.mv_clk_init) {
557                 ret = sh_mv.mv_clk_init();
558                 if (unlikely(ret)) {
559                         pr_err("%s: machvec clock initialization failed.\n",
560                                __func__);
561                         return ret;
562                 }
563         }
564
565         /* Kick the child clocks.. */
566         recalculate_root_clocks();
567
568         /* Enable the necessary init clocks */
569         clk_enable_init_clocks();
570
571         return ret;
572 }
573
574 /*
575  *      debugfs support to trace clock tree hierarchy and attributes
576  */
577 static struct dentry *clk_debugfs_root;
578
579 static int clk_debugfs_register_one(struct clk *c)
580 {
581         int err;
582         struct dentry *d, *child;
583         struct clk *pa = c->parent;
584         char s[255];
585         char *p = s;
586
587         p += sprintf(p, "%s", c->name);
588         if (c->id >= 0)
589                 sprintf(p, ":%d", c->id);
590         d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
591         if (!d)
592                 return -ENOMEM;
593         c->dentry = d;
594
595         d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
596         if (!d) {
597                 err = -ENOMEM;
598                 goto err_out;
599         }
600         d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
601         if (!d) {
602                 err = -ENOMEM;
603                 goto err_out;
604         }
605         d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
606         if (!d) {
607                 err = -ENOMEM;
608                 goto err_out;
609         }
610         return 0;
611
612 err_out:
613         d = c->dentry;
614         list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
615                 debugfs_remove(child);
616         debugfs_remove(c->dentry);
617         return err;
618 }
619
620 static int clk_debugfs_register(struct clk *c)
621 {
622         int err;
623         struct clk *pa = c->parent;
624
625         if (pa && !pa->dentry) {
626                 err = clk_debugfs_register(pa);
627                 if (err)
628                         return err;
629         }
630
631         if (!c->dentry) {
632                 err = clk_debugfs_register_one(c);
633                 if (err)
634                         return err;
635         }
636         return 0;
637 }
638
639 static int __init clk_debugfs_init(void)
640 {
641         struct clk *c;
642         struct dentry *d;
643         int err;
644
645         d = debugfs_create_dir("clock", NULL);
646         if (!d)
647                 return -ENOMEM;
648         clk_debugfs_root = d;
649
650         list_for_each_entry(c, &clock_list, node) {
651                 err = clk_debugfs_register(c);
652                 if (err)
653                         goto err_out;
654         }
655         return 0;
656 err_out:
657         debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
658         return err;
659 }
660 late_initcall(clk_debugfs_init);