2 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
4 * Copyright (C) 2005, 2006, 2007 Paul Mundt
6 * This clock framework is derived from the OMAP version by:
8 * Copyright (C) 2004 - 2005 Nokia Corporation
9 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
11 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
13 * This file is subject to the terms and conditions of the GNU General Public
14 * License. See the file "COPYING" in the main directory of this archive
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/mutex.h>
21 #include <linux/list.h>
22 #include <linux/kobject.h>
23 #include <linux/sysdev.h>
24 #include <linux/seq_file.h>
25 #include <linux/err.h>
26 #include <linux/platform_device.h>
27 #include <linux/proc_fs.h>
28 #include <asm/clock.h>
29 #include <asm/timer.h>
/* Registry of every clock known to the framework. */
31 static LIST_HEAD(clock_list);
/* Guards clock hardware operations (enable/disable/set_rate/recalc); always taken irqsave. */
32 static DEFINE_SPINLOCK(clock_lock);
/* Serializes additions/removals and lookup walks of clock_list. */
33 static DEFINE_MUTEX(clock_list_sem);
36 * Each subtype is expected to define the init routines for these clocks,
37 * as each subtype (or processor family) will have these clocks at the
38 * very least. These are all provided through the CPG, which even some of
39 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
41 * The processor-specific code is expected to register any additional
42 * clock sources that are of interest.
/*
 * Root clock of the on-chip tree; rate comes from the board-configured
 * peripheral clock frequency (CONFIG_SH_PCLK_FREQ).
 * NOTE(review): the .name initializer and closing brace fall outside the
 * visible extract.
 */
44 static struct clk master_clk = {
46 .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
47 .rate = CONFIG_SH_PCLK_FREQ,
/* Peripheral module clock, a child of master_clk; propagates rate changes. */
50 static struct clk module_clk = {
52 .parent = &master_clk,
53 .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
/* Bus clock, a child of master_clk; propagates rate changes. */
56 static struct clk bus_clk = {
58 .parent = &master_clk,
59 .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
/*
 * CPU core clock.  Unlike the others it does not set CLK_RATE_PROPAGATES,
 * so its rate changes are not pushed down to children.
 */
62 static struct clk cpu_clk = {
64 .parent = &master_clk,
65 .flags = CLK_ALWAYS_ENABLED,
69 * The ordering of these clocks matters, do not change it.
71 static struct clk *onchip_clocks[] = {
78 /* Propagate rate to children */
/*
 * Walk the global clock list and, for every clock whose parent is @clk,
 * ask its ops to recalculate the cached rate.  Children flagged
 * CLK_RATE_PROPAGATES presumably recurse further down the tree -- the
 * recursive call itself is on a line not visible in this extract, as are
 * the clkp declaration and the skip for non-children.
 */
79 static void propagate_rate(struct clk *clk)
83 list_for_each_entry(clkp, &clock_list, node) {
84 if (likely(clkp->parent != clk))
86 if (likely(clkp->ops && clkp->ops->recalc))
87 clkp->rate = clkp->ops->recalc(clkp);
88 if (unlikely(clkp->flags & CLK_RATE_PROPAGATES))
/*
 * One-time initialization hook: if the clock is still flagged
 * CLK_NEEDS_INIT (set at registration), run its ops->init() when
 * provided and clear the flag.  The ops->init(clk) invocation itself
 * falls on a line not visible in this extract.
 */
93 static void __clk_init(struct clk *clk)
96 * See if this is the first time we're enabling the clock, some
97 * clocks that are always enabled still require "special"
98 * initialization. This is especially true if the clock mode
99 * changes and the clock needs to hunt for the proper set of
100 * divisors to use before it can effectively recalc.
103 if (clk->flags & CLK_NEEDS_INIT) {
104 if (clk->ops && clk->ops->init)
107 clk->flags &= ~CLK_NEEDS_INIT;
/*
 * Enable @clk: always-enabled clocks short-circuit; otherwise, on the
 * first use (usecount reaching 1) the parent is enabled recursively and
 * the clock's ops->enable() hook is invoked.  Caller holds clock_lock
 * (see clk_enable()).  NOTE(review): the usecount increment, parent NULL
 * check and return statements are on lines not visible in this extract.
 */
111 static int __clk_enable(struct clk *clk)
118 /* nothing to do if always enabled */
119 if (clk->flags & CLK_ALWAYS_ENABLED)
122 if (clk->usecount == 1) {
125 __clk_enable(clk->parent);
127 if (clk->ops && clk->ops->enable)
128 clk->ops->enable(clk);
/*
 * Public enable entry point: wraps __clk_enable() in clock_lock with
 * interrupts disabled and returns its result.
 */
134 int clk_enable(struct clk *clk)
139 spin_lock_irqsave(&clock_lock, flags);
140 ret = __clk_enable(clk);
141 spin_unlock_irqrestore(&clock_lock, flags);
145 EXPORT_SYMBOL_GPL(clk_enable);
/*
 * Mirror of __clk_enable(): warns if the use count has gone negative;
 * always-enabled clocks are left untouched; when the last user goes away
 * (usecount hits 0) the clock's ops->disable() is called and the parent's
 * use count is dropped recursively.  NOTE(review): the usecount decrement
 * and the parent NULL check are on lines not visible in this extract.
 */
147 static void __clk_disable(struct clk *clk)
154 WARN_ON(clk->usecount < 0);
156 if (clk->flags & CLK_ALWAYS_ENABLED)
159 if (clk->usecount == 0) {
160 if (likely(clk->ops && clk->ops->disable))
161 clk->ops->disable(clk);
163 __clk_disable(clk->parent);
/*
 * Public disable entry point; takes clock_lock irqsave around the real
 * work.  NOTE(review): the __clk_disable() call itself is on a line not
 * visible in this extract.
 */
167 void clk_disable(struct clk *clk)
171 spin_lock_irqsave(&clock_lock, flags);
173 spin_unlock_irqrestore(&clock_lock, flags);
175 EXPORT_SYMBOL_GPL(clk_disable);
/*
 * Add @clk to the global list (under clock_list_sem) and flag it for
 * deferred one-time init (the flag is consumed by __clk_init()).
 * Always-enabled clocks are switched on immediately via ops->enable().
 * NOTE(review): the return statement is on a line not visible in this
 * extract.
 */
177 int clk_register(struct clk *clk)
179 mutex_lock(&clock_list_sem);
181 list_add(&clk->node, &clock_list);
183 clk->flags |= CLK_NEEDS_INIT;
185 mutex_unlock(&clock_list_sem);
187 if (clk->flags & CLK_ALWAYS_ENABLED) {
189 pr_debug( "Clock '%s' is ALWAYS_ENABLED\n", clk->name);
190 if (clk->ops && clk->ops->enable)
191 clk->ops->enable(clk);
192 pr_debug( "Enabled.");
/* Remove @clk from the global clock list under clock_list_sem. */
199 void clk_unregister(struct clk *clk)
201 mutex_lock(&clock_list_sem);
202 list_del(&clk->node);
203 mutex_unlock(&clock_list_sem);
205 EXPORT_SYMBOL_GPL(clk_unregister);
/*
 * Return the clock's current rate in Hz.  The body is not visible in
 * this extract; presumably it just reads clk->rate -- verify against the
 * full source.
 */
207 unsigned long clk_get_rate(struct clk *clk)
211 EXPORT_SYMBOL_GPL(clk_get_rate);
/* Convenience wrapper: set the rate using the default algorithm (id 0). */
213 int clk_set_rate(struct clk *clk, unsigned long rate)
215 return clk_set_rate_ex(clk, rate, 0);
217 EXPORT_SYMBOL_GPL(clk_set_rate);
/*
 * Set @clk's rate using a CPU-specific algorithm id.  Returns
 * -EOPNOTSUPP when the clock has no set_rate op.  The hardware call is
 * made under clock_lock irqsave; clocks flagged CLK_RATE_PROPAGATES then
 * push the change to children (the propagate_rate() call is on a line
 * not visible in this extract, as is the return statement).
 */
219 int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
221 int ret = -EOPNOTSUPP;
223 if (likely(clk->ops && clk->ops->set_rate)) {
226 spin_lock_irqsave(&clock_lock, flags);
227 ret = clk->ops->set_rate(clk, rate, algo_id);
228 spin_unlock_irqrestore(&clock_lock, flags);
231 if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
236 EXPORT_SYMBOL_GPL(clk_set_rate_ex);
/*
 * Refresh the cached rate from hardware via ops->recalc() under
 * clock_lock, then propagate to children for CLK_RATE_PROPAGATES clocks
 * (the propagate_rate() call is on a line not visible in this extract).
 */
238 void clk_recalc_rate(struct clk *clk)
240 if (likely(clk->ops && clk->ops->recalc)) {
243 spin_lock_irqsave(&clock_lock, flags);
244 clk->rate = clk->ops->recalc(clk);
245 spin_unlock_irqrestore(&clock_lock, flags);
248 if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
/*
 * Reparent @clk via its set_parent op, called under clock_lock irqsave.
 * On failure the previous parent is kept ("old" is captured on a line
 * not visible in this extract); on success rate changes propagate when
 * CLK_RATE_PROPAGATES is set.
 */
253 int clk_set_parent(struct clk *clk, struct clk *parent)
262 if (likely(clk->ops && clk->ops->set_parent)) {
264 spin_lock_irqsave(&clock_lock, flags);
265 ret = clk->ops->set_parent(clk, parent);
266 spin_unlock_irqrestore(&clock_lock, flags);
/* Keep the old parent if the op reported failure. */
267 clk->parent = (ret ? old : parent);
270 if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
274 EXPORT_SYMBOL_GPL(clk_set_parent);
/*
 * Return the clock's parent.  The body is not visible in this extract;
 * presumably it just reads clk->parent -- verify against the full source.
 */
276 struct clk *clk_get_parent(struct clk *clk)
280 EXPORT_SYMBOL_GPL(clk_get_parent);
/*
 * Ask the clock's round_rate op (under clock_lock) what rate it would
 * actually run at for @rate; clocks without the op fall back to
 * reporting their current rate.  The return of the rounded value is on a
 * line not visible in this extract.
 */
282 long clk_round_rate(struct clk *clk, unsigned long rate)
284 if (likely(clk->ops && clk->ops->round_rate)) {
285 unsigned long flags, rounded;
287 spin_lock_irqsave(&clock_lock, flags);
288 rounded = clk->ops->round_rate(clk, rate);
289 spin_unlock_irqrestore(&clock_lock, flags);
294 return clk_get_rate(clk);
299 * Returns a clock. Note that we first try to use device id on the bus
300 * and clock name. If this fails, we try to use clock name only.
/*
 * Lookup is done under clock_list_sem; a successful match also takes a
 * module reference on the clock's owner (released by clk_put()).
 * Defaults to ERR_PTR(-ENOENT) when nothing matches.
 */
302 struct clk *clk_get(struct device *dev, const char *id)
304 struct clk *p, *clk = ERR_PTR(-ENOENT);
/* Only platform-bus devices carry a usable numeric id. */
307 if (dev == NULL || dev->bus != &platform_bus_type)
310 idno = to_platform_device(dev)->id;
312 mutex_lock(&clock_list_sem);
/*
 * First pass: match on device id AND clock name.  NOTE(review): the id
 * comparison half of the condition is on a line not visible here.
 */
313 list_for_each_entry(p, &clock_list, node) {
315 strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
/* Second pass: fall back to matching on the clock name alone. */
321 list_for_each_entry(p, &clock_list, node) {
322 if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
329 mutex_unlock(&clock_list_sem);
/* Drop the module reference taken by clk_get(); tolerates NULL/ERR_PTR. */
335 void clk_put(struct clk *clk)
337 if (clk && !IS_ERR(clk))
338 module_put(clk->owner);
340 EXPORT_SYMBOL_GPL(clk_put);
/*
 * Default (weak) implementations, meant to be overridden by
 * CPU-specific code.  NOTE(review): for arch_clk_init() only the
 * attribute line is visible; its name and body fall outside this
 * extract.
 */
342 void __init __attribute__ ((weak))
343 arch_init_clk_ops(struct clk_ops **ops, int type)
347 int __init __attribute__ ((weak))
/*
 * /proc read callback: print one line per registered clock with its rate
 * in MHz (two decimal places) and whether it is enabled.  The list is
 * walked in reverse because clk_register() prepends with list_add(), so
 * this prints clocks in registration order.  NOTE(review): the second
 * half of the enabled test and the return are on lines not visible here.
 */
353 static int show_clocks(char *buf, char **start, off_t off,
354 int len, int *eof, void *data)
359 list_for_each_entry_reverse(clk, &clock_list, node) {
360 unsigned long rate = clk_get_rate(clk);
362 p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
363 rate / 1000000, (rate % 1000000) / 10000,
364 ((clk->flags & CLK_ALWAYS_ENABLED) ||
366 "enabled" : "disabled");
/*
 * Sysdev PM callback.  Tracks the previous PM event in a static local;
 * when resuming after hibernation (previous event was PM_EVENT_FREEZE)
 * it replays each clock's parent and rate into the hardware via the ops
 * hooks, falling back to recalc() when no set_rate op exists.
 * NOTE(review): the leading case label, the saved set_parent/set_rate
 * argument lines, the prev_state update and the return are on lines not
 * visible in this extract.
 */
373 static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
375 static pm_message_t prev_state;
378 switch (state.event) {
380 /* Resuming from hibernation */
381 if (prev_state.event != PM_EVENT_FREEZE)
384 list_for_each_entry(clkp, &clock_list, node) {
385 if (likely(clkp->ops)) {
/* Snapshot the cached rate before the ops calls below may change it. */
386 unsigned long rate = clkp->rate;
388 if (likely(clkp->ops->set_parent))
389 clkp->ops->set_parent(clkp,
391 if (likely(clkp->ops->set_rate))
392 clkp->ops->set_rate(clkp,
394 else if (likely(clkp->ops->recalc))
395 clkp->rate = clkp->ops->recalc(clkp);
399 case PM_EVENT_FREEZE:
401 case PM_EVENT_SUSPEND:
/* Resume simply re-runs the suspend handler with the PMSG_ON event. */
409 static int clks_sysdev_resume(struct sys_device *dev)
411 return clks_sysdev_suspend(dev, PMSG_ON);
/*
 * Sysdev glue binding the PM callbacks above to a clock sysdev class.
 * NOTE(review): the class name initializer and closing braces are on
 * lines not visible in this extract.
 */
414 static struct sysdev_class clks_sysdev_class = {
418 static struct sysdev_driver clks_sysdev_driver = {
419 .suspend = clks_sysdev_suspend,
420 .resume = clks_sysdev_resume,
423 static struct sys_device clks_sysdev_dev = {
424 .cls = &clks_sysdev_class,
/*
 * Register the sysdev class/driver/device triple at subsys-init time so
 * the PM callbacks above get invoked.  NOTE(review): the registration
 * return values are ignored here.
 */
427 static int __init clk_sysdev_init(void)
429 sysdev_class_register(&clks_sysdev_class);
430 sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
431 sysdev_register(&clks_sysdev_dev);
435 subsys_initcall(clk_sysdev_init);
/*
 * Framework init: requires a valid master clock rate, wires each on-chip
 * clock to its CPU-specific ops (indexed by array position), registers
 * them, runs the arch hook, then propagates rates down from master_clk
 * and bus_clk.  NOTE(review): the declarations of i/ret and the return
 * are on lines not visible in this extract.
 */
438 int __init clk_init(void)
442 BUG_ON(!master_clk.rate);
444 for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
445 struct clk *clk = onchip_clocks[i];
/* The loop index doubles as the clock "type" for the arch hook. */
447 arch_init_clk_ops(&clk->ops, i);
448 ret |= clk_register(clk);
451 ret |= arch_clk_init();
453 /* Kick the child clocks.. */
454 propagate_rate(&master_clk);
455 propagate_rate(&bus_clk);
/*
 * Expose a root-readable "clocks" entry in /proc, presumably backed by
 * show_clocks() -- the remaining create_proc_read_entry() arguments, the
 * error check on p and the return are on lines not visible in this
 * extract.
 */
460 static int __init clk_proc_init(void)
462 struct proc_dir_entry *p;
463 p = create_proc_read_entry("clocks", S_IRUSR, NULL,
470 subsys_initcall(clk_proc_init);