/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>

#include <trace/events/power.h>

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
						"cpufreq-core", msg)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - All holders of the lock should check to make sure that the CPU they
 *   are concerned with is online after they get the lock.
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem, as the top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)					\
int lock_policy_rwsem_##mode						\
(int cpu)								\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);

lock_policy_rwsem(write, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);

void unlock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);

void unlock_policy_rwsem_write(int cpu)
{
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
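
/*
 * Illustrative sketch only (not part of the original file; the function
 * name is hypothetical): a typical reader takes the per-policy rwsem in
 * read mode, does its lookup, and releases the semaphore on every exit
 * path. The lock helpers above already re-check that the CPU is online.
 */
#if 0
static unsigned int example_peek_cur_freq(unsigned int cpu)
{
	unsigned int freq = 0;
	struct cpufreq_policy *policy;

	if (lock_policy_rwsem_read(cpu) < 0)	/* CPU went offline */
		return 0;

	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		freq = policy->cur;

	unlock_policy_rwsem_read(cpu);
	return freq;
}
#endif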

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;

	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);
	if (!data)
		goto err_out_put_module;

	if (!kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
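
/*
 * Illustrative sketch only (not part of the original file): every
 * successful cpufreq_cpu_get() must be paired with a cpufreq_cpu_put(),
 * since the former pins both the policy kobject and the driver module.
 */
#if 0
static void example_use_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return;		/* no driver loaded, or CPU not managed */

	printk(KERN_DEBUG "cpu %u: %u..%u kHz\n",
	       cpu, policy->min, policy->max);

	cpufreq_cpu_put(policy);	/* drop kobject + module refs */
}
#endif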

/*********************************************************************
 *                     UNIFIED DEBUG HELPERS                         *
 *********************************************************************/
#ifdef CONFIG_CPU_FREQ_DEBUG

/* what part(s) of the CPUfreq subsystem are debugged? */
static unsigned int debug;

/* is the debug output ratelimited using printk_ratelimit? Users can
 * set or modify this value.
 */
static unsigned int debug_ratelimit = 1;

/* is the printk_ratelimiting enabled? It's enabled after a successful
 * loading of a cpufreq driver, temporarily disabled when a new policy
 * is set, and disabled upon cpufreq driver removal.
 */
static unsigned int disable_ratelimit = 1;
static DEFINE_SPINLOCK(disable_ratelimit_lock);

static void cpufreq_debug_enable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	if (disable_ratelimit)
		disable_ratelimit--;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

static void cpufreq_debug_disable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	disable_ratelimit++;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

void cpufreq_debug_printk(unsigned int type, const char *prefix,
			const char *fmt, ...)
{
	char s[256];
	va_list args;
	unsigned int len;
	unsigned long flags;

	if (!(type & debug))
		return;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	if (!disable_ratelimit && debug_ratelimit
					&& !printk_ratelimit()) {
		spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);

	len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
	va_start(args, fmt);
	len += vsnprintf(&s[len], (256 - len), fmt, args);
	va_end(args);
	printk(s);
}
EXPORT_SYMBOL(cpufreq_debug_printk);

module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
			" 2 to debug drivers, and 4 to debug governors.");

module_param(debug_ratelimit, uint, 0644);
MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
					" set to 0 to disable ratelimiting.");

#else /* !CONFIG_CPU_FREQ_DEBUG */

static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }

#endif /* CONFIG_CPU_FREQ_DEBUG */

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		dprintk("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		dprintk("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
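
/*
 * Worked example (illustrative only, not part of the original file):
 * cpufreq_scale(old, div, mult) evaluates old * mult / div. With a
 * reference loops_per_jiffy of 2000000 saved at 1000000 kHz, a switch
 * to 2000000 kHz gives cpufreq_scale(2000000, 1000000, 2000000)
 * == 4000000, i.e. the delay-loop calibration doubles with the clock.
 */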

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	dprintk("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				dprintk("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
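
/*
 * Illustrative sketch only (not part of the original file; the helper
 * is hypothetical): a scaling driver brackets the actual hardware
 * reprogramming with the PRECHANGE/POSTCHANGE pair, so notifiers and
 * the loops_per_jiffy adjustment see both edges of the transition.
 */
#if 0
static int example_driver_set_freq(struct cpufreq_policy *policy,
				   unsigned int new_khz)
{
	struct cpufreq_freqs freqs;

	freqs.cpu = policy->cpu;
	freqs.old = policy->cur;
	freqs.new = new_khz;

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	/* ... program PLL / voltage hardware here ... */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return 0;
}
#endif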

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);
		if (t == NULL) {
			int ret;
			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
							str_governor);
			if (name) {
				mutex_unlock(&cpufreq_governor_mutex);
				ret = request_module("%s", name);
				mutex_lock(&cpufreq_governor_mutex);
				if (ret == 0)
					t = __find_governor(str_governor);
				kfree(name);
			}
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
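
/*
 * For reference (expansion sketch, not extra code): show_one(scaling_cur_freq,
 * cur) above expands to
 *
 *	static ssize_t show_scaling_cur_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 *
 * i.e. one read-only sysfs accessor per "unsigned int" policy field.
 */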

static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret = -EINVAL;					\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret = -EINVAL;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	if (cpumask_empty(policy->related_cpus))
		return show_cpus(policy->cpus, buf);
	return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

/*
 * Returns:
 *   Negative: Failure
 *   0:        Success
 *   Positive: When we have a managed CPU and the sysfs got symlinked
 */
static int cpufreq_add_dev_policy(unsigned int cpu,
				  struct cpufreq_policy *policy,
				  struct sys_device *sys_dev)
{
	int ret = 0;
#ifdef CONFIG_SMP
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;

	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		dprintk("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;

		if (cpu == j)
			continue;

		/* Check for existing affected CPUs.
		 * They may not be aware of it due to CPU Hotplug.
		 * cpufreq_cpu_put is called when the device is removed
		 * in __cpufreq_remove_dev()
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu */
			unlock_policy_rwsem_write(cpu);
			per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0) {
				/* Should not go through policy unlock path */
				if (cpufreq_driver->exit)
					cpufreq_driver->exit(policy);
				cpufreq_cpu_put(managed_policy);
				return -EBUSY;
			}

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpumask_copy(managed_policy->cpus, policy->cpus);
			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			dprintk("CPU already managed, adding link\n");
			ret = sysfs_create_link(&sys_dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret)
				cpufreq_cpu_put(managed_policy);
			/*
			 * Success. We only needed to be added to the mask.
			 * Call driver->exit() because only the cpu parent of
			 * the kobj needed to call init().
			 */
			if (cpufreq_driver->exit)
				cpufreq_driver->exit(policy);

			if (!ret)
				return 1;
			else
				return ret;
		}
	}
#endif
	return ret;
}

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct sys_device *cpu_sys_dev;

		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		dprintk("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_sys_dev = get_cpu_sysdev(j);
		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}

static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct sys_device *sys_dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &sys_dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		if (!cpu_online(j))
			continue;
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		dprintk("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with CPU hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0, found = 0;
	struct cpufreq_policy *policy;
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	ret = -ENOMEM;
	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;
	ret = (lock_policy_rwsem_write(cpu) < 0);
	WARN_ON(ret);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* Set governor before ->init, so that driver could check it */
#ifdef CONFIG_HOTPLUG_CPU
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cp->governor &&
		    (cpumask_test_cpu(cpu, cp->related_cpus))) {
			policy->governor = cp->governor;
			found = 1;
			break;
		}
	}
#endif
	if (!found)
		policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		dprintk("initialization failed\n");
		goto err_unlock_policy;
	}
	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
	if (ret) {
		if (ret > 0)
			/* This is a managed cpu, symlink created,
			   exit with 0 */
			ret = 0;
		goto err_unlock_policy;
	}

	ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
	if (ret)
		goto err_out_unregister;

	unlock_policy_rwsem_write(cpu);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	dprintk("initialization complete\n");
	cpufreq_debug_enable_ratelimit();

	return 0;

err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_unlock_policy:
	unlock_policy_rwsem_write(cpu);
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}

/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
#ifdef CONFIG_SMP
	struct sys_device *cpu_sys_dev;
	unsigned int j;
#endif

	cpufreq_debug_disable_ratelimit();
	dprintk("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return -EINVAL;
	}
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		dprintk("removing link\n");
		cpumask_clear_cpu(cpu, data->cpus);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		kobj = &sys_dev->kobj;
		cpufreq_cpu_put(data);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		sysfs_remove_link(kobj, "cpufreq");
		return 0;
	}
#endif

#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
			CPUFREQ_NAME_LEN);
#endif

	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
	 * the sysfs links afterwards.
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			per_cpu(cpufreq_cpu_data, j) = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			dprintk("removing link for cpu %u\n", j);
#ifdef CONFIG_HOTPLUG_CPU
			strncpy(per_cpu(cpufreq_cpu_governor, j),
				data->governor->name, CPUFREQ_NAME_LEN);
#endif
			cpu_sys_dev = get_cpu_sysdev(j);
			kobj = &cpu_sys_dev->kobj;
			unlock_policy_rwsem_write(cpu);
			sysfs_remove_link(kobj, "cpufreq");
			lock_policy_rwsem_write(cpu);
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

	kobj = &data->kobj;
	cmp = &data->kobj_unregister;
	unlock_policy_rwsem_write(cpu);
	kobject_put(kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	dprintk("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	dprintk("wait complete\n");

	lock_policy_rwsem_write(cpu);
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);
	unlock_policy_rwsem_write(cpu);

	free_cpumask_var(data->related_cpus);
	free_cpumask_var(data->cpus);
	kfree(data);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	cpufreq_debug_enable_ratelimit();
	return 0;
}

static int cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	if (unlikely(lock_policy_rwsem_write(cpu)))
		BUG();

	retval = __cpufreq_remove_dev(sys_dev);
	return retval;
}

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	dprintk("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're
 *	in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.cpu = cpu;
	freqs.old = old_freq;
	freqs.new = new_freq;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
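
/*
 * Illustrative sketch only (not part of the original file): callers that
 * merely want the cached frequency, e.g. for reporting, can use
 * cpufreq_quick_get() and treat 0 as "unknown / not managed".
 */
#if 0
static void example_report_freq(unsigned int cpu)
{
	unsigned int khz = cpufreq_quick_get(cpu);

	if (khz)
		printk(KERN_INFO "cpu%u last set to %u kHz\n", cpu, khz);
}
#endif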

static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current (static) frequency of the CPU.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

/**
 * cpufreq_suspend - let the low level driver prepare for suspend
 */
static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
	int ret = 0;

	int cpu = sysdev->id;
	struct cpufreq_policy *cpu_policy;

	dprintk("suspending cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu))
		goto out;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

out:
	cpufreq_cpu_put(cpu_policy);
	return ret;
}

/**
 * cpufreq_resume - restore proper CPU frequency handling after resume
 *
 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *     restored. It will verify that the current freq is in sync with
 *     what we believe it to be. This is a bit later than when it
 *     should be, but nonetheless it's better than calling
 *     cpufreq_driver->get() here which might re-enable interrupts...
 */
static int cpufreq_resume(struct sys_device *sysdev)
{
	int ret = 0;

	int cpu = sysdev->id;
	struct cpufreq_policy *cpu_policy;

	dprintk("resuming cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu))
		goto fail;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
	return ret;
}

static struct sysdev_driver cpufreq_sysdev_driver = {
	.add		= cpufreq_add_dev,
	.remove		= cpufreq_remove_dev,
	.suspend	= cpufreq_suspend,
	.resume		= cpufreq_resume,
};

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
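
/*
 * Illustrative sketch only (not part of the original file; names are
 * hypothetical): a transition notifier sees each change twice, with
 * val distinguishing the CPUFREQ_PRECHANGE and CPUFREQ_POSTCHANGE legs.
 */
#if 0
static int example_transition(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u: %u -> %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_transition,
};

/* cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER); */
#endif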

/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);

/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;

	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
		target_freq, relation);
	if (cpu_online(policy->cpu) && cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
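
/*
 * Illustrative sketch only (not part of the original file): governors
 * normally run with the policy rwsem already held and therefore call
 * __cpufreq_driver_target() directly; external callers use the locked
 * cpufreq_driver_target() wrapper above instead.
 */
#if 0
static void example_gov_go_max(struct cpufreq_policy *policy)
{
	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}
#endif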

int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (cpu_online(cpu) && cpufreq_driver->getavg)
		ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);

/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;

		printk(KERN_WARNING "%s governor failed, too long"
		       " transition latency of HW, fallback"
		       " to %s governor\n",
		       policy->governor->name,
		       gov->name);
		policy->governor = gov;
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	dprintk("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
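
/*
 * Illustrative sketch only (not part of the original file; the governor
 * is hypothetical): a minimal governor supplies a name and a callback
 * multiplexed on the GOV_START/STOP/LIMITS events used above.
 */
#if 0
static int example_governor_fn(struct cpufreq_policy *policy,
			       unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		return __cpufreq_driver_target(policy, policy->max,
					       CPUFREQ_RELATION_H);
	case CPUFREQ_GOV_STOP:
		break;
	}
	return 0;
}

static struct cpufreq_governor example_governor = {
	.name		= "example",
	.governor	= example_governor_fn,
	.owner		= THIS_MODULE,
};

/* cpufreq_register_governor(&example_governor); */
#endif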

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);

/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);

/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	dprintk("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		dprintk("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			dprintk("governor switch\n");

			/* end old governor */
			if (data->governor) {
				/*
				 * Need to release the rwsem around governor
				 * stop due to lock dependency between
				 * cancel_delayed_work_sync and the read lock
				 * taken in the delayed work handler.
				 */
				unlock_policy_rwsem_write(data->cpu);
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				lock_policy_rwsem_write(data->cpu);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				dprintk("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		dprintk("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}

/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	dprintk("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			dprintk("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
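
/*
 * Illustrative sketch only (not part of the original file): platform code
 * that learns of an external limit change (e.g. a thermal cap) re-runs
 * policy evaluation via cpufreq_update_policy(); the new limits are then
 * applied through the CPUFREQ_ADJUST notifier leg in __cpufreq_set_policy().
 */
#if 0
static void example_thermal_event(unsigned int cpu)
{
	cpufreq_update_policy(cpu);
}
#endif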

static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	if (sys_dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			if (unlikely(lock_policy_rwsem_write(cpu)))
				BUG();

			__cpufreq_remove_dev(sys_dev);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier =
{
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	dprintk("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = sysdev_driver_register(&cpu_sysdev_class,
					&cpufreq_sysdev_driver);

	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			dprintk("no CPU initialized for driver %s\n",
							driver_data->name);
			sysdev_driver_unregister(&cpu_sysdev_class,
						&cpufreq_sysdev_driver);

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpufreq_driver = NULL;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		}
	}

	if (!ret) {
		register_hotcpu_notifier(&cpufreq_cpu_notifier);
		dprintk("driver %s up and running\n", driver_data->name);
		cpufreq_debug_enable_ratelimit();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
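
/*
 * Illustrative sketch only (not part of the original file; all names and
 * frequencies are hypothetical): the minimum a ->target style driver must
 * provide before calling cpufreq_register_driver() is init/verify/target.
 */
#if 0
static int example_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.min_freq = 200000;		/* kHz */
	policy->cpuinfo.max_freq = 800000;
	policy->cpuinfo.transition_latency = 100000;	/* ns */
	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;
	policy->cur = 800000;
	return 0;
}

static int example_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}

static int example_target(struct cpufreq_policy *policy,
			  unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs = {
		.cpu = policy->cpu,
		.old = policy->cur,
		.new = target_freq,
	};

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	/* ... program the hardware ... */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	return 0;
}

static struct cpufreq_driver example_driver = {
	.name	= "example",
	.init	= example_init,
	.verify	= example_verify,
	.target	= example_target,
	.owner	= THIS_MODULE,
};

/* cpufreq_register_driver(&example_driver); */
#endif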

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	cpufreq_debug_disable_ratelimit();

	if (!cpufreq_driver || (driver != cpufreq_driver)) {
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}

	dprintk("unregistering driver %s\n", driver->name);

	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

static int __init cpufreq_core_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
						&cpu_sysdev_class.kset.kobj);
	BUG_ON(!cpufreq_global_kobject);

	return 0;
}
core_initcall(cpufreq_core_init);