/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in uS.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE			\
		(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE			\
		(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
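
/*
 * Illustrative sizing (figures assumed, not taken from any particular CPU):
 * a driver reporting a transition latency of 100 uS gives def_sampling_rate
 * = 100 * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER = 100000 uS, i.e. one sample
 * every 100 ms; sysfs writes are then bounded to the range
 * [def_sampling_rate / 2, 500 * def_sampling_rate] by MIN_SAMPLING_RATE and
 * MAX_SAMPLING_RATE.
 */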
static void do_dbs_timer(struct work_struct *work);

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	int cpu;
	unsigned int enable:1,
		     sample_type:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable;	/* number of CPUs using this policy */
/*
 * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
 * lock and dbs_mutex. cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken,
 * then cpu_hotplug lock should be taken before that. Note that cpu_hotplug
 * lock is recursive for the same process. -Venki
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct	*kondemand_wq;
static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int powersave_bias;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};
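
/*
 * Units of the tunables above, for reference: sampling_rate is in uS;
 * up_threshold and down_differential are percentage points of load;
 * powersave_bias is in thousandths, e.g. a value of 100 biases the
 * requested frequency down by 10%.
 */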
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = cur_wall_time;

	return idle_time;
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}
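
/*
 * get_cpu_idle_time_us() reports -1ULL when NO_HZ idle accounting is not
 * active, in which case the jiffy-granularity fallback above is used.
 */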
/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;
	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
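
/*
 * Worked example for the hi/lo split above (all figures illustrative):
 * with powersave_bias = 200 (20%) and freq_req = 2000 MHz, freq_avg is
 * 1600 MHz. If the table only offers 1500 and 2000 MHz, a 10-jiffy
 * sampling period is split into jiffies_hi = 2 and jiffies_lo = 8, so the
 * time-averaged frequency is (2*2000 + 8*1500)/10 = 1600 MHz as intended.
 */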
static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
		dbs_info->freq_table = cpufreq_frequency_get_table(i);
		dbs_info->freq_lo = 0;
	}
}
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}
static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_min "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}
#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_SAMPLING_RATE
			|| input < MIN_SAMPLING_RATE) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.sampling_rate = input;
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	if (input > 1000)
		input = 1000;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	mutex_unlock(&dbs_mutex);

	return count;
}
#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);
static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	NULL
};
static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};
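
/*
 * These attributes appear under the policy's kobject once the governor is
 * started, e.g. (exact path depends on the CPU/policy in question):
 *   /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
 */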
/************************** sysfs end ************************/
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;

	struct cpufreq_policy *policy;
	unsigned int j;

	if (!this_dbs_info->enable)
		return;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;
	/*
	 * Every sampling_rate we check whether the current idle time is less
	 * than 20% (default); if it is, we try to increase the frequency.
	 * Every sampling_rate we also look for the lowest frequency which can
	 * sustain the load while keeping idle time over 30%. If such a
	 * frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of 5% (default)
	 * of the current frequency.
	 */
	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;
		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
						 j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}
		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}
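
	/*
	 * Illustrative note: a CPU busy for 90% of the window contributes
	 * load = 90 and load_freq = 90 * freq_avg. Because max_load_freq
	 * is compared against up_threshold * policy->cur below, a fully
	 * loaded CPU running at half of policy->cur yields load_freq =
	 * 50 * policy->cur and does not by itself cross the default
	 * up_threshold of 80.
	 */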
	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* if we are already at full speed then break out early */
		if (!dbs_tuners_ins.powersave_bias) {
			if (policy->cur == policy->max)
				return;

			__cpufreq_driver_target(policy, policy->max,
				CPUFREQ_RELATION_H);
		} else {
			int freq = powersave_bias_target(policy, policy->max,
					CPUFREQ_RELATION_H);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
		return;
	}
	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;
	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we target 10 points under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
	}
}
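
/*
 * Worked example for the decrease path in dbs_check_cpu() (illustrative
 * figures): with up_threshold = 80 and down_differential = 10, a
 * max_load_freq of 35 * policy->cur gives freq_next = 35 * policy->cur / 70,
 * i.e. half the current frequency - the lowest speed at which the measured
 * load would still sit at the 70-point safety level.
 */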
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	delay -= jiffies % delay;
	if (lock_policy_rwsem_write(cpu) < 0)
		return;

	if (!dbs_info->enable) {
		unlock_policy_rwsem_write(cpu);
		return;
	}
	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
	}
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
	unlock_policy_rwsem_write(cpu);
}
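
/*
 * Note on the powersave_bias flow above: a NORMAL sample that sets freq_lo
 * re-arms the work after freq_hi_jiffies; the subsequent SUB sample then
 * drops to freq_lo for the remainder of the period, which realizes the
 * time-averaged frequency computed by powersave_bias_target().
 */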
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	dbs_info->enable = 1;
	ondemand_powersave_bias_init();
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
			      delay);
}
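
/*
 * The work item above is deferrable, so an idle CPU is not woken up merely
 * to take a sample; the pending sample runs when the CPU next wakes anyway.
 */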
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	dbs_info->enable = 0;
	cancel_delayed_work(&dbs_info->work);
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);
		dbs_enable++;

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
		if (rc) {
			dbs_enable--;
			mutex_unlock(&dbs_mutex);
			return rc;
		}
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->cpu = cpu;
		/*
		 * Start the timer schedule work when this governor
		 * is used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate = latency *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
				def_sampling_rate = MIN_STAT_SAMPLING_RATE;

			dbs_tuners_ins.sampling_rate = def_sampling_rate;
		}
		dbs_timer_init(this_dbs_info);

		mutex_unlock(&dbs_mutex);
		break;
	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		dbs_timer_exit(this_dbs_info);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);

		break;
	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
						policy->max,
						CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
						policy->min,
						CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);
		break;
	}
	return 0;
}
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};
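
/*
 * The governor is selected per policy at runtime, e.g.:
 *   echo ondemand > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 */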
static int __init cpufreq_gov_dbs_init(void)
{
	int err;
	cputime64_t wall;
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, &wall);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
	}

	kondemand_wq = create_workqueue("kondemand");
	if (!kondemand_wq) {
		printk(KERN_ERR "Creation of kondemand failed\n");
		return -EFAULT;
	}
	err = cpufreq_register_governor(&cpufreq_gov_ondemand);
	if (err)
		destroy_workqueue(kondemand_wq);

	return err;
}
static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
	destroy_workqueue(kondemand_wq);
}
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);