/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_DOWN_DIFFERENTIAL         (10)
#define DEF_FREQUENCY_UP_THRESHOLD              (80)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL       (3)
#define MICRO_FREQUENCY_UP_THRESHOLD            (95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE         (10000)
#define MIN_FREQUENCY_UP_THRESHOLD              (11)
#define MAX_FREQUENCY_UP_THRESHOLD              (100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * a transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with a transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in us (microseconds).
 */
#define MIN_SAMPLING_RATE_RATIO                 (2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER                      (1000)
#define MIN_LATENCY_MULTIPLIER                  (100)
#define TRANSITION_LATENCY_LIMIT                (10 * 1000 * 1000)

static void do_dbs_timer(struct work_struct *work);

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
        cputime64_t prev_cpu_idle;
        cputime64_t prev_cpu_wall;
        cputime64_t prev_cpu_nice;
        struct cpufreq_policy *cur_policy;
        struct delayed_work work;
        struct cpufreq_frequency_table *freq_table;
        unsigned int freq_lo;
        unsigned int freq_lo_jiffies;
        unsigned int freq_hi_jiffies;
        int cpu;
        unsigned int sample_type:1;
        /*
         * percpu mutex that serializes governor limit change with
         * do_dbs_timer invocation. We do not want do_dbs_timer to run
         * when the user is changing the governor or limits.
         */
        struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable; /* number of CPUs using this policy */

/*
 * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
 * different CPUs. It protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct  *kondemand_wq;

static struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int up_threshold;
        unsigned int down_differential;
        unsigned int ignore_nice;
        unsigned int powersave_bias;
} dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
        .ignore_nice = 0,
        .powersave_bias = 0,
};

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
                                                        cputime64_t *wall)
{
        cputime64_t idle_time;
        cputime64_t cur_wall_time;
        cputime64_t busy_time;
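
        /*
         * Idle time is derived indirectly: take the wall clock (in jiffies)
         * and subtract everything the kernel accounts as busy -- user,
         * system, irq, softirq, steal and nice time.
         */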
        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
        busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
                        kstat_cpu(cpu).cpustat.system);

        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

        idle_time = cputime64_sub(cur_wall_time, busy_time);
        if (wall)
                *wall = cur_wall_time;

        return idle_time;
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, wall);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);

        return idle_time;
}

/*
 * Find the right frequency to set now, with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
                                          unsigned int freq_next,
                                          unsigned int relation)
{
        unsigned int freq_req, freq_reduc, freq_avg;
        unsigned int freq_hi, freq_lo;
        unsigned int index = 0;
        unsigned int jiffies_total, jiffies_hi, jiffies_lo;
        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);

        if (!dbs_info->freq_table) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_next;
        }

        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
                        relation, &index);
        freq_req = dbs_info->freq_table[index].frequency;
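        /*
         * powersave_bias is expressed in units of 0.1% (0..1000), so a
         * value of 100 lowers the effective target frequency by 10%.
         */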
        freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
        freq_avg = freq_req - freq_reduc;

        /* Find freq bounds for freq_avg in freq_table */
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_H, &index);
        freq_lo = dbs_info->freq_table[index].frequency;
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_L, &index);
        freq_hi = dbs_info->freq_table[index].frequency;

        /* Find out how long we have to be in hi and lo freqs */
        if (freq_hi == freq_lo) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_lo;
        }
        jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
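        /*
         * Split the sampling window so that the time-weighted average of
         * freq_hi and freq_lo equals freq_avg:
         *   jiffies_hi / jiffies_total = (freq_avg - freq_lo) / (freq_hi - freq_lo)
         * Illustrative figures: freq_lo = 1000000 kHz, freq_hi = 2000000 kHz,
         * freq_avg = 1600000 kHz and jiffies_total = 10 give jiffies_hi = 6
         * and jiffies_lo = 4.
         */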
        jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
        jiffies_hi += ((freq_hi - freq_lo) / 2);
        jiffies_hi /= (freq_hi - freq_lo);
        jiffies_lo = jiffies_total - jiffies_hi;
        dbs_info->freq_lo = freq_lo;
        dbs_info->freq_lo_jiffies = jiffies_lo;
        dbs_info->freq_hi_jiffies = jiffies_hi;
        return freq_hi;
}

static void ondemand_powersave_bias_init_cpu(int cpu)
{
        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
        dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
        dbs_info->freq_lo = 0;
}

static void ondemand_powersave_bias_init(void)
{
        int i;
        for_each_online_cpu(i) {
                ondemand_powersave_bias_init_cpu(i);
        }
}

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
        printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
               "sysfs file is deprecated - used by: %s\n", current->comm);
        return sprintf(buf, "%u\n", -1U);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", min_sampling_rate);
}

#define define_one_ro(_name)            \
static struct freq_attr _name =         \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)                                     \
static ssize_t show_##file_name                                         \
(struct cpufreq_policy *unused, char *buf)                              \
{                                                                       \
        return sprintf(buf, "%u\n", dbs_tuners_ins.object);             \
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
                return -EINVAL;
        }

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.up_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        mutex_lock(&dbs_mutex);
        if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
                mutex_unlock(&dbs_mutex);
                return count;
        }
        dbs_tuners_ins.ignore_nice = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(cpu_dbs_info, j);
                dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                &dbs_info->prev_cpu_wall);
                if (dbs_tuners_ins.ignore_nice)
                        dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;

        }
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 1000)
                input = 1000;

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.powersave_bias = input;
        ondemand_powersave_bias_init();
        mutex_unlock(&dbs_mutex);

        return count;
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
        &sampling_rate_max.attr,
        &sampling_rate_min.attr,
        &sampling_rate.attr,
        &up_threshold.attr,
        &ignore_nice_load.attr,
        &powersave_bias.attr,
        NULL
};

static struct attribute_group dbs_attr_group = {
        .attrs = dbs_attributes,
        .name = "ondemand",
};

/************************** sysfs end ************************/

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
        unsigned int max_load_freq;

        struct cpufreq_policy *policy;
        unsigned int j;

        this_dbs_info->freq_lo = 0;
        policy = this_dbs_info->cur_policy;

        /*
         * Every sampling_rate we check whether the current idle time is
         * less than 20% (default); if so, we try to increase the frequency.
         * Every sampling_rate we also look for the lowest frequency which
         * can sustain the load while keeping idle time over 30%. If such a
         * frequency exists, we try to decrease to this frequency.
         *
         * Any frequency increase takes it to the maximum frequency.
         * Frequency reduction happens in minimum steps of 5% (default) of
         * the current frequency.
         */

        /* Get Absolute Load - in terms of freq */
        max_load_freq = 0;

        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info_s *j_dbs_info;
                cputime64_t cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                unsigned int load, load_freq;
                int freq_avg;

                j_dbs_info = &per_cpu(cpu_dbs_info, j);

                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

                wall_time = (unsigned int) cputime64_sub(cur_wall_time,
                                j_dbs_info->prev_cpu_wall);
                j_dbs_info->prev_cpu_wall = cur_wall_time;

                idle_time = (unsigned int) cputime64_sub(cur_idle_time,
                                j_dbs_info->prev_cpu_idle);
                j_dbs_info->prev_cpu_idle = cur_idle_time;

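                /*
                 * When ignore_nice is set, time spent in niced tasks is
                 * folded back into idle time so that it does not count
                 * towards the load.
                 */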
                if (dbs_tuners_ins.ignore_nice) {
                        cputime64_t cur_nice;
                        unsigned long cur_nice_jiffies;

                        cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
                                         j_dbs_info->prev_cpu_nice);
                        /*
                         * Assumption: nice time between sampling periods will
                         * be less than 2^32 jiffies on 32-bit systems.
                         */
                        cur_nice_jiffies = (unsigned long)
                                        cputime64_to_jiffies64(cur_nice);

                        j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
                        idle_time += jiffies_to_usecs(cur_nice_jiffies);
                }

                if (unlikely(!wall_time || wall_time < idle_time))
                        continue;

                load = 100 * (wall_time - idle_time) / wall_time;

                freq_avg = __cpufreq_driver_getavg(policy, j);
                if (freq_avg <= 0)
                        freq_avg = policy->cur;

                load_freq = load * freq_avg;
                if (load_freq > max_load_freq)
                        max_load_freq = load_freq;
        }

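        /*
         * max_load_freq is each CPU's load (in percent) scaled by that CPU's
         * measured average frequency, so comparing it against
         * up_threshold * policy->cur asks whether the busiest CPU would
         * exceed up_threshold percent load at the current target frequency.
         * Illustrative figures: load = 90 and freq_avg = policy->cur =
         * 800000 kHz give max_load_freq = 72000000, which exceeds
         * 80 * 800000 = 64000000 with the default up_threshold of 80, so
         * the governor jumps to policy->max.
         */
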
        /* Check for frequency increase */
        if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
                /* if we are already at full speed then break out early */
                if (!dbs_tuners_ins.powersave_bias) {
                        if (policy->cur == policy->max)
                                return;

                        __cpufreq_driver_target(policy, policy->max,
                                CPUFREQ_RELATION_H);
                } else {
                        int freq = powersave_bias_target(policy, policy->max,
                                        CPUFREQ_RELATION_H);
                        __cpufreq_driver_target(policy, freq,
                                CPUFREQ_RELATION_L);
                }
                return;
        }

        /* Check for frequency decrease */
        /* if we cannot reduce the frequency anymore, break out early */
        if (policy->cur == policy->min)
                return;

        /*
         * The optimal frequency is the lowest frequency that can support the
         * current CPU usage without triggering the up policy. To be safe,
         * we stay 10 percentage points under the threshold.
         */
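        /*
         * Below, freq_next = max_load_freq / (up_threshold - down_differential)
         * is the lowest frequency at which the measured load would still sit
         * just under (up_threshold - down_differential) percent.
         * Illustrative figures: max_load_freq = 40000000 (50% load at
         * 800000 kHz) divided by 80 - 10 = 70 gives freq_next ~= 571428 kHz,
         * which the target call then maps onto a real table frequency.
         */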
        if (max_load_freq <
            (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
             policy->cur) {
                unsigned int freq_next;
                freq_next = max_load_freq /
                                (dbs_tuners_ins.up_threshold -
                                 dbs_tuners_ins.down_differential);

                if (!dbs_tuners_ins.powersave_bias) {
                        __cpufreq_driver_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                } else {
                        int freq = powersave_bias_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                        __cpufreq_driver_target(policy, freq,
                                CPUFREQ_RELATION_L);
                }
        }
}

static void do_dbs_timer(struct work_struct *work)
{
        struct cpu_dbs_info_s *dbs_info =
                container_of(work, struct cpu_dbs_info_s, work.work);
        unsigned int cpu = dbs_info->cpu;
        int sample_type = dbs_info->sample_type;

        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

        delay -= jiffies % delay;
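        /*
         * The modulo above aligns the expiry to the next multiple of delay,
         * so all CPUs sample on roughly the same jiffy instead of drifting
         * apart.
         */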
        mutex_lock(&dbs_info->timer_mutex);

        /* Common NORMAL_SAMPLE setup */
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        if (!dbs_tuners_ins.powersave_bias ||
            sample_type == DBS_NORMAL_SAMPLE) {
                dbs_check_cpu(dbs_info);
                if (dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
                        dbs_info->sample_type = DBS_SUB_SAMPLE;
                        delay = dbs_info->freq_hi_jiffies;
                }
        } else {
                __cpufreq_driver_target(dbs_info->cur_policy,
                        dbs_info->freq_lo, CPUFREQ_RELATION_H);
        }
        queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
        mutex_unlock(&dbs_info->timer_mutex);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        delay -= jiffies % delay;

        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
        queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
                delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
        cancel_delayed_work_sync(&dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                   unsigned int event)
{
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info_s *this_dbs_info;
        unsigned int j;
        int rc;

        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

        switch (event) {
        case CPUFREQ_GOV_START:
                if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;

                mutex_lock(&dbs_mutex);

                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
                if (rc) {
                        mutex_unlock(&dbs_mutex);
                        return rc;
                }

                dbs_enable++;
                for_each_cpu(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;

                        j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                &j_dbs_info->prev_cpu_wall);
                        if (dbs_tuners_ins.ignore_nice) {
                                j_dbs_info->prev_cpu_nice =
                                                kstat_cpu(j).cpustat.nice;
                        }
                }
                this_dbs_info->cpu = cpu;
                ondemand_powersave_bias_init_cpu(cpu);
                mutex_init(&this_dbs_info->timer_mutex);
                /*
                 * Start the timer schedule work when this governor
                 * is used for the first time.
                 */
                if (dbs_enable == 1) {
                        unsigned int latency;
                        /* policy latency is in ns; convert it to us first */
                        latency = policy->cpuinfo.transition_latency / 1000;
                        if (latency == 0)
                                latency = 1;
                        /* Bring kernel and HW constraints together */
                        min_sampling_rate = max(min_sampling_rate,
                                        MIN_LATENCY_MULTIPLIER * latency);
                        dbs_tuners_ins.sampling_rate =
                                max(min_sampling_rate,
                                    latency * LATENCY_MULTIPLIER);
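                        /*
                         * Illustrative figures: a transition latency of
                         * 10000 ns (10 us) yields a default sampling_rate of
                         * 10 * LATENCY_MULTIPLIER = 10000 us (10 ms), unless
                         * min_sampling_rate is larger.
                         */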
                }
                mutex_unlock(&dbs_mutex);

                dbs_timer_init(this_dbs_info);
                break;

        case CPUFREQ_GOV_STOP:
                dbs_timer_exit(this_dbs_info);

                mutex_lock(&dbs_mutex);
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                mutex_destroy(&this_dbs_info->timer_mutex);
                dbs_enable--;
                mutex_unlock(&dbs_mutex);

                break;

        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&this_dbs_info->timer_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                policy->min, CPUFREQ_RELATION_L);
                mutex_unlock(&this_dbs_info->timer_mutex);
                break;
        }
        return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
        .name                   = "ondemand",
        .governor               = cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner                  = THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
        int err;
        cputime64_t wall;
        u64 idle_time;
        int cpu = get_cpu();

        idle_time = get_cpu_idle_time_us(cpu, &wall);
        put_cpu();
        if (idle_time != -1ULL) {
                /* Idle micro accounting is supported. Use finer thresholds */
                dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
                dbs_tuners_ins.down_differential =
                                        MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
                /*
                 * In the nohz/micro-accounting case we set the minimum
                 * sampling rate to a fixed (very low) value that does not
                 * depend on HZ. The deferrable timer may skip some samples
                 * while the CPU is idle/sleeping, which is the intent.
                 */
                min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
        } else {
                /* For correct statistics, we need 10 ticks for each measure */
                min_sampling_rate =
                        MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
        }

        kondemand_wq = create_workqueue("kondemand");
        if (!kondemand_wq) {
                printk(KERN_ERR "Creation of kondemand failed\n");
                return -EFAULT;
        }
        err = cpufreq_register_governor(&cpufreq_gov_ondemand);
        if (err)
                destroy_workqueue(kondemand_wq);

        return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_ondemand);
        destroy_workqueue(kondemand_wq);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
        "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);