[CPUFREQ] drivers/cpufreq: Remove unnecessary semicolons
/*
 *  drivers/cpufreq/cpufreq_conservative.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (C)  2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep the variable names short and simple.
 */

#define DEF_FREQUENCY_UP_THRESHOLD              (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD            (20)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the
 * transition latency of the processor. The governor will work on any
 * processor with a transition latency <= 10 ms, using an appropriate
 * sampling rate.
 * For CPUs with a transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL) this governor will not work.
 * All times here are in us.
 */
#define MIN_SAMPLING_RATE_RATIO                 (2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER                      (1000)
#define MIN_LATENCY_MULTIPLIER                  (100)
#define DEF_SAMPLING_DOWN_FACTOR                (1)
#define MAX_SAMPLING_DOWN_FACTOR                (10)
#define TRANSITION_LATENCY_LIMIT                (10 * 1000 * 1000)
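/* TRANSITION_LATENCY_LIMIT is in ns: 10 * 1000 * 1000 ns is the 10 ms cap above */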

static void do_dbs_timer(struct work_struct *work);

struct cpu_dbs_info_s {
        cputime64_t prev_cpu_idle;
        cputime64_t prev_cpu_wall;
        cputime64_t prev_cpu_nice;
        struct cpufreq_policy *cur_policy;
        struct delayed_work work;
        unsigned int down_skip;
        unsigned int requested_freq;
        int cpu;
        unsigned int enable:1;
        /*
         * percpu mutex that serializes governor limit change with
         * do_dbs_timer invocation. We do not want do_dbs_timer to run
         * when user is changing the governor or limits.
         */
        struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);

static unsigned int dbs_enable; /* number of CPUs using this policy */

/*
 * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
 * different CPUs. It protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int sampling_down_factor;
        unsigned int up_threshold;
        unsigned int down_threshold;
        unsigned int ignore_nice;
        unsigned int freq_step;
} dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
        .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
        .ignore_nice = 0,
        .freq_step = 5,
};
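
/*
 * These tunables are exposed through the global sysfs group created in
 * cpufreq_governor_dbs(), typically under
 * /sys/devices/system/cpu/cpufreq/conservative/, e.g.:
 *
 *   # echo 90 > /sys/devices/system/cpu/cpufreq/conservative/up_threshold
 *   # echo 10 > /sys/devices/system/cpu/cpufreq/conservative/freq_step
 */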

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
                                                        cputime64_t *wall)
{
        cputime64_t idle_time;
        cputime64_t cur_wall_time;
        cputime64_t busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
        busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
                        kstat_cpu(cpu).cpustat.system);

        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

        idle_time = cputime64_sub(cur_wall_time, busy_time);
        if (wall)
                *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);

        return (cputime64_t)jiffies_to_usecs(idle_time);
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
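        /*
         * get_cpu_idle_time_us() returns -1ULL when NO_HZ idle-time
         * accounting is not available for this CPU, in which case we
         * fall back to the jiffy-granularity estimate above.
         */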
        u64 idle_time = get_cpu_idle_time_us(cpu, wall);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);

        return idle_time;
}

/* keep track of frequency transitions */
static int
dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                     void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
                                                        freq->cpu);

        struct cpufreq_policy *policy;

        if (!this_dbs_info->enable)
                return 0;

        policy = this_dbs_info->cur_policy;

        /*
         * we only care if our internally tracked freq moves outside the
         * 'valid' range of frequencies available to us; otherwise we do
         * not change it
         */
        if (this_dbs_info->requested_freq > policy->max
                        || this_dbs_info->requested_freq < policy->min)
                this_dbs_info->requested_freq = freq->new;

        return 0;
}

static struct notifier_block dbs_cpufreq_notifier_block = {
        .notifier_call = dbs_cpufreq_notifier
};

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct kobject *kobj,
                                      struct attribute *attr, char *buf)
{
        printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
                    "sysfs file is deprecated - used by: %s\n", current->comm);
        return sprintf(buf, "%u\n", -1U);
}

static ssize_t show_sampling_rate_min(struct kobject *kobj,
                                      struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", min_sampling_rate);
}

define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);

/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object)                                     \
static ssize_t show_##file_name                                         \
(struct kobject *kobj, struct attribute *attr, char *buf)               \
{                                                                       \
        return sprintf(buf, "%u\n", dbs_tuners_ins.object);             \
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);
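
/*
 * For example, show_one(sampling_rate, sampling_rate) above expands to a
 * show_sampling_rate() helper that prints dbs_tuners_ins.sampling_rate.
 */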

/*** delete after deprecation time ***/
#define DEPRECATION_MSG(file_name)                                      \
        printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs "   \
                "interface is deprecated - " #file_name "\n");

#define show_one_old(file_name)                                         \
static ssize_t show_##file_name##_old                                   \
(struct cpufreq_policy *unused, char *buf)                              \
{                                                                       \
        printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs "   \
                "interface is deprecated - " #file_name "\n");          \
        return show_##file_name(NULL, NULL, buf);                       \
}
show_one_old(sampling_rate);
show_one_old(sampling_down_factor);
show_one_old(up_threshold);
show_one_old(down_threshold);
show_one_old(ignore_nice_load);
show_one_old(freq_step);
show_one_old(sampling_rate_min);
show_one_old(sampling_rate_max);

cpufreq_freq_attr_ro_old(sampling_rate_min);
cpufreq_freq_attr_ro_old(sampling_rate_max);

/*** delete after deprecation time ***/

static ssize_t store_sampling_down_factor(struct kobject *a,
                                          struct attribute *b,
                                          const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                return -EINVAL;

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.sampling_down_factor = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
                                   const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > 100 ||
                        input <= dbs_tuners_ins.down_threshold) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.up_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
                                    const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        /*
         * cannot be lower than 11, otherwise the frequency will never
         * fall: dbs_check_cpu() only scales down when the load is below
         * down_threshold - 10
         */
        if (ret != 1 || input < 11 || input > 100 ||
                        input >= dbs_tuners_ins.up_threshold) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.down_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
                                      const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        mutex_lock(&dbs_mutex);
        if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
                mutex_unlock(&dbs_mutex);
                return count;
        }
        dbs_tuners_ins.ignore_nice = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(cs_cpu_dbs_info, j);
                dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                &dbs_info->prev_cpu_wall);
                if (dbs_tuners_ins.ignore_nice)
                        dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
        }
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
                               const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 100)
                input = 100;

        /*
         * no need to test here if freq_step is zero as the user might
         * actually want this, they would be crazy though :)
         */
        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.freq_step = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

define_one_global_rw(sampling_rate);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(up_threshold);
define_one_global_rw(down_threshold);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(freq_step);

static struct attribute *dbs_attributes[] = {
        &sampling_rate_max.attr,
        &sampling_rate_min.attr,
        &sampling_rate.attr,
        &sampling_down_factor.attr,
        &up_threshold.attr,
        &down_threshold.attr,
        &ignore_nice_load.attr,
        &freq_step.attr,
        NULL
};

static struct attribute_group dbs_attr_group = {
        .attrs = dbs_attributes,
        .name = "conservative",
};

/*** delete after deprecation time ***/

#define write_one_old(file_name)                                        \
static ssize_t store_##file_name##_old                                  \
(struct cpufreq_policy *unused, const char *buf, size_t count)          \
{                                                                       \
        printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs "   \
                "interface is deprecated - " #file_name "\n");          \
        return store_##file_name(NULL, NULL, buf, count);               \
}
write_one_old(sampling_rate);
write_one_old(sampling_down_factor);
write_one_old(up_threshold);
write_one_old(down_threshold);
write_one_old(ignore_nice_load);
write_one_old(freq_step);

cpufreq_freq_attr_rw_old(sampling_rate);
cpufreq_freq_attr_rw_old(sampling_down_factor);
cpufreq_freq_attr_rw_old(up_threshold);
cpufreq_freq_attr_rw_old(down_threshold);
cpufreq_freq_attr_rw_old(ignore_nice_load);
cpufreq_freq_attr_rw_old(freq_step);

static struct attribute *dbs_attributes_old[] = {
        &sampling_rate_max_old.attr,
        &sampling_rate_min_old.attr,
        &sampling_rate_old.attr,
        &sampling_down_factor_old.attr,
        &up_threshold_old.attr,
        &down_threshold_old.attr,
        &ignore_nice_load_old.attr,
        &freq_step_old.attr,
        NULL
};

static struct attribute_group dbs_attr_group_old = {
        .attrs = dbs_attributes_old,
        .name = "conservative",
};

/*** delete after deprecation time ***/

/************************** sysfs end ************************/

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
        unsigned int load = 0;
        unsigned int max_load = 0;
        unsigned int freq_target;

        struct cpufreq_policy *policy;
        unsigned int j;

        policy = this_dbs_info->cur_policy;

        /*
         * Every sampling_rate we check the load: if it is above
         * up_threshold (default 80%), we try to increase the frequency;
         * if it is safely below down_threshold (default 20%), we try to
         * decrease it.
         *
         * Unlike ondemand, any frequency change happens in minimum
         * steps of freq_step (default 5%) of the maximum frequency.
         */

        /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info_s *j_dbs_info;
                cputime64_t cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;

                j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);

                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

                wall_time = (unsigned int) cputime64_sub(cur_wall_time,
                                j_dbs_info->prev_cpu_wall);
                j_dbs_info->prev_cpu_wall = cur_wall_time;

                idle_time = (unsigned int) cputime64_sub(cur_idle_time,
                                j_dbs_info->prev_cpu_idle);
                j_dbs_info->prev_cpu_idle = cur_idle_time;

                if (dbs_tuners_ins.ignore_nice) {
                        cputime64_t cur_nice;
                        unsigned long cur_nice_jiffies;

                        cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
                                         j_dbs_info->prev_cpu_nice);
                        /*
                         * Assumption: nice time between sampling periods
                         * will be less than 2^32 jiffies on 32-bit systems
                         */
                        cur_nice_jiffies = (unsigned long)
                                        cputime64_to_jiffies64(cur_nice);

                        j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
                        idle_time += jiffies_to_usecs(cur_nice_jiffies);
                }

                if (unlikely(!wall_time || wall_time < idle_time))
                        continue;

                load = 100 * (wall_time - idle_time) / wall_time;
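                /* e.g. wall_time = 100000 us, idle_time = 30000 us -> load = 70 */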

                if (load > max_load)
                        max_load = load;
        }

        /*
         * bail out early if we cannot change the frequency: the user
         * may deliberately have set freq_step to zero
         */
        if (dbs_tuners_ins.freq_step == 0)
                return;

        /* Check for frequency increase */
        if (max_load > dbs_tuners_ins.up_threshold) {
                this_dbs_info->down_skip = 0;

                /* if we are already at full speed then break out early */
                if (this_dbs_info->requested_freq == policy->max)
                        return;

                freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

                /* max freq cannot be less than 100. But who knows.... */
                if (unlikely(freq_target == 0))
                        freq_target = 5;
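
                /*
                 * e.g. freq_step = 5 and policy->max = 1000000 kHz give
                 * freq_target = 50000 kHz, so each step moves 5% of max
                 */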

                this_dbs_info->requested_freq += freq_target;
                if (this_dbs_info->requested_freq > policy->max)
                        this_dbs_info->requested_freq = policy->max;

                __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
                        CPUFREQ_RELATION_H);
                return;
        }

        /*
         * The optimal frequency is the lowest frequency that can support
         * the current CPU usage without triggering the up policy. To be
         * safe, we require the load to be 10 points below the threshold.
         */
        if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
                freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

                this_dbs_info->requested_freq -= freq_target;
                if (this_dbs_info->requested_freq < policy->min)
                        this_dbs_info->requested_freq = policy->min;

                /*
                 * if we cannot reduce the frequency anymore, break out early
                 */
                if (policy->cur == policy->min)
                        return;

                __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
                                CPUFREQ_RELATION_H);
                return;
        }
}

static void do_dbs_timer(struct work_struct *work)
{
        struct cpu_dbs_info_s *dbs_info =
                container_of(work, struct cpu_dbs_info_s, work.work);
        unsigned int cpu = dbs_info->cpu;

        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

        delay -= jiffies % delay;
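        /*
         * e.g. with delay = 10 jiffies and jiffies % 10 == 3 we wait only
         * 7 jiffies, so every CPU's timer expires on a multiple of delay
         */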

        mutex_lock(&dbs_info->timer_mutex);

        dbs_check_cpu(dbs_info);

        schedule_delayed_work_on(cpu, &dbs_info->work, delay);
        mutex_unlock(&dbs_info->timer_mutex);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        delay -= jiffies % delay;

        dbs_info->enable = 1;
        INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
        schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
        dbs_info->enable = 0;
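        /*
         * With ->enable cleared, dbs_cpufreq_notifier() will ignore this
         * CPU while the work is cancelled below.
         */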
        cancel_delayed_work_sync(&dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                   unsigned int event)
{
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info_s *this_dbs_info;
        unsigned int j;
        int rc;

        this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);

        switch (event) {
        case CPUFREQ_GOV_START:
                if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;

                mutex_lock(&dbs_mutex);

                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
                if (rc) {
                        mutex_unlock(&dbs_mutex);
                        return rc;
                }

                for_each_cpu(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;

                        j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                &j_dbs_info->prev_cpu_wall);
                        if (dbs_tuners_ins.ignore_nice) {
                                j_dbs_info->prev_cpu_nice =
                                                kstat_cpu(j).cpustat.nice;
                        }
                }
                this_dbs_info->down_skip = 0;
                this_dbs_info->requested_freq = policy->cur;

                mutex_init(&this_dbs_info->timer_mutex);
                dbs_enable++;
                /*
                 * Do the global setup (sysfs group, sampling rate,
                 * transition notifier) when this governor is used for
                 * the first time.
                 */
                if (dbs_enable == 1) {
                        unsigned int latency;
                        /* policy latency is in ns. Convert it to us first */
                        latency = policy->cpuinfo.transition_latency / 1000;
                        if (latency == 0)
                                latency = 1;

                        rc = sysfs_create_group(cpufreq_global_kobject,
                                                &dbs_attr_group);
                        if (rc) {
                                mutex_unlock(&dbs_mutex);
                                return rc;
                        }

                        /*
                         * conservative does not implement micro-accounting
                         * of idle time like the ondemand governor, thus we
                         * are bound to jiffies/HZ
                         */
                        min_sampling_rate =
                                MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
                        /* Bring kernel and HW constraints together */
                        min_sampling_rate = max(min_sampling_rate,
                                        MIN_LATENCY_MULTIPLIER * latency);
                        dbs_tuners_ins.sampling_rate =
                                max(min_sampling_rate,
                                    latency * LATENCY_MULTIPLIER);
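                        /*
                         * e.g. with HZ = 1000, min_sampling_rate is at
                         * least 2 * 10000 us = 20 ms; a transition latency
                         * of 100 us then gives a default sampling_rate of
                         * max(20 ms, 100 * 1000 us) = 100 ms
                         */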

                        cpufreq_register_notifier(
                                        &dbs_cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }
                mutex_unlock(&dbs_mutex);

                dbs_timer_init(this_dbs_info);

                break;

        case CPUFREQ_GOV_STOP:
                dbs_timer_exit(this_dbs_info);

                mutex_lock(&dbs_mutex);
                sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
                dbs_enable--;
                mutex_destroy(&this_dbs_info->timer_mutex);

                /*
                 * Unregister the transition notifier when the last CPU
                 * using this governor stops.
                 */
                if (dbs_enable == 0)
                        cpufreq_unregister_notifier(
                                        &dbs_cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);

                mutex_unlock(&dbs_mutex);
                if (!dbs_enable)
                        sysfs_remove_group(cpufreq_global_kobject,
                                           &dbs_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&this_dbs_info->timer_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
                mutex_unlock(&this_dbs_info->timer_mutex);

                break;
        }
        return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
        .name                   = "conservative",
        .governor               = cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner                  = THIS_MODULE,
};
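
/*
 * This governor is selected per CPU through the usual cpufreq sysfs
 * interface, typically:
 *
 *   # echo conservative > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 */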

static int __init cpufreq_gov_dbs_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
                "Low Latency Frequency Transition capable processors "
                "optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);