drivers/cpuidle/governors/menu.c
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos_params.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
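
/*
 * BREAK_FUZZ is the slack, in microseconds, allowed when deciding whether
 * the last wakeup came from the expected timer event or from an earlier
 * break event.  PRED_HISTORY_PCT is the percentage weight given to the
 * previous prediction when it is blended with the latest one.
 */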
#define BREAK_FUZZ	4	/* 4 us */
#define PRED_HISTORY_PCT	50

struct menu_device {
	int		last_state_idx;		/* idle state chosen by the last select */

	unsigned int	expected_us;		/* sleep length until the next timer expiry */
	unsigned int	predicted_us;		/* smoothed prediction of the actual idle time */
	unsigned int	current_predicted_us;	/* prediction made at the last wakeup */
	unsigned int	last_measured_us;	/* idle time measured at the last break event */
	unsigned int	elapsed_us;		/* idle time accumulated since the last break event */
};

static DEFINE_PER_CPU(struct menu_device, menu_devices);

/**
 * menu_select - selects the next idle state to enter
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
	int i;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		data->last_state_idx = 0;
		return 0;
	}

	/* determine the expected residency time */
	data->expected_us =
		(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;

	/* Recalculate predicted_us based on prediction_history_pct */
	data->predicted_us *= PRED_HISTORY_PCT;
	data->predicted_us += (100 - PRED_HISTORY_PCT) *
				data->current_predicted_us;
	data->predicted_us /= 100;
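	/*
	 * With PRED_HISTORY_PCT == 50 this is a simple running average: e.g.
	 * a previous prediction of 2000 us and a current one of 1000 us give
	 * (2000 * 50 + 1000 * 50) / 100 = 1500 us.
	 */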

	/* find the deepest idle state that satisfies our constraints */
	for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
		struct cpuidle_state *s = &dev->states[i];

		if (s->target_residency > data->expected_us)
			break;
		if (s->target_residency > data->predicted_us)
			break;
		if (s->exit_latency > latency_req)
			break;
	}
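
	/*
	 * The loop stops at the first state that violates a constraint (or
	 * runs off the end of the table), so i - 1 is the deepest state we
	 * are allowed to enter.
	 */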
	data->last_state_idx = i - 1;
	return i - 1;
}

/**
 * menu_reflect - attempts to guess what happened after entry
 * @dev: the CPU
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &dev->states[last_idx];
	unsigned int measured_us;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark.  As a compromise, assume we slept
	 * for one full standard timer tick.  However, be aware that this
	 * could potentially result in a suboptimal state transition.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = USEC_PER_SEC / HZ;

	/*
	 * measured_us and elapsed_us are the cumulative idle time, since the
	 * last time we were woken out of idle by an interrupt.  If adding
	 * last_idle_us would wrap the unsigned counter, clamp measured_us to
	 * the maximum value instead (-1 cast to unsigned int).
	 */
	if (data->elapsed_us <= data->elapsed_us + last_idle_us)
		measured_us = data->elapsed_us + last_idle_us;
	else
		measured_us = -1;

	/* Predict time until next break event */
	data->current_predicted_us = max(measured_us, data->last_measured_us);
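
	/*
	 * If we woke up noticeably earlier than the expected timer expiry,
	 * even allowing for BREAK_FUZZ and the state's exit latency, treat
	 * this wakeup as a break event: record the measurement and restart
	 * the accumulation.  Otherwise keep accumulating idle time until the
	 * real break event arrives.
	 */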
	if (last_idle_us + BREAK_FUZZ <
	    data->expected_us - target->exit_latency) {
		data->last_measured_us = measured_us;
		data->elapsed_us = 0;
	} else {
		data->elapsed_us = measured_us;
	}
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);

	memset(data, 0, sizeof(struct menu_device));

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

/**
 * exit_menu - exits the governor
 */
static void __exit exit_menu(void)
{
	cpuidle_unregister_governor(&menu_governor);
}

MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);