static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
-	cputime64_t retval;
+	cputime64_t idle_time;
+	cputime64_t cur_jiffies;
+	cputime64_t busy_time;
-	retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
-			kstat_cpu(cpu).cpustat.iowait);
+	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
+			kstat_cpu(cpu).cpustat.system);
-	if (dbs_tuners_ins.ignore_nice)
-		retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	return retval;
+	if (!dbs_tuners_ins.ignore_nice) {
+		busy_time = cputime64_add(busy_time,
+				kstat_cpu(cpu).cpustat.nice);
+	}
+
+	idle_time = cputime64_sub(cur_jiffies, busy_time);
+	return idle_time;
}
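
The rewritten helper no longer reads the idle counter directly; it derives idle time as total wall time minus the sum of the busy states, so the figure stays meaningful even when idle ticks are not being accounted (for example on a tickless kernel). The following is a minimal user-space sketch of that arithmetic only, not part of the patch; the struct fields are hypothetical stand-ins for the kstat_cpu() counters.

/*
 * Sketch: derive idle time as wall time minus the sum of busy states.
 * The field names are hypothetical stand-ins, not kernel definitions.
 */
#include <stdint.h>
#include <stdio.h>

struct cpu_stat {
	uint64_t user, system, irq, softirq, steal, nice;
};

static uint64_t idle_from_busy(uint64_t wall, const struct cpu_stat *st,
			       int ignore_nice)
{
	uint64_t busy = st->user + st->system + st->irq +
			st->softirq + st->steal;

	/* When nice load is not ignored, it counts as busy time too. */
	if (!ignore_nice)
		busy += st->nice;

	return wall - busy;
}

int main(void)
{
	struct cpu_stat st = { .user = 400, .system = 150, .irq = 10,
			       .softirq = 15, .steal = 0, .nice = 25 };
	uint64_t wall = 1000;	/* ticks elapsed since boot */

	printf("idle (nice counted as busy): %llu\n",
	       (unsigned long long)idle_from_busy(wall, &st, 0));
	printf("idle (nice ignored):         %llu\n",
	       (unsigned long long)idle_from_busy(wall, &st, 1));
	return 0;
}

Note that the sense of the ignore_nice test is inverted relative to the old code only because the new code accumulates busy time rather than idle time; the intended behaviour is unchanged.
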
/* ... */
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int idle_ticks, total_ticks;
-	unsigned int load;
+	unsigned int load = 0;
	cputime64_t cur_jiffies;
	struct cpufreq_policy *policy;
	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
			this_dbs_info->prev_cpu_wall);
-	this_dbs_info->prev_cpu_wall = cur_jiffies;
+	this_dbs_info->prev_cpu_wall = get_jiffies_64();
+
	if (!total_ticks)
		return;
/* ... */
		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}
-	load = (100 * (total_ticks - idle_ticks)) / total_ticks;
+	if (likely(total_ticks > idle_ticks))
+		load = (100 * (total_ticks - idle_ticks)) / total_ticks;
	/* Check for frequency increase */
	if (load > dbs_tuners_ins.up_threshold) {
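
With idle time now derived from a different source than the wall-clock delta, idle_ticks can occasionally come out larger than total_ticks; since both are unsigned, the old expression would then wrap around and report an absurdly large load. Initialising load to 0 and guarding the division avoids that. A small stand-alone illustration of the failure mode, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned int total_ticks = 100, idle_ticks = 103;	/* slight skew */
	unsigned int load = 0;

	/* Unguarded: the unsigned subtraction wraps and the "load" is bogus. */
	unsigned int bogus = (100 * (total_ticks - idle_ticks)) / total_ticks;

	/* Guarded, as in the patch: load keeps its initialised value of 0. */
	if (total_ticks > idle_ticks)
		load = (100 * (total_ticks - idle_ticks)) / total_ticks;

	printf("unguarded: %u  guarded: %u\n", bogus, load);
	return 0;
}
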
/* ... */
	dbs_info->enable = 1;
	ondemand_powersave_bias_init();
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
-	INIT_DELAYED_WORK(&dbs_info->work, do_dbs_timer);
+	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
			      delay);
}