Pull cpuidle into release branch
author    Len Brown <len.brown@intel.com>
          Tue, 20 Nov 2007 06:18:37 +0000 (01:18 -0500)
committer Len Brown <len.brown@intel.com>
          Tue, 20 Nov 2007 06:18:37 +0000 (01:18 -0500)
drivers/acpi/processor_idle.c

@@@ -102,7 -102,7 +102,7 @@@ static int acpi_processor_set_power_pol
   *
   * To skip this limit, boot/load with a large max_cstate limit.
   */
 -static int set_max_cstate(struct dmi_system_id *id)
 +static int set_max_cstate(const struct dmi_system_id *id)
  {
        if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
                return 0;
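
For context, set_max_cstate() is a DMI quirk callback; the constification above lets it take entries from a read-only match table. A minimal sketch of the kind of table entry that wires it up (the vendor/version strings and the _sketch name are placeholders, not the real quirk list in processor_idle.c):

#include <linux/dmi.h>

/*
 * Illustrative entry: clamp max_cstate to 1 on a machine whose BIOS
 * advertises C-states it cannot handle. driver_data carries the cap.
 */
static struct dmi_system_id processor_power_dmi_table_sketch[] = {
	{ set_max_cstate, "Example laptop", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Example Vendor"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1ABC") }, (void *)1 },
	{ },
};

/* Run once during init: dmi_check_system(processor_power_dmi_table_sketch); */
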
@@@ -197,6 -197,19 +197,19 @@@ static inline u32 ticks_elapsed_in_us(u
                return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
  }
  
+ static void acpi_safe_halt(void)
+ {
+       current_thread_info()->status &= ~TS_POLLING;
+       /*
+        * TS_POLLING-cleared state must be visible before we
+        * test NEED_RESCHED:
+        */
+       smp_mb();
+       if (!need_resched())
+               safe_halt();
+       current_thread_info()->status |= TS_POLLING;
+ }
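
Moving acpi_safe_halt() above the #ifndef makes it available to both the classic and the cpuidle-driven paths. Its smp_mb() pairs with the scheduler's remote-wakeup path: the idle CPU must publish the cleared TS_POLLING before sampling NEED_RESCHED, or a waker that sets NEED_RESCHED and then still sees TS_POLLING would skip the wake IPI and leave this CPU halted. A simplified sketch of the waker side (resched_sketch() is a stand-in for the scheduler's real resched path, not a kernel API):

/*
 * Waker side, simplified: setting NEED_RESCHED is enough for a CPU
 * that is still polling; a CPU that cleared TS_POLLING is (about to
 * be) in hlt and needs an IPI to wake.
 */
static void resched_sketch(struct task_struct *idle_task, int cpu)
{
	set_tsk_need_resched(idle_task);
	smp_mb();	/* pairs with the smp_mb() in acpi_safe_halt() */
	if (!(task_thread_info(idle_task)->status & TS_POLLING))
		smp_send_reschedule(cpu);	/* IPI breaks the CPU out of hlt */
}
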
  #ifndef CONFIG_CPU_IDLE
  
  static void
@@@ -239,19 -252,6 +252,6 @@@ acpi_processor_power_activate(struct ac
        return;
  }
  
- static void acpi_safe_halt(void)
- {
-       current_thread_info()->status &= ~TS_POLLING;
-       /*
-        * TS_POLLING-cleared state must be visible before we
-        * test NEED_RESCHED:
-        */
-       smp_mb();
-       if (!need_resched())
-               safe_halt();
-       current_thread_info()->status |= TS_POLLING;
- }
  static atomic_t c3_cpu_count;
  
  /* Common C-state entry for C2, C3, .. */
@@@ -299,12 -299,21 +299,12 @@@ static void acpi_timer_check_state(int 
  
  static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
  {
 -#ifdef CONFIG_GENERIC_CLOCKEVENTS
        unsigned long reason;
  
        reason = pr->power.timer_broadcast_on_state < INT_MAX ?
                CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
  
        clockevents_notify(reason, &pr->id);
 -#else
 -      cpumask_t mask = cpumask_of_cpu(pr->id);
 -
 -      if (pr->power.timer_broadcast_on_state < INT_MAX)
 -              on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
 -      else
 -              on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
 -#endif
  }
  
  /* Power(C) State timer broadcast control */
@@@ -312,6 -321,8 +312,6 @@@ static void acpi_state_timer_broadcast(
                                       struct acpi_processor_cx *cx,
                                       int broadcast)
  {
 -#ifdef CONFIG_GENERIC_CLOCKEVENTS
 -
        int state = cx - pr->power.states;
  
        if (state >= pr->power.timer_broadcast_on_state) {
                unsigned long reason;

                reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
                                CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
                clockevents_notify(reason, &pr->id);
        }
 -#endif
  }
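
With the CONFIG_GENERIC_CLOCKEVENTS fallback gone, every caller goes through the clockevents layer. States at or beyond timer_broadcast_on_state stop the local APIC timer, so the idle entry paths below bracket them with ENTER/EXIT notifications; condensed to a sketch (enter_deep_state_sketch() is illustrative, not a function in this file):

/*
 * Calling pattern used by the idle entry paths below: a broadcast
 * clock device covers this CPU while its local timer is stopped in
 * the deep C-state.
 */
static void enter_deep_state_sketch(struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	acpi_state_timer_broadcast(pr, cx, 1);	/* hand timekeeping to broadcast */
	acpi_idle_do_entry(cx);			/* actually enter the C-state */
	acpi_state_timer_broadcast(pr, cx, 0);	/* local timer ticks again */
}
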
  
  #else
@@@ -1373,15 -1385,7 +1373,7 @@@ static int acpi_idle_enter_c1(struct cp
        if (pr->flags.bm_check)
                acpi_idle_update_bm_rld(pr, cx);
  
-       current_thread_info()->status &= ~TS_POLLING;
-       /*
-        * TS_POLLING-cleared state must be visible before we test
-        * NEED_RESCHED:
-        */
-       smp_mb();
-       if (!need_resched())
-               safe_halt();
-       current_thread_info()->status |= TS_POLLING;
+       acpi_safe_halt();
  
        cx->usage++;
  
@@@ -1399,6 -1403,8 +1391,8 @@@ static int acpi_idle_enter_simple(struc
        struct acpi_processor *pr;
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
        u32 t1, t2;
+       int sleep_ticks = 0;
        pr = processors[smp_processor_id()];
  
        if (unlikely(!pr))
                ACPI_FLUSH_CPU_CACHE();
  
        t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+       /* Tell the scheduler that we are going deep-idle: */
+       sched_clock_idle_sleep_event();
        acpi_state_timer_broadcast(pr, cx, 1);
        acpi_idle_do_entry(cx);
        t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
        /* TSC could halt in idle, so notify users */
        mark_tsc_unstable("TSC halts in idle");
  #endif
+       sleep_ticks = ticks_elapsed(t1, t2);
+       /* Tell the scheduler how much we idled: */
+       sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
  
        local_irq_enable();
        current_thread_info()->status |= TS_POLLING;
        cx->usage++;
  
        acpi_state_timer_broadcast(pr, cx, 0);
-       cx->time += ticks_elapsed(t1, t2);
+       cx->time += sleep_ticks;
        return ticks_elapsed_in_us(t1, t2);
  }
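
The new sleep/wakeup events exist because sched_clock() may be TSC-based and the TSC halts in deep C-states; reporting the idle period lets the scheduler keep its clock monotonic. The instrumentation reduced to a skeleton (a sketch assuming the cx and PM-timer plumbing of the surrounding function):

static int timed_idle_entry_sketch(struct acpi_processor_cx *cx)
{
	u32 t1, t2;
	int sleep_ticks;

	sched_clock_idle_sleep_event();		/* sched_clock() may stop now */
	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* The ACPI PM timer keeps counting while the CPU sleeps: */
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Report the gap in ns so the scheduler can resync its clock: */
	sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);
	return sleep_ticks;
}
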
  
@@@ -1463,6 -1475,8 +1463,8 @@@ static int acpi_idle_enter_bm(struct cp
        struct acpi_processor *pr;
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
        u32 t1, t2;
+       int sleep_ticks = 0;
        pr = processors[smp_processor_id()];
  
        if (unlikely(!pr))
        if (acpi_idle_suspend)
                return(acpi_idle_enter_c1(dev, state));
  
+       if (acpi_idle_bm_check()) {
+               if (dev->safe_state) {
+                       return dev->safe_state->enter(dev, dev->safe_state);
+               } else {
+                       acpi_safe_halt();
+                       return 0;
+               }
+       }
        local_irq_disable();
        current_thread_info()->status &= ~TS_POLLING;
        /*
                return 0;
        }
  
+       /* Tell the scheduler that we are going deep-idle: */
+       sched_clock_idle_sleep_event();
        /*
         * Must be done before busmaster disable as we might need to
         * access HPET !
         */
        acpi_state_timer_broadcast(pr, cx, 1);
  
-       if (acpi_idle_bm_check()) {
-               cx = pr->power.bm_state;
-               acpi_idle_update_bm_rld(pr, cx);
-               t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-               acpi_idle_do_entry(cx);
-               t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-       } else {
-               acpi_idle_update_bm_rld(pr, cx);
+       acpi_idle_update_bm_rld(pr, cx);
  
+       /*
+        * Disable bus master:
+        * bm_check implies we need ARB_DIS
+        * !bm_check implies we need cache flush
+        * bm_control tells us whether we can do ARB_DIS
+        *
+        * That leaves the case where bm_check is set but bm_control is
+        * not. There we cannot do much; we enter C3 without any extra
+        * preparation.
+        */
+       if (pr->flags.bm_check && pr->flags.bm_control) {
                spin_lock(&c3_lock);
                c3_cpu_count++;
                /* Disable bus master arbitration when all CPUs are in C3 */
                if (c3_cpu_count == num_online_cpus())
                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
                spin_unlock(&c3_lock);
+       } else if (!pr->flags.bm_check) {
+               ACPI_FLUSH_CPU_CACHE();
+       }
  
-               t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-               acpi_idle_do_entry(cx);
-               t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+       t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+       acpi_idle_do_entry(cx);
+       t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
  
+       /* Re-enable bus master arbitration */
+       if (pr->flags.bm_check && pr->flags.bm_control) {
                spin_lock(&c3_lock);
-               /* Re-enable bus master arbitration */
-               if (c3_cpu_count == num_online_cpus())
-                       acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+               acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
                c3_cpu_count--;
                spin_unlock(&c3_lock);
        }
        /* TSC could halt in idle, so notify users */
        mark_tsc_unstable("TSC halts in idle");
  #endif
+       sleep_ticks = ticks_elapsed(t1, t2);
+       /* Tell the scheduler how much we idled: */
+       sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
  
        local_irq_enable();
        current_thread_info()->status |= TS_POLLING;
        cx->usage++;
  
        acpi_state_timer_broadcast(pr, cx, 0);
-       cx->time += ticks_elapsed(t1, t2);
+       cx->time += sleep_ticks;
        return ticks_elapsed_in_us(t1, t2);
  }
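
The bm_check/bm_control comment above enumerates the C3 cases; the preparation policy the new code implements, pulled out into a sketch for readability (c3_prepare_sketch() is illustrative, mirroring the branches above):

/*
 * C3 preparation policy encoded above:
 *   bm_check && bm_control   -> ARB_DIS once every online CPU is in C3
 *   !bm_check                -> flush caches instead
 *   bm_check && !bm_control  -> nothing to do; enter C3 as-is
 */
static void c3_prepare_sketch(struct acpi_processor *pr)
{
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		if (++c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}
	/* bm_check && !bm_control: no safe preparation is possible */
}
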
  
@@@ -1584,12 -1617,14 +1605,14 @@@ static int acpi_processor_setup_cpuidle
                        case ACPI_STATE_C1:
                        state->flags |= CPUIDLE_FLAG_SHALLOW;
                        state->enter = acpi_idle_enter_c1;
+                       dev->safe_state = state;
                        break;
  
                        case ACPI_STATE_C2:
                        state->flags |= CPUIDLE_FLAG_BALANCED;
                        state->flags |= CPUIDLE_FLAG_TIME_VALID;
                        state->enter = acpi_idle_enter_simple;
+                       dev->safe_state = state;
                        break;
  
                        case ACPI_STATE_C3:
        if (!count)
                return -EINVAL;
  
-       /* find the deepest state that can handle active BM */
-       if (pr->flags.bm_check) {
-               for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
-                       if (pr->power.states[i].type == ACPI_STATE_C3)
-                               break;
-               pr->power.bm_state = &pr->power.states[i-1];
-       }
        return 0;
  }
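
The deleted loop pre-computed a per-processor bm_state fallback; it is superseded by the dev->safe_state pointer recorded while registering C1 and C2 above. The consumer is the check added to acpi_idle_enter_bm() earlier in this diff, repeated here as the usage pattern:

	/* On bus-master activity, demote to the recorded safe state
	 * instead of the old pre-computed bm_state: */
	if (acpi_idle_bm_check()) {
		if (dev->safe_state)
			return dev->safe_state->enter(dev, dev->safe_state);
		acpi_safe_halt();	/* nothing registered: plain C1 halt */
		return 0;
	}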