Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 13 Apr 2015 18:08:28 +0000 (11:08 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 13 Apr 2015 18:08:28 +0000 (11:08 -0700)
Pull timer updates from Ingo Molnar:
 "The main changes in this cycle were:

   - clockevents state machine cleanups and enhancements (Viresh Kumar)

   - clockevents broadcast notifier horror to state machine conversion
     and related cleanups (Thomas Gleixner, Rafael J Wysocki); the new
     explicit control calls are sketched right after this list

   - clocksource and timekeeping core updates (John Stultz)

   - clocksource driver updates and fixes (Ben Dooks, Dmitry Osipenko,
     Hans de Goede, Laurent Pinchart, Maxime Ripard, Xunlei Pang)

   - y2038 fixes (Xunlei Pang, John Stultz)

   - NMI-safe ktime_get_raw_fast() and general refactoring of the clock
     code, in preparation for perf's per event clock ID support (Peter
     Zijlstra)

   - generic sched/clock fixes, optimizations and cleanups (Daniel
     Thompson)

   - clockevents cpu_down() race fix (Preeti U Murthy)"
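
A hedged sketch of the converted API (not part of this merge; the function
below and its name are hypothetical, only the tick_* helpers are real):
instead of passing CLOCK_EVT_NOTIFY_* reasons to clockevents_notify(), idle
code now calls explicit helpers declared in <linux/tick.h>, exactly as the
process.c and processor_idle.c hunks further down do.

    #include <linux/tick.h>

    /* Hypothetical deep-idle entry path; only the tick_* calls are real API. */
    static void example_enter_deep_cstate(void)
    {
            /*
             * The CPU-local tick device may stop in this C-state, so hand
             * timer duty over to the broadcast device before entering ...
             */
            tick_broadcast_enter();

            /* ... architecture specific low-power entry would go here ... */

            /* ... and reclaim the local tick after wakeup. */
            tick_broadcast_exit();
    }

The periodic controls follow the same pattern: tick_broadcast_enable() and
tick_broadcast_disable() replace the BROADCAST_ON/OFF notifications (see
__lapic_timer_propagate_broadcast() below), and tick_broadcast_force()
replaces BROADCAST_FORCE (see amd_e400_idle()).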

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (94 commits)
  timers/PM: Drop unnecessary braces from tick_freeze()
  timers/PM: Fix up tick_unfreeze()
  timekeeping: Get rid of stale comment
  clockevents: Cleanup dead cpu explicitely
  clockevents: Make tick handover explicit
  clockevents: Remove broadcast oneshot control leftovers
  sched/idle: Use explicit broadcast oneshot control function
  ARM: Tegra: Use explicit broadcast oneshot control function
  ARM: OMAP: Use explicit broadcast oneshot control function
  intel_idle: Use explicit broadcast oneshot control function
  ACPI/idle: Use explicit broadcast control function
  ACPI/PAD: Use explicit broadcast oneshot control function
  x86/amd/idle, clockevents: Use explicit broadcast oneshot control functions
  clockevents: Provide explicit broadcast oneshot control functions
  clockevents: Remove the broadcast control leftovers
  ARM: OMAP: Use explicit broadcast control function
  intel_idle: Use explicit broadcast control function
  cpuidle: Use explicit broadcast control function
  ACPI/processor: Use explicit broadcast control function
  ACPI/PAD: Use explicit broadcast control function
  ...

arch/x86/kernel/process.c
arch/x86/kvm/x86.c
drivers/acpi/processor_idle.c

diff --combined arch/x86/kernel/process.c
@@@ -9,7 -9,7 +9,7 @@@
  #include <linux/sched.h>
  #include <linux/module.h>
  #include <linux/pm.h>
- #include <linux/clockchips.h>
+ #include <linux/tick.h>
  #include <linux/random.h>
  #include <linux/user-return-notifier.h>
  #include <linux/dmi.h>
@@@ -24,7 -24,6 +24,7 @@@
  #include <asm/syscalls.h>
  #include <asm/idle.h>
  #include <asm/uaccess.h>
 +#include <asm/mwait.h>
  #include <asm/i387.h>
  #include <asm/fpu-internal.h>
  #include <asm/debugreg.h>
@@@ -378,14 -377,11 +378,11 @@@ static void amd_e400_idle(void
  
                if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
                        cpumask_set_cpu(cpu, amd_e400_c1e_mask);
-                       /*
-                        * Force broadcast so ACPI can not interfere.
-                        */
-                       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
-                                          &cpu);
+                       /* Force broadcast so ACPI can not interfere. */
+                       tick_broadcast_force();
                        pr_info("Switch to broadcast mode on CPU%d\n", cpu);
                }
-               clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+               tick_broadcast_enter();
  
                default_idle();
  
                 * called with interrupts disabled.
                 */
                local_irq_disable();
-               clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+               tick_broadcast_exit();
                local_irq_enable();
        } else
                default_idle();
  }
  
 +/*
 + * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 + * We can't rely on cpuidle installing MWAIT, because it will not load
 + * on systems that support only C1 -- so the boot default must be MWAIT.
 + *
 + * Some AMD machines are the opposite, they depend on using HALT.
 + *
 + * So for default C1, which is used during boot until cpuidle loads,
 + * use MWAIT-C1 on Intel HW that has it, else use HALT.
 + */
 +static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 +{
 +      if (c->x86_vendor != X86_VENDOR_INTEL)
 +              return 0;
 +
 +      if (!cpu_has(c, X86_FEATURE_MWAIT))
 +              return 0;
 +
 +      return 1;
 +}
 +
 +/*
 + * MONITOR/MWAIT with no hints, used for the default C1 state.
 + * This invokes MWAIT with interrupts enabled and no flags,
 + * which is backwards compatible with the original MWAIT implementation.
 + */
 +
 +static void mwait_idle(void)
 +{
 +      if (!current_set_polling_and_test()) {
 +              if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
 +                      smp_mb(); /* quirk */
 +                      clflush((void *)&current_thread_info()->flags);
 +                      smp_mb(); /* quirk */
 +              }
 +
 +              __monitor((void *)&current_thread_info()->flags, 0, 0);
 +              if (!need_resched())
 +                      __sti_mwait(0, 0);
 +              else
 +                      local_irq_enable();
 +      } else {
 +              local_irq_enable();
 +      }
 +      __current_clr_polling();
 +}
 +
  void select_idle_routine(const struct cpuinfo_x86 *c)
  {
  #ifdef CONFIG_SMP
                /* E400: APIC timer interrupt does not wake up CPU from C1e */
                pr_info("using AMD E400 aware idle routine\n");
                x86_idle = amd_e400_idle;
 +      } else if (prefer_mwait_c1_over_halt(c)) {
 +              pr_info("using mwait in idle threads\n");
 +              x86_idle = mwait_idle;
        } else
                x86_idle = default_idle;
  }
diff --combined arch/x86/kvm/x86.c
@@@ -801,17 -801,6 +801,17 @@@ unsigned long kvm_get_cr8(struct kvm_vc
  }
  EXPORT_SYMBOL_GPL(kvm_get_cr8);
  
 +static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
 +{
 +      int i;
 +
 +      if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
 +              for (i = 0; i < KVM_NR_DB_REGS; i++)
 +                      vcpu->arch.eff_db[i] = vcpu->arch.db[i];
 +              vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
 +      }
 +}
 +
  static void kvm_update_dr6(struct kvm_vcpu *vcpu)
  {
        if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
@@@ -1081,19 -1070,19 +1081,19 @@@ static void update_pvclock_gtod(struct 
        struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
        u64 boot_ns;
  
-       boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));
+       boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
  
        write_seqcount_begin(&vdata->seq);
  
        /* copy pvclock gtod data */
-       vdata->clock.vclock_mode        = tk->tkr.clock->archdata.vclock_mode;
-       vdata->clock.cycle_last         = tk->tkr.cycle_last;
-       vdata->clock.mask               = tk->tkr.mask;
-       vdata->clock.mult               = tk->tkr.mult;
-       vdata->clock.shift              = tk->tkr.shift;
+       vdata->clock.vclock_mode        = tk->tkr_mono.clock->archdata.vclock_mode;
+       vdata->clock.cycle_last         = tk->tkr_mono.cycle_last;
+       vdata->clock.mask               = tk->tkr_mono.mask;
+       vdata->clock.mult               = tk->tkr_mono.mult;
+       vdata->clock.shift              = tk->tkr_mono.shift;
  
        vdata->boot_ns                  = boot_ns;
-       vdata->nsec_base                = tk->tkr.xtime_nsec;
+       vdata->nsec_base                = tk->tkr_mono.xtime_nsec;
  
        write_seqcount_end(&vdata->seq);
  }
@@@ -3160,7 -3149,6 +3160,7 @@@ static int kvm_vcpu_ioctl_x86_set_debug
                return -EINVAL;
  
        memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
 +      kvm_update_dr0123(vcpu);
        vcpu->arch.dr6 = dbgregs->dr6;
        kvm_update_dr6(vcpu);
        vcpu->arch.dr7 = dbgregs->dr7;
@@@ -4126,8 -4114,8 +4126,8 @@@ static int vcpu_mmio_write(struct kvm_v
        do {
                n = min(len, 8);
                if (!(vcpu->arch.apic &&
 -                    !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
 -                  && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
 +                    !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
 +                  && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
                        break;
                handled += n;
                addr += n;
@@@ -4146,9 -4134,8 +4146,9 @@@ static int vcpu_mmio_read(struct kvm_vc
        do {
                n = min(len, 8);
                if (!(vcpu->arch.apic &&
 -                    !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
 -                  && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
 +                    !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
 +                                       addr, n, v))
 +                  && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
                        break;
                trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
                handled += n;
@@@ -4488,8 -4475,7 +4488,8 @@@ mmio
        return X86EMUL_CONTINUE;
  }
  
 -int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
 +static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
 +                      unsigned long addr,
                        void *val, unsigned int bytes,
                        struct x86_exception *exception,
                        const struct read_write_emulator_ops *ops)
@@@ -4552,7 -4538,7 +4552,7 @@@ static int emulator_read_emulated(struc
                                   exception, &read_emultor);
  }
  
 -int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
 +static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
                            unsigned long addr,
                            const void *val,
                            unsigned int bytes,
@@@ -4643,10 -4629,10 +4643,10 @@@ static int kernel_pio(struct kvm_vcpu *
        int r;
  
        if (vcpu->arch.pio.in)
 -              r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
 +              r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
                                    vcpu->arch.pio.size, pd);
        else
 -              r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
 +              r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
                                     vcpu->arch.pio.port, vcpu->arch.pio.size,
                                     pd);
        return r;
@@@ -4719,7 -4705,7 +4719,7 @@@ static void emulator_invlpg(struct x86_
        kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
  }
  
 -int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 +int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
  {
        if (!need_emulate_wbinvd(vcpu))
                return X86EMUL_CONTINUE;
                wbinvd();
        return X86EMUL_CONTINUE;
  }
 +
 +int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 +{
 +      kvm_x86_ops->skip_emulated_instruction(vcpu);
 +      return kvm_emulate_wbinvd_noskip(vcpu);
 +}
  EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
  
 +
 +
  static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
  {
 -      kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
 +      kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
  }
  
 -int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 +static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
 +                         unsigned long *dest)
  {
        return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
  }
  
 -int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 +static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 +                         unsigned long value)
  {
  
        return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
@@@ -5840,7 -5816,7 +5840,7 @@@ void kvm_arch_exit(void
        free_percpu(shared_msrs);
  }
  
 -int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 +int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
  {
        ++vcpu->stat.halt_exits;
        if (irqchip_in_kernel(vcpu->kvm)) {
                return 0;
        }
  }
 +EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
 +
 +int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 +{
 +      kvm_x86_ops->skip_emulated_instruction(vcpu);
 +      return kvm_vcpu_halt(vcpu);
 +}
  EXPORT_SYMBOL_GPL(kvm_emulate_halt);
  
  int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
@@@ -5934,7 -5903,7 +5934,7 @@@ static void kvm_pv_kick_cpu_op(struct k
        lapic_irq.dest_id = apicid;
  
        lapic_irq.delivery_mode = APIC_DM_REMRD;
 -      kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL);
 +      kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
  }
  
  int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
        unsigned long nr, a0, a1, a2, a3, ret;
        int op_64_bit, r = 1;
  
 +      kvm_x86_ops->skip_emulated_instruction(vcpu);
 +
        if (kvm_hv_hypercall_enabled(vcpu->kvm))
                return kvm_hv_hypercall(vcpu);
  
@@@ -6197,7 -6164,7 +6197,7 @@@ void kvm_arch_mmu_notifier_invalidate_p
  }
  
  /*
 - * Returns 1 to let __vcpu_run() continue the guest execution loop without
 + * Returns 1 to let vcpu_run() continue the guest execution loop without
   * exiting to the userspace.  Otherwise, the value will be returned to the
   * userspace.
   */
@@@ -6334,7 -6301,6 +6334,7 @@@ static int vcpu_enter_guest(struct kvm_
                set_debugreg(vcpu->arch.eff_db[2], 2);
                set_debugreg(vcpu->arch.eff_db[3], 3);
                set_debugreg(vcpu->arch.dr6, 6);
 +              vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
        }
  
        trace_kvm_entry(vcpu->vcpu_id);
        return r;
  }
  
 +static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 +{
 +      if (!kvm_arch_vcpu_runnable(vcpu)) {
 +              srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 +              kvm_vcpu_block(vcpu);
 +              vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 +              if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
 +                      return 1;
 +      }
 +
 +      kvm_apic_accept_events(vcpu);
 +      switch(vcpu->arch.mp_state) {
 +      case KVM_MP_STATE_HALTED:
 +              vcpu->arch.pv.pv_unhalted = false;
 +              vcpu->arch.mp_state =
 +                      KVM_MP_STATE_RUNNABLE;
 +      case KVM_MP_STATE_RUNNABLE:
 +              vcpu->arch.apf.halted = false;
 +              break;
 +      case KVM_MP_STATE_INIT_RECEIVED:
 +              break;
 +      default:
 +              return -EINTR;
 +              break;
 +      }
 +      return 1;
 +}
  
 -static int __vcpu_run(struct kvm_vcpu *vcpu)
 +static int vcpu_run(struct kvm_vcpu *vcpu)
  {
        int r;
        struct kvm *kvm = vcpu->kvm;
  
        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
  
 -      r = 1;
 -      while (r > 0) {
 +      for (;;) {
                if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
                    !vcpu->arch.apf.halted)
                        r = vcpu_enter_guest(vcpu);
 -              else {
 -                      srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 -                      kvm_vcpu_block(vcpu);
 -                      vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 -                      if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
 -                              kvm_apic_accept_events(vcpu);
 -                              switch(vcpu->arch.mp_state) {
 -                              case KVM_MP_STATE_HALTED:
 -                                      vcpu->arch.pv.pv_unhalted = false;
 -                                      vcpu->arch.mp_state =
 -                                              KVM_MP_STATE_RUNNABLE;
 -                              case KVM_MP_STATE_RUNNABLE:
 -                                      vcpu->arch.apf.halted = false;
 -                                      break;
 -                              case KVM_MP_STATE_INIT_RECEIVED:
 -                                      break;
 -                              default:
 -                                      r = -EINTR;
 -                                      break;
 -                              }
 -                      }
 -              }
 -
 +              else
 +                      r = vcpu_block(kvm, vcpu);
                if (r <= 0)
                        break;
  
                        r = -EINTR;
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        ++vcpu->stat.request_irq_exits;
 +                      break;
                }
  
                kvm_check_async_pf_completion(vcpu);
                        r = -EINTR;
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        ++vcpu->stat.signal_exits;
 +                      break;
                }
                if (need_resched()) {
                        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
@@@ -6609,7 -6568,7 +6609,7 @@@ int kvm_arch_vcpu_ioctl_run(struct kvm_
        } else
                WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
  
 -      r = __vcpu_run(vcpu);
 +      r = vcpu_run(vcpu);
  
  out:
        post_kvm_run_save(vcpu);
@@@ -7116,14 -7075,11 +7116,14 @@@ void kvm_vcpu_reset(struct kvm_vcpu *vc
        kvm_clear_exception_queue(vcpu);
  
        memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
 +      kvm_update_dr0123(vcpu);
        vcpu->arch.dr6 = DR6_INIT;
        kvm_update_dr6(vcpu);
        vcpu->arch.dr7 = DR7_FIXED_1;
        kvm_update_dr7(vcpu);
  
 +      vcpu->arch.cr2 = 0;
 +
        kvm_make_request(KVM_REQ_EVENT, vcpu);
        vcpu->arch.apf.msr_val = 0;
        vcpu->arch.st.msr_val = 0;
@@@ -7284,7 -7240,7 +7284,7 @@@ int kvm_arch_vcpu_init(struct kvm_vcpu 
  
        vcpu->arch.pv.pv_unhalted = false;
        vcpu->arch.emulate_ctxt.ops = &emulate_ops;
 -      if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
 +      if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        else
                vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
        vcpu->arch.guest_supported_xcr0 = 0;
        vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
  
 +      vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
 +
        kvm_async_pf_hash_reset(vcpu);
        kvm_pmu_init(vcpu);
  
@@@ -7474,7 -7428,7 +7474,7 @@@ void kvm_arch_free_memslot(struct kvm *
  
        for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
                if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
 -                      kvm_kvfree(free->arch.rmap[i]);
 +                      kvfree(free->arch.rmap[i]);
                        free->arch.rmap[i] = NULL;
                }
                if (i == 0)
  
                if (!dont || free->arch.lpage_info[i - 1] !=
                             dont->arch.lpage_info[i - 1]) {
 -                      kvm_kvfree(free->arch.lpage_info[i - 1]);
 +                      kvfree(free->arch.lpage_info[i - 1]);
                        free->arch.lpage_info[i - 1] = NULL;
                }
        }
@@@ -7536,12 -7490,12 +7536,12 @@@ int kvm_arch_create_memslot(struct kvm 
  
  out_free:
        for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
 -              kvm_kvfree(slot->arch.rmap[i]);
 +              kvfree(slot->arch.rmap[i]);
                slot->arch.rmap[i] = NULL;
                if (i == 0)
                        continue;
  
 -              kvm_kvfree(slot->arch.lpage_info[i - 1]);
 +              kvfree(slot->arch.lpage_info[i - 1]);
                slot->arch.lpage_info[i - 1] = NULL;
        }
        return -ENOMEM;
@@@ -7663,23 -7617,6 +7663,23 @@@ void kvm_arch_commit_memory_region(stru
        /* It's OK to get 'new' slot here as it has already been installed */
        new = id_to_memslot(kvm->memslots, mem->slot);
  
 +      /*
 +       * Dirty logging tracks sptes in 4k granularity, meaning that large
 +       * sptes have to be split.  If live migration is successful, the guest
 +       * in the source machine will be destroyed and large sptes will be
 +       * created in the destination. However, if the guest continues to run
 +       * in the source machine (for example if live migration fails), small
 +       * sptes will remain around and cause bad performance.
 +       *
 +       * Scan sptes if dirty logging has been stopped, dropping those
 +       * which can be collapsed into a single large-page spte.  Later
 +       * page faults will create the large-page sptes.
 +       */
 +      if ((change != KVM_MR_DELETE) &&
 +              (old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
 +              !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
 +              kvm_mmu_zap_collapsible_sptes(kvm, new);
 +
        /*
         * Set up write protection and/or dirty logging for the new slot.
         *
diff --combined drivers/acpi/processor_idle.c
@@@ -32,7 -32,7 +32,7 @@@
  #include <linux/acpi.h>
  #include <linux/dmi.h>
  #include <linux/sched.h>       /* need_resched() */
- #include <linux/clockchips.h>
+ #include <linux/tick.h>
  #include <linux/cpuidle.h>
  #include <linux/syscore_ops.h>
  #include <acpi/processor.h>
@@@ -157,12 -157,11 +157,11 @@@ static void lapic_timer_check_state(in
  static void __lapic_timer_propagate_broadcast(void *arg)
  {
        struct acpi_processor *pr = (struct acpi_processor *) arg;
-       unsigned long reason;
  
-       reason = pr->power.timer_broadcast_on_state < INT_MAX ?
-               CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
-       clockevents_notify(reason, &pr->id);
+       if (pr->power.timer_broadcast_on_state < INT_MAX)
+               tick_broadcast_enable();
+       else
+               tick_broadcast_disable();
  }
  
  static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
@@@ -179,11 -178,10 +178,10 @@@ static void lapic_timer_state_broadcast
        int state = cx - pr->power.states;
  
        if (state >= pr->power.timer_broadcast_on_state) {
-               unsigned long reason;
-               reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
-                       CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
-               clockevents_notify(reason, &pr->id);
+               if (broadcast)
+                       tick_broadcast_enter();
+               else
+                       tick_broadcast_exit();
        }
  }
  
@@@ -922,7 -920,7 +920,7 @@@ static int acpi_processor_setup_cpuidle
                return -EINVAL;
  
        drv->safe_state_index = -1;
 -      for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
 +      for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
                drv->states[i].name[0] = '\0';
                drv->states[i].desc[0] = '\0';
        }