Merge branch 'stable/generic' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0a360d1..0492101 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -997,8 +997,7 @@ x86_perf_event_set_period(struct perf_event *event)
 
 static void x86_pmu_enable_event(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       if (cpuc->enabled)
+       if (__this_cpu_read(cpu_hw_events.enabled))
                __x86_pmu_enable_event(&event->hw,
                                       ARCH_PERFMON_EVENTSEL_ENABLE);
 }
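[Annotation] The hunk above is the pattern this whole commit applies: instead of materializing a pointer to this CPU's instance with __get_cpu_var() and then dereferencing a field, the field is accessed directly with __this_cpu_read(). A minimal sketch of the before/after, using a hypothetical demo_state per-cpu struct that is not part of the patch:

#include <linux/percpu.h>
#include <linux/printk.h>

struct demo_state {
        int enabled;
};
static DEFINE_PER_CPU(struct demo_state, demo_state);

static void demo_old(void)
{
        /* Old style: compute this CPU's address, then load the field. */
        struct demo_state *st = &__get_cpu_var(demo_state);

        if (st->enabled)
                pr_debug("enabled on this cpu\n");
}

static void demo_new(void)
{
        /* New style: on x86 this can compile to a single %gs-relative
         * load, with no intermediate pointer. */
        if (__this_cpu_read(demo_state.enabled))
                pr_debug("enabled on this cpu\n");
}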
@@ -1272,7 +1271,7 @@ perf_event_nmi_handler(struct notifier_block *self,
                break;
        case DIE_NMIUNKNOWN:
                this_nmi = percpu_read(irq_stat.__nmi_count);
-               if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+               if (this_nmi != __this_cpu_read(pmu_nmi.marked))
                        /* let the kernel handle the unknown nmi */
                        return NOTIFY_DONE;
                /*
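[Annotation] Note the untouched context line above still uses percpu_read(), the older x86-specific accessor; __this_cpu_read() is the generic API this commit migrates to. A sketch only, showing the two spellings side by side (both compile to a %gs-relative load on x86):

static void nmi_count_sketch(void)
{
        unsigned int a = percpu_read(irq_stat.__nmi_count);     /* x86-only accessor */
        unsigned int b = __this_cpu_read(irq_stat.__nmi_count); /* generic accessor */

        pr_debug("nmi count: %u %u\n", a, b);
}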
@@ -1296,8 +1295,8 @@ perf_event_nmi_handler(struct notifier_block *self,
        this_nmi = percpu_read(irq_stat.__nmi_count);
        if ((handled > 1) ||
                /* the next nmi could be a back-to-back nmi */
-           ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
-            (__get_cpu_var(pmu_nmi).handled > 1))) {
+           ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
+            (__this_cpu_read(pmu_nmi.handled) > 1))) {
                /*
                 * We could have two subsequent back-to-back nmis: The
                 * first handles more than one counter, the 2nd
@@ -1308,8 +1307,8 @@ perf_event_nmi_handler(struct notifier_block *self,
                 * handling more than one counter. We will mark the
                 * next (3rd) and then drop it if unhandled.
                 */
-               __get_cpu_var(pmu_nmi).marked   = this_nmi + 1;
-               __get_cpu_var(pmu_nmi).handled  = handled;
+               __this_cpu_write(pmu_nmi.marked, this_nmi + 1);
+               __this_cpu_write(pmu_nmi.handled, handled);
        }
 
        return NOTIFY_STOP;
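[Annotation] The three hunks above all touch the per-cpu pmu_nmi bookkeeping that lets the handler swallow an expected back-to-back NMI instead of reporting it as unknown. The state is declared elsewhere in this file, presumably along these lines; the helper below is a hypothetical condensation of the marking step, not code from the patch:

struct pmu_nmi_state {
        unsigned int    marked;  /* __nmi_count at which a back-to-back NMI is expected */
        int             handled; /* number of events the marked NMI handled */
};
static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);

static void mark_next_nmi(unsigned int this_nmi, int handled)
{
        /* A second NMI may already be latched while this one is being
         * handled; remember its expected count so that an otherwise
         * unknown NMI can be dropped. Each write is one %gs-relative
         * store with the new __this_cpu_write() form. */
        __this_cpu_write(pmu_nmi.marked, this_nmi + 1);
        __this_cpu_write(pmu_nmi.handled, handled);
}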
@@ -1484,11 +1483,9 @@ static inline void x86_pmu_read(struct perf_event *event)
  */
 static void x86_pmu_start_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
        perf_pmu_disable(pmu);
-       cpuc->group_flag |= PERF_EVENT_TXN;
-       cpuc->n_txn = 0;
+       __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
+       __this_cpu_write(cpu_hw_events.n_txn, 0);
 }
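[Annotation] Besides dropping a local, the rewrite above turns an open-coded read-modify-write into __this_cpu_or(). A sketch of the difference, using hypothetical helper names around the same fields the patch touches:

static void start_txn_open_coded(void)
{
        /* What the old code did: compute this CPU's address, then a
         * separate load, or, and store on the flag word. */
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        cpuc->group_flag |= PERF_EVENT_TXN;
        cpuc->n_txn = 0;
}

static void start_txn_percpu_ops(void)
{
        /* The replacement: on x86 each line can become a single
         * %gs-prefixed instruction (orl/movl against
         * %gs:cpu_hw_events+offset). */
        __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
        __this_cpu_write(cpu_hw_events.n_txn, 0);
}

The non-preempt-checked __this_cpu_*() variants suffice here, presumably because these paths already run with the pmu disabled and no risk of migrating between CPUs mid-update.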
 
 /*
@@ -1498,14 +1495,12 @@ static void x86_pmu_start_txn(struct pmu *pmu)
  */
 static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-       cpuc->group_flag &= ~PERF_EVENT_TXN;
+       __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
        /*
         * Truncate the collected events.
         */
-       cpuc->n_added -= cpuc->n_txn;
-       cpuc->n_events -= cpuc->n_txn;
+       __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
+       __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
        perf_pmu_enable(pmu);
 }
 
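[Annotation] For completeness, a sketch of what cancel_txn now does. Unlike the patch, this version reads n_txn once into a local, which would be an equally valid (and arguably cheaper) formulation; the hunk above instead issues __this_cpu_read() twice inline:

static void cancel_txn_sketch(void)
{
        int txn = __this_cpu_read(cpu_hw_events.n_txn);

        /* Roll back the events collected inside the aborted transaction;
         * __this_cpu_sub() is an in-place, single-instruction subtract
         * on x86. */
        __this_cpu_sub(cpu_hw_events.n_added, txn);
        __this_cpu_sub(cpu_hw_events.n_events, txn);
}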