time: Replace __get_cpu_var uses
author: Christoph Lameter <cl@linux.com>
Sun, 17 Aug 2014 17:30:25 +0000 (12:30 -0500)
committer: Tejun Heo <tj@kernel.org>
Tue, 26 Aug 2014 17:45:44 +0000 (13:45 -0400)
Convert uses of __get_cpu_var for creating an address from a percpu
offset to this_cpu_ptr.

The two cases where get_cpu_var is used to actually access a percpu
variable are changed to use this_cpu_read/raw_cpu_read.

Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
drivers/clocksource/dummy_timer.c
kernel/irq_work.c
kernel/sched/clock.c
kernel/softirq.c
kernel/time/hrtimer.c
kernel/time/tick-broadcast.c
kernel/time/tick-common.c
kernel/time/tick-oneshot.c
kernel/time/tick-sched.c
kernel/time/timer.c

index ad35725..3199060 100644 (file)
@@ -28,7 +28,7 @@ static void dummy_timer_set_mode(enum clock_event_mode mode,
 static void dummy_timer_setup(void)
 {
        int cpu = smp_processor_id();
-       struct clock_event_device *evt = __this_cpu_ptr(&dummy_timer_evt);
+       struct clock_event_device *evt = raw_cpu_ptr(&dummy_timer_evt);
 
        evt->name       = "dummy_timer";
        evt->features   = CLOCK_EVT_FEAT_PERIODIC |
index e6bcbe7..345d19e 100644 (file)
@@ -95,11 +95,11 @@ bool irq_work_queue(struct irq_work *work)
 
        /* If the work is "lazy", handle it from next tick if any */
        if (work->flags & IRQ_WORK_LAZY) {
-               if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
+               if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                    tick_nohz_tick_stopped())
                        arch_irq_work_raise();
        } else {
-               if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
+               if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
                        arch_irq_work_raise();
        }
 
@@ -113,8 +113,8 @@ bool irq_work_needs_cpu(void)
 {
        struct llist_head *raised, *lazy;
 
-       raised = &__get_cpu_var(raised_list);
-       lazy = &__get_cpu_var(lazy_list);
+       raised = this_cpu_ptr(&raised_list);
+       lazy = this_cpu_ptr(&lazy_list);
        if (llist_empty(raised) && llist_empty(lazy))
                return false;
 
@@ -166,8 +166,8 @@ static void irq_work_run_list(struct llist_head *list)
  */
 void irq_work_run(void)
 {
-       irq_work_run_list(&__get_cpu_var(raised_list));
-       irq_work_run_list(&__get_cpu_var(lazy_list));
+       irq_work_run_list(this_cpu_ptr(&raised_list));
+       irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
 
index 3ef6451..c27e4f8 100644 (file)
@@ -134,7 +134,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
 
 static inline struct sched_clock_data *this_scd(void)
 {
-       return &__get_cpu_var(sched_clock_data);
+       return this_cpu_ptr(&sched_clock_data);
 }
 
 static inline struct sched_clock_data *cpu_sdc(int cpu)
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge