percpu: add __percpu sparse annotations to core kernel subsystems
author Tejun Heo <tj@kernel.org>
Tue, 2 Feb 2010 05:38:57 +0000 (14:38 +0900)
committer Tejun Heo <tj@kernel.org>
Wed, 17 Feb 2010 02:17:38 +0000 (11:17 +0900)
Add __percpu sparse annotations to core subsystems.

These annotations make sparse consider percpu variables to be in a
different address space and warn if they are accessed without going
through the percpu accessors.  This patch doesn't affect normal builds.
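
For reference: under sparse (__CHECKER__), __percpu expands to an
address_space attribute, and to nothing otherwise, which is why normal
builds are unaffected.  A minimal illustrative sketch (not part of this
patch) of the access pattern sparse can now check:

	#include <linux/percpu.h>

	static int percpu_example(void)
	{
		unsigned long __percpu *counter;
		int cpu;

		counter = alloc_percpu(unsigned long);
		if (!counter)
			return -ENOMEM;

		/* OK: per_cpu_ptr() converts to a plain pointer */
		for_each_possible_cpu(cpu)
			*per_cpu_ptr(counter, cpu) = 0;

		/* *counter = 0; would draw an address-space warning */

		free_percpu(counter);
		return 0;
	}

The warnings surface when building with sparse, e.g. "make C=2".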

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: linux-mm@kvack.org
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Dipankar Sarma <dipankar@in.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Eric Biederman <ebiederm@xmission.com>
include/linux/blktrace_api.h
include/linux/genhd.h
include/linux/kexec.h
include/linux/mmzone.h
include/linux/module.h
include/linux/percpu_counter.h
include/linux/srcu.h
kernel/kexec.c
kernel/sched.c
kernel/stop_machine.c
mm/percpu.c

diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3b73b99..416bf62 100644
@@ -150,8 +150,8 @@ struct blk_user_trace_setup {
 struct blk_trace {
        int trace_state;
        struct rchan *rchan;
-       unsigned long *sequence;
-       unsigned char *msg_data;
+       unsigned long __percpu *sequence;
+       unsigned char __percpu *msg_data;
        u16 act_mask;
        u64 start_lba;
        u64 end_lba;
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 9717081..56b5051 100644
@@ -101,7 +101,7 @@ struct hd_struct {
        unsigned long stamp;
        int in_flight[2];
 #ifdef CONFIG_SMP
-       struct disk_stats *dkstats;
+       struct disk_stats __percpu *dkstats;
 #else
        struct disk_stats dkstats;
 #endif
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index c356b69..03e8e8d 100644
@@ -199,7 +199,7 @@ extern struct kimage *kexec_crash_image;
  */
 extern struct resource crashk_res;
 typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
-extern note_buf_t *crash_notes;
+extern note_buf_t __percpu *crash_notes;
 extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
 extern size_t vmcoreinfo_size;
 extern size_t vmcoreinfo_max_size;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7874201..41acd4b 100644
@@ -301,7 +301,7 @@ struct zone {
        unsigned long           min_unmapped_pages;
        unsigned long           min_slab_pages;
 #endif
-       struct per_cpu_pageset  *pageset;
+       struct per_cpu_pageset __percpu *pageset;
        /*
         * free areas of different sizes
         */
diff --git a/include/linux/module.h b/include/linux/module.h
index 7e74ae0..dd618eb 100644
@@ -365,7 +365,7 @@ struct module
 
        struct module_ref {
                int count;
-       } *refptr;
+       } __percpu *refptr;
 #endif
 
 #ifdef CONFIG_CONSTRUCTORS
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index a7684a5..9bd103c 100644
@@ -21,7 +21,7 @@ struct percpu_counter {
 #ifdef CONFIG_HOTPLUG_CPU
        struct list_head list;  /* All percpu_counters are on a list */
 #endif
-       s32 *counters;
+       s32 __percpu *counters;
 };
 
 extern int percpu_counter_batch;
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4765d97..41eedcc 100644
@@ -33,7 +33,7 @@ struct srcu_struct_array {
 
 struct srcu_struct {
        int completed;
-       struct srcu_struct_array *per_cpu_ref;
+       struct srcu_struct_array __percpu *per_cpu_ref;
        struct mutex mutex;
 };
 
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ef077fb..87ebe8a 100644
@@ -41,7 +41,7 @@
 #include <asm/sections.h>
 
 /* Per cpu memory for storing cpu states in case of system crash. */
-note_buf_t* crash_notes;
+note_buf_t __percpu *crash_notes;
 
 /* vmcoreinfo stuff */
 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
diff --git a/kernel/sched.c b/kernel/sched.c
index 3a8fb30..978edfd 100644
@@ -1566,7 +1566,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static __read_mostly unsigned long *update_shares_data;
+static __read_mostly unsigned long __percpu *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -10683,7 +10683,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 struct cpuacct {
        struct cgroup_subsys_state css;
        /* cpuusage holds pointer to a u64-type object on every cpu */
-       u64 *cpuusage;
+       u64 __percpu *cpuusage;
        struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
        struct cpuacct *parent;
 };
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 912823e..9bb9fb1 100644
@@ -45,7 +45,7 @@ static int refcount;
 static struct workqueue_struct *stop_machine_wq;
 static struct stop_machine_data active, idle;
 static const struct cpumask *active_cpus;
-static void *stop_machine_work;
+static void __percpu *stop_machine_work;
 
 static void set_state(enum stopmachine_state newstate)
 {
diff --git a/mm/percpu.c b/mm/percpu.c
index b336638..768419d 100644
 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 #ifndef __addr_to_pcpu_ptr
 #define __addr_to_pcpu_ptr(addr)                                       \
-       (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr  \
-                + (unsigned long)__per_cpu_start)
+       (void __percpu *)((unsigned long)(addr) -                       \
+                         (unsigned long)pcpu_base_addr +               \
+                         (unsigned long)__per_cpu_start)
 #endif
 #ifndef __pcpu_ptr_to_addr
 #define __pcpu_ptr_to_addr(ptr)                                                \
-       (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr   \
-                - (unsigned long)__per_cpu_start)
+       (void __force *)((unsigned long)(ptr) +                         \
+                        (unsigned long)pcpu_base_addr -                \
+                        (unsigned long)__per_cpu_start)
 #endif
 
 struct pcpu_chunk {
@@ -1065,7 +1067,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 {
        static int warn_limit = 10;
        struct pcpu_chunk *chunk;
@@ -1194,7 +1196,7 @@ fail_unlock_mutex:
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_percpu(size_t size, size_t align)
+void __percpu *__alloc_percpu(size_t size, size_t align)
 {
        return pcpu_alloc(size, align, false);
 }
@@ -1215,7 +1217,7 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_reserved_percpu(size_t size, size_t align)
+void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 {
        return pcpu_alloc(size, align, true);
 }
@@ -1267,7 +1269,7 @@ static void pcpu_reclaim(struct work_struct *work)
  * CONTEXT:
  * Can be called from atomic context.
  */
-void free_percpu(void *ptr)
+void free_percpu(void __percpu *ptr)
 {
        void *addr;
        struct pcpu_chunk *chunk;