percpu: fix list_head init bug in __percpu_counter_init()
[pandora-kernel.git] lib/percpu_counter.c
/*
 * Fast batching percpu counters.
 */
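/*
 * Illustrative usage sketch (not part of this file; the caller-side names
 * "my_stats" and "nr_events" are made up): a percpu_counter is normally
 * embedded in a larger object, set up once with percpu_counter_init(),
 * updated from hot paths with percpu_counter_inc()/percpu_counter_add(),
 * read precisely with percpu_counter_sum(), and freed with
 * percpu_counter_destroy():
 *
 *	struct my_stats {
 *		struct percpu_counter nr_events;
 *	};
 *
 *	int my_stats_setup(struct my_stats *s)
 *	{
 *		return percpu_counter_init(&s->nr_events, 0);
 *	}
 *
 *	void my_stats_event(struct my_stats *s)
 *	{
 *		percpu_counter_inc(&s->nr_events);	// cheap fast path
 *	}
 *
 *	s64 my_stats_total(struct my_stats *s)
 *	{
 *		return percpu_counter_sum(&s->nr_events);	// slow, precise
 *	}
 *
 *	void my_stats_teardown(struct my_stats *s)
 *	{
 *		percpu_counter_destroy(&s->nr_events);
 *	}
 */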

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>

static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);

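/*
 * Set the counter to @amount: zero each possible CPU's local delta and
 * store @amount as the new global count, all under fbc->lock.
 */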
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;

	spin_lock(&fbc->lock);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);

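/*
 * Add @amount to the current CPU's local delta; get_cpu()/put_cpu() keep
 * us on that CPU meanwhile.  Only when the local delta reaches +/-@batch
 * is it folded into fbc->count under fbc->lock, so the common case
 * touches nothing but a per-cpu s32.  The percpu_counter_add() wrapper
 * in <linux/percpu_counter.h> passes percpu_counter_batch as @batch.
 */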
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	s32 *pcount;
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
	if (count >= batch || count <= -batch) {
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		*pcount = count;
	}
	put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);

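/*
 * Because of the batching above, the cheap percpu_counter_read() of
 * fbc->count may be off by roughly percpu_counter_batch *
 * num_online_cpus(); percpu_counter_compare() below allows for exactly
 * that slack before falling back to a precise sum.
 */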
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	spin_unlock(&fbc->lock);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);

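/*
 * Initialise @fbc to @amount.  @key gives fbc->lock its own lockdep
 * class; callers normally use the percpu_counter_init() macro from
 * <linux/percpu_counter.h>, which supplies a static lock_class_key.
 * With CONFIG_HOTPLUG_CPU the counter is also added to the global
 * percpu_counters list so the hotplug callback below can find it.
 */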
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key)
{
	spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

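/*
 * Undo __percpu_counter_init(): unlink the counter from the global list
 * (when CPU hotplug is enabled) and free its per-cpu storage.  Calling
 * this on a counter whose init failed is safe, as fbc->counters is NULL.
 */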
void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;

#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	mutex_unlock(&percpu_counters_lock);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

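/*
 * Scale percpu_counter_batch with the machine: at least 32, and
 * 2 * num_online_cpus() on larger systems.  Recomputed from the hotplug
 * callback whenever a CPU comes or goes.
 */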
static void compute_batch_value(void)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
}

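/*
 * CPU hotplug callback: refresh the batch value on every event and, when
 * a CPU dies, fold its leftover local delta from every registered
 * counter into the corresponding global count so nothing is lost.
 */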
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu;
	struct percpu_counter *fbc;

	compute_batch_value();
	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	mutex_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		spin_unlock_irqrestore(&fbc->lock, flags);
	}
	mutex_unlock(&percpu_counters_lock);
#endif
	return NOTIFY_OK;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 *
 * The cheap percpu_counter_read() value can be off by at most about
 * percpu_counter_batch * num_online_cpus(); only when @rhs lies within
 * that window of the rough count do we pay for a precise sum.
 */
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	s64	count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(percpu_counter_compare);

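/*
 * Boot-time setup: compute the initial batch value and register the CPU
 * hotplug notifier above.
 */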
static int __init percpu_counter_startup(void)
{
	compute_batch_value();
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);