net/core/flow.c
/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <linux/security.h>

struct flow_cache_entry {
        union {
                struct hlist_node       hlist;
                struct list_head        gc_list;
        } u;
        u16                             family;
        u8                              dir;
        u32                             genid;
        struct flowi                    key;
        struct flow_cache_object        *object;
};

struct flow_cache_percpu {
        struct hlist_head               *hash_table;
        int                             hash_count;
        u32                             hash_rnd;
        int                             hash_rnd_recalc;
        struct tasklet_struct           flush_tasklet;
};

struct flow_flush_info {
        struct flow_cache               *cache;
        atomic_t                        cpuleft;
        struct completion               completion;
};

struct flow_cache {
        u32                             hash_shift;
        unsigned long                   order;
        struct flow_cache_percpu        *percpu;
        struct notifier_block           hotcpu_notifier;
        int                             low_watermark;
        int                             high_watermark;
        struct timer_list               rnd_timer;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);
EXPORT_SYMBOL(flow_cache_genid);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep;

static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);

#define flow_cache_hash_size(cache)     (1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD            (10 * 60 * HZ)

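/* Timer callback: flag every per-cpu cache so the next lookup on that
 * CPU picks a fresh hash seed, then re-arm the timer.
 */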
static void flow_cache_new_hashrnd(unsigned long arg)
{
        struct flow_cache *fc = (void *) arg;
        int i;

        for_each_possible_cpu(i)
                per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);
}

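/* An entry is stale once the global generation counter has moved on,
 * or when its cached object no longer passes its ->check() callback.
 */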
static int flow_entry_valid(struct flow_cache_entry *fle)
{
        if (atomic_read(&flow_cache_genid) != fle->genid)
                return 0;
        if (fle->object && !fle->object->ops->check(fle->object))
                return 0;
        return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle)
{
        if (fle->object)
                fle->object->ops->delete(fle->object);
        kmem_cache_free(flow_cachep, fle);
}

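/* Deferred destruction: splice the global GC list under the lock and
 * free the dead entries from process context.
 */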
static void flow_cache_gc_task(struct work_struct *work)
{
        struct list_head gc_list;
        struct flow_cache_entry *fce, *n;

        INIT_LIST_HEAD(&gc_list);
        spin_lock_bh(&flow_cache_gc_lock);
        list_splice_tail_init(&flow_cache_gc_list, &gc_list);
        spin_unlock_bh(&flow_cache_gc_lock);

        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
                flow_entry_kill(fce);
}
static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);

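/* Hand a batch of unlinked entries to the GC work item and account for
 * them on this CPU's hash_count.
 */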
static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                     int deleted, struct list_head *gc_list)
{
        if (deleted) {
                fcp->hash_count -= deleted;
                spin_lock_bh(&flow_cache_gc_lock);
                list_splice_tail(gc_list, &flow_cache_gc_list);
                spin_unlock_bh(&flow_cache_gc_lock);
                schedule_work(&flow_cache_gc_work);
        }
}

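/* Walk every hash chain on this CPU, keeping at most shrink_to valid
 * entries per bucket; everything else is unlinked and queued for GC.
 */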
static void __flow_cache_shrink(struct flow_cache *fc,
                                struct flow_cache_percpu *fcp,
                                int shrink_to)
{
        struct flow_cache_entry *fle;
        struct hlist_node *entry, *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;

        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                int saved = 0;

                hlist_for_each_entry_safe(fle, entry, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (saved < shrink_to &&
                            flow_entry_valid(fle)) {
                                saved++;
                        } else {
                                deleted++;
                                hlist_del(&fle->u.hlist);
                                list_add_tail(&fle->u.gc_list, &gc_list);
                        }
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list);
}

static void flow_cache_shrink(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

        __flow_cache_shrink(fc, fcp, shrink_to);
}

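/* A new hash seed invalidates every chain position, so the table is
 * emptied (shrunk to zero) before the seed is put to use.
 */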
static void flow_new_hash_rnd(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        get_random_bytes(&fcp->hash_rnd, sizeof(u32));
        fcp->hash_rnd_recalc = 0;
        __flow_cache_shrink(fc, fcp, 0);
}

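/* Hash the flow key with the per-cpu random seed and mask the result
 * down to a bucket index.
 */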
static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
                          struct flowi *key)
{
        u32 *k = (u32 *) key;

        return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
        flow_compare_t *k1, *k1_lim, *k2;
        const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

        BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));

        k1 = (flow_compare_t *) key1;
        k1_lim = k1 + n_elem;

        k2 = (flow_compare_t *) key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}

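/* Main lookup.  Runs with BHs disabled so the per-cpu state needs no
 * locking: search the local hash table, and on a miss or a stale hit
 * fall back to the resolver and cache whatever object it returns.
 */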
struct flow_cache_object *
flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
                  flow_resolve_t resolver, void *ctx)
{
        struct flow_cache *fc = &flow_cache_global;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle, *tfle;
        struct hlist_node *entry;
        struct flow_cache_object *flo;
        unsigned int hash;

        local_bh_disable();
        fcp = this_cpu_ptr(fc->percpu);

        fle = NULL;
        flo = NULL;
        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR */
        if (!fcp->hash_table)
                goto nocache;

        if (fcp->hash_rnd_recalc)
                flow_new_hash_rnd(fc, fcp);

        hash = flow_hash_code(fc, fcp, key);
        hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
                if (tfle->family == family &&
                    tfle->dir == dir &&
                    flow_key_compare(key, &tfle->key) == 0) {
                        fle = tfle;
                        break;
                }
        }

        if (unlikely(!fle)) {
                if (fcp->hash_count > fc->high_watermark)
                        flow_cache_shrink(fc, fcp);

                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, sizeof(*key));
                        fle->object = NULL;
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
                }
        } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
                flo = fle->object;
                if (!flo)
                        goto ret_object;
                flo = flo->ops->get(flo);
                if (flo)
                        goto ret_object;
        } else if (fle->object) {
                flo = fle->object;
                flo->ops->delete(flo);
                fle->object = NULL;
        }

nocache:
        flo = NULL;
        if (fle) {
                flo = fle->object;
                fle->object = NULL;
        }
        flo = resolver(net, key, family, dir, flo, ctx);
        if (fle) {
                fle->genid = atomic_read(&flow_cache_genid);
                if (!IS_ERR(flo))
                        fle->object = flo;
                else
                        fle->genid--;
        } else {
                if (flo && !IS_ERR(flo))
                        flo->ops->delete(flo);
        }
ret_object:
        local_bh_enable();
        return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

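/* Per-cpu part of flow_cache_flush(): drop every entry on this CPU
 * that is no longer valid, then signal the waiter once the last CPU
 * has finished.
 */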
static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        struct flow_cache *fc = info->cache;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle;
        struct hlist_node *entry, *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;

        fcp = this_cpu_ptr(fc->percpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                hlist_for_each_entry_safe(fle, entry, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (flow_entry_valid(fle))
                                continue;

                        deleted++;
                        hlist_del(&fle->u.hlist);
                        list_add_tail(&fle->u.gc_list, &gc_list);
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list);

        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}

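/* smp_call_function() callback: schedule the local flush tasklet with
 * the shared flush info as its argument.
 */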
static void flow_cache_flush_per_cpu(void *data)
{
        struct flow_flush_info *info = data;
        int cpu;
        struct tasklet_struct *tasklet;

        cpu = smp_processor_id();
        tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
}

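/* Flush the cache on all online CPUs.  A mutex serializes concurrent
 * flushers and get_online_cpus() keeps the CPU count stable while we
 * wait for every per-cpu tasklet to complete.
 */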
void flow_cache_flush(void)
{
        struct flow_flush_info info;
        static DEFINE_MUTEX(flow_flush_sem);

        /* Don't want cpus going down or up during this. */
        get_online_cpus();
        mutex_lock(&flow_flush_sem);
        info.cache = &flow_cache_global;
        atomic_set(&info.cpuleft, num_online_cpus());
        init_completion(&info.completion);

        local_bh_disable();
        smp_call_function(flow_cache_flush_per_cpu, &info, 0);
        flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();

        wait_for_completion(&info.completion);
        mutex_unlock(&flow_flush_sem);
        put_online_cpus();
}

static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
                                          struct flow_cache_percpu *fcp)
{
        fcp->hash_table = (struct hlist_head *)
                __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
        if (!fcp->hash_table)
                panic("NET: failed to allocate flow cache order %lu\n", fc->order);

        fcp->hash_rnd_recalc = 1;
        fcp->hash_count = 0;
        tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
}

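/* CPU hotplug notifier: when a CPU goes down, drop everything it had
 * cached.
 */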
static int flow_cache_cpu(struct notifier_block *nfb,
                          unsigned long action,
                          void *hcpu)
{
        struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
        int cpu = (unsigned long) hcpu;
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                __flow_cache_shrink(fc, fcp, 0);
        return NOTIFY_OK;
}

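/* Set up the hash table sizing, per-cpu state, periodic reseed timer
 * and hotplug notifier for a flow cache.
 */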
static int flow_cache_init(struct flow_cache *fc)
{
        unsigned long order;
        int i;

        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);
        fc->high_watermark = 4 * flow_cache_hash_size(fc);

        for (order = 0;
             (PAGE_SIZE << order) <
                     (sizeof(struct hlist_head)*flow_cache_hash_size(fc));
             order++)
                /* NOTHING */;
        fc->order = order;
        fc->percpu = alloc_percpu(struct flow_cache_percpu);

        setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
                    (unsigned long) fc);
        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);

        for_each_possible_cpu(i)
                flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));

        fc->hotcpu_notifier = (struct notifier_block){
                .notifier_call = flow_cache_cpu,
        };
        register_hotcpu_notifier(&fc->hotcpu_notifier);

        return 0;
}

static int __init flow_cache_init_global(void)
{
        flow_cachep = kmem_cache_create("flow_cache",
                                        sizeof(struct flow_cache_entry),
                                        0, SLAB_PANIC, NULL);

        return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);