/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER   2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN          2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
                                time_after(jiffies, mpath->exp_time) && \
                                !(mpath->flags & MESH_PATH_FIXED))
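
/*
 * Expiry is handled lazily: an expired path is only marked inactive once a
 * lookup notices it, or deleted once mesh_path_expire() walks over it.
 * Fixed paths (MESH_PATH_FIXED) are deliberately exempt from expiry.
 */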

struct mpath_node {
        struct hlist_node list;
        struct rcu_head rcu;
        /* This indirection allows two different tables to point to the same
         * mesh_path structure, useful when resizing
         */
        struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock has the grow-table functions as its only writers, and node
 * add/delete as its readers. Plain table reads (i.e. lookups) are already
 * well protected by RCU.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
        return rcu_dereference_protected(mesh_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
        return rcu_dereference_protected(mpp_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *      for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
        for (i = 0; i <= tbl->hash_mask; i++) \
                hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)

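/*
 * Illustrative usage sketch (not from the original file): dereference the
 * table once into a local variable, then pass that local in:
 *
 *	struct mesh_table *tbl = rcu_dereference(mesh_paths);
 *	struct mpath_node *node;
 *	struct hlist_node *p;
 *	int i;
 *
 *	for_each_mesh_entry(tbl, p, node, i)
 *		...;
 */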

static struct mesh_table *mesh_table_alloc(int size_order)
{
        int i;
        struct mesh_table *newtbl;

        newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
        if (!newtbl)
                return NULL;

        newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
                        (1 << size_order), GFP_ATOMIC);

        if (!newtbl->hash_buckets) {
                kfree(newtbl);
                return NULL;
        }

        newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
                        (1 << size_order), GFP_ATOMIC);
        if (!newtbl->hashwlock) {
                kfree(newtbl->hash_buckets);
                kfree(newtbl);
                return NULL;
        }

        newtbl->size_order = size_order;
        newtbl->hash_mask = (1 << size_order) - 1;
        atomic_set(&newtbl->entries, 0);
        get_random_bytes(&newtbl->hash_rnd,
                        sizeof(newtbl->hash_rnd));
        for (i = 0; i <= newtbl->hash_mask; i++)
                spin_lock_init(&newtbl->hashwlock[i]);

        return newtbl;
}
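
/*
 * Note on the GFP_ATOMIC allocations above: mesh_mpath_table_grow() and
 * mesh_mpp_table_grow() call mesh_table_alloc() while holding
 * pathtbl_resize_lock as writers (with BHs disabled), so the allocations
 * here must not sleep.
 */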

static void __mesh_table_free(struct mesh_table *tbl)
{
        kfree(tbl->hash_buckets);
        kfree(tbl->hashwlock);
        kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
        struct hlist_head *mesh_hash;
        struct hlist_node *p, *q;
        int i;

        mesh_hash = tbl->hash_buckets;
        for (i = 0; i <= tbl->hash_mask; i++) {
                spin_lock_bh(&tbl->hashwlock[i]);
                hlist_for_each_safe(p, q, &mesh_hash[i]) {
                        tbl->free_node(p, free_leafs);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
        __mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
                           struct mesh_table *newtbl)
{
        struct hlist_head *oldhash;
        struct hlist_node *p, *q;
        int i;

        if (atomic_read(&oldtbl->entries)
                        < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
                return -EAGAIN;

        newtbl->free_node = oldtbl->free_node;
        newtbl->mean_chain_len = oldtbl->mean_chain_len;
        newtbl->copy_node = oldtbl->copy_node;
        atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

        oldhash = oldtbl->hash_buckets;
        for (i = 0; i <= oldtbl->hash_mask; i++)
                hlist_for_each(p, &oldhash[i])
                        if (oldtbl->copy_node(p, newtbl) < 0)
                                goto errcopy;

        return 0;

errcopy:
        for (i = 0; i <= newtbl->hash_mask; i++) {
                hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
                        oldtbl->free_node(p, 0);
        }
        return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
                           struct mesh_table *tbl)
{
        /* Use last four bytes of hw addr and interface index as hash index */
        return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
                & tbl->hash_mask;
}


/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        struct sk_buff_head tmpq;
        unsigned long flags;

        rcu_assign_pointer(mpath->next_hop, sta);

        __skb_queue_head_init(&tmpq);

        spin_lock_irqsave(&mpath->frame_queue.lock, flags);

        while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                __skb_queue_tail(&tmpq, skb);
        }

        skb_queue_splice(&tmpq, &mpath->frame_queue);
        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}


/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_node *n;
        struct hlist_head *bucket;
        struct mesh_table *tbl;
        struct mpath_node *node;

        tbl = rcu_dereference(mesh_paths);

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                                memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
                        if (MPATH_EXPIRED(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                if (MPATH_EXPIRED(mpath))
                                        mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}
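
/*
 * Illustrative caller sketch (not from the original file) -- the lookup and
 * any use of the result must stay inside one RCU read-side critical
 * section, since the path may be deleted as soon as that section ends:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		sta = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */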

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_node *n;
        struct hlist_head *bucket;
        struct mesh_table *tbl;
        struct mpath_node *node;

        tbl = rcu_dereference(mpp_paths);

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
                        if (MPATH_EXPIRED(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                if (MPATH_EXPIRED(mpath))
                                        mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl = rcu_dereference(mesh_paths);
        struct mpath_node *node;
        struct hlist_node *p;
        int i;
        int j = 0;

        for_each_mesh_entry(tbl, p, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx) {
                        if (MPATH_EXPIRED(node->mpath)) {
                                spin_lock_bh(&node->mpath->state_lock);
                                if (MPATH_EXPIRED(node->mpath))
                                        node->mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&node->mpath->state_lock);
                        }
                        return node->mpath;
                }
        }

        return NULL;
}
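
/*
 * This walk is O(table size) per call, so it is only suitable for slow
 * paths such as the cfg80211 mesh path dump, which fetches entries one
 * index at a time.
 */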

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
                return -ENOSPC;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->timer.data = (unsigned long) new_mpath;
        new_mpath->timer.function = mesh_path_timer;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);
        init_timer(&new_mpath->timer);

        tbl = resize_dereference_mesh_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock_bh(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        mesh_paths_generation++;

        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        atomic_dec(&sdata->u.mesh.mpaths);
        return err;
}
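
/*
 * Locking order in mesh_path_add() and mpp_path_add() is
 * pathtbl_resize_lock (taken as reader) followed by the per-bucket
 * hashwlock. The table is never grown from this context; instead a work
 * flag is set and the grow runs later from the mesh work item, which is
 * where the resize lock is taken as a writer.
 */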

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
        struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

        mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mesh_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mesh_paths, newtbl);

        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
        write_unlock_bh(&pathtbl_resize_lock);
}
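
/*
 * The grow scheme above is classic RCU replacement: concurrent readers see
 * either the old or the new table, rcu_assign_pointer() publishes the new
 * one, and the old table is only freed after a grace period. free_leafs is
 * false in the RCU callback because the mesh_path structures themselves
 * were carried over into the new table by copy_node().
 */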

void mesh_mpp_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mpp_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mpp_paths, newtbl);
        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
        write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);

        tbl = resize_dereference_mpp_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock_bh(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        return err;
}
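
/*
 * Unlike mesh_path_add(), no timer is set up for MPP (proxy) entries, and
 * mesh_path_expire() below only walks mesh_paths, so entries added here
 * are not aged out by that path; they persist until the table is torn
 * down.
 */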


/**
 * mesh_plink_broken - deactivates paths and sends a PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
        struct mesh_table *tbl;
        static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                spin_lock_bh(&mpath->state_lock);
                if (rcu_dereference(mpath->next_hop) == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
                        mpath->flags &= ~MESH_PATH_ACTIVE;
                        ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
                                        mpath->dst, cpu_to_le32(mpath->sn),
                                        cpu_to_le16(PERR_RCODE_DEST_UNREACH),
                                        bcast, sdata);
                } else
                        spin_unlock_bh(&mpath->state_lock);
        }
        rcu_read_unlock();
}
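
/*
 * The PERR above is addressed to the broadcast address (bcast) so that
 * every neighbour that may be routing through this station learns that the
 * destinations reached via it are now unreachable.
 */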

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read-side critical
 * section will be protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta)
                        mesh_path_del(mpath->dst, mpath->sdata);
        }
        rcu_read_unlock();
}

void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (mpath->sdata == sdata)
                        mesh_path_del(mpath->dst, mpath->sdata);
        }
        rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
        struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

        del_timer_sync(&node->mpath->timer);
        atomic_dec(&sdata->u.mesh.mpaths);
        kfree(node->mpath);
        kfree(node);
}
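
/*
 * Note: this reclaim runs from an RCU callback, i.e. in softirq context,
 * where del_timer_sync() is documented as unsafe to call; later kernels
 * reworked this area to stop the timer before the RCU callback runs.
 */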

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int hash_idx;
        int err = 0;

        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        hash_idx = mesh_table_hash(addr, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock_bh(&tbl->hashwlock[hash_idx]);
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
                        spin_lock(&mpath->state_lock);
                        mpath->flags |= MESH_PATH_RESOLVING;
                        hlist_del_rcu(&node->list);
                        call_rcu(&node->rcu, mesh_path_node_reclaim);
                        atomic_dec(&tbl->entries);
                        spin_unlock(&mpath->state_lock);
                        goto enddel;
                }
        }

        err = -ENXIO;
enddel:
        mesh_paths_generation++;
        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
        if (mpath->flags & MESH_PATH_ACTIVE)
                ieee80211_add_pending_skbs(mpath->sdata->local,
                                &mpath->frame_queue);
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.  The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
                             struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct mesh_path *mpath;
        u32 sn = 0;

        if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
                u8 *ra, *da;

                da = hdr->addr3;
                ra = hdr->addr1;
                mpath = mesh_path_lookup(da, sdata);
                if (mpath)
                        sn = ++mpath->sn;
                mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
                                   cpu_to_le32(sn),
                                   cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
        }

        kfree_skb(skb);
        sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&mpath->frame_queue)) &&
                        (mpath->flags & MESH_PATH_ACTIVE))
                mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must NOT be called with mpath->state_lock held;
 * it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
        spin_lock_bh(&mpath->state_lock);
        mesh_path_assign_nexthop(mpath, next_hop);
        mpath->sn = 0xffff;
        mpath->metric = 0;
        mpath->hop_count = 0;
        mpath->exp_time = 0;
        mpath->flags |= MESH_PATH_FIXED;
        mesh_path_activate(mpath);
        spin_unlock_bh(&mpath->state_lock);
        mesh_path_tx_pending(mpath);
}
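
/*
 * Once MESH_PATH_FIXED is set, the path is exempt from MPATH_EXPIRED(),
 * from mesh_path_expire() and from deactivation in mesh_plink_broken(); it
 * only goes away via explicit deletion (mesh_path_del() or one of the
 * flush helpers above).
 */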

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
        struct mesh_path *mpath;
        struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        hlist_del_rcu(p);
        if (free_leafs) {
                del_timer_sync(&mpath->timer);
                kfree(mpath);
        }
        kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
        struct mesh_path *mpath;
        struct mpath_node *node, *new_node;
        u32 hash_idx;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (new_node == NULL)
                return -ENOMEM;

        node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        new_node->mpath = mpath;
        hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
        hlist_add_head(&new_node->list,
                        &newtbl->hash_buckets[hash_idx]);
        return 0;
}

int mesh_pathtbl_init(void)
{
        struct mesh_table *tbl_path, *tbl_mpp;

        tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_path)
                return -ENOMEM;
        tbl_path->free_node = &mesh_path_node_free;
        tbl_path->copy_node = &mesh_path_node_copy;
        tbl_path->mean_chain_len = MEAN_CHAIN_LEN;

        tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_mpp) {
                mesh_table_free(tbl_path, true);
                return -ENOMEM;
        }
        tbl_mpp->free_node = &mesh_path_node_free;
        tbl_mpp->copy_node = &mesh_path_node_copy;
        tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;

        /* Need no locking since this is during init */
        RCU_INIT_POINTER(mesh_paths, tbl_path);
        RCU_INIT_POINTER(mpp_paths, tbl_mpp);

        return 0;
}
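
/*
 * Note that there is a single global pair of tables shared by all mesh
 * interfaces (entries are keyed on sdata as well as the address), so this
 * init/unregister pair runs once for the whole subsystem, not per device.
 */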

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
                spin_lock_bh(&mpath->state_lock);
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
                     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_del(mpath->dst, mpath->sdata);
                } else
                        spin_unlock_bh(&mpath->state_lock);
        }
        rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
        /* no need for locking during exit path */
        mesh_table_free(rcu_dereference_raw(mesh_paths), true);
        mesh_table_free(rcu_dereference_raw(mpp_paths), true);
}