/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table *mesh_paths;
static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

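/*
 * Illustrative sketch of the read-side contract (not code from this file):
 * a TX-path caller must keep the RCU read section open for as long as the
 * returned mpath and its next_hop are dereferenced.
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		sta = rcu_dereference(mpath->next_hop);
 *	... build and hand off the frame ...
 *	rcu_read_unlock();
 */
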
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mpp_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;
	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;
	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
	if (!new_mpath)
		goto err_path_alloc;
	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	if (!new_node)
		goto err_node_alloc;

	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mesh_paths->entries) >=
		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	if (grow) {
		struct mesh_table *oldtbl, *newtbl;

		write_lock(&pathtbl_resize_lock);
		oldtbl = mesh_paths;
		newtbl = mesh_table_grow(mesh_paths);
		if (!newtbl) {
			write_unlock(&pathtbl_resize_lock);
			return 0;
		}
		rcu_assign_pointer(mesh_paths, newtbl);
		write_unlock(&pathtbl_resize_lock);

		synchronize_rcu();
		mesh_table_free(oldtbl, false);
	}
	return 0;

err_exists:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}

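/*
 * Illustrative sketch of how the TX path combines lookup and creation
 * (modeled on mesh_nexthop_lookup() in mesh_hwmp.c): frames for a path
 * that is not yet active wait on mpath->frame_queue until resolution.
 *
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (!mpath) {
 *		mesh_path_add(dst, sdata);
 *		mpath = mesh_path_lookup(dst, sdata);
 *	}
 *	if (mpath && !(mpath->flags & MESH_PATH_ACTIVE))
 *		skb_queue_tail(&mpath->frame_queue, skb);
 */
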
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;
	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
	if (!new_mpath)
		goto err_path_alloc;
	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	if (!new_node)
		goto err_node_alloc;

	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
	bucket = &mpp_paths->hash_buckets[hash_idx];

	spin_lock(&mpp_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mpp_paths->entries) >=
		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
		grow = 1;

	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	if (grow) {
		struct mesh_table *oldtbl, *newtbl;

		write_lock(&pathtbl_resize_lock);
		oldtbl = mpp_paths;
		newtbl = mesh_table_grow(mpp_paths);
		if (!newtbl) {
			write_unlock(&pathtbl_resize_lock);
			return 0;
		}
		rcu_assign_pointer(mpp_paths, newtbl);
		write_unlock(&pathtbl_resize_lock);

		synchronize_rcu();
		mesh_table_free(oldtbl, false);
	}
	return 0;

err_exists:
	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(mpath->dst,
					cpu_to_le32(mpath->dsn),
					sdata->dev->broadcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

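/*
 * Hypothetical rate control hook (illustrative only; failure_count and
 * MESH_FAIL_THRESHOLD are placeholder names, not mac80211 symbols):
 *
 *	if (failure_count > MESH_FAIL_THRESHOLD)
 *		mesh_plink_broken(sta);
 */
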
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

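/*
 * Sketch of the teardown ordering described above: the flush runs while
 * the sta is still valid, and RCU defers the actual freeing.
 *
 *	mesh_path_flush_by_nexthop(sta);	// from sta_info_destroy()
 *	// sta memory is freed only after a grace period, so concurrent
 *	// readers never observe a dangling next_hop
 */
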
void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata == sdata)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&mesh_paths->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	return err;
}

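/*
 * Reader-side view of the deletion above (illustrative timeline): a lookup
 * that raced with mesh_path_del() may keep using the entry; it is only
 * reclaimed after every such reader has left its RCU read section.
 *
 *	CPU0					CPU1
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(addr, sdata);
 *						mesh_path_del(addr, sdata);
 *	// mpath still safe to use here
 *	rcu_read_unlock();
 *						// grace period elapses, then
 *						// mesh_path_node_reclaim() frees it
 */
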
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

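/*
 * Typical activation sequence (as mesh_path_fix_nexthop() below does):
 * activate under state_lock, drop the lock, then flush the queue.
 *
 *	spin_lock_bh(&mpath->state_lock);
 *	mesh_path_activate(mpath);
 *	spin_unlock_bh(&mpath->state_lock);
 *	mesh_path_tx_pending(mpath);
 */
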
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor. The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 dsn = 0;

	if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath)
			dsn = ++mpath->dsn;
		mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
			(mpath->flags & MESH_PATH_ACTIVE))
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must not be
 * called with that lock already held.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->dsn = 0;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

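/*
 * Illustrative caller (sketch modeled on the nl80211 mpath handler,
 * ieee80211_change_mpath() in cfg.c): pin a path to a user-selected next
 * hop while under RCU protection.
 *
 *	rcu_read_lock();
 *	sta = sta_info_get(local, next_hop);
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (sta && mpath)
 *		mesh_path_fix_nexthop(mpath, sta);
 *	rcu_read_unlock();
 */
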
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mesh_paths)
		return -ENOMEM;
	mesh_paths->free_node = &mesh_path_node_free;
	mesh_paths->copy_node = &mesh_path_node_copy;
	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;

	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mpp_paths) {
		mesh_table_free(mesh_paths, true);
		return -ENOMEM;
	}
	mpp_paths->free_node = &mesh_path_node_free;
	mpp_paths->copy_node = &mesh_path_node_copy;
	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;

	return 0;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies,
			       mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock(&pathtbl_resize_lock);
}

void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
	mesh_table_free(mpp_paths, true);
}