net/mac80211/mesh.c
/*
 * Copyright (c) 2008 open80211s Ltd.
 * Authors:    Luis Carlos Cobo <luisca@cozybit.com>
 *             Javier Cardona <javier@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "mesh.h"

#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)

#define PP_OFFSET       1               /* Path Selection Protocol */
#define PM_OFFSET       5               /* Path Selection Metric   */
#define CC_OFFSET       9               /* Congestion Control Mode */
#define CAPAB_OFFSET 17
#define ACCEPT_PLINKS 0x80

#define TMR_RUNNING_HK  0
#define TMR_RUNNING_MP  1

int mesh_allocated;
static struct kmem_cache *rm_cache;

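/*
 * One-time global mesh setup, done when the first mesh interface is created:
 * initialise the mesh path tables and the cache used for
 * recent-multicast-cache (RMC) entries.
 */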
void ieee80211s_init(void)
{
        mesh_pathtbl_init();
        mesh_allocated = 1;
        rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry),
                                     0, 0, NULL);
}

void ieee80211s_stop(void)
{
        mesh_pathtbl_unregister();
        kmem_cache_destroy(rm_cache);
}

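/*
 * Housekeeping timer callback: flag that housekeeping is due and kick the
 * mesh work item. While the interface is quiescing for suspend, only
 * remember that the timer fired so it can be rearmed on resume.
 */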
static void ieee80211_mesh_housekeeping_timer(unsigned long data)
{
        struct ieee80211_sub_if_data *sdata = (void *) data;
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

        ifmsh->housekeeping = true;

        if (local->quiescing) {
                set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
                return;
        }

        queue_work(local->hw.workqueue, &ifmsh->work);
}

/**
 * mesh_matches_local - check if the config of a mesh point matches ours
 *
 * @ie: information elements of a management frame from the mesh peer
 * @sdata: local mesh subif
 *
 * This function checks if the mesh configuration of a mesh point matches the
 * local mesh configuration, i.e. if both nodes belong to the same mesh network.
 */
bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

        /*
         * As support for each feature is added, check for matching
         * - On mesh config capabilities
         *   - Power Save Support enabled
         *   - Sync support enabled
         *   - Sync support active
         *   - Sync support required from peer
         *   - MDA enabled
         * - Power management control on fc
         */
        if (ifmsh->mesh_id_len == ie->mesh_id_len &&
                memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
                memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 &&
                memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 &&
                memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0)
                return true;

        return false;
}

/**
 * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links
 *
 * @ie: information elements of a management frame from the mesh peer
 */
bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
{
        return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0;
}

/**
 * mesh_accept_plinks_update - update accepting_plinks in local mesh beacons
 *
 * @sdata: mesh interface in which mesh beacons are going to be updated
 */
void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
{
        bool free_plinks;

        /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0,
         * the mesh interface might be able to establish plinks with peers that
         * are already in the table but are not in PLINK_ESTAB state. However,
         * in general the mesh interface is not accepting peer link requests
         * from new peers, and that must be reflected in the beacon.
         */
        free_plinks = mesh_plink_availables(sdata);

        if (free_plinks != sdata->u.mesh.accepting_plinks)
                ieee80211_mesh_housekeeping_timer((unsigned long) sdata);
}

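/*
 * Use the default identifiers (OUI 00-0F-AC, value 0xff) for the active path
 * selection protocol, path selection metric and congestion control mode
 * advertised in our mesh configuration IE.
 */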
void mesh_ids_set_default(struct ieee80211_if_mesh *sta)
{
        u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff};

        memcpy(sta->mesh_pp_id, def_id, 4);
        memcpy(sta->mesh_pm_id, def_id, 4);
        memcpy(sta->mesh_cc_id, def_id, 4);
}

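/*
 * Allocate the per-interface recent multicast cache (RMC) and initialise its
 * hash buckets.
 */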
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
{
        int i;

        sdata->u.mesh.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL);
        if (!sdata->u.mesh.rmc)
                return -ENOMEM;
        sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
        for (i = 0; i < RMC_BUCKETS; i++)
                INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list);
        return 0;
}

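/* Flush all cached RMC entries and free the cache itself. */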
void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_rmc *rmc = sdata->u.mesh.rmc;
        struct rmc_entry *p, *n;
        int i;

        if (!sdata->u.mesh.rmc)
                return;

        for (i = 0; i < RMC_BUCKETS; i++)
                list_for_each_entry_safe(p, n, &rmc->bucket[i].list, list) {
                        list_del(&p->list);
                        kmem_cache_free(rm_cache, p);
                }

        kfree(rmc);
        sdata->u.mesh.rmc = NULL;
}

/**
 * mesh_rmc_check - Check frame in recent multicast cache and add if absent.
 *
 * @sa:         source address
 * @mesh_hdr:   mesh header of the frame
 * @sdata:      interface on which the frame was received
 *
 * Returns: 0 if the frame is not in the cache, nonzero otherwise.
 *
 * Checks using the source address and the mesh sequence number if we have
 * received this frame lately. If the frame is not in the cache, it is added to
 * it.
 */
int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
                   struct ieee80211_sub_if_data *sdata)
{
        struct mesh_rmc *rmc = sdata->u.mesh.rmc;
        u32 seqnum = 0;
        int entries = 0;
        u8 idx;
        struct rmc_entry *p, *n;

        /* Don't care about endianness since only match matters */
        memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
        idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask;
        list_for_each_entry_safe(p, n, &rmc->bucket[idx].list, list) {
                ++entries;
                if (time_after(jiffies, p->exp_time) ||
                                (entries == RMC_QUEUE_MAX_LEN)) {
                        list_del(&p->list);
                        kmem_cache_free(rm_cache, p);
                        --entries;
                } else if ((seqnum == p->seqnum)
                                && (memcmp(sa, p->sa, ETH_ALEN) == 0))
                        return -1;
        }

        p = kmem_cache_alloc(rm_cache, GFP_ATOMIC);
        if (!p) {
                printk(KERN_DEBUG "o11s: could not allocate RMC entry\n");
                return 0;
        }
        p->seqnum = seqnum;
        p->exp_time = jiffies + RMC_TIMEOUT;
        memcpy(p->sa, sa, ETH_ALEN);
        list_add(&p->list, &rmc->bucket[idx].list);
        return 0;
}

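/*
 * Append the IEs carried in mesh beacons and probe responses: supported
 * rates (plus extended supported rates if needed), mesh ID and mesh
 * configuration.
 */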
void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
        u8 *pos;
        int len, i, rate;

        sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
        len = sband->n_bitrates;
        if (len > 8)
                len = 8;
        pos = skb_put(skb, len + 2);
        *pos++ = WLAN_EID_SUPP_RATES;
        *pos++ = len;
        for (i = 0; i < len; i++) {
                rate = sband->bitrates[i].bitrate;
                *pos++ = (u8) (rate / 5);
        }

        if (sband->n_bitrates > len) {
                pos = skb_put(skb, sband->n_bitrates - len + 2);
                *pos++ = WLAN_EID_EXT_SUPP_RATES;
                *pos++ = sband->n_bitrates - len;
                for (i = len; i < sband->n_bitrates; i++) {
                        rate = sband->bitrates[i].bitrate;
                        *pos++ = (u8) (rate / 5);
                }
        }

        pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len);
        *pos++ = WLAN_EID_MESH_ID;
        *pos++ = sdata->u.mesh.mesh_id_len;
        if (sdata->u.mesh.mesh_id_len)
                memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);

        pos = skb_put(skb, 21);
        *pos++ = WLAN_EID_MESH_CONFIG;
        *pos++ = IEEE80211_MESH_CONFIG_LEN;
        /* Version */
        *pos++ = 1;

        /* Active path selection protocol ID */
        memcpy(pos, sdata->u.mesh.mesh_pp_id, 4);
        pos += 4;

        /* Active path selection metric ID   */
        memcpy(pos, sdata->u.mesh.mesh_pm_id, 4);
        pos += 4;

        /* Congestion control mode identifier */
        memcpy(pos, sdata->u.mesh.mesh_cc_id, 4);
        pos += 4;

        /* Channel precedence:
         * Not running simple channel unification protocol
         */
        memset(pos, 0x00, 4);
        pos += 4;

        /* Mesh capability */
        sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata);
        *pos++ = sdata->u.mesh.accepting_plinks ? ACCEPT_PLINKS : 0x00;
        *pos++ = 0x00;

        return;
}

u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl)
{
        /* Use last four bytes of hw addr and interface index as hash index */
        return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
                & tbl->hash_mask;
}

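/*
 * Allocate a hash table with 1 << size_order buckets, one spinlock per
 * bucket and a random hash seed. Returns NULL on allocation failure.
 */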
struct mesh_table *mesh_table_alloc(int size_order)
{
        int i;
        struct mesh_table *newtbl;

        newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
        if (!newtbl)
                return NULL;

        newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
                        (1 << size_order), GFP_KERNEL);

        if (!newtbl->hash_buckets) {
                kfree(newtbl);
                return NULL;
        }

        newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
                        (1 << size_order), GFP_KERNEL);
        if (!newtbl->hashwlock) {
                kfree(newtbl->hash_buckets);
                kfree(newtbl);
                return NULL;
        }

        newtbl->size_order = size_order;
        newtbl->hash_mask = (1 << size_order) - 1;
        atomic_set(&newtbl->entries,  0);
        get_random_bytes(&newtbl->hash_rnd,
                        sizeof(newtbl->hash_rnd));
        for (i = 0; i <= newtbl->hash_mask; i++)
                spin_lock_init(&newtbl->hashwlock[i]);

        return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
        kfree(tbl->hash_buckets);
        kfree(tbl->hashwlock);
        kfree(tbl);
}

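/*
 * Free every node in the table through the table's free_node() hook
 * (optionally freeing the leaf data as well), then free the table itself.
 */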
void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
        struct hlist_head *mesh_hash;
        struct hlist_node *p, *q;
        int i;

        mesh_hash = tbl->hash_buckets;
        for (i = 0; i <= tbl->hash_mask; i++) {
                spin_lock(&tbl->hashwlock[i]);
                hlist_for_each_safe(p, q, &mesh_hash[i]) {
                        tbl->free_node(p, free_leafs);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock(&tbl->hashwlock[i]);
        }
        __mesh_table_free(tbl);
}

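/*
 * Mesh path maintenance timer: while the interface is quiescing for suspend,
 * only note that the timer fired so it can be rearmed on resume; otherwise
 * schedule the mesh work item.
 */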
static void ieee80211_mesh_path_timer(unsigned long data)
{
        struct ieee80211_sub_if_data *sdata =
                (struct ieee80211_sub_if_data *) data;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;

        if (local->quiescing) {
                set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
                return;
        }

        queue_work(local->hw.workqueue, &ifmsh->work);
}

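/*
 * Double the size of the hash table once the mean chain length has been
 * exceeded. Returns the new table (the caller is responsible for swapping it
 * in and freeing the old one), or NULL if growing was not needed or failed.
 */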
struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
{
        struct mesh_table *newtbl;
        struct hlist_head *oldhash;
        struct hlist_node *p, *q;
        int i;

        if (atomic_read(&tbl->entries)
                        < tbl->mean_chain_len * (tbl->hash_mask + 1))
                goto endgrow;

        newtbl = mesh_table_alloc(tbl->size_order + 1);
        if (!newtbl)
                goto endgrow;

        newtbl->free_node = tbl->free_node;
        newtbl->mean_chain_len = tbl->mean_chain_len;
        newtbl->copy_node = tbl->copy_node;
        atomic_set(&newtbl->entries, atomic_read(&tbl->entries));

        oldhash = tbl->hash_buckets;
        for (i = 0; i <= tbl->hash_mask; i++)
                hlist_for_each(p, &oldhash[i])
                        if (tbl->copy_node(p, newtbl) < 0)
                                goto errcopy;

        return newtbl;

errcopy:
        for (i = 0; i <= newtbl->hash_mask; i++) {
                hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
                        tbl->free_node(p, 0);
        }
        __mesh_table_free(newtbl);
endgrow:
        return NULL;
}

/**
 * ieee80211_new_mesh_header - create a new mesh header
 * @meshhdr:    uninitialized mesh header
 * @sdata:      mesh interface to be used
 *
 * Return the header length.
 */
int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
                struct ieee80211_sub_if_data *sdata)
{
        meshhdr->flags = 0;
        meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
        put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum);
        sdata->u.mesh.mesh_seqnum++;

        return 6;
}

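/*
 * Periodic housekeeping: expire inactive peers and stale mesh paths, update
 * the accepting_plinks advertisement in the beacon if it changed, and rearm
 * the housekeeping timer.
 */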
static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
                           struct ieee80211_if_mesh *ifmsh)
{
        bool free_plinks;

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
        printk(KERN_DEBUG "%s: running mesh housekeeping\n",
               sdata->dev->name);
#endif

        ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
        mesh_path_expire(sdata);

        free_plinks = mesh_plink_availables(sdata);
        if (free_plinks != sdata->u.mesh.accepting_plinks)
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);

        ifmsh->housekeeping = false;
        mod_timer(&ifmsh->housekeeping_timer,
                  round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
}

#ifdef CONFIG_PM
void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

        /* might restart the timer but that doesn't matter */
        cancel_work_sync(&ifmsh->work);

        /* use atomic bitops in case both timers fire at the same time */

        if (del_timer_sync(&ifmsh->housekeeping_timer))
                set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
        if (del_timer_sync(&ifmsh->mesh_path_timer))
                set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
}

void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

        if (test_and_clear_bit(TMR_RUNNING_HK, &ifmsh->timers_running))
                add_timer(&ifmsh->housekeeping_timer);
        if (test_and_clear_bit(TMR_RUNNING_MP, &ifmsh->timers_running))
                add_timer(&ifmsh->mesh_path_timer);
}
#endif

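/*
 * Bring the mesh up: schedule the first housekeeping run and notify the
 * driver that the beacon contents and beaconing state have changed.
 */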
void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;

        ifmsh->housekeeping = true;
        queue_work(local->hw.workqueue, &ifmsh->work);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
                                                BSS_CHANGED_BEACON_ENABLED);
}

void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
{
        del_timer_sync(&sdata->u.mesh.housekeeping_timer);
        /*
         * If the timer fired while we waited for it, it will have
         * requeued the work. Now the work will be running again
         * but will not rearm the timer again because it checks
         * whether the interface is running, which, at this point,
         * it no longer is.
         */
        cancel_work_sync(&sdata->u.mesh.work);

        /*
         * When we get here, the interface is marked down. Call
         * rcu_barrier() to wait for the RX path, should it be using
         * the interface and enqueuing frames at this very time on
         * another CPU, and for any pending call_rcu() callbacks.
         */
        rcu_barrier(); /* Wait for RX path and call_rcu()'s */
        skb_queue_purge(&sdata->u.mesh.skb_queue);
}

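/*
 * Handle a queued beacon or probe response: if it comes from a mesh point
 * whose mesh configuration matches ours, update the neighbour and peer link
 * state for that station.
 */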
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
                                        u16 stype,
                                        struct ieee80211_mgmt *mgmt,
                                        size_t len,
                                        struct ieee80211_rx_status *rx_status)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee802_11_elems elems;
        struct ieee80211_channel *channel;
        u32 supp_rates = 0;
        size_t baselen;
        int freq;
        enum ieee80211_band band = rx_status->band;

        /* ignore ProbeResp to foreign address */
        if (stype == IEEE80211_STYPE_PROBE_RESP &&
            compare_ether_addr(mgmt->da, sdata->dev->dev_addr))
                return;

        baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
        if (baselen > len)
                return;

        ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
                               &elems);

        if (elems.ds_params && elems.ds_params_len == 1)
                freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
        else
                freq = rx_status->freq;

        channel = ieee80211_get_channel(local->hw.wiphy, freq);

        if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
                return;

        if (elems.mesh_id && elems.mesh_config &&
            mesh_matches_local(&elems, sdata)) {
                supp_rates = ieee80211_sta_get_rates(local, &elems, band);

                mesh_neighbour_update(mgmt->sa, supp_rates, sdata,
                                      mesh_peer_accepts_plinks(&elems));
        }
}

static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
                                          struct ieee80211_mgmt *mgmt,
                                          size_t len,
                                          struct ieee80211_rx_status *rx_status)
{
        switch (mgmt->u.action.category) {
        case PLINK_CATEGORY:
                mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
                break;
        case MESH_PATH_SEL_CATEGORY:
                mesh_rx_path_sel_frame(sdata, mgmt, len);
                break;
        }
}

static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
                                          struct sk_buff *skb)
{
        struct ieee80211_rx_status *rx_status;
        struct ieee80211_if_mesh *ifmsh;
        struct ieee80211_mgmt *mgmt;
        u16 stype;

        ifmsh = &sdata->u.mesh;

        rx_status = (struct ieee80211_rx_status *) skb->cb;
        mgmt = (struct ieee80211_mgmt *) skb->data;
        stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;

        switch (stype) {
        case IEEE80211_STYPE_PROBE_RESP:
        case IEEE80211_STYPE_BEACON:
                ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len,
                                            rx_status);
                break;
        case IEEE80211_STYPE_ACTION:
                ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
                break;
        }

        kfree_skb(skb);
}

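/*
 * Deferred mesh work: process queued management frames, start HWMP path
 * discovery when PREQs are pending and the minimum interval has elapsed, and
 * run housekeeping when requested.
 */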
static void ieee80211_mesh_work(struct work_struct *work)
{
        struct ieee80211_sub_if_data *sdata =
                container_of(work, struct ieee80211_sub_if_data, u.mesh.work);
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct sk_buff *skb;

        if (!netif_running(sdata->dev))
                return;

        if (local->sw_scanning || local->hw_scanning)
                return;

        while ((skb = skb_dequeue(&ifmsh->skb_queue)))
                ieee80211_mesh_rx_queued_mgmt(sdata, skb);

        if (ifmsh->preq_queue_len &&
            time_after(jiffies,
                       ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
                mesh_path_start_discovery(sdata);

        if (ifmsh->housekeeping)
                ieee80211_mesh_housekeeping(sdata, ifmsh);
}

void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
{
        struct ieee80211_sub_if_data *sdata;

        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list)
                if (ieee80211_vif_is_mesh(&sdata->vif))
                        queue_work(local->hw.workqueue, &sdata->u.mesh.work);
        rcu_read_unlock();
}

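/*
 * Initialise the per-interface mesh state: work item, timers, management
 * frame queue and the default mesh configuration parameters.
 */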
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

        INIT_WORK(&ifmsh->work, ieee80211_mesh_work);
        setup_timer(&ifmsh->housekeeping_timer,
                    ieee80211_mesh_housekeeping_timer,
                    (unsigned long) sdata);
        skb_queue_head_init(&sdata->u.mesh.skb_queue);

        ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T;
        ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T;
        ifmsh->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T;
        ifmsh->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR;
        ifmsh->mshcfg.dot11MeshTTL = MESH_TTL;
        ifmsh->mshcfg.auto_open_plinks = true;
        ifmsh->mshcfg.dot11MeshMaxPeerLinks =
                MESH_MAX_ESTAB_PLINKS;
        ifmsh->mshcfg.dot11MeshHWMPactivePathTimeout =
                MESH_PATH_TIMEOUT;
        ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval =
                MESH_PREQ_MIN_INT;
        ifmsh->mshcfg.dot11MeshHWMPnetDiameterTraversalTime =
                MESH_DIAM_TRAVERSAL_TIME;
        ifmsh->mshcfg.dot11MeshHWMPmaxPREQretries =
                MESH_MAX_PREQ_RETRIES;
        ifmsh->mshcfg.path_refresh_time =
                MESH_PATH_REFRESH_TIME;
        ifmsh->mshcfg.min_discovery_timeout =
                MESH_MIN_DISCOVERY_TIMEOUT;
        ifmsh->accepting_plinks = true;
        ifmsh->preq_id = 0;
        ifmsh->dsn = 0;
        atomic_set(&ifmsh->mpaths, 0);
        mesh_rmc_init(sdata);
        ifmsh->last_preq = jiffies;
        /* Allocate all mesh structures when creating the first mesh interface. */
        if (!mesh_allocated)
                ieee80211s_init();
        mesh_ids_set_default(ifmsh);
        setup_timer(&ifmsh->mesh_path_timer,
                    ieee80211_mesh_path_timer,
                    (unsigned long) sdata);
        INIT_LIST_HEAD(&ifmsh->preq_queue.list);
        spin_lock_init(&ifmsh->mesh_preq_queue_lock);
}

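/*
 * RX hook for mesh interfaces: beacons, probe responses and action frames
 * are queued and handled from the mesh work item; all other management
 * frames continue through the normal RX path.
 */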
ieee80211_rx_result
ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
                       struct ieee80211_rx_status *rx_status)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_mgmt *mgmt;
        u16 fc;

        if (skb->len < 24)
                return RX_DROP_MONITOR;

        mgmt = (struct ieee80211_mgmt *) skb->data;
        fc = le16_to_cpu(mgmt->frame_control);

        switch (fc & IEEE80211_FCTL_STYPE) {
        case IEEE80211_STYPE_PROBE_RESP:
        case IEEE80211_STYPE_BEACON:
        case IEEE80211_STYPE_ACTION:
                memcpy(skb->cb, rx_status, sizeof(*rx_status));
                skb_queue_tail(&ifmsh->skb_queue, skb);
                queue_work(local->hw.workqueue, &ifmsh->work);
                return RX_QUEUED;
        }

        return RX_CONTINUE;
}