/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "send.h"
#include "translation-table.h"
#include "routing.h"
#include "bat_sysfs.h"
#include "originator.h"
#include "hash.h"

#include <linux/if_arp.h>


static int batman_skb_recv(struct sk_buff *skb,
                           struct net_device *dev,
                           struct packet_type *ptype,
                           struct net_device *orig_dev);

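/* free the hard interface after the RCU grace period has passed and drop
 * the reference it holds on its net_device */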
void hardif_free_rcu(struct rcu_head *rcu)
{
        struct hard_iface *hard_iface;

        hard_iface = container_of(rcu, struct hard_iface, rcu);
        dev_put(hard_iface->net_dev);
        kfree(hard_iface);
}

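/* find the hard interface that wraps the given net_device and return it
 * with its refcount increased, or NULL if none is found */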
struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev)
{
        struct hard_iface *hard_iface;

        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
                if (hard_iface->net_dev == net_dev &&
                    atomic_inc_not_zero(&hard_iface->refcount))
                        goto out;
        }

        hard_iface = NULL;

out:
        rcu_read_unlock();
        return hard_iface;
}

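/* check whether a net_device is usable as a batman-adv hard interface:
 * it must be an ethernet-like device and must not be a loopback or a
 * batman-adv soft interface */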
static int is_valid_iface(const struct net_device *net_dev)
{
        if (net_dev->flags & IFF_LOOPBACK)
                return 0;

        if (net_dev->type != ARPHRD_ETHER)
                return 0;

        if (net_dev->addr_len != ETH_ALEN)
                return 0;

        /* no batman over batman */
        if (softif_is_valid(net_dev))
                return 0;

        /* Device is being bridged */
        /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
                return 0; */

        return 1;
}

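/* return an active hard interface assigned to the given soft interface
 * (with its refcount increased), or NULL if none is active */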
static struct hard_iface *hardif_get_active(const struct net_device *soft_iface)
{
        struct hard_iface *hard_iface;

        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                if (hard_iface->if_status == IF_ACTIVE &&
                    atomic_inc_not_zero(&hard_iface->refcount))
                        goto out;
        }

        hard_iface = NULL;

out:
        rcu_read_unlock();
        return hard_iface;
}

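/* copy the MAC address of the currently selected primary interface into
 * this node's own vis packet */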
static void primary_if_update_addr(struct bat_priv *bat_priv)
{
        struct vis_packet *vis_packet;
        struct hard_iface *primary_if;

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;

        vis_packet = (struct vis_packet *)
                                bat_priv->my_vis_info->skb_packet->data;
        memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
        memcpy(vis_packet->sender_orig,
               primary_if->net_dev->dev_addr, ETH_ALEN);

out:
        if (primary_if)
                hardif_free_ref(primary_if);
}

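/* select a new primary interface and release the previously selected one -
 * the caller must hold rtnl_lock */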
static void primary_if_select(struct bat_priv *bat_priv,
                              struct hard_iface *new_hard_iface)
{
        struct hard_iface *curr_hard_iface;
        struct batman_packet *batman_packet;

        ASSERT_RTNL();

        if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount))
                new_hard_iface = NULL;

        curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
        rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);

        if (curr_hard_iface)
                hardif_free_ref(curr_hard_iface);

        if (!new_hard_iface)
                return;

        batman_packet = (struct batman_packet *)(new_hard_iface->packet_buff);
        batman_packet->flags = PRIMARIES_FIRST_HOP;
        batman_packet->ttl = TTL;

        primary_if_update_addr(bat_priv);
}

static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
{
        if (hard_iface->net_dev->flags & IFF_UP)
                return true;

        return false;
}

static void update_mac_addresses(struct hard_iface *hard_iface)
{
        memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
               hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
               hard_iface->net_dev->dev_addr, ETH_ALEN);
}

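/* warn if another active hard interface already uses the same MAC address
 * as the given net_device */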
static void check_known_mac_addr(const struct net_device *net_dev)
{
        const struct hard_iface *hard_iface;

        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
                if ((hard_iface->if_status != IF_ACTIVE) &&
                    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
                        continue;

                if (hard_iface->net_dev == net_dev)
                        continue;

                if (!compare_eth(hard_iface->net_dev->dev_addr,
                                 net_dev->dev_addr))
                        continue;

                pr_warning("The newly added mac address (%pM) already exists "
                           "on: %s\n", net_dev->dev_addr,
                           hard_iface->net_dev->name);
                pr_warning("It is strongly recommended to keep mac addresses "
                           "unique to avoid problems!\n");
        }
        rcu_read_unlock();
}

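/* calculate the smallest MTU usable on the given soft interface, based on
 * the hard interfaces assigned to it and the fragmentation setting */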
int hardif_min_mtu(struct net_device *soft_iface)
{
        const struct bat_priv *bat_priv = netdev_priv(soft_iface);
        const struct hard_iface *hard_iface;
        /* allow big frames if all devices are capable of doing so
         * (have MTU > 1500 + BAT_HEADER_LEN) */
        int min_mtu = ETH_DATA_LEN;

        if (atomic_read(&bat_priv->fragmentation))
                goto out;

        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
                if ((hard_iface->if_status != IF_ACTIVE) &&
                    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
                        continue;

                if (hard_iface->soft_iface != soft_iface)
                        continue;

                min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
                                min_mtu);
        }
        rcu_read_unlock();
out:
        return min_mtu;
}

/* adjusts the MTU if a new interface with a smaller MTU appeared. */
void update_min_mtu(struct net_device *soft_iface)
{
        int min_mtu;

        min_mtu = hardif_min_mtu(soft_iface);
        if (soft_iface->mtu != min_mtu)
                soft_iface->mtu = min_mtu;
}

static void hardif_activate_interface(struct hard_iface *hard_iface)
{
        struct bat_priv *bat_priv;
        struct hard_iface *primary_if = NULL;

        if (hard_iface->if_status != IF_INACTIVE)
                goto out;

        bat_priv = netdev_priv(hard_iface->soft_iface);

        update_mac_addresses(hard_iface);
        hard_iface->if_status = IF_TO_BE_ACTIVATED;

        /**
         * the first active interface becomes our primary interface or
         * the next active interface after the old primary interface was removed
         */
        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if)
                primary_if_select(bat_priv, hard_iface);

        bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
                 hard_iface->net_dev->name);

        update_min_mtu(hard_iface->soft_iface);

out:
        if (primary_if)
                hardif_free_ref(primary_if);
}

static void hardif_deactivate_interface(struct hard_iface *hard_iface)
{
        if ((hard_iface->if_status != IF_ACTIVE) &&
            (hard_iface->if_status != IF_TO_BE_ACTIVATED))
                return;

        hard_iface->if_status = IF_INACTIVE;

        bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
                 hard_iface->net_dev->name);

        update_min_mtu(hard_iface->soft_iface);
}

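/* attach the hard interface to the soft interface named by iface_name,
 * creating the soft interface first if it does not exist yet, and start
 * sending originator messages on it */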
int hardif_enable_interface(struct hard_iface *hard_iface,
                            const char *iface_name)
{
        struct bat_priv *bat_priv;
        struct batman_packet *batman_packet;
        struct net_device *soft_iface;
        int ret;

        if (hard_iface->if_status != IF_NOT_IN_USE)
                goto out;

        if (!atomic_inc_not_zero(&hard_iface->refcount))
                goto out;

        soft_iface = dev_get_by_name(&init_net, iface_name);

        if (!soft_iface) {
                soft_iface = softif_create(iface_name);

                if (!soft_iface) {
                        ret = -ENOMEM;
                        goto err;
                }

                /* dev_get_by_name() increases the reference counter for us */
                dev_hold(soft_iface);
        }

        if (!softif_is_valid(soft_iface)) {
                pr_err("Can't create batman mesh interface %s: "
                       "already exists as regular interface\n",
                       soft_iface->name);
                dev_put(soft_iface);
                ret = -EINVAL;
                goto err;
        }

        hard_iface->soft_iface = soft_iface;
        bat_priv = netdev_priv(hard_iface->soft_iface);
        hard_iface->packet_len = BAT_PACKET_LEN;
        hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);

        if (!hard_iface->packet_buff) {
                bat_err(hard_iface->soft_iface, "Can't add interface packet "
                        "(%s): out of memory\n", hard_iface->net_dev->name);
                ret = -ENOMEM;
                goto err;
        }

        batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
        batman_packet->packet_type = BAT_PACKET;
        batman_packet->version = COMPAT_VERSION;
        batman_packet->flags = NO_FLAGS;
        batman_packet->ttl = 2;
        batman_packet->tq = TQ_MAX_VALUE;
        batman_packet->tt_num_changes = 0;
        batman_packet->ttvn = 0;

        hard_iface->if_num = bat_priv->num_ifaces;
        bat_priv->num_ifaces++;
        hard_iface->if_status = IF_INACTIVE;
        orig_hash_add_if(hard_iface, bat_priv->num_ifaces);

        hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
        hard_iface->batman_adv_ptype.func = batman_skb_recv;
        hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
        dev_add_pack(&hard_iface->batman_adv_ptype);

        atomic_set(&hard_iface->seqno, 1);
        atomic_set(&hard_iface->frag_seqno, 1);
        bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
                 hard_iface->net_dev->name);

        if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
                ETH_DATA_LEN + BAT_HEADER_LEN)
                bat_info(hard_iface->soft_iface,
                        "The MTU of interface %s is too small (%i) to handle "
                        "the transport of batman-adv packets. Packets going "
                        "over this interface will be fragmented on layer 2, "
                        "which could impact performance. Setting the MTU "
                        "to %zi would solve the problem.\n",
                        hard_iface->net_dev->name, hard_iface->net_dev->mtu,
                        ETH_DATA_LEN + BAT_HEADER_LEN);

        if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
                ETH_DATA_LEN + BAT_HEADER_LEN)
                bat_info(hard_iface->soft_iface,
                        "The MTU of interface %s is too small (%i) to handle "
                        "the transport of batman-adv packets. If you experience"
                        " problems getting traffic through, try increasing the "
                        "MTU to %zi.\n",
                        hard_iface->net_dev->name, hard_iface->net_dev->mtu,
                        ETH_DATA_LEN + BAT_HEADER_LEN);

        if (hardif_is_iface_up(hard_iface))
                hardif_activate_interface(hard_iface);
        else
                bat_err(hard_iface->soft_iface, "Not using interface %s "
                        "(retrying later): interface not active\n",
                        hard_iface->net_dev->name);

        /* begin scheduling originator messages on that interface */
        schedule_own_packet(hard_iface);

out:
        return 0;

err:
        hardif_free_ref(hard_iface);
        return ret;
}

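/* detach the hard interface from its soft interface, select a new primary
 * interface if necessary and release the resources taken in
 * hardif_enable_interface() */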
void hardif_disable_interface(struct hard_iface *hard_iface)
{
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct hard_iface *primary_if = NULL;

        if (hard_iface->if_status == IF_ACTIVE)
                hardif_deactivate_interface(hard_iface);

        if (hard_iface->if_status != IF_INACTIVE)
                goto out;

        bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
                 hard_iface->net_dev->name);
        dev_remove_pack(&hard_iface->batman_adv_ptype);

        bat_priv->num_ifaces--;
        orig_hash_del_if(hard_iface, bat_priv->num_ifaces);

        primary_if = primary_if_get_selected(bat_priv);
        if (hard_iface == primary_if) {
                struct hard_iface *new_if;

                new_if = hardif_get_active(hard_iface->soft_iface);
                primary_if_select(bat_priv, new_if);

                if (new_if)
                        hardif_free_ref(new_if);
        }

        kfree(hard_iface->packet_buff);
        hard_iface->packet_buff = NULL;
        hard_iface->if_status = IF_NOT_IN_USE;

        /* delete all references to this hard_iface */
        purge_orig_ref(bat_priv);
        purge_outstanding_packets(bat_priv, hard_iface);
        dev_put(hard_iface->soft_iface);

        /* nobody uses this interface anymore */
        if (!bat_priv->num_ifaces)
                softif_destroy(hard_iface->soft_iface);

        hard_iface->soft_iface = NULL;
        hardif_free_ref(hard_iface);

out:
        if (primary_if)
                hardif_free_ref(primary_if);
}

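/* create the hard_iface object for a newly registered net_device and add it
 * to hardif_list - the caller must hold rtnl_lock */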
static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
{
        struct hard_iface *hard_iface;
        int ret;

        ASSERT_RTNL();

        ret = is_valid_iface(net_dev);
        if (ret != 1)
                goto out;

        dev_hold(net_dev);

        hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC);
        if (!hard_iface) {
                pr_err("Can't add interface (%s): out of memory\n",
                       net_dev->name);
                goto release_dev;
        }

        ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
        if (ret)
                goto free_if;

        hard_iface->if_num = -1;
        hard_iface->net_dev = net_dev;
        hard_iface->soft_iface = NULL;
        hard_iface->if_status = IF_NOT_IN_USE;
        INIT_LIST_HEAD(&hard_iface->list);
        /* extra reference for return */
        atomic_set(&hard_iface->refcount, 2);

        check_known_mac_addr(hard_iface->net_dev);
        list_add_tail_rcu(&hard_iface->list, &hardif_list);

        return hard_iface;

free_if:
        kfree(hard_iface);
release_dev:
        dev_put(net_dev);
out:
        return NULL;
}

static void hardif_remove_interface(struct hard_iface *hard_iface)
{
        ASSERT_RTNL();

        /* first deactivate interface */
        if (hard_iface->if_status != IF_NOT_IN_USE)
                hardif_disable_interface(hard_iface);

        if (hard_iface->if_status != IF_NOT_IN_USE)
                return;

        hard_iface->if_status = IF_TO_BE_REMOVED;
        sysfs_del_hardif(&hard_iface->hardif_obj);
        hardif_free_ref(hard_iface);
}

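/* remove and clean up all hard interfaces known to batman-adv */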
void hardif_remove_interfaces(void)
{
        struct hard_iface *hard_iface, *hard_iface_tmp;

        rtnl_lock();
        list_for_each_entry_safe(hard_iface, hard_iface_tmp,
                                 &hardif_list, list) {
                list_del_rcu(&hard_iface->list);
                hardif_remove_interface(hard_iface);
        }
        rtnl_unlock();
}

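/* netdevice notifier callback: reacts to register/unregister, up/down,
 * MTU and MAC address changes of potential hard interfaces */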
static int hard_if_event(struct notifier_block *this,
                         unsigned long event, void *ptr)
{
        struct net_device *net_dev = ptr;
        struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
        struct hard_iface *primary_if = NULL;
        struct bat_priv *bat_priv;

        if (!hard_iface && event == NETDEV_REGISTER)
                hard_iface = hardif_add_interface(net_dev);

        if (!hard_iface)
                goto out;

        switch (event) {
        case NETDEV_UP:
                hardif_activate_interface(hard_iface);
                break;
        case NETDEV_GOING_DOWN:
        case NETDEV_DOWN:
                hardif_deactivate_interface(hard_iface);
                break;
        case NETDEV_UNREGISTER:
                list_del_rcu(&hard_iface->list);

                hardif_remove_interface(hard_iface);
                break;
        case NETDEV_CHANGEMTU:
                if (hard_iface->soft_iface)
                        update_min_mtu(hard_iface->soft_iface);
                break;
        case NETDEV_CHANGEADDR:
                if (hard_iface->if_status == IF_NOT_IN_USE)
                        goto hardif_put;

                check_known_mac_addr(hard_iface->net_dev);
                update_mac_addresses(hard_iface);

                bat_priv = netdev_priv(hard_iface->soft_iface);
                primary_if = primary_if_get_selected(bat_priv);
                if (!primary_if)
                        goto hardif_put;

                if (hard_iface == primary_if)
                        primary_if_update_addr(bat_priv);
                break;
        default:
                break;
        }

hardif_put:
        hardif_free_ref(hard_iface);
out:
        if (primary_if)
                hardif_free_ref(primary_if);
        return NOTIFY_DONE;
}

/* receive a packet with the batman ethertype coming in on a hard
 * interface */
static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *ptype,
                           struct net_device *orig_dev)
{
        struct bat_priv *bat_priv;
        struct batman_packet *batman_packet;
        struct hard_iface *hard_iface;
        int ret;

        hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
        skb = skb_share_check(skb, GFP_ATOMIC);

        /* skb was released by skb_share_check() */
        if (!skb)
                goto err_out;

        /* packet should hold at least type and version */
        if (unlikely(!pskb_may_pull(skb, 2)))
                goto err_free;

        /* expect a valid ethernet header here. */
        if (unlikely(skb->mac_len != sizeof(struct ethhdr)
                                || !skb_mac_header(skb)))
                goto err_free;

        if (!hard_iface->soft_iface)
                goto err_free;

        bat_priv = netdev_priv(hard_iface->soft_iface);

        if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
                goto err_free;

        /* discard frames on inactive interfaces */
        if (hard_iface->if_status != IF_ACTIVE)
                goto err_free;

        batman_packet = (struct batman_packet *)skb->data;

        if (batman_packet->version != COMPAT_VERSION) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: incompatible batman version (%i)\n",
                        batman_packet->version);
                goto err_free;
        }

        /* all receive handlers return whether they received or reused
         * the supplied skb. if not, we have to free the skb. */

        switch (batman_packet->packet_type) {
                /* batman originator packet */
        case BAT_PACKET:
                ret = recv_bat_packet(skb, hard_iface);
                break;

                /* batman icmp packet */
        case BAT_ICMP:
                ret = recv_icmp_packet(skb, hard_iface);
                break;

                /* unicast packet */
        case BAT_UNICAST:
                ret = recv_unicast_packet(skb, hard_iface);
                break;

                /* fragmented unicast packet */
        case BAT_UNICAST_FRAG:
                ret = recv_ucast_frag_packet(skb, hard_iface);
                break;

                /* broadcast packet */
        case BAT_BCAST:
                ret = recv_bcast_packet(skb, hard_iface);
                break;

                /* vis packet */
        case BAT_VIS:
                ret = recv_vis_packet(skb, hard_iface);
                break;
                /* Translation table query (request or response) */
        case BAT_TT_QUERY:
                ret = recv_tt_query(skb, hard_iface);
                break;
                /* Roaming advertisement */
        case BAT_ROAM_ADV:
                ret = recv_roam_adv(skb, hard_iface);
                break;
        default:
                ret = NET_RX_DROP;
        }

        if (ret == NET_RX_DROP)
                kfree_skb(skb);

        /* return NET_RX_SUCCESS in any case as we
         * most probably dropped the packet for
         * routing-logical reasons. */

        return NET_RX_SUCCESS;

err_free:
        kfree_skb(skb);
err_out:
        return NET_RX_DROP;
}

struct notifier_block hard_if_notifier = {
        .notifier_call = hard_if_event,
};