2 * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
4 * Marek Lindner, Simon Wunderlich
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "translation-table.h"
26 #include "soft-interface.h"
27 #include "hard-interface.h"
30 #include "aggregation.h"
32 #include <linux/netfilter_bridge.h>
34 static void send_outstanding_bcast_packet(struct work_struct *work);
36 /* apply hop penalty for a normal link */
37 static uint8_t hop_penalty(const uint8_t tq)
39 return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE);
42 /* when do we schedule our own packet to be sent */
43 static unsigned long own_send_time(struct bat_priv *bat_priv)
45 return jiffies + msecs_to_jiffies(
46 atomic_read(&bat_priv->orig_interval) -
47 JITTER + (random32() % 2*JITTER));
50 /* when do we schedule a forwarded packet to be sent */
51 static unsigned long forward_send_time(struct bat_priv *bat_priv)
53 return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
56 /* send out an already prepared packet to the given address via the
57 * specified batman interface */
/* NOTE(review): this excerpt omits several original lines (the early
 * error returns after each guard, the closing braces and the dst_addr
 * parameter line) — the gaps in the embedded line numbers mark them. */
58 int send_skb_packet(struct sk_buff *skb,
59 struct batman_if *batman_if,
62 struct ethhdr *ethhdr;
/* refuse to transmit on an interface that is not fully activated */
64 if (batman_if->if_status != IF_ACTIVE)
67 if (unlikely(!batman_if->net_dev))
70 if (!(batman_if->net_dev->flags & IFF_UP)) {
71 pr_warning("Interface %s is not up - can't send packet via "
72 "that interface!\n", batman_if->dev);
76 /* push to the ethernet header. */
77 if (my_skb_push(skb, sizeof(struct ethhdr)) < 0)
80 skb_reset_mac_header(skb);
/* fill in the ethernet header: our MAC as source, batman ethertype */
82 ethhdr = (struct ethhdr *) skb_mac_header(skb);
83 memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
84 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
85 ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
87 skb_set_network_header(skb, ETH_HLEN);
/* control-traffic priority so OGMs are not starved by payload */
88 skb->priority = TC_PRIO_CONTROL;
89 skb->protocol = __constant_htons(ETH_P_BATMAN);
91 skb->dev = batman_if->net_dev;
93 /* dev_queue_xmit() returns a negative result on error. However on
94 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
95 * (which is > 0). This will not be treated as an error.
96 * Also, if netfilter/ebtables wants to block outgoing batman
97 * packets then giving them a chance to do so here */
99 return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
/* error path: all failed guards above fall through to a drop result */
103 return NET_XMIT_DROP;
106 /* sends a raw packet. */
107 void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
108 struct batman_if *batman_if, uint8_t *dst_addr)
113 skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
116 data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
117 memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
118 /* pull back to the batman "network header" */
119 skb_pull(skb, sizeof(struct ethhdr));
120 send_skb_packet(skb, batman_if, dst_addr);
123 /* Send a packet to a given interface */
/* NOTE(review): this excerpt is missing interior lines (local
 * declarations of buff_pos/packet_num/fwd_str, the else keywords and
 * closing braces) — gaps in the embedded numbering mark them. */
124 static void send_packet_to_if(struct forw_packet *forw_packet,
125 struct batman_if *batman_if)
127 /* FIXME: each batman_if will be attached to a softif */
128 struct bat_priv *bat_priv = netdev_priv(soft_device);
132 struct batman_packet *batman_packet;
/* never transmit on an interface that is not active */
134 if (batman_if->if_status != IF_ACTIVE)
139 batman_packet = (struct batman_packet *)
140 (forw_packet->packet_buff);
142 /* adjust all flags and log packets */
/* walk every OGM aggregated into packet_buff; each iteration fixes up
 * the DIRECTLINK flag of one sub-packet and logs it */
143 while (aggregated_packet(buff_pos,
144 forw_packet->packet_len,
145 batman_packet->num_hna)) {
147 /* we might have aggregated direct link packets with an
148 * ordinary base packet */
/* set DIRECTLINK only when this sub-packet was flagged direct-link
 * AND we are rebroadcasting on the interface it arrived on */
149 if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
150 (forw_packet->if_incoming == batman_if))
151 batman_packet->flags |= DIRECTLINK;
153 batman_packet->flags &= ~DIRECTLINK;
155 fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
158 bat_dbg(DBG_BATMAN, bat_priv,
159 "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
160 " IDF %s) on interface %s [%s]\n",
161 fwd_str, (packet_num > 0 ? "aggregated " : ""),
162 batman_packet->orig, ntohl(batman_packet->seqno),
163 batman_packet->tq, batman_packet->ttl,
164 (batman_packet->flags & DIRECTLINK ?
166 batman_if->dev, batman_if->addr_str);
/* advance to the next aggregated sub-packet: header plus the
 * variable-length HNA list (num_hna MAC addresses) */
168 buff_pos += sizeof(struct batman_packet) +
169 (batman_packet->num_hna * ETH_ALEN);
171 batman_packet = (struct batman_packet *)
172 (forw_packet->packet_buff + buff_pos);
/* finally transmit the whole (possibly aggregated) buffer */
175 send_raw_packet(forw_packet->packet_buff,
176 forw_packet->packet_len,
177 batman_if, broadcast_addr);
180 /* send a batman packet */
/* NOTE(review): excerpt drops the early returns and closing braces;
 * gaps in the embedded numbering mark the missing lines. */
181 static void send_packet(struct forw_packet *forw_packet)
183 /* FIXME: each batman_if will be attached to a softif */
184 struct bat_priv *bat_priv = netdev_priv(soft_device);
185 struct batman_if *batman_if;
186 struct batman_packet *batman_packet =
187 (struct batman_packet *)(forw_packet->packet_buff);
188 unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
/* a forward packet without a known incoming interface is a bug */
190 if (!forw_packet->if_incoming) {
191 pr_err("Error - can't forward packet: incoming iface not "
196 if (forw_packet->if_incoming->if_status != IF_ACTIVE)
199 /* multihomed peer assumed */
200 /* non-primary OGMs are only broadcasted on their interface */
/* two single-interface cases: a direct-link OGM with TTL 1, or an own
 * OGM originating from a non-primary interface — send only on the
 * incoming interface instead of on all of them */
201 if ((directlink && (batman_packet->ttl == 1)) ||
202 (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {
204 /* FIXME: what about aggregated packets ? */
205 bat_dbg(DBG_BATMAN, bat_priv,
206 "%s packet (originator %pM, seqno %d, TTL %d) "
207 "on interface %s [%s]\n",
208 (forw_packet->own ? "Sending own" : "Forwarding"),
209 batman_packet->orig, ntohl(batman_packet->seqno),
210 batman_packet->ttl, forw_packet->if_incoming->dev,
211 forw_packet->if_incoming->addr_str);
213 send_raw_packet(forw_packet->packet_buff,
214 forw_packet->packet_len,
215 forw_packet->if_incoming,
220 /* broadcast on every interface */
/* default case: rebroadcast on every known batman interface (RCU
 * read-side iteration over the global if_list) */
222 list_for_each_entry_rcu(batman_if, &if_list, list)
223 send_packet_to_if(forw_packet, batman_if);
/* Re-allocate an interface's OGM buffer after the local HNA table
 * changed, so it can hold the current number of announced addresses.
 * NOTE(review): the excerpt omits the num_hna/new_len declarations and
 * the early return taken when kmalloc fails — see the comment kept
 * below ("keep old buffer if kmalloc should fail"). */
227 static void rebuild_batman_packet(struct batman_if *batman_if)
230 unsigned char *new_buff;
231 struct batman_packet *batman_packet;
/* new size: fixed OGM header plus one MAC address per local HNA entry */
233 new_len = sizeof(struct batman_packet) + (num_hna * ETH_ALEN);
234 new_buff = kmalloc(new_len, GFP_ATOMIC);
236 /* keep old buffer if kmalloc should fail */
/* copy the existing header, then append the fresh HNA list */
238 memcpy(new_buff, batman_if->packet_buff,
239 sizeof(struct batman_packet));
240 batman_packet = (struct batman_packet *)new_buff;
242 batman_packet->num_hna = hna_local_fill_buffer(
243 new_buff + sizeof(struct batman_packet),
244 new_len - sizeof(struct batman_packet));
/* swap in the new buffer and release the old one */
246 kfree(batman_if->packet_buff);
247 batman_if->packet_buff = new_buff;
248 batman_if->packet_len = new_len;
/* Prepare this interface's own OGM (sequence number, VIS flag, HNA
 * refresh) and queue it for transmission at the next jittered send
 * time. NOTE(review): excerpt omits the vis_server declaration, early
 * returns and closing braces — gaps in the numbering mark them. */
252 void schedule_own_packet(struct batman_if *batman_if)
254 /* FIXME: each batman_if will be attached to a softif */
255 struct bat_priv *bat_priv = netdev_priv(soft_device);
256 unsigned long send_time;
257 struct batman_packet *batman_packet;
/* interfaces being removed or unused never emit OGMs */
260 if ((batman_if->if_status == IF_NOT_IN_USE) ||
261 (batman_if->if_status == IF_TO_BE_REMOVED))
264 vis_server = atomic_read(&bat_priv->vis_mode);
267 * the interface gets activated here to avoid race conditions between
268 * the moment of activating the interface in
269 * hardif_activate_interface() where the originator mac is set and
270 * outdated packets (especially uninitialized mac addresses) in the
273 if (batman_if->if_status == IF_TO_BE_ACTIVATED)
274 batman_if->if_status = IF_ACTIVE;
276 /* if local hna has changed and interface is a primary interface */
277 if ((atomic_read(&hna_local_changed)) &&
278 (batman_if == bat_priv->primary_if))
279 rebuild_batman_packet(batman_if);
282 * NOTE: packet_buff might just have been re-allocated in
283 * rebuild_batman_packet()
285 batman_packet = (struct batman_packet *)batman_if->packet_buff;
287 /* change sequence number to network order */
288 batman_packet->seqno =
289 htonl((uint32_t)atomic_read(&batman_if->seqno));
/* advertise whether we act as a vis server (sync mode) */
291 if (vis_server == VIS_TYPE_SERVER_SYNC)
292 batman_packet->flags |= VIS_SERVER;
294 batman_packet->flags &= ~VIS_SERVER;
/* bump the sequence counter for the next OGM */
296 atomic_inc(&batman_if->seqno);
298 slide_own_bcast_window(batman_if);
299 send_time = own_send_time(bat_priv);
/* own=1: the aggregation code treats this as our own OGM */
300 add_bat_packet_to_list(bat_priv,
301 batman_if->packet_buff,
302 batman_if->packet_len,
303 batman_if, 1, send_time);
/* Queue a received OGM for rebroadcast: decrement TTL, record the
 * previous sender, rewrite TQ based on our best route to the
 * originator, apply the hop penalty and hand the packet to the
 * aggregation/send list. NOTE(review): excerpt omits several lines
 * (early return after the TTL check, else branches, closing braces). */
306 void schedule_forward_packet(struct orig_node *orig_node,
307 struct ethhdr *ethhdr,
308 struct batman_packet *batman_packet,
309 uint8_t directlink, int hna_buff_len,
310 struct batman_if *if_incoming)
312 /* FIXME: each batman_if will be attached to a softif */
313 struct bat_priv *bat_priv = netdev_priv(soft_device);
314 unsigned char in_tq, in_ttl, tq_avg = 0;
315 unsigned long send_time;
/* a packet with TTL <= 1 must not be forwarded any further */
317 if (batman_packet->ttl <= 1) {
318 bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
/* remember the incoming values for the debug log below */
322 in_tq = batman_packet->tq;
323 in_ttl = batman_packet->ttl;
325 batman_packet->ttl--;
326 memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
328 /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
329 * of our best tq value */
330 if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {
332 /* rebroadcast ogm of best ranking neighbor as is */
/* only rewrite TQ/TTL when this OGM did NOT come from our current
 * best next hop itself */
333 if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
334 batman_packet->tq = orig_node->router->tq_avg;
336 if (orig_node->router->last_ttl)
337 batman_packet->ttl = orig_node->router->last_ttl
341 tq_avg = orig_node->router->tq_avg;
344 /* apply hop penalty */
345 batman_packet->tq = hop_penalty(batman_packet->tq);
347 bat_dbg(DBG_BATMAN, bat_priv,
348 "Forwarding packet: tq_orig: %i, tq_avg: %i, "
349 "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
350 in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
/* seqno goes back to network byte order before transmission */
353 batman_packet->seqno = htonl(batman_packet->seqno);
355 /* switch of primaries first hop flag when forwarding */
356 batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
358 batman_packet->flags |= DIRECTLINK;
360 batman_packet->flags &= ~DIRECTLINK;
/* own=0: this is a forwarded packet, not one of our own OGMs */
362 send_time = forward_send_time(bat_priv);
363 add_bat_packet_to_list(bat_priv,
364 (unsigned char *)batman_packet,
365 sizeof(struct batman_packet) + hna_buff_len,
366 if_incoming, 0, send_time);
369 static void forw_packet_free(struct forw_packet *forw_packet)
371 if (forw_packet->skb)
372 kfree_skb(forw_packet->skb);
373 kfree(forw_packet->packet_buff);
/* Insert @forw_packet into the global broadcast queue and arm its
 * delayed work so send_outstanding_bcast_packet() fires after
 * @send_time. NOTE(review): excerpt omits the flags declaration and
 * the final queue_delayed_work argument/closing brace. */
377 static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
378 unsigned long send_time)
381 INIT_HLIST_NODE(&forw_packet->list);
383 /* add new packet to packet list */
/* list is shared with the work handler and purge path — irqsave lock */
384 spin_lock_irqsave(&forw_bcast_list_lock, flags);
385 hlist_add_head(&forw_packet->list, &forw_bcast_list);
386 spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
388 /* start timer for this packet */
389 INIT_DELAYED_WORK(&forw_packet->delayed_work,
390 send_outstanding_bcast_packet);
391 queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
/* decrement unless the counter is already zero; used to reserve a
 * slot in the bounded broadcast queue */
395 #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
396 /* add a broadcast packet to the queue and setup timers. broadcast packets
397 * are sent multiple times to increase probability for beeing received.
399 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
402 * The skb is not consumed, so the caller should make sure that the
/* NOTE(review): excerpt omits the error-path labels/returns between
 * the failure sites and the trailing NETDEV_TX_BUSY. */
404 int add_bcast_packet_to_list(struct sk_buff *skb)
406 struct forw_packet *forw_packet;
407 struct bcast_packet *bcast_packet;
408 /* FIXME: each batman_if will be attached to a softif */
409 struct bat_priv *bat_priv = netdev_priv(soft_device);
/* reserve a queue slot; refuse when the bounded queue is exhausted */
411 if (!atomic_dec_not_zero(&bcast_queue_left)) {
412 bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
416 forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
/* work on a private copy so the caller keeps its skb untouched */
421 skb = skb_copy(skb, GFP_ATOMIC);
425 /* as we have a copy now, it is safe to decrease the TTL */
426 bcast_packet = (struct bcast_packet *)skb->data;
429 skb_reset_mac_header(skb);
431 forw_packet->skb = skb;
432 forw_packet->packet_buff = NULL;
434 /* how often did we send the bcast packet ? */
435 forw_packet->num_packets = 0;
/* first transmission is scheduled almost immediately (1 jiffy) */
437 _add_bcast_packet_to_list(forw_packet, 1);
/* error path: give the reserved queue slot back before reporting busy */
443 atomic_inc(&bcast_queue_left);
445 return NETDEV_TX_BUSY;
/* Delayed-work handler: rebroadcast a queued broadcast packet on every
 * interface, then either requeue it (up to 3 transmissions) or free it
 * and release its queue slot. NOTE(review): excerpt omits the flags
 * declaration, the goto out/out label pair and closing braces. */
448 static void send_outstanding_bcast_packet(struct work_struct *work)
450 struct batman_if *batman_if;
451 struct delayed_work *delayed_work =
452 container_of(work, struct delayed_work, work);
453 struct forw_packet *forw_packet =
454 container_of(delayed_work, struct forw_packet, delayed_work);
456 struct sk_buff *skb1;
/* detach from the queue before doing any work */
458 spin_lock_irqsave(&forw_bcast_list_lock, flags);
459 hlist_del(&forw_packet->list);
460 spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
/* skip transmission entirely while the module shuts down */
462 if (atomic_read(&module_state) == MODULE_DEACTIVATING)
465 /* rebroadcast packet */
467 list_for_each_entry_rcu(batman_if, &if_list, list) {
468 /* send a copy of the saved skb */
/* each interface consumes its own copy; the saved skb is kept for
 * the remaining retransmissions */
469 skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
471 send_skb_packet(skb1,
472 batman_if, broadcast_addr);
476 forw_packet->num_packets++;
478 /* if we still have some more bcasts to send */
/* broadcasts are repeated 3 times, ~5ms apart, to raise delivery odds */
479 if (forw_packet->num_packets < 3) {
480 _add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
/* done: free the packet and return its reserved queue slot */
485 forw_packet_free(forw_packet);
486 atomic_inc(&bcast_queue_left);
/* Delayed-work handler for queued OGMs: unlink the packet, transmit
 * it, reschedule our own next OGM when appropriate, then free it.
 * NOTE(review): excerpt omits the flags declaration, the goto out/out
 * label pair and closing braces. */
489 void send_outstanding_bat_packet(struct work_struct *work)
491 struct delayed_work *delayed_work =
492 container_of(work, struct delayed_work, work);
493 struct forw_packet *forw_packet =
494 container_of(delayed_work, struct forw_packet, delayed_work);
497 spin_lock_irqsave(&forw_bat_list_lock, flags);
498 hlist_del(&forw_packet->list);
499 spin_unlock_irqrestore(&forw_bat_list_lock, flags);
/* no transmission while the module is being deactivated */
501 if (atomic_read(&module_state) == MODULE_DEACTIVATING)
504 send_packet(forw_packet);
507 * we have to have at least one packet in the queue
508 * to determine the queues wake up time unless we are
/* own OGMs reschedule themselves so the queue never runs dry */
511 if (forw_packet->own)
512 schedule_own_packet(forw_packet->if_incoming);
515 /* don't count own packet */
/* forwarded packets occupied a slot of the bounded batman queue */
516 if (!forw_packet->own)
517 atomic_inc(&batman_queue_left);
519 forw_packet_free(forw_packet);
/* Cancel and free every queued broadcast and OGM packet, either for
 * one interface (@batman_if != NULL) or for all of them (NULL).
 * NOTE(review): excerpt omits the flags declaration, the if/else
 * around the two bat_dbg calls, the "if ((batman_if) && ..." guard
 * lines, the restart of the safe iteration after relocking, and the
 * closing braces. */
522 void purge_outstanding_packets(struct batman_if *batman_if)
524 /* FIXME: each batman_if will be attached to a softif */
525 struct bat_priv *bat_priv = netdev_priv(soft_device);
526 struct forw_packet *forw_packet;
527 struct hlist_node *tmp_node, *safe_tmp_node;
531 bat_dbg(DBG_BATMAN, bat_priv,
532 "purge_outstanding_packets(): %s\n",
535 bat_dbg(DBG_BATMAN, bat_priv,
536 "purge_outstanding_packets()\n");
538 /* free bcast list */
539 spin_lock_irqsave(&forw_bcast_list_lock, flags);
540 hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
541 &forw_bcast_list, list) {
544 * if purge_outstanding_packets() was called with an argmument
545 * we delete only packets belonging to the given interface
548 (forw_packet->if_incoming != batman_if))
/* must drop the lock: cancel_delayed_work_sync() waits for the
 * handler, and the handler itself takes this lock */
551 spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
554 * send_outstanding_bcast_packet() will lock the list to
555 * delete the item from the list
557 cancel_delayed_work_sync(&forw_packet->delayed_work);
558 spin_lock_irqsave(&forw_bcast_list_lock, flags);
560 spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
562 /* free batman packet list */
/* same pattern for the OGM queue */
563 spin_lock_irqsave(&forw_bat_list_lock, flags);
564 hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
565 &forw_bat_list, list) {
568 * if purge_outstanding_packets() was called with an argmument
569 * we delete only packets belonging to the given interface
572 (forw_packet->if_incoming != batman_if))
575 spin_unlock_irqrestore(&forw_bat_list_lock, flags);
578 * send_outstanding_bat_packet() will lock the list to
579 * delete the item from the list
581 cancel_delayed_work_sync(&forw_packet->delayed_work);
582 spin_lock_irqsave(&forw_bat_list_lock, flags);
584 spin_unlock_irqrestore(&forw_bat_list_lock, flags);