Linux 3.2.102 - [pandora-kernel.git] net/batman-adv/send.c
/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"
#include "bat_ogm.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface */
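/* note: the skb is consumed in all cases: on any error path it is freed here
 * and NET_XMIT_DROP is returned, otherwise it is handed to dev_queue_xmit()
 * and that function's return value is passed through */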
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
                    const uint8_t *dst_addr)
{
        struct ethhdr *ethhdr;

        if (hard_iface->if_status != IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;

        if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warning("Interface %s is not up - can't send packet via that interface!\n",
                           hard_iface->net_dev->name);
                goto send_skb_err;
        }

        /* make room for the ethernet header */
        if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->priority = TC_PRIO_CONTROL;
        skb->protocol = htons(ETH_P_BATMAN);

        skb->dev = hard_iface->net_dev;

        /* dev_queue_xmit() returns a negative result on error.  However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error. */

        return dev_queue_xmit(skb);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

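/* grow or shrink the per-interface OGM packet buffer to new_len bytes. Only
 * the first BATMAN_OGM_LEN bytes (the OGM header itself) are copied over; if
 * the allocation fails the old buffer and length are kept */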
static void realloc_packet_buffer(struct hard_iface *hard_iface,
                                  int new_len)
{
        unsigned char *new_buff;

        new_buff = kmalloc(new_len, GFP_ATOMIC);

        /* keep the old buffer if kmalloc fails */
        if (new_buff) {
                memcpy(new_buff, hard_iface->packet_buff,
                       BATMAN_OGM_LEN);

                kfree(hard_iface->packet_buff);
                hard_iface->packet_buff = new_buff;
                hard_iface->packet_len = new_len;
        }
}

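/* append the local TT changes behind the OGM header in the (re)sized packet
 * buffer; the return value is used as tt_num_changes for the scheduled OGM */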
/* when calling this function (hard_iface == primary_if) has to be true */
static int prepare_packet_buffer(struct bat_priv *bat_priv,
                                  struct hard_iface *hard_iface)
{
        int new_len;

        new_len = BATMAN_OGM_LEN +
                  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

        /* if we have too many changes for one packet don't send any of them
         * and wait for the TT table request, which will be fragmented */
        if (new_len > hard_iface->soft_iface->mtu)
                new_len = BATMAN_OGM_LEN;

        realloc_packet_buffer(hard_iface, new_len);

        atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

        /* reset the sending counter */
        atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

        return tt_changes_fill_buffer(bat_priv,
                                      hard_iface->packet_buff + BATMAN_OGM_LEN,
                                      hard_iface->packet_len - BATMAN_OGM_LEN);
}

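/* shrink the packet buffer back to a bare OGM header, dropping any TT changes
 * still queued in it; always returns 0 (no TT changes to advertise) */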
static int reset_packet_buffer(struct bat_priv *bat_priv,
                                struct hard_iface *hard_iface)
{
        realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
        return 0;
}

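/* prepare the next OGM for hard_iface: on the primary interface the pending
 * local TT changes are committed and packed into the packet buffer (or the
 * buffer is reset once the changes have been advertised TT_OGM_APPEND_MAX
 * times), then the OGM is handed to bat_ogm_schedule() for transmission */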
void schedule_bat_ogm(struct hard_iface *hard_iface)
{
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct hard_iface *primary_if;
        int tt_num_changes = -1;

        if ((hard_iface->if_status == IF_NOT_IN_USE) ||
            (hard_iface->if_status == IF_TO_BE_REMOVED))
                return;

        /*
         * the interface gets activated here to avoid race conditions between
         * the moment of activating the interface in
         * hardif_activate_interface() (where the originator mac is set) and
         * outdated packets (especially with uninitialized mac addresses) in
         * the packet queue
         */
        if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
                hard_iface->if_status = IF_ACTIVE;

        primary_if = primary_if_get_selected(bat_priv);

        if (hard_iface == primary_if) {
                /* if at least one change happened */
                if (atomic_read(&bat_priv->tt_local_changes) > 0) {
                        tt_commit_changes(bat_priv);
                        tt_num_changes = prepare_packet_buffer(bat_priv,
                                                               hard_iface);
                }

                /* if the changes have been sent often enough */
                if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
                        tt_num_changes = reset_packet_buffer(bat_priv,
                                                             hard_iface);
        }

        if (primary_if)
                hardif_free_ref(primary_if);

        bat_ogm_schedule(hard_iface, tt_num_changes);
}

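/* free a queued forwarding packet together with the references it holds:
 * the pending skb (if any) and the reference on the incoming hard interface */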
static void forw_packet_free(struct forw_packet *forw_packet)
{
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
        if (forw_packet->if_incoming)
                hardif_free_ref(forw_packet->if_incoming);
        kfree(forw_packet);
}

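/* enqueue the packet on the broadcast list and arm its delayed work so that
 * send_outstanding_bcast_packet() runs send_time jiffies from now; both
 * callers in this file already hold a slot in bcast_queue_left for the
 * packet */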
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
                                      struct forw_packet *forw_packet,
                                      unsigned long send_time)
{
        INIT_HLIST_NODE(&forw_packet->list);

        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* start timer for this packet */
        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          send_outstanding_bcast_packet);
        queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
                             const struct sk_buff *skb, unsigned long delay)
{
        struct hard_iface *primary_if = NULL;
        struct forw_packet *forw_packet;
        struct bcast_packet *bcast_packet;
        struct sk_buff *newskb;

        if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
                bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
                goto out;
        }

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out_and_inc;

        forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

        if (!forw_packet)
                goto out_and_inc;

        newskb = skb_copy(skb, GFP_ATOMIC);
        if (!newskb)
                goto packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct bcast_packet *)newskb->data;
        bcast_packet->ttl--;

        skb_reset_mac_header(newskb);

        forw_packet->skb = newskb;
        forw_packet->if_incoming = primary_if;

        /* how often did we send the bcast packet? */
        forw_packet->num_packets = 0;

        _add_bcast_packet_to_list(bat_priv, forw_packet, delay);
        return NETDEV_TX_OK;

packet_free:
        kfree(forw_packet);
out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
out:
        if (primary_if)
                hardif_free_ref(primary_if);
        return NETDEV_TX_BUSY;
}

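/* delayed-work handler for a queued broadcast: takes the packet off the
 * broadcast list and rebroadcasts it on every hard interface attached to the
 * soft interface. Each packet is transmitted three times in total, re-queued
 * with a delay of (5 * HZ) / 1000 jiffies (about 5 ms); after the last
 * transmission (or while the mesh is deactivating) the packet is freed and
 * its queue slot released */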
static void send_outstanding_bcast_packet(struct work_struct *work)
{
        struct hard_iface *hard_iface;
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct sk_buff *skb1;
        struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
        struct bat_priv *bat_priv = netdev_priv(soft_iface);

        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        send_skb_packet(skb1, hard_iface, broadcast_addr);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send */
        if (forw_packet->num_packets < 3) {
                _add_bcast_packet_to_list(bat_priv, forw_packet,
                                          ((5 * HZ) / 1000));
                return;
        }

out:
        forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
}

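/* delayed-work handler for a queued OGM: takes the packet off the OGM list
 * and emits it via bat_ogm_emit(); for our own OGMs the next one is scheduled
 * right away so the queue never runs empty while the mesh is active */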
void send_outstanding_bat_ogm_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct bat_priv *bat_priv;

        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        bat_ogm_emit(forw_packet);

        /*
         * we have to have at least one packet in the queue
         * to determine the queue's wake-up time, unless we are
         * shutting down
         */
        if (forw_packet->own)
                schedule_bat_ogm(forw_packet->if_incoming);

out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);

        forw_packet_free(forw_packet);
}

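/* cancel and free all queued broadcast and OGM packets; if hard_iface is
 * given, only packets queued on that interface are purged. The list locks
 * are dropped around cancel_delayed_work_sync() because the work handlers
 * take the same locks to remove themselves from the lists */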
void purge_outstanding_packets(struct bat_priv *bat_priv,
                               const struct hard_iface *hard_iface)
{
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;
        bool pending;

        if (hard_iface)
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets(): %s\n",
                        hard_iface->net_dev->name);
        else
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {

                /*
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

                /*
                 * send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        if (!forw_packet->own)
                                atomic_inc(&bat_priv->bcast_queue_left);

                        forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {

                /*
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bat_list_lock);

                /*
                 * send_outstanding_bat_ogm_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        if (!forw_packet->own)
                                atomic_inc(&bat_priv->batman_queue_left);

                        forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}