drivers/staging/batman-adv/translation-table.c
/*
 * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "types.h"
#include "hash.h"

static void hna_local_purge(struct work_struct *work);
static void _hna_global_del_orig(struct bat_priv *bat_priv,
                                 struct hna_global_entry *hna_global_entry,
                                 char *message);

static void hna_local_start_timer(struct bat_priv *bat_priv)
{
        INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
        queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
}

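/* hna_local_init - allocate the local HNA hash table (if not already
 * present) and start the periodic purge timer; returns 1 on success,
 * 0 if the hash table could not be allocated
 */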
int hna_local_init(struct bat_priv *bat_priv)
{
        if (bat_priv->hna_local_hash)
                return 1;

        bat_priv->hna_local_hash = hash_new(128, compare_orig, choose_orig);

        if (!bat_priv->hna_local_hash)
                return 0;

        atomic_set(&bat_priv->hna_local_changed, 0);
        hna_local_start_timer(bat_priv);

        return 1;
}

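/* hna_local_add - announce a client MAC address on the soft interface:
 * refresh the entry if it is already known, otherwise create it (unless
 * the batman packet would overflow) and drop any matching global entry
 */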
void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
{
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
        struct hna_local_entry *hna_local_entry;
        struct hna_global_entry *hna_global_entry;
        struct hashtable_t *swaphash;
        unsigned long flags;
        int required_bytes;

        spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
        hna_local_entry =
                ((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
                                                     addr));
        spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);

        if (hna_local_entry) {
                hna_local_entry->last_seen = jiffies;
                return;
        }

        /* only announce as many hosts as fit into the batman-packet and
           into batman_packet->num_hna; this also limits the impact of
           MAC flooding */
        required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN;
        required_bytes += BAT_PACKET_LEN;

        if ((required_bytes > ETH_DATA_LEN) ||
            (atomic_read(&bat_priv->aggregation_enabled) &&
             required_bytes > MAX_AGGREGATION_BYTES) ||
            (bat_priv->num_local_hna + 1 > 255)) {
                bat_dbg(DBG_ROUTES, bat_priv,
                        "Can't add new local hna entry (%pM): "
                        "number of local hna entries exceeds packet size\n",
                        addr);
                return;
        }

        bat_dbg(DBG_ROUTES, bat_priv,
                "Creating new local hna entry: %pM\n", addr);

        hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
        if (!hna_local_entry)
                return;

        memcpy(hna_local_entry->addr, addr, ETH_ALEN);
        hna_local_entry->last_seen = jiffies;

        /* the batman interface mac address should never be purged */
        if (compare_orig(addr, soft_iface->dev_addr))
                hna_local_entry->never_purge = 1;
        else
                hna_local_entry->never_purge = 0;

        spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);

        hash_add(bat_priv->hna_local_hash, hna_local_entry);
        bat_priv->num_local_hna++;
        atomic_set(&bat_priv->hna_local_changed, 1);

        if (bat_priv->hna_local_hash->elements * 4 >
                                        bat_priv->hna_local_hash->size) {
                swaphash = hash_resize(bat_priv->hna_local_hash,
                                       bat_priv->hna_local_hash->size * 2);

                if (!swaphash)
                        pr_err("Couldn't resize local hna hash table\n");
                else
                        bat_priv->hna_local_hash = swaphash;
        }

        spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);

        /* remove address from global hash if present */
        spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);

        hna_global_entry = ((struct hna_global_entry *)
                                hash_find(bat_priv->hna_global_hash, addr));

        if (hna_global_entry)
                _hna_global_del_orig(bat_priv, hna_global_entry,
                                     "local hna received");

        spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
}

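/* hna_local_fill_buffer - copy local HNA entries into buff (at most
 * buff_len bytes) and return the number of entries written; the
 * "changed" flag is only cleared if all entries fit
 */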
int hna_local_fill_buffer(struct bat_priv *bat_priv,
                          unsigned char *buff, int buff_len)
{
        struct hna_local_entry *hna_local_entry;
        HASHIT(hashit);
        int i = 0;
        unsigned long flags;

        spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);

        while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {

                if (buff_len < (i + 1) * ETH_ALEN)
                        break;

                hna_local_entry = hashit.bucket->data;
                memcpy(buff + (i * ETH_ALEN), hna_local_entry->addr, ETH_ALEN);

                i++;
        }

        /* if we did not get all new local hnas see you next time  ;-) */
        if (i == bat_priv->num_local_hna)
                atomic_set(&bat_priv->hna_local_changed, 0);

        spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
        return i;
}

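/* hna_local_seq_print_text - print all locally announced HNA entries via
 * the seq_file interface
 */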
int hna_local_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hna_local_entry *hna_local_entry;
        HASHIT(hashit);
        HASHIT(hashit_count);
        unsigned long flags;
        size_t buf_size, pos;
        char *buff;

        if (!bat_priv->primary_if) {
                return seq_printf(seq, "BATMAN mesh %s disabled - "
                               "please specify interfaces to enable it\n",
                               net_dev->name);
        }

        seq_printf(seq, "Locally retrieved addresses (from %s) "
                   "announced via HNA:\n",
                   net_dev->name);

        spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);

        buf_size = 1;
        /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
        while (hash_iterate(bat_priv->hna_local_hash, &hashit_count))
                buf_size += 21;

        buff = kmalloc(buf_size, GFP_ATOMIC);
        if (!buff) {
                spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
                return -ENOMEM;
        }
        buff[0] = '\0';
        pos = 0;

        while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
                hna_local_entry = hashit.bucket->data;

                pos += snprintf(buff + pos, 22, " * %pM\n",
                                hna_local_entry->addr);
        }

        spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);

        seq_printf(seq, "%s", buff);
        kfree(buff);
        return 0;
}

static void _hna_local_del(void *data, void *arg)
{
        struct bat_priv *bat_priv = (struct bat_priv *)arg;

        kfree(data);
        bat_priv->num_local_hna--;
        atomic_set(&bat_priv->hna_local_changed, 1);
}

static void hna_local_del(struct bat_priv *bat_priv,
                          struct hna_local_entry *hna_local_entry,
                          char *message)
{
        bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
                hna_local_entry->addr, message);

        hash_remove(bat_priv->hna_local_hash, hna_local_entry->addr);
        _hna_local_del(hna_local_entry, bat_priv);
}

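/* hna_local_remove - delete the local HNA entry matching addr (if any);
 * the message is logged together with the deletion
 */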
void hna_local_remove(struct bat_priv *bat_priv,
                      uint8_t *addr, char *message)
{
        struct hna_local_entry *hna_local_entry;
        unsigned long flags;

        spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);

        hna_local_entry = (struct hna_local_entry *)
                hash_find(bat_priv->hna_local_hash, addr);
        if (hna_local_entry)
                hna_local_del(bat_priv, hna_local_entry, message);

        spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
}

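/* hna_local_purge - periodic worker that removes local HNA entries not
 * seen for LOCAL_HNA_TIMEOUT seconds (never-purge entries, i.e. the soft
 * interface address, are kept) and re-arms the timer
 */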
static void hna_local_purge(struct work_struct *work)
{
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct bat_priv *bat_priv =
                container_of(delayed_work, struct bat_priv, hna_work);
        struct hna_local_entry *hna_local_entry;
        HASHIT(hashit);
        unsigned long flags;
        unsigned long timeout;

        spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);

        while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
                hna_local_entry = hashit.bucket->data;

                timeout = hna_local_entry->last_seen + LOCAL_HNA_TIMEOUT * HZ;

                if ((!hna_local_entry->never_purge) &&
                    time_after(jiffies, timeout))
                        hna_local_del(bat_priv, hna_local_entry,
                                      "address timed out");
        }

        spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
        hna_local_start_timer(bat_priv);
}

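/* hna_local_free - cancel the purge worker and free the local HNA hash */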
void hna_local_free(struct bat_priv *bat_priv)
{
        if (!bat_priv->hna_local_hash)
                return;

        cancel_delayed_work_sync(&bat_priv->hna_work);
        hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
        bat_priv->hna_local_hash = NULL;
}

int hna_global_init(struct bat_priv *bat_priv)
{
        if (bat_priv->hna_global_hash)
                return 1;

        bat_priv->hna_global_hash = hash_new(128, compare_orig, choose_orig);

        if (!bat_priv->hna_global_hash)
                return 0;

        return 1;
}

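/* hna_global_add_orig - add the HNA entries announced by orig_node (taken
 * from the received hna_buff) to the global table, remove any clashing
 * local entries and keep a copy of the buffer in the originator
 */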
void hna_global_add_orig(struct bat_priv *bat_priv,
                         struct orig_node *orig_node,
                         unsigned char *hna_buff, int hna_buff_len)
{
        struct hna_global_entry *hna_global_entry;
        struct hna_local_entry *hna_local_entry;
        struct hashtable_t *swaphash;
        int hna_buff_count = 0;
        unsigned long flags;
        unsigned char *hna_ptr;

        while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
                spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);

                hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
                hna_global_entry = (struct hna_global_entry *)
                        hash_find(bat_priv->hna_global_hash, hna_ptr);

                if (!hna_global_entry) {
                        spin_unlock_irqrestore(&bat_priv->hna_ghash_lock,
                                               flags);

                        hna_global_entry =
                                kmalloc(sizeof(struct hna_global_entry),
                                        GFP_ATOMIC);

                        if (!hna_global_entry)
                                break;

                        memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);

                        bat_dbg(DBG_ROUTES, bat_priv,
                                "Creating new global hna entry: "
                                "%pM (via %pM)\n",
                                hna_global_entry->addr, orig_node->orig);

                        spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
                        hash_add(bat_priv->hna_global_hash, hna_global_entry);

                }

                hna_global_entry->orig_node = orig_node;
                spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);

                /* remove address from local hash if present */
                spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);

                hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
                hna_local_entry = (struct hna_local_entry *)
                        hash_find(bat_priv->hna_local_hash, hna_ptr);

                if (hna_local_entry)
                        hna_local_del(bat_priv, hna_local_entry,
                                      "global hna received");

                spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);

                hna_buff_count++;
        }

        /* initialize, and overwrite if malloc succeeds */
        orig_node->hna_buff = NULL;
        orig_node->hna_buff_len = 0;

        if (hna_buff_len > 0) {
                orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
                if (orig_node->hna_buff) {
                        memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
                        orig_node->hna_buff_len = hna_buff_len;
                }
        }

        spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);

        if (bat_priv->hna_global_hash->elements * 4 >
                                        bat_priv->hna_global_hash->size) {
                swaphash = hash_resize(bat_priv->hna_global_hash,
                                       bat_priv->hna_global_hash->size * 2);

                if (!swaphash)
                        pr_err("Couldn't resize global hna hash table\n");
                else
                        bat_priv->hna_global_hash = swaphash;
        }

        spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
}

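/* hna_global_seq_print_text - print all global HNA entries and the
 * originators announcing them via the seq_file interface
 */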
int hna_global_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hna_global_entry *hna_global_entry;
        HASHIT(hashit);
        HASHIT(hashit_count);
        unsigned long flags;
        size_t buf_size, pos;
        char *buff;

        if (!bat_priv->primary_if) {
                return seq_printf(seq, "BATMAN mesh %s disabled - "
                                  "please specify interfaces to enable it\n",
                                  net_dev->name);
        }

        seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
                   net_dev->name);

        spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);

        buf_size = 1;
        /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
        while (hash_iterate(bat_priv->hna_global_hash, &hashit_count))
                buf_size += 43;

        buff = kmalloc(buf_size, GFP_ATOMIC);
        if (!buff) {
                spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
                return -ENOMEM;
        }
        buff[0] = '\0';
        pos = 0;

        while (hash_iterate(bat_priv->hna_global_hash, &hashit)) {
                hna_global_entry = hashit.bucket->data;

                pos += snprintf(buff + pos, 44,
                                " * %pM via %pM\n", hna_global_entry->addr,
                                hna_global_entry->orig_node->orig);
        }

        spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);

        seq_printf(seq, "%s", buff);
        kfree(buff);
        return 0;
}

static void _hna_global_del_orig(struct bat_priv *bat_priv,
                                 struct hna_global_entry *hna_global_entry,
                                 char *message)
{
        bat_dbg(DBG_ROUTES, bat_priv,
                "Deleting global hna entry %pM (via %pM): %s\n",
                hna_global_entry->addr, hna_global_entry->orig_node->orig,
                message);

        hash_remove(bat_priv->hna_global_hash, hna_global_entry->addr);
        kfree(hna_global_entry);
}

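/* hna_global_del_orig - remove all global HNA entries that were announced
 * by orig_node and free the originator's HNA buffer
 */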
void hna_global_del_orig(struct bat_priv *bat_priv,
                         struct orig_node *orig_node, char *message)
{
        struct hna_global_entry *hna_global_entry;
        int hna_buff_count = 0;
        unsigned long flags;
        unsigned char *hna_ptr;

        if (orig_node->hna_buff_len == 0)
                return;

        spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);

        while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
                hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
                hna_global_entry = (struct hna_global_entry *)
                        hash_find(bat_priv->hna_global_hash, hna_ptr);

                if ((hna_global_entry) &&
                    (hna_global_entry->orig_node == orig_node))
                        _hna_global_del_orig(bat_priv, hna_global_entry,
                                             message);

                hna_buff_count++;
        }

        spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);

        orig_node->hna_buff_len = 0;
        kfree(orig_node->hna_buff);
        orig_node->hna_buff = NULL;
}

static void hna_global_del(void *data, void *arg)
{
        kfree(data);
}

void hna_global_free(struct bat_priv *bat_priv)
{
        if (!bat_priv->hna_global_hash)
                return;

        hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
        bat_priv->hna_global_hash = NULL;
}

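/* transtable_search - look up the originator that announced the given
 * client address; returns NULL if the address is not known
 */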
struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
{
        struct hna_global_entry *hna_global_entry;
        unsigned long flags;

        spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
        hna_global_entry = (struct hna_global_entry *)
                                hash_find(bat_priv->hna_global_hash, addr);
        spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);

        if (!hna_global_entry)
                return NULL;

        return hna_global_entry->orig_node;
}