drivers/infiniband/ulp/ipoib/ipoib_main.c
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include <linux/if_arp.h>       /* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
        struct net_device *dev;
        struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
        0x00, 0xff, 0xff, 0xff,
        0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
        .name   = "ipoib",
        .add    = ipoib_add_one,
        .remove = ipoib_remove_one
};

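/*
 * ipoib_open - bring the interface administratively up: enable NAPI,
 * open the IB resources and bring the device up, propagate IFF_UP to
 * any child (VLAN) interfaces, and finally start the TX queue.
 */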
int ipoib_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "bringing up interface\n");

        napi_enable(&priv->napi);
        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        if (ipoib_pkey_dev_delay_open(dev))
                return 0;

        if (ipoib_ib_dev_open(dev)) {
                napi_disable(&priv->napi);
                return -EINVAL;
        }

        if (ipoib_ib_dev_up(dev)) {
                ipoib_ib_dev_stop(dev, 1);
                napi_disable(&priv->napi);
                return -EINVAL;
        }

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring up any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (flags & IFF_UP)
                                continue;

                        dev_change_flags(cpriv->dev, flags | IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        netif_start_queue(dev);

        return 0;
}

static int ipoib_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "stopping interface\n");

        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
        napi_disable(&priv->napi);

        netif_stop_queue(dev);

        /*
         * Now flush workqueue to make sure a scheduled task doesn't
         * bring our internal state back up.
         */
        flush_workqueue(ipoib_workqueue);

        ipoib_ib_dev_down(dev, 1);
        ipoib_ib_dev_stop(dev, 1);

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring down any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (!(flags & IFF_UP))
                                continue;

                        dev_change_flags(cpriv->dev, flags & ~IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        return 0;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* dev->mtu > 2K ==> connected mode */
        if (ipoib_cm_admin_enabled(dev)) {
                if (new_mtu > ipoib_cm_max_mtu(dev))
                        return -EINVAL;

                if (new_mtu > priv->mcast_mtu)
                        ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
                                   priv->mcast_mtu);

                dev->mtu = new_mtu;
                return 0;
        }

        if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
                return -EINVAL;

        priv->admin_mtu = new_mtu;

        dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

        return 0;
}

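/*
 * __path_find - look up a path by destination GID in the per-device
 * red-black tree.  Callers hold priv->lock.
 */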
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->path_tree.rb_node;
        struct ipoib_path *path;
        int ret;

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                ret = memcmp(gid, path->pathrec.dgid.raw,
                             sizeof (union ib_gid));

                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        return path;
        }

        return NULL;
}

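/*
 * __path_add - insert a path into the RB tree and the path list,
 * keyed by destination GID.  Returns -EEXIST if a path for that GID
 * is already present.  Callers hold priv->lock.
 */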
static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node **n = &priv->path_tree.rb_node;
        struct rb_node *pn = NULL;
        struct ipoib_path *tpath;
        int ret;

        while (*n) {
                pn = *n;
                tpath = rb_entry(pn, struct ipoib_path, rb_node);

                ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = &pn->rb_left;
                else if (ret > 0)
                        n = &pn->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&path->rb_node, pn, n);
        rb_insert_color(&path->rb_node, &priv->path_tree);

        list_add_tail(&path->list, &priv->path_list);

        return 0;
}

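/*
 * path_free - drop every skb still queued on the path, release the
 * address handles of any neighbours bound to it, and free the path.
 */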
static void path_free(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = __skb_dequeue(&path->queue)))
                dev_kfree_skb_irq(skb);

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                /*
                 * It's safe to call ipoib_put_ah() inside priv->lock
                 * here, because we know that path->ah will always
                 * hold one more reference, so ipoib_put_ah() will
                 * never do more than decrement the ref count.
                 */
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);

                ipoib_neigh_free(dev, neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (path->ah)
                ipoib_put_ah(path->ah);

        kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
        struct ipoib_path_iter *iter;

        iter = kmalloc(sizeof *iter, GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        memset(iter->path.pathrec.dgid.raw, 0, 16);

        if (ipoib_path_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
        struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
        struct rb_node *n;
        struct ipoib_path *path;
        int ret = 1;

        spin_lock_irq(&priv->lock);

        n = rb_first(&priv->path_tree);

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
                           sizeof (union ib_gid)) < 0) {
                        iter->path = *path;
                        ret = 0;
                        break;
                }

                n = rb_next(n);
        }

        spin_unlock_irq(&priv->lock);

        return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
                          struct ipoib_path *path)
{
        *path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_flush_paths(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path, *tp;
        LIST_HEAD(remove_list);

        spin_lock_irq(&priv->tx_lock);
        spin_lock(&priv->lock);

        list_splice(&priv->path_list, &remove_list);
        INIT_LIST_HEAD(&priv->path_list);

        list_for_each_entry(path, &remove_list, list)
                rb_erase(&path->rb_node, &priv->path_tree);

        list_for_each_entry_safe(path, tp, &remove_list, list) {
                if (path->query)
                        ib_sa_cancel_query(path->query_id, path->query);
                spin_unlock(&priv->lock);
                spin_unlock_irq(&priv->tx_lock);
                wait_for_completion(&path->done);
                path_free(dev, path);
                spin_lock_irq(&priv->tx_lock);
                spin_lock(&priv->lock);
        }
        spin_unlock(&priv->lock);
        spin_unlock_irq(&priv->tx_lock);
}

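/*
 * path_rec_completion - SA path record query callback.  On success,
 * build an address handle from the returned record, attach it to the
 * path and to any neighbours waiting on it, and retransmit the skbs
 * that were queued while the lookup was in flight.
 */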
static void path_rec_completion(int status,
                                struct ib_sa_path_rec *pathrec,
                                void *path_ptr)
{
        struct ipoib_path *path = path_ptr;
        struct net_device *dev = path->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah = NULL;
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff_head skqueue;
        struct sk_buff *skb;
        unsigned long flags;

        if (!status)
                ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
                          be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
        else
                ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
                          status, IPOIB_GID_ARG(path->pathrec.dgid));

        skb_queue_head_init(&skqueue);

        if (!status) {
                struct ib_ah_attr av;

                if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
                        ah = ipoib_create_ah(dev, priv->pd, &av);
        }

        spin_lock_irqsave(&priv->lock, flags);

        path->ah = ah;

        if (ah) {
                path->pathrec = *pathrec;

                ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
                          ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

                while ((skb = __skb_dequeue(&path->queue)))
                        __skb_queue_tail(&skqueue, skb);

                list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                        kref_get(&path->ah->ref);
                        neigh->ah = path->ah;
                        memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
                               sizeof(union ib_gid));

                        if (ipoib_cm_enabled(dev, neigh->neighbour)) {
                                if (!ipoib_cm_get(neigh))
                                        ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
                                                                               path,
                                                                               neigh));
                                if (!ipoib_cm_get(neigh)) {
                                        list_del(&neigh->list);
                                        if (neigh->ah)
                                                ipoib_put_ah(neigh->ah);
                                        ipoib_neigh_free(dev, neigh);
                                        continue;
                                }
                        }

                        while ((skb = __skb_dequeue(&neigh->queue)))
                                __skb_queue_tail(&skqueue, skb);
                }
        }

        path->query = NULL;
        complete(&path->done);

        spin_unlock_irqrestore(&priv->lock, flags);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = dev;
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed "
                                   "to requeue packet\n");
        }
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        if (!priv->broadcast)
                return NULL;

        path = kzalloc(sizeof *path, GFP_ATOMIC);
        if (!path)
                return NULL;

        path->dev = dev;

        skb_queue_head_init(&path->queue);

        INIT_LIST_HEAD(&path->neigh_list);

        memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
        path->pathrec.sgid          = priv->local_gid;
        path->pathrec.pkey          = cpu_to_be16(priv->pkey);
        path->pathrec.numb_path     = 1;
        path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

        return path;
}

static int path_rec_start(struct net_device *dev,
                          struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
                  IPOIB_GID_ARG(path->pathrec.dgid));

        init_completion(&path->done);

        path->query_id =
                ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
                                   &path->pathrec,
                                   IB_SA_PATH_REC_DGID          |
                                   IB_SA_PATH_REC_SGID          |
                                   IB_SA_PATH_REC_NUMB_PATH     |
                                   IB_SA_PATH_REC_TRAFFIC_CLASS |
                                   IB_SA_PATH_REC_PKEY,
                                   1000, GFP_ATOMIC,
                                   path_rec_completion,
                                   path, &path->query);
        if (path->query_id < 0) {
                ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
                path->query = NULL;
                return path->query_id;
        }

        return 0;
}

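/*
 * neigh_add_path - first transmit to a destination: allocate an
 * ipoib_neigh, find or create the matching path, then either send
 * immediately (path already resolved) or queue the skb and kick off
 * a path record lookup.  Called under tx_lock from ipoib_start_xmit().
 */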
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        struct ipoib_neigh *neigh;

        neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
        if (!neigh) {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
                return;
        }

        /*
         * We can only be called from ipoib_start_xmit, so we're
         * inside tx_lock -- no need to save/restore flags.
         */
        spin_lock(&priv->lock);

        path = __path_find(dev, skb->dst->neighbour->ha + 4);
        if (!path) {
                path = path_rec_create(dev, skb->dst->neighbour->ha + 4);
                if (!path)
                        goto err_path;

                __path_add(dev, path);
        }

        list_add_tail(&neigh->list, &path->neigh_list);

        if (path->ah) {
                kref_get(&path->ah->ref);
                neigh->ah = path->ah;
                memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
                       sizeof(union ib_gid));

                if (ipoib_cm_enabled(dev, neigh->neighbour)) {
                        if (!ipoib_cm_get(neigh))
                                ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
                        if (!ipoib_cm_get(neigh)) {
                                list_del(&neigh->list);
                                if (neigh->ah)
                                        ipoib_put_ah(neigh->ah);
                                ipoib_neigh_free(dev, neigh);
                                goto err_drop;
                        }
                        if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
                                __skb_queue_tail(&neigh->queue, skb);
                        else {
                                ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
                                           skb_queue_len(&neigh->queue));
                                goto err_drop;
                        }
                } else
                        ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
        } else {
                neigh->ah  = NULL;

                if (!path->query && path_rec_start(dev, path))
                        goto err_list;

                __skb_queue_tail(&neigh->queue, skb);
        }

        spin_unlock(&priv->lock);
        return;

err_list:
        list_del(&neigh->list);

err_path:
        ipoib_neigh_free(dev, neigh);
err_drop:
        ++dev->stats.tx_dropped;
        dev_kfree_skb_any(skb);

        spin_unlock(&priv->lock);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

        /* Look up path record for unicasts */
        if (skb->dst->neighbour->ha[4] != 0xff) {
                neigh_add_path(skb, dev);
                return;
        }

        /* Add in the P_Key for multicasts */
        skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
        skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
        ipoib_mcast_send(dev, skb->dst->neighbour->ha + 4, skb);
}

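/*
 * unicast_arp_send - transmit a unicast packet that carries a
 * pseudoheader instead of a neighbour entry (ARP/RARP replies):
 * send, or queue against the path keyed by the GID in the
 * pseudoheader while a lookup runs.  Called under tx_lock.
 */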
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                             struct ipoib_pseudoheader *phdr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        /*
         * We can only be called from ipoib_start_xmit, so we're
         * inside tx_lock -- no need to save/restore flags.
         */
        spin_lock(&priv->lock);

        path = __path_find(dev, phdr->hwaddr + 4);
        if (!path) {
                path = path_rec_create(dev, phdr->hwaddr + 4);
                if (path) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof *phdr);
                        __skb_queue_tail(&path->queue, skb);

                        if (path_rec_start(dev, path)) {
                                spin_unlock(&priv->lock);
                                path_free(dev, path);
                                return;
                        } else
                                __path_add(dev, path);
                } else {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }

                spin_unlock(&priv->lock);
                return;
        }

        if (path->ah) {
                ipoib_dbg(priv, "Send unicast ARP to %04x\n",
                          be16_to_cpu(path->pathrec.dlid));

                ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
        } else if ((path->query || !path_rec_start(dev, path)) &&
                   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                /* put pseudoheader back on for next time */
                skb_push(skb, sizeof *phdr);
                __skb_queue_tail(&path->queue, skb);
        } else {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }

        spin_unlock(&priv->lock);
}

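/*
 * ipoib_start_xmit - main transmit entry point.  Dispatch to the
 * connected-mode QP, the datagram address handle, multicast, or a
 * path lookup, depending on what state the destination is in.
 */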
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
        unsigned long flags;

        if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
                return NETDEV_TX_LOCKED;

        if (likely(skb->dst && skb->dst->neighbour)) {
                if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
                        ipoib_path_lookup(skb, dev);
                        goto out;
                }

                neigh = *to_ipoib_neigh(skb->dst->neighbour);

                if (neigh->ah)
                        if (unlikely((memcmp(&neigh->dgid.raw,
                                            skb->dst->neighbour->ha + 4,
                                            sizeof(union ib_gid))) ||
                                         (neigh->dev != dev))) {
                                spin_lock(&priv->lock);
                                /*
                                 * It's safe to call ipoib_put_ah() inside
                                 * priv->lock here, because we know that
                                 * path->ah will always hold one more reference,
                                 * so ipoib_put_ah() will never do more than
                                 * decrement the ref count.
                                 */
                                ipoib_put_ah(neigh->ah);
                                list_del(&neigh->list);
                                ipoib_neigh_free(dev, neigh);
                                spin_unlock(&priv->lock);
                                ipoib_path_lookup(skb, dev);
                                goto out;
                        }

                if (ipoib_cm_get(neigh)) {
                        if (ipoib_cm_up(neigh)) {
                                ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
                                goto out;
                        }
                } else if (neigh->ah) {
                        ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
                        goto out;
                }

                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                        spin_lock(&priv->lock);
                        __skb_queue_tail(&neigh->queue, skb);
                        spin_unlock(&priv->lock);
                } else {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }
        } else {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb->data;
                skb_pull(skb, sizeof *phdr);

                if (phdr->hwaddr[4] == 0xff) {
                        /* Add in the P_Key for multicast */
                        phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
                        phdr->hwaddr[9] = priv->pkey & 0xff;

                        ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
                } else {
                        /* unicast GID -- should be ARP or RARP reply */

                        if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
                            (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
                                ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
                                           IPOIB_GID_FMT "\n",
                                           skb->dst ? "neigh" : "dst",
                                           be16_to_cpup((__be16 *) skb->data),
                                           IPOIB_QPN(phdr->hwaddr),
                                           IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
                                dev_kfree_skb_any(skb);
                                ++dev->stats.tx_dropped;
                                goto out;
                        }

                        unicast_arp_send(skb, dev, phdr);
                }
        }

out:
        spin_unlock_irqrestore(&priv->tx_lock, flags);

        return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
                   jiffies_to_msecs(jiffies - dev->trans_start));
        ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
                   netif_queue_stopped(dev),
                   priv->tx_head, priv->tx_tail);
        /* XXX reset QP, etc. */
}

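/*
 * ipoib_hard_header - build the 4-byte IPoIB encapsulation header.
 * When no neighbour is attached, the destination hardware address is
 * pushed as a pseudoheader so ipoib_start_xmit() can recover it.
 */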
static int ipoib_hard_header(struct sk_buff *skb,
                             struct net_device *dev,
                             unsigned short type,
                             const void *daddr, const void *saddr, unsigned len)
{
        struct ipoib_header *header;

        header = (struct ipoib_header *) skb_push(skb, sizeof *header);

        header->proto = htons(type);
        header->reserved = 0;

        /*
         * If we don't have a neighbour structure, stuff the
         * destination address onto the front of the skb so we can
         * figure out where to send the packet later.
         */
        if ((!skb->dst || !skb->dst->neighbour) && daddr) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
                memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
        }

        return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
                ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
                return;
        }

        queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_cleanup(struct neighbour *n)
{
        struct ipoib_neigh *neigh;
        struct ipoib_dev_priv *priv = netdev_priv(n->dev);
        unsigned long flags;
        struct ipoib_ah *ah = NULL;

        neigh = *to_ipoib_neigh(n);
        if (neigh)
                priv = netdev_priv(neigh->dev);
        else
                return;
        ipoib_dbg(priv,
                  "neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
                  IPOIB_QPN(n->ha),
                  IPOIB_GID_RAW_ARG(n->ha + 4));

        spin_lock_irqsave(&priv->lock, flags);

        if (neigh->ah)
                ah = neigh->ah;
        list_del(&neigh->list);
        ipoib_neigh_free(n->dev, neigh);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ah)
                ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
                                      struct net_device *dev)
{
        struct ipoib_neigh *neigh;

        neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
        if (!neigh)
                return NULL;

        neigh->neighbour = neighbour;
        neigh->dev = dev;
        *to_ipoib_neigh(neighbour) = neigh;
        skb_queue_head_init(&neigh->queue);
        ipoib_cm_set(neigh, NULL);

        return neigh;
}

void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
        struct sk_buff *skb;
        *to_ipoib_neigh(neigh->neighbour) = NULL;
        while ((skb = __skb_dequeue(&neigh->queue))) {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }
        if (ipoib_cm_get(neigh))
                ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
        kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
        parms->neigh_cleanup = ipoib_neigh_cleanup;

        return 0;
}

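/*
 * ipoib_dev_init - allocate the RX/TX rings and initialize the IB
 * resources for one port.  Returns -ENOMEM on failure, with anything
 * allocated so far rolled back.
 */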
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* Allocate RX/TX "rings" to hold queued skbs */
        priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
                                GFP_KERNEL);
        if (!priv->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                       ca->name, ipoib_recvq_size);
                goto out;
        }

        priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring,
                                GFP_KERNEL);
        if (!priv->tx_ring) {
                printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                       ca->name, ipoib_sendq_size);
                goto out_rx_ring_cleanup;
        }

        /* priv->tx_head, tx_tail & tx_outstanding are already 0 */

        if (ipoib_ib_dev_init(dev, ca, port))
                goto out_tx_ring_cleanup;

        return 0;

out_tx_ring_cleanup:
        kfree(priv->tx_ring);

out_rx_ring_cleanup:
        kfree(priv->rx_ring);

out:
        return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

        ipoib_delete_debug_files(dev);

        /* Delete any child interfaces first */
        list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
                unregister_netdev(cpriv->dev);
                ipoib_dev_cleanup(cpriv->dev);
                free_netdev(cpriv->dev);
        }

        ipoib_ib_dev_cleanup(dev);

        kfree(priv->rx_ring);
        kfree(priv->tx_ring);

        priv->rx_ring = NULL;
        priv->tx_ring = NULL;
}

static const struct header_ops ipoib_header_ops = {
        .create = ipoib_hard_header,
};

static void ipoib_setup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        dev->open                = ipoib_open;
        dev->stop                = ipoib_stop;
        dev->change_mtu          = ipoib_change_mtu;
        dev->hard_start_xmit     = ipoib_start_xmit;
        dev->tx_timeout          = ipoib_timeout;
        dev->header_ops          = &ipoib_header_ops;
        dev->set_multicast_list  = ipoib_set_mcast_list;
        dev->neigh_setup         = ipoib_neigh_setup_dev;

        netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

        dev->watchdog_timeo      = HZ;

        dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;

        /*
         * We add in INFINIBAND_ALEN to allow for the destination
         * address "pseudoheader" for skbs without neighbour struct.
         */
        dev->hard_header_len     = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
        dev->addr_len            = INFINIBAND_ALEN;
        dev->type                = ARPHRD_INFINIBAND;
        dev->tx_queue_len        = ipoib_sendq_size * 2;
        dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

        /* MTU will be reset when mcast join happens */
        dev->mtu                 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
        priv->mcast_mtu          = priv->admin_mtu = dev->mtu;

        memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

        netif_carrier_off(dev);

        priv->dev = dev;

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->tx_lock);

        mutex_init(&priv->mcast_mutex);
        mutex_init(&priv->vlan_mutex);

        INIT_LIST_HEAD(&priv->path_list);
        INIT_LIST_HEAD(&priv->child_intfs);
        INIT_LIST_HEAD(&priv->dead_ahs);
        INIT_LIST_HEAD(&priv->multicast_list);

        INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
        INIT_WORK(&priv->pkey_event_task, ipoib_pkey_event);
        INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
        INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush);
        INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
        INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
        struct net_device *dev;

        dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
                           ipoib_setup);
        if (!dev)
                return NULL;

        return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

        return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

        return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

static ssize_t set_umcast(struct device *dev,
                          struct device_attribute *attr,
                          const char *buf, size_t count)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
        unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

        if (umcast_val > 0) {
                set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
                ipoib_warn(priv, "ignoring multicast groups joined directly "
                                "by userspace\n");
        } else
                clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);

        return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_umcast);
}

static ssize_t create_child(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        pkey |= 0x8000;

        ret = ipoib_vlan_add(to_net_dev(dev), pkey);

        return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

        return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_pkey);
}

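/*
 * ipoib_add_port - create and register one ipoib net_device for a
 * physical HCA port: query the P_Key and GID, initialize the device,
 * hook up the IB event handler, and create the sysfs attributes.
 */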
static struct net_device *ipoib_add_port(const char *format,
                                         struct ib_device *hca, u8 port)
{
        struct ipoib_dev_priv *priv;
        int result = -ENOMEM;

        priv = ipoib_intf_alloc(format);
        if (!priv)
                goto alloc_mem_failed;

        SET_NETDEV_DEV(priv->dev, hca->dma_device);

        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        }

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        priv->pkey |= 0x8000;

        priv->dev->broadcast[8] = priv->pkey >> 8;
        priv->dev->broadcast[9] = priv->pkey & 0xff;

        result = ib_query_gid(hca, port, 0, &priv->local_gid);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        } else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

        result = ipoib_dev_init(priv->dev, hca, port);
        if (result < 0) {
                printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        }

        INIT_IB_EVENT_HANDLER(&priv->event_handler,
                              priv->ca, ipoib_event);
        result = ib_register_event_handler(&priv->event_handler);
        if (result < 0) {
                printk(KERN_WARNING "%s: ib_register_event_handler failed for "
                       "port %d (ret = %d)\n",
                       hca->name, port, result);
                goto event_failed;
        }

        result = register_netdev(priv->dev);
        if (result) {
                printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
                       hca->name, port, result);
                goto register_failed;
        }

        ipoib_create_debug_files(priv->dev);

        if (ipoib_cm_add_mode_attr(priv->dev))
                goto sysfs_failed;
        if (ipoib_add_pkey_attr(priv->dev))
                goto sysfs_failed;
        if (ipoib_add_umcast_attr(priv->dev))
                goto sysfs_failed;
        if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
                goto sysfs_failed;
        if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
                goto sysfs_failed;

        return priv->dev;

sysfs_failed:
        ipoib_delete_debug_files(priv->dev);
        unregister_netdev(priv->dev);

register_failed:
        ib_unregister_event_handler(&priv->event_handler);
        flush_scheduled_work();

event_failed:
        ipoib_dev_cleanup(priv->dev);

device_init_failed:
        free_netdev(priv->dev);

alloc_mem_failed:
        return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
        struct list_head *dev_list;
        struct net_device *dev;
        struct ipoib_dev_priv *priv;
        int s, e, p;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
        if (!dev_list)
                return;

        INIT_LIST_HEAD(dev_list);

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                s = 0;
                e = 0;
        } else {
                s = 1;
                e = device->phys_port_cnt;
        }

        for (p = s; p <= e; ++p) {
                dev = ipoib_add_port("ib%d", device, p);
                if (!IS_ERR(dev)) {
                        priv = netdev_priv(dev);
                        list_add_tail(&priv->list, dev_list);
                }
        }

        ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
        struct ipoib_dev_priv *priv, *tmp;
        struct list_head *dev_list;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        dev_list = ib_get_client_data(device, &ipoib_client);

        list_for_each_entry_safe(priv, tmp, dev_list, list) {
                ib_unregister_event_handler(&priv->event_handler);
                flush_scheduled_work();

                unregister_netdev(priv->dev);
                ipoib_dev_cleanup(priv->dev);
                free_netdev(priv->dev);
        }

        kfree(dev_list);
}

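/*
 * ipoib_init_module - clamp the queue sizes to sane powers of two,
 * set up debugfs and the private workqueue, and register with the
 * SA and device-client infrastructure.
 */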
static int __init ipoib_init_module(void)
{
        int ret;

        ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
        ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

        ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
        ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_sendq_size = max(ipoib_sendq_size, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
        ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

        ret = ipoib_register_debugfs();
        if (ret)
                return ret;

        /*
         * We create our own workqueue mainly because we want to be
         * able to flush it when devices are being removed.  We can't
         * use schedule_work()/flush_scheduled_work() because both
         * unregister_netdev() and linkwatch_event take the rtnl lock,
         * so flush_scheduled_work() can deadlock during device
         * removal.
         */
        ipoib_workqueue = create_singlethread_workqueue("ipoib");
        if (!ipoib_workqueue) {
                ret = -ENOMEM;
                goto err_fs;
        }

        ib_sa_register_client(&ipoib_sa_client);

        ret = ib_register_client(&ipoib_client);
        if (ret)
                goto err_sa;

        return 0;

err_sa:
        ib_sa_unregister_client(&ipoib_sa_client);
        destroy_workqueue(ipoib_workqueue);

err_fs:
        ipoib_unregister_debugfs();

        return ret;
}

static void __exit ipoib_cleanup_module(void)
{
        ib_unregister_client(&ipoib_client);
        ib_sa_unregister_client(&ipoib_sa_client);
        ipoib_unregister_debugfs();
        destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);