/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"

#define ENIC_NOTIFY_TIMER_PERIOD        (2 * HZ)
#define WQ_ENET_MAX_DESC_LEN            (1 << WQ_ENET_LEN_BITS)
#define MAX_TSO                         (1 << 16)
#define ENIC_DESC_MAX_SPLITS            (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
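/* A single TSO send of up to MAX_TSO bytes may be split into as many as
 * ENIC_DESC_MAX_SPLITS descriptors of WQ_ENET_MAX_DESC_LEN bytes each;
 * the xmit path uses this bound for its queue stop/wake thresholds.
 */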

#define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN     0x0044  /* enet dynamic vnic */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
        { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
        { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
        { 0, }  /* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

struct enic_stat {
        char name[ETH_GSTRING_LEN];
        unsigned int offset;
};

#define ENIC_TX_STAT(stat)      \
        { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)      \
        { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
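/* Note: the byte offset is divided by 8 to yield an index into the stats
 * struct viewed as an array of u64s (see enic_get_ethtool_stats); this
 * assumes every vnic_tx_stats/vnic_rx_stats field is a u64.
 */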

static const struct enic_stat enic_tx_stats[] = {
        ENIC_TX_STAT(tx_frames_ok),
        ENIC_TX_STAT(tx_unicast_frames_ok),
        ENIC_TX_STAT(tx_multicast_frames_ok),
        ENIC_TX_STAT(tx_broadcast_frames_ok),
        ENIC_TX_STAT(tx_bytes_ok),
        ENIC_TX_STAT(tx_unicast_bytes_ok),
        ENIC_TX_STAT(tx_multicast_bytes_ok),
        ENIC_TX_STAT(tx_broadcast_bytes_ok),
        ENIC_TX_STAT(tx_drops),
        ENIC_TX_STAT(tx_errors),
        ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
        ENIC_RX_STAT(rx_frames_ok),
        ENIC_RX_STAT(rx_frames_total),
        ENIC_RX_STAT(rx_unicast_frames_ok),
        ENIC_RX_STAT(rx_multicast_frames_ok),
        ENIC_RX_STAT(rx_broadcast_frames_ok),
        ENIC_RX_STAT(rx_bytes_ok),
        ENIC_RX_STAT(rx_unicast_bytes_ok),
        ENIC_RX_STAT(rx_multicast_bytes_ok),
        ENIC_RX_STAT(rx_broadcast_bytes_ok),
        ENIC_RX_STAT(rx_drop),
        ENIC_RX_STAT(rx_no_bufs),
        ENIC_RX_STAT(rx_errors),
        ENIC_RX_STAT(rx_rss),
        ENIC_RX_STAT(rx_crc_errors),
        ENIC_RX_STAT(rx_frames_64),
        ENIC_RX_STAT(rx_frames_127),
        ENIC_RX_STAT(rx_frames_255),
        ENIC_RX_STAT(rx_frames_511),
        ENIC_RX_STAT(rx_frames_1023),
        ENIC_RX_STAT(rx_frames_1518),
        ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);

static int enic_is_dynamic(struct enic *enic)
{
        return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

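/* Completion queues are laid out with the RQ CQs first, followed by
 * the WQ CQs.
 */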
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
        return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
        return enic->rq_count + wq;
}

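/* Interrupt vector layout.  Legacy INTx uses three fixed "vectors" in the
 * PBA: 0 for I/O, 1 for errors, 2 for notifications.  MSI-X assigns one
 * vector per RQ, then one per WQ, then one error vector and one notify
 * vector.
 */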
static inline unsigned int enic_legacy_io_intr(void)
{
        return 0;
}

static inline unsigned int enic_legacy_err_intr(void)
{
        return 1;
}

static inline unsigned int enic_legacy_notify_intr(void)
{
        return 2;
}

static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
{
        return rq;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
{
        return enic->rq_count + wq;
}

static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
        return enic->rq_count + enic->wq_count;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
        return enic->rq_count + enic->wq_count + 1;
}

static int enic_get_settings(struct net_device *netdev,
        struct ethtool_cmd *ecmd)
{
        struct enic *enic = netdev_priv(netdev);

        ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
        ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
        ecmd->port = PORT_FIBRE;
        ecmd->transceiver = XCVR_EXTERNAL;

        if (netif_carrier_ok(netdev)) {
                ecmd->speed = vnic_dev_port_speed(enic->vdev);
                ecmd->duplex = DUPLEX_FULL;
        } else {
                ecmd->speed = -1;
                ecmd->duplex = -1;
        }

        ecmd->autoneg = AUTONEG_DISABLE;

        return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
        struct ethtool_drvinfo *drvinfo)
{
        struct enic *enic = netdev_priv(netdev);
        struct vnic_devcmd_fw_info *fw_info;

        enic_dev_fw_info(enic, &fw_info);

        strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
        strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
        strncpy(drvinfo->fw_version, fw_info->fw_version,
                sizeof(drvinfo->fw_version));
        strncpy(drvinfo->bus_info, pci_name(enic->pdev),
                sizeof(drvinfo->bus_info));
}

static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
        unsigned int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < enic_n_tx_stats; i++) {
                        memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
                        data += ETH_GSTRING_LEN;
                }
                for (i = 0; i < enic_n_rx_stats; i++) {
                        memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
                        data += ETH_GSTRING_LEN;
                }
                break;
        }
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return enic_n_tx_stats + enic_n_rx_stats;
        default:
                return -EOPNOTSUPP;
        }
}

static void enic_get_ethtool_stats(struct net_device *netdev,
        struct ethtool_stats *stats, u64 *data)
{
        struct enic *enic = netdev_priv(netdev);
        struct vnic_stats *vstats;
        unsigned int i;

        enic_dev_stats_dump(enic, &vstats);

        for (i = 0; i < enic_n_tx_stats; i++)
                *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
        for (i = 0; i < enic_n_rx_stats; i++)
                *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}

static u32 enic_get_rx_csum(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        return enic->csum_rx_enabled;
}

static int enic_set_rx_csum(struct net_device *netdev, u32 data)
{
        struct enic *enic = netdev_priv(netdev);

        if (data && !ENIC_SETTING(enic, RXCSUM))
                return -EINVAL;

        enic->csum_rx_enabled = !!data;

        return 0;
}

static int enic_set_tx_csum(struct net_device *netdev, u32 data)
{
        struct enic *enic = netdev_priv(netdev);

        if (data && !ENIC_SETTING(enic, TXCSUM))
                return -EINVAL;

        if (data)
                netdev->features |= NETIF_F_HW_CSUM;
        else
                netdev->features &= ~NETIF_F_HW_CSUM;

        return 0;
}

static int enic_set_tso(struct net_device *netdev, u32 data)
{
        struct enic *enic = netdev_priv(netdev);

        if (data && !ENIC_SETTING(enic, TSO))
                return -EINVAL;

        if (data)
                netdev->features |=
                        NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
        else
                netdev->features &=
                        ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);

        return 0;
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
        struct enic *enic = netdev_priv(netdev);
        enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
        struct ethtool_coalesce *ecmd)
{
        struct enic *enic = netdev_priv(netdev);

        ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
        ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

        return 0;
}

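/* Coalescing timers are tunable from userspace with ethtool, e.g.:
 *
 *      ethtool -C eth0 rx-usecs 30 tx-usecs 30
 *
 * In INTx and MSI modes a single timer covers both directions, so the
 * rx and tx values must match (otherwise -EINVAL is returned below).
 */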
static int enic_set_coalesce(struct net_device *netdev,
        struct ethtool_coalesce *ecmd)
{
        struct enic *enic = netdev_priv(netdev);
        u32 tx_coalesce_usecs;
        u32 rx_coalesce_usecs;
        unsigned int i, intr;

        tx_coalesce_usecs = min_t(u32,
                INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
                ecmd->tx_coalesce_usecs);
        rx_coalesce_usecs = min_t(u32,
                INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
                ecmd->rx_coalesce_usecs);

        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                if (tx_coalesce_usecs != rx_coalesce_usecs)
                        return -EINVAL;

                intr = enic_legacy_io_intr();
                vnic_intr_coalescing_timer_set(&enic->intr[intr],
                        INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
                break;
        case VNIC_DEV_INTR_MODE_MSI:
                if (tx_coalesce_usecs != rx_coalesce_usecs)
                        return -EINVAL;

                vnic_intr_coalescing_timer_set(&enic->intr[0],
                        INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
                break;
        case VNIC_DEV_INTR_MODE_MSIX:
                for (i = 0; i < enic->wq_count; i++) {
                        intr = enic_msix_wq_intr(enic, i);
                        vnic_intr_coalescing_timer_set(&enic->intr[intr],
                                INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
                }

                for (i = 0; i < enic->rq_count; i++) {
                        intr = enic_msix_rq_intr(enic, i);
                        vnic_intr_coalescing_timer_set(&enic->intr[intr],
                                INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
                }

                break;
        default:
                break;
        }

        enic->tx_coalesce_usecs = tx_coalesce_usecs;
        enic->rx_coalesce_usecs = rx_coalesce_usecs;

        return 0;
}

static const struct ethtool_ops enic_ethtool_ops = {
        .get_settings = enic_get_settings,
        .get_drvinfo = enic_get_drvinfo,
        .get_msglevel = enic_get_msglevel,
        .set_msglevel = enic_set_msglevel,
        .get_link = ethtool_op_get_link,
        .get_strings = enic_get_strings,
        .get_sset_count = enic_get_sset_count,
        .get_ethtool_stats = enic_get_ethtool_stats,
        .get_rx_csum = enic_get_rx_csum,
        .set_rx_csum = enic_set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = enic_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_tso = ethtool_op_get_tso,
        .set_tso = enic_set_tso,
        .get_coalesce = enic_get_coalesce,
        .set_coalesce = enic_set_coalesce,
        .get_flags = ethtool_op_get_flags,
};

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
        struct enic *enic = vnic_dev_priv(wq->vdev);

        if (buf->sop)
                pci_unmap_single(enic->pdev, buf->dma_addr,
                        buf->len, PCI_DMA_TODEVICE);
        else
                pci_unmap_page(enic->pdev, buf->dma_addr,
                        buf->len, PCI_DMA_TODEVICE);

        if (buf->os_buf)
                dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
        struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
        enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
        u8 type, u16 q_number, u16 completed_index, void *opaque)
{
        struct enic *enic = vnic_dev_priv(vdev);

        spin_lock(&enic->wq_lock[q_number]);

        vnic_wq_service(&enic->wq[q_number], cq_desc,
                completed_index, enic_wq_free_buf,
                opaque);

        if (netif_queue_stopped(enic->netdev) &&
            vnic_wq_desc_avail(&enic->wq[q_number]) >=
            (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
                netif_wake_queue(enic->netdev);

        spin_unlock(&enic->wq_lock[q_number]);

        return 0;
}

static void enic_log_q_error(struct enic *enic)
{
        unsigned int i;
        u32 error_status;

        for (i = 0; i < enic->wq_count; i++) {
                error_status = vnic_wq_error_status(&enic->wq[i]);
                if (error_status)
                        netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
                                i, error_status);
        }

        for (i = 0; i < enic->rq_count; i++) {
                error_status = vnic_rq_error_status(&enic->rq[i]);
                if (error_status)
                        netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
                                i, error_status);
        }
}

static void enic_msglvl_check(struct enic *enic)
{
        u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

        if (msg_enable != enic->msg_enable) {
                netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
                        enic->msg_enable, msg_enable);
                enic->msg_enable = msg_enable;
        }
}

static void enic_mtu_check(struct enic *enic)
{
        u32 mtu = vnic_dev_mtu(enic->vdev);
        struct net_device *netdev = enic->netdev;

        if (mtu && mtu != enic->port_mtu) {
                enic->port_mtu = mtu;
                if (mtu < netdev->mtu)
                        netdev_warn(netdev,
                                "interface MTU (%d) set higher "
                                "than switch port MTU (%d)\n",
                                netdev->mtu, mtu);
        }
}

static void enic_link_check(struct enic *enic)
{
        int link_status = vnic_dev_link_status(enic->vdev);
        int carrier_ok = netif_carrier_ok(enic->netdev);

        if (link_status && !carrier_ok) {
                netdev_info(enic->netdev, "Link UP\n");
                netif_carrier_on(enic->netdev);
        } else if (!link_status && carrier_ok) {
                netdev_info(enic->netdev, "Link DOWN\n");
                netif_carrier_off(enic->netdev);
        }
}

static void enic_notify_check(struct enic *enic)
{
        enic_msglvl_check(enic);
        enic_mtu_check(enic);
        enic_link_check(enic);
}

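/* Test bit i of the legacy interrupt status (PBA) read from the device */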
#define ENIC_TEST_INTR(pba, i) ((pba) & (1 << (i)))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
        struct net_device *netdev = data;
        struct enic *enic = netdev_priv(netdev);
        unsigned int io_intr = enic_legacy_io_intr();
        unsigned int err_intr = enic_legacy_err_intr();
        unsigned int notify_intr = enic_legacy_notify_intr();
        u32 pba;

        vnic_intr_mask(&enic->intr[io_intr]);

        pba = vnic_intr_legacy_pba(enic->legacy_pba);
        if (!pba) {
                vnic_intr_unmask(&enic->intr[io_intr]);
                return IRQ_NONE;        /* not our interrupt */
        }

        if (ENIC_TEST_INTR(pba, notify_intr)) {
                vnic_intr_return_all_credits(&enic->intr[notify_intr]);
                enic_notify_check(enic);
        }

        if (ENIC_TEST_INTR(pba, err_intr)) {
                vnic_intr_return_all_credits(&enic->intr[err_intr]);
                enic_log_q_error(enic);
                /* schedule recovery from WQ/RQ error */
                schedule_work(&enic->reset);
                return IRQ_HANDLED;
        }

        if (ENIC_TEST_INTR(pba, io_intr)) {
                if (napi_schedule_prep(&enic->napi[0]))
                        __napi_schedule(&enic->napi[0]);
        } else {
                vnic_intr_unmask(&enic->intr[io_intr]);
        }

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
        struct enic *enic = data;

        /* With MSI, there is no sharing of interrupts, so this is
         * our interrupt and there is no need to ack it.  The device
         * is not providing per-vector masking, so the OS will not
         * write to PCI config space to mask/unmask the interrupt.
         * We're using mask_on_assertion for MSI, so the device
         * automatically masks the interrupt when the interrupt is
         * generated.  Later, when exiting polling, the interrupt
         * will be unmasked (see enic_poll).
         *
         * Also, the device uses the same PCIe Traffic Class (TC)
         * for Memory Write data and MSI, so there are no ordering
         * issues; the MSI will always arrive at the Root Complex
         * _after_ corresponding Memory Writes (i.e. descriptor
         * writes).
         */

        napi_schedule(&enic->napi[0]);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
        struct napi_struct *napi = data;

        /* schedule NAPI polling for RQ cleanup */
        napi_schedule(napi);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
        struct enic *enic = data;
        unsigned int cq = enic_cq_wq(enic, 0);
        unsigned int intr = enic_msix_wq_intr(enic, 0);
        unsigned int wq_work_to_do = -1; /* no limit */
        unsigned int wq_work_done;

        wq_work_done = vnic_cq_service(&enic->cq[cq],
                wq_work_to_do, enic_wq_service, NULL);

        vnic_intr_return_credits(&enic->intr[intr],
                wq_work_done,
                1 /* unmask intr */,
                1 /* reset intr timer */);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
        struct enic *enic = data;
        unsigned int intr = enic_msix_err_intr(enic);

        vnic_intr_return_all_credits(&enic->intr[intr]);

        enic_log_q_error(enic);

        /* schedule recovery from WQ/RQ error */
        schedule_work(&enic->reset);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
        struct enic *enic = data;
        unsigned int intr = enic_msix_notify_intr(enic);

        vnic_intr_return_all_credits(&enic->intr[intr]);
        enic_notify_check(enic);

        return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb,
        unsigned int len_left, int loopback)
{
        skb_frag_t *frag;

        /* Queue additional data fragments */
        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
                len_left -= frag->size;
                enic_queue_wq_desc_cont(wq, skb,
                        pci_map_page(enic->pdev, frag->page,
                                frag->page_offset, frag->size,
                                PCI_DMA_TODEVICE),
                        frag->size,
                        (len_left == 0),        /* EOP? */
                        loopback);
        }
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb,
        int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
        unsigned int head_len = skb_headlen(skb);
        unsigned int len_left = skb->len - head_len;
        int eop = (len_left == 0);

        /* Queue the main skb fragment. The fragments are no larger
         * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
         * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
         * per fragment is queued.
         */
        enic_queue_wq_desc(wq, skb,
                pci_map_single(enic->pdev, skb->data,
                        head_len, PCI_DMA_TODEVICE),
                head_len,
                vlan_tag_insert, vlan_tag,
                eop, loopback);

        if (!eop)
                enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb,
        int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
        unsigned int head_len = skb_headlen(skb);
        unsigned int len_left = skb->len - head_len;
        unsigned int hdr_len = skb_checksum_start_offset(skb);
        unsigned int csum_offset = hdr_len + skb->csum_offset;
        int eop = (len_left == 0);

        /* Queue the main skb fragment. The fragments are no larger
         * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
         * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
         * per fragment is queued.
         */
        enic_queue_wq_desc_csum_l4(wq, skb,
                pci_map_single(enic->pdev, skb->data,
                        head_len, PCI_DMA_TODEVICE),
                head_len,
                csum_offset,
                hdr_len,
                vlan_tag_insert, vlan_tag,
                eop, loopback);

        if (!eop)
                enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
        int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
        unsigned int frag_len_left = skb_headlen(skb);
        unsigned int len_left = skb->len - frag_len_left;
        unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        int eop = (len_left == 0);
        unsigned int len;
        dma_addr_t dma_addr;
        unsigned int offset = 0;
        skb_frag_t *frag;

        /* Preload TCP csum field with IP pseudo hdr calculated
         * with IP length set to zero.  HW will later add in length
         * to each TCP segment resulting from the TSO.
         */

        if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
                ip_hdr(skb)->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                        ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                        &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        }

        /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
         * for the main skb fragment
         */
        while (frag_len_left) {
                len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
                dma_addr = pci_map_single(enic->pdev, skb->data + offset,
                                len, PCI_DMA_TODEVICE);
                enic_queue_wq_desc_tso(wq, skb,
                        dma_addr,
                        len,
                        mss, hdr_len,
                        vlan_tag_insert, vlan_tag,
                        eop && (len == frag_len_left), loopback);
                frag_len_left -= len;
                offset += len;
        }

        if (eop)
                return;

        /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
         * for additional data fragments
         */
        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
                len_left -= frag->size;
                frag_len_left = frag->size;
                offset = frag->page_offset;

                while (frag_len_left) {
                        len = min(frag_len_left,
                                (unsigned int)WQ_ENET_MAX_DESC_LEN);
                        dma_addr = pci_map_page(enic->pdev, frag->page,
                                offset, len,
                                PCI_DMA_TODEVICE);
                        enic_queue_wq_desc_cont(wq, skb,
                                dma_addr,
                                len,
                                (len_left == 0) &&
                                (len == frag_len_left),         /* EOP? */
                                loopback);
                        frag_len_left -= len;
                        offset += len;
                }
        }
}

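/* Pick the right descriptor format for this skb: TSO when a gso_size is
 * set, checksum-offload descriptors for CHECKSUM_PARTIAL, and plain
 * (optionally VLAN-tagged) descriptors otherwise.
 */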
static inline void enic_queue_wq_skb(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb)
{
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int vlan_tag = 0;
        int vlan_tag_insert = 0;
        int loopback = 0;

        if (vlan_tx_tag_present(skb)) {
                /* VLAN tag from trunking driver */
                vlan_tag_insert = 1;
                vlan_tag = vlan_tx_tag_get(skb);
        } else if (enic->loop_enable) {
                vlan_tag = enic->loop_tag;
                loopback = 1;
        }

        if (mss)
                enic_queue_wq_skb_tso(enic, wq, skb, mss,
                        vlan_tag_insert, vlan_tag, loopback);
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                enic_queue_wq_skb_csum_l4(enic, wq, skb,
                        vlan_tag_insert, vlan_tag, loopback);
        else
                enic_queue_wq_skb_vlan(enic, wq, skb,
                        vlan_tag_insert, vlan_tag, loopback);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
        struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        struct vnic_wq *wq = &enic->wq[0];
        unsigned long flags;

        if (skb->len <= 0) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
         * which is very likely.  In the off chance it's going to take
         * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
         */

        if (skb_shinfo(skb)->gso_size == 0 &&
            skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
            skb_linearize(skb)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        spin_lock_irqsave(&enic->wq_lock[0], flags);

        if (vnic_wq_desc_avail(wq) <
            skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
                netif_stop_queue(netdev);
                /* This is a hard error, log it */
                netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
                spin_unlock_irqrestore(&enic->wq_lock[0], flags);
                return NETDEV_TX_BUSY;
        }

        enic_queue_wq_skb(enic, wq, skb);

        if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
                netif_stop_queue(netdev);

        spin_unlock_irqrestore(&enic->wq_lock[0], flags);

        return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct net_device_stats *enic_get_stats(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        struct net_device_stats *net_stats = &netdev->stats;
        struct vnic_stats *stats;

        enic_dev_stats_dump(enic, &stats);

        net_stats->tx_packets = stats->tx.tx_frames_ok;
        net_stats->tx_bytes = stats->tx.tx_bytes_ok;
        net_stats->tx_errors = stats->tx.tx_errors;
        net_stats->tx_dropped = stats->tx.tx_drops;

        net_stats->rx_packets = stats->rx.rx_frames_ok;
        net_stats->rx_bytes = stats->rx.rx_bytes_ok;
        net_stats->rx_errors = stats->rx.rx_errors;
        net_stats->multicast = stats->rx.rx_multicast_frames_ok;
        net_stats->rx_over_errors = enic->rq_truncated_pkts;
        net_stats->rx_crc_errors = enic->rq_bad_fcs;
        net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

        return net_stats;
}

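/* Forget the cached unicast/multicast filter state so the address lists
 * are re-registered with the device from scratch (e.g. after a device
 * reset or port profile change).
 */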
static void enic_reset_addr_lists(struct enic *enic)
{
        enic->mc_count = 0;
        enic->uc_count = 0;
        enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
        struct enic *enic = netdev_priv(netdev);

        if (enic_is_dynamic(enic)) {
                if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
                        return -EADDRNOTAVAIL;
        } else {
                if (!is_valid_ether_addr(addr))
                        return -EADDRNOTAVAIL;
        }

        memcpy(netdev->dev_addr, addr, netdev->addr_len);

        return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
        struct enic *enic = netdev_priv(netdev);
        struct sockaddr *saddr = p;
        char *addr = saddr->sa_data;
        int err;

        if (netif_running(enic->netdev)) {
                err = enic_dev_del_station_addr(enic);
                if (err)
                        return err;
        }

        err = enic_set_mac_addr(netdev, addr);
        if (err)
                return err;

        if (netif_running(enic->netdev)) {
                err = enic_dev_add_station_addr(enic);
                if (err)
                        return err;
        }

        return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
        struct sockaddr *saddr = p;
        char *addr = saddr->sa_data;
        struct enic *enic = netdev_priv(netdev);
        int err;

        err = enic_dev_del_station_addr(enic);
        if (err)
                return err;

        err = enic_set_mac_addr(netdev, addr);
        if (err)
                return err;

        return enic_dev_add_station_addr(enic);
}

static void enic_update_multicast_addr_list(struct enic *enic)
{
        struct net_device *netdev = enic->netdev;
        struct netdev_hw_addr *ha;
        unsigned int mc_count = netdev_mc_count(netdev);
        u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
        unsigned int i, j;

        if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
                netdev_warn(netdev, "Registering only %d out of %d "
                        "multicast addresses\n",
                        ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
                mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
        }

        /* Is there an easier way?  Trying to minimize the
         * calls to add/del multicast addrs.  We keep the
         * addrs from the last call in enic->mc_addr and
         * look for changes to add/del.
         */

        i = 0;
        netdev_for_each_mc_addr(ha, netdev) {
                if (i == mc_count)
                        break;
                memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
        }

        for (i = 0; i < enic->mc_count; i++) {
                for (j = 0; j < mc_count; j++)
                        if (compare_ether_addr(enic->mc_addr[i],
                                mc_addr[j]) == 0)
                                break;
                if (j == mc_count)
                        enic_dev_del_addr(enic, enic->mc_addr[i]);
        }

        for (i = 0; i < mc_count; i++) {
                for (j = 0; j < enic->mc_count; j++)
                        if (compare_ether_addr(mc_addr[i],
                                enic->mc_addr[j]) == 0)
                                break;
                if (j == enic->mc_count)
                        enic_dev_add_addr(enic, mc_addr[i]);
        }

        /* Save the list to compare against next time
         */

        for (i = 0; i < mc_count; i++)
                memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

        enic->mc_count = mc_count;
}

static void enic_update_unicast_addr_list(struct enic *enic)
{
        struct net_device *netdev = enic->netdev;
        struct netdev_hw_addr *ha;
        unsigned int uc_count = netdev_uc_count(netdev);
        u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
        unsigned int i, j;

        if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
                netdev_warn(netdev, "Registering only %d out of %d "
                        "unicast addresses\n",
                        ENIC_UNICAST_PERFECT_FILTERS, uc_count);
                uc_count = ENIC_UNICAST_PERFECT_FILTERS;
        }

        /* Is there an easier way?  Trying to minimize the
         * calls to add/del unicast addrs.  We keep the
         * addrs from the last call in enic->uc_addr and
         * look for changes to add/del.
         */

        i = 0;
        netdev_for_each_uc_addr(ha, netdev) {
                if (i == uc_count)
                        break;
                memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
        }

        for (i = 0; i < enic->uc_count; i++) {
                for (j = 0; j < uc_count; j++)
                        if (compare_ether_addr(enic->uc_addr[i],
                                uc_addr[j]) == 0)
                                break;
                if (j == uc_count)
                        enic_dev_del_addr(enic, enic->uc_addr[i]);
        }

        for (i = 0; i < uc_count; i++) {
                for (j = 0; j < enic->uc_count; j++)
                        if (compare_ether_addr(uc_addr[i],
                                enic->uc_addr[j]) == 0)
                                break;
                if (j == enic->uc_count)
                        enic_dev_add_addr(enic, uc_addr[i]);
        }

        /* Save the list to compare against next time
         */

        for (i = 0; i < uc_count; i++)
                memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);

        enic->uc_count = uc_count;
}

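/* Program the hardware packet filter.  If there are more unicast or
 * multicast addresses than there are perfect filters, fall back to
 * promiscuous or all-multicast mode respectively.
 */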
/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        int directed = 1;
        int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
        int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
        int promisc = (netdev->flags & IFF_PROMISC) ||
                netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
        int allmulti = (netdev->flags & IFF_ALLMULTI) ||
                netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
        unsigned int flags = netdev->flags |
                (allmulti ? IFF_ALLMULTI : 0) |
                (promisc ? IFF_PROMISC : 0);

        if (enic->flags != flags) {
                enic->flags = flags;
                enic_dev_packet_filter(enic, directed,
                        multicast, broadcast, promisc, allmulti);
        }

        if (!promisc) {
                enic_update_unicast_addr_list(enic);
                if (!allmulti)
                        enic_update_multicast_addr_list(enic);
        }
}

/* rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
        struct vlan_group *vlan_group)
{
        struct enic *enic = netdev_priv(netdev);
        enic->vlan_group = vlan_group;
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct enic *enic = netdev_priv(netdev);

        if (vf != PORT_SELF_VF)
                return -EOPNOTSUPP;

        /* Ignore the vf argument for now. We can assume the request
         * is coming on a vf.
         */
        if (is_valid_ether_addr(mac)) {
                memcpy(enic->pp.vf_mac, mac, ETH_ALEN);
                return 0;
        } else
                return -EINVAL;
}

static int enic_set_port_profile(struct enic *enic, u8 *mac)
{
        struct vic_provinfo *vp;
        u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
        u16 os_type = VIC_GENERIC_PROV_OS_TYPE_LINUX;
        char uuid_str[38];
        char client_mac_str[18];
        u8 *client_mac;
        int err;

        err = enic_vnic_dev_deinit(enic);
        if (err)
                return err;

        switch (enic->pp.request) {

        case PORT_REQUEST_ASSOCIATE:

                if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
                        return -EINVAL;

                if (!is_valid_ether_addr(mac))
                        return -EADDRNOTAVAIL;

                vp = vic_provinfo_alloc(GFP_KERNEL, oui,
                        VIC_PROVINFO_GENERIC_TYPE);
                if (!vp)
                        return -ENOMEM;

                vic_provinfo_add_tlv(vp,
                        VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
                        strlen(enic->pp.name) + 1, enic->pp.name);

                if (!is_zero_ether_addr(enic->pp.mac_addr))
                        client_mac = enic->pp.mac_addr;
                else
                        client_mac = mac;

                vic_provinfo_add_tlv(vp,
                        VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
                        ETH_ALEN, client_mac);

                sprintf(client_mac_str, "%pM", client_mac);
                vic_provinfo_add_tlv(vp,
                        VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
                        sizeof(client_mac_str), client_mac_str);

                if (enic->pp.set & ENIC_SET_INSTANCE) {
                        sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
                        vic_provinfo_add_tlv(vp,
                                VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
                                sizeof(uuid_str), uuid_str);
                }

                if (enic->pp.set & ENIC_SET_HOST) {
                        sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
                        vic_provinfo_add_tlv(vp,
                                VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
                                sizeof(uuid_str), uuid_str);
                }

                os_type = htons(os_type);
                vic_provinfo_add_tlv(vp,
                        VIC_GENERIC_PROV_TLV_OS_TYPE,
                        sizeof(os_type), &os_type);

                err = enic_dev_init_prov(enic, vp);
                vic_provinfo_free(vp);
                if (err)
                        return err;
                break;

        case PORT_REQUEST_DISASSOCIATE:
                break;

        default:
                return -EINVAL;
        }

        /* Set flag to indicate that the port assoc/disassoc
         * request has been sent out to fw
         */
        enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;

        return 0;
}

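/* Reached via the IFLA_PORT_* netlink attributes, used for port-profile
 * (e.g. 802.1Qbh-style) provisioning by management tools.  Only the
 * device's own port (PORT_SELF_VF) is accepted here; real VFs are not
 * supported yet.
 */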
static int enic_set_vf_port(struct net_device *netdev, int vf,
        struct nlattr *port[])
{
        struct enic *enic = netdev_priv(netdev);
        struct enic_port_profile new_pp;
        int err = 0;

        memset(&new_pp, 0, sizeof(new_pp));

        if (port[IFLA_PORT_REQUEST]) {
                new_pp.set |= ENIC_SET_REQUEST;
                new_pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
        }

        if (port[IFLA_PORT_PROFILE]) {
                new_pp.set |= ENIC_SET_NAME;
                memcpy(new_pp.name, nla_data(port[IFLA_PORT_PROFILE]),
                        PORT_PROFILE_MAX);
        }

        if (port[IFLA_PORT_INSTANCE_UUID]) {
                new_pp.set |= ENIC_SET_INSTANCE;
                memcpy(new_pp.instance_uuid,
                        nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
        }

        if (port[IFLA_PORT_HOST_UUID]) {
                new_pp.set |= ENIC_SET_HOST;
                memcpy(new_pp.host_uuid,
                        nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
        }

        /* don't support VFs, yet */
        if (vf != PORT_SELF_VF)
                return -EOPNOTSUPP;

        if (!(new_pp.set & ENIC_SET_REQUEST))
                return -EOPNOTSUPP;

        if (new_pp.request == PORT_REQUEST_ASSOCIATE) {
                /* Special case handling */
                if (!is_zero_ether_addr(enic->pp.vf_mac))
                        memcpy(new_pp.mac_addr, enic->pp.vf_mac, ETH_ALEN);

                if (is_zero_ether_addr(netdev->dev_addr))
                        random_ether_addr(netdev->dev_addr);
        }

        memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));

        err = enic_set_port_profile(enic, netdev->dev_addr);
        if (err)
                goto set_port_profile_cleanup;

set_port_profile_cleanup:
        memset(enic->pp.vf_mac, 0, ETH_ALEN);

        if (err || enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
                memset(netdev->dev_addr, 0, ETH_ALEN);
                memset(enic->pp.mac_addr, 0, ETH_ALEN);
        }

        return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
        struct sk_buff *skb)
{
        struct enic *enic = netdev_priv(netdev);
        int err, error, done;
        u16 response = PORT_PROFILE_RESPONSE_SUCCESS;

        if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
                return -ENODATA;

        err = enic_dev_init_done(enic, &done, &error);
        if (err)
                error = err;

        switch (error) {
        case ERR_SUCCESS:
                if (!done)
                        response = PORT_PROFILE_RESPONSE_INPROGRESS;
                break;
        case ERR_EINVAL:
                response = PORT_PROFILE_RESPONSE_INVALID;
                break;
        case ERR_EBADSTATE:
                response = PORT_PROFILE_RESPONSE_BADSTATE;
                break;
        case ERR_ENOMEM:
                response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
                break;
        default:
                response = PORT_PROFILE_RESPONSE_ERROR;
                break;
        }

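        /* The NLA_PUT* macros jump to nla_put_failure if the skb runs
         * out of room.
         */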
        NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
        NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
        if (enic->pp.set & ENIC_SET_NAME)
                NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
                        enic->pp.name);
        if (enic->pp.set & ENIC_SET_INSTANCE)
                NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
                        enic->pp.instance_uuid);
        if (enic->pp.set & ENIC_SET_HOST)
                NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
                        enic->pp.host_uuid);

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
        struct enic *enic = vnic_dev_priv(rq->vdev);

        if (!buf->os_buf)
                return;

        pci_unmap_single(enic->pdev, buf->dma_addr,
                buf->len, PCI_DMA_FROMDEVICE);
        dev_kfree_skb_any(buf->os_buf);
}

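/* Allocate, DMA-map and post one receive buffer, sized for an MTU-sized
 * frame plus the VLAN Ethernet header.
 */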
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
        struct enic *enic = vnic_dev_priv(rq->vdev);
        struct net_device *netdev = enic->netdev;
        struct sk_buff *skb;
        unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
        unsigned int os_buf_index = 0;
        dma_addr_t dma_addr;

        skb = netdev_alloc_skb_ip_align(netdev, len);
        if (!skb)
                return -ENOMEM;

        dma_addr = pci_map_single(enic->pdev, skb->data,
                len, PCI_DMA_FROMDEVICE);

        enic_queue_rq_desc(rq, skb, os_buf_index,
                dma_addr, len);

        return 0;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque)
{
        struct enic *enic = vnic_dev_priv(rq->vdev);
        struct net_device *netdev = enic->netdev;
        struct sk_buff *skb;

        u8 type, color, eop, sop, ingress_port, vlan_stripped;
        u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
        u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
        u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
        u8 packet_error;
        u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
        u32 rss_hash;

        if (skipped)
                return;

        skb = buf->os_buf;
        prefetch(skb->data - NET_IP_ALIGN);
        pci_unmap_single(enic->pdev, buf->dma_addr,
                buf->len, PCI_DMA_FROMDEVICE);

        cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
                &type, &color, &q_number, &completed_index,
                &ingress_port, &fcoe, &eop, &sop, &rss_type,
                &csum_not_calc, &rss_hash, &bytes_written,
                &packet_error, &vlan_stripped, &vlan_tci, &checksum,
                &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
                &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
                &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
                &fcs_ok);

        if (packet_error) {

                if (!fcs_ok) {
                        if (bytes_written > 0)
                                enic->rq_bad_fcs++;
                        else if (bytes_written == 0)
                                enic->rq_truncated_pkts++;
                }

                dev_kfree_skb_any(skb);

                return;
        }

        if (eop && bytes_written > 0) {

                /* Good receive
                 */

                skb_put(skb, bytes_written);
                skb->protocol = eth_type_trans(skb, netdev);

                if (enic->csum_rx_enabled && !csum_not_calc) {
                        skb->csum = htons(checksum);
                        skb->ip_summed = CHECKSUM_COMPLETE;
                }

                skb->dev = netdev;

                if (enic->vlan_group && vlan_stripped &&
                        (vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) {

                        if (netdev->features & NETIF_F_GRO)
                                vlan_gro_receive(&enic->napi[q_number],
                                        enic->vlan_group, vlan_tci, skb);
                        else
                                vlan_hwaccel_receive_skb(skb,
                                        enic->vlan_group, vlan_tci);

                } else {

                        if (netdev->features & NETIF_F_GRO)
                                napi_gro_receive(&enic->napi[q_number], skb);
                        else
                                netif_receive_skb(skb);

                }
        } else {

                /* Buffer overflow
                 */

                dev_kfree_skb_any(skb);
        }
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
        u8 type, u16 q_number, u16 completed_index, void *opaque)
{
        struct enic *enic = vnic_dev_priv(vdev);

        vnic_rq_service(&enic->rq[q_number], cq_desc,
                completed_index, VNIC_RQ_RETURN_DESC,
                enic_rq_indicate_buf, opaque);

        return 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
        struct net_device *netdev = napi->dev;
        struct enic *enic = netdev_priv(netdev);
        unsigned int cq_rq = enic_cq_rq(enic, 0);
        unsigned int cq_wq = enic_cq_wq(enic, 0);
        unsigned int intr = enic_legacy_io_intr();
        unsigned int rq_work_to_do = budget;
        unsigned int wq_work_to_do = -1; /* no limit */
        unsigned int work_done, rq_work_done, wq_work_done;
        int err;

        /* Service RQ (first) and WQ
         */

        rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
                rq_work_to_do, enic_rq_service, NULL);

        wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
                wq_work_to_do, enic_wq_service, NULL);

        /* Accumulate intr event credits for this polling
         * cycle.  An intr event is the completion of
         * a WQ or RQ packet.
         */

        work_done = rq_work_done + wq_work_done;

        if (work_done > 0)
                vnic_intr_return_credits(&enic->intr[intr],
                        work_done,
                        0 /* don't unmask intr */,
                        0 /* don't reset intr timer */);

        err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

        /* Buffer allocation failed. Stay in polling
         * mode so we can try to fill the ring again.
         */

        if (err)
                rq_work_done = rq_work_to_do;

        if (rq_work_done < rq_work_to_do) {

                /* Some work done, but not enough to stay in polling,
                 * exit polling
                 */

                napi_complete(napi);
                vnic_intr_unmask(&enic->intr[intr]);
        }

        return rq_work_done;
}

static int enic_poll_msix(struct napi_struct *napi, int budget)
{
        struct net_device *netdev = napi->dev;
        struct enic *enic = netdev_priv(netdev);
        unsigned int rq = (napi - &enic->napi[0]);
        unsigned int cq = enic_cq_rq(enic, rq);
        unsigned int intr = enic_msix_rq_intr(enic, rq);
        unsigned int work_to_do = budget;
        unsigned int work_done;
        int err;

        /* Service RQ
         */

        work_done = vnic_cq_service(&enic->cq[cq],
                work_to_do, enic_rq_service, NULL);

        /* Return intr event credits for this polling
         * cycle.  An intr event is the completion of an
         * RQ packet.
         */
1531
1532         if (work_done > 0)
1533                 vnic_intr_return_credits(&enic->intr[intr],
1534                         work_done,
1535                         0 /* don't unmask intr */,
1536                         0 /* don't reset intr timer */);
1537
1538         err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
1539
1540         /* Buffer allocation failed. Stay in polling mode
1541          * so we can try to fill the ring again.
1542          */
1543
1544         if (err)
1545                 work_done = work_to_do;
1546
1547         if (work_done < work_to_do) {
1548
1549                 /* Some work done, but not enough to stay in polling;
1550                  * exit polling and re-enable the interrupt
1551                  */
1552
1553                 napi_complete(napi);
1554                 vnic_intr_unmask(&enic->intr[intr]);
1555         }
1556
1557         return work_done;
1558 }
1559
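     /* Periodic fallback for notify events.  MSI mode has no spare
      * vector for notifications (see enic_notify_timer_start()), so
      * the notify area is polled every ENIC_NOTIFY_TIMER_PERIOD
      * jiffies instead.
      */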
1560 static void enic_notify_timer(unsigned long data)
1561 {
1562         struct enic *enic = (struct enic *)data;
1563
1564         enic_notify_check(enic);
1565
1566         mod_timer(&enic->notify_timer,
1567                 round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
1568 }
1569
1570 static void enic_free_intr(struct enic *enic)
1571 {
1572         struct net_device *netdev = enic->netdev;
1573         unsigned int i;
1574
1575         switch (vnic_dev_get_intr_mode(enic->vdev)) {
1576         case VNIC_DEV_INTR_MODE_INTX:
1577                 free_irq(enic->pdev->irq, netdev);
1578                 break;
1579         case VNIC_DEV_INTR_MODE_MSI:
1580                 free_irq(enic->pdev->irq, enic);
1581                 break;
1582         case VNIC_DEV_INTR_MODE_MSIX:
1583                 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1584                         if (enic->msix[i].requested)
1585                                 free_irq(enic->msix_entry[i].vector,
1586                                         enic->msix[i].devid);
1587                 break;
1588         default:
1589                 break;
1590         }
1591 }
1592
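     /* Request IRQs for whichever interrupt mode was negotiated in
      * enic_set_intr_mode().  The "%.11s" in the MSI-X names truncates
      * the netdev name, presumably so the "-rx-N"/"-tx-N" suffixes
      * still fit the fixed-size msix[].devname buffer.
      */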
1593 static int enic_request_intr(struct enic *enic)
1594 {
1595         struct net_device *netdev = enic->netdev;
1596         unsigned int i, intr;
1597         int err = 0;
1598
1599         switch (vnic_dev_get_intr_mode(enic->vdev)) {
1600
1601         case VNIC_DEV_INTR_MODE_INTX:
1602
1603                 err = request_irq(enic->pdev->irq, enic_isr_legacy,
1604                         IRQF_SHARED, netdev->name, netdev);
1605                 break;
1606
1607         case VNIC_DEV_INTR_MODE_MSI:
1608
1609                 err = request_irq(enic->pdev->irq, enic_isr_msi,
1610                         0, netdev->name, enic);
1611                 break;
1612
1613         case VNIC_DEV_INTR_MODE_MSIX:
1614
1615                 for (i = 0; i < enic->rq_count; i++) {
1616                         intr = enic_msix_rq_intr(enic, i);
1617                         sprintf(enic->msix[intr].devname,
1618                                 "%.11s-rx-%d", netdev->name, i);
1619                         enic->msix[intr].isr = enic_isr_msix_rq;
1620                         enic->msix[intr].devid = &enic->napi[i];
1621                 }
1622
1623                 for (i = 0; i < enic->wq_count; i++) {
1624                         intr = enic_msix_wq_intr(enic, i);
1625                         sprintf(enic->msix[intr].devname,
1626                                 "%.11s-tx-%d", netdev->name, i);
1627                         enic->msix[intr].isr = enic_isr_msix_wq;
1628                         enic->msix[intr].devid = enic;
1629                 }
1630
1631                 intr = enic_msix_err_intr(enic);
1632                 sprintf(enic->msix[intr].devname,
1633                         "%.11s-err", netdev->name);
1634                 enic->msix[intr].isr = enic_isr_msix_err;
1635                 enic->msix[intr].devid = enic;
1636
1637                 intr = enic_msix_notify_intr(enic);
1638                 sprintf(enic->msix[intr].devname,
1639                         "%.11s-notify", netdev->name);
1640                 enic->msix[intr].isr = enic_isr_msix_notify;
1641                 enic->msix[intr].devid = enic;
1642
1643                 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1644                         enic->msix[i].requested = 0;
1645
1646                 for (i = 0; i < enic->intr_count; i++) {
1647                         err = request_irq(enic->msix_entry[i].vector,
1648                                 enic->msix[i].isr, 0,
1649                                 enic->msix[i].devname,
1650                                 enic->msix[i].devid);
1651                         if (err) {
1652                                 enic_free_intr(enic);
1653                                 break;
1654                         }
1655                         enic->msix[i].requested = 1;
1656                 }
1657
1658                 break;
1659
1660         default:
1661                 break;
1662         }
1663
1664         return err;
1665 }
1666
1667 static void enic_synchronize_irqs(struct enic *enic)
1668 {
1669         unsigned int i;
1670
1671         switch (vnic_dev_get_intr_mode(enic->vdev)) {
1672         case VNIC_DEV_INTR_MODE_INTX:
1673         case VNIC_DEV_INTR_MODE_MSI:
1674                 synchronize_irq(enic->pdev->irq);
1675                 break;
1676         case VNIC_DEV_INTR_MODE_MSIX:
1677                 for (i = 0; i < enic->intr_count; i++)
1678                         synchronize_irq(enic->msix_entry[i].vector);
1679                 break;
1680         default:
1681                 break;
1682         }
1683 }
1684
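     /* Tell the device which interrupt to raise for notify events:
      * the dedicated legacy or MSI-X notify vector where one exists,
      * or none (-1) otherwise, in which case the notify timer polls
      * instead.  The devcmd is serialized by devcmd_lock.
      */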
1685 static int enic_dev_notify_set(struct enic *enic)
1686 {
1687         int err;
1688
1689         spin_lock(&enic->devcmd_lock);
1690         switch (vnic_dev_get_intr_mode(enic->vdev)) {
1691         case VNIC_DEV_INTR_MODE_INTX:
1692                 err = vnic_dev_notify_set(enic->vdev,
1693                         enic_legacy_notify_intr());
1694                 break;
1695         case VNIC_DEV_INTR_MODE_MSIX:
1696                 err = vnic_dev_notify_set(enic->vdev,
1697                         enic_msix_notify_intr(enic));
1698                 break;
1699         default:
1700                 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
1701                 break;
1702         }
1703         spin_unlock(&enic->devcmd_lock);
1704
1705         return err;
1706 }
1707
1708 static void enic_notify_timer_start(struct enic *enic)
1709 {
1710         switch (vnic_dev_get_intr_mode(enic->vdev)) {
1711         case VNIC_DEV_INTR_MODE_MSI:
1712                 mod_timer(&enic->notify_timer, jiffies);
1713                 break;
1714         default:
1715                 /* INTx/MSI-X use an interrupt for notification */
1716                 break;
1717         }
1718 }
1719
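     /* Bring-up order matters: IRQs and the notify buffer first, then
      * each RQ is primed with at least one buffer before the queues
      * are enabled, and interrupts are unmasked only after NAPI is
      * enabled, so no completion arrives with nothing ready to
      * service it.
      */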
1720 /* rtnl lock is held, process context */
1721 static int enic_open(struct net_device *netdev)
1722 {
1723         struct enic *enic = netdev_priv(netdev);
1724         unsigned int i;
1725         int err;
1726
1727         err = enic_request_intr(enic);
1728         if (err) {
1729                 netdev_err(netdev, "Unable to request irq.\n");
1730                 return err;
1731         }
1732
1733         err = enic_dev_notify_set(enic);
1734         if (err) {
1735                 netdev_err(netdev,
1736                         "Failed to alloc notify buffer, aborting.\n");
1737                 goto err_out_free_intr;
1738         }
1739
1740         for (i = 0; i < enic->rq_count; i++) {
1741                 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
1742                 /* Need at least one buffer on ring to get going */
1743                 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1744                         netdev_err(netdev, "Unable to alloc receive buffers\n");
1745                         err = -ENOMEM;
1746                         goto err_out_notify_unset;
1747                 }
1748         }
1749
1750         for (i = 0; i < enic->wq_count; i++)
1751                 vnic_wq_enable(&enic->wq[i]);
1752         for (i = 0; i < enic->rq_count; i++)
1753                 vnic_rq_enable(&enic->rq[i]);
1754
1755         if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
1756                 enic_dev_add_addr(enic, enic->pp.mac_addr);
1757         else
1758                 enic_dev_add_station_addr(enic);
1759         enic_set_rx_mode(netdev);
1760
1761         netif_wake_queue(netdev);
1762
1763         for (i = 0; i < enic->rq_count; i++)
1764                 napi_enable(&enic->napi[i]);
1765
1766         enic_dev_enable(enic);
1767
1768         for (i = 0; i < enic->intr_count; i++)
1769                 vnic_intr_unmask(&enic->intr[i]);
1770
1771         enic_notify_timer_start(enic);
1772
1773         return 0;
1774
1775 err_out_notify_unset:
1776         enic_dev_notify_unset(enic);
1777 err_out_free_intr:
1778         enic_free_intr(enic);
1779
1780         return err;
1781 }
1782
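     /* Teardown mirrors enic_open() in reverse: interrupts are masked
      * and synchronized before NAPI is disabled, and the rings are
      * cleaned only after the hardware acknowledges the WQ/RQ
      * disables.
      */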
1783 /* rtnl lock is held, process context */
1784 static int enic_stop(struct net_device *netdev)
1785 {
1786         struct enic *enic = netdev_priv(netdev);
1787         unsigned int i;
1788         int err;
1789
1790         for (i = 0; i < enic->intr_count; i++) {
1791                 vnic_intr_mask(&enic->intr[i]);
1792                 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
1793         }
1794
1795         enic_synchronize_irqs(enic);
1796
1797         del_timer_sync(&enic->notify_timer);
1798
1799         enic_dev_disable(enic);
1800
1801         for (i = 0; i < enic->rq_count; i++)
1802                 napi_disable(&enic->napi[i]);
1803
1804         netif_carrier_off(netdev);
1805         netif_tx_disable(netdev);
1806         if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
1807                 enic_dev_del_addr(enic, enic->pp.mac_addr);
1808         else
1809                 enic_dev_del_station_addr(enic);
1810
1811         for (i = 0; i < enic->wq_count; i++) {
1812                 err = vnic_wq_disable(&enic->wq[i]);
1813                 if (err)
1814                         return err;
1815         }
1816         for (i = 0; i < enic->rq_count; i++) {
1817                 err = vnic_rq_disable(&enic->rq[i]);
1818                 if (err)
1819                         return err;
1820         }
1821
1822         enic_dev_notify_unset(enic);
1823         enic_free_intr(enic);
1824
1825         for (i = 0; i < enic->wq_count; i++)
1826                 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
1827         for (i = 0; i < enic->rq_count; i++)
1828                 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1829         for (i = 0; i < enic->cq_count; i++)
1830                 vnic_cq_clean(&enic->cq[i]);
1831         for (i = 0; i < enic->intr_count; i++)
1832                 vnic_intr_clean(&enic->intr[i]);
1833
1834         return 0;
1835 }
1836
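     /* An MTU change needs a full stop/open cycle, presumably so the
      * receive ring can be refilled with buffers sized for the new
      * MTU.
      */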
1837 static int enic_change_mtu(struct net_device *netdev, int new_mtu)
1838 {
1839         struct enic *enic = netdev_priv(netdev);
1840         int running = netif_running(netdev);
1841
1842         if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
1843                 return -EINVAL;
1844
1845         if (running)
1846                 enic_stop(netdev);
1847
1848         netdev->mtu = new_mtu;
1849
1850         if (netdev->mtu > enic->port_mtu)
1851                 netdev_warn(netdev,
1852                         "interface MTU (%d) set higher than port MTU (%d)\n",
1853                         netdev->mtu, enic->port_mtu);
1854
1855         if (running)
1856                 enic_open(netdev);
1857
1858         return 0;
1859 }
1860
1861 #ifdef CONFIG_NET_POLL_CONTROLLER
1862 static void enic_poll_controller(struct net_device *netdev)
1863 {
1864         struct enic *enic = netdev_priv(netdev);
1865         struct vnic_dev *vdev = enic->vdev;
1866         unsigned int i, intr;
1867
1868         switch (vnic_dev_get_intr_mode(vdev)) {
1869         case VNIC_DEV_INTR_MODE_MSIX:
1870                 for (i = 0; i < enic->rq_count; i++) {
1871                         intr = enic_msix_rq_intr(enic, i);
1872                         enic_isr_msix_rq(enic->msix_entry[intr].vector,
1873                                 &enic->napi[i]);
1874                 }
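                     /* Only one WQ is provisioned in MSI-X mode (m = 1
                      * in enic_set_intr_mode()), so poll WQ 0 here; the
                      * loop above leaves i == enic->rq_count, which is
                      * not a valid WQ index.
                      */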
1875                 intr = enic_msix_wq_intr(enic, 0);
1876                 enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
1877                 break;
1878         case VNIC_DEV_INTR_MODE_MSI:
1879                 enic_isr_msi(enic->pdev->irq, enic);
1880                 break;
1881         case VNIC_DEV_INTR_MODE_INTX:
1882                 enic_isr_legacy(enic->pdev->irq, netdev);
1883                 break;
1884         default:
1885                 break;
1886         }
1887 }
1888 #endif
1889
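     /* Generic wait for an asynchronous devcmd: kick it off with
      * start(), then poll finished() every 100 ms (HZ / 10) for up to
      * 2 seconds.  Process context only, hence the BUG_ON below.
      */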
1890 static int enic_dev_wait(struct vnic_dev *vdev,
1891         int (*start)(struct vnic_dev *, int),
1892         int (*finished)(struct vnic_dev *, int *),
1893         int arg)
1894 {
1895         unsigned long time;
1896         int done;
1897         int err;
1898
1899         BUG_ON(in_interrupt());
1900
1901         err = start(vdev, arg);
1902         if (err)
1903                 return err;
1904
1905         /* Wait for func to complete...2 seconds max
1906          */
1907
1908         time = jiffies + (HZ * 2);
1909         do {
1910
1911                 err = finished(vdev, &done);
1912                 if (err)
1913                         return err;
1914
1915                 if (done)
1916                         return 0;
1917
1918                 schedule_timeout_uninterruptible(HZ / 10);
1919
1920         } while (time_after(time, jiffies));
1921
1922         return -ETIMEDOUT;
1923 }
1924
1925 static int enic_dev_open(struct enic *enic)
1926 {
1927         int err;
1928
1929         err = enic_dev_wait(enic->vdev, vnic_dev_open,
1930                 vnic_dev_open_done, 0);
1931         if (err)
1932                 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
1933                         err);
1934
1935         return err;
1936 }
1937
1938 static int enic_dev_hang_reset(struct enic *enic)
1939 {
1940         int err;
1941
1942         err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
1943                 vnic_dev_hang_reset_done, 0);
1944         if (err)
1945                 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
1946                         err);
1947
1948         return err;
1949 }
1950
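     /* Program the 40-byte RSS hash key.  The byte values below are
      * plain ASCII: "UCSawesome", "PALOunique", "LINUXrocks",
      * "ENICiscool".  The key is staged in a DMA-coherent buffer
      * because the devcmd takes a bus address, not a kernel pointer.
      */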
1951 static int enic_set_rsskey(struct enic *enic)
1952 {
1953         dma_addr_t rss_key_buf_pa;
1954         union vnic_rss_key *rss_key_buf_va = NULL;
1955         union vnic_rss_key rss_key = {
1956                 .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
1957                 .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
1958                 .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
1959                 .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
1960         };
1961         int err;
1962
1963         rss_key_buf_va = pci_alloc_consistent(enic->pdev,
1964                 sizeof(union vnic_rss_key), &rss_key_buf_pa);
1965         if (!rss_key_buf_va)
1966                 return -ENOMEM;
1967
1968         memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
1969
1970         spin_lock(&enic->devcmd_lock);
1971         err = enic_set_rss_key(enic,
1972                 rss_key_buf_pa,
1973                 sizeof(union vnic_rss_key));
1974         spin_unlock(&enic->devcmd_lock);
1975
1976         pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
1977                 rss_key_buf_va, rss_key_buf_pa);
1978
1979         return err;
1980 }
1981
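     /* Program the RSS indirection table: (1 << rss_hash_bits)
      * entries, packed four per cpu[] element, assigning hash buckets
      * to RQs round-robin.  With rss_hash_bits = 7 and four RQs, for
      * example, the 128 entries read 0, 1, 2, 3, 0, 1, 2, 3, ...
      */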
1982 static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
1983 {
1984         dma_addr_t rss_cpu_buf_pa;
1985         union vnic_rss_cpu *rss_cpu_buf_va = NULL;
1986         unsigned int i;
1987         int err;
1988
1989         rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
1990                 sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
1991         if (!rss_cpu_buf_va)
1992                 return -ENOMEM;
1993
1994         for (i = 0; i < (1 << rss_hash_bits); i++)
1995                 rss_cpu_buf_va->cpu[i / 4].b[i % 4] = i % enic->rq_count;
1996
1997         spin_lock(&enic->devcmd_lock);
1998         err = enic_set_rss_cpu(enic,
1999                 rss_cpu_buf_pa,
2000                 sizeof(union vnic_rss_cpu));
2001         spin_unlock(&enic->devcmd_lock);
2002
2003         pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
2004                 rss_cpu_buf_va, rss_cpu_buf_pa);
2005
2006         return err;
2007 }
2008
2009 static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
2010         u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
2011 {
2012         const u8 tso_ipid_split_en = 0;
2013         const u8 ig_vlan_strip_en = 1;
2014         int err;
2015
2016         /* Enable VLAN tag stripping.
2017          */
2018
2019         spin_lock(&enic->devcmd_lock);
2020         err = enic_set_nic_cfg(enic,
2021                 rss_default_cpu, rss_hash_type,
2022                 rss_hash_bits, rss_base_cpu,
2023                 rss_enable, tso_ipid_split_en,
2024                 ig_vlan_strip_en);
2025         spin_unlock(&enic->devcmd_lock);
2026
2027         return err;
2028 }
2029
2030 static int enic_set_rss_nic_cfg(struct enic *enic)
2031 {
2032         struct device *dev = enic_get_dev(enic);
2033         const u8 rss_default_cpu = 0;
2034         const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
2035                 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
2036                 NIC_CFG_RSS_HASH_TYPE_IPV6 |
2037                 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
2038         const u8 rss_hash_bits = 7;
2039         const u8 rss_base_cpu = 0;
2040         u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
2041
2042         if (rss_enable) {
2043                 if (!enic_set_rsskey(enic)) {
2044                         if (enic_set_rsscpu(enic, rss_hash_bits)) {
2045                                 rss_enable = 0;
2046                                 dev_warn(dev, "RSS disabled, "
2047                                         "failed to set RSS cpu indirection table.\n");
2048                         }
2049                 } else {
2050                         rss_enable = 0;
2051                         dev_warn(dev, "RSS disabled, failed to set RSS key.\n");
2052                 }
2053         }
2054
2055         return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
2056                 rss_hash_bits, rss_base_cpu, rss_enable);
2057 }
2058
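     /* Worker for recovering a hung device, run from the enic->reset
      * work item.  It takes the rtnl lock so the stop/reset/open
      * sequence cannot race with ifup/ifdown or an MTU change.
      */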
2059 static void enic_reset(struct work_struct *work)
2060 {
2061         struct enic *enic = container_of(work, struct enic, reset);
2062
2063         if (!netif_running(enic->netdev))
2064                 return;
2065
2066         rtnl_lock();
2067
2068         enic_dev_hang_notify(enic);
2069         enic_stop(enic->netdev);
2070         enic_dev_hang_reset(enic);
2071         enic_reset_addr_lists(enic);
2072         enic_init_vnic_resources(enic);
2073         enic_set_rss_nic_cfg(enic);
2074         enic_dev_set_ig_vlan_rewrite_mode(enic);
2075         enic_open(enic->netdev);
2076
2077         rtnl_unlock();
2078 }
2079
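     /* Negotiate an interrupt mode, best first: MSI-X with multiple
      * RQs (when RSS is available), MSI-X with a single RQ, MSI, then
      * INTx.  enic->config.intr_mode acts as a cap from the vNIC
      * config: 0 permits MSI-X, 1 at most MSI, 2 at most INTx.
      */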
2080 static int enic_set_intr_mode(struct enic *enic)
2081 {
2082         unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
2083         unsigned int m = 1;
2084         unsigned int i;
2085
2086         /* Set interrupt mode (INTx, MSI, MSI-X) depending
2087          * on system capabilities.
2088          *
2089          * Try MSI-X first
2090          *
2091          * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
2092          * (the second to last INTR is used for WQ/RQ errors)
2093          * (the last INTR is used for notifications)
2094          */
2095
2096         BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
2097         for (i = 0; i < n + m + 2; i++)
2098                 enic->msix_entry[i].entry = i;
2099
2100         /* Use multiple RQs if RSS is enabled
2101          */
2102
2103         if (ENIC_SETTING(enic, RSS) &&
2104             enic->config.intr_mode < 1 &&
2105             enic->rq_count >= n &&
2106             enic->wq_count >= m &&
2107             enic->cq_count >= n + m &&
2108             enic->intr_count >= n + m + 2) {
2109
2110                 if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
2111
2112                         enic->rq_count = n;
2113                         enic->wq_count = m;
2114                         enic->cq_count = n + m;
2115                         enic->intr_count = n + m + 2;
2116
2117                         vnic_dev_set_intr_mode(enic->vdev,
2118                                 VNIC_DEV_INTR_MODE_MSIX);
2119
2120                         return 0;
2121                 }
2122         }
2123
2124         if (enic->config.intr_mode < 1 &&
2125             enic->rq_count >= 1 &&
2126             enic->wq_count >= m &&
2127             enic->cq_count >= 1 + m &&
2128             enic->intr_count >= 1 + m + 2) {
2129                 if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {
2130
2131                         enic->rq_count = 1;
2132                         enic->wq_count = m;
2133                         enic->cq_count = 1 + m;
2134                         enic->intr_count = 1 + m + 2;
2135
2136                         vnic_dev_set_intr_mode(enic->vdev,
2137                                 VNIC_DEV_INTR_MODE_MSIX);
2138
2139                         return 0;
2140                 }
2141         }
2142
2143         /* Next try MSI
2144          *
2145          * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
2146          */
2147
2148         if (enic->config.intr_mode < 2 &&
2149             enic->rq_count >= 1 &&
2150             enic->wq_count >= 1 &&
2151             enic->cq_count >= 2 &&
2152             enic->intr_count >= 1 &&
2153             !pci_enable_msi(enic->pdev)) {
2154
2155                 enic->rq_count = 1;
2156                 enic->wq_count = 1;
2157                 enic->cq_count = 2;
2158                 enic->intr_count = 1;
2159
2160                 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
2161
2162                 return 0;
2163         }
2164
2165         /* Next try INTx
2166          *
2167          * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
2168          * (the first INTR is used for WQ/RQ)
2169          * (the second INTR is used for WQ/RQ errors)
2170          * (the last INTR is used for notifications)
2171          */
2172
2173         if (enic->config.intr_mode < 3 &&
2174             enic->rq_count >= 1 &&
2175             enic->wq_count >= 1 &&
2176             enic->cq_count >= 2 &&
2177             enic->intr_count >= 3) {
2178
2179                 enic->rq_count = 1;
2180                 enic->wq_count = 1;
2181                 enic->cq_count = 2;
2182                 enic->intr_count = 3;
2183
2184                 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
2185
2186                 return 0;
2187         }
2188
2189         vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2190
2191         return -EINVAL;
2192 }
2193
2194 static void enic_clear_intr_mode(struct enic *enic)
2195 {
2196         switch (vnic_dev_get_intr_mode(enic->vdev)) {
2197         case VNIC_DEV_INTR_MODE_MSIX:
2198                 pci_disable_msix(enic->pdev);
2199                 break;
2200         case VNIC_DEV_INTR_MODE_MSI:
2201                 pci_disable_msi(enic->pdev);
2202                 break;
2203         default:
2204                 break;
2205         }
2206
2207         vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2208 }
2209
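     /* Two sets of netdev ops: dynamic vNICs additionally expose the
      * ndo_{set,get}_vf_port hooks and use their own MAC-address
      * handler; standard vNICs use the plain set.
      */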
2210 static const struct net_device_ops enic_netdev_dynamic_ops = {
2211         .ndo_open               = enic_open,
2212         .ndo_stop               = enic_stop,
2213         .ndo_start_xmit         = enic_hard_start_xmit,
2214         .ndo_get_stats          = enic_get_stats,
2215         .ndo_validate_addr      = eth_validate_addr,
2216         .ndo_set_rx_mode        = enic_set_rx_mode,
2217         .ndo_set_multicast_list = enic_set_rx_mode,
2218         .ndo_set_mac_address    = enic_set_mac_address_dynamic,
2219         .ndo_change_mtu         = enic_change_mtu,
2220         .ndo_vlan_rx_register   = enic_vlan_rx_register,
2221         .ndo_vlan_rx_add_vid    = enic_vlan_rx_add_vid,
2222         .ndo_vlan_rx_kill_vid   = enic_vlan_rx_kill_vid,
2223         .ndo_tx_timeout         = enic_tx_timeout,
2224         .ndo_set_vf_port        = enic_set_vf_port,
2225         .ndo_get_vf_port        = enic_get_vf_port,
2226 #ifdef IFLA_VF_MAX
2227         .ndo_set_vf_mac         = enic_set_vf_mac,
2228 #endif
2229 #ifdef CONFIG_NET_POLL_CONTROLLER
2230         .ndo_poll_controller    = enic_poll_controller,
2231 #endif
2232 };
2233
2234 static const struct net_device_ops enic_netdev_ops = {
2235         .ndo_open               = enic_open,
2236         .ndo_stop               = enic_stop,
2237         .ndo_start_xmit         = enic_hard_start_xmit,
2238         .ndo_get_stats          = enic_get_stats,
2239         .ndo_validate_addr      = eth_validate_addr,
2240         .ndo_set_mac_address    = enic_set_mac_address,
2241         .ndo_set_rx_mode        = enic_set_rx_mode,
2242         .ndo_set_multicast_list = enic_set_rx_mode,
2243         .ndo_change_mtu         = enic_change_mtu,
2244         .ndo_vlan_rx_register   = enic_vlan_rx_register,
2245         .ndo_vlan_rx_add_vid    = enic_vlan_rx_add_vid,
2246         .ndo_vlan_rx_kill_vid   = enic_vlan_rx_kill_vid,
2247         .ndo_tx_timeout         = enic_tx_timeout,
2248 #ifdef CONFIG_NET_POLL_CONTROLLER
2249         .ndo_poll_controller    = enic_poll_controller,
2250 #endif
2251 };
2252
2253 static void enic_dev_deinit(struct enic *enic)
2254 {
2255         unsigned int i;
2256
2257         for (i = 0; i < enic->rq_count; i++)
2258                 netif_napi_del(&enic->napi[i]);
2259
2260         enic_free_vnic_resources(enic);
2261         enic_clear_intr_mode(enic);
2262 }
2263
2264 static int enic_dev_init(struct enic *enic)
2265 {
2266         struct device *dev = enic_get_dev(enic);
2267         struct net_device *netdev = enic->netdev;
2268         unsigned int i;
2269         int err;
2270
2271         /* Get vNIC configuration
2272          */
2273
2274         err = enic_get_vnic_config(enic);
2275         if (err) {
2276                 dev_err(dev, "Get vNIC configuration failed, aborting\n");
2277                 return err;
2278         }
2279
2280         /* Get available resource counts
2281          */
2282
2283         enic_get_res_counts(enic);
2284
2285         /* Set interrupt mode based on resource counts and system
2286          * capabilities
2287          */
2288
2289         err = enic_set_intr_mode(enic);
2290         if (err) {
2291                 dev_err(dev, "Failed to set intr mode based on resource "
2292                         "counts and system capabilities, aborting\n");
2293                 return err;
2294         }
2295
2296         /* Allocate and configure vNIC resources
2297          */
2298
2299         err = enic_alloc_vnic_resources(enic);
2300         if (err) {
2301                 dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
2302                 goto err_out_free_vnic_resources;
2303         }
2304
2305         enic_init_vnic_resources(enic);
2306
2307         err = enic_set_rss_nic_cfg(enic);
2308         if (err) {
2309                 dev_err(dev, "Failed to config nic, aborting\n");
2310                 goto err_out_free_vnic_resources;
2311         }
2312
2313         switch (vnic_dev_get_intr_mode(enic->vdev)) {
2314         default:
2315                 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
2316                 break;
2317         case VNIC_DEV_INTR_MODE_MSIX:
2318                 for (i = 0; i < enic->rq_count; i++)
2319                         netif_napi_add(netdev, &enic->napi[i],
2320                                 enic_poll_msix, 64);
2321                 break;
2322         }
2323
2324         return 0;
2325
2326 err_out_free_vnic_resources:
2327         enic_clear_intr_mode(enic);
2328         enic_free_vnic_resources(enic);
2329
2330         return err;
2331 }
2332
2333 static void enic_iounmap(struct enic *enic)
2334 {
2335         unsigned int i;
2336
2337         for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
2338                 if (enic->bar[i].vaddr)
2339                         iounmap(enic->bar[i].vaddr);
2340 }
2341
2342 static int __devinit enic_probe(struct pci_dev *pdev,
2343         const struct pci_device_id *ent)
2344 {
2345         struct device *dev = &pdev->dev;
2346         struct net_device *netdev;
2347         struct enic *enic;
2348         int using_dac = 0;
2349         unsigned int i;
2350         int err;
2351
2352         /* Allocate net device structure and initialize.  Private
2353          * instance data is initialized to zero.
2354          */
2355
2356         netdev = alloc_etherdev(sizeof(struct enic));
2357         if (!netdev) {
2358                 pr_err("Etherdev alloc failed, aborting\n");
2359                 return -ENOMEM;
2360         }
2361
2362         pci_set_drvdata(pdev, netdev);
2363
2364         SET_NETDEV_DEV(netdev, &pdev->dev);
2365
2366         enic = netdev_priv(netdev);
2367         enic->netdev = netdev;
2368         enic->pdev = pdev;
2369
2370         /* Setup PCI resources
2371          */
2372
2373         err = pci_enable_device_mem(pdev);
2374         if (err) {
2375                 dev_err(dev, "Cannot enable PCI device, aborting\n");
2376                 goto err_out_free_netdev;
2377         }
2378
2379         err = pci_request_regions(pdev, DRV_NAME);
2380         if (err) {
2381                 dev_err(dev, "Cannot request PCI regions, aborting\n");
2382                 goto err_out_disable_device;
2383         }
2384
2385         pci_set_master(pdev);
2386
2387         /* Query PCI controller on system for DMA addressing
2388          * limitation for the device.  Try 40-bit first, and
2389          * fail to 32-bit.
2390          * fall back to 32-bit.
2391
2392         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2393         if (err) {
2394                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2395                 if (err) {
2396                         dev_err(dev, "No usable DMA configuration, aborting\n");
2397                         goto err_out_release_regions;
2398                 }
2399                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2400                 if (err) {
2401                         dev_err(dev, "Unable to obtain %u-bit DMA "
2402                                 "for consistent allocations, aborting\n", 32);
2403                         goto err_out_release_regions;
2404                 }
2405         } else {
2406                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
2407                 if (err) {
2408                         dev_err(dev, "Unable to obtain %u-bit DMA "
2409                                 "for consistent allocations, aborting\n", 40);
2410                         goto err_out_release_regions;
2411                 }
2412                 using_dac = 1;
2413         }
2414
2415         /* Map vNIC resources from BAR0-5
2416          */
2417
2418         for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
2419                 if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
2420                         continue;
2421                 enic->bar[i].len = pci_resource_len(pdev, i);
2422                 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
2423                 if (!enic->bar[i].vaddr) {
2424                         dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
2425                         err = -ENODEV;
2426                         goto err_out_iounmap;
2427                 }
2428                 enic->bar[i].bus_addr = pci_resource_start(pdev, i);
2429         }
2430
2431         /* Register vNIC device
2432          */
2433
2434         enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
2435                 ARRAY_SIZE(enic->bar));
2436         if (!enic->vdev) {
2437                 dev_err(dev, "vNIC registration failed, aborting\n");
2438                 err = -ENODEV;
2439                 goto err_out_iounmap;
2440         }
2441
2442         /* Issue device open to get device in known state
2443          */
2444
2445         err = enic_dev_open(enic);
2446         if (err) {
2447                 dev_err(dev, "vNIC dev open failed, aborting\n");
2448                 goto err_out_vnic_unregister;
2449         }
2450
2451         /* Setup devcmd lock
2452          */
2453
2454         spin_lock_init(&enic->devcmd_lock);
2455
2456         /*
2457          * Set ingress vlan rewrite mode before vnic initialization
2458          */
2459
2460         err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2461         if (err) {
2462                 dev_err(dev,
2463                         "Failed to set ingress vlan rewrite mode, aborting.\n");
2464                 goto err_out_dev_close;
2465         }
2466
2467         /* Issue device init to initialize the vnic-to-switch link.
2468          * We'll start with carrier off and wait for link UP
2469          * notification later to turn on carrier.  We don't need
2470          * to wait here for the vnic-to-switch link initialization
2471          * to complete; link UP notification is the indication that
2472          * the process is complete.
2473          */
2474
2475         netif_carrier_off(netdev);
2476
2477         /* Do not call dev_init for a dynamic vnic.
2478          * For a dynamic vnic, init_prov_info will be
2479          * called later by an upper layer.
2480          */
2481
2482         if (!enic_is_dynamic(enic)) {
2483                 err = vnic_dev_init(enic->vdev, 0);
2484                 if (err) {
2485                         dev_err(dev, "vNIC dev init failed, aborting\n");
2486                         goto err_out_dev_close;
2487                 }
2488         }
2489
2490         err = enic_dev_init(enic);
2491         if (err) {
2492                 dev_err(dev, "Device initialization failed, aborting\n");
2493                 goto err_out_dev_close;
2494         }
2495
2496         /* Setup notification timer, HW reset task, and wq locks
2497          */
2498
2499         init_timer(&enic->notify_timer);
2500         enic->notify_timer.function = enic_notify_timer;
2501         enic->notify_timer.data = (unsigned long)enic;
2502
2503         INIT_WORK(&enic->reset, enic_reset);
2504
2505         for (i = 0; i < enic->wq_count; i++)
2506                 spin_lock_init(&enic->wq_lock[i]);
2507
2508         /* Register net device
2509          */
2510
2511         enic->port_mtu = enic->config.mtu;
2512         (void)enic_change_mtu(netdev, enic->port_mtu);
2513
2514         err = enic_set_mac_addr(netdev, enic->mac_addr);
2515         if (err) {
2516                 dev_err(dev, "Invalid MAC address, aborting\n");
2517                 goto err_out_dev_deinit;
2518         }
2519
2520         enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
2521         enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2522
2523         if (enic_is_dynamic(enic))
2524                 netdev->netdev_ops = &enic_netdev_dynamic_ops;
2525         else
2526                 netdev->netdev_ops = &enic_netdev_ops;
2527
2528         netdev->watchdog_timeo = 2 * HZ;
2529         netdev->ethtool_ops = &enic_ethtool_ops;
2530
2531         netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2532         if (ENIC_SETTING(enic, LOOP)) {
2533                 netdev->features &= ~NETIF_F_HW_VLAN_TX;
2534                 enic->loop_enable = 1;
2535                 enic->loop_tag = enic->config.loop_tag;
2536                 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
2537         }
2538         if (ENIC_SETTING(enic, TXCSUM))
2539                 netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2540         if (ENIC_SETTING(enic, TSO))
2541                 netdev->features |= NETIF_F_TSO |
2542                         NETIF_F_TSO6 | NETIF_F_TSO_ECN;
2543         if (ENIC_SETTING(enic, LRO))
2544                 netdev->features |= NETIF_F_GRO;
2545         if (using_dac)
2546                 netdev->features |= NETIF_F_HIGHDMA;
2547
2548         enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
2549
2550         err = register_netdev(netdev);
2551         if (err) {
2552                 dev_err(dev, "Cannot register net device, aborting\n");
2553                 goto err_out_dev_deinit;
2554         }
2555
2556         return 0;
2557
2558 err_out_dev_deinit:
2559         enic_dev_deinit(enic);
2560 err_out_dev_close:
2561         vnic_dev_close(enic->vdev);
2562 err_out_vnic_unregister:
2563         vnic_dev_unregister(enic->vdev);
2564 err_out_iounmap:
2565         enic_iounmap(enic);
2566 err_out_release_regions:
2567         pci_release_regions(pdev);
2568 err_out_disable_device:
2569         pci_disable_device(pdev);
2570 err_out_free_netdev:
2571         pci_set_drvdata(pdev, NULL);
2572         free_netdev(netdev);
2573
2574         return err;
2575 }
2576
2577 static void __devexit enic_remove(struct pci_dev *pdev)
2578 {
2579         struct net_device *netdev = pci_get_drvdata(pdev);
2580
2581         if (netdev) {
2582                 struct enic *enic = netdev_priv(netdev);
2583
2584                 cancel_work_sync(&enic->reset);
2585                 unregister_netdev(netdev);
2586                 enic_dev_deinit(enic);
2587                 vnic_dev_close(enic->vdev);
2588                 vnic_dev_unregister(enic->vdev);
2589                 enic_iounmap(enic);
2590                 pci_release_regions(pdev);
2591                 pci_disable_device(pdev);
2592                 pci_set_drvdata(pdev, NULL);
2593                 free_netdev(netdev);
2594         }
2595 }
2596
2597 static struct pci_driver enic_driver = {
2598         .name = DRV_NAME,
2599         .id_table = enic_id_table,
2600         .probe = enic_probe,
2601         .remove = __devexit_p(enic_remove),
2602 };
2603
2604 static int __init enic_init_module(void)
2605 {
2606         pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
2607
2608         return pci_register_driver(&enic_driver);
2609 }
2610
2611 static void __exit enic_cleanup_module(void)
2612 {
2613         pci_unregister_driver(&enic_driver);
2614 }
2615
2616 module_init(enic_init_module);
2617 module_exit(enic_cleanup_module);