enic: Clean up: Remove support for an older version of hardware
drivers/net/enic/enic_main.c
/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"

#define ENIC_NOTIFY_TIMER_PERIOD        (2 * HZ)
#define WQ_ENET_MAX_DESC_LEN            (1 << WQ_ENET_LEN_BITS)
#define MAX_TSO                         (1 << 16)
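/* Worst case, a single send buffer of up to MAX_TSO bytes is carved
 * into WQ_ENET_MAX_DESC_LEN-byte descriptors, plus one descriptor for
 * the remainder (see enic_queue_wq_skb_tso()).
 */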
#define ENIC_DESC_MAX_SPLITS            (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN     0x0044  /* enet dynamic vnic */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
        { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
        { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
        { 0, }  /* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

struct enic_stat {
        char name[ETH_GSTRING_LEN];
        unsigned int offset;
};

#define ENIC_TX_STAT(stat)      \
        { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)      \
        { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }

static const struct enic_stat enic_tx_stats[] = {
        ENIC_TX_STAT(tx_frames_ok),
        ENIC_TX_STAT(tx_unicast_frames_ok),
        ENIC_TX_STAT(tx_multicast_frames_ok),
        ENIC_TX_STAT(tx_broadcast_frames_ok),
        ENIC_TX_STAT(tx_bytes_ok),
        ENIC_TX_STAT(tx_unicast_bytes_ok),
        ENIC_TX_STAT(tx_multicast_bytes_ok),
        ENIC_TX_STAT(tx_broadcast_bytes_ok),
        ENIC_TX_STAT(tx_drops),
        ENIC_TX_STAT(tx_errors),
        ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
        ENIC_RX_STAT(rx_frames_ok),
        ENIC_RX_STAT(rx_frames_total),
        ENIC_RX_STAT(rx_unicast_frames_ok),
        ENIC_RX_STAT(rx_multicast_frames_ok),
        ENIC_RX_STAT(rx_broadcast_frames_ok),
        ENIC_RX_STAT(rx_bytes_ok),
        ENIC_RX_STAT(rx_unicast_bytes_ok),
        ENIC_RX_STAT(rx_multicast_bytes_ok),
        ENIC_RX_STAT(rx_broadcast_bytes_ok),
        ENIC_RX_STAT(rx_drop),
        ENIC_RX_STAT(rx_no_bufs),
        ENIC_RX_STAT(rx_errors),
        ENIC_RX_STAT(rx_rss),
        ENIC_RX_STAT(rx_crc_errors),
        ENIC_RX_STAT(rx_frames_64),
        ENIC_RX_STAT(rx_frames_127),
        ENIC_RX_STAT(rx_frames_255),
        ENIC_RX_STAT(rx_frames_511),
        ENIC_RX_STAT(rx_frames_1023),
        ENIC_RX_STAT(rx_frames_1518),
        ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);

static int enic_is_dynamic(struct enic *enic)
{
        return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

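/* Completion queue indexing: RQ completion queues occupy indices
 * [0, rq_count) and WQ completion queues follow at rq_count + wq.
 */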
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
        return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
        return enic->rq_count + wq;
}

static inline unsigned int enic_legacy_io_intr(void)
{
        return 0;
}

static inline unsigned int enic_legacy_err_intr(void)
{
        return 1;
}

static inline unsigned int enic_legacy_notify_intr(void)
{
        return 2;
}

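/* MSI-X vector layout: one vector per RQ, then one per WQ, followed by
 * a single error vector and a single notify vector.
 */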
static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
{
        return rq;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
{
        return enic->rq_count + wq;
}

static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
        return enic->rq_count + enic->wq_count;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
        return enic->rq_count + enic->wq_count + 1;
}

static int enic_get_settings(struct net_device *netdev,
        struct ethtool_cmd *ecmd)
{
        struct enic *enic = netdev_priv(netdev);

        ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
        ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
        ecmd->port = PORT_FIBRE;
        ecmd->transceiver = XCVR_EXTERNAL;

        if (netif_carrier_ok(netdev)) {
                ecmd->speed = vnic_dev_port_speed(enic->vdev);
                ecmd->duplex = DUPLEX_FULL;
        } else {
                ecmd->speed = -1;
                ecmd->duplex = -1;
        }

        ecmd->autoneg = AUTONEG_DISABLE;

        return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
        struct ethtool_drvinfo *drvinfo)
{
        struct enic *enic = netdev_priv(netdev);
        struct vnic_devcmd_fw_info *fw_info;

        enic_dev_fw_info(enic, &fw_info);

        strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
        strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
        strncpy(drvinfo->fw_version, fw_info->fw_version,
                sizeof(drvinfo->fw_version));
        strncpy(drvinfo->bus_info, pci_name(enic->pdev),
                sizeof(drvinfo->bus_info));
}

static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
        unsigned int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < enic_n_tx_stats; i++) {
                        memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
                        data += ETH_GSTRING_LEN;
                }
                for (i = 0; i < enic_n_rx_stats; i++) {
                        memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
                        data += ETH_GSTRING_LEN;
                }
                break;
        }
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return enic_n_tx_stats + enic_n_rx_stats;
        default:
                return -EOPNOTSUPP;
        }
}

static void enic_get_ethtool_stats(struct net_device *netdev,
        struct ethtool_stats *stats, u64 *data)
{
        struct enic *enic = netdev_priv(netdev);
        struct vnic_stats *vstats;
        unsigned int i;

        enic_dev_stats_dump(enic, &vstats);

        for (i = 0; i < enic_n_tx_stats; i++)
                *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
        for (i = 0; i < enic_n_rx_stats; i++)
                *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}

static u32 enic_get_rx_csum(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        return enic->csum_rx_enabled;
}

static int enic_set_rx_csum(struct net_device *netdev, u32 data)
{
        struct enic *enic = netdev_priv(netdev);

        if (data && !ENIC_SETTING(enic, RXCSUM))
                return -EINVAL;

        enic->csum_rx_enabled = !!data;

        return 0;
}

static int enic_set_tx_csum(struct net_device *netdev, u32 data)
{
        struct enic *enic = netdev_priv(netdev);

        if (data && !ENIC_SETTING(enic, TXCSUM))
                return -EINVAL;

        if (data)
                netdev->features |= NETIF_F_HW_CSUM;
        else
                netdev->features &= ~NETIF_F_HW_CSUM;

        return 0;
}

static int enic_set_tso(struct net_device *netdev, u32 data)
{
        struct enic *enic = netdev_priv(netdev);

        if (data && !ENIC_SETTING(enic, TSO))
                return -EINVAL;

        if (data)
                netdev->features |=
                        NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
        else
                netdev->features &=
                        ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);

        return 0;
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
        struct enic *enic = netdev_priv(netdev);
        enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
        struct ethtool_coalesce *ecmd)
{
        struct enic *enic = netdev_priv(netdev);

        ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
        ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

        return 0;
}

static int enic_set_coalesce(struct net_device *netdev,
        struct ethtool_coalesce *ecmd)
{
        struct enic *enic = netdev_priv(netdev);
        u32 tx_coalesce_usecs;
        u32 rx_coalesce_usecs;
        unsigned int i, intr;

        tx_coalesce_usecs = min_t(u32,
                INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
                ecmd->tx_coalesce_usecs);
        rx_coalesce_usecs = min_t(u32,
                INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
                ecmd->rx_coalesce_usecs);

        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                if (tx_coalesce_usecs != rx_coalesce_usecs)
                        return -EINVAL;

                intr = enic_legacy_io_intr();
                vnic_intr_coalescing_timer_set(&enic->intr[intr],
                        INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
                break;
        case VNIC_DEV_INTR_MODE_MSI:
                if (tx_coalesce_usecs != rx_coalesce_usecs)
                        return -EINVAL;

                vnic_intr_coalescing_timer_set(&enic->intr[0],
                        INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
                break;
        case VNIC_DEV_INTR_MODE_MSIX:
                for (i = 0; i < enic->wq_count; i++) {
                        intr = enic_msix_wq_intr(enic, i);
                        vnic_intr_coalescing_timer_set(&enic->intr[intr],
                                INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
                }

                for (i = 0; i < enic->rq_count; i++) {
                        intr = enic_msix_rq_intr(enic, i);
                        vnic_intr_coalescing_timer_set(&enic->intr[intr],
                                INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
                }

                break;
        default:
                break;
        }

        enic->tx_coalesce_usecs = tx_coalesce_usecs;
        enic->rx_coalesce_usecs = rx_coalesce_usecs;

        return 0;
}

static const struct ethtool_ops enic_ethtool_ops = {
        .get_settings = enic_get_settings,
        .get_drvinfo = enic_get_drvinfo,
        .get_msglevel = enic_get_msglevel,
        .set_msglevel = enic_set_msglevel,
        .get_link = ethtool_op_get_link,
        .get_strings = enic_get_strings,
        .get_sset_count = enic_get_sset_count,
        .get_ethtool_stats = enic_get_ethtool_stats,
        .get_rx_csum = enic_get_rx_csum,
        .set_rx_csum = enic_set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = enic_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_tso = ethtool_op_get_tso,
        .set_tso = enic_set_tso,
        .get_coalesce = enic_get_coalesce,
        .set_coalesce = enic_set_coalesce,
        .get_flags = ethtool_op_get_flags,
};

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
        struct enic *enic = vnic_dev_priv(wq->vdev);

        if (buf->sop)
                pci_unmap_single(enic->pdev, buf->dma_addr,
                        buf->len, PCI_DMA_TODEVICE);
        else
                pci_unmap_page(enic->pdev, buf->dma_addr,
                        buf->len, PCI_DMA_TODEVICE);

        if (buf->os_buf)
                dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
        struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
        enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
        u8 type, u16 q_number, u16 completed_index, void *opaque)
{
        struct enic *enic = vnic_dev_priv(vdev);

        spin_lock(&enic->wq_lock[q_number]);

        vnic_wq_service(&enic->wq[q_number], cq_desc,
                completed_index, enic_wq_free_buf,
                opaque);

        if (netif_queue_stopped(enic->netdev) &&
            vnic_wq_desc_avail(&enic->wq[q_number]) >=
            (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
                netif_wake_queue(enic->netdev);

        spin_unlock(&enic->wq_lock[q_number]);

        return 0;
}

static void enic_log_q_error(struct enic *enic)
{
        unsigned int i;
        u32 error_status;

        for (i = 0; i < enic->wq_count; i++) {
                error_status = vnic_wq_error_status(&enic->wq[i]);
                if (error_status)
                        netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
                                i, error_status);
        }

        for (i = 0; i < enic->rq_count; i++) {
                error_status = vnic_rq_error_status(&enic->rq[i]);
                if (error_status)
                        netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
                                i, error_status);
        }
}

static void enic_msglvl_check(struct enic *enic)
{
        u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

        if (msg_enable != enic->msg_enable) {
                netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
                        enic->msg_enable, msg_enable);
                enic->msg_enable = msg_enable;
        }
}

static void enic_mtu_check(struct enic *enic)
{
        u32 mtu = vnic_dev_mtu(enic->vdev);
        struct net_device *netdev = enic->netdev;

        if (mtu && mtu != enic->port_mtu) {
                enic->port_mtu = mtu;
                if (mtu < netdev->mtu)
                        netdev_warn(netdev,
                                "interface MTU (%d) set higher "
                                "than switch port MTU (%d)\n",
                                netdev->mtu, mtu);
        }
}

static void enic_link_check(struct enic *enic)
{
        int link_status = vnic_dev_link_status(enic->vdev);
        int carrier_ok = netif_carrier_ok(enic->netdev);

        if (link_status && !carrier_ok) {
                netdev_info(enic->netdev, "Link UP\n");
                netif_carrier_on(enic->netdev);
        } else if (!link_status && carrier_ok) {
                netdev_info(enic->netdev, "Link DOWN\n");
                netif_carrier_off(enic->netdev);
        }
}

static void enic_notify_check(struct enic *enic)
{
        enic_msglvl_check(enic);
        enic_mtu_check(enic);
        enic_link_check(enic);
}

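/* Test whether interrupt source i is asserted in the legacy INTx
 * pending-bits array (PBA).
 */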
#define ENIC_TEST_INTR(pba, i) ((pba) & (1 << (i)))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
        struct net_device *netdev = data;
        struct enic *enic = netdev_priv(netdev);
        unsigned int io_intr = enic_legacy_io_intr();
        unsigned int err_intr = enic_legacy_err_intr();
        unsigned int notify_intr = enic_legacy_notify_intr();
        u32 pba;

        vnic_intr_mask(&enic->intr[io_intr]);

        pba = vnic_intr_legacy_pba(enic->legacy_pba);
        if (!pba) {
                vnic_intr_unmask(&enic->intr[io_intr]);
                return IRQ_NONE;        /* not our interrupt */
        }

        if (ENIC_TEST_INTR(pba, notify_intr)) {
                vnic_intr_return_all_credits(&enic->intr[notify_intr]);
                enic_notify_check(enic);
        }

        if (ENIC_TEST_INTR(pba, err_intr)) {
                vnic_intr_return_all_credits(&enic->intr[err_intr]);
                enic_log_q_error(enic);
                /* schedule recovery from WQ/RQ error */
                schedule_work(&enic->reset);
                return IRQ_HANDLED;
        }

        if (ENIC_TEST_INTR(pba, io_intr)) {
                if (napi_schedule_prep(&enic->napi[0]))
                        __napi_schedule(&enic->napi[0]);
        } else {
                vnic_intr_unmask(&enic->intr[io_intr]);
        }

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
        struct enic *enic = data;

        /* With MSI, there is no sharing of interrupts, so this is
         * our interrupt and there is no need to ack it.  The device
         * is not providing per-vector masking, so the OS will not
         * write to PCI config space to mask/unmask the interrupt.
         * We're using mask_on_assertion for MSI, so the device
         * automatically masks the interrupt when the interrupt is
         * generated.  Later, when exiting polling, the interrupt
         * will be unmasked (see enic_poll).
         *
         * Also, the device uses the same PCIe Traffic Class (TC)
         * for Memory Write data and MSI, so there are no ordering
         * issues; the MSI will always arrive at the Root Complex
         * _after_ corresponding Memory Writes (i.e. descriptor
         * writes).
         */

        napi_schedule(&enic->napi[0]);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
        struct napi_struct *napi = data;

        /* schedule NAPI polling for RQ cleanup */
        napi_schedule(napi);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
        struct enic *enic = data;
        unsigned int cq = enic_cq_wq(enic, 0);
        unsigned int intr = enic_msix_wq_intr(enic, 0);
        unsigned int wq_work_to_do = -1; /* no limit */
        unsigned int wq_work_done;

        wq_work_done = vnic_cq_service(&enic->cq[cq],
                wq_work_to_do, enic_wq_service, NULL);

        vnic_intr_return_credits(&enic->intr[intr],
                wq_work_done,
                1 /* unmask intr */,
                1 /* reset intr timer */);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
        struct enic *enic = data;
        unsigned int intr = enic_msix_err_intr(enic);

        vnic_intr_return_all_credits(&enic->intr[intr]);

        enic_log_q_error(enic);

        /* schedule recovery from WQ/RQ error */
        schedule_work(&enic->reset);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
        struct enic *enic = data;
        unsigned int intr = enic_msix_notify_intr(enic);

        vnic_intr_return_all_credits(&enic->intr[intr]);
        enic_notify_check(enic);

        return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb,
        unsigned int len_left, int loopback)
{
        skb_frag_t *frag;

        /* Queue additional data fragments */
        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
                len_left -= frag->size;
                enic_queue_wq_desc_cont(wq, skb,
                        pci_map_page(enic->pdev, frag->page,
                                frag->page_offset, frag->size,
                                PCI_DMA_TODEVICE),
                        frag->size,
                        (len_left == 0),        /* EOP? */
                        loopback);
        }
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb,
        int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
        unsigned int head_len = skb_headlen(skb);
        unsigned int len_left = skb->len - head_len;
        int eop = (len_left == 0);

        /* Queue the main skb fragment. The fragments are no larger
         * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
         * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
         * per fragment is queued.
         */
        enic_queue_wq_desc(wq, skb,
                pci_map_single(enic->pdev, skb->data,
                        head_len, PCI_DMA_TODEVICE),
                head_len,
                vlan_tag_insert, vlan_tag,
                eop, loopback);

        if (!eop)
                enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb,
        int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
        unsigned int head_len = skb_headlen(skb);
        unsigned int len_left = skb->len - head_len;
        unsigned int hdr_len = skb_checksum_start_offset(skb);
        unsigned int csum_offset = hdr_len + skb->csum_offset;
        int eop = (len_left == 0);

        /* Queue the main skb fragment. The fragments are no larger
         * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
         * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
         * per fragment is queued.
         */
        enic_queue_wq_desc_csum_l4(wq, skb,
                pci_map_single(enic->pdev, skb->data,
                        head_len, PCI_DMA_TODEVICE),
                head_len,
                csum_offset,
                hdr_len,
                vlan_tag_insert, vlan_tag,
                eop, loopback);

        if (!eop)
                enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
        int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
        unsigned int frag_len_left = skb_headlen(skb);
        unsigned int len_left = skb->len - frag_len_left;
        unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        int eop = (len_left == 0);
        unsigned int len;
        dma_addr_t dma_addr;
        unsigned int offset = 0;
        skb_frag_t *frag;

        /* Preload TCP csum field with IP pseudo hdr calculated
         * with IP length set to zero.  HW will later add in length
         * to each TCP segment resulting from the TSO.
         */

        if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
                ip_hdr(skb)->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                        ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                        &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        }

        /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
         * for the main skb fragment
         */
        while (frag_len_left) {
                len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
                dma_addr = pci_map_single(enic->pdev, skb->data + offset,
                                len, PCI_DMA_TODEVICE);
                enic_queue_wq_desc_tso(wq, skb,
                        dma_addr,
                        len,
                        mss, hdr_len,
                        vlan_tag_insert, vlan_tag,
                        eop && (len == frag_len_left), loopback);
                frag_len_left -= len;
                offset += len;
        }

        if (eop)
                return;

        /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
         * for additional data fragments
         */
        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
                len_left -= frag->size;
                frag_len_left = frag->size;
                offset = frag->page_offset;

                while (frag_len_left) {
                        len = min(frag_len_left,
                                (unsigned int)WQ_ENET_MAX_DESC_LEN);
                        dma_addr = pci_map_page(enic->pdev, frag->page,
                                offset, len,
                                PCI_DMA_TODEVICE);
                        enic_queue_wq_desc_cont(wq, skb,
                                dma_addr,
                                len,
                                (len_left == 0) &&
                                (len == frag_len_left),         /* EOP? */
                                loopback);
                        frag_len_left -= len;
                        offset += len;
                }
        }
}

static inline void enic_queue_wq_skb(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb)
{
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int vlan_tag = 0;
        int vlan_tag_insert = 0;
        int loopback = 0;

        if (vlan_tx_tag_present(skb)) {
                /* VLAN tag from trunking driver */
                vlan_tag_insert = 1;
                vlan_tag = vlan_tx_tag_get(skb);
        } else if (enic->loop_enable) {
                vlan_tag = enic->loop_tag;
                loopback = 1;
        }

        if (mss)
                enic_queue_wq_skb_tso(enic, wq, skb, mss,
                        vlan_tag_insert, vlan_tag, loopback);
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                enic_queue_wq_skb_csum_l4(enic, wq, skb,
                        vlan_tag_insert, vlan_tag, loopback);
        else
                enic_queue_wq_skb_vlan(enic, wq, skb,
                        vlan_tag_insert, vlan_tag, loopback);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
        struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        struct vnic_wq *wq = &enic->wq[0];
        unsigned long flags;

        if (skb->len <= 0) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
         * which is very likely.  In the off chance it's going to take
         * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
         */

        if (skb_shinfo(skb)->gso_size == 0 &&
            skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
            skb_linearize(skb)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        spin_lock_irqsave(&enic->wq_lock[0], flags);

        if (vnic_wq_desc_avail(wq) <
            skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
                netif_stop_queue(netdev);
                /* This is a hard error, log it */
                netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
                spin_unlock_irqrestore(&enic->wq_lock[0], flags);
                return NETDEV_TX_BUSY;
        }

        enic_queue_wq_skb(enic, wq, skb);

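        /* Stop the queue while there may not be room for another
         * maximally fragmented skb; enic_wq_service() re-wakes it
         * once enough descriptors have completed.
         */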
        if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
                netif_stop_queue(netdev);

        spin_unlock_irqrestore(&enic->wq_lock[0], flags);

        return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct net_device_stats *enic_get_stats(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        struct net_device_stats *net_stats = &netdev->stats;
        struct vnic_stats *stats;

        enic_dev_stats_dump(enic, &stats);

        net_stats->tx_packets = stats->tx.tx_frames_ok;
        net_stats->tx_bytes = stats->tx.tx_bytes_ok;
        net_stats->tx_errors = stats->tx.tx_errors;
        net_stats->tx_dropped = stats->tx.tx_drops;

        net_stats->rx_packets = stats->rx.rx_frames_ok;
        net_stats->rx_bytes = stats->rx.rx_bytes_ok;
        net_stats->rx_errors = stats->rx.rx_errors;
        net_stats->multicast = stats->rx.rx_multicast_frames_ok;
        net_stats->rx_over_errors = enic->rq_truncated_pkts;
        net_stats->rx_crc_errors = enic->rq_bad_fcs;
        net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

        return net_stats;
}

static void enic_reset_multicast_list(struct enic *enic)
{
        enic->mc_count = 0;
        enic->flags = 0;
}

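/* Dynamic vnics may carry a zero MAC until an address is assigned
 * during port profile association (see enic_set_vf_port()); other
 * vnics require a valid unicast address.
 */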
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
        struct enic *enic = netdev_priv(netdev);

        if (enic_is_dynamic(enic)) {
                if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
                        return -EADDRNOTAVAIL;
        } else {
                if (!is_valid_ether_addr(addr))
                        return -EADDRNOTAVAIL;
        }

        memcpy(netdev->dev_addr, addr, netdev->addr_len);

        return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
        struct enic *enic = netdev_priv(netdev);
        struct sockaddr *saddr = p;
        char *addr = saddr->sa_data;
        int err;

        if (netif_running(enic->netdev)) {
                err = enic_dev_del_station_addr(enic);
                if (err)
                        return err;
        }

        err = enic_set_mac_addr(netdev, addr);
        if (err)
                return err;

        if (netif_running(enic->netdev)) {
                err = enic_dev_add_station_addr(enic);
                if (err)
                        return err;
        }

        return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
        struct sockaddr *saddr = p;
        char *addr = saddr->sa_data;
        struct enic *enic = netdev_priv(netdev);
        int err;

        err = enic_dev_del_station_addr(enic);
        if (err)
                return err;

        err = enic_set_mac_addr(netdev, addr);
        if (err)
                return err;

        return enic_dev_add_station_addr(enic);
}

static void enic_add_multicast_addr_list(struct enic *enic)
{
        struct net_device *netdev = enic->netdev;
        struct netdev_hw_addr *ha;
        unsigned int mc_count = netdev_mc_count(netdev);
        u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
        unsigned int i, j;

        if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
                netdev_warn(netdev, "Registering only %d out of %d "
                        "multicast addresses\n",
                        ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
                mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
        }

        /* Is there an easier way?  Trying to minimize the
         * calls to add/del multicast addrs.  We keep the
         * addrs from the last call in enic->mc_addr and
         * look for changes to add/del.
         */

        i = 0;
        netdev_for_each_mc_addr(ha, netdev) {
                if (i == mc_count)
                        break;
                memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
        }

        for (i = 0; i < enic->mc_count; i++) {
                for (j = 0; j < mc_count; j++)
                        if (compare_ether_addr(enic->mc_addr[i],
                                mc_addr[j]) == 0)
                                break;
                if (j == mc_count)
                        enic_dev_del_addr(enic, enic->mc_addr[i]);
        }

        for (i = 0; i < mc_count; i++) {
                for (j = 0; j < enic->mc_count; j++)
                        if (compare_ether_addr(mc_addr[i],
                                enic->mc_addr[j]) == 0)
                                break;
                if (j == enic->mc_count)
                        enic_dev_add_addr(enic, mc_addr[i]);
        }

        /* Save the list to compare against next time
         */

        for (i = 0; i < mc_count; i++)
                memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

        enic->mc_count = mc_count;
}

static void enic_add_unicast_addr_list(struct enic *enic)
{
        struct net_device *netdev = enic->netdev;
        struct netdev_hw_addr *ha;
        unsigned int uc_count = netdev_uc_count(netdev);
        u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
        unsigned int i, j;

        if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
                netdev_warn(netdev, "Registering only %d out of %d "
                        "unicast addresses\n",
                        ENIC_UNICAST_PERFECT_FILTERS, uc_count);
                uc_count = ENIC_UNICAST_PERFECT_FILTERS;
        }

        /* Is there an easier way?  Trying to minimize the
         * calls to add/del unicast addrs.  We keep the
         * addrs from the last call in enic->uc_addr and
         * look for changes to add/del.
         */

        i = 0;
        netdev_for_each_uc_addr(ha, netdev) {
                if (i == uc_count)
                        break;
                memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
        }

        for (i = 0; i < enic->uc_count; i++) {
                for (j = 0; j < uc_count; j++)
                        if (compare_ether_addr(enic->uc_addr[i],
                                uc_addr[j]) == 0)
                                break;
                if (j == uc_count)
                        enic_dev_del_addr(enic, enic->uc_addr[i]);
        }

        for (i = 0; i < uc_count; i++) {
                for (j = 0; j < enic->uc_count; j++)
                        if (compare_ether_addr(uc_addr[i],
                                enic->uc_addr[j]) == 0)
                                break;
                if (j == enic->uc_count)
                        enic_dev_add_addr(enic, uc_addr[i]);
        }

        /* Save the list to compare against next time
         */

        for (i = 0; i < uc_count; i++)
                memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);

        enic->uc_count = uc_count;
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        int directed = 1;
        int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
        int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
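        /* Fall back to promiscuous/all-multicast filtering when the
         * perfect-filter tables would overflow.
         */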
        int promisc = (netdev->flags & IFF_PROMISC) ||
                netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
        int allmulti = (netdev->flags & IFF_ALLMULTI) ||
                netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
        unsigned int flags = netdev->flags |
                (allmulti ? IFF_ALLMULTI : 0) |
                (promisc ? IFF_PROMISC : 0);

        if (enic->flags != flags) {
                enic->flags = flags;
                enic_dev_packet_filter(enic, directed,
                        multicast, broadcast, promisc, allmulti);
        }

        if (!promisc) {
                enic_add_unicast_addr_list(enic);
                if (!allmulti)
                        enic_add_multicast_addr_list(enic);
        }
}

/* rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
        struct vlan_group *vlan_group)
{
        struct enic *enic = netdev_priv(netdev);
        enic->vlan_group = vlan_group;
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct enic *enic = netdev_priv(netdev);

        if (vf != PORT_SELF_VF)
                return -EOPNOTSUPP;

        /* Ignore the vf argument for now. We can assume the request
         * is coming on a vf.
         */
        if (is_valid_ether_addr(mac)) {
                memcpy(enic->pp.vf_mac, mac, ETH_ALEN);
                return 0;
        } else
                return -EINVAL;
}

static int enic_set_port_profile(struct enic *enic, u8 *mac)
{
        struct vic_provinfo *vp;
        u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
        u16 os_type = VIC_GENERIC_PROV_OS_TYPE_LINUX;
        char uuid_str[38];
        char client_mac_str[18];
        u8 *client_mac;
        int err;

        err = enic_vnic_dev_deinit(enic);
        if (err)
                return err;

        switch (enic->pp.request) {

        case PORT_REQUEST_ASSOCIATE:

                if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
                        return -EINVAL;

                if (!is_valid_ether_addr(mac))
                        return -EADDRNOTAVAIL;

                vp = vic_provinfo_alloc(GFP_KERNEL, oui,
                        VIC_PROVINFO_GENERIC_TYPE);
                if (!vp)
                        return -ENOMEM;

                vic_provinfo_add_tlv(vp,
                        VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
                        strlen(enic->pp.name) + 1, enic->pp.name);

                if (!is_zero_ether_addr(enic->pp.mac_addr))
                        client_mac = enic->pp.mac_addr;
                else
                        client_mac = mac;

                vic_provinfo_add_tlv(vp,
                        VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
                        ETH_ALEN, client_mac);

                sprintf(client_mac_str, "%pM", client_mac);
                vic_provinfo_add_tlv(vp,
                        VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
                        sizeof(client_mac_str), client_mac_str);

                if (enic->pp.set & ENIC_SET_INSTANCE) {
                        sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
                        vic_provinfo_add_tlv(vp,
                                VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
                                sizeof(uuid_str), uuid_str);
                }

                if (enic->pp.set & ENIC_SET_HOST) {
                        sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
                        vic_provinfo_add_tlv(vp,
                                VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
                                sizeof(uuid_str), uuid_str);
                }

                os_type = htons(os_type);
                vic_provinfo_add_tlv(vp,
                        VIC_GENERIC_PROV_TLV_OS_TYPE,
                        sizeof(os_type), &os_type);

                err = enic_dev_init_prov(enic, vp);
                vic_provinfo_free(vp);
                if (err)
                        return err;
                break;

        case PORT_REQUEST_DISASSOCIATE:
                break;

        default:
                return -EINVAL;
        }

        /* Set flag to indicate that the port assoc/disassoc
         * request has been sent out to fw
         */
        enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;

        return 0;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
        struct nlattr *port[])
{
        struct enic *enic = netdev_priv(netdev);
        struct enic_port_profile new_pp;
        int err = 0;

        memset(&new_pp, 0, sizeof(new_pp));

        if (port[IFLA_PORT_REQUEST]) {
                new_pp.set |= ENIC_SET_REQUEST;
                new_pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
        }

        if (port[IFLA_PORT_PROFILE]) {
                new_pp.set |= ENIC_SET_NAME;
                memcpy(new_pp.name, nla_data(port[IFLA_PORT_PROFILE]),
                        PORT_PROFILE_MAX);
        }

        if (port[IFLA_PORT_INSTANCE_UUID]) {
                new_pp.set |= ENIC_SET_INSTANCE;
                memcpy(new_pp.instance_uuid,
                        nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
        }

        if (port[IFLA_PORT_HOST_UUID]) {
                new_pp.set |= ENIC_SET_HOST;
                memcpy(new_pp.host_uuid,
                        nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
        }

        /* don't support VFs, yet */
        if (vf != PORT_SELF_VF)
                return -EOPNOTSUPP;

        if (!(new_pp.set & ENIC_SET_REQUEST))
                return -EOPNOTSUPP;

        if (new_pp.request == PORT_REQUEST_ASSOCIATE) {
                /* Special case handling */
                if (!is_zero_ether_addr(enic->pp.vf_mac))
                        memcpy(new_pp.mac_addr, enic->pp.vf_mac, ETH_ALEN);

                if (is_zero_ether_addr(netdev->dev_addr))
                        random_ether_addr(netdev->dev_addr);
        }

        memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));

        err = enic_set_port_profile(enic, netdev->dev_addr);
        if (err)
                goto set_port_profile_cleanup;

set_port_profile_cleanup:
        memset(enic->pp.vf_mac, 0, ETH_ALEN);

        if (err || enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
                memset(netdev->dev_addr, 0, ETH_ALEN);
                memset(enic->pp.mac_addr, 0, ETH_ALEN);
        }

        return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
        struct sk_buff *skb)
{
        struct enic *enic = netdev_priv(netdev);
        int err, error, done;
        u16 response = PORT_PROFILE_RESPONSE_SUCCESS;

        if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
                return -ENODATA;

        err = enic_dev_init_done(enic, &done, &error);
        if (err)
                error = err;

        switch (error) {
        case ERR_SUCCESS:
                if (!done)
                        response = PORT_PROFILE_RESPONSE_INPROGRESS;
                break;
        case ERR_EINVAL:
                response = PORT_PROFILE_RESPONSE_INVALID;
                break;
        case ERR_EBADSTATE:
                response = PORT_PROFILE_RESPONSE_BADSTATE;
                break;
        case ERR_ENOMEM:
                response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
                break;
        default:
                response = PORT_PROFILE_RESPONSE_ERROR;
                break;
        }

        NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
        NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
        if (enic->pp.set & ENIC_SET_NAME)
                NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
                        enic->pp.name);
        if (enic->pp.set & ENIC_SET_INSTANCE)
                NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
                        enic->pp.instance_uuid);
        if (enic->pp.set & ENIC_SET_HOST)
                NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
                        enic->pp.host_uuid);

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
        struct enic *enic = vnic_dev_priv(rq->vdev);

        if (!buf->os_buf)
                return;

        pci_unmap_single(enic->pdev, buf->dma_addr,
                buf->len, PCI_DMA_FROMDEVICE);
        dev_kfree_skb_any(buf->os_buf);
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
        struct enic *enic = vnic_dev_priv(rq->vdev);
        struct net_device *netdev = enic->netdev;
        struct sk_buff *skb;
        unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
        unsigned int os_buf_index = 0;
        dma_addr_t dma_addr;

        skb = netdev_alloc_skb_ip_align(netdev, len);
        if (!skb)
                return -ENOMEM;

        dma_addr = pci_map_single(enic->pdev, skb->data,
                len, PCI_DMA_FROMDEVICE);

        enic_queue_rq_desc(rq, skb, os_buf_index,
                dma_addr, len);

        return 0;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque)
{
        struct enic *enic = vnic_dev_priv(rq->vdev);
        struct net_device *netdev = enic->netdev;
        struct sk_buff *skb;

        u8 type, color, eop, sop, ingress_port, vlan_stripped;
        u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
        u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
        u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
        u8 packet_error;
        u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
        u32 rss_hash;

        if (skipped)
                return;

        skb = buf->os_buf;
        prefetch(skb->data - NET_IP_ALIGN);
        pci_unmap_single(enic->pdev, buf->dma_addr,
                buf->len, PCI_DMA_FROMDEVICE);

        cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
                &type, &color, &q_number, &completed_index,
                &ingress_port, &fcoe, &eop, &sop, &rss_type,
                &csum_not_calc, &rss_hash, &bytes_written,
                &packet_error, &vlan_stripped, &vlan_tci, &checksum,
                &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
                &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
                &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
                &fcs_ok);

        if (packet_error) {

                if (!fcs_ok) {
                        if (bytes_written > 0)
                                enic->rq_bad_fcs++;
                        else if (bytes_written == 0)
                                enic->rq_truncated_pkts++;
                }

                dev_kfree_skb_any(skb);

                return;
        }

        if (eop && bytes_written > 0) {

                /* Good receive
                 */

                skb_put(skb, bytes_written);
                skb->protocol = eth_type_trans(skb, netdev);

                if (enic->csum_rx_enabled && !csum_not_calc) {
                        skb->csum = htons(checksum);
                        skb->ip_summed = CHECKSUM_COMPLETE;
                }

                skb->dev = netdev;

                if (enic->vlan_group && vlan_stripped &&
                        (vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) {

                        if (netdev->features & NETIF_F_GRO)
                                vlan_gro_receive(&enic->napi[q_number],
                                        enic->vlan_group, vlan_tci, skb);
                        else
                                vlan_hwaccel_receive_skb(skb,
                                        enic->vlan_group, vlan_tci);

                } else {

                        if (netdev->features & NETIF_F_GRO)
                                napi_gro_receive(&enic->napi[q_number], skb);
                        else
                                netif_receive_skb(skb);

                }
        } else {

                /* Buffer overflow
                 */

                dev_kfree_skb_any(skb);
        }
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
        u8 type, u16 q_number, u16 completed_index, void *opaque)
{
        struct enic *enic = vnic_dev_priv(vdev);

        vnic_rq_service(&enic->rq[q_number], cq_desc,
                completed_index, VNIC_RQ_RETURN_DESC,
                enic_rq_indicate_buf, opaque);

        return 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
        struct net_device *netdev = napi->dev;
        struct enic *enic = netdev_priv(netdev);
        unsigned int cq_rq = enic_cq_rq(enic, 0);
        unsigned int cq_wq = enic_cq_wq(enic, 0);
        unsigned int intr = enic_legacy_io_intr();
        unsigned int rq_work_to_do = budget;
        unsigned int wq_work_to_do = -1; /* no limit */
        unsigned int work_done, rq_work_done, wq_work_done;
        int err;

        /* Service RQ (first) and WQ
         */

        rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
                rq_work_to_do, enic_rq_service, NULL);

        wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
                wq_work_to_do, enic_wq_service, NULL);

        /* Accumulate intr event credits for this polling
         * cycle.  An intr event is the completion of a
         * WQ or RQ packet.
         */

        work_done = rq_work_done + wq_work_done;

        if (work_done > 0)
                vnic_intr_return_credits(&enic->intr[intr],
                        work_done,
                        0 /* don't unmask intr */,
                        0 /* don't reset intr timer */);

        err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

        /* Buffer allocation failed. Stay in polling
         * mode so we can try to fill the ring again.
         */

        if (err)
                rq_work_done = rq_work_to_do;

        if (rq_work_done < rq_work_to_do) {

                /* Some work done, but not enough to stay in polling,
                 * exit polling
                 */

                napi_complete(napi);
                vnic_intr_unmask(&enic->intr[intr]);
        }

        return rq_work_done;
}

static int enic_poll_msix(struct napi_struct *napi, int budget)
{
        struct net_device *netdev = napi->dev;
        struct enic *enic = netdev_priv(netdev);
        unsigned int rq = (napi - &enic->napi[0]);
        unsigned int cq = enic_cq_rq(enic, rq);
        unsigned int intr = enic_msix_rq_intr(enic, rq);
        unsigned int work_to_do = budget;
        unsigned int work_done;
        int err;

        /* Service RQ
         */

        work_done = vnic_cq_service(&enic->cq[cq],
                work_to_do, enic_rq_service, NULL);

        /* Return intr event credits for this polling
         * cycle.  An intr event is the completion of an
         * RQ packet.
         */
1530
1531         if (work_done > 0)
1532                 vnic_intr_return_credits(&enic->intr[intr],
1533                         work_done,
1534                         0 /* don't unmask intr */,
1535                         0 /* don't reset intr timer */);
1536
1537         err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
1538
1539         /* Buffer allocation failed. Stay in polling mode
1540          * so we can try to fill the ring again.
1541          */
1542
1543         if (err)
1544                 work_done = work_to_do;
1545
1546         if (work_done < work_to_do) {
1547
1548                 /* Some work done, but not enough to stay in polling;
1549                  * exit polling and unmask the interrupt
1550                  */
1551
1552                 napi_complete(napi);
1553                 vnic_intr_unmask(&enic->intr[intr]);
1554         }
1555
1556         return work_done;
1557 }
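
/* Editor's note: under MSI-X each RQ has its own napi_struct and vector,
 * so the RQ index is recovered by pointer arithmetic: (napi -
 * &enic->napi[0]) is the array index of the context that was scheduled.
 * Unlike enic_poll(), this path services no WQ completions; those arrive
 * on a dedicated vector handled by enic_isr_msix_wq().
 */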
1558
1559 static void enic_notify_timer(unsigned long data)
1560 {
1561         struct enic *enic = (struct enic *)data;
1562
1563         enic_notify_check(enic);
1564
1565         mod_timer(&enic->notify_timer,
1566                 round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
1567 }
1568
1569 static void enic_free_intr(struct enic *enic)
1570 {
1571         struct net_device *netdev = enic->netdev;
1572         unsigned int i;
1573
1574         switch (vnic_dev_get_intr_mode(enic->vdev)) {
1575         case VNIC_DEV_INTR_MODE_INTX:
1576                 free_irq(enic->pdev->irq, netdev);
1577                 break;
1578         case VNIC_DEV_INTR_MODE_MSI:
1579                 free_irq(enic->pdev->irq, enic);
1580                 break;
1581         case VNIC_DEV_INTR_MODE_MSIX:
1582                 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1583                         if (enic->msix[i].requested)
1584                                 free_irq(enic->msix_entry[i].vector,
1585                                         enic->msix[i].devid);
1586                 break;
1587         default:
1588                 break;
1589         }
1590 }
1591
1592 static int enic_request_intr(struct enic *enic)
1593 {
1594         struct net_device *netdev = enic->netdev;
1595         unsigned int i, intr;
1596         int err = 0;
1597
1598         switch (vnic_dev_get_intr_mode(enic->vdev)) {
1599
1600         case VNIC_DEV_INTR_MODE_INTX:
1601
1602                 err = request_irq(enic->pdev->irq, enic_isr_legacy,
1603                         IRQF_SHARED, netdev->name, netdev);
1604                 break;
1605
1606         case VNIC_DEV_INTR_MODE_MSI:
1607
1608                 err = request_irq(enic->pdev->irq, enic_isr_msi,
1609                         0, netdev->name, enic);
1610                 break;
1611
1612         case VNIC_DEV_INTR_MODE_MSIX:
1613
1614                 for (i = 0; i < enic->rq_count; i++) {
1615                         intr = enic_msix_rq_intr(enic, i);
1616                         sprintf(enic->msix[intr].devname,
1617                                 "%.11s-rx-%d", netdev->name, i);
1618                         enic->msix[intr].isr = enic_isr_msix_rq;
1619                         enic->msix[intr].devid = &enic->napi[i];
1620                 }
1621
1622                 for (i = 0; i < enic->wq_count; i++) {
1623                         intr = enic_msix_wq_intr(enic, i);
1624                         sprintf(enic->msix[intr].devname,
1625                                 "%.11s-tx-%d", netdev->name, i);
1626                         enic->msix[intr].isr = enic_isr_msix_wq;
1627                         enic->msix[intr].devid = enic;
1628                 }
1629
1630                 intr = enic_msix_err_intr(enic);
1631                 sprintf(enic->msix[intr].devname,
1632                         "%.11s-err", netdev->name);
1633                 enic->msix[intr].isr = enic_isr_msix_err;
1634                 enic->msix[intr].devid = enic;
1635
1636                 intr = enic_msix_notify_intr(enic);
1637                 sprintf(enic->msix[intr].devname,
1638                         "%.11s-notify", netdev->name);
1639                 enic->msix[intr].isr = enic_isr_msix_notify;
1640                 enic->msix[intr].devid = enic;
1641
1642                 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1643                         enic->msix[i].requested = 0;
1644
1645                 for (i = 0; i < enic->intr_count; i++) {
1646                         err = request_irq(enic->msix_entry[i].vector,
1647                                 enic->msix[i].isr, 0,
1648                                 enic->msix[i].devname,
1649                                 enic->msix[i].devid);
1650                         if (err) {
1651                                 enic_free_intr(enic);
1652                                 break;
1653                         }
1654                         enic->msix[i].requested = 1;
1655                 }
1656
1657                 break;
1658
1659         default:
1660                 break;
1661         }
1662
1663         return err;
1664 }
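
/* Editor's note: the MSI-X vector layout set up above, as encoded by the
 * enic_msix_*_intr() helpers, is:
 *
 *	vectors 0 .. rq_count-1		per-RQ NAPI interrupts
 *	vectors rq_count .. +wq_count-1	WQ completion interrupts
 *	next vector			WQ/RQ error interrupt
 *	last vector			notify interrupt
 *
 * The "%.11s" precision in the sprintf() calls truncates the netdev name
 * so the generated vector names stay bounded, and devid records the
 * cookie later handed back to free_irq(), which is why enic_free_intr()
 * frees enic->msix[i].devid rather than a single fixed pointer.
 */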
1665
1666 static void enic_synchronize_irqs(struct enic *enic)
1667 {
1668         unsigned int i;
1669
1670         switch (vnic_dev_get_intr_mode(enic->vdev)) {
1671         case VNIC_DEV_INTR_MODE_INTX:
1672         case VNIC_DEV_INTR_MODE_MSI:
1673                 synchronize_irq(enic->pdev->irq);
1674                 break;
1675         case VNIC_DEV_INTR_MODE_MSIX:
1676                 for (i = 0; i < enic->intr_count; i++)
1677                         synchronize_irq(enic->msix_entry[i].vector);
1678                 break;
1679         default:
1680                 break;
1681         }
1682 }
1683
1684 static int enic_dev_notify_set(struct enic *enic)
1685 {
1686         int err;
1687
1688         spin_lock(&enic->devcmd_lock);
1689         switch (vnic_dev_get_intr_mode(enic->vdev)) {
1690         case VNIC_DEV_INTR_MODE_INTX:
1691                 err = vnic_dev_notify_set(enic->vdev,
1692                         enic_legacy_notify_intr());
1693                 break;
1694         case VNIC_DEV_INTR_MODE_MSIX:
1695                 err = vnic_dev_notify_set(enic->vdev,
1696                         enic_msix_notify_intr(enic));
1697                 break;
1698         default:
1699                 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
1700                 break;
1701         }
1702         spin_unlock(&enic->devcmd_lock);
1703
1704         return err;
1705 }
1706
1707 static void enic_notify_timer_start(struct enic *enic)
1708 {
1709         switch (vnic_dev_get_intr_mode(enic->vdev)) {
1710         case VNIC_DEV_INTR_MODE_MSI:
1711                 mod_timer(&enic->notify_timer, jiffies);
1712                 break;
1713         default:
1714                 /* INTx and MSI-X use a dedicated notify intr */
1715                 break;
1716         }
1717 }
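
/* Editor's note: only MSI mode needs this timer.  INTx and MSI-X each
 * get a dedicated notify interrupt (see enic_dev_notify_set()), but MSI
 * offers a single vector that is already busy with WQ/RQ work, so the
 * notify area is instead polled every ENIC_NOTIFY_TIMER_PERIOD by
 * enic_notify_timer().
 */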
1718
1719 /* rtnl lock is held, process context */
1720 static int enic_open(struct net_device *netdev)
1721 {
1722         struct enic *enic = netdev_priv(netdev);
1723         unsigned int i;
1724         int err;
1725
1726         err = enic_request_intr(enic);
1727         if (err) {
1728                 netdev_err(netdev, "Unable to request irq.\n");
1729                 return err;
1730         }
1731
1732         err = enic_dev_notify_set(enic);
1733         if (err) {
1734                 netdev_err(netdev,
1735                         "Failed to alloc notify buffer, aborting.\n");
1736                 goto err_out_free_intr;
1737         }
1738
1739         for (i = 0; i < enic->rq_count; i++) {
1740                 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
1741                 /* Need at least one buffer on ring to get going */
1742                 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1743                         netdev_err(netdev, "Unable to alloc receive buffers\n");
1744                         err = -ENOMEM;
1745                         goto err_out_notify_unset;
1746                 }
1747         }
1748
1749         for (i = 0; i < enic->wq_count; i++)
1750                 vnic_wq_enable(&enic->wq[i]);
1751         for (i = 0; i < enic->rq_count; i++)
1752                 vnic_rq_enable(&enic->rq[i]);
1753
1754         if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
1755                 enic_dev_add_addr(enic, enic->pp.mac_addr);
1756         else
1757                 enic_dev_add_station_addr(enic);
1758         enic_set_rx_mode(netdev);
1759
1760         netif_wake_queue(netdev);
1761
1762         for (i = 0; i < enic->rq_count; i++)
1763                 napi_enable(&enic->napi[i]);
1764
1765         enic_dev_enable(enic);
1766
1767         for (i = 0; i < enic->intr_count; i++)
1768                 vnic_intr_unmask(&enic->intr[i]);
1769
1770         enic_notify_timer_start(enic);
1771
1772         return 0;
1773
1774 err_out_notify_unset:
1775         enic_dev_notify_unset(enic);
1776 err_out_free_intr:
1777         enic_free_intr(enic);
1778
1779         return err;
1780 }
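
/* Editor's note: the bring-up order in enic_open() matters: every RQ is
 * pre-filled with at least one buffer before the queues are enabled,
 * NAPI is enabled before the device, and the interrupts are unmasked
 * last, so the first interrupt cannot schedule a NAPI context that is
 * not yet ready to run.
 */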
1781
1782 /* rtnl lock is held, process context */
1783 static int enic_stop(struct net_device *netdev)
1784 {
1785         struct enic *enic = netdev_priv(netdev);
1786         unsigned int i;
1787         int err;
1788
1789         for (i = 0; i < enic->intr_count; i++) {
1790                 vnic_intr_mask(&enic->intr[i]);
1791                 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
1792         }
1793
1794         enic_synchronize_irqs(enic);
1795
1796         del_timer_sync(&enic->notify_timer);
1797
1798         enic_dev_disable(enic);
1799
1800         for (i = 0; i < enic->rq_count; i++)
1801                 napi_disable(&enic->napi[i]);
1802
1803         netif_carrier_off(netdev);
1804         netif_tx_disable(netdev);
1805         if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
1806                 enic_dev_del_addr(enic, enic->pp.mac_addr);
1807         else
1808                 enic_dev_del_station_addr(enic);
1809
1810         for (i = 0; i < enic->wq_count; i++) {
1811                 err = vnic_wq_disable(&enic->wq[i]);
1812                 if (err)
1813                         return err;
1814         }
1815         for (i = 0; i < enic->rq_count; i++) {
1816                 err = vnic_rq_disable(&enic->rq[i]);
1817                 if (err)
1818                         return err;
1819         }
1820
1821         enic_dev_notify_unset(enic);
1822         enic_free_intr(enic);
1823
1824         for (i = 0; i < enic->wq_count; i++)
1825                 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
1826         for (i = 0; i < enic->rq_count; i++)
1827                 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1828         for (i = 0; i < enic->cq_count; i++)
1829                 vnic_cq_clean(&enic->cq[i]);
1830         for (i = 0; i < enic->intr_count; i++)
1831                 vnic_intr_clean(&enic->intr[i]);
1832
1833         return 0;
1834 }
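
/* Editor's note: enic_stop() mirrors enic_open() in reverse: mask and
 * flush the interrupts, synchronize in-flight handlers, stop the notify
 * timer, disable NAPI, then disable the WQs and RQs before cleaning the
 * rings.  vnic_wq_disable()/vnic_rq_disable() can fail if the hardware
 * does not quiesce, in which case the error is propagated and the rings
 * are deliberately left untouched.
 */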
1835
1836 static int enic_change_mtu(struct net_device *netdev, int new_mtu)
1837 {
1838         struct enic *enic = netdev_priv(netdev);
1839         int running = netif_running(netdev);
1840
1841         if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
1842                 return -EINVAL;
1843
1844         if (running)
1845                 enic_stop(netdev);
1846
1847         netdev->mtu = new_mtu;
1848
1849         if (netdev->mtu > enic->port_mtu)
1850                 netdev_warn(netdev,
1851                         "interface MTU (%d) set higher than port MTU (%d)\n",
1852                         netdev->mtu, enic->port_mtu);
1853
1854         if (running)
1855                 enic_open(netdev);
1856
1857         return 0;
1858 }
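
/* Editor's note: a running interface is bounced across an MTU change
 * because RX buffer sizing follows netdev->mtu when the rings are
 * refilled in enic_open().  For example, "ip link set eth0 mtu 9000"
 * lands here under the rtnl lock and triggers the stop/open cycle; the
 * warning fires when the requested MTU exceeds the port MTU provisioned
 * for the vNIC.
 */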
1859
1860 #ifdef CONFIG_NET_POLL_CONTROLLER
1861 static void enic_poll_controller(struct net_device *netdev)
1862 {
1863         struct enic *enic = netdev_priv(netdev);
1864         struct vnic_dev *vdev = enic->vdev;
1865         unsigned int i, intr;
1866
1867         switch (vnic_dev_get_intr_mode(vdev)) {
1868         case VNIC_DEV_INTR_MODE_MSIX:
1869                 for (i = 0; i < enic->rq_count; i++) {
1870                         intr = enic_msix_rq_intr(enic, i);
1871                         enic_isr_msix_rq(enic->msix_entry[intr].vector,
1872                                 &enic->napi[i]);
1873                 }
1874                 intr = enic_msix_wq_intr(enic, 0);
1875                 enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
1876                 break;
1877         case VNIC_DEV_INTR_MODE_MSI:
1878                 enic_isr_msi(enic->pdev->irq, enic);
1879                 break;
1880         case VNIC_DEV_INTR_MODE_INTX:
1881                 enic_isr_legacy(enic->pdev->irq, netdev);
1882                 break;
1883         default:
1884                 break;
1885         }
1886 }
1887 #endif
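
/* Editor's note: enic_poll_controller() backs netpoll users such as
 * netconsole by invoking the ISRs directly.  In the MSI-X branch every
 * RQ vector is serviced, plus the single WQ vector: enic_set_intr_mode()
 * only ever configures one WQ (m = 1) in MSI-X mode, so WQ index 0 is
 * the only valid argument to enic_msix_wq_intr() here.
 */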
1888
1889 static int enic_dev_wait(struct vnic_dev *vdev,
1890         int (*start)(struct vnic_dev *, int),
1891         int (*finished)(struct vnic_dev *, int *),
1892         int arg)
1893 {
1894         unsigned long time;
1895         int done;
1896         int err;
1897
1898         BUG_ON(in_interrupt());
1899
1900         err = start(vdev, arg);
1901         if (err)
1902                 return err;
1903
1904         /* Wait for func to complete...2 seconds max
1905          */
1906
1907         time = jiffies + (HZ * 2);
1908         do {
1909
1910                 err = finished(vdev, &done);
1911                 if (err)
1912                         return err;
1913
1914                 if (done)
1915                         return 0;
1916
1917                 schedule_timeout_uninterruptible(HZ / 10);
1918
1919         } while (time_after(time, jiffies));
1920
1921         return -ETIMEDOUT;
1922 }
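
/* Editor's note: enic_dev_wait() is the generic devcmd completion
 * pattern: kick off an operation via start(), then poll finished() in
 * 100 ms sleeps (HZ / 10) for at most two seconds.  The
 * BUG_ON(in_interrupt()) guards the schedule_timeout_uninterruptible()
 * call, which is only legal from process context.
 */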
1923
1924 static int enic_dev_open(struct enic *enic)
1925 {
1926         int err;
1927
1928         err = enic_dev_wait(enic->vdev, vnic_dev_open,
1929                 vnic_dev_open_done, 0);
1930         if (err)
1931                 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
1932                         err);
1933
1934         return err;
1935 }
1936
1937 static int enic_dev_hang_reset(struct enic *enic)
1938 {
1939         int err;
1940
1941         err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
1942                 vnic_dev_hang_reset_done, 0);
1943         if (err)
1944                 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
1945                         err);
1946
1947         return err;
1948 }
1949
1950 static int enic_set_rsskey(struct enic *enic)
1951 {
1952         dma_addr_t rss_key_buf_pa;
1953         union vnic_rss_key *rss_key_buf_va = NULL;
1954         union vnic_rss_key rss_key = {
1955                 .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
1956                 .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
1957                 .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
1958                 .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
1959         };
1960         int err;
1961
1962         rss_key_buf_va = pci_alloc_consistent(enic->pdev,
1963                 sizeof(union vnic_rss_key), &rss_key_buf_pa);
1964         if (!rss_key_buf_va)
1965                 return -ENOMEM;
1966
1967         memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
1968
1969         spin_lock(&enic->devcmd_lock);
1970         err = enic_set_rss_key(enic,
1971                 rss_key_buf_pa,
1972                 sizeof(union vnic_rss_key));
1973         spin_unlock(&enic->devcmd_lock);
1974
1975         pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
1976                 rss_key_buf_va, rss_key_buf_pa);
1977
1978         return err;
1979 }
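
/* Editor's note: the byte values in the static RSS key above are ASCII;
 * read as strings, the four 10-byte rows spell "UCSawesome",
 * "PALOunique", "LINUXrocks" and "ENICiscool".  The key is staged in
 * DMA-coherent memory because the set-key devcmd passes the firmware a
 * bus address, not a kernel pointer.
 */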
1980
1981 static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
1982 {
1983         dma_addr_t rss_cpu_buf_pa;
1984         union vnic_rss_cpu *rss_cpu_buf_va = NULL;
1985         unsigned int i;
1986         int err;
1987
1988         rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
1989                 sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
1990         if (!rss_cpu_buf_va)
1991                 return -ENOMEM;
1992
1993         for (i = 0; i < (1 << rss_hash_bits); i++)
1994                 (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
1995
1996         spin_lock(&enic->devcmd_lock);
1997         err = enic_set_rss_cpu(enic,
1998                 rss_cpu_buf_pa,
1999                 sizeof(union vnic_rss_cpu));
2000         spin_unlock(&enic->devcmd_lock);
2001
2002         pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
2003                 rss_cpu_buf_va, rss_cpu_buf_pa);
2004
2005         return err;
2006 }
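
/* Editor's note: the loop above builds the RSS indirection table by
 * striping hash buckets round-robin across the receive queues.  With
 * rss_hash_bits = 7 the table holds 1 << 7 = 128 entries, so with e.g.
 * four RQs it reads 0, 1, 2, 3, 0, 1, 2, 3, ... and each RQ owns an
 * equal quarter of the hash space.
 */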
2007
2008 static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
2009         u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
2010 {
2011         const u8 tso_ipid_split_en = 0;
2012         const u8 ig_vlan_strip_en = 1;
2013         int err;
2014
2015         /* Enable VLAN tag stripping.
2016          */
2017
2018         spin_lock(&enic->devcmd_lock);
2019         err = enic_set_nic_cfg(enic,
2020                 rss_default_cpu, rss_hash_type,
2021                 rss_hash_bits, rss_base_cpu,
2022                 rss_enable, tso_ipid_split_en,
2023                 ig_vlan_strip_en);
2024         spin_unlock(&enic->devcmd_lock);
2025
2026         return err;
2027 }
2028
2029 static int enic_set_rss_nic_cfg(struct enic *enic)
2030 {
2031         struct device *dev = enic_get_dev(enic);
2032         const u8 rss_default_cpu = 0;
2033         const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
2034                 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
2035                 NIC_CFG_RSS_HASH_TYPE_IPV6 |
2036                 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
2037         const u8 rss_hash_bits = 7;
2038         const u8 rss_base_cpu = 0;
2039         u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
2040
2041         if (rss_enable) {
2042                 if (!enic_set_rsskey(enic)) {
2043                         if (enic_set_rsscpu(enic, rss_hash_bits)) {
2044                                 rss_enable = 0;
2045                                 dev_warn(dev, "RSS disabled, "
2046                                         "failed to set RSS cpu indirection table.\n");
2047                         }
2048                 } else {
2049                         rss_enable = 0;
2050                         dev_warn(dev, "RSS disabled, failed to set RSS key.\n");
2051                 }
2052         }
2053
2054         return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
2055                 rss_hash_bits, rss_base_cpu, rss_enable);
2056 }
2057
2058 static void enic_reset(struct work_struct *work)
2059 {
2060         struct enic *enic = container_of(work, struct enic, reset);
2061
2062         if (!netif_running(enic->netdev))
2063                 return;
2064
2065         rtnl_lock();
2066
2067         enic_dev_hang_notify(enic);
2068         enic_stop(enic->netdev);
2069         enic_dev_hang_reset(enic);
2070         enic_reset_multicast_list(enic);
2071         enic_init_vnic_resources(enic);
2072         enic_set_rss_nic_cfg(enic);
2073         enic_dev_set_ig_vlan_rewrite_mode(enic);
2074         enic_open(enic->netdev);
2075
2076         rtnl_unlock();
2077 }
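
/* Editor's note: enic_reset() is the error-recovery worker, scheduled
 * e.g. from the TX watchdog timeout handler.  It takes the rtnl lock so
 * the stop/hang-reset/open sequence cannot race with ndo_open, ndo_stop
 * or an MTU change, and it replays the same configuration steps (RSS,
 * ingress VLAN rewrite mode) that probe performs before reopening the
 * interface.
 */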
2078
2079 static int enic_set_intr_mode(struct enic *enic)
2080 {
2081         unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
2082         unsigned int m = 1;
2083         unsigned int i;
2084
2085         /* Set interrupt mode (INTx, MSI, MSI-X) depending
2086          * on system capabilities.
2087          *
2088          * Try MSI-X first
2089          *
2090          * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
2091          * (the second to last INTR is used for WQ/RQ errors)
2092          * (the last INTR is used for notifications)
2093          */
2094
2095         BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
2096         for (i = 0; i < n + m + 2; i++)
2097                 enic->msix_entry[i].entry = i;
2098
2099         /* Use multiple RQs if RSS is enabled
2100          */
2101
2102         if (ENIC_SETTING(enic, RSS) &&
2103             enic->config.intr_mode < 1 &&
2104             enic->rq_count >= n &&
2105             enic->wq_count >= m &&
2106             enic->cq_count >= n + m &&
2107             enic->intr_count >= n + m + 2) {
2108
2109                 if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
2110
2111                         enic->rq_count = n;
2112                         enic->wq_count = m;
2113                         enic->cq_count = n + m;
2114                         enic->intr_count = n + m + 2;
2115
2116                         vnic_dev_set_intr_mode(enic->vdev,
2117                                 VNIC_DEV_INTR_MODE_MSIX);
2118
2119                         return 0;
2120                 }
2121         }
2122
2123         if (enic->config.intr_mode < 1 &&
2124             enic->rq_count >= 1 &&
2125             enic->wq_count >= m &&
2126             enic->cq_count >= 1 + m &&
2127             enic->intr_count >= 1 + m + 2) {
2128                 if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {
2129
2130                         enic->rq_count = 1;
2131                         enic->wq_count = m;
2132                         enic->cq_count = 1 + m;
2133                         enic->intr_count = 1 + m + 2;
2134
2135                         vnic_dev_set_intr_mode(enic->vdev,
2136                                 VNIC_DEV_INTR_MODE_MSIX);
2137
2138                         return 0;
2139                 }
2140         }
2141
2142         /* Next try MSI
2143          *
2144          * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
2145          */
2146
2147         if (enic->config.intr_mode < 2 &&
2148             enic->rq_count >= 1 &&
2149             enic->wq_count >= 1 &&
2150             enic->cq_count >= 2 &&
2151             enic->intr_count >= 1 &&
2152             !pci_enable_msi(enic->pdev)) {
2153
2154                 enic->rq_count = 1;
2155                 enic->wq_count = 1;
2156                 enic->cq_count = 2;
2157                 enic->intr_count = 1;
2158
2159                 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
2160
2161                 return 0;
2162         }
2163
2164         /* Next try INTx
2165          *
2166          * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
2167          * (the first INTR is used for WQ/RQ)
2168          * (the second INTR is used for WQ/RQ errors)
2169          * (the last INTR is used for notifications)
2170          */
2171
2172         if (enic->config.intr_mode < 3 &&
2173             enic->rq_count >= 1 &&
2174             enic->wq_count >= 1 &&
2175             enic->cq_count >= 2 &&
2176             enic->intr_count >= 3) {
2177
2178                 enic->rq_count = 1;
2179                 enic->wq_count = 1;
2180                 enic->cq_count = 2;
2181                 enic->intr_count = 3;
2182
2183                 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
2184
2185                 return 0;
2186         }
2187
2188         vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2189
2190         return -EINVAL;
2191 }
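
/* Editor's note: interrupt-mode selection degrades gracefully: MSI-X
 * with one vector per RQ, then MSI-X with a single RQ, then MSI, then
 * INTx.  enic->config.intr_mode acts as a cap from the adapter
 * configuration: 0 permits MSI-X, 1 at best MSI, 2 at best INTx.  Each
 * successful branch also trims rq/wq/cq/intr_count down to exactly what
 * that mode consumes, so later loops over those counts stay in bounds.
 */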
2192
2193 static void enic_clear_intr_mode(struct enic *enic)
2194 {
2195         switch (vnic_dev_get_intr_mode(enic->vdev)) {
2196         case VNIC_DEV_INTR_MODE_MSIX:
2197                 pci_disable_msix(enic->pdev);
2198                 break;
2199         case VNIC_DEV_INTR_MODE_MSI:
2200                 pci_disable_msi(enic->pdev);
2201                 break;
2202         default:
2203                 break;
2204         }
2205
2206         vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2207 }
2208
2209 static const struct net_device_ops enic_netdev_dynamic_ops = {
2210         .ndo_open               = enic_open,
2211         .ndo_stop               = enic_stop,
2212         .ndo_start_xmit         = enic_hard_start_xmit,
2213         .ndo_get_stats          = enic_get_stats,
2214         .ndo_validate_addr      = eth_validate_addr,
2215         .ndo_set_rx_mode        = enic_set_rx_mode,
2216         .ndo_set_multicast_list = enic_set_rx_mode,
2217         .ndo_set_mac_address    = enic_set_mac_address_dynamic,
2218         .ndo_change_mtu         = enic_change_mtu,
2219         .ndo_vlan_rx_register   = enic_vlan_rx_register,
2220         .ndo_vlan_rx_add_vid    = enic_vlan_rx_add_vid,
2221         .ndo_vlan_rx_kill_vid   = enic_vlan_rx_kill_vid,
2222         .ndo_tx_timeout         = enic_tx_timeout,
2223         .ndo_set_vf_port        = enic_set_vf_port,
2224         .ndo_get_vf_port        = enic_get_vf_port,
2225 #ifdef IFLA_VF_MAX
2226         .ndo_set_vf_mac         = enic_set_vf_mac,
2227 #endif
2228 #ifdef CONFIG_NET_POLL_CONTROLLER
2229         .ndo_poll_controller    = enic_poll_controller,
2230 #endif
2231 };
2232
2233 static const struct net_device_ops enic_netdev_ops = {
2234         .ndo_open               = enic_open,
2235         .ndo_stop               = enic_stop,
2236         .ndo_start_xmit         = enic_hard_start_xmit,
2237         .ndo_get_stats          = enic_get_stats,
2238         .ndo_validate_addr      = eth_validate_addr,
2239         .ndo_set_mac_address    = enic_set_mac_address,
2240         .ndo_set_rx_mode        = enic_set_rx_mode,
2241         .ndo_set_multicast_list = enic_set_rx_mode,
2242         .ndo_change_mtu         = enic_change_mtu,
2243         .ndo_vlan_rx_register   = enic_vlan_rx_register,
2244         .ndo_vlan_rx_add_vid    = enic_vlan_rx_add_vid,
2245         .ndo_vlan_rx_kill_vid   = enic_vlan_rx_kill_vid,
2246         .ndo_tx_timeout         = enic_tx_timeout,
2247 #ifdef CONFIG_NET_POLL_CONTROLLER
2248         .ndo_poll_controller    = enic_poll_controller,
2249 #endif
2250 };
2251
2252 static void enic_dev_deinit(struct enic *enic)
2253 {
2254         unsigned int i;
2255
2256         for (i = 0; i < enic->rq_count; i++)
2257                 netif_napi_del(&enic->napi[i]);
2258
2259         enic_free_vnic_resources(enic);
2260         enic_clear_intr_mode(enic);
2261 }
2262
2263 static int enic_dev_init(struct enic *enic)
2264 {
2265         struct device *dev = enic_get_dev(enic);
2266         struct net_device *netdev = enic->netdev;
2267         unsigned int i;
2268         int err;
2269
2270         /* Get vNIC configuration
2271          */
2272
2273         err = enic_get_vnic_config(enic);
2274         if (err) {
2275                 dev_err(dev, "Get vNIC configuration failed, aborting\n");
2276                 return err;
2277         }
2278
2279         /* Get available resource counts
2280          */
2281
2282         enic_get_res_counts(enic);
2283
2284         /* Set interrupt mode based on resource counts and system
2285          * capabilities
2286          */
2287
2288         err = enic_set_intr_mode(enic);
2289         if (err) {
2290                 dev_err(dev, "Failed to set intr mode based on resource "
2291                         "counts and system capabilities, aborting\n");
2292                 return err;
2293         }
2294
2295         /* Allocate and configure vNIC resources
2296          */
2297
2298         err = enic_alloc_vnic_resources(enic);
2299         if (err) {
2300                 dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
2301                 goto err_out_free_vnic_resources;
2302         }
2303
2304         enic_init_vnic_resources(enic);
2305
2306         err = enic_set_rss_nic_cfg(enic);
2307         if (err) {
2308                 dev_err(dev, "Failed to config nic, aborting\n");
2309                 goto err_out_free_vnic_resources;
2310         }
2311
2312         switch (vnic_dev_get_intr_mode(enic->vdev)) {
2313         default:
2314                 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
2315                 break;
2316         case VNIC_DEV_INTR_MODE_MSIX:
2317                 for (i = 0; i < enic->rq_count; i++)
2318                         netif_napi_add(netdev, &enic->napi[i],
2319                                 enic_poll_msix, 64);
2320                 break;
2321         }
2322
2323         return 0;
2324
2325 err_out_free_vnic_resources:
2326         enic_clear_intr_mode(enic);
2327         enic_free_vnic_resources(enic);
2328
2329         return err;
2330 }
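
/* Editor's note: NAPI contexts are registered per interrupt mode: one
 * context driving enic_poll() for INTx/MSI, or one per RQ driving
 * enic_poll_msix() under MSI-X.  The weight of 64 is the conventional
 * NAPI poll weight of this era.
 */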
2331
2332 static void enic_iounmap(struct enic *enic)
2333 {
2334         unsigned int i;
2335
2336         for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
2337                 if (enic->bar[i].vaddr)
2338                         iounmap(enic->bar[i].vaddr);
2339 }
2340
2341 static int __devinit enic_probe(struct pci_dev *pdev,
2342         const struct pci_device_id *ent)
2343 {
2344         struct device *dev = &pdev->dev;
2345         struct net_device *netdev;
2346         struct enic *enic;
2347         int using_dac = 0;
2348         unsigned int i;
2349         int err;
2350
2351         /* Allocate net device structure and initialize.  Private
2352          * instance data is initialized to zero.
2353          */
2354
2355         netdev = alloc_etherdev(sizeof(struct enic));
2356         if (!netdev) {
2357                 pr_err("Etherdev alloc failed, aborting\n");
2358                 return -ENOMEM;
2359         }
2360
2361         pci_set_drvdata(pdev, netdev);
2362
2363         SET_NETDEV_DEV(netdev, &pdev->dev);
2364
2365         enic = netdev_priv(netdev);
2366         enic->netdev = netdev;
2367         enic->pdev = pdev;
2368
2369         /* Setup PCI resources
2370          */
2371
2372         err = pci_enable_device_mem(pdev);
2373         if (err) {
2374                 dev_err(dev, "Cannot enable PCI device, aborting\n");
2375                 goto err_out_free_netdev;
2376         }
2377
2378         err = pci_request_regions(pdev, DRV_NAME);
2379         if (err) {
2380                 dev_err(dev, "Cannot request PCI regions, aborting\n");
2381                 goto err_out_disable_device;
2382         }
2383
2384         pci_set_master(pdev);
2385
2386         /* Query PCI controller on system for DMA addressing
2387          * limitation for the device.  Try 40-bit first, and
2388          * fall back to 32-bit.
2389          */
2390
2391         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2392         if (err) {
2393                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2394                 if (err) {
2395                         dev_err(dev, "No usable DMA configuration, aborting\n");
2396                         goto err_out_release_regions;
2397                 }
2398                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2399                 if (err) {
2400                         dev_err(dev, "Unable to obtain %u-bit DMA "
2401                                 "for consistent allocations, aborting\n", 32);
2402                         goto err_out_release_regions;
2403                 }
2404         } else {
2405                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
2406                 if (err) {
2407                         dev_err(dev, "Unable to obtain %u-bit DMA "
2408                                 "for consistent allocations, aborting\n", 40);
2409                         goto err_out_release_regions;
2410                 }
2411                 using_dac = 1;
2412         }
2413
2414         /* Map vNIC resources from BAR0-5
2415          */
2416
2417         for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
2418                 if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
2419                         continue;
2420                 enic->bar[i].len = pci_resource_len(pdev, i);
2421                 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
2422                 if (!enic->bar[i].vaddr) {
2423                         dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
2424                         err = -ENODEV;
2425                         goto err_out_iounmap;
2426                 }
2427                 enic->bar[i].bus_addr = pci_resource_start(pdev, i);
2428         }
2429
2430         /* Register vNIC device
2431          */
2432
2433         enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
2434                 ARRAY_SIZE(enic->bar));
2435         if (!enic->vdev) {
2436                 dev_err(dev, "vNIC registration failed, aborting\n");
2437                 err = -ENODEV;
2438                 goto err_out_iounmap;
2439         }
2440
2441         /* Issue device open to get device in a known state
2442          */
2443
2444         err = enic_dev_open(enic);
2445         if (err) {
2446                 dev_err(dev, "vNIC dev open failed, aborting\n");
2447                 goto err_out_vnic_unregister;
2448         }
2449
2450         /* Setup devcmd lock
2451          */
2452
2453         spin_lock_init(&enic->devcmd_lock);
2454
2455         /*
2456          * Set ingress vlan rewrite mode before vnic initialization
2457          */
2458
2459         err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2460         if (err) {
2461                 dev_err(dev,
2462                         "Failed to set ingress vlan rewrite mode, aborting.\n");
2463                 goto err_out_dev_close;
2464         }
2465
2466         /* Issue device init to initialize the vnic-to-switch link.
2467          * We'll start with carrier off and wait for link UP
2468          * notification later to turn on carrier.  We don't need
2469          * to wait here for the vnic-to-switch link initialization
2470          * to complete; link UP notification is the indication that
2471          * the process is complete.
2472          */
2473
2474         netif_carrier_off(netdev);
2475
2476         /* Do not call dev_init for a dynamic vnic.
2477          * For a dynamic vnic, init_prov_info will be
2478          * called later by an upper layer.
2479          */
2480
2481         if (!enic_is_dynamic(enic)) {
2482                 err = vnic_dev_init(enic->vdev, 0);
2483                 if (err) {
2484                         dev_err(dev, "vNIC dev init failed, aborting\n");
2485                         goto err_out_dev_close;
2486                 }
2487         }
2488
2489         err = enic_dev_init(enic);
2490         if (err) {
2491                 dev_err(dev, "Device initialization failed, aborting\n");
2492                 goto err_out_dev_close;
2493         }
2494
2495         /* Setup notification timer, HW reset task, and wq locks
2496          */
2497
2498         init_timer(&enic->notify_timer);
2499         enic->notify_timer.function = enic_notify_timer;
2500         enic->notify_timer.data = (unsigned long)enic;
2501
2502         INIT_WORK(&enic->reset, enic_reset);
2503
2504         for (i = 0; i < enic->wq_count; i++)
2505                 spin_lock_init(&enic->wq_lock[i]);
2506
2507         /* Register net device
2508          */
2509
2510         enic->port_mtu = enic->config.mtu;
2511         (void)enic_change_mtu(netdev, enic->port_mtu);
2512
2513         err = enic_set_mac_addr(netdev, enic->mac_addr);
2514         if (err) {
2515                 dev_err(dev, "Invalid MAC address, aborting\n");
2516                 goto err_out_dev_deinit;
2517         }
2518
2519         enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
2520         enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2521
2522         if (enic_is_dynamic(enic))
2523                 netdev->netdev_ops = &enic_netdev_dynamic_ops;
2524         else
2525                 netdev->netdev_ops = &enic_netdev_ops;
2526
2527         netdev->watchdog_timeo = 2 * HZ;
2528         netdev->ethtool_ops = &enic_ethtool_ops;
2529
2530         netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2531         if (ENIC_SETTING(enic, LOOP)) {
2532                 netdev->features &= ~NETIF_F_HW_VLAN_TX;
2533                 enic->loop_enable = 1;
2534                 enic->loop_tag = enic->config.loop_tag;
2535                 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
2536         }
2537         if (ENIC_SETTING(enic, TXCSUM))
2538                 netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2539         if (ENIC_SETTING(enic, TSO))
2540                 netdev->features |= NETIF_F_TSO |
2541                         NETIF_F_TSO6 | NETIF_F_TSO_ECN;
2542         if (ENIC_SETTING(enic, LRO))
2543                 netdev->features |= NETIF_F_GRO;
2544         if (using_dac)
2545                 netdev->features |= NETIF_F_HIGHDMA;
2546
2547         enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
2548
2549         err = register_netdev(netdev);
2550         if (err) {
2551                 dev_err(dev, "Cannot register net device, aborting\n");
2552                 goto err_out_dev_deinit;
2553         }
2554
2555         return 0;
2556
2557 err_out_dev_deinit:
2558         enic_dev_deinit(enic);
2559 err_out_dev_close:
2560         vnic_dev_close(enic->vdev);
2561 err_out_vnic_unregister:
2562         vnic_dev_unregister(enic->vdev);
2563 err_out_iounmap:
2564         enic_iounmap(enic);
2565 err_out_release_regions:
2566         pci_release_regions(pdev);
2567 err_out_disable_device:
2568         pci_disable_device(pdev);
2569 err_out_free_netdev:
2570         pci_set_drvdata(pdev, NULL);
2571         free_netdev(netdev);
2572
2573         return err;
2574 }
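
/* Editor's note: the err_out_* labels above unwind in exactly the
 * reverse order of acquisition, the standard kernel probe pattern: each
 * failure point jumps to the label that releases everything set up so
 * far.  enic_remove() below performs the same teardown, in the same
 * order, for a successfully probed device.
 */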
2575
2576 static void __devexit enic_remove(struct pci_dev *pdev)
2577 {
2578         struct net_device *netdev = pci_get_drvdata(pdev);
2579
2580         if (netdev) {
2581                 struct enic *enic = netdev_priv(netdev);
2582
2583                 cancel_work_sync(&enic->reset);
2584                 unregister_netdev(netdev);
2585                 enic_dev_deinit(enic);
2586                 vnic_dev_close(enic->vdev);
2587                 vnic_dev_unregister(enic->vdev);
2588                 enic_iounmap(enic);
2589                 pci_release_regions(pdev);
2590                 pci_disable_device(pdev);
2591                 pci_set_drvdata(pdev, NULL);
2592                 free_netdev(netdev);
2593         }
2594 }
2595
2596 static struct pci_driver enic_driver = {
2597         .name = DRV_NAME,
2598         .id_table = enic_id_table,
2599         .probe = enic_probe,
2600         .remove = __devexit_p(enic_remove),
2601 };
2602
2603 static int __init enic_init_module(void)
2604 {
2605         pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
2606
2607         return pci_register_driver(&enic_driver);
2608 }
2609
2610 static void __exit enic_cleanup_module(void)
2611 {
2612         pci_unregister_driver(&enic_driver);
2613 }
2614
2615 module_init(enic_init_module);
2616 module_exit(enic_cleanup_module);