/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "cna.h"

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>

#include "bna.h"

#include "bnad.h"

#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS 3
#define BNAD_NUM_RXQ_COUNTERS 6
#define BNAD_NUM_TXQ_COUNTERS 5

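/*
 * Number of "base" ethtool statistics: the rtnl_link_stats64 counters,
 * the driver-level counters, and the hardware counters that precede the
 * per-RxF block in struct bfi_ll_stats.  Per-TxF/RxF and per-queue
 * counters are appended on top of this at runtime (see
 * bnad_get_stats_count_locked()).
 */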
#define BNAD_ETHTOOL_STATS_NUM                                          \
        (sizeof(struct rtnl_link_stats64) / sizeof(u64) +       \
        sizeof(struct bnad_drv_stats) / sizeof(u64) +           \
        offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64))

static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
        "rx_packets",
        "tx_packets",
        "rx_bytes",
        "tx_bytes",
        "rx_errors",
        "tx_errors",
        "rx_dropped",
        "tx_dropped",
        "multicast",
        "collisions",

        "rx_length_errors",
        "rx_over_errors",
        "rx_crc_errors",
        "rx_frame_errors",
        "rx_fifo_errors",
        "rx_missed_errors",

        "tx_aborted_errors",
        "tx_carrier_errors",
        "tx_fifo_errors",
        "tx_heartbeat_errors",
        "tx_window_errors",

        "rx_compressed",
        "tx_compressed",

        "netif_queue_stop",
        "netif_queue_wakeup",
        "netif_queue_stopped",
        "tso4",
        "tso6",
        "tso_err",
        "tcpcsum_offload",
        "udpcsum_offload",
        "csum_help",
        "csum_help_err",
        "hw_stats_updates",
        "netif_rx_schedule",
        "netif_rx_complete",
        "netif_rx_dropped",

        "link_toggle",
        "cee_up",

        "rxp_info_alloc_failed",
        "mbox_intr_disabled",
        "mbox_intr_enabled",
        "tx_unmap_q_alloc_failed",
        "rx_unmap_q_alloc_failed",
        "rxbuf_alloc_failed",

        "mac_frame_64",
        "mac_frame_65_127",
        "mac_frame_128_255",
        "mac_frame_256_511",
        "mac_frame_512_1023",
        "mac_frame_1024_1518",
        "mac_frame_1518_1522",
        "mac_rx_bytes",
        "mac_rx_packets",
        "mac_rx_fcs_error",
        "mac_rx_multicast",
        "mac_rx_broadcast",
        "mac_rx_control_frames",
        "mac_rx_pause",
        "mac_rx_unknown_opcode",
        "mac_rx_alignment_error",
        "mac_rx_frame_length_error",
        "mac_rx_code_error",
        "mac_rx_carrier_sense_error",
        "mac_rx_undersize",
        "mac_rx_oversize",
        "mac_rx_fragments",
        "mac_rx_jabber",
        "mac_rx_drop",

        "mac_tx_bytes",
        "mac_tx_packets",
        "mac_tx_multicast",
        "mac_tx_broadcast",
        "mac_tx_pause",
        "mac_tx_deferral",
        "mac_tx_excessive_deferral",
        "mac_tx_single_collision",
        "mac_tx_multiple_collision",
        "mac_tx_late_collision",
        "mac_tx_excessive_collision",
        "mac_tx_total_collision",
        "mac_tx_pause_honored",
        "mac_tx_drop",
        "mac_tx_jabber",
        "mac_tx_fcs_error",
        "mac_tx_control_frame",
        "mac_tx_oversize",
        "mac_tx_undersize",
        "mac_tx_fragments",

        "bpc_tx_pause_0",
        "bpc_tx_pause_1",
        "bpc_tx_pause_2",
        "bpc_tx_pause_3",
        "bpc_tx_pause_4",
        "bpc_tx_pause_5",
        "bpc_tx_pause_6",
        "bpc_tx_pause_7",
        "bpc_tx_zero_pause_0",
        "bpc_tx_zero_pause_1",
        "bpc_tx_zero_pause_2",
        "bpc_tx_zero_pause_3",
        "bpc_tx_zero_pause_4",
        "bpc_tx_zero_pause_5",
        "bpc_tx_zero_pause_6",
        "bpc_tx_zero_pause_7",
        "bpc_tx_first_pause_0",
        "bpc_tx_first_pause_1",
        "bpc_tx_first_pause_2",
        "bpc_tx_first_pause_3",
        "bpc_tx_first_pause_4",
        "bpc_tx_first_pause_5",
        "bpc_tx_first_pause_6",
        "bpc_tx_first_pause_7",

        "bpc_rx_pause_0",
        "bpc_rx_pause_1",
        "bpc_rx_pause_2",
        "bpc_rx_pause_3",
        "bpc_rx_pause_4",
        "bpc_rx_pause_5",
        "bpc_rx_pause_6",
        "bpc_rx_pause_7",
        "bpc_rx_zero_pause_0",
        "bpc_rx_zero_pause_1",
        "bpc_rx_zero_pause_2",
        "bpc_rx_zero_pause_3",
        "bpc_rx_zero_pause_4",
        "bpc_rx_zero_pause_5",
        "bpc_rx_zero_pause_6",
        "bpc_rx_zero_pause_7",
        "bpc_rx_first_pause_0",
        "bpc_rx_first_pause_1",
        "bpc_rx_first_pause_2",
        "bpc_rx_first_pause_3",
        "bpc_rx_first_pause_4",
        "bpc_rx_first_pause_5",
        "bpc_rx_first_pause_6",
        "bpc_rx_first_pause_7",

        "rad_rx_frames",
        "rad_rx_octets",
        "rad_rx_vlan_frames",
        "rad_rx_ucast",
        "rad_rx_ucast_octets",
        "rad_rx_ucast_vlan",
        "rad_rx_mcast",
        "rad_rx_mcast_octets",
        "rad_rx_mcast_vlan",
        "rad_rx_bcast",
        "rad_rx_bcast_octets",
        "rad_rx_bcast_vlan",
        "rad_rx_drops",

        "fc_rx_ucast_octets",
        "fc_rx_ucast",
        "fc_rx_ucast_vlan",
        "fc_rx_mcast_octets",
        "fc_rx_mcast",
        "fc_rx_mcast_vlan",
        "fc_rx_bcast_octets",
        "fc_rx_bcast",
        "fc_rx_bcast_vlan",

        "fc_tx_ucast_octets",
        "fc_tx_ucast",
        "fc_tx_ucast_vlan",
        "fc_tx_mcast_octets",
        "fc_tx_mcast",
        "fc_tx_mcast_vlan",
        "fc_tx_bcast_octets",
        "fc_tx_bcast",
        "fc_tx_bcast_vlan",
        "fc_tx_parity_errors",
        "fc_tx_timeout",
        "fc_tx_fid_parity_errors",
};

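/*
 * The adapter runs only 10G full duplex over fibre, so the link
 * parameters reported here are fixed; only the speed/duplex fields
 * depend on carrier state.
 */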
static int
bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
        cmd->supported = SUPPORTED_10000baseT_Full;
        cmd->advertising = ADVERTISED_10000baseT_Full;
        cmd->autoneg = AUTONEG_DISABLE;
        cmd->supported |= SUPPORTED_FIBRE;
        cmd->advertising |= ADVERTISED_FIBRE;
        cmd->port = PORT_FIBRE;
        cmd->phy_address = 0;

        if (netif_carrier_ok(netdev)) {
                ethtool_cmd_speed_set(cmd, SPEED_10000);
                cmd->duplex = DUPLEX_FULL;
        } else {
                ethtool_cmd_speed_set(cmd, -1);
                cmd->duplex = -1;
        }
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;

        return 0;
}

static int
bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
        /* 10G full duplex setting supported only */
        if (cmd->autoneg == AUTONEG_ENABLE)
                return -EOPNOTSUPP;

        if (ethtool_cmd_speed(cmd) == SPEED_10000 &&
            cmd->duplex == DUPLEX_FULL)
                return 0;

        return -EOPNOTSUPP;
}

static void
bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
        struct bnad *bnad = netdev_priv(netdev);
        struct bfa_ioc_attr *ioc_attr;
        unsigned long flags;

        strcpy(drvinfo->driver, BNAD_NAME);
        strcpy(drvinfo->version, BNAD_VERSION);

        ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
        if (ioc_attr) {
                spin_lock_irqsave(&bnad->bna_lock, flags);
                bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
                spin_unlock_irqrestore(&bnad->bna_lock, flags);

                strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
                        sizeof(drvinfo->fw_version) - 1);
                kfree(ioc_attr);
        }

        strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
}

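/*
 * Snapshot the debug registers under bna_lock.  When @regs is NULL only
 * the number of registers that would be dumped is counted; this is how
 * bnad_get_regs_len() sizes the buffer.
 */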
static int
get_regs(struct bnad *bnad, u32 *regs)
{
        int num = 0, i;
        u32 reg_addr;
        unsigned long flags;

#define BNAD_GET_REG(addr)                                      \
do {                                                            \
        if (regs)                                               \
                regs[num++] = readl(bnad->bar0 + (addr));       \
        else                                                    \
                num++;                                          \
} while (0)

        spin_lock_irqsave(&bnad->bna_lock, flags);

        /* DMA Block Internal Registers */
        BNAD_GET_REG(DMA_CTRL_REG0);
        BNAD_GET_REG(DMA_CTRL_REG1);
        BNAD_GET_REG(DMA_ERR_INT_STATUS);
        BNAD_GET_REG(DMA_ERR_INT_ENABLE);
        BNAD_GET_REG(DMA_ERR_INT_STATUS_SET);

        /* APP Block Register Address Offset from BAR0 */
        BNAD_GET_REG(HOSTFN0_INT_STATUS);
        BNAD_GET_REG(HOSTFN0_INT_MASK);
        BNAD_GET_REG(HOST_PAGE_NUM_FN0);
        BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN0);
        BNAD_GET_REG(FN0_PCIE_ERR_REG);
        BNAD_GET_REG(FN0_ERR_TYPE_STATUS_REG);
        BNAD_GET_REG(FN0_ERR_TYPE_MSK_STATUS_REG);

        BNAD_GET_REG(HOSTFN1_INT_STATUS);
        BNAD_GET_REG(HOSTFN1_INT_MASK);
        BNAD_GET_REG(HOST_PAGE_NUM_FN1);
        BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN1);
        BNAD_GET_REG(FN1_PCIE_ERR_REG);
        BNAD_GET_REG(FN1_ERR_TYPE_STATUS_REG);
        BNAD_GET_REG(FN1_ERR_TYPE_MSK_STATUS_REG);

        BNAD_GET_REG(PCIE_MISC_REG);

        BNAD_GET_REG(HOST_SEM0_INFO_REG);
        BNAD_GET_REG(HOST_SEM1_INFO_REG);
        BNAD_GET_REG(HOST_SEM2_INFO_REG);
        BNAD_GET_REG(HOST_SEM3_INFO_REG);

        BNAD_GET_REG(TEMPSENSE_CNTL_REG);
        BNAD_GET_REG(TEMPSENSE_STAT_REG);

        BNAD_GET_REG(APP_LOCAL_ERR_STAT);
        BNAD_GET_REG(APP_LOCAL_ERR_MSK);

        BNAD_GET_REG(PCIE_LNK_ERR_STAT);
        BNAD_GET_REG(PCIE_LNK_ERR_MSK);

        BNAD_GET_REG(FCOE_FIP_ETH_TYPE);
        BNAD_GET_REG(RESV_ETH_TYPE);

        BNAD_GET_REG(HOSTFN2_INT_STATUS);
        BNAD_GET_REG(HOSTFN2_INT_MASK);
        BNAD_GET_REG(HOST_PAGE_NUM_FN2);
        BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN2);
        BNAD_GET_REG(FN2_PCIE_ERR_REG);
        BNAD_GET_REG(FN2_ERR_TYPE_STATUS_REG);
        BNAD_GET_REG(FN2_ERR_TYPE_MSK_STATUS_REG);

        BNAD_GET_REG(HOSTFN3_INT_STATUS);
        BNAD_GET_REG(HOSTFN3_INT_MASK);
        BNAD_GET_REG(HOST_PAGE_NUM_FN3);
        BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN3);
        BNAD_GET_REG(FN3_PCIE_ERR_REG);
        BNAD_GET_REG(FN3_ERR_TYPE_STATUS_REG);
        BNAD_GET_REG(FN3_ERR_TYPE_MSK_STATUS_REG);

        /* Host Command Status Registers */
        reg_addr = HOST_CMDSTS0_CLR_REG;
        for (i = 0; i < 16; i++) {
                BNAD_GET_REG(reg_addr);
                BNAD_GET_REG(reg_addr + 4);
                BNAD_GET_REG(reg_addr + 8);
                reg_addr += 0x10;
        }

        /* Function ID register */
        BNAD_GET_REG(FNC_ID_REG);

        /* Function personality register */
        BNAD_GET_REG(FNC_PERS_REG);

        /* Operation mode register */
        BNAD_GET_REG(OP_MODE);

        /* LPU0 Registers */
        BNAD_GET_REG(LPU0_MBOX_CTL_REG);
        BNAD_GET_REG(LPU0_MBOX_CMD_REG);
        BNAD_GET_REG(LPU0_MBOX_LINK_0REG);
        BNAD_GET_REG(LPU1_MBOX_LINK_0REG);
        BNAD_GET_REG(LPU0_MBOX_STATUS_0REG);
        BNAD_GET_REG(LPU1_MBOX_STATUS_0REG);
        BNAD_GET_REG(LPU0_ERR_STATUS_REG);
        BNAD_GET_REG(LPU0_ERR_SET_REG);

        /* LPU1 Registers */
        BNAD_GET_REG(LPU1_MBOX_CTL_REG);
        BNAD_GET_REG(LPU1_MBOX_CMD_REG);
        BNAD_GET_REG(LPU0_MBOX_LINK_1REG);
        BNAD_GET_REG(LPU1_MBOX_LINK_1REG);
        BNAD_GET_REG(LPU0_MBOX_STATUS_1REG);
        BNAD_GET_REG(LPU1_MBOX_STATUS_1REG);
        BNAD_GET_REG(LPU1_ERR_STATUS_REG);
        BNAD_GET_REG(LPU1_ERR_SET_REG);

        /* PSS Registers */
        BNAD_GET_REG(PSS_CTL_REG);
        BNAD_GET_REG(PSS_ERR_STATUS_REG);
        BNAD_GET_REG(ERR_STATUS_SET);
        BNAD_GET_REG(PSS_RAM_ERR_STATUS_REG);

        /* Catapult CPQ Registers */
        BNAD_GET_REG(HOSTFN0_LPU0_MBOX0_CMD_STAT);
        BNAD_GET_REG(HOSTFN0_LPU1_MBOX0_CMD_STAT);
        BNAD_GET_REG(LPU0_HOSTFN0_MBOX0_CMD_STAT);
        BNAD_GET_REG(LPU1_HOSTFN0_MBOX0_CMD_STAT);

        BNAD_GET_REG(HOSTFN0_LPU0_MBOX1_CMD_STAT);
        BNAD_GET_REG(HOSTFN0_LPU1_MBOX1_CMD_STAT);
        BNAD_GET_REG(LPU0_HOSTFN0_MBOX1_CMD_STAT);
        BNAD_GET_REG(LPU1_HOSTFN0_MBOX1_CMD_STAT);

        BNAD_GET_REG(HOSTFN1_LPU0_MBOX0_CMD_STAT);
        BNAD_GET_REG(HOSTFN1_LPU1_MBOX0_CMD_STAT);
        BNAD_GET_REG(LPU0_HOSTFN1_MBOX0_CMD_STAT);
        BNAD_GET_REG(LPU1_HOSTFN1_MBOX0_CMD_STAT);

        BNAD_GET_REG(HOSTFN1_LPU0_MBOX1_CMD_STAT);
        BNAD_GET_REG(HOSTFN1_LPU1_MBOX1_CMD_STAT);
        BNAD_GET_REG(LPU0_HOSTFN1_MBOX1_CMD_STAT);
        BNAD_GET_REG(LPU1_HOSTFN1_MBOX1_CMD_STAT);

        BNAD_GET_REG(HOSTFN2_LPU0_MBOX0_CMD_STAT);
        BNAD_GET_REG(HOSTFN2_LPU1_MBOX0_CMD_STAT);
        BNAD_GET_REG(LPU0_HOSTFN2_MBOX0_CMD_STAT);
        BNAD_GET_REG(LPU1_HOSTFN2_MBOX0_CMD_STAT);

        BNAD_GET_REG(HOSTFN2_LPU0_MBOX1_CMD_STAT);
        BNAD_GET_REG(HOSTFN2_LPU1_MBOX1_CMD_STAT);
        BNAD_GET_REG(LPU0_HOSTFN2_MBOX1_CMD_STAT);
        BNAD_GET_REG(LPU1_HOSTFN2_MBOX1_CMD_STAT);

        BNAD_GET_REG(HOSTFN3_LPU0_MBOX0_CMD_STAT);
        BNAD_GET_REG(HOSTFN3_LPU1_MBOX0_CMD_STAT);
        BNAD_GET_REG(LPU0_HOSTFN3_MBOX0_CMD_STAT);
        BNAD_GET_REG(LPU1_HOSTFN3_MBOX0_CMD_STAT);

        BNAD_GET_REG(HOSTFN3_LPU0_MBOX1_CMD_STAT);
        BNAD_GET_REG(HOSTFN3_LPU1_MBOX1_CMD_STAT);
        BNAD_GET_REG(LPU0_HOSTFN3_MBOX1_CMD_STAT);
        BNAD_GET_REG(LPU1_HOSTFN3_MBOX1_CMD_STAT);

        /* Host Function Force Parity Error Registers */
        BNAD_GET_REG(HOSTFN0_LPU_FORCE_PERR);
        BNAD_GET_REG(HOSTFN1_LPU_FORCE_PERR);
        BNAD_GET_REG(HOSTFN2_LPU_FORCE_PERR);
        BNAD_GET_REG(HOSTFN3_LPU_FORCE_PERR);

        /* LL Port[0|1] Halt Mask Registers */
        BNAD_GET_REG(LL_HALT_MSK_P0);
        BNAD_GET_REG(LL_HALT_MSK_P1);

        /* LL Port[0|1] Error Mask Registers */
        BNAD_GET_REG(LL_ERR_MSK_P0);
        BNAD_GET_REG(LL_ERR_MSK_P1);

        /* EMC FLI Registers */
        BNAD_GET_REG(FLI_CMD_REG);
        BNAD_GET_REG(FLI_ADDR_REG);
        BNAD_GET_REG(FLI_CTL_REG);
        BNAD_GET_REG(FLI_WRDATA_REG);
        BNAD_GET_REG(FLI_RDDATA_REG);
        BNAD_GET_REG(FLI_DEV_STATUS_REG);
        BNAD_GET_REG(FLI_SIG_WD_REG);

        BNAD_GET_REG(FLI_DEV_VENDOR_REG);
        BNAD_GET_REG(FLI_ERR_STATUS_REG);

        /* RxAdm 0 Registers */
        BNAD_GET_REG(RAD0_CTL_REG);
        BNAD_GET_REG(RAD0_PE_PARM_REG);
        BNAD_GET_REG(RAD0_BCN_REG);
        BNAD_GET_REG(RAD0_DEFAULT_REG);
        BNAD_GET_REG(RAD0_PROMISC_REG);
        BNAD_GET_REG(RAD0_BCNQ_REG);
        BNAD_GET_REG(RAD0_DEFAULTQ_REG);

        BNAD_GET_REG(RAD0_ERR_STS);
        BNAD_GET_REG(RAD0_SET_ERR_STS);
        BNAD_GET_REG(RAD0_ERR_INT_EN);
        BNAD_GET_REG(RAD0_FIRST_ERR);
        BNAD_GET_REG(RAD0_FORCE_ERR);

        BNAD_GET_REG(RAD0_MAC_MAN_1H);
        BNAD_GET_REG(RAD0_MAC_MAN_1L);
        BNAD_GET_REG(RAD0_MAC_MAN_2H);
        BNAD_GET_REG(RAD0_MAC_MAN_2L);
        BNAD_GET_REG(RAD0_MAC_MAN_3H);
        BNAD_GET_REG(RAD0_MAC_MAN_3L);
        BNAD_GET_REG(RAD0_MAC_MAN_4H);
        BNAD_GET_REG(RAD0_MAC_MAN_4L);

        BNAD_GET_REG(RAD0_LAST4_IP);

        /* RxAdm 1 Registers */
        BNAD_GET_REG(RAD1_CTL_REG);
        BNAD_GET_REG(RAD1_PE_PARM_REG);
        BNAD_GET_REG(RAD1_BCN_REG);
        BNAD_GET_REG(RAD1_DEFAULT_REG);
        BNAD_GET_REG(RAD1_PROMISC_REG);
        BNAD_GET_REG(RAD1_BCNQ_REG);
        BNAD_GET_REG(RAD1_DEFAULTQ_REG);

        BNAD_GET_REG(RAD1_ERR_STS);
        BNAD_GET_REG(RAD1_SET_ERR_STS);
        BNAD_GET_REG(RAD1_ERR_INT_EN);

        /* TxA0 Registers */
        BNAD_GET_REG(TXA0_CTRL_REG);
        /* TxA0 TSO Sequence # Registers (RO) */
        for (i = 0; i < 8; i++) {
                BNAD_GET_REG(TXA0_TSO_TCP_SEQ_REG(i));
                BNAD_GET_REG(TXA0_TSO_IP_INFO_REG(i));
        }

        /* TxA1 Registers */
        BNAD_GET_REG(TXA1_CTRL_REG);
        /* TxA1 TSO Sequence # Registers (RO) */
        for (i = 0; i < 8; i++) {
                BNAD_GET_REG(TXA1_TSO_TCP_SEQ_REG(i));
                BNAD_GET_REG(TXA1_TSO_IP_INFO_REG(i));
        }

        /* RxA Registers */
        BNAD_GET_REG(RXA0_CTL_REG);
        BNAD_GET_REG(RXA1_CTL_REG);

        /* PLB0 Registers */
        BNAD_GET_REG(PLB0_ECM_TIMER_REG);
        BNAD_GET_REG(PLB0_RL_CTL);
        for (i = 0; i < 8; i++)
                BNAD_GET_REG(PLB0_RL_MAX_BC(i));
        BNAD_GET_REG(PLB0_RL_TU_PRIO);
        for (i = 0; i < 8; i++)
                BNAD_GET_REG(PLB0_RL_BYTE_CNT(i));
        BNAD_GET_REG(PLB0_RL_MIN_REG);
        BNAD_GET_REG(PLB0_RL_MAX_REG);
        BNAD_GET_REG(PLB0_EMS_ADD_REG);

        /* PLB1 Registers */
        BNAD_GET_REG(PLB1_ECM_TIMER_REG);
        BNAD_GET_REG(PLB1_RL_CTL);
        for (i = 0; i < 8; i++)
                BNAD_GET_REG(PLB1_RL_MAX_BC(i));
        BNAD_GET_REG(PLB1_RL_TU_PRIO);
        for (i = 0; i < 8; i++)
                BNAD_GET_REG(PLB1_RL_BYTE_CNT(i));
        BNAD_GET_REG(PLB1_RL_MIN_REG);
        BNAD_GET_REG(PLB1_RL_MAX_REG);
        BNAD_GET_REG(PLB1_EMS_ADD_REG);

        /* HQM Control Register */
        BNAD_GET_REG(HQM0_CTL_REG);
        BNAD_GET_REG(HQM0_RXQ_STOP_SEM);
        BNAD_GET_REG(HQM0_TXQ_STOP_SEM);
        BNAD_GET_REG(HQM1_CTL_REG);
        BNAD_GET_REG(HQM1_RXQ_STOP_SEM);
        BNAD_GET_REG(HQM1_TXQ_STOP_SEM);

        /* LUT Registers */
        BNAD_GET_REG(LUT0_ERR_STS);
        BNAD_GET_REG(LUT0_SET_ERR_STS);
        BNAD_GET_REG(LUT1_ERR_STS);
        BNAD_GET_REG(LUT1_SET_ERR_STS);

        /* TRC Registers */
        BNAD_GET_REG(TRC_CTL_REG);
        BNAD_GET_REG(TRC_MODS_REG);
        BNAD_GET_REG(TRC_TRGC_REG);
        BNAD_GET_REG(TRC_CNT1_REG);
        BNAD_GET_REG(TRC_CNT2_REG);
        BNAD_GET_REG(TRC_NXTS_REG);
        BNAD_GET_REG(TRC_DIRR_REG);
        for (i = 0; i < 10; i++)
                BNAD_GET_REG(TRC_TRGM_REG(i));
        for (i = 0; i < 10; i++)
                BNAD_GET_REG(TRC_NXTM_REG(i));
        for (i = 0; i < 10; i++)
                BNAD_GET_REG(TRC_STRM_REG(i));

        spin_unlock_irqrestore(&bnad->bna_lock, flags);
#undef BNAD_GET_REG
        return num;
}

static int
bnad_get_regs_len(struct net_device *netdev)
{
        int ret = get_regs(netdev_priv(netdev), NULL) * sizeof(u32);
        return ret;
}

static void
bnad_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
{
        memset(buf, 0, bnad_get_regs_len(netdev));
        get_regs(netdev_priv(netdev), buf);
}

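/* Wake-on-LAN is not supported; report no capabilities and no options. */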
static void
bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
{
        wolinfo->supported = 0;
        wolinfo->wolopts = 0;
}

static int
bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
        struct bnad *bnad = netdev_priv(netdev);
        unsigned long flags;

        /* bna_lock is required to access bnad->cfg_flags */
        spin_lock_irqsave(&bnad->bna_lock, flags);
        coalesce->use_adaptive_rx_coalesce =
                (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
                                        BFI_COALESCING_TIMER_UNIT;
        coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
                                        BFI_COALESCING_TIMER_UNIT;
        coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;

        return 0;
}

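/*
 * Enabling adaptive RX coalescing turns on dynamic interrupt moderation
 * (DIM) and starts its timer; disabling it stops the timer (dropping
 * bna_lock around del_timer_sync()) and falls back to the static RX
 * coalescing timeout.  TX coalescing is always static.
 */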
static int
bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
        struct bnad *bnad = netdev_priv(netdev);
        unsigned long flags;
        int dim_timer_del = 0;

        if (coalesce->rx_coalesce_usecs == 0 ||
            coalesce->rx_coalesce_usecs >
            BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
                return -EINVAL;

        if (coalesce->tx_coalesce_usecs == 0 ||
            coalesce->tx_coalesce_usecs >
            BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
                return -EINVAL;

        mutex_lock(&bnad->conf_mutex);
        /*
         * There is no need to store rx_coalesce_usecs here; whenever
         * DIM is disabled it can be fetched from the stack again.
         */
        spin_lock_irqsave(&bnad->bna_lock, flags);
        if (coalesce->use_adaptive_rx_coalesce) {
                if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
                        bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
                        bnad_dim_timer_start(bnad);
                }
        } else {
                if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
                        bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
                        dim_timer_del = bnad_dim_timer_running(bnad);
                        if (dim_timer_del) {
                                clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
                                                        &bnad->run_flags);
                                spin_unlock_irqrestore(&bnad->bna_lock, flags);
                                del_timer_sync(&bnad->dim_timer);
                                spin_lock_irqsave(&bnad->bna_lock, flags);
                        }
                        bnad_rx_coalescing_timeo_set(bnad);
                }
        }
        if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
                                        BFI_COALESCING_TIMER_UNIT) {
                bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
                                                BFI_COALESCING_TIMER_UNIT;
                bnad_tx_coalescing_timeo_set(bnad);
        }

        if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
                                        BFI_COALESCING_TIMER_UNIT) {
                bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
                                                BFI_COALESCING_TIMER_UNIT;

                if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
                        bnad_rx_coalescing_timeo_set(bnad);

        }

        /* Add Tx Inter-pkt DMA count?  */

        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        mutex_unlock(&bnad->conf_mutex);
        return 0;
}

static void
bnad_get_ringparam(struct net_device *netdev,
                   struct ethtool_ringparam *ringparam)
{
        struct bnad *bnad = netdev_priv(netdev);

        ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
        ringparam->rx_mini_max_pending = 0;
        ringparam->rx_jumbo_max_pending = 0;
        ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;

        ringparam->rx_pending = bnad->rxq_depth;
        ringparam->rx_mini_pending = 0;
        ringparam->rx_jumbo_pending = 0;
        ringparam->tx_pending = bnad->txq_depth;
}

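/*
 * Changing a ring size tears down and re-creates every active Rx or Tx
 * path with the new depth.  Depths must be powers of two within the
 * supported range; the first setup error encountered is returned.
 */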
static int
bnad_set_ringparam(struct net_device *netdev,
                   struct ethtool_ringparam *ringparam)
{
        int i, current_err, err = 0;
        struct bnad *bnad = netdev_priv(netdev);

        mutex_lock(&bnad->conf_mutex);
        if (ringparam->rx_pending == bnad->rxq_depth &&
            ringparam->tx_pending == bnad->txq_depth) {
                mutex_unlock(&bnad->conf_mutex);
                return 0;
        }

        if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
            ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
            !BNA_POWER_OF_2(ringparam->rx_pending)) {
                mutex_unlock(&bnad->conf_mutex);
                return -EINVAL;
        }
        if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
            ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
            !BNA_POWER_OF_2(ringparam->tx_pending)) {
                mutex_unlock(&bnad->conf_mutex);
                return -EINVAL;
        }

        if (ringparam->rx_pending != bnad->rxq_depth) {
                bnad->rxq_depth = ringparam->rx_pending;
                for (i = 0; i < bnad->num_rx; i++) {
                        if (!bnad->rx_info[i].rx)
                                continue;
                        bnad_cleanup_rx(bnad, i);
                        current_err = bnad_setup_rx(bnad, i);
                        if (current_err && !err)
                                err = current_err;
                }
        }
        if (ringparam->tx_pending != bnad->txq_depth) {
                bnad->txq_depth = ringparam->tx_pending;
                for (i = 0; i < bnad->num_tx; i++) {
                        if (!bnad->tx_info[i].tx)
                                continue;
                        bnad_cleanup_tx(bnad, i);
                        current_err = bnad_setup_tx(bnad, i);
                        if (current_err && !err)
                                err = current_err;
                }
        }

        mutex_unlock(&bnad->conf_mutex);
        return err;
}

static void
bnad_get_pauseparam(struct net_device *netdev,
                    struct ethtool_pauseparam *pauseparam)
{
        struct bnad *bnad = netdev_priv(netdev);

        pauseparam->autoneg = 0;
        pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause;
        pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause;
}

static int
bnad_set_pauseparam(struct net_device *netdev,
                    struct ethtool_pauseparam *pauseparam)
{
        struct bnad *bnad = netdev_priv(netdev);
        struct bna_pause_config pause_config;
        unsigned long flags;

        if (pauseparam->autoneg == AUTONEG_ENABLE)
                return -EINVAL;

        mutex_lock(&bnad->conf_mutex);
        if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause ||
            pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) {
                pause_config.rx_pause = pauseparam->rx_pause;
                pause_config.tx_pause = pauseparam->tx_pause;
                spin_lock_irqsave(&bnad->bna_lock, flags);
                bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
        }
        mutex_unlock(&bnad->conf_mutex);
        return 0;
}

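/*
 * The string layout generated here must match the counter layout
 * produced by bnad_get_ethtool_stats()/bnad_per_q_stats_fill(): base
 * stats first, then per-TxF and per-RxF counters for each active
 * function in the bitmaps, then per-CQ, per-RxQ and per-TxQ counters.
 */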
static void
bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
{
        struct bnad *bnad = netdev_priv(netdev);
        int i, j, q_num;
        u64 bmap;

        mutex_lock(&bnad->conf_mutex);

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
                        BUG_ON(strlen(bnad_net_stats_strings[i]) >=
                               ETH_GSTRING_LEN);
                        memcpy(string, bnad_net_stats_strings[i],
                               ETH_GSTRING_LEN);
                        string += ETH_GSTRING_LEN;
                }
                bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
                        ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
                for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
                        if (bmap & 1) {
                                sprintf(string, "txf%d_ucast_octets", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txf%d_ucast", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txf%d_ucast_vlan", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txf%d_mcast_octets", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txf%d_mcast", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txf%d_mcast_vlan", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txf%d_bcast_octets", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txf%d_bcast", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txf%d_bcast_vlan", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txf%d_errors", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txf%d_filter_vlan", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txf%d_filter_mac_sa", i);
                                string += ETH_GSTRING_LEN;
                        }
                        bmap >>= 1;
                }

                bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
                        ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
                for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
                        if (bmap & 1) {
                                sprintf(string, "rxf%d_ucast_octets", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxf%d_ucast", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxf%d_ucast_vlan", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxf%d_mcast_octets", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxf%d_mcast", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxf%d_mcast_vlan", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxf%d_bcast_octets", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxf%d_bcast", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxf%d_bcast_vlan", i);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxf%d_frame_drops", i);
                                string += ETH_GSTRING_LEN;
                        }
                        bmap >>= 1;
                }

                q_num = 0;
                for (i = 0; i < bnad->num_rx; i++) {
                        if (!bnad->rx_info[i].rx)
                                continue;
                        for (j = 0; j < bnad->num_rxp_per_rx; j++) {
                                sprintf(string, "cq%d_producer_index", q_num);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "cq%d_consumer_index", q_num);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "cq%d_hw_producer_index",
                                        q_num);
                                string += ETH_GSTRING_LEN;
                                q_num++;
                        }
                }

                q_num = 0;
                for (i = 0; i < bnad->num_rx; i++) {
                        if (!bnad->rx_info[i].rx)
                                continue;
                        for (j = 0; j < bnad->num_rxp_per_rx; j++) {
                                sprintf(string, "rxq%d_packets", q_num);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxq%d_bytes", q_num);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxq%d_packets_with_error",
                                                                q_num);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxq%d_allocbuf_failed", q_num);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxq%d_producer_index", q_num);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "rxq%d_consumer_index", q_num);
                                string += ETH_GSTRING_LEN;
                                q_num++;
                                if (bnad->rx_info[i].rx_ctrl[j].ccb &&
                                        bnad->rx_info[i].rx_ctrl[j].ccb->
                                        rcb[1] &&
                                        bnad->rx_info[i].rx_ctrl[j].ccb->
                                        rcb[1]->rxq) {
                                        sprintf(string, "rxq%d_packets", q_num);
                                        string += ETH_GSTRING_LEN;
                                        sprintf(string, "rxq%d_bytes", q_num);
                                        string += ETH_GSTRING_LEN;
                                        sprintf(string,
                                        "rxq%d_packets_with_error", q_num);
                                        string += ETH_GSTRING_LEN;
                                        sprintf(string, "rxq%d_allocbuf_failed",
                                                                q_num);
                                        string += ETH_GSTRING_LEN;
                                        sprintf(string, "rxq%d_producer_index",
                                                                q_num);
                                        string += ETH_GSTRING_LEN;
                                        sprintf(string, "rxq%d_consumer_index",
                                                                q_num);
                                        string += ETH_GSTRING_LEN;
                                        q_num++;
                                }
                        }
                }

                q_num = 0;
                for (i = 0; i < bnad->num_tx; i++) {
                        if (!bnad->tx_info[i].tx)
                                continue;
                        for (j = 0; j < bnad->num_txq_per_tx; j++) {
                                sprintf(string, "txq%d_packets", q_num);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txq%d_bytes", q_num);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txq%d_producer_index", q_num);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txq%d_consumer_index", q_num);
                                string += ETH_GSTRING_LEN;
                                sprintf(string, "txq%d_hw_consumer_index",
                                                                        q_num);
                                string += ETH_GSTRING_LEN;
                                q_num++;
                        }
                }

                break;

        default:
                break;
        }

        mutex_unlock(&bnad->conf_mutex);
}

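/*
 * Count how many u64 counters bnad_get_ethtool_stats() will emit: the
 * base stats plus counters for every active TxF/RxF and every
 * configured CQ, RxQ and TxQ (including the second RxQ of a pair, when
 * present).
 */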
static int
bnad_get_stats_count_locked(struct net_device *netdev)
{
        struct bnad *bnad = netdev_priv(netdev);
        int i, j, count, rxf_active_num = 0, txf_active_num = 0;
        u64 bmap;

        bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
                        ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
        for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
                if (bmap & 1)
                        txf_active_num++;
                bmap >>= 1;
        }
        bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
                        ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
        for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
                if (bmap & 1)
                        rxf_active_num++;
                bmap >>= 1;
        }
        count = BNAD_ETHTOOL_STATS_NUM +
                txf_active_num * BNAD_NUM_TXF_COUNTERS +
                rxf_active_num * BNAD_NUM_RXF_COUNTERS;

        for (i = 0; i < bnad->num_rx; i++) {
                if (!bnad->rx_info[i].rx)
                        continue;
                count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
                count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
                for (j = 0; j < bnad->num_rxp_per_rx; j++)
                        if (bnad->rx_info[i].rx_ctrl[j].ccb &&
                                bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
                                bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
                                count += BNAD_NUM_RXQ_COUNTERS;
        }

        for (i = 0; i < bnad->num_tx; i++) {
                if (!bnad->tx_info[i].tx)
                        continue;
                count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
        }
        return count;
}

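/*
 * Append the per-CQ, per-RxQ and per-TxQ counters to @buf starting at
 * index @bi and return the updated index.  The ordering must stay in
 * sync with bnad_get_strings().
 */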
static int
bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
{
        int i, j;
        struct bna_rcb *rcb = NULL;
        struct bna_tcb *tcb = NULL;

        for (i = 0; i < bnad->num_rx; i++) {
                if (!bnad->rx_info[i].rx)
                        continue;
                for (j = 0; j < bnad->num_rxp_per_rx; j++)
                        if (bnad->rx_info[i].rx_ctrl[j].ccb &&
                                bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
                                bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
                                buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
                                                ccb->producer_index;
                                buf[bi++] = 0; /* ccb->consumer_index */
                                buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
                                                ccb->hw_producer_index);
                        }
        }
        for (i = 0; i < bnad->num_rx; i++) {
                if (!bnad->rx_info[i].rx)
                        continue;
                for (j = 0; j < bnad->num_rxp_per_rx; j++)
                        if (bnad->rx_info[i].rx_ctrl[j].ccb) {
                                if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
                                        bnad->rx_info[i].rx_ctrl[j].ccb->
                                        rcb[0]->rxq) {
                                        rcb = bnad->rx_info[i].rx_ctrl[j].
                                                        ccb->rcb[0];
                                        buf[bi++] = rcb->rxq->rx_packets;
                                        buf[bi++] = rcb->rxq->rx_bytes;
                                        buf[bi++] = rcb->rxq->
                                                        rx_packets_with_error;
                                        buf[bi++] = rcb->rxq->
                                                        rxbuf_alloc_failed;
                                        buf[bi++] = rcb->producer_index;
                                        buf[bi++] = rcb->consumer_index;
                                }
                                if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
                                        bnad->rx_info[i].rx_ctrl[j].ccb->
                                        rcb[1]->rxq) {
                                        rcb = bnad->rx_info[i].rx_ctrl[j].
                                                                ccb->rcb[1];
                                        buf[bi++] = rcb->rxq->rx_packets;
                                        buf[bi++] = rcb->rxq->rx_bytes;
                                        buf[bi++] = rcb->rxq->
                                                        rx_packets_with_error;
                                        buf[bi++] = rcb->rxq->
                                                        rxbuf_alloc_failed;
                                        buf[bi++] = rcb->producer_index;
                                        buf[bi++] = rcb->consumer_index;
                                }
                        }
        }

        for (i = 0; i < bnad->num_tx; i++) {
                if (!bnad->tx_info[i].tx)
                        continue;
                for (j = 0; j < bnad->num_txq_per_tx; j++)
                        if (bnad->tx_info[i].tcb[j] &&
                                bnad->tx_info[i].tcb[j]->txq) {
                                tcb = bnad->tx_info[i].tcb[j];
                                buf[bi++] = tcb->txq->tx_packets;
                                buf[bi++] = tcb->txq->tx_bytes;
                                buf[bi++] = tcb->producer_index;
                                buf[bi++] = tcb->consumer_index;
                                buf[bi++] = *(tcb->hw_consumer_index);
                        }
        }

        return bi;
}

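/*
 * Fill the ethtool statistics buffer in exactly the order advertised by
 * bnad_get_strings(): netdev stats, driver stats, base hardware stats,
 * per-TxF/RxF stats, then per-queue stats.
 */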
static void
bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
                       u64 *buf)
{
        struct bnad *bnad = netdev_priv(netdev);
        int i, j, bi;
        unsigned long flags;
        struct rtnl_link_stats64 *net_stats64;
        u64 *stats64;
        u64 bmap;

        mutex_lock(&bnad->conf_mutex);
        if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
                mutex_unlock(&bnad->conf_mutex);
                return;
        }

        /*
         * Use bna_lock to sync reads from bna_stats, which is written
         * under the same lock.
         */
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bi = 0;
        memset(buf, 0, stats->n_stats * sizeof(u64));

        net_stats64 = (struct rtnl_link_stats64 *)buf;
        bnad_netdev_qstats_fill(bnad, net_stats64);
        bnad_netdev_hwstats_fill(bnad, net_stats64);

        bi = sizeof(*net_stats64) / sizeof(u64);

        /* Get netif_queue_stopped from stack */
        bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);

        /* Fill driver stats into ethtool buffers */
        stats64 = (u64 *)&bnad->stats.drv_stats;
        for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
                buf[bi++] = stats64[i];

        /* Fill hardware stats excluding the rxf/txf into ethtool bufs */
        stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
        for (i = 0;
             i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
             i++)
                buf[bi++] = stats64[i];

        /* Fill txf stats into ethtool buffers */
        bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
                        ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
        for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
                if (bmap & 1) {
                        stats64 = (u64 *)&bnad->stats.bna_stats->
                                                hw_stats->txf_stats[i];
                        for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
                                        sizeof(u64); j++)
                                buf[bi++] = stats64[j];
                }
                bmap >>= 1;
        }

        /* Fill rxf stats into ethtool buffers */
        bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
                        ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
        for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
                if (bmap & 1) {
                        stats64 = (u64 *)&bnad->stats.bna_stats->
                                                hw_stats->rxf_stats[i];
                        for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
                                        sizeof(u64); j++)
                                buf[bi++] = stats64[j];
                }
                bmap >>= 1;
        }

        /* Fill per Q stats into ethtool buffers */
        bi = bnad_per_q_stats_fill(bnad, buf, bi);

        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        mutex_unlock(&bnad->conf_mutex);
}

static int
bnad_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return bnad_get_stats_count_locked(netdev);
        default:
                return -EOPNOTSUPP;
        }
}

static struct ethtool_ops bnad_ethtool_ops = {
        .get_settings = bnad_get_settings,
        .set_settings = bnad_set_settings,
        .get_drvinfo = bnad_get_drvinfo,
        .get_regs_len = bnad_get_regs_len,
        .get_regs = bnad_get_regs,
        .get_wol = bnad_get_wol,
        .get_link = ethtool_op_get_link,
        .get_coalesce = bnad_get_coalesce,
        .set_coalesce = bnad_set_coalesce,
        .get_ringparam = bnad_get_ringparam,
        .set_ringparam = bnad_set_ringparam,
        .get_pauseparam = bnad_get_pauseparam,
        .set_pauseparam = bnad_set_pauseparam,
        .get_strings = bnad_get_strings,
        .get_ethtool_stats = bnad_get_ethtool_stats,
        .get_sset_count = bnad_get_sset_count
};

void
bnad_set_ethtool_ops(struct net_device *netdev)
{
        SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
}