drivers/net/ethernet/brocade/bna/bnad_ethtool.c
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "cna.h"

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>

#include "bna.h"

#include "bnad.h"

#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
#define BNAD_NUM_RXQ_COUNTERS 6
#define BNAD_NUM_TXQ_COUNTERS 5

#define BNAD_ETHTOOL_STATS_NUM						\
	(sizeof(struct rtnl_link_stats64) / sizeof(u64) +		\
	sizeof(struct bnad_drv_stats) / sizeof(u64) +			\
	offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))

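/*
 * The fixed-size portion of the ethtool stats is made up of the standard
 * rtnl_link_stats64 counters, the bnad driver counters and the MAC/BPC/
 * RAD/FC hardware counters that precede the per-function (txf/rxf) stats
 * in struct bfi_enet_stats.  The string table below must list the names
 * in exactly the order that bnad_get_ethtool_stats() fills the buffer.
 */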
static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",
	"tx_errors",
	"rx_dropped",
	"tx_dropped",
	"multicast",
	"collisions",

	"rx_length_errors",
	"rx_over_errors",
	"rx_crc_errors",
	"rx_frame_errors",
	"rx_fifo_errors",
	"rx_missed_errors",

	"tx_aborted_errors",
	"tx_carrier_errors",
	"tx_fifo_errors",
	"tx_heartbeat_errors",
	"tx_window_errors",

	"rx_compressed",
	"tx_compressed",

	"netif_queue_stop",
	"netif_queue_wakeup",
	"netif_queue_stopped",
	"tso4",
	"tso6",
	"tso_err",
	"tcpcsum_offload",
	"udpcsum_offload",
	"csum_help",
	"tx_skb_too_short",
	"tx_skb_stopping",
	"tx_skb_max_vectors",
	"tx_skb_mss_too_long",
	"tx_skb_tso_too_short",
	"tx_skb_tso_prepare",
	"tx_skb_non_tso_too_long",
	"tx_skb_tcp_hdr",
	"tx_skb_udp_hdr",
	"tx_skb_csum_err",
	"tx_skb_headlen_too_long",
	"tx_skb_headlen_zero",
	"tx_skb_frag_zero",
	"tx_skb_len_mismatch",
	"hw_stats_updates",
	"netif_rx_dropped",

	"link_toggle",
	"cee_toggle",

	"rxp_info_alloc_failed",
	"mbox_intr_disabled",
	"mbox_intr_enabled",
	"tx_unmap_q_alloc_failed",
	"rx_unmap_q_alloc_failed",
	"rxbuf_alloc_failed",

	"mac_frame_64",
	"mac_frame_65_127",
	"mac_frame_128_255",
	"mac_frame_256_511",
	"mac_frame_512_1023",
	"mac_frame_1024_1518",
	"mac_frame_1518_1522",
	"mac_rx_bytes",
	"mac_rx_packets",
	"mac_rx_fcs_error",
	"mac_rx_multicast",
	"mac_rx_broadcast",
	"mac_rx_control_frames",
	"mac_rx_pause",
	"mac_rx_unknown_opcode",
	"mac_rx_alignment_error",
	"mac_rx_frame_length_error",
	"mac_rx_code_error",
	"mac_rx_carrier_sense_error",
	"mac_rx_undersize",
	"mac_rx_oversize",
	"mac_rx_fragments",
	"mac_rx_jabber",
	"mac_rx_drop",

	"mac_tx_bytes",
	"mac_tx_packets",
	"mac_tx_multicast",
	"mac_tx_broadcast",
	"mac_tx_pause",
	"mac_tx_deferral",
	"mac_tx_excessive_deferral",
	"mac_tx_single_collision",
	"mac_tx_muliple_collision",
	"mac_tx_late_collision",
	"mac_tx_excessive_collision",
	"mac_tx_total_collision",
	"mac_tx_pause_honored",
	"mac_tx_drop",
	"mac_tx_jabber",
	"mac_tx_fcs_error",
	"mac_tx_control_frame",
	"mac_tx_oversize",
	"mac_tx_undersize",
	"mac_tx_fragments",

	"bpc_tx_pause_0",
	"bpc_tx_pause_1",
	"bpc_tx_pause_2",
	"bpc_tx_pause_3",
	"bpc_tx_pause_4",
	"bpc_tx_pause_5",
	"bpc_tx_pause_6",
	"bpc_tx_pause_7",
	"bpc_tx_zero_pause_0",
	"bpc_tx_zero_pause_1",
	"bpc_tx_zero_pause_2",
	"bpc_tx_zero_pause_3",
	"bpc_tx_zero_pause_4",
	"bpc_tx_zero_pause_5",
	"bpc_tx_zero_pause_6",
	"bpc_tx_zero_pause_7",
	"bpc_tx_first_pause_0",
	"bpc_tx_first_pause_1",
	"bpc_tx_first_pause_2",
	"bpc_tx_first_pause_3",
	"bpc_tx_first_pause_4",
	"bpc_tx_first_pause_5",
	"bpc_tx_first_pause_6",
	"bpc_tx_first_pause_7",

	"bpc_rx_pause_0",
	"bpc_rx_pause_1",
	"bpc_rx_pause_2",
	"bpc_rx_pause_3",
	"bpc_rx_pause_4",
	"bpc_rx_pause_5",
	"bpc_rx_pause_6",
	"bpc_rx_pause_7",
	"bpc_rx_zero_pause_0",
	"bpc_rx_zero_pause_1",
	"bpc_rx_zero_pause_2",
	"bpc_rx_zero_pause_3",
	"bpc_rx_zero_pause_4",
	"bpc_rx_zero_pause_5",
	"bpc_rx_zero_pause_6",
	"bpc_rx_zero_pause_7",
	"bpc_rx_first_pause_0",
	"bpc_rx_first_pause_1",
	"bpc_rx_first_pause_2",
	"bpc_rx_first_pause_3",
	"bpc_rx_first_pause_4",
	"bpc_rx_first_pause_5",
	"bpc_rx_first_pause_6",
	"bpc_rx_first_pause_7",

	"rad_rx_frames",
	"rad_rx_octets",
	"rad_rx_vlan_frames",
	"rad_rx_ucast",
	"rad_rx_ucast_octets",
	"rad_rx_ucast_vlan",
	"rad_rx_mcast",
	"rad_rx_mcast_octets",
	"rad_rx_mcast_vlan",
	"rad_rx_bcast",
	"rad_rx_bcast_octets",
	"rad_rx_bcast_vlan",
	"rad_rx_drops",

	"rlb_rad_rx_frames",
	"rlb_rad_rx_octets",
	"rlb_rad_rx_vlan_frames",
	"rlb_rad_rx_ucast",
	"rlb_rad_rx_ucast_octets",
	"rlb_rad_rx_ucast_vlan",
	"rlb_rad_rx_mcast",
	"rlb_rad_rx_mcast_octets",
	"rlb_rad_rx_mcast_vlan",
	"rlb_rad_rx_bcast",
	"rlb_rad_rx_bcast_octets",
	"rlb_rad_rx_bcast_vlan",
	"rlb_rad_rx_drops",

	"fc_rx_ucast_octets",
	"fc_rx_ucast",
	"fc_rx_ucast_vlan",
	"fc_rx_mcast_octets",
	"fc_rx_mcast",
	"fc_rx_mcast_vlan",
	"fc_rx_bcast_octets",
	"fc_rx_bcast",
	"fc_rx_bcast_vlan",

	"fc_tx_ucast_octets",
	"fc_tx_ucast",
	"fc_tx_ucast_vlan",
	"fc_tx_mcast_octets",
	"fc_tx_mcast",
	"fc_tx_mcast_vlan",
	"fc_tx_bcast_octets",
	"fc_tx_bcast",
	"fc_tx_bcast_vlan",
	"fc_tx_parity_errors",
	"fc_tx_timeout",
	"fc_tx_fid_parity_errors",
};

static int
bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	cmd->supported = SUPPORTED_10000baseT_Full;
	cmd->advertising = ADVERTISED_10000baseT_Full;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->supported |= SUPPORTED_FIBRE;
	cmd->advertising |= ADVERTISED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(cmd, SPEED_10000);
		cmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	return 0;
}

static int
bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	/* 10G full duplex setting supported only */
	if (cmd->autoneg == AUTONEG_ENABLE)
		return -EOPNOTSUPP;

	if (ethtool_cmd_speed(cmd) == SPEED_10000 && cmd->duplex == DUPLEX_FULL)
		return 0;

	return -EOPNOTSUPP;
}

static void
bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bfa_ioc_attr *ioc_attr;
	unsigned long flags;

	strcpy(drvinfo->driver, BNAD_NAME);
	strcpy(drvinfo->version, BNAD_VERSION);

	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
	if (ioc_attr) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
			sizeof(drvinfo->fw_version) - 1);
		kfree(ioc_attr);
	}

	strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
}

static void
bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
{
	wolinfo->supported = 0;
	wolinfo->wolopts = 0;
}

static int
bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	/* bna_lock is required to read cfg_flags */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	coalesce->use_adaptive_rx_coalesce =
		(bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
					BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
					BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;

	return 0;
}

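/*
 * ethtool passes coalescing times in microseconds; the hardware works in
 * BFI_COALESCING_TIMER_UNIT ticks, so the requested values are converted
 * and range checked against BFI_MAX_COALESCING_TIMEO before being applied.
 * use_adaptive_rx_coalesce maps onto the driver's dynamic interrupt
 * moderation flag, BNAD_CF_DIM_ENABLED.
 */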
static int
bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;
	int to_del = 0;

	if (coalesce->rx_coalesce_usecs == 0 ||
	    coalesce->rx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	if (coalesce->tx_coalesce_usecs == 0 ||
	    coalesce->tx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);
	/*
	 * No need to store rx_coalesce_usecs here: whenever DIM is
	 * disabled, it can be read back from the stack.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (coalesce->use_adaptive_rx_coalesce) {
		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
			bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
			bnad_dim_timer_start(bnad);
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
			bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
			if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
			    test_bit(BNAD_RF_DIM_TIMER_RUNNING,
			    &bnad->run_flags)) {
				clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
							&bnad->run_flags);
				to_del = 1;
			}
			spin_unlock_irqrestore(&bnad->bna_lock, flags);
			if (to_del)
				del_timer_sync(&bnad->dim_timer);
			spin_lock_irqsave(&bnad->bna_lock, flags);
			bnad_rx_coalescing_timeo_set(bnad);
		}
	}
	if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;
		bnad_tx_coalescing_timeo_set(bnad);
	}

	if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;

		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
			bnad_rx_coalescing_timeo_set(bnad);

	}

	/* Add Tx Inter-pkt DMA count?  */

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	return 0;
}

static void
bnad_get_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;
	ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;

	ringparam->rx_pending = bnad->rxq_depth;
	ringparam->tx_pending = bnad->txq_depth;
}

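/*
 * Ring depths must be powers of two between BNAD_MIN_Q_DEPTH and the
 * per-direction maximum.  Changing a depth while the interface is up
 * tears down and re-creates the affected Rx or Tx objects; for Rx the
 * unicast MAC address, VLAN filters, default broadcast and Rx mode are
 * restored afterwards.
 */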
static int
bnad_set_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	int i, current_err, err = 0;
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);
	if (ringparam->rx_pending == bnad->rxq_depth &&
	    ringparam->tx_pending == bnad->txq_depth) {
		mutex_unlock(&bnad->conf_mutex);
		return 0;
	}

	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
	    !BNA_POWER_OF_2(ringparam->rx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}
	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
	    !BNA_POWER_OF_2(ringparam->tx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}

	if (ringparam->rx_pending != bnad->rxq_depth) {
		bnad->rxq_depth = ringparam->rx_pending;
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			bnad_cleanup_rx(bnad, i);
			current_err = bnad_setup_rx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}

		if (!err && bnad->rx_info[0].rx) {
			/* restore rx configuration */
			bnad_restore_vlans(bnad, 0);
			bnad_enable_default_bcast(bnad);
			spin_lock_irqsave(&bnad->bna_lock, flags);
			bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
			spin_unlock_irqrestore(&bnad->bna_lock, flags);
			bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI |
					     BNAD_CF_PROMISC);
			bnad_set_rx_mode(netdev);
		}
	}
	if (ringparam->tx_pending != bnad->txq_depth) {
		bnad->txq_depth = ringparam->tx_pending;
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			bnad_cleanup_tx(bnad, i);
			current_err = bnad_setup_tx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}
	}

	mutex_unlock(&bnad->conf_mutex);
	return err;
}

static void
bnad_get_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	pauseparam->autoneg = 0;
	pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
	pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
}

static int
bnad_set_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	unsigned long flags;

	if (pauseparam->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);
	if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
	    pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
		pause_config.rx_pause = pauseparam->rx_pause;
		pause_config.tx_pause = pauseparam->tx_pause;
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
	mutex_unlock(&bnad->conf_mutex);
	return 0;
}

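/*
 * Emit the ETH_SS_STATS string table.  The layout is: the fixed
 * bnad_net_stats_strings[] names, then per-TxF and per-RxF counter names
 * for every resource id set in the tx/rx rid masks, then per-CQ, per-RxQ
 * and per-TxQ counter names for the queues that currently exist.  The
 * order must match both bnad_get_stats_count_locked() and
 * bnad_get_ethtool_stats().
 */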
static void
bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, q_num;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
			BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
				   ETH_GSTRING_LEN));
			memcpy(string, bnad_net_stats_strings[i],
			       ETH_GSTRING_LEN);
			string += ETH_GSTRING_LEN;
		}
		bmap = bna_tx_rid_mask(&bnad->bna);
		for (i = 0; bmap; i++) {
			if (bmap & 1) {
				sprintf(string, "txf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_errors", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_mac_sa", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}

		bmap = bna_rx_rid_mask(&bnad->bna);
		for (i = 0; bmap; i++) {
			if (bmap & 1) {
				sprintf(string, "rxf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_frame_drops", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}

		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "cq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_hw_producer_index",
					q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_intr", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_poll", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_schedule", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_keep_poll", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_complete", q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}

		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "rxq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_packets_with_error",
								q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_allocbuf_failed", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
				if (bnad->rx_info[i].rx_ctrl[j].ccb &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					sprintf(string, "rxq%d_packets", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_bytes", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string,
					"rxq%d_packets_with_error", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_allocbuf_failed",
								q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_producer_index",
								q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_consumer_index",
								q_num);
					string += ETH_GSTRING_LEN;
					q_num++;
				}
			}
		}

		q_num = 0;
		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			for (j = 0; j < bnad->num_txq_per_tx; j++) {
				sprintf(string, "txq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_hw_consumer_index",
									q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}

		break;

	default:
		break;
	}

	mutex_unlock(&bnad->conf_mutex);
}

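/*
 * Count how many u64 values bnad_get_ethtool_stats() will emit: the
 * fixed BNAD_ETHTOOL_STATS_NUM entries, BNAD_NUM_TXF_COUNTERS and
 * BNAD_NUM_RXF_COUNTERS per active Tx/Rx function id, plus the per-CQ,
 * per-RxQ and per-TxQ counters for the queues that currently exist.
 */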
static int
bnad_get_stats_count_locked(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0;
	u32 bmap;

	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1)
			txf_active_num++;
		bmap >>= 1;
	}
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1)
			rxf_active_num++;
		bmap >>= 1;
	}
	count = BNAD_ETHTOOL_STATS_NUM +
		txf_active_num * BNAD_NUM_TXF_COUNTERS +
		rxf_active_num * BNAD_NUM_RXF_COUNTERS;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
		count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
				count += BNAD_NUM_RXQ_COUNTERS;
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
	}
	return count;
}

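/*
 * Append the per-CQ, per-RxQ and per-TxQ counters to buf, starting at
 * index bi, and return the next free index.  Queues that are not set up
 * are skipped, mirroring the string generation above.
 */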
static int
bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
{
	int i, j;
	struct bna_rcb *rcb = NULL;
	struct bna_tcb *tcb = NULL;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
				buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
						ccb->producer_index;
				buf[bi++] = 0; /* ccb->consumer_index */
				buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
						ccb->hw_producer_index);

				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_intr_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_poll_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_schedule;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_keep_poll;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_complete;
			}
	}
	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[0]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
							ccb->rcb[0];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
								ccb->rcb[1];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
			}
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			if (bnad->tx_info[i].tcb[j] &&
				bnad->tx_info[i].tcb[j]->txq) {
				tcb = bnad->tx_info[i].tcb[j];
				buf[bi++] = tcb->txq->tx_packets;
				buf[bi++] = tcb->txq->tx_bytes;
				buf[bi++] = tcb->producer_index;
				buf[bi++] = tcb->consumer_index;
				buf[bi++] = *(tcb->hw_consumer_index);
			}
	}

	return bi;
}

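/*
 * Fill the ethtool stats buffer in the same order as the string table:
 * rtnl_link_stats64, driver stats, the fixed hardware stats, per-TxF and
 * per-RxF stats for each active function, then the per-queue counters.
 * If the expected count no longer matches stats->n_stats (e.g. the queue
 * configuration changed since the count was taken), the request is
 * silently dropped.
 */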
static void
bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
		       u64 *buf)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, bi;
	unsigned long flags;
	struct rtnl_link_stats64 *net_stats64;
	u64 *stats64;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);
	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
		mutex_unlock(&bnad->conf_mutex);
		return;
	}

	/*
	 * Use bna_lock to sync reads from bna_stats, which is written
	 * under the same lock.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bi = 0;
	memset(buf, 0, stats->n_stats * sizeof(u64));

	net_stats64 = (struct rtnl_link_stats64 *)buf;
	bnad_netdev_qstats_fill(bnad, net_stats64);
	bnad_netdev_hwstats_fill(bnad, net_stats64);

	bi = sizeof(*net_stats64) / sizeof(u64);

	/* Get netif_queue_stopped from stack */
	bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);

	/* Fill driver stats into ethtool buffers */
	stats64 = (u64 *)&bnad->stats.drv_stats;
	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
		buf[bi++] = stats64[i];

	/* Fill hardware stats excluding the rxf/txf into ethtool bufs */
	stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats;
	for (i = 0;
	     i < offsetof(struct bfi_enet_stats, rxf_stats[0]) /
		sizeof(u64);
	     i++)
		buf[bi++] = stats64[i];

	/* Fill txf stats into ethtool buffers */
	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.txf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill rxf stats into ethtool buffers */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.rxf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill per Q stats into ethtool buffers */
	bi = bnad_per_q_stats_fill(bnad, buf, bi);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}

static int
bnad_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return bnad_get_stats_count_locked(netdev);
	default:
		return -EOPNOTSUPP;
	}
}

static struct ethtool_ops bnad_ethtool_ops = {
	.get_settings = bnad_get_settings,
	.set_settings = bnad_set_settings,
	.get_drvinfo = bnad_get_drvinfo,
	.get_wol = bnad_get_wol,
	.get_link = ethtool_op_get_link,
	.get_coalesce = bnad_get_coalesce,
	.set_coalesce = bnad_set_coalesce,
	.get_ringparam = bnad_get_ringparam,
	.set_ringparam = bnad_set_ringparam,
	.get_pauseparam = bnad_get_pauseparam,
	.set_pauseparam = bnad_set_pauseparam,
	.get_strings = bnad_get_strings,
	.get_ethtool_stats = bnad_get_ethtool_stats,
	.get_sset_count = bnad_get_sset_count
};

void
bnad_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
}