ixgbe: Move interrupt related values out of ring and into q_vector
[pandora-kernel.git] / drivers / net / ixgbe / ixgbe_ethtool.c
1 /*******************************************************************************
2
3   Intel 10 Gigabit PCI Express Linux driver
4   Copyright(c) 1999 - 2011 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28 /* ethtool support for ixgbe */
29
30 #include <linux/interrupt.h>
31 #include <linux/types.h>
32 #include <linux/module.h>
33 #include <linux/slab.h>
34 #include <linux/pci.h>
35 #include <linux/netdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/vmalloc.h>
38 #include <linux/uaccess.h>
39
40 #include "ixgbe.h"
41
42
#define IXGBE_ALL_RAR_ENTRIES 16

/* where a statistic lives: the generic netdev counters or ixgbe's own */
enum {NETDEV_STATS, IXGBE_STATS};

/* descriptor for one statistic exported through "ethtool -S" */
struct ixgbe_stats {
        char stat_string[ETH_GSTRING_LEN];      /* name shown to userspace */
        int type;                               /* NETDEV_STATS or IXGBE_STATS */
        int sizeof_stat;                        /* size of the backing field */
        int stat_offset;                        /* byte offset within its struct */
};

/* expand to {type, size, offset} for a field of struct ixgbe_adapter */
#define IXGBE_STAT(m)           IXGBE_STATS, \
                                sizeof(((struct ixgbe_adapter *)0)->m), \
                                offsetof(struct ixgbe_adapter, m)
/* expand to {type, size, offset} for a field of struct rtnl_link_stats64 */
#define IXGBE_NETDEV_STAT(m)    NETDEV_STATS, \
                                sizeof(((struct rtnl_link_stats64 *)0)->m), \
                                offsetof(struct rtnl_link_stats64, m)
60
/*
 * Table of all global statistics exported via "ethtool -S".  Per-queue and
 * per-priority (DCB) counters are appended at runtime and are not listed
 * here.  Order determines the order of values reported to userspace.
 */
static struct ixgbe_stats ixgbe_gstrings_stats[] = {
        {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
        {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
        {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
        {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
        {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
        {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
        {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
        {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
        {"lsc_int", IXGBE_STAT(lsc_int)},
        {"tx_busy", IXGBE_STAT(tx_busy)},
        {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
        {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
        {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
        {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
        {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
        {"multicast", IXGBE_NETDEV_STAT(multicast)},
        {"broadcast", IXGBE_STAT(stats.bprc)},
        {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
        {"collisions", IXGBE_NETDEV_STAT(collisions)},
        {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
        {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
        {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
        {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
        {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
        {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
        {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
        {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
        {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
        {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
        {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
        {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
        {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
        {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
        {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
        {"tx_restart_queue", IXGBE_STAT(restart_queue)},
        {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
        {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
        {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
        {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
        {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
        {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
        {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
        {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
        {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
        {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
        {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
        {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
        {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
        {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
#ifdef IXGBE_FCOE
        {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
        {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
        {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
        {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
        {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
        {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};
120
121 #define IXGBE_QUEUE_STATS_LEN \
122         ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
123         ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
124         (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
125 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
126 #define IXGBE_PB_STATS_LEN ( \
127                  (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
128                  IXGBE_FLAG_DCB_ENABLED) ? \
129                  (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
130                   sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
131                   sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
132                   sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
133                   / sizeof(u64) : 0)
134 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
135                          IXGBE_PB_STATS_LEN + \
136                          IXGBE_QUEUE_STATS_LEN)
137
138 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
139         "Register test  (offline)", "Eeprom test    (offline)",
140         "Interrupt test (offline)", "Loopback test  (offline)",
141         "Link test   (on/offline)"
142 };
143 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
144
/**
 * ixgbe_get_settings - report link capabilities and current state
 * @netdev: network interface being queried
 * @ecmd: ethtool command structure to fill in
 *
 * Fills in supported/advertised link modes, port type, autoneg setting,
 * and the current speed/duplex.  The port type chosen from the media type
 * may be overridden later by the PHY-type switch below.  Always returns 0.
 */
static int ixgbe_get_settings(struct net_device *netdev,
                              struct ethtool_cmd *ecmd)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 link_speed = 0;
        bool link_up;

        ecmd->supported = SUPPORTED_10000baseT_Full;
        ecmd->autoneg = AUTONEG_ENABLE;
        ecmd->transceiver = XCVR_EXTERNAL;
        if ((hw->phy.media_type == ixgbe_media_type_copper) ||
            (hw->phy.multispeed_fiber)) {
                ecmd->supported |= (SUPPORTED_1000baseT_Full |
                                    SUPPORTED_Autoneg);

                /* only X540 parts advertise 100 Mb/s support here */
                switch (hw->mac.type) {
                case ixgbe_mac_X540:
                        ecmd->supported |= SUPPORTED_100baseT_Full;
                        break;
                default:
                        break;
                }

                ecmd->advertising = ADVERTISED_Autoneg;
                if (hw->phy.autoneg_advertised) {
                        if (hw->phy.autoneg_advertised &
                            IXGBE_LINK_SPEED_100_FULL)
                                ecmd->advertising |= ADVERTISED_100baseT_Full;
                        if (hw->phy.autoneg_advertised &
                            IXGBE_LINK_SPEED_10GB_FULL)
                                ecmd->advertising |= ADVERTISED_10000baseT_Full;
                        if (hw->phy.autoneg_advertised &
                            IXGBE_LINK_SPEED_1GB_FULL)
                                ecmd->advertising |= ADVERTISED_1000baseT_Full;
                } else {
                        /*
                         * Default advertised modes in case
                         * phy.autoneg_advertised isn't set.
                         */
                        ecmd->advertising |= (ADVERTISED_10000baseT_Full |
                                              ADVERTISED_1000baseT_Full);
                        if (hw->mac.type == ixgbe_mac_X540)
                                ecmd->advertising |= ADVERTISED_100baseT_Full;
                }

                if (hw->phy.media_type == ixgbe_media_type_copper) {
                        ecmd->supported |= SUPPORTED_TP;
                        ecmd->advertising |= ADVERTISED_TP;
                        ecmd->port = PORT_TP;
                } else {
                        ecmd->supported |= SUPPORTED_FIBRE;
                        ecmd->advertising |= ADVERTISED_FIBRE;
                        ecmd->port = PORT_FIBRE;
                }
        } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
                /* Set as FIBRE until SERDES defined in kernel */
                if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
                        ecmd->supported = (SUPPORTED_1000baseT_Full |
                                           SUPPORTED_FIBRE);
                        ecmd->advertising = (ADVERTISED_1000baseT_Full |
                                             ADVERTISED_FIBRE);
                        ecmd->port = PORT_FIBRE;
                        ecmd->autoneg = AUTONEG_DISABLE;
                } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
                           (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
                        ecmd->supported |= (SUPPORTED_1000baseT_Full |
                                            SUPPORTED_Autoneg |
                                            SUPPORTED_FIBRE);
                        ecmd->advertising = (ADVERTISED_10000baseT_Full |
                                             ADVERTISED_1000baseT_Full |
                                             ADVERTISED_Autoneg |
                                             ADVERTISED_FIBRE);
                        ecmd->port = PORT_FIBRE;
                } else {
                        ecmd->supported |= (SUPPORTED_1000baseT_Full |
                                            SUPPORTED_FIBRE);
                        ecmd->advertising = (ADVERTISED_10000baseT_Full |
                                             ADVERTISED_1000baseT_Full |
                                             ADVERTISED_FIBRE);
                        ecmd->port = PORT_FIBRE;
                }
        } else {
                /* anything else: fixed 10G fiber, no autoneg */
                ecmd->supported |= SUPPORTED_FIBRE;
                ecmd->advertising = (ADVERTISED_10000baseT_Full |
                                     ADVERTISED_FIBRE);
                ecmd->port = PORT_FIBRE;
                ecmd->autoneg = AUTONEG_DISABLE;
        }

        /* Get PHY type; this may override the port chosen above */
        switch (adapter->hw.phy.type) {
        case ixgbe_phy_tn:
        case ixgbe_phy_aq:
        case ixgbe_phy_cu_unknown:
                /* Copper 10G-BASET */
                ecmd->port = PORT_TP;
                break;
        case ixgbe_phy_qt:
                ecmd->port = PORT_FIBRE;
                break;
        case ixgbe_phy_nl:
        case ixgbe_phy_sfp_passive_tyco:
        case ixgbe_phy_sfp_passive_unknown:
        case ixgbe_phy_sfp_ftl:
        case ixgbe_phy_sfp_avago:
        case ixgbe_phy_sfp_intel:
        case ixgbe_phy_sfp_unknown:
                switch (adapter->hw.phy.sfp_type) {
                /* SFP+ devices, further checking needed */
                case ixgbe_sfp_type_da_cu:
                case ixgbe_sfp_type_da_cu_core0:
                case ixgbe_sfp_type_da_cu_core1:
                        ecmd->port = PORT_DA;
                        break;
                case ixgbe_sfp_type_sr:
                case ixgbe_sfp_type_lr:
                case ixgbe_sfp_type_srlr_core0:
                case ixgbe_sfp_type_srlr_core1:
                        ecmd->port = PORT_FIBRE;
                        break;
                case ixgbe_sfp_type_not_present:
                        ecmd->port = PORT_NONE;
                        break;
                case ixgbe_sfp_type_1g_cu_core0:
                case ixgbe_sfp_type_1g_cu_core1:
                        /* 1G copper SFP: restrict supported/advertised modes */
                        ecmd->port = PORT_TP;
                        ecmd->supported = SUPPORTED_TP;
                        ecmd->advertising = (ADVERTISED_1000baseT_Full |
                                             ADVERTISED_TP);
                        break;
                case ixgbe_sfp_type_unknown:
                default:
                        ecmd->port = PORT_OTHER;
                        break;
                }
                break;
        case ixgbe_phy_xaui:
                ecmd->port = PORT_NONE;
                break;
        case ixgbe_phy_unknown:
        case ixgbe_phy_generic:
        case ixgbe_phy_sfp_unsupported:
        default:
                ecmd->port = PORT_OTHER;
                break;
        }

        /* read current link state; false => do not wait for link to settle */
        hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
        if (link_up) {
                switch (link_speed) {
                case IXGBE_LINK_SPEED_10GB_FULL:
                        ethtool_cmd_speed_set(ecmd, SPEED_10000);
                        break;
                case IXGBE_LINK_SPEED_1GB_FULL:
                        ethtool_cmd_speed_set(ecmd, SPEED_1000);
                        break;
                case IXGBE_LINK_SPEED_100_FULL:
                        ethtool_cmd_speed_set(ecmd, SPEED_100);
                        break;
                default:
                        break;
                }
                ecmd->duplex = DUPLEX_FULL;
        } else {
                /* -1 is the ethtool convention for "unknown" speed/duplex */
                ethtool_cmd_speed_set(ecmd, -1);
                ecmd->duplex = -1;
        }

        return 0;
}
316
317 static int ixgbe_set_settings(struct net_device *netdev,
318                               struct ethtool_cmd *ecmd)
319 {
320         struct ixgbe_adapter *adapter = netdev_priv(netdev);
321         struct ixgbe_hw *hw = &adapter->hw;
322         u32 advertised, old;
323         s32 err = 0;
324
325         if ((hw->phy.media_type == ixgbe_media_type_copper) ||
326             (hw->phy.multispeed_fiber)) {
327                 /* 10000/copper and 1000/copper must autoneg
328                  * this function does not support any duplex forcing, but can
329                  * limit the advertising of the adapter to only 10000 or 1000 */
330                 if (ecmd->autoneg == AUTONEG_DISABLE)
331                         return -EINVAL;
332
333                 old = hw->phy.autoneg_advertised;
334                 advertised = 0;
335                 if (ecmd->advertising & ADVERTISED_10000baseT_Full)
336                         advertised |= IXGBE_LINK_SPEED_10GB_FULL;
337
338                 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
339                         advertised |= IXGBE_LINK_SPEED_1GB_FULL;
340
341                 if (ecmd->advertising & ADVERTISED_100baseT_Full)
342                         advertised |= IXGBE_LINK_SPEED_100_FULL;
343
344                 if (old == advertised)
345                         return err;
346                 /* this sets the link speed and restarts auto-neg */
347                 hw->mac.autotry_restart = true;
348                 err = hw->mac.ops.setup_link(hw, advertised, true, true);
349                 if (err) {
350                         e_info(probe, "setup link failed with code %d\n", err);
351                         hw->mac.ops.setup_link(hw, old, true, true);
352                 }
353         } else {
354                 /* in this case we currently only support 10Gb/FULL */
355                 u32 speed = ethtool_cmd_speed(ecmd);
356                 if ((ecmd->autoneg == AUTONEG_ENABLE) ||
357                     (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
358                     (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
359                         return -EINVAL;
360         }
361
362         return err;
363 }
364
365 static void ixgbe_get_pauseparam(struct net_device *netdev,
366                                  struct ethtool_pauseparam *pause)
367 {
368         struct ixgbe_adapter *adapter = netdev_priv(netdev);
369         struct ixgbe_hw *hw = &adapter->hw;
370
371         /*
372          * Flow Control Autoneg isn't on if
373          *  - we didn't ask for it OR
374          *  - it failed, we know this by tx & rx being off
375          */
376         if (hw->fc.disable_fc_autoneg ||
377             (hw->fc.current_mode == ixgbe_fc_none))
378                 pause->autoneg = 0;
379         else
380                 pause->autoneg = 1;
381
382         if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
383                 pause->rx_pause = 1;
384         } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
385                 pause->tx_pause = 1;
386         } else if (hw->fc.current_mode == ixgbe_fc_full) {
387                 pause->rx_pause = 1;
388                 pause->tx_pause = 1;
389 #ifdef CONFIG_DCB
390         } else if (hw->fc.current_mode == ixgbe_fc_pfc) {
391                 pause->rx_pause = 0;
392                 pause->tx_pause = 0;
393 #endif
394         }
395 }
396
397 static int ixgbe_set_pauseparam(struct net_device *netdev,
398                                 struct ethtool_pauseparam *pause)
399 {
400         struct ixgbe_adapter *adapter = netdev_priv(netdev);
401         struct ixgbe_hw *hw = &adapter->hw;
402         struct ixgbe_fc_info fc;
403
404 #ifdef CONFIG_DCB
405         if (adapter->dcb_cfg.pfc_mode_enable ||
406                 ((hw->mac.type == ixgbe_mac_82598EB) &&
407                 (adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
408                 return -EINVAL;
409
410 #endif
411         fc = hw->fc;
412
413         if (pause->autoneg != AUTONEG_ENABLE)
414                 fc.disable_fc_autoneg = true;
415         else
416                 fc.disable_fc_autoneg = false;
417
418         if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
419                 fc.requested_mode = ixgbe_fc_full;
420         else if (pause->rx_pause && !pause->tx_pause)
421                 fc.requested_mode = ixgbe_fc_rx_pause;
422         else if (!pause->rx_pause && pause->tx_pause)
423                 fc.requested_mode = ixgbe_fc_tx_pause;
424         else if (!pause->rx_pause && !pause->tx_pause)
425                 fc.requested_mode = ixgbe_fc_none;
426         else
427                 return -EINVAL;
428
429 #ifdef CONFIG_DCB
430         adapter->last_lfc_mode = fc.requested_mode;
431 #endif
432
433         /* if the thing changed then we'll update and use new autoneg */
434         if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
435                 hw->fc = fc;
436                 if (netif_running(netdev))
437                         ixgbe_reinit_locked(adapter);
438                 else
439                         ixgbe_reset(adapter);
440         }
441
442         return 0;
443 }
444
/* Restart the device: full reinit when running, bare reset otherwise. */
static void ixgbe_do_reset(struct net_device *netdev)
{
        struct ixgbe_adapter *priv = netdev_priv(netdev);

        if (netif_running(netdev))
                ixgbe_reinit_locked(priv);
        else
                ixgbe_reset(priv);
}
454
455 static u32 ixgbe_get_rx_csum(struct net_device *netdev)
456 {
457         struct ixgbe_adapter *adapter = netdev_priv(netdev);
458         return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
459 }
460
461 static void ixgbe_set_rsc(struct ixgbe_adapter *adapter)
462 {
463         int i;
464
465         for (i = 0; i < adapter->num_rx_queues; i++) {
466                 struct ixgbe_ring *ring = adapter->rx_ring[i];
467                 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
468                         set_ring_rsc_enabled(ring);
469                         ixgbe_configure_rscctl(adapter, ring);
470                 } else {
471                         ixgbe_clear_rscctl(adapter, ring);
472                 }
473         }
474 }
475
476 static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
477 {
478         struct ixgbe_adapter *adapter = netdev_priv(netdev);
479         bool need_reset = false;
480
481         if (data) {
482                 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
483         } else {
484                 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
485
486                 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
487                         adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
488                         netdev->features &= ~NETIF_F_LRO;
489                 }
490
491                 switch (adapter->hw.mac.type) {
492                 case ixgbe_mac_X540:
493                         ixgbe_set_rsc(adapter);
494                         break;
495                 case ixgbe_mac_82599EB:
496                         need_reset = true;
497                         break;
498                 default:
499                         break;
500                 }
501         }
502
503         if (need_reset)
504                 ixgbe_do_reset(netdev);
505
506         return 0;
507 }
508
509 static u32 ixgbe_get_tx_csum(struct net_device *netdev)
510 {
511         return (netdev->features & NETIF_F_IP_CSUM) != 0;
512 }
513
514 static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
515 {
516         struct ixgbe_adapter *adapter = netdev_priv(netdev);
517         u32 feature_list;
518
519         feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
520         switch (adapter->hw.mac.type) {
521         case ixgbe_mac_82599EB:
522         case ixgbe_mac_X540:
523                 feature_list |= NETIF_F_SCTP_CSUM;
524                 break;
525         default:
526                 break;
527         }
528         if (data)
529                 netdev->features |= feature_list;
530         else
531                 netdev->features &= ~feature_list;
532
533         return 0;
534 }
535
536 static int ixgbe_set_tso(struct net_device *netdev, u32 data)
537 {
538         if (data) {
539                 netdev->features |= NETIF_F_TSO;
540                 netdev->features |= NETIF_F_TSO6;
541         } else {
542                 netdev->features &= ~NETIF_F_TSO;
543                 netdev->features &= ~NETIF_F_TSO6;
544         }
545         return 0;
546 }
547
548 static u32 ixgbe_get_msglevel(struct net_device *netdev)
549 {
550         struct ixgbe_adapter *adapter = netdev_priv(netdev);
551         return adapter->msg_enable;
552 }
553
554 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
555 {
556         struct ixgbe_adapter *adapter = netdev_priv(netdev);
557         adapter->msg_enable = data;
558 }
559
/* Size in bytes of the register dump produced by ixgbe_get_regs(). */
static int ixgbe_get_regs_len(struct net_device *netdev)
{
/* number of 32-bit registers snapshotted; also used by ixgbe_get_regs() */
#define IXGBE_REGS_LEN  1128
        return IXGBE_REGS_LEN * sizeof(u32);
}
565
566 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
567
568 static void ixgbe_get_regs(struct net_device *netdev,
569                            struct ethtool_regs *regs, void *p)
570 {
571         struct ixgbe_adapter *adapter = netdev_priv(netdev);
572         struct ixgbe_hw *hw = &adapter->hw;
573         u32 *regs_buff = p;
574         u8 i;
575
576         memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
577
578         regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
579
580         /* General Registers */
581         regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
582         regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
583         regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
584         regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
585         regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
586         regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
587         regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
588         regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
589
590         /* NVM Register */
591         regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
592         regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
593         regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
594         regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
595         regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
596         regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
597         regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
598         regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
599         regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
600         regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
601
602         /* Interrupt */
603         /* don't read EICR because it can clear interrupt causes, instead
604          * read EICS which is a shadow but doesn't clear EICR */
605         regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
606         regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
607         regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
608         regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
609         regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
610         regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
611         regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
612         regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
613         regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
614         regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
615         regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
616         regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
617
618         /* Flow Control */
619         regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
620         regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
621         regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
622         regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
623         regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
624         for (i = 0; i < 8; i++) {
625                 switch (hw->mac.type) {
626                 case ixgbe_mac_82598EB:
627                         regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
628                         regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
629                         break;
630                 case ixgbe_mac_82599EB:
631                         regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
632                         regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
633                         break;
634                 default:
635                         break;
636                 }
637         }
638         regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
639         regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
640
641         /* Receive DMA */
642         for (i = 0; i < 64; i++)
643                 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
644         for (i = 0; i < 64; i++)
645                 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
646         for (i = 0; i < 64; i++)
647                 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
648         for (i = 0; i < 64; i++)
649                 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
650         for (i = 0; i < 64; i++)
651                 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
652         for (i = 0; i < 64; i++)
653                 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
654         for (i = 0; i < 16; i++)
655                 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
656         for (i = 0; i < 16; i++)
657                 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
658         regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
659         for (i = 0; i < 8; i++)
660                 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
661         regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
662         regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
663
664         /* Receive */
665         regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
666         regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
667         for (i = 0; i < 16; i++)
668                 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
669         for (i = 0; i < 16; i++)
670                 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
671         regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
672         regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
673         regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
674         regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
675         regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
676         regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
677         for (i = 0; i < 8; i++)
678                 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
679         for (i = 0; i < 8; i++)
680                 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
681         regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
682
683         /* Transmit */
684         for (i = 0; i < 32; i++)
685                 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
686         for (i = 0; i < 32; i++)
687                 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
688         for (i = 0; i < 32; i++)
689                 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
690         for (i = 0; i < 32; i++)
691                 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
692         for (i = 0; i < 32; i++)
693                 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
694         for (i = 0; i < 32; i++)
695                 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
696         for (i = 0; i < 32; i++)
697                 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
698         for (i = 0; i < 32; i++)
699                 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
700         regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
701         for (i = 0; i < 16; i++)
702                 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
703         regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
704         for (i = 0; i < 8; i++)
705                 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
706         regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
707
708         /* Wake Up */
709         regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
710         regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
711         regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
712         regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
713         regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
714         regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
715         regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
716         regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
717         regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
718
719         /* DCB */
720         regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
721         regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
722         regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
723         regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
724         for (i = 0; i < 8; i++)
725                 regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
726         for (i = 0; i < 8; i++)
727                 regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
728         for (i = 0; i < 8; i++)
729                 regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
730         for (i = 0; i < 8; i++)
731                 regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
732         for (i = 0; i < 8; i++)
733                 regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
734         for (i = 0; i < 8; i++)
735                 regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
736
737         /* Statistics */
738         regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
739         regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
740         regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
741         regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
742         for (i = 0; i < 8; i++)
743                 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
744         regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
745         regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
746         regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
747         regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
748         regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
749         regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
750         regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
751         for (i = 0; i < 8; i++)
752                 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
753         for (i = 0; i < 8; i++)
754                 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
755         for (i = 0; i < 8; i++)
756                 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
757         for (i = 0; i < 8; i++)
758                 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
759         regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
760         regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
761         regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
762         regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
763         regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
764         regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
765         regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
766         regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
767         regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
768         regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
769         regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
770         regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
771         for (i = 0; i < 8; i++)
772                 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
773         regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
774         regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
775         regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
776         regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
777         regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
778         regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
779         regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
780         regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
781         regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
782         regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
783         regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
784         regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
785         regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
786         regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
787         regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
788         regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
789         regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
790         regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
791         regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
792         for (i = 0; i < 16; i++)
793                 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
794         for (i = 0; i < 16; i++)
795                 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
796         for (i = 0; i < 16; i++)
797                 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
798         for (i = 0; i < 16; i++)
799                 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
800
801         /* MAC */
802         regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
803         regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
804         regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
805         regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
806         regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
807         regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
808         regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
809         regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
810         regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
811         regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
812         regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
813         regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
814         regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
815         regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
816         regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
817         regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
818         regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
819         regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
820         regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
821         regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
822         regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
823         regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
824         regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
825         regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
826         regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
827         regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
828         regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
829         regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
830         regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
831         regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
832         regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
833         regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
834         regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
835
836         /* Diagnostic */
837         regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
838         for (i = 0; i < 8; i++)
839                 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
840         regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
841         for (i = 0; i < 4; i++)
842                 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
843         regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
844         regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
845         for (i = 0; i < 8; i++)
846                 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
847         regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
848         for (i = 0; i < 4; i++)
849                 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
850         regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
851         regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
852         regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
853         regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
854         regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
855         regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
856         regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
857         regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
858         regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
859         regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
860         regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
861         for (i = 0; i < 8; i++)
862                 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
863         regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
864         regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
865         regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
866         regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
867         regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
868         regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
869         regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
870         regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
871         regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
872 }
873
874 static int ixgbe_get_eeprom_len(struct net_device *netdev)
875 {
876         struct ixgbe_adapter *adapter = netdev_priv(netdev);
877         return adapter->hw.eeprom.word_size * 2;
878 }
879
880 static int ixgbe_get_eeprom(struct net_device *netdev,
881                             struct ethtool_eeprom *eeprom, u8 *bytes)
882 {
883         struct ixgbe_adapter *adapter = netdev_priv(netdev);
884         struct ixgbe_hw *hw = &adapter->hw;
885         u16 *eeprom_buff;
886         int first_word, last_word, eeprom_len;
887         int ret_val = 0;
888         u16 i;
889
890         if (eeprom->len == 0)
891                 return -EINVAL;
892
893         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
894
895         first_word = eeprom->offset >> 1;
896         last_word = (eeprom->offset + eeprom->len - 1) >> 1;
897         eeprom_len = last_word - first_word + 1;
898
899         eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
900         if (!eeprom_buff)
901                 return -ENOMEM;
902
903         ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
904                                              eeprom_buff);
905
906         /* Device's eeprom is always little-endian, word addressable */
907         for (i = 0; i < eeprom_len; i++)
908                 le16_to_cpus(&eeprom_buff[i]);
909
910         memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
911         kfree(eeprom_buff);
912
913         return ret_val;
914 }
915
916 static void ixgbe_get_drvinfo(struct net_device *netdev,
917                               struct ethtool_drvinfo *drvinfo)
918 {
919         struct ixgbe_adapter *adapter = netdev_priv(netdev);
920         char firmware_version[32];
921
922         strncpy(drvinfo->driver, ixgbe_driver_name,
923                 sizeof(drvinfo->driver) - 1);
924         strncpy(drvinfo->version, ixgbe_driver_version,
925                 sizeof(drvinfo->version) - 1);
926
927         snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
928                  (adapter->eeprom_version & 0xF000) >> 12,
929                  (adapter->eeprom_version & 0x0FF0) >> 4,
930                  adapter->eeprom_version & 0x000F);
931
932         strncpy(drvinfo->fw_version, firmware_version,
933                 sizeof(drvinfo->fw_version));
934         strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
935                 sizeof(drvinfo->bus_info));
936         drvinfo->n_stats = IXGBE_STATS_LEN;
937         drvinfo->testinfo_len = IXGBE_TEST_LEN;
938         drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
939 }
940
941 static void ixgbe_get_ringparam(struct net_device *netdev,
942                                 struct ethtool_ringparam *ring)
943 {
944         struct ixgbe_adapter *adapter = netdev_priv(netdev);
945         struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
946         struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
947
948         ring->rx_max_pending = IXGBE_MAX_RXD;
949         ring->tx_max_pending = IXGBE_MAX_TXD;
950         ring->rx_mini_max_pending = 0;
951         ring->rx_jumbo_max_pending = 0;
952         ring->rx_pending = rx_ring->count;
953         ring->tx_pending = tx_ring->count;
954         ring->rx_mini_pending = 0;
955         ring->rx_jumbo_pending = 0;
956 }
957
958 static int ixgbe_set_ringparam(struct net_device *netdev,
959                                struct ethtool_ringparam *ring)
960 {
961         struct ixgbe_adapter *adapter = netdev_priv(netdev);
962         struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
963         int i, err = 0;
964         u32 new_rx_count, new_tx_count;
965         bool need_update = false;
966
967         if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
968                 return -EINVAL;
969
970         new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
971         new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
972         new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
973
974         new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
975         new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
976         new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
977
978         if ((new_tx_count == adapter->tx_ring[0]->count) &&
979             (new_rx_count == adapter->rx_ring[0]->count)) {
980                 /* nothing to do */
981                 return 0;
982         }
983
984         while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
985                 usleep_range(1000, 2000);
986
987         if (!netif_running(adapter->netdev)) {
988                 for (i = 0; i < adapter->num_tx_queues; i++)
989                         adapter->tx_ring[i]->count = new_tx_count;
990                 for (i = 0; i < adapter->num_rx_queues; i++)
991                         adapter->rx_ring[i]->count = new_rx_count;
992                 adapter->tx_ring_count = new_tx_count;
993                 adapter->rx_ring_count = new_rx_count;
994                 goto clear_reset;
995         }
996
997         temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
998         if (!temp_tx_ring) {
999                 err = -ENOMEM;
1000                 goto clear_reset;
1001         }
1002
1003         if (new_tx_count != adapter->tx_ring_count) {
1004                 for (i = 0; i < adapter->num_tx_queues; i++) {
1005                         memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
1006                                sizeof(struct ixgbe_ring));
1007                         temp_tx_ring[i].count = new_tx_count;
1008                         err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
1009                         if (err) {
1010                                 while (i) {
1011                                         i--;
1012                                         ixgbe_free_tx_resources(&temp_tx_ring[i]);
1013                                 }
1014                                 goto clear_reset;
1015                         }
1016                 }
1017                 need_update = true;
1018         }
1019
1020         temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
1021         if (!temp_rx_ring) {
1022                 err = -ENOMEM;
1023                 goto err_setup;
1024         }
1025
1026         if (new_rx_count != adapter->rx_ring_count) {
1027                 for (i = 0; i < adapter->num_rx_queues; i++) {
1028                         memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
1029                                sizeof(struct ixgbe_ring));
1030                         temp_rx_ring[i].count = new_rx_count;
1031                         err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
1032                         if (err) {
1033                                 while (i) {
1034                                         i--;
1035                                         ixgbe_free_rx_resources(&temp_rx_ring[i]);
1036                                 }
1037                                 goto err_setup;
1038                         }
1039                 }
1040                 need_update = true;
1041         }
1042
1043         /* if rings need to be updated, here's the place to do it in one shot */
1044         if (need_update) {
1045                 ixgbe_down(adapter);
1046
1047                 /* tx */
1048                 if (new_tx_count != adapter->tx_ring_count) {
1049                         for (i = 0; i < adapter->num_tx_queues; i++) {
1050                                 ixgbe_free_tx_resources(adapter->tx_ring[i]);
1051                                 memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
1052                                        sizeof(struct ixgbe_ring));
1053                         }
1054                         adapter->tx_ring_count = new_tx_count;
1055                 }
1056
1057                 /* rx */
1058                 if (new_rx_count != adapter->rx_ring_count) {
1059                         for (i = 0; i < adapter->num_rx_queues; i++) {
1060                                 ixgbe_free_rx_resources(adapter->rx_ring[i]);
1061                                 memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
1062                                        sizeof(struct ixgbe_ring));
1063                         }
1064                         adapter->rx_ring_count = new_rx_count;
1065                 }
1066                 ixgbe_up(adapter);
1067         }
1068
1069         vfree(temp_rx_ring);
1070 err_setup:
1071         vfree(temp_tx_ring);
1072 clear_reset:
1073         clear_bit(__IXGBE_RESETTING, &adapter->state);
1074         return err;
1075 }
1076
1077 static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1078 {
1079         switch (sset) {
1080         case ETH_SS_TEST:
1081                 return IXGBE_TEST_LEN;
1082         case ETH_SS_STATS:
1083                 return IXGBE_STATS_LEN;
1084         default:
1085                 return -EOPNOTSUPP;
1086         }
1087 }
1088
1089 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1090                                     struct ethtool_stats *stats, u64 *data)
1091 {
1092         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1093         struct rtnl_link_stats64 temp;
1094         const struct rtnl_link_stats64 *net_stats;
1095         unsigned int start;
1096         struct ixgbe_ring *ring;
1097         int i, j;
1098         char *p = NULL;
1099
1100         ixgbe_update_stats(adapter);
1101         net_stats = dev_get_stats(netdev, &temp);
1102         for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1103                 switch (ixgbe_gstrings_stats[i].type) {
1104                 case NETDEV_STATS:
1105                         p = (char *) net_stats +
1106                                         ixgbe_gstrings_stats[i].stat_offset;
1107                         break;
1108                 case IXGBE_STATS:
1109                         p = (char *) adapter +
1110                                         ixgbe_gstrings_stats[i].stat_offset;
1111                         break;
1112                 }
1113
1114                 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1115                            sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1116         }
1117         for (j = 0; j < adapter->num_tx_queues; j++) {
1118                 ring = adapter->tx_ring[j];
1119                 do {
1120                         start = u64_stats_fetch_begin_bh(&ring->syncp);
1121                         data[i]   = ring->stats.packets;
1122                         data[i+1] = ring->stats.bytes;
1123                 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1124                 i += 2;
1125         }
1126         for (j = 0; j < adapter->num_rx_queues; j++) {
1127                 ring = adapter->rx_ring[j];
1128                 do {
1129                         start = u64_stats_fetch_begin_bh(&ring->syncp);
1130                         data[i]   = ring->stats.packets;
1131                         data[i+1] = ring->stats.bytes;
1132                 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1133                 i += 2;
1134         }
1135         if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1136                 for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
1137                         data[i++] = adapter->stats.pxontxc[j];
1138                         data[i++] = adapter->stats.pxofftxc[j];
1139                 }
1140                 for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
1141                         data[i++] = adapter->stats.pxonrxc[j];
1142                         data[i++] = adapter->stats.pxoffrxc[j];
1143                 }
1144         }
1145 }
1146
1147 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1148                               u8 *data)
1149 {
1150         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1151         char *p = (char *)data;
1152         int i;
1153
1154         switch (stringset) {
1155         case ETH_SS_TEST:
1156                 memcpy(data, *ixgbe_gstrings_test,
1157                        IXGBE_TEST_LEN * ETH_GSTRING_LEN);
1158                 break;
1159         case ETH_SS_STATS:
1160                 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1161                         memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1162                                ETH_GSTRING_LEN);
1163                         p += ETH_GSTRING_LEN;
1164                 }
1165                 for (i = 0; i < adapter->num_tx_queues; i++) {
1166                         sprintf(p, "tx_queue_%u_packets", i);
1167                         p += ETH_GSTRING_LEN;
1168                         sprintf(p, "tx_queue_%u_bytes", i);
1169                         p += ETH_GSTRING_LEN;
1170                 }
1171                 for (i = 0; i < adapter->num_rx_queues; i++) {
1172                         sprintf(p, "rx_queue_%u_packets", i);
1173                         p += ETH_GSTRING_LEN;
1174                         sprintf(p, "rx_queue_%u_bytes", i);
1175                         p += ETH_GSTRING_LEN;
1176                 }
1177                 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1178                         for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
1179                                 sprintf(p, "tx_pb_%u_pxon", i);
1180                                 p += ETH_GSTRING_LEN;
1181                                 sprintf(p, "tx_pb_%u_pxoff", i);
1182                                 p += ETH_GSTRING_LEN;
1183                         }
1184                         for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
1185                                 sprintf(p, "rx_pb_%u_pxon", i);
1186                                 p += ETH_GSTRING_LEN;
1187                                 sprintf(p, "rx_pb_%u_pxoff", i);
1188                                 p += ETH_GSTRING_LEN;
1189                         }
1190                 }
1191                 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1192                 break;
1193         }
1194 }
1195
1196 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1197 {
1198         struct ixgbe_hw *hw = &adapter->hw;
1199         bool link_up;
1200         u32 link_speed = 0;
1201         *data = 0;
1202
1203         hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1204         if (link_up)
1205                 return *data;
1206         else
1207                 *data = 1;
1208         return *data;
1209 }
1210
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;	/* offset of the first register to test */
	u8  array_len;	/* number of registers covered by this entry */
	u8  test_type;	/* one of the *_TEST values defined below */
	u32 mask;	/* bits expected to read back as written */
	u32 write;	/* value/pattern limit written during the test */
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1	/* write patterns, verify masked read-back */
#define SET_READ_TEST	2	/* write once, verify masked read-back */
#define WRITE_NO_TEST	3	/* setup write only, no verification */
#define TABLE32_TEST	4	/* pattern test over a 4-byte-strided table */
#define TABLE64_TEST_LO	5	/* low dword of an 8-byte-strided table */
#define TABLE64_TEST_HI	6	/* high dword of an 8-byte-strided table */
1236
/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable the first four RX queues before exercising RDT ... */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	/* ... and disable them again afterwards. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }	/* terminator: reg == 0 ends the table walk */
};
1260
/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }	/* terminator: reg == 0 ends the table walk */
};
1288
1289 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1290                              u32 mask, u32 write)
1291 {
1292         u32 pat, val, before;
1293         static const u32 test_pattern[] = {
1294                 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1295
1296         for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1297                 before = readl(adapter->hw.hw_addr + reg);
1298                 writel((test_pattern[pat] & write),
1299                        (adapter->hw.hw_addr + reg));
1300                 val = readl(adapter->hw.hw_addr + reg);
1301                 if (val != (test_pattern[pat] & write & mask)) {
1302                         e_err(drv, "pattern test reg %04X failed: got "
1303                               "0x%08X expected 0x%08X\n",
1304                               reg, val, (test_pattern[pat] & write & mask));
1305                         *data = reg;
1306                         writel(before, adapter->hw.hw_addr + reg);
1307                         return 1;
1308                 }
1309                 writel(before, adapter->hw.hw_addr + reg);
1310         }
1311         return 0;
1312 }
1313
1314 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1315                               u32 mask, u32 write)
1316 {
1317         u32 val, before;
1318         before = readl(adapter->hw.hw_addr + reg);
1319         writel((write & mask), (adapter->hw.hw_addr + reg));
1320         val = readl(adapter->hw.hw_addr + reg);
1321         if ((write & mask) != (val & mask)) {
1322                 e_err(drv, "set/check reg %04X test failed: got 0x%08X "
1323                       "expected 0x%08X\n", reg, (val & mask), (write & mask));
1324                 *data = reg;
1325                 writel(before, (adapter->hw.hw_addr + reg));
1326                 return 1;
1327         }
1328         writel(before, (adapter->hw.hw_addr + reg));
1329         return 0;
1330 }
1331
/* Run reg_pattern_test() on @reg; on failure the failing register is
 * recorded in *data and 1 is returned from the *enclosing* function.
 */
#define REG_PATTERN_TEST(reg, mask, write)                                    \
	do {                                                                  \
		if (reg_pattern_test(adapter, data, reg, mask, write))        \
			return 1;                                             \
	} while (0)                                                           \


/* Run reg_set_and_check() on @reg; on failure the failing register is
 * recorded in *data and 1 is returned from the *enclosing* function.
 */
#define REG_SET_AND_CHECK(reg, mask, write)                                   \
	do {                                                                  \
		if (reg_set_and_check(adapter, data, reg, mask, write))       \
			return 1;                                             \
	} while (0)                                                           \
1344
1345 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1346 {
1347         const struct ixgbe_reg_test *test;
1348         u32 value, before, after;
1349         u32 i, toggle;
1350
1351         switch (adapter->hw.mac.type) {
1352         case ixgbe_mac_82598EB:
1353                 toggle = 0x7FFFF3FF;
1354                 test = reg_test_82598;
1355                 break;
1356         case ixgbe_mac_82599EB:
1357         case ixgbe_mac_X540:
1358                 toggle = 0x7FFFF30F;
1359                 test = reg_test_82599;
1360                 break;
1361         default:
1362                 *data = 1;
1363                 return 1;
1364                 break;
1365         }
1366
1367         /*
1368          * Because the status register is such a special case,
1369          * we handle it separately from the rest of the register
1370          * tests.  Some bits are read-only, some toggle, and some
1371          * are writeable on newer MACs.
1372          */
1373         before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
1374         value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
1375         IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
1376         after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
1377         if (value != after) {
1378                 e_err(drv, "failed STATUS register test got: 0x%08X "
1379                       "expected: 0x%08X\n", after, value);
1380                 *data = 1;
1381                 return 1;
1382         }
1383         /* restore previous status */
1384         IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
1385
1386         /*
1387          * Perform the remainder of the register test, looping through
1388          * the test table until we either fail or reach the null entry.
1389          */
1390         while (test->reg) {
1391                 for (i = 0; i < test->array_len; i++) {
1392                         switch (test->test_type) {
1393                         case PATTERN_TEST:
1394                                 REG_PATTERN_TEST(test->reg + (i * 0x40),
1395                                                  test->mask,
1396                                                  test->write);
1397                                 break;
1398                         case SET_READ_TEST:
1399                                 REG_SET_AND_CHECK(test->reg + (i * 0x40),
1400                                                   test->mask,
1401                                                   test->write);
1402                                 break;
1403                         case WRITE_NO_TEST:
1404                                 writel(test->write,
1405                                        (adapter->hw.hw_addr + test->reg)
1406                                        + (i * 0x40));
1407                                 break;
1408                         case TABLE32_TEST:
1409                                 REG_PATTERN_TEST(test->reg + (i * 4),
1410                                                  test->mask,
1411                                                  test->write);
1412                                 break;
1413                         case TABLE64_TEST_LO:
1414                                 REG_PATTERN_TEST(test->reg + (i * 8),
1415                                                  test->mask,
1416                                                  test->write);
1417                                 break;
1418                         case TABLE64_TEST_HI:
1419                                 REG_PATTERN_TEST((test->reg + 4) + (i * 8),
1420                                                  test->mask,
1421                                                  test->write);
1422                                 break;
1423                         }
1424                 }
1425                 test++;
1426         }
1427
1428         *data = 0;
1429         return 0;
1430 }
1431
1432 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1433 {
1434         struct ixgbe_hw *hw = &adapter->hw;
1435         if (hw->eeprom.ops.validate_checksum(hw, NULL))
1436                 *data = 1;
1437         else
1438                 *data = 0;
1439         return *data;
1440 }
1441
1442 static irqreturn_t ixgbe_test_intr(int irq, void *data)
1443 {
1444         struct net_device *netdev = (struct net_device *) data;
1445         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1446
1447         adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1448
1449         return IRQ_HANDLED;
1450 }
1451
1452 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1453 {
1454         struct net_device *netdev = adapter->netdev;
1455         u32 mask, i = 0, shared_int = true;
1456         u32 irq = adapter->pdev->irq;
1457
1458         *data = 0;
1459
1460         /* Hook up test interrupt handler just for this test */
1461         if (adapter->msix_entries) {
1462                 /* NOTE: we don't test MSI-X interrupts here, yet */
1463                 return 0;
1464         } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1465                 shared_int = false;
1466                 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1467                                 netdev)) {
1468                         *data = 1;
1469                         return -1;
1470                 }
1471         } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1472                                 netdev->name, netdev)) {
1473                 shared_int = false;
1474         } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1475                                netdev->name, netdev)) {
1476                 *data = 1;
1477                 return -1;
1478         }
1479         e_info(hw, "testing %s interrupt\n", shared_int ?
1480                "shared" : "unshared");
1481
1482         /* Disable all the interrupts */
1483         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1484         usleep_range(10000, 20000);
1485
1486         /* Test each interrupt */
1487         for (; i < 10; i++) {
1488                 /* Interrupt to test */
1489                 mask = 1 << i;
1490
1491                 if (!shared_int) {
1492                         /*
1493                          * Disable the interrupts to be reported in
1494                          * the cause register and then force the same
1495                          * interrupt and see if one gets posted.  If
1496                          * an interrupt was posted to the bus, the
1497                          * test failed.
1498                          */
1499                         adapter->test_icr = 0;
1500                         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1501                                         ~mask & 0x00007FFF);
1502                         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1503                                         ~mask & 0x00007FFF);
1504                         usleep_range(10000, 20000);
1505
1506                         if (adapter->test_icr & mask) {
1507                                 *data = 3;
1508                                 break;
1509                         }
1510                 }
1511
1512                 /*
1513                  * Enable the interrupt to be reported in the cause
1514                  * register and then force the same interrupt and see
1515                  * if one gets posted.  If an interrupt was not posted
1516                  * to the bus, the test failed.
1517                  */
1518                 adapter->test_icr = 0;
1519                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1520                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1521                 usleep_range(10000, 20000);
1522
1523                 if (!(adapter->test_icr &mask)) {
1524                         *data = 4;
1525                         break;
1526                 }
1527
1528                 if (!shared_int) {
1529                         /*
1530                          * Disable the other interrupts to be reported in
1531                          * the cause register and then force the other
1532                          * interrupts and see if any get posted.  If
1533                          * an interrupt was posted to the bus, the
1534                          * test failed.
1535                          */
1536                         adapter->test_icr = 0;
1537                         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1538                                         ~mask & 0x00007FFF);
1539                         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1540                                         ~mask & 0x00007FFF);
1541                         usleep_range(10000, 20000);
1542
1543                         if (adapter->test_icr) {
1544                                 *data = 5;
1545                                 break;
1546                         }
1547                 }
1548         }
1549
1550         /* Disable all the interrupts */
1551         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1552         usleep_range(10000, 20000);
1553
1554         /* Unhook test interrupt handler */
1555         free_irq(irq, netdev);
1556
1557         return *data;
1558 }
1559
1560 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1561 {
1562         struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1563         struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1564         struct ixgbe_hw *hw = &adapter->hw;
1565         u32 reg_ctl;
1566
1567         /* shut down the DMA engines now so they can be reinitialized later */
1568
1569         /* first Rx */
1570         reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1571         reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1572         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1573         ixgbe_disable_rx_queue(adapter, rx_ring);
1574
1575         /* now Tx */
1576         reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1577         reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1578         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1579
1580         switch (hw->mac.type) {
1581         case ixgbe_mac_82599EB:
1582         case ixgbe_mac_X540:
1583                 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1584                 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1585                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1586                 break;
1587         default:
1588                 break;
1589         }
1590
1591         ixgbe_reset(adapter);
1592
1593         ixgbe_free_tx_resources(&adapter->test_tx_ring);
1594         ixgbe_free_rx_resources(&adapter->test_rx_ring);
1595 }
1596
/**
 * ixgbe_setup_desc_rings - allocate and configure the loopback test rings
 * @adapter: board private structure
 *
 * Builds a dedicated Tx and Rx descriptor ring (adapter->test_tx_ring /
 * test_rx_ring) reusing queue 0's register indices, enables the Tx DMA
 * engine on MACs that gate it behind DMATXCTL.TE, and re-enables Rx with
 * the DMBYPS bypass bit set.
 *
 * Returns 0 on success, 1 if Tx ring allocation fails (nothing to free
 * yet), or 4 if Rx ring allocation fails (Tx resources are freed via the
 * err_nomem path).
 */
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	u32 rctl, reg_data;
	int ret_val;
	int err;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IXGBE_DEFAULT_TXD;
	tx_ring->queue_index = 0;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	/* mirror the real queue 0 so existing register offsets apply */
	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
	tx_ring->numa_node = adapter->node;

	err = ixgbe_setup_tx_resources(tx_ring);
	if (err)
		return 1;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* 82599/X540 gate Tx DMA behind DMATXCTL.TE */
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
		break;
	default:
		break;
	}

	ixgbe_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx Descriptor ring and Rx buffers */
	rx_ring->count = IXGBE_DEFAULT_RXD;
	rx_ring->queue_index = 0;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
	rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
	rx_ring->numa_node = adapter->node;

	err = ixgbe_setup_rx_resources(rx_ring);
	if (err) {
		ret_val = 4;
		goto err_nomem;
	}

	/* Rx must be disabled while the test ring is being programmed */
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);

	ixgbe_configure_rx_ring(adapter, rx_ring);

	rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}
1659
1660 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1661 {
1662         struct ixgbe_hw *hw = &adapter->hw;
1663         u32 reg_data;
1664
1665         /* X540 needs to set the MACC.FLU bit to force link up */
1666         if (adapter->hw.mac.type == ixgbe_mac_X540) {
1667                 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MACC);
1668                 reg_data |= IXGBE_MACC_FLU;
1669                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MACC, reg_data);
1670         }
1671
1672         /* right now we only support MAC loopback in the driver */
1673         reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1674         /* Setup MAC loopback */
1675         reg_data |= IXGBE_HLREG0_LPBK;
1676         IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1677
1678         reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1679         reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1680         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
1681
1682         reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
1683         reg_data &= ~IXGBE_AUTOC_LMS_MASK;
1684         reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
1685         IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
1686         IXGBE_WRITE_FLUSH(&adapter->hw);
1687         usleep_range(10000, 20000);
1688
1689         /* Disable Atlas Tx lanes; re-enabled in reset path */
1690         if (hw->mac.type == ixgbe_mac_82598EB) {
1691                 u8 atlas;
1692
1693                 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1694                 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1695                 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1696
1697                 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1698                 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1699                 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1700
1701                 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1702                 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1703                 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1704
1705                 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1706                 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1707                 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1708         }
1709
1710         return 0;
1711 }
1712
1713 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1714 {
1715         u32 reg_data;
1716
1717         reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1718         reg_data &= ~IXGBE_HLREG0_LPBK;
1719         IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1720 }
1721
1722 static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1723                                       unsigned int frame_size)
1724 {
1725         memset(skb->data, 0xFF, frame_size);
1726         frame_size &= ~1;
1727         memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
1728         memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
1729         memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
1730 }
1731
1732 static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
1733                                     unsigned int frame_size)
1734 {
1735         frame_size &= ~1;
1736         if (*(skb->data + 3) == 0xFF) {
1737                 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1738                     (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
1739                         return 0;
1740                 }
1741         }
1742         return 13;
1743 }
1744
/**
 * ixgbe_clean_test_rings - reap completed loopback frames
 * @rx_ring: Rx test ring
 * @tx_ring: Tx test ring
 * @size: expected length of each loopback frame
 *
 * Walks the Rx ring while descriptors have the DD (done) bit set,
 * unmapping each Rx buffer, verifying its contents against the test
 * pattern, and releasing the matching Tx buffer.  Consumed Rx buffers
 * are re-posted before returning.
 *
 * Returns the number of frames whose contents checked out.
 */
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	struct ixgbe_tx_buffer *tx_buffer_info;
	const int bufsz = rx_ring->rx_buf_len;
	u32 staterr;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	/* DD set means the hardware has written this descriptor back */
	while (staterr & IXGBE_RXD_STAT_DD) {
		/* check Rx buffer */
		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];

		/* unmap Rx buffer, will be remapped by alloc_rx_buffers */
		dma_unmap_single(rx_ring->dev,
				 rx_buffer_info->dma,
				 bufsz,
				 DMA_FROM_DEVICE);
		rx_buffer_info->dma = 0;

		/* verify contents of skb */
		if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
			count++;

		/* unmap buffer on Tx side */
		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);

		/* increment Rx/Tx next to clean counters (with wrap) */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}
1801
/**
 * ixgbe_run_loopback_test - transmit and verify frames over MAC loopback
 * @adapter: board private structure
 *
 * Sends batches of 64 references to a single 1024-byte test skb on the
 * Tx test ring and verifies they all arrive intact on the Rx test ring.
 *
 * Returns 0 on success, 11 if the skb allocation fails, 12 if a
 * transmit is rejected, or 13 if received frames are missing/corrupt.
 */
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue*/
		for (i = 0; i < 64; i++) {
			/* each transmit takes its own reference on skb */
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);

	return ret_val;
}
1865
1866 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
1867 {
1868         *data = ixgbe_setup_desc_rings(adapter);
1869         if (*data)
1870                 goto out;
1871         *data = ixgbe_setup_loopback_test(adapter);
1872         if (*data)
1873                 goto err_loopback;
1874         *data = ixgbe_run_loopback_test(adapter);
1875         ixgbe_loopback_cleanup(adapter);
1876
1877 err_loopback:
1878         ixgbe_free_desc_rings(adapter);
1879 out:
1880         return *data;
1881 }
1882
/**
 * ixgbe_diag_test - ethtool self-test entry point
 * @netdev: network interface device structure
 * @eth_test: test request/result flags from the ethtool core
 * @data: per-test results: [0]=registers, [1]=eeprom, [2]=interrupt,
 *        [3]=loopback, [4]=link (0 = pass)
 *
 * Offline mode closes the interface (or resets the hardware), runs the
 * register, eeprom, interrupt, and loopback diagnostics with a reset
 * between each, then reopens the interface.  Online mode only runs the
 * link test.  Offline tests are refused while SR-IOV VFs are active.
 */
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	/* flag test-in-progress so other paths keep their hands off */
	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;
			/* any active VF makes offline testing unsafe */
			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "%s",
						    "offline diagnostic is not "
						    "supported when VFs are "
						    "present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* reset between tests so each starts from a clean state */
		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic. */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT "
			       "mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		e_info(hw, "online testing starting\n");
		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}
skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
1978
1979 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1980                                struct ethtool_wolinfo *wol)
1981 {
1982         struct ixgbe_hw *hw = &adapter->hw;
1983         int retval = 1;
1984
1985         /* WOL not supported except for the following */
1986         switch(hw->device_id) {
1987         case IXGBE_DEV_ID_82599_SFP:
1988                 /* Only this subdevice supports WOL */
1989                 if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
1990                         wol->supported = 0;
1991                         break;
1992                 }
1993                 retval = 0;
1994                 break;
1995         case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
1996                 /* All except this subdevice support WOL */
1997                 if (hw->subsystem_device_id ==
1998                     IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
1999                         wol->supported = 0;
2000                         break;
2001                 }
2002                 retval = 0;
2003                 break;
2004         case IXGBE_DEV_ID_82599_KX4:
2005                 retval = 0;
2006                 break;
2007         default:
2008                 wol->supported = 0;
2009         }
2010
2011         return retval;
2012 }
2013
2014 static void ixgbe_get_wol(struct net_device *netdev,
2015                           struct ethtool_wolinfo *wol)
2016 {
2017         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2018
2019         wol->supported = WAKE_UCAST | WAKE_MCAST |
2020                          WAKE_BCAST | WAKE_MAGIC;
2021         wol->wolopts = 0;
2022
2023         if (ixgbe_wol_exclusion(adapter, wol) ||
2024             !device_can_wakeup(&adapter->pdev->dev))
2025                 return;
2026
2027         if (adapter->wol & IXGBE_WUFC_EX)
2028                 wol->wolopts |= WAKE_UCAST;
2029         if (adapter->wol & IXGBE_WUFC_MC)
2030                 wol->wolopts |= WAKE_MCAST;
2031         if (adapter->wol & IXGBE_WUFC_BC)
2032                 wol->wolopts |= WAKE_BCAST;
2033         if (adapter->wol & IXGBE_WUFC_MAG)
2034                 wol->wolopts |= WAKE_MAGIC;
2035 }
2036
2037 static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2038 {
2039         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2040
2041         if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
2042                 return -EOPNOTSUPP;
2043
2044         if (ixgbe_wol_exclusion(adapter, wol))
2045                 return wol->wolopts ? -EOPNOTSUPP : 0;
2046
2047         adapter->wol = 0;
2048
2049         if (wol->wolopts & WAKE_UCAST)
2050                 adapter->wol |= IXGBE_WUFC_EX;
2051         if (wol->wolopts & WAKE_MCAST)
2052                 adapter->wol |= IXGBE_WUFC_MC;
2053         if (wol->wolopts & WAKE_BCAST)
2054                 adapter->wol |= IXGBE_WUFC_BC;
2055         if (wol->wolopts & WAKE_MAGIC)
2056                 adapter->wol |= IXGBE_WUFC_MAG;
2057
2058         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2059
2060         return 0;
2061 }
2062
/**
 * ixgbe_nway_reset - restart autonegotiation
 * @netdev: network interface device structure
 *
 * Restarting link negotiation requires a full reinit, which is only
 * meaningful while the interface is up.  Always returns 0.
 */
static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
2072
2073 static int ixgbe_set_phys_id(struct net_device *netdev,
2074                              enum ethtool_phys_id_state state)
2075 {
2076         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2077         struct ixgbe_hw *hw = &adapter->hw;
2078
2079         switch (state) {
2080         case ETHTOOL_ID_ACTIVE:
2081                 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2082                 return 2;
2083
2084         case ETHTOOL_ID_ON:
2085                 hw->mac.ops.led_on(hw, IXGBE_LED_ON);
2086                 break;
2087
2088         case ETHTOOL_ID_OFF:
2089                 hw->mac.ops.led_off(hw, IXGBE_LED_ON);
2090                 break;
2091
2092         case ETHTOOL_ID_INACTIVE:
2093                 /* Restore LED settings */
2094                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2095                 break;
2096         }
2097
2098         return 0;
2099 }
2100
2101 static int ixgbe_get_coalesce(struct net_device *netdev,
2102                               struct ethtool_coalesce *ec)
2103 {
2104         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2105
2106         ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
2107
2108         /* only valid if in constant ITR mode */
2109         switch (adapter->rx_itr_setting) {
2110         case 0:
2111                 /* throttling disabled */
2112                 ec->rx_coalesce_usecs = 0;
2113                 break;
2114         case 1:
2115                 /* dynamic ITR mode */
2116                 ec->rx_coalesce_usecs = 1;
2117                 break;
2118         default:
2119                 /* fixed interrupt rate mode */
2120                 ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param;
2121                 break;
2122         }
2123
2124         /* if in mixed tx/rx queues per vector mode, report only rx settings */
2125         if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2126                 return 0;
2127
2128         /* only valid if in constant ITR mode */
2129         switch (adapter->tx_itr_setting) {
2130         case 0:
2131                 /* throttling disabled */
2132                 ec->tx_coalesce_usecs = 0;
2133                 break;
2134         case 1:
2135                 /* dynamic ITR mode */
2136                 ec->tx_coalesce_usecs = 1;
2137                 break;
2138         default:
2139                 ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param;
2140                 break;
2141         }
2142
2143         return 0;
2144 }
2145
2146 /*
2147  * this function must be called before setting the new value of
2148  * rx_itr_setting
2149  */
2150 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
2151                              struct ethtool_coalesce *ec)
2152 {
2153         struct net_device *netdev = adapter->netdev;
2154
2155         if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
2156                 return false;
2157
2158         /* if interrupt rate is too high then disable RSC */
2159         if (ec->rx_coalesce_usecs != 1 &&
2160             ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
2161                 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2162                         e_info(probe, "rx-usecs set too low, "
2163                                       "disabling RSC\n");
2164                         adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2165                         return true;
2166                 }
2167         } else {
2168                 /* check the feature flag value and enable RSC if necessary */
2169                 if ((netdev->features & NETIF_F_LRO) &&
2170                     !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2171                         e_info(probe, "rx-usecs set to %d, "
2172                                       "re-enabling RSC\n",
2173                                ec->rx_coalesce_usecs);
2174                         adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2175                         return true;
2176                 }
2177         }
2178         return false;
2179 }
2180
/**
 * ixgbe_set_coalesce - set interrupt coalescing parameters from ethtool
 * @netdev: network interface device structure
 * @ec: ethtool coalesce structure holding the requested settings
 *
 * rx/tx_coalesce_usecs encoding: 0 = throttling disabled, 1 = dynamic
 * ITR, any larger value = fixed rate of (1000000 / usecs) ints/second.
 * ixgbe_update_rsc() is deliberately called before rx_itr_setting is
 * rewritten (see the comment above that helper).
 *
 * Returns 0 on success or -EINVAL for out-of-range requests.
 */
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	bool need_reset = false;

	/* don't accept tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
	   && ec->tx_coalesce_usecs)
		return -EINVAL;

	/* 0 means "leave the tx clean-up work limit unchanged" */
	if (ec->tx_max_coalesced_frames_irq)
		adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;

	if (ec->rx_coalesce_usecs > 1) {
		/* check the limits */
		if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
		    (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
			return -EINVAL;

		/* check the old value and enable RSC if necessary */
		need_reset = ixgbe_update_rsc(adapter, ec);

		/* store the value in ints/second */
		adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;

		/* static value of interrupt rate */
		adapter->rx_itr_setting = adapter->rx_eitr_param;
		/* clear the lower bit as its used for dynamic state */
		adapter->rx_itr_setting &= ~1;
	} else if (ec->rx_coalesce_usecs == 1) {
		/* check the old value and enable RSC if necessary */
		need_reset = ixgbe_update_rsc(adapter, ec);

		/* 1 means dynamic mode */
		adapter->rx_eitr_param = 20000;
		adapter->rx_itr_setting = 1;
	} else {
		/* check the old value and enable RSC if necessary */
		need_reset = ixgbe_update_rsc(adapter, ec);
		/*
		 * any other value means disable eitr, which is best
		 * served by setting the interrupt rate very high
		 */
		adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
		adapter->rx_itr_setting = 0;
	}

	if (ec->tx_coalesce_usecs > 1) {
		/*
		 * don't have to worry about max_int as above because
		 * tx vectors don't do hardware RSC (an rx function)
		 */
		/* check the limits */
		if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
		    (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
			return -EINVAL;

		/* store the value in ints/second */
		adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs;

		/* static value of interrupt rate */
		adapter->tx_itr_setting = adapter->tx_eitr_param;

		/* clear the lower bit as its used for dynamic state */
		adapter->tx_itr_setting &= ~1;
	} else if (ec->tx_coalesce_usecs == 1) {
		/* 1 means dynamic mode */
		adapter->tx_eitr_param = 10000;
		adapter->tx_itr_setting = 1;
	} else {
		adapter->tx_eitr_param = IXGBE_MAX_INT_RATE;
		adapter->tx_itr_setting = 0;
	}

	/* MSI/MSIx Interrupt Mode */
	if (adapter->flags &
	    (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) {
		int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		for (i = 0; i < num_vectors; i++) {
			q_vector = adapter->q_vector[i];
			if (q_vector->tx.count && !q_vector->rx.count)
				/* tx only */
				q_vector->eitr = adapter->tx_eitr_param;
			else
				/* rx only or mixed */
				q_vector->eitr = adapter->rx_eitr_param;
			q_vector->tx.work_limit = adapter->tx_work_limit;
			/* push the new EITR value to the hardware register */
			ixgbe_write_eitr(q_vector);
		}
	/* Legacy Interrupt Mode */
	} else {
		q_vector = adapter->q_vector[0];
		q_vector->eitr = adapter->rx_eitr_param;
		q_vector->tx.work_limit = adapter->tx_work_limit;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
2291
/**
 * ixgbe_set_flags - handle ethtool feature-flag changes
 * @netdev: network interface device structure
 * @data: requested ETH_FLAG_* bitmap
 *
 * Validates and applies LRO/RSC, n-tuple (Flow Director perfect
 * filters), VLAN strip and RXHASH flag changes, resetting the device
 * when a change cannot take effect without one.
 *
 * Returns 0 on success, -EINVAL or -EOPNOTSUPP on rejected requests.
 */
static int ixgbe_set_flags(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool need_reset = false;
	int rc;

#ifdef CONFIG_IXGBE_DCB
	/* DCB requires VLAN tag stripping to stay enabled */
	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
	    !(data & ETH_FLAG_RXVLAN))
		return -EINVAL;
#endif

	/* toggling rx VLAN stripping requires a reset */
	need_reset = (data & ETH_FLAG_RXVLAN) !=
		     (netdev->features & NETIF_F_HW_VLAN_RX);

	/* RXHASH is only meaningful when RSS is active */
	if ((data & ETH_FLAG_RXHASH) &&
	    !(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		return -EOPNOTSUPP;

	rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
				  ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
				  ETH_FLAG_RXHASH);
	if (rc)
		return rc;

	/* if state changes we need to update adapter->flags and reset */
	if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
	    (!!(data & ETH_FLAG_LRO) !=
	     !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
		if ((data & ETH_FLAG_LRO) &&
		    (!adapter->rx_itr_setting ||
		     (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
			e_info(probe, "rx-usecs set too low, "
				      "not enabling RSC.\n");
		} else {
			/* XOR flips RSC_ENABLED - state is known changed here */
			adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X540:
				/* X540 can reconfigure RSC without a reset */
				ixgbe_set_rsc(adapter);
				break;
			case ixgbe_mac_82599EB:
				need_reset = true;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Check if Flow Director n-tuple support was enabled or disabled.  If
	 * the state changed, we need to reset.
	 */
	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
		/* turn off ATR, enable perfect filters and reset */
		if (data & ETH_FLAG_NTUPLE) {
			adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
			need_reset = true;
		}
	} else if (!(data & ETH_FLAG_NTUPLE)) {
		/* turn off Flow Director, set ATR and reset */
		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
		if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
		    !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
		need_reset = true;
	}

	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
2366
/**
 * ixgbe_get_ethtool_fdir_entry - report one Flow Director filter rule
 * @adapter: board private structure
 * @cmd: ethtool rxnfc command; cmd->fs.location selects the rule
 *
 * Looks up the software rule at fsp->location in the sorted filter list
 * and converts it (plus the shared per-port mask) back into an ethtool
 * flow spec.  Returns -EINVAL if no rule exists at that index.
 */
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node, *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	/* list is sorted by sw_idx; stop at the first rule at/past location */
	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	/* flex bytes were stashed in the vlan_etype ext field on add */
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}
2435
2436 static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2437                                       struct ethtool_rxnfc *cmd,
2438                                       u32 *rule_locs)
2439 {
2440         struct hlist_node *node, *node2;
2441         struct ixgbe_fdir_filter *rule;
2442         int cnt = 0;
2443
2444         /* report total rule count */
2445         cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2446
2447         hlist_for_each_entry_safe(rule, node, node2,
2448                                   &adapter->fdir_filter_list, fdir_node) {
2449                 if (cnt == cmd->rule_cnt)
2450                         return -EMSGSIZE;
2451                 rule_locs[cnt] = rule->sw_idx;
2452                 cnt++;
2453         }
2454
2455         return 0;
2456 }
2457
2458 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2459                            void *rule_locs)
2460 {
2461         struct ixgbe_adapter *adapter = netdev_priv(dev);
2462         int ret = -EOPNOTSUPP;
2463
2464         switch (cmd->cmd) {
2465         case ETHTOOL_GRXRINGS:
2466                 cmd->data = adapter->num_rx_queues;
2467                 ret = 0;
2468                 break;
2469         case ETHTOOL_GRXCLSRLCNT:
2470                 cmd->rule_cnt = adapter->fdir_filter_count;
2471                 ret = 0;
2472                 break;
2473         case ETHTOOL_GRXCLSRULE:
2474                 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2475                 break;
2476         case ETHTOOL_GRXCLSRLALL:
2477                 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd,
2478                                                  (u32 *)rule_locs);
2479                 break;
2480         default:
2481                 break;
2482         }
2483
2484         return ret;
2485 }
2486
/**
 * ixgbe_update_ethtool_fdir_entry - insert, replace or delete a filter
 * @adapter: board private structure
 * @input: new filter to insert, or NULL to delete the rule at @sw_idx
 * @sw_idx: software index of the rule being updated
 *
 * The filter list is kept sorted by sw_idx.  Callers hold
 * adapter->fdir_perfect_lock (see ixgbe_add/del_ethtool_fdir_entry).
 *
 * Returns 0 on success; for a delete (@input == NULL), -EINVAL when no
 * rule existed at @sw_idx.
 */
static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					   struct ixgbe_fdir_filter *input,
					   u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node, *node2, *parent;
	struct ixgbe_fdir_filter *rule;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	/* walk the sorted list to find the insertion point / matching rule */
	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = node;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		/* only erase from HW if the new filter hashes differently */
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_after(parent, &input->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}
2543
/**
 * ixgbe_flowspec_to_flow_type - map an ethtool flow spec to an ATR type
 * @fsp: ethtool flow spec from user space
 * @flow_type: output, receives the IXGBE_ATR_FLOW_TYPE_* value
 *
 * Returns 1 when the flow type was recognized and stored, 0 otherwise.
 */
static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			/* proto 0 is a wildcard only if its mask is 0 too */
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			/* fall through - non-zero mask is unsupported */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
2583
/**
 * ixgbe_add_ethtool_fdir_entry - add a Flow Director perfect filter
 * @adapter: board private structure
 * @cmd: ethtool rxnfc command carrying the flow spec to program
 *
 * Validates the request, converts the ethtool flow spec into the
 * hardware filter format, programs the shared input mask (first rule
 * only - the 82599 supports a single mask per port), writes the filter
 * to hardware and records it in the software list.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/*
	 * Don't allow programming if the action is a queue greater than
	 * the number of online Rx queues.
	 */
	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
	    (fsp->ring_cookie >= adapter->num_rx_queues))
		return -EINVAL;

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	/* for raw IPv4, only match on the IPv6-vs-IPv4 bit, not L4 type */
	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	/* extended fields carry vm_pool, vlan and flex bytes */
	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx,
				(input->action == IXGBE_FDIR_DROP_QUEUE) ?
				IXGBE_FDIR_DROP_QUEUE :
				adapter->rx_ring[input->action]->reg_idx);
	if (err)
		goto err_out_w_lock;

	/* record in (and take ownership via) the software filter list */
	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
2699
2700 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2701                                         struct ethtool_rxnfc *cmd)
2702 {
2703         struct ethtool_rx_flow_spec *fsp =
2704                 (struct ethtool_rx_flow_spec *)&cmd->fs;
2705         int err;
2706
2707         spin_lock(&adapter->fdir_perfect_lock);
2708         err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2709         spin_unlock(&adapter->fdir_perfect_lock);
2710
2711         return err;
2712 }
2713
2714 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2715 {
2716         struct ixgbe_adapter *adapter = netdev_priv(dev);
2717         int ret = -EOPNOTSUPP;
2718
2719         switch (cmd->cmd) {
2720         case ETHTOOL_SRXCLSRLINS:
2721                 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
2722                 break;
2723         case ETHTOOL_SRXCLSRLDEL:
2724                 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
2725                 break;
2726         default:
2727                 break;
2728         }
2729
2730         return ret;
2731 }
2732
/* ethtool operations table for ixgbe netdevs; installed by
 * ixgbe_set_ethtool_ops() below.  Callbacks not listed here fall back
 * to the ethtool core defaults.
 */
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_settings           = ixgbe_get_settings,
	.set_settings           = ixgbe_set_settings,
	.get_drvinfo            = ixgbe_get_drvinfo,
	.get_regs_len           = ixgbe_get_regs_len,
	.get_regs               = ixgbe_get_regs,
	.get_wol                = ixgbe_get_wol,
	.set_wol                = ixgbe_set_wol,
	.nway_reset             = ixgbe_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = ixgbe_get_eeprom_len,
	.get_eeprom             = ixgbe_get_eeprom,
	.get_ringparam          = ixgbe_get_ringparam,
	.set_ringparam          = ixgbe_set_ringparam,
	.get_pauseparam         = ixgbe_get_pauseparam,
	.set_pauseparam         = ixgbe_set_pauseparam,
	.get_rx_csum            = ixgbe_get_rx_csum,
	.set_rx_csum            = ixgbe_set_rx_csum,
	.get_tx_csum            = ixgbe_get_tx_csum,
	.set_tx_csum            = ixgbe_set_tx_csum,
	.get_sg                 = ethtool_op_get_sg,
	.set_sg                 = ethtool_op_set_sg,
	.get_msglevel           = ixgbe_get_msglevel,
	.set_msglevel           = ixgbe_set_msglevel,
	.get_tso                = ethtool_op_get_tso,
	.set_tso                = ixgbe_set_tso,
	.self_test              = ixgbe_diag_test,
	.get_strings            = ixgbe_get_strings,
	.set_phys_id            = ixgbe_set_phys_id,
	.get_sset_count         = ixgbe_get_sset_count,
	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
	.get_coalesce           = ixgbe_get_coalesce,
	.set_coalesce           = ixgbe_set_coalesce,
	.get_flags              = ethtool_op_get_flags,
	.set_flags              = ixgbe_set_flags,
	.get_rxnfc              = ixgbe_get_rxnfc,
	.set_rxnfc              = ixgbe_set_rxnfc,
};
2771
/**
 * ixgbe_set_ethtool_ops - attach the ixgbe ethtool ops to a netdev
 * @netdev: network interface device structure
 */
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
}