drivers/net/sfc/falcon.c
1 /****************************************************************************
2  * Driver for Solarflare Solarstorm network controllers and boards
3  * Copyright 2005-2006 Fen Systems Ltd.
4  * Copyright 2006-2009 Solarflare Communications Inc.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published
8  * by the Free Software Foundation, incorporated herein by reference.
9  */
10
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/pci.h>
14 #include <linux/module.h>
15 #include <linux/seq_file.h>
16 #include <linux/i2c.h>
17 #include <linux/mii.h>
18 #include <linux/slab.h>
19 #include "net_driver.h"
20 #include "bitfield.h"
21 #include "efx.h"
22 #include "mac.h"
23 #include "spi.h"
24 #include "nic.h"
25 #include "regs.h"
26 #include "io.h"
27 #include "mdio_10g.h"
28 #include "phy.h"
29 #include "workarounds.h"
30
31 /* Hardware control for SFC4000 (aka Falcon). */
32
33 static const unsigned int
34 /* "Large" EEPROM device: Atmel AT25640 or similar
35  * 8 KB, 16-bit address, 32 B write block */
36 large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
37                      | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
38                      | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
39 /* Default flash device: Atmel AT25F1024
40  * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
41 default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
42                       | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
43                       | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
44                       | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
45                       | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
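/* Decoding the constants above (sizes are powers of two; the fields are
 * unpacked in falcon_spi_device_init() below):
 *   large_eeprom_type:  size = 1 << 13 = 8 KB, 2 address bytes,
 *                       write block = 1 << 5 = 32 B
 *   default_flash_type: size = 1 << 17 = 128 KB, 3 address bytes,
 *                       erase command 0x52, erase block = 1 << 15 = 32 KB,
 *                       write block = 1 << 8 = 256 B
 * which matches the part descriptions in the comments above. */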
46
47 /**************************************************************************
48  *
49  * I2C bus - this is a bit-bashing interface using GPIO pins
50  * Note that it uses the output enables to tristate the outputs
51  * SDA is the data pin and SCL is the clock
52  *
53  **************************************************************************
54  */
55 static void falcon_setsda(void *data, int state)
56 {
57         struct efx_nic *efx = (struct efx_nic *)data;
58         efx_oword_t reg;
59
60         efx_reado(efx, &reg, FR_AB_GPIO_CTL);
61         EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
62         efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
63 }
64
65 static void falcon_setscl(void *data, int state)
66 {
67         struct efx_nic *efx = (struct efx_nic *)data;
68         efx_oword_t reg;
69
70         efx_reado(efx, &reg, FR_AB_GPIO_CTL);
71         EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
72         efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
73 }
74
75 static int falcon_getsda(void *data)
76 {
77         struct efx_nic *efx = (struct efx_nic *)data;
78         efx_oword_t reg;
79
80         efx_reado(efx, &reg, FR_AB_GPIO_CTL);
81         return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
82 }
83
84 static int falcon_getscl(void *data)
85 {
86         struct efx_nic *efx = (struct efx_nic *)data;
87         efx_oword_t reg;
88
89         efx_reado(efx, &reg, FR_AB_GPIO_CTL);
90         return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
91 }
92
93 static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
94         .setsda         = falcon_setsda,
95         .setscl         = falcon_setscl,
96         .getsda         = falcon_getsda,
97         .getscl         = falcon_getscl,
98         .udelay         = 5,
99         /* Wait up to 50 ms for slave to let us pull SCL high */
100         .timeout        = DIV_ROUND_UP(HZ, 20),
101 };
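/* The handlers above emulate open-drain I2C lines by toggling the GPIO
 * output enables rather than the output values: enabling the output drives
 * the line low (the output latch is assumed to hold 0), while disabling it
 * tristates the pin so the external pull-up can raise it, hence the !state
 * written to the *_OEN fields.  With .udelay = 5 (the half-period in
 * microseconds, per the usual i2c-algo-bit convention) the bus runs at
 * roughly 100 kHz. */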
102
103 static void falcon_push_irq_moderation(struct efx_channel *channel)
104 {
105         efx_dword_t timer_cmd;
106         struct efx_nic *efx = channel->efx;
107
108         /* Set timer register */
109         if (channel->irq_moderation) {
110                 EFX_POPULATE_DWORD_2(timer_cmd,
111                                      FRF_AB_TC_TIMER_MODE,
112                                      FFE_BB_TIMER_MODE_INT_HLDOFF,
113                                      FRF_AB_TC_TIMER_VAL,
114                                      channel->irq_moderation - 1);
115         } else {
116                 EFX_POPULATE_DWORD_2(timer_cmd,
117                                      FRF_AB_TC_TIMER_MODE,
118                                      FFE_BB_TIMER_MODE_DIS,
119                                      FRF_AB_TC_TIMER_VAL, 0);
120         }
121         BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
122         efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
123                                channel->channel);
124 }
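/* Note on the values written above: channel->irq_moderation is assumed to
 * already be in hardware timer ticks, so a moderation setting of N ticks
 * programs N - 1 into TC_TIMER_VAL with the interrupt hold-off mode enabled,
 * while a setting of 0 disables the timer altogether. */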
125
126 static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
127
128 static void falcon_prepare_flush(struct efx_nic *efx)
129 {
130         falcon_deconfigure_mac_wrapper(efx);
131
132         /* Wait for the TX and RX FIFOs to get to the next packet boundary
133          * (~1ms without back-pressure), then to drain the remainder of the
134          * FIFOs at data path speeds (negligible), with a healthy margin. */
135         msleep(10);
136 }
137
138 /* Acknowledge a legacy interrupt from Falcon
139  *
140  * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
141  *
142  * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
143  * BIU. Interrupt acknowledge is read-sensitive, so we must write instead
144  * (then read to ensure the BIU collector is flushed)
145  *
146  * NB most hardware supports MSI interrupts
147  */
148 inline void falcon_irq_ack_a1(struct efx_nic *efx)
149 {
150         efx_dword_t reg;
151
152         EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
153         efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
154         efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
155 }
156
157
158 irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
159 {
160         struct efx_nic *efx = dev_id;
161         efx_oword_t *int_ker = efx->irq_status.addr;
162         struct efx_channel *channel;
163         int syserr;
164         int queues;
165
166         /* Check to see if this is our interrupt.  If it isn't, we
167          * exit without having touched the hardware.
168          */
169         if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
170                 netif_vdbg(efx, intr, efx->net_dev,
171                            "IRQ %d on CPU %d not for me\n", irq,
172                            raw_smp_processor_id());
173                 return IRQ_NONE;
174         }
175         efx->last_irq_cpu = raw_smp_processor_id();
176         netif_vdbg(efx, intr, efx->net_dev,
177                    "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
178                    irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
179
180         /* Determine interrupting queues, clear interrupt status
181          * register and acknowledge the device interrupt.
182          */
183         BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
184         queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
185
186         /* Check to see if we have a serious error condition */
187         if (queues & (1U << efx->fatal_irq_level)) {
188                 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
189                 if (unlikely(syserr))
190                         return efx_nic_fatal_interrupt(efx);
191         }
192
193         EFX_ZERO_OWORD(*int_ker);
194         wmb(); /* Ensure the vector is cleared before interrupt ack */
195         falcon_irq_ack_a1(efx);
196
197         /* Schedule processing of any interrupting queues */
198         channel = &efx->channel[0];
199         while (queues) {
200                 if (queues & 0x01)
201                         efx_schedule_channel(channel);
202                 channel++;
203                 queues >>= 1;
204         }
205
206         return IRQ_HANDLED;
207 }
208 /**************************************************************************
209  *
210  * EEPROM/flash
211  *
212  **************************************************************************
213  */
214
215 #define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
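/* FALCON_SPI_MAX_LEN is 16 bytes: all SPI data is staged through the single
 * oword-sized (128-bit) EE_SPI_HDATA register, so no individual command can
 * transfer more than one oword of data. */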
216
217 static int falcon_spi_poll(struct efx_nic *efx)
218 {
219         efx_oword_t reg;
220         efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
221         return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
222 }
223
224 /* Wait for SPI command completion */
225 static int falcon_spi_wait(struct efx_nic *efx)
226 {
227         /* Most commands will finish quickly, so we start polling at
228          * very short intervals.  Sometimes the command may have to
229          * wait for VPD or expansion ROM access outside of our
230          * control, so we allow up to 100 ms. */
231         unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
232         int i;
233
234         for (i = 0; i < 10; i++) {
235                 if (!falcon_spi_poll(efx))
236                         return 0;
237                 udelay(10);
238         }
239
240         for (;;) {
241                 if (!falcon_spi_poll(efx))
242                         return 0;
243                 if (time_after_eq(jiffies, timeout)) {
244                         netif_err(efx, hw, efx->net_dev,
245                                   "timed out waiting for SPI\n");
246                         return -ETIMEDOUT;
247                 }
248                 schedule_timeout_uninterruptible(1);
249         }
250 }
251
252 int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
253                    unsigned int command, int address,
254                    const void *in, void *out, size_t len)
255 {
256         bool addressed = (address >= 0);
257         bool reading = (out != NULL);
258         efx_oword_t reg;
259         int rc;
260
261         /* Input validation */
262         if (len > FALCON_SPI_MAX_LEN)
263                 return -EINVAL;
264         BUG_ON(!mutex_is_locked(&efx->spi_lock));
265
266         /* Check that previous command is not still running */
267         rc = falcon_spi_poll(efx);
268         if (rc)
269                 return rc;
270
271         /* Program address register, if we have an address */
272         if (addressed) {
273                 EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
274                 efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
275         }
276
277         /* Program data register, if we have data */
278         if (in != NULL) {
279                 memcpy(&reg, in, len);
280                 efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
281         }
282
283         /* Issue read/write command */
284         EFX_POPULATE_OWORD_7(reg,
285                              FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
286                              FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
287                              FRF_AB_EE_SPI_HCMD_DABCNT, len,
288                              FRF_AB_EE_SPI_HCMD_READ, reading,
289                              FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
290                              FRF_AB_EE_SPI_HCMD_ADBCNT,
291                              (addressed ? spi->addr_len : 0),
292                              FRF_AB_EE_SPI_HCMD_ENC, command);
293         efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
294
295         /* Wait for read/write to complete */
296         rc = falcon_spi_wait(efx);
297         if (rc)
298                 return rc;
299
300         /* Read data */
301         if (out != NULL) {
302                 efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
303                 memcpy(out, &reg, len);
304         }
305
306         return 0;
307 }
308
309 static size_t
310 falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
311 {
312         return min(FALCON_SPI_MAX_LEN,
313                    (spi->block_size - (start & (spi->block_size - 1))));
314 }
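/* Example: with a 256 B write block and start = 0x1f8, only
 * 256 - (0x1f8 & 0xff) = 8 bytes remain before the block boundary, so the
 * limit is min(16, 8) = 8; well clear of a boundary the limit is simply
 * FALCON_SPI_MAX_LEN (16 bytes). */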
315
316 static inline u8
317 efx_spi_munge_command(const struct efx_spi_device *spi,
318                       const u8 command, const unsigned int address)
319 {
320         return command | (((address >> 8) & spi->munge_address) << 3);
321 }
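/* For small EEPROMs with a single address byte, spi->munge_address is 1 and
 * address bit 8 is folded into bit 3 of the command byte, e.g. address 0x123
 * turns SPI_READ into (SPI_READ | 0x08); for parts with full-width
 * addressing munge_address is 0 and the command passes through unchanged.
 * (See falcon_spi_device_init() for how munge_address is derived.) */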
322
323 /* Wait up to 10 ms for buffered write completion */
324 int
325 falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
326 {
327         unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
328         u8 status;
329         int rc;
330
331         for (;;) {
332                 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
333                                     &status, sizeof(status));
334                 if (rc)
335                         return rc;
336                 if (!(status & SPI_STATUS_NRDY))
337                         return 0;
338                 if (time_after_eq(jiffies, timeout)) {
339                         netif_err(efx, hw, efx->net_dev,
340                                   "SPI write timeout on device %d"
341                                   " last status=0x%02x\n",
342                                   spi->device_id, status);
343                         return -ETIMEDOUT;
344                 }
345                 schedule_timeout_uninterruptible(1);
346         }
347 }
348
349 int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
350                     loff_t start, size_t len, size_t *retlen, u8 *buffer)
351 {
352         size_t block_len, pos = 0;
353         unsigned int command;
354         int rc = 0;
355
356         while (pos < len) {
357                 block_len = min(len - pos, FALCON_SPI_MAX_LEN);
358
359                 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
360                 rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
361                                     buffer + pos, block_len);
362                 if (rc)
363                         break;
364                 pos += block_len;
365
366                 /* Avoid locking up the system */
367                 cond_resched();
368                 if (signal_pending(current)) {
369                         rc = -EINTR;
370                         break;
371                 }
372         }
373
374         if (retlen)
375                 *retlen = pos;
376         return rc;
377 }
378
379 int
380 falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
381                  loff_t start, size_t len, size_t *retlen, const u8 *buffer)
382 {
383         u8 verify_buffer[FALCON_SPI_MAX_LEN];
384         size_t block_len, pos = 0;
385         unsigned int command;
386         int rc = 0;
387
388         while (pos < len) {
389                 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
390                 if (rc)
391                         break;
392
393                 block_len = min(len - pos,
394                                 falcon_spi_write_limit(spi, start + pos));
395                 command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
396                 rc = falcon_spi_cmd(efx, spi, command, start + pos,
397                                     buffer + pos, NULL, block_len);
398                 if (rc)
399                         break;
400
401                 rc = falcon_spi_wait_write(efx, spi);
402                 if (rc)
403                         break;
404
405                 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
406                 rc = falcon_spi_cmd(efx, spi, command, start + pos,
407                                     NULL, verify_buffer, block_len);
408                 if (memcmp(verify_buffer, buffer + pos, block_len)) {
409                         rc = -EIO;
410                         break;
411                 }
412
413                 pos += block_len;
414
415                 /* Avoid locking up the system */
416                 cond_resched();
417                 if (signal_pending(current)) {
418                         rc = -EINTR;
419                         break;
420                 }
421         }
422
423         if (retlen)
424                 *retlen = pos;
425         return rc;
426 }
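/* Usage sketch (mirroring what falcon_read_nvram() below already does, not
 * a new interface): callers must hold efx->spi_lock around these helpers,
 * since falcon_spi_cmd() asserts that the lock is held:
 *
 *        mutex_lock(&efx->spi_lock);
 *        rc = falcon_spi_read(efx, spi, start, len, &retlen, buf);
 *        mutex_unlock(&efx->spi_lock);
 */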
427
428 /**************************************************************************
429  *
430  * MAC wrapper
431  *
432  **************************************************************************
433  */
434
435 static void falcon_push_multicast_hash(struct efx_nic *efx)
436 {
437         union efx_multicast_hash *mc_hash = &efx->multicast_hash;
438
439         WARN_ON(!mutex_is_locked(&efx->mac_lock));
440
441         efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
442         efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
443 }
444
445 static void falcon_reset_macs(struct efx_nic *efx)
446 {
447         struct falcon_nic_data *nic_data = efx->nic_data;
448         efx_oword_t reg, mac_ctrl;
449         int count;
450
451         if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
452                 /* It's not safe to use GLB_CTL_REG to reset the
453                  * MACs, so instead use the internal MAC resets
454                  */
455                 if (!EFX_IS10G(efx)) {
456                         EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
457                         efx_writeo(efx, &reg, FR_AB_GM_CFG1);
458                         udelay(1000);
459
460                         EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
461                         efx_writeo(efx, &reg, FR_AB_GM_CFG1);
462                         udelay(1000);
463                         return;
464                 } else {
465                         EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
466                         efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
467
468                         for (count = 0; count < 10000; count++) {
469                                 efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
470                                 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
471                                     0)
472                                         return;
473                                 udelay(10);
474                         }
475
476                         netif_err(efx, hw, efx->net_dev,
477                                   "timed out waiting for XMAC core reset\n");
478                 }
479         }
480
481         /* MAC stats will fail whilst the TX FIFO is draining */
482         WARN_ON(nic_data->stats_disable_count == 0);
483
484         efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
485         EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
486         efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
487
488         efx_reado(efx, &reg, FR_AB_GLB_CTL);
489         EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
490         EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
491         EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
492         efx_writeo(efx, &reg, FR_AB_GLB_CTL);
493
494         count = 0;
495         while (1) {
496                 efx_reado(efx, &reg, FR_AB_GLB_CTL);
497                 if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
498                     !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
499                     !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
500                         netif_dbg(efx, hw, efx->net_dev,
501                                   "Completed MAC reset after %d loops\n",
502                                   count);
503                         break;
504                 }
505                 if (count > 20) {
506                         netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
507                         break;
508                 }
509                 count++;
510                 udelay(10);
511         }
512
513         /* Ensure the correct MAC is selected before statistics
514          * are re-enabled by the caller */
515         efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
516
517         /* This can run even when the GMAC is selected */
518         falcon_setup_xaui(efx);
519 }
520
521 void falcon_drain_tx_fifo(struct efx_nic *efx)
522 {
523         efx_oword_t reg;
524
525         if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
526             (efx->loopback_mode != LOOPBACK_NONE))
527                 return;
528
529         efx_reado(efx, &reg, FR_AB_MAC_CTRL);
530         /* There is no point in draining more than once */
531         if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
532                 return;
533
534         falcon_reset_macs(efx);
535 }
536
537 static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
538 {
539         efx_oword_t reg;
540
541         if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
542                 return;
543
544         /* Isolate the MAC -> RX */
545         efx_reado(efx, &reg, FR_AZ_RX_CFG);
546         EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
547         efx_writeo(efx, &reg, FR_AZ_RX_CFG);
548
549         /* Isolate TX -> MAC */
550         falcon_drain_tx_fifo(efx);
551 }
552
553 void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
554 {
555         struct efx_link_state *link_state = &efx->link_state;
556         efx_oword_t reg;
557         int link_speed, isolate;
558
559         isolate = (efx->reset_pending != RESET_TYPE_NONE);
560
561         switch (link_state->speed) {
562         case 10000: link_speed = 3; break;
563         case 1000:  link_speed = 2; break;
564         case 100:   link_speed = 1; break;
565         default:    link_speed = 0; break;
566         }
567         /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
568          * as advertised.  Disable to ensure packets are not
569          * indefinitely held and TX queue can be flushed at any point
570          * while the link is down. */
571         EFX_POPULATE_OWORD_5(reg,
572                              FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
573                              FRF_AB_MAC_BCAD_ACPT, 1,
574                              FRF_AB_MAC_UC_PROM, efx->promiscuous,
575                              FRF_AB_MAC_LINK_STATUS, 1, /* always set */
576                              FRF_AB_MAC_SPEED, link_speed);
577         /* On B0, MAC backpressure can be disabled and packets get
578          * discarded. */
579         if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
580                 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
581                                     !link_state->up || isolate);
582         }
583
584         efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
585
586         /* Restore the multicast hash registers. */
587         falcon_push_multicast_hash(efx);
588
589         efx_reado(efx, &reg, FR_AZ_RX_CFG);
590         /* Enable XOFF signal from RX FIFO (we enabled it during NIC
591          * initialisation but it may read back as 0) */
592         EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
593         /* Unisolate the MAC -> RX */
594         if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
595                 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
596         efx_writeo(efx, &reg, FR_AZ_RX_CFG);
597 }
598
599 static void falcon_stats_request(struct efx_nic *efx)
600 {
601         struct falcon_nic_data *nic_data = efx->nic_data;
602         efx_oword_t reg;
603
604         WARN_ON(nic_data->stats_pending);
605         WARN_ON(nic_data->stats_disable_count);
606
607         if (nic_data->stats_dma_done == NULL)
608                 return; /* no mac selected */
609
610         *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
611         nic_data->stats_pending = true;
612         wmb(); /* ensure done flag is clear */
613
614         /* Initiate DMA transfer of stats */
615         EFX_POPULATE_OWORD_2(reg,
616                              FRF_AB_MAC_STAT_DMA_CMD, 1,
617                              FRF_AB_MAC_STAT_DMA_ADR,
618                              efx->stats_buffer.dma_addr);
619         efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
620
621         mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
622 }
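/* Completion protocol: nic_data->stats_dma_done points at the GMAC or XMAC
 * "DMA done" word inside stats_buffer (selected in falcon_switch_mac()).
 * It is set to FALCON_STATS_NOT_DONE before the request; the hardware is
 * expected to overwrite it with FALCON_STATS_DONE once the stats DMA has
 * landed, which falcon_stats_complete() (driven by stats_timer) checks for. */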
623
624 static void falcon_stats_complete(struct efx_nic *efx)
625 {
626         struct falcon_nic_data *nic_data = efx->nic_data;
627
628         if (!nic_data->stats_pending)
629                 return;
630
631         nic_data->stats_pending = false;
632         if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
633                 rmb(); /* read the done flag before the stats */
634                 efx->mac_op->update_stats(efx);
635         } else {
636                 netif_err(efx, hw, efx->net_dev,
637                           "timed out waiting for statistics\n");
638         }
639 }
640
641 static void falcon_stats_timer_func(unsigned long context)
642 {
643         struct efx_nic *efx = (struct efx_nic *)context;
644         struct falcon_nic_data *nic_data = efx->nic_data;
645
646         spin_lock(&efx->stats_lock);
647
648         falcon_stats_complete(efx);
649         if (nic_data->stats_disable_count == 0)
650                 falcon_stats_request(efx);
651
652         spin_unlock(&efx->stats_lock);
653 }
654
655 static void falcon_switch_mac(struct efx_nic *efx);
656
657 static bool falcon_loopback_link_poll(struct efx_nic *efx)
658 {
659         struct efx_link_state old_state = efx->link_state;
660
661         WARN_ON(!mutex_is_locked(&efx->mac_lock));
662         WARN_ON(!LOOPBACK_INTERNAL(efx));
663
664         efx->link_state.fd = true;
665         efx->link_state.fc = efx->wanted_fc;
666         efx->link_state.up = true;
667
668         if (efx->loopback_mode == LOOPBACK_GMAC)
669                 efx->link_state.speed = 1000;
670         else
671                 efx->link_state.speed = 10000;
672
673         return !efx_link_state_equal(&efx->link_state, &old_state);
674 }
675
676 static int falcon_reconfigure_port(struct efx_nic *efx)
677 {
678         int rc;
679
680         WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);
681
682         /* Poll the PHY link state *before* reconfiguring it. This means we
683          * will pick up the correct speed (in loopback) to select the correct
684          * MAC.
685          */
686         if (LOOPBACK_INTERNAL(efx))
687                 falcon_loopback_link_poll(efx);
688         else
689                 efx->phy_op->poll(efx);
690
691         falcon_stop_nic_stats(efx);
692         falcon_deconfigure_mac_wrapper(efx);
693
694         falcon_switch_mac(efx);
695
696         efx->phy_op->reconfigure(efx);
697         rc = efx->mac_op->reconfigure(efx);
698         BUG_ON(rc);
699
700         falcon_start_nic_stats(efx);
701
702         /* Synchronise efx->link_state with the kernel */
703         efx_link_status_changed(efx);
704
705         return 0;
706 }
707
708 /**************************************************************************
709  *
710  * PHY access via GMII
711  *
712  **************************************************************************
713  */
714
715 /* Wait for GMII access to complete */
716 static int falcon_gmii_wait(struct efx_nic *efx)
717 {
718         efx_oword_t md_stat;
719         int count;
720
721         /* wait up to 50ms - maximum taken from the datasheet */
722         for (count = 0; count < 5000; count++) {
723                 efx_reado(efx, &md_stat, FR_AB_MD_STAT);
724                 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
725                         if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
726                             EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
727                                 netif_err(efx, hw, efx->net_dev,
728                                           "error from GMII access "
729                                           EFX_OWORD_FMT"\n",
730                                           EFX_OWORD_VAL(md_stat));
731                                 return -EIO;
732                         }
733                         return 0;
734                 }
735                 udelay(10);
736         }
737         netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
738         return -ETIMEDOUT;
739 }
740
741 /* Write an MDIO register of a PHY connected to Falcon. */
742 static int falcon_mdio_write(struct net_device *net_dev,
743                              int prtad, int devad, u16 addr, u16 value)
744 {
745         struct efx_nic *efx = netdev_priv(net_dev);
746         efx_oword_t reg;
747         int rc;
748
749         netif_vdbg(efx, hw, efx->net_dev,
750                    "writing MDIO %d register %d.%d with 0x%04x\n",
751                     prtad, devad, addr, value);
752
753         mutex_lock(&efx->mdio_lock);
754
755         /* Check MDIO not currently being accessed */
756         rc = falcon_gmii_wait(efx);
757         if (rc)
758                 goto out;
759
760         /* Write the address/ID register */
761         EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
762         efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
763
764         EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
765                              FRF_AB_MD_DEV_ADR, devad);
766         efx_writeo(efx, &reg, FR_AB_MD_ID);
767
768         /* Write data */
769         EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
770         efx_writeo(efx, &reg, FR_AB_MD_TXD);
771
772         EFX_POPULATE_OWORD_2(reg,
773                              FRF_AB_MD_WRC, 1,
774                              FRF_AB_MD_GC, 0);
775         efx_writeo(efx, &reg, FR_AB_MD_CS);
776
777         /* Wait for data to be written */
778         rc = falcon_gmii_wait(efx);
779         if (rc) {
780                 /* Abort the write operation */
781                 EFX_POPULATE_OWORD_2(reg,
782                                      FRF_AB_MD_WRC, 0,
783                                      FRF_AB_MD_GC, 1);
784                 efx_writeo(efx, &reg, FR_AB_MD_CS);
785                 udelay(10);
786         }
787
788 out:
789         mutex_unlock(&efx->mdio_lock);
790         return rc;
791 }
792
793 /* Read an MDIO register of a PHY connected to Falcon. */
794 static int falcon_mdio_read(struct net_device *net_dev,
795                             int prtad, int devad, u16 addr)
796 {
797         struct efx_nic *efx = netdev_priv(net_dev);
798         efx_oword_t reg;
799         int rc;
800
801         mutex_lock(&efx->mdio_lock);
802
803         /* Check MDIO not currently being accessed */
804         rc = falcon_gmii_wait(efx);
805         if (rc)
806                 goto out;
807
808         EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
809         efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
810
811         EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
812                              FRF_AB_MD_DEV_ADR, devad);
813         efx_writeo(efx, &reg, FR_AB_MD_ID);
814
815         /* Request data to be read */
816         EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
817         efx_writeo(efx, &reg, FR_AB_MD_CS);
818
819         /* Wait for data to become available */
820         rc = falcon_gmii_wait(efx);
821         if (rc == 0) {
822                 efx_reado(efx, &reg, FR_AB_MD_RXD);
823                 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
824                 netif_vdbg(efx, hw, efx->net_dev,
825                            "read from MDIO %d register %d.%d, got %04x\n",
826                            prtad, devad, addr, rc);
827         } else {
828                 /* Abort the read operation */
829                 EFX_POPULATE_OWORD_2(reg,
830                                      FRF_AB_MD_RIC, 0,
831                                      FRF_AB_MD_GC, 1);
832                 efx_writeo(efx, &reg, FR_AB_MD_CS);
833
834                 netif_dbg(efx, hw, efx->net_dev,
835                           "read from MDIO %d register %d.%d, got error %d\n",
836                           prtad, devad, addr, rc);
837         }
838
839 out:
840         mutex_unlock(&efx->mdio_lock);
841         return rc;
842 }
843
844 static void falcon_clock_mac(struct efx_nic *efx)
845 {
846         unsigned strap_val;
847         efx_oword_t nic_stat;
848
849         /* Configure the NIC generated MAC clock correctly */
850         efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
851         strap_val = EFX_IS10G(efx) ? 5 : 3;
852         if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
853                 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
854                 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
855                 efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
856         } else {
857                 /* Falcon A1 does not support 1G/10G speed switching
858                  * and must not be used with a PHY that does. */
859                 BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
860                        strap_val);
861         }
862 }
863
864 static void falcon_switch_mac(struct efx_nic *efx)
865 {
866         struct efx_mac_operations *old_mac_op = efx->mac_op;
867         struct falcon_nic_data *nic_data = efx->nic_data;
868         unsigned int stats_done_offset;
869
870         WARN_ON(!mutex_is_locked(&efx->mac_lock));
871         WARN_ON(nic_data->stats_disable_count == 0);
872
873         efx->mac_op = (EFX_IS10G(efx) ?
874                        &falcon_xmac_operations : &falcon_gmac_operations);
875
876         if (EFX_IS10G(efx))
877                 stats_done_offset = XgDmaDone_offset;
878         else
879                 stats_done_offset = GDmaDone_offset;
880         nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;
881
882         if (old_mac_op == efx->mac_op)
883                 return;
884
885         falcon_clock_mac(efx);
886
887         netif_dbg(efx, hw, efx->net_dev, "selected %cMAC\n",
888                   EFX_IS10G(efx) ? 'X' : 'G');
889         /* Not all MACs support a MAC-level link state */
890         efx->xmac_poll_required = false;
891         falcon_reset_macs(efx);
892 }
893
894 /* This call is responsible for hooking in the MAC and PHY operations */
895 static int falcon_probe_port(struct efx_nic *efx)
896 {
897         int rc;
898
899         switch (efx->phy_type) {
900         case PHY_TYPE_SFX7101:
901                 efx->phy_op = &falcon_sfx7101_phy_ops;
902                 break;
903         case PHY_TYPE_SFT9001A:
904         case PHY_TYPE_SFT9001B:
905                 efx->phy_op = &falcon_sft9001_phy_ops;
906                 break;
907         case PHY_TYPE_QT2022C2:
908         case PHY_TYPE_QT2025C:
909                 efx->phy_op = &falcon_qt202x_phy_ops;
910                 break;
911         default:
912                 netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
913                           efx->phy_type);
914                 return -ENODEV;
915         }
916
917         /* Fill out MDIO structure and loopback modes */
918         efx->mdio.mdio_read = falcon_mdio_read;
919         efx->mdio.mdio_write = falcon_mdio_write;
920         rc = efx->phy_op->probe(efx);
921         if (rc != 0)
922                 return rc;
923
924         /* Initial assumption */
925         efx->link_state.speed = 10000;
926         efx->link_state.fd = true;
927
928         /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
929         if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
930                 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
931         else
932                 efx->wanted_fc = EFX_FC_RX;
933         if (efx->mdio.mmds & MDIO_DEVS_AN)
934                 efx->wanted_fc |= EFX_FC_AUTO;
935
936         /* Allocate buffer for stats */
937         rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
938                                   FALCON_MAC_STATS_SIZE);
939         if (rc)
940                 return rc;
941         netif_dbg(efx, probe, efx->net_dev,
942                   "stats buffer at %llx (virt %p phys %llx)\n",
943                   (u64)efx->stats_buffer.dma_addr,
944                   efx->stats_buffer.addr,
945                   (u64)virt_to_phys(efx->stats_buffer.addr));
946
947         return 0;
948 }
949
950 static void falcon_remove_port(struct efx_nic *efx)
951 {
952         efx->phy_op->remove(efx);
953         efx_nic_free_buffer(efx, &efx->stats_buffer);
954 }
955
956 /**************************************************************************
957  *
958  * Falcon test code
959  *
960  **************************************************************************/
961
962 static int
963 falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
964 {
965         struct falcon_nvconfig *nvconfig;
966         struct efx_spi_device *spi;
967         void *region;
968         int rc, magic_num, struct_ver;
969         __le16 *word, *limit;
970         u32 csum;
971
972         spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
973         if (!spi)
974                 return -EINVAL;
975
976         region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
977         if (!region)
978                 return -ENOMEM;
979         nvconfig = region + FALCON_NVCONFIG_OFFSET;
980
981         mutex_lock(&efx->spi_lock);
982         rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
983         mutex_unlock(&efx->spi_lock);
984         if (rc) {
985                 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
986                           efx->spi_flash ? "flash" : "EEPROM");
987                 rc = -EIO;
988                 goto out;
989         }
990
991         magic_num = le16_to_cpu(nvconfig->board_magic_num);
992         struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
993
994         rc = -EINVAL;
995         if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
996                 netif_err(efx, hw, efx->net_dev,
997                           "NVRAM bad magic 0x%x\n", magic_num);
998                 goto out;
999         }
1000         if (struct_ver < 2) {
1001                 netif_err(efx, hw, efx->net_dev,
1002                           "NVRAM has ancient version 0x%x\n", struct_ver);
1003                 goto out;
1004         } else if (struct_ver < 4) {
1005                 word = &nvconfig->board_magic_num;
1006                 limit = (__le16 *) (nvconfig + 1);
1007         } else {
1008                 word = region;
1009                 limit = region + FALCON_NVCONFIG_END;
1010         }
1011         for (csum = 0; word < limit; ++word)
1012                 csum += le16_to_cpu(*word);
1013
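        /* A valid image is laid out so that its 16-bit words sum to 0xffff
         * (modulo 0x10000); the check below rejects the image unless all of
         * the low 16 bits of the accumulated sum are set. */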
1014         if (~csum & 0xffff) {
1015                 netif_err(efx, hw, efx->net_dev,
1016                           "NVRAM has incorrect checksum\n");
1017                 goto out;
1018         }
1019
1020         rc = 0;
1021         if (nvconfig_out)
1022                 memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
1023
1024  out:
1025         kfree(region);
1026         return rc;
1027 }
1028
1029 static int falcon_test_nvram(struct efx_nic *efx)
1030 {
1031         return falcon_read_nvram(efx, NULL);
1032 }
1033
1034 static const struct efx_nic_register_test falcon_b0_register_tests[] = {
1035         { FR_AZ_ADR_REGION,
1036           EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
1037         { FR_AZ_RX_CFG,
1038           EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
1039         { FR_AZ_TX_CFG,
1040           EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
1041         { FR_AZ_TX_RESERVED,
1042           EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
1043         { FR_AB_MAC_CTRL,
1044           EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
1045         { FR_AZ_SRM_TX_DC_CFG,
1046           EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
1047         { FR_AZ_RX_DC_CFG,
1048           EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
1049         { FR_AZ_RX_DC_PF_WM,
1050           EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
1051         { FR_BZ_DP_CTRL,
1052           EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
1053         { FR_AB_GM_CFG2,
1054           EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
1055         { FR_AB_GMF_CFG0,
1056           EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
1057         { FR_AB_XM_GLB_CFG,
1058           EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
1059         { FR_AB_XM_TX_CFG,
1060           EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
1061         { FR_AB_XM_RX_CFG,
1062           EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
1063         { FR_AB_XM_RX_PARAM,
1064           EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
1065         { FR_AB_XM_FC,
1066           EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
1067         { FR_AB_XM_ADR_LO,
1068           EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
1069         { FR_AB_XX_SD_CTL,
1070           EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
1071 };
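/* A reading of the table above, based on how falcon_b0_test_registers()
 * consumes it: each entry pairs a register with a mask of the bits that the
 * generic register test in efx_nic_test_registers() is expected to exercise,
 * leaving bits outside the mask untouched. */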
1072
1073 static int falcon_b0_test_registers(struct efx_nic *efx)
1074 {
1075         return efx_nic_test_registers(efx, falcon_b0_register_tests,
1076                                       ARRAY_SIZE(falcon_b0_register_tests));
1077 }
1078
1079 /**************************************************************************
1080  *
1081  * Device reset
1082  *
1083  **************************************************************************
1084  */
1085
1086 /* Resets NIC to known state.  This routine must be called in process
1087  * context and is allowed to sleep. */
1088 static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1089 {
1090         struct falcon_nic_data *nic_data = efx->nic_data;
1091         efx_oword_t glb_ctl_reg_ker;
1092         int rc;
1093
1094         netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
1095                   RESET_TYPE(method));
1096
1097         /* Initiate device reset */
1098         if (method == RESET_TYPE_WORLD) {
1099                 rc = pci_save_state(efx->pci_dev);
1100                 if (rc) {
1101                         netif_err(efx, drv, efx->net_dev,
1102                                   "failed to backup PCI state of primary "
1103                                   "function prior to hardware reset\n");
1104                         goto fail1;
1105                 }
1106                 if (efx_nic_is_dual_func(efx)) {
1107                         rc = pci_save_state(nic_data->pci_dev2);
1108                         if (rc) {
1109                                 netif_err(efx, drv, efx->net_dev,
1110                                           "failed to backup PCI state of "
1111                                           "secondary function prior to "
1112                                           "hardware reset\n");
1113                                 goto fail2;
1114                         }
1115                 }
1116
1117                 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
1118                                      FRF_AB_EXT_PHY_RST_DUR,
1119                                      FFE_AB_EXT_PHY_RST_DUR_10240US,
1120                                      FRF_AB_SWRST, 1);
1121         } else {
1122                 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
1123                                      /* exclude PHY from "invisible" reset */
1124                                      FRF_AB_EXT_PHY_RST_CTL,
1125                                      method == RESET_TYPE_INVISIBLE,
1126                                      /* exclude EEPROM/flash and PCIe */
1127                                      FRF_AB_PCIE_CORE_RST_CTL, 1,
1128                                      FRF_AB_PCIE_NSTKY_RST_CTL, 1,
1129                                      FRF_AB_PCIE_SD_RST_CTL, 1,
1130                                      FRF_AB_EE_RST_CTL, 1,
1131                                      FRF_AB_EXT_PHY_RST_DUR,
1132                                      FFE_AB_EXT_PHY_RST_DUR_10240US,
1133                                      FRF_AB_SWRST, 1);
1134         }
1135         efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
1136
1137         netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
1138         schedule_timeout_uninterruptible(HZ / 20);
1139
1140         /* Restore PCI configuration if needed */
1141         if (method == RESET_TYPE_WORLD) {
1142                 if (efx_nic_is_dual_func(efx)) {
1143                         rc = pci_restore_state(nic_data->pci_dev2);
1144                         if (rc) {
1145                                 netif_err(efx, drv, efx->net_dev,
1146                                           "failed to restore PCI config for "
1147                                           "the secondary function\n");
1148                                 goto fail3;
1149                         }
1150                 }
1151                 rc = pci_restore_state(efx->pci_dev);
1152                 if (rc) {
1153                         netif_err(efx, drv, efx->net_dev,
1154                                   "failed to restore PCI config for the "
1155                                   "primary function\n");
1156                         goto fail4;
1157                 }
1158                 netif_dbg(efx, drv, efx->net_dev,
1159                           "successfully restored PCI config\n");
1160         }
1161
1162         /* Assert that the reset is complete */
1163         efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
1164         if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
1165                 rc = -ETIMEDOUT;
1166                 netif_err(efx, hw, efx->net_dev,
1167                           "timed out waiting for hardware reset\n");
1168                 goto fail5;
1169         }
1170         netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
1171
1172         return 0;
1173
1174         /* pci_save_state() and pci_restore_state() MUST be called in pairs */
1175 fail2:
1176 fail3:
1177         pci_restore_state(efx->pci_dev);
1178 fail1:
1179 fail4:
1180 fail5:
1181         return rc;
1182 }
1183
1184 static void falcon_monitor(struct efx_nic *efx)
1185 {
1186         bool link_changed;
1187         int rc;
1188
1189         BUG_ON(!mutex_is_locked(&efx->mac_lock));
1190
1191         rc = falcon_board(efx)->type->monitor(efx);
1192         if (rc) {
1193                 netif_err(efx, hw, efx->net_dev,
1194                           "Board sensor %s; shutting down PHY\n",
1195                           (rc == -ERANGE) ? "reported fault" : "failed");
1196                 efx->phy_mode |= PHY_MODE_LOW_POWER;
1197                 rc = __efx_reconfigure_port(efx);
1198                 WARN_ON(rc);
1199         }
1200
1201         if (LOOPBACK_INTERNAL(efx))
1202                 link_changed = falcon_loopback_link_poll(efx);
1203         else
1204                 link_changed = efx->phy_op->poll(efx);
1205
1206         if (link_changed) {
1207                 falcon_stop_nic_stats(efx);
1208                 falcon_deconfigure_mac_wrapper(efx);
1209
1210                 falcon_switch_mac(efx);
1211                 rc = efx->mac_op->reconfigure(efx);
1212                 BUG_ON(rc);
1213
1214                 falcon_start_nic_stats(efx);
1215
1216                 efx_link_status_changed(efx);
1217         }
1218
1219         if (EFX_IS10G(efx))
1220                 falcon_poll_xmac(efx);
1221 }
1222
1223 /* Zeroes out the SRAM contents.  This routine must be called in
1224  * process context and is allowed to sleep.
1225  */
1226 static int falcon_reset_sram(struct efx_nic *efx)
1227 {
1228         efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
1229         int count;
1230
1231         /* Set the SRAM wake/sleep GPIO appropriately. */
1232         efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
1233         EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
1234         EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
1235         efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
1236
1237         /* Initiate SRAM reset */
1238         EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
1239                              FRF_AZ_SRM_INIT_EN, 1,
1240                              FRF_AZ_SRM_NB_SZ, 0);
1241         efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
1242
1243         /* Wait for SRAM reset to complete */
1244         count = 0;
1245         do {
1246                 netif_dbg(efx, hw, efx->net_dev,
1247                           "waiting for SRAM reset (attempt %d)...\n", count);
1248
1249                 /* SRAM reset is slow; expect around 16ms */
1250                 schedule_timeout_uninterruptible(HZ / 50);
1251
1252                 /* Check for reset complete */
1253                 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
1254                 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
1255                         netif_dbg(efx, hw, efx->net_dev,
1256                                   "SRAM reset complete\n");
1257
1258                         return 0;
1259                 }
1260         } while (++count < 20); /* wait up to 0.4 sec */
1261
1262         netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
1263         return -ETIMEDOUT;
1264 }
1265
1266 static int falcon_spi_device_init(struct efx_nic *efx,
1267                                   struct efx_spi_device **spi_device_ret,
1268                                   unsigned int device_id, u32 device_type)
1269 {
1270         struct efx_spi_device *spi_device;
1271
1272         if (device_type != 0) {
1273                 spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
1274                 if (!spi_device)
1275                         return -ENOMEM;
1276                 spi_device->device_id = device_id;
1277                 spi_device->size =
1278                         1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
1279                 spi_device->addr_len =
1280                         SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
1281                 spi_device->munge_address = (spi_device->size == 1 << 9 &&
1282                                              spi_device->addr_len == 1);
1283                 spi_device->erase_command =
1284                         SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
1285                 spi_device->erase_size =
1286                         1 << SPI_DEV_TYPE_FIELD(device_type,
1287                                                 SPI_DEV_TYPE_ERASE_SIZE);
1288                 spi_device->block_size =
1289                         1 << SPI_DEV_TYPE_FIELD(device_type,
1290                                                 SPI_DEV_TYPE_BLOCK_SIZE);
1291         } else {
1292                 spi_device = NULL;
1293         }
1294
1295         kfree(*spi_device_ret);
1296         *spi_device_ret = spi_device;
1297         return 0;
1298 }
1299
1300 static void falcon_remove_spi_devices(struct efx_nic *efx)
1301 {
1302         kfree(efx->spi_eeprom);
1303         efx->spi_eeprom = NULL;
1304         kfree(efx->spi_flash);
1305         efx->spi_flash = NULL;
1306 }
1307
1308 /* Extract non-volatile configuration */
1309 static int falcon_probe_nvconfig(struct efx_nic *efx)
1310 {
1311         struct falcon_nvconfig *nvconfig;
1312         int board_rev;
1313         int rc;
1314
1315         nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
1316         if (!nvconfig)
1317                 return -ENOMEM;
1318
1319         rc = falcon_read_nvram(efx, nvconfig);
1320         if (rc == -EINVAL) {
1321                 netif_err(efx, probe, efx->net_dev,
1322                           "NVRAM is invalid therefore using defaults\n");
1323                 efx->phy_type = PHY_TYPE_NONE;
1324                 efx->mdio.prtad = MDIO_PRTAD_NONE;
1325                 board_rev = 0;
1326                 rc = 0;
1327         } else if (rc) {
1328                 goto fail1;
1329         } else {
1330                 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
1331                 struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
1332
1333                 efx->phy_type = v2->port0_phy_type;
1334                 efx->mdio.prtad = v2->port0_phy_addr;
1335                 board_rev = le16_to_cpu(v2->board_revision);
1336
1337                 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
1338                         rc = falcon_spi_device_init(
1339                                 efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
1340                                 le32_to_cpu(v3->spi_device_type
1341                                             [FFE_AB_SPI_DEVICE_FLASH]));
1342                         if (rc)
1343                                 goto fail2;
1344                         rc = falcon_spi_device_init(
1345                                 efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
1346                                 le32_to_cpu(v3->spi_device_type
1347                                             [FFE_AB_SPI_DEVICE_EEPROM]));
1348                         if (rc)
1349                                 goto fail2;
1350                 }
1351         }
1352
1353         /* Read the MAC addresses */
1354         memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
1355
1356         netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
1357                   efx->phy_type, efx->mdio.prtad);
1358
1359         rc = falcon_probe_board(efx, board_rev);
1360         if (rc)
1361                 goto fail2;
1362
1363         kfree(nvconfig);
1364         return 0;
1365
1366  fail2:
1367         falcon_remove_spi_devices(efx);
1368  fail1:
1369         kfree(nvconfig);
1370         return rc;
1371 }
1372
1373 /* Probe all SPI devices on the NIC */
1374 static void falcon_probe_spi_devices(struct efx_nic *efx)
1375 {
1376         efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
1377         int boot_dev;
1378
1379         efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
1380         efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
1381         efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
1382
1383         if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
1384                 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
1385                             FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
1386                 netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
1387                           boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
1388                           "flash" : "EEPROM");
1389         } else {
1390                 /* Disable VPD and set clock dividers to safe
1391                  * values for initial programming. */
1392                 boot_dev = -1;
1393                 netif_dbg(efx, probe, efx->net_dev,
1394                           "Booted from internal ASIC settings;"
1395                           " setting SPI config\n");
1396                 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
1397                                      /* 125 MHz / 7 ~= 20 MHz */
1398                                      FRF_AB_EE_SF_CLOCK_DIV, 7,
1399                                      /* 125 MHz / 63 ~= 2 MHz */
1400                                      FRF_AB_EE_EE_CLOCK_DIV, 63);
1401                 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
1402         }
1403
1404         if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
1405                 falcon_spi_device_init(efx, &efx->spi_flash,
1406                                        FFE_AB_SPI_DEVICE_FLASH,
1407                                        default_flash_type);
1408         if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
1409                 falcon_spi_device_init(efx, &efx->spi_eeprom,
1410                                        FFE_AB_SPI_DEVICE_EEPROM,
1411                                        large_eeprom_type);
1412 }
1413
1414 static int falcon_probe_nic(struct efx_nic *efx)
1415 {
1416         struct falcon_nic_data *nic_data;
1417         struct falcon_board *board;
1418         int rc;
1419
1420         /* Allocate storage for hardware specific data */
1421         nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
1422         if (!nic_data)
1423                 return -ENOMEM;
1424         efx->nic_data = nic_data;
1425
1426         rc = -ENODEV;
1427
1428         if (efx_nic_fpga_ver(efx) != 0) {
1429                 netif_err(efx, probe, efx->net_dev,
1430                           "Falcon FPGA not supported\n");
1431                 goto fail1;
1432         }
1433
1434         if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
1435                 efx_oword_t nic_stat;
1436                 struct pci_dev *dev;
1437                 u8 pci_rev = efx->pci_dev->revision;
1438
1439                 if ((pci_rev == 0xff) || (pci_rev == 0)) {
1440                         netif_err(efx, probe, efx->net_dev,
1441                                   "Falcon rev A0 not supported\n");
1442                         goto fail1;
1443                 }
1444                 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
1445                 if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
1446                         netif_err(efx, probe, efx->net_dev,
1447                                   "Falcon rev A1 1G not supported\n");
1448                         goto fail1;
1449                 }
1450                 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
1451                         netif_err(efx, probe, efx->net_dev,
1452                                   "Falcon rev A1 PCI-X not supported\n");
1453                         goto fail1;
1454                 }
1455
1456                 dev = pci_dev_get(efx->pci_dev);
1457                 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
1458                                              dev))) {
1459                         if (dev->bus == efx->pci_dev->bus &&
1460                             dev->devfn == efx->pci_dev->devfn + 1) {
1461                                 nic_data->pci_dev2 = dev;
1462                                 break;
1463                         }
1464                 }
1465                 if (!nic_data->pci_dev2) {
1466                         netif_err(efx, probe, efx->net_dev,
1467                                   "failed to find secondary function\n");
1468                         rc = -ENODEV;
1469                         goto fail2;
1470                 }
1471         }
1472
1473         /* Now we can reset the NIC */
1474         rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
1475         if (rc) {
1476                 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
1477                 goto fail3;
1478         }
1479
1480         /* Allocate memory for INT_KER */
1481         rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
1482         if (rc)
1483                 goto fail4;
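        /* The INT_KER buffer's DMA address must be 16-byte aligned */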
1484         BUG_ON(efx->irq_status.dma_addr & 0x0f);
1485
1486         netif_dbg(efx, probe, efx->net_dev,
1487                   "INT_KER at %llx (virt %p phys %llx)\n",
1488                   (u64)efx->irq_status.dma_addr,
1489                   efx->irq_status.addr,
1490                   (u64)virt_to_phys(efx->irq_status.addr));
1491
1492         falcon_probe_spi_devices(efx);
1493
1494         /* Read in the non-volatile configuration */
1495         rc = falcon_probe_nvconfig(efx);
1496         if (rc)
1497                 goto fail5;
1498
1499         /* Initialise I2C adapter */
1500         board = falcon_board(efx);
1501         board->i2c_adap.owner = THIS_MODULE;
1502         board->i2c_data = falcon_i2c_bit_operations;
1503         board->i2c_data.data = efx;
1504         board->i2c_adap.algo_data = &board->i2c_data;
1505         board->i2c_adap.dev.parent = &efx->pci_dev->dev;
1506         strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
1507                 sizeof(board->i2c_adap.name));
1508         rc = i2c_bit_add_bus(&board->i2c_adap);
1509         if (rc)
1510                 goto fail5;
1511
1512         rc = falcon_board(efx)->type->init(efx);
1513         if (rc) {
1514                 netif_err(efx, probe, efx->net_dev,
1515                           "failed to initialise board\n");
1516                 goto fail6;
1517         }
1518
1519         nic_data->stats_disable_count = 1;
1520         setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
1521                     (unsigned long)efx);
1522
1523         return 0;
1524
1525  fail6:
1526         BUG_ON(i2c_del_adapter(&board->i2c_adap));
1527         memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1528  fail5:
1529         falcon_remove_spi_devices(efx);
1530         efx_nic_free_buffer(efx, &efx->irq_status);
1531  fail4:
1532  fail3:
1533         if (nic_data->pci_dev2) {
1534                 pci_dev_put(nic_data->pci_dev2);
1535                 nic_data->pci_dev2 = NULL;
1536         }
1537  fail2:
1538  fail1:
1539         kfree(efx->nic_data);
1540         return rc;
1541 }
1542
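/* Program the RX_CFG register: user buffer split size, XON/XOFF
 * flow-control thresholds (the field layout differs between rev A1 and
 * rev B0) and, from rev B0 onwards, RSS hash insertion. */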
1543 static void falcon_init_rx_cfg(struct efx_nic *efx)
1544 {
1545         /* Prior to Siena the RX DMA engine will split each frame at
1546          * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
1547          * be so large that splitting never happens. */
1548         const unsigned huge_buf_size = (3 * 4096) >> 5;
1549         /* RX control FIFO thresholds (32 entries) */
1550         const unsigned ctrl_xon_thr = 20;
1551         const unsigned ctrl_xoff_thr = 25;
1552         /* RX data FIFO thresholds (256-byte units; size varies) */
1553         int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
1554         int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
1555         efx_oword_t reg;
1556
1557         efx_reado(efx, &reg, FR_AZ_RX_CFG);
1558         if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
1559                 /* Data FIFO size is 5.5K */
1560                 if (data_xon_thr < 0)
1561                         data_xon_thr = 512 >> 8;
1562                 if (data_xoff_thr < 0)
1563                         data_xoff_thr = 2048 >> 8;
1564                 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
1565                 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
1566                                     huge_buf_size);
1567                 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
1568                 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
1569                 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
1570                 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
1571         } else {
1572                 /* Data FIFO size is 80K; register fields moved */
1573                 if (data_xon_thr < 0)
1574                         data_xon_thr = 27648 >> 8; /* ~3*max MTU */
1575                 if (data_xoff_thr < 0)
1576                         data_xoff_thr = 54272 >> 8; /* ~80KB - 3*max MTU */
1577                 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
1578                 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
1579                                     huge_buf_size);
1580                 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
1581                 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
1582                 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
1583                 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
1584                 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
1585
1586                 /* Enable hash insertion. This is broken for the
1587                  * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
1588                  * IPv4 hashes. */
1589                 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
1590                 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
1591                 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
1592         }
1593         /* Always enable XOFF signal from RX FIFO.  We enable
1594          * or disable transmission of pause frames at the MAC. */
1595         EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
1596         efx_writeo(efx, &reg, FR_AZ_RX_CFG);
1597 }
1598
1599 /* This call performs hardware-specific global initialisation, such as
1600  * defining the descriptor cache sizes and number of RSS channels.
1601  * It does not set up any buffers, descriptor rings or event queues.
1602  */
1603 static int falcon_init_nic(struct efx_nic *efx)
1604 {
1605         efx_oword_t temp;
1606         int rc;
1607
1608         /* Use on-chip SRAM */
1609         efx_reado(efx, &temp, FR_AB_NIC_STAT);
1610         EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
1611         efx_writeo(efx, &temp, FR_AB_NIC_STAT);
1612
1613         /* Set the source of the GMAC clock */
1614         if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
1615                 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
1616                 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
1617                 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
1618         }
1619
1620         /* Select the correct MAC */
1621         falcon_clock_mac(efx);
1622
1623         rc = falcon_reset_sram(efx);
1624         if (rc)
1625                 return rc;
1626
1627         /* Clear the parity enables on the TX data FIFOs as
1628          * they produce false parity errors because of timing issues.
1629          */
1630         if (EFX_WORKAROUND_5129(efx)) {
1631                 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
1632                 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
1633                 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
1634         }
1635
1636         if (EFX_WORKAROUND_7244(efx)) {
1637                 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
1638                 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
1639                 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
1640                 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
1641                 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
1642                 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
1643         }
1644
1645         /* XXX This is documented only for Falcon A0/A1 */
1646         /* Set up RX.  The "wait for descriptor" feature is broken and must
1647          * be disabled.  RXDP recovery shouldn't be needed, but is.
1648          */
1649         efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
1650         EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
1651         EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
1652         if (EFX_WORKAROUND_5583(efx))
1653                 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
1654         efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
1655
1656         /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
1657          * descriptors (which is bad).
1658          */
1659         efx_reado(efx, &temp, FR_AZ_TX_CFG);
1660         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
1661         efx_writeo(efx, &temp, FR_AZ_TX_CFG);
1662
1663         falcon_init_rx_cfg(efx);
1664
1665         if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1666                 /* Set hash key for IPv4 */
1667                 memcpy(&temp, efx->rx_hash_key, sizeof(temp));
1668                 efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
1669
1670                 /* Set destination of both TX and RX Flush events */
1671                 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
1672                 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
1673         }
1674
1675         efx_nic_init_common(efx);
1676
1677         return 0;
1678 }
1679
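/* Undo falcon_probe_nic(): shut down the board, unregister the I2C
 * adapter, release the SPI devices and INT_KER buffer, reset the
 * hardware and drop the reference to the secondary PCI function. */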
1680 static void falcon_remove_nic(struct efx_nic *efx)
1681 {
1682         struct falcon_nic_data *nic_data = efx->nic_data;
1683         struct falcon_board *board = falcon_board(efx);
1684         int rc;
1685
1686         board->type->fini(efx);
1687
1688         /* Remove I2C adapter and clear it in preparation for a retry */
1689         rc = i2c_del_adapter(&board->i2c_adap);
1690         BUG_ON(rc);
1691         memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1692
1693         falcon_remove_spi_devices(efx);
1694         efx_nic_free_buffer(efx, &efx->irq_status);
1695
1696         falcon_reset_hw(efx, RESET_TYPE_ALL);
1697
1698         /* Release the second function after the reset */
1699         if (nic_data->pci_dev2) {
1700                 pci_dev_put(nic_data->pci_dev2);
1701                 nic_data->pci_dev2 = NULL;
1702         }
1703
1704         /* Tear down the private nic state */
1705         kfree(efx->nic_data);
1706         efx->nic_data = NULL;
1707 }
1708
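/* Fold the RX no-descriptor drop count into the driver statistics and,
 * if a MAC statistics DMA has completed, hand the buffer to the MAC
 * operations for decoding. */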
1709 static void falcon_update_nic_stats(struct efx_nic *efx)
1710 {
1711         struct falcon_nic_data *nic_data = efx->nic_data;
1712         efx_oword_t cnt;
1713
1714         if (nic_data->stats_disable_count)
1715                 return;
1716
1717         efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
1718         efx->n_rx_nodesc_drop_cnt +=
1719                 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
1720
1721         if (nic_data->stats_pending &&
1722             *nic_data->stats_dma_done == FALCON_STATS_DONE) {
1723                 nic_data->stats_pending = false;
1724                 rmb(); /* read the done flag before the stats */
1725                 efx->mac_op->update_stats(efx);
1726         }
1727 }
1728
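/* Re-enable statistics collection; when the disable count drops to
 * zero, request a fresh MAC statistics DMA. */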
1729 void falcon_start_nic_stats(struct efx_nic *efx)
1730 {
1731         struct falcon_nic_data *nic_data = efx->nic_data;
1732
1733         spin_lock_bh(&efx->stats_lock);
1734         if (--nic_data->stats_disable_count == 0)
1735                 falcon_stats_request(efx);
1736         spin_unlock_bh(&efx->stats_lock);
1737 }
1738
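/* Disable statistics collection: bump the disable count, stop the
 * stats timer and give any in-flight statistics DMA a few milliseconds
 * to complete before marking it done. */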
1739 void falcon_stop_nic_stats(struct efx_nic *efx)
1740 {
1741         struct falcon_nic_data *nic_data = efx->nic_data;
1742         int i;
1743
1744         might_sleep();
1745
1746         spin_lock_bh(&efx->stats_lock);
1747         ++nic_data->stats_disable_count;
1748         spin_unlock_bh(&efx->stats_lock);
1749
1750         del_timer_sync(&nic_data->stats_timer);
1751
1752         /* Wait enough time for the most recent transfer to
1753          * complete. */
1754         for (i = 0; i < 4 && nic_data->stats_pending; i++) {
1755                 if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
1756                         break;
1757                 msleep(1);
1758         }
1759
1760         spin_lock_bh(&efx->stats_lock);
1761         falcon_stats_complete(efx);
1762         spin_unlock_bh(&efx->stats_lock);
1763 }
1764
1765 static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1766 {
1767         falcon_board(efx)->type->set_id_led(efx, mode);
1768 }
1769
1770 /**************************************************************************
1771  *
1772  * Wake on LAN
1773  *
1774  **************************************************************************
1775  */
1776
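/* Wake-on-LAN is not supported on Falcon: report no capabilities and
 * reject any request to enable it. */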
1777 static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
1778 {
1779         wol->supported = 0;
1780         wol->wolopts = 0;
1781         memset(&wol->sopass, 0, sizeof(wol->sopass));
1782 }
1783
1784 static int falcon_set_wol(struct efx_nic *efx, u32 type)
1785 {
1786         if (type != 0)
1787                 return -EINVAL;
1788         return 0;
1789 }
1790
1791 /**************************************************************************
1792  *
1793  * Revision-dependent attributes used by efx.c and nic.c
1794  *
1795  **************************************************************************
1796  */
1797
1798 struct efx_nic_type falcon_a1_nic_type = {
1799         .probe = falcon_probe_nic,
1800         .remove = falcon_remove_nic,
1801         .init = falcon_init_nic,
1802         .fini = efx_port_dummy_op_void,
1803         .monitor = falcon_monitor,
1804         .reset = falcon_reset_hw,
1805         .probe_port = falcon_probe_port,
1806         .remove_port = falcon_remove_port,
1807         .prepare_flush = falcon_prepare_flush,
1808         .update_stats = falcon_update_nic_stats,
1809         .start_stats = falcon_start_nic_stats,
1810         .stop_stats = falcon_stop_nic_stats,
1811         .set_id_led = falcon_set_id_led,
1812         .push_irq_moderation = falcon_push_irq_moderation,
1813         .push_multicast_hash = falcon_push_multicast_hash,
1814         .reconfigure_port = falcon_reconfigure_port,
1815         .get_wol = falcon_get_wol,
1816         .set_wol = falcon_set_wol,
1817         .resume_wol = efx_port_dummy_op_void,
1818         .test_nvram = falcon_test_nvram,
1819         .default_mac_ops = &falcon_xmac_operations,
1820
1821         .revision = EFX_REV_FALCON_A1,
1822         .mem_map_size = 0x20000,
1823         .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
1824         .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
1825         .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
1826         .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
1827         .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
1828         .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
1829         .rx_buffer_padding = 0x24,
1830         .max_interrupt_mode = EFX_INT_MODE_MSI,
1831         .phys_addr_channels = 4,
1832         .tx_dc_base = 0x130000,
1833         .rx_dc_base = 0x100000,
1834         .offload_features = NETIF_F_IP_CSUM,
1835         .reset_world_flags = ETH_RESET_IRQ,
1836 };
1837
1838 struct efx_nic_type falcon_b0_nic_type = {
1839         .probe = falcon_probe_nic,
1840         .remove = falcon_remove_nic,
1841         .init = falcon_init_nic,
1842         .fini = efx_port_dummy_op_void,
1843         .monitor = falcon_monitor,
1844         .reset = falcon_reset_hw,
1845         .probe_port = falcon_probe_port,
1846         .remove_port = falcon_remove_port,
1847         .prepare_flush = falcon_prepare_flush,
1848         .update_stats = falcon_update_nic_stats,
1849         .start_stats = falcon_start_nic_stats,
1850         .stop_stats = falcon_stop_nic_stats,
1851         .set_id_led = falcon_set_id_led,
1852         .push_irq_moderation = falcon_push_irq_moderation,
1853         .push_multicast_hash = falcon_push_multicast_hash,
1854         .reconfigure_port = falcon_reconfigure_port,
1855         .get_wol = falcon_get_wol,
1856         .set_wol = falcon_set_wol,
1857         .resume_wol = efx_port_dummy_op_void,
1858         .test_registers = falcon_b0_test_registers,
1859         .test_nvram = falcon_test_nvram,
1860         .default_mac_ops = &falcon_xmac_operations,
1861
1862         .revision = EFX_REV_FALCON_B0,
1863         /* Map everything up to and including the RSS indirection
1864          * table.  Don't map the MSI-X table or MSI-X PBA, since Linux
1865          * requires that they not be mapped.  */
1866         .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
1867                          FR_BZ_RX_INDIRECTION_TBL_STEP *
1868                          FR_BZ_RX_INDIRECTION_TBL_ROWS),
1869         .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
1870         .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
1871         .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
1872         .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
1873         .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
1874         .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
1875         .rx_buffer_hash_size = 0x10,
1876         .rx_buffer_padding = 0,
1877         .max_interrupt_mode = EFX_INT_MODE_MSIX,
1878         .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
1879                                    * interrupt handler only supports 32
1880                                    * channels */
1881         .tx_dc_base = 0x130000,
1882         .rx_dc_base = 0x100000,
1883         .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH,
1884         .reset_world_flags = ETH_RESET_IRQ,
1885 };
1886