sfc: Fix two causes of flush failure
drivers/net/ethernet/sfc/nic.c (pandora-kernel.git)
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
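/* The ORDER values encode these cache sizes in units of 8 descriptors
 * (size == 8 << ORDER): 16 == 8 << 1 and 64 == 8 << 3.
 */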

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for events every EFX_FLUSH_INTERVAL ms, and check up to
 * EFX_FLUSH_POLL_COUNT times
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100

/* Size and alignment of special buffers (4KB) */
#define EFX_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Generated event code for efx_generate_test_event() */
#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
	(0x00010100 + (_channel)->channel)

/* Generated event code for efx_generate_fill_event() */
#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
	(0x00010200 + (_channel)->channel)

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

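/* Write an entry into the NIC's buffer table */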
static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

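/* Return whether two owords differ in any of the bits covered by the mask */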
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

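/* Decide whether to use TX push: pushing the first new descriptor along
 * with the doorbell write only helps if the queue was empty at the point
 * these descriptors were added.  empty_read_count records the write count
 * at which the queue was last seen empty, tagged with
 * EFX_EMPTY_COUNT_VALID.
 */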
static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

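	/* On Falcon B0 and later, checksum generation is controlled
	 * per queue via flags in the same descriptor pointer table
	 * entry */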
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

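	/* On Falcon A1, checksum generation is instead controlled by a
	 * global bitmap with one bit per queue; a set bit appears to
	 * disable checksum generation for that queue */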
	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

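	/* Ensure the descriptor writes above are visible to the NIC
	 * before it sees the updated write pointer */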
	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_ev_pkt_ok bit. */
static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				 const efx_qword_t *event,
				 bool *rx_ev_pkt_ok,
				 bool *discard)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}

/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.  The "is multicast" and "matches
 * multicast filter" flags can also be used to discard non-matching
 * multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		checksummed =
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
	} else {
		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			discard = true;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}

static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned code;

	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
		; /* ignore */
	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
		/* The queue must be empty, so we won't receive any rx
		 * events; efx_process_channel() therefore won't refill
		 * the queue.  Refill it here. */
		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
	else
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

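/* Process events on a channel, up to the given NAPI budget.  Returns the
 * number of received packets handled, which counts towards the budget;
 * TX completions are bounded separately by the size of the TX ring.
 */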
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

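	/* Siena has a per-queue timer table entry; leave the timer
	 * disabled here (moderation is pushed separately below) */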
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_nic_generate_test_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

void efx_nic_generate_fill_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

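/* Scan channel 0's event queue for flush completion events, updating the
 * flushed state of the corresponding TX and RX queues.  Any other events
 * found are discarded, since the queues are about to be torn down anyway.
 */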
static void efx_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;

	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!efx_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
				tx_queue = efx_get_tx_queue(
					efx, ev_queue / EFX_TXQ_TYPES,
					ev_queue % EFX_TXQ_TYPES);
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_channels) {
				rx_queue = efx_get_rx_queue(efx, ev_queue);
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		++read_ptr;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}

/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	/* If necessary prepare the hardware for flushing */
	efx->type->prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised)
				efx_flush_tx_queue(tx_queue);
		}
	}

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_queue->flushed == FLUSH_PENDING)
					++rx_pending;
			}
		}
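		/* Issue new RX flush requests, up to the hardware limit of
		 * EFX_RX_FLUSH_COUNT in flight, and re-check whether any
		 * outstanding TX flushes have completed */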
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_pending == EFX_RX_FLUSH_COUNT)
					break;
				if (rx_queue->flushed == FLUSH_FAILED ||
				    rx_queue->flushed == FLUSH_NONE) {
					efx_flush_rx_queue(rx_queue);
					++rx_pending;
				}
			}
			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
				if (tx_queue->initialised &&
				    tx_queue->flushed != FLUSH_DONE) {
					efx_oword_t txd_ptr_tbl;

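					/* Don't rely solely on flush-done
					 * events: read back the FLUSH and
					 * EN bits to see whether the flush
					 * has actually completed */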
1267                                         efx_reado_table(efx, &txd_ptr_tbl,
1268                                                         FR_BZ_TX_DESC_PTR_TBL,
1269                                                         tx_queue->queue);
1270                                         if (EFX_OWORD_FIELD(txd_ptr_tbl,
1271                                                             FRF_AZ_TX_DESCQ_FLUSH) ||
1272                                             EFX_OWORD_FIELD(txd_ptr_tbl,
1273                                                             FRF_AZ_TX_DESCQ_EN))
1274                                                 ++tx_pending;
1275                                         else
1276                                                 tx_queue->flushed = FLUSH_DONE;
1277                                 }
1278                         }
1279                 }
1280
1281                 if (rx_pending == 0 && tx_pending == 0) {
1282                         efx->type->finish_flush(efx);
1283                         return 0;
1284                 }
1285
1286                 msleep(EFX_FLUSH_INTERVAL);
1287                 efx_poll_flush_events(efx);
1288         }
1289
1290         /* Mark the queues as all flushed. We're going to return failure
1291          * leading to a reset, or fake up success anyway */
1292         efx_for_each_channel(channel, efx) {
1293                 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1294                         if (tx_queue->initialised &&
1295                             tx_queue->flushed != FLUSH_DONE)
1296                                 netif_err(efx, hw, efx->net_dev,
1297                                           "tx queue %d flush command timed out\n",
1298                                           tx_queue->queue);
1299                         tx_queue->flushed = FLUSH_DONE;
1300                 }
1301                 efx_for_each_channel_rx_queue(rx_queue, channel) {
1302                         if (rx_queue->flushed != FLUSH_DONE)
1303                                 netif_err(efx, hw, efx->net_dev,
1304                                           "rx queue %d flush command timed out\n",
1305                                           efx_rx_queue_index(rx_queue));
1306                         rx_queue->flushed = FLUSH_DONE;
1307                 }
1308         }
1309
1310         efx->type->finish_flush(efx);
1311         return -ETIMEDOUT;
1312 }
1313
1314 /**************************************************************************
1315  *
1316  * Hardware interrupts
1317  * The hardware interrupt handler does very little work; all the event
1318  * queue processing is carried out by per-channel tasklets.
1319  *
1320  **************************************************************************/
1321
1322 /* Enable/disable/generate interrupts */
1323 static inline void efx_nic_interrupts(struct efx_nic *efx,
1324                                       bool enabled, bool force)
1325 {
1326         efx_oword_t int_en_reg_ker;
1327
1328         EFX_POPULATE_OWORD_3(int_en_reg_ker,
1329                              FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
1330                              FRF_AZ_KER_INT_KER, force,
1331                              FRF_AZ_DRV_INT_EN_KER, enabled);
1332         efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1333 }
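
/* For reference, the three (enabled, force) combinations used by the
 * helpers in this file are:
 *
 *	efx_nic_interrupts(efx, true, false);	enable interrupts
 *	efx_nic_interrupts(efx, false, false);	disable interrupts
 *	efx_nic_interrupts(efx, true, true);	enable and force a test IRQ
 */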
1334
1335 void efx_nic_enable_interrupts(struct efx_nic *efx)
1336 {
1337         struct efx_channel *channel;
1338
1339         EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1340         wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1341
1342         /* Enable interrupts */
1343         efx_nic_interrupts(efx, true, false);
1344
1345         /* Force processing of all the channels to get the EVQ RPTRs up to
1346          * date */
1347         efx_for_each_channel(channel, efx)
1348                 efx_schedule_channel(channel);
1349 }
1350
1351 void efx_nic_disable_interrupts(struct efx_nic *efx)
1352 {
1353         /* Disable interrupts */
1354         efx_nic_interrupts(efx, false, false);
1355 }
1356
1357 /* Generate a test interrupt
1358  * Interrupts must already have been enabled, otherwise nasty things
1359  * may happen.
1360  */
1361 void efx_nic_generate_interrupt(struct efx_nic *efx)
1362 {
1363         efx_nic_interrupts(efx, true, true);
1364 }
1365
1366 /* Process a fatal interrupt
1367  * Disable bus mastering ASAP and schedule a reset
1368  */
1369 irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1370 {
1371         struct falcon_nic_data *nic_data = efx->nic_data;
1372         efx_oword_t *int_ker = efx->irq_status.addr;
1373         efx_oword_t fatal_intr;
1374         int error, mem_perr;
1375
1376         efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1377         error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1378
1379         netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1380                   EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1381                   EFX_OWORD_VAL(fatal_intr),
1382                   error ? "disabling bus mastering" : "no recognised error");
1383
1384         /* If this is a memory parity error dump which blocks are offending */
1385         mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1386                     EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1387         if (mem_perr) {
1388                 efx_oword_t reg;
1389                 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1390                 netif_err(efx, hw, efx->net_dev,
1391                           "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1392                           EFX_OWORD_VAL(reg));
1393         }
1394
1395         /* Disable both devices */
1396         pci_clear_master(efx->pci_dev);
1397         if (efx_nic_is_dual_func(efx))
1398                 pci_clear_master(nic_data->pci_dev2);
1399         efx_nic_disable_interrupts(efx);
1400
1401         /* Count errors and reset or disable the NIC accordingly */
1402         if (efx->int_error_count == 0 ||
1403             time_after(jiffies, efx->int_error_expire)) {
1404                 efx->int_error_count = 0;
1405                 efx->int_error_expire =
1406                         jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1407         }
1408         if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1409                 netif_err(efx, hw, efx->net_dev,
1410                           "SYSTEM ERROR - reset scheduled\n");
1411                 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1412         } else {
1413                 netif_err(efx, hw, efx->net_dev,
1414                           "SYSTEM ERROR - max number of errors seen. "
1415                           "NIC will be disabled\n");
1416                 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1417         }
1418
1419         return IRQ_HANDLED;
1420 }
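
/* The error accounting above implements a simple expiring window
 * (restated as a sketch; "count" and "expire" stand in for
 * efx->int_error_count and efx->int_error_expire):
 *
 *	if (count == 0 || time_after(jiffies, expire)) {
 *		count = 0;			// open a new window
 *		expire = jiffies + EFX_INT_ERROR_EXPIRE * HZ;
 *	}
 *	if (++count < EFX_MAX_INT_ERRORS)
 *		reset;				// transient - try to recover
 *	else
 *		disable;			// persistent - give up
 *
 * i.e. the NIC is only disabled if EFX_MAX_INT_ERRORS fatal errors
 * occur within one EFX_INT_ERROR_EXPIRE-second window.
 */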
1421
1422 /* Handle a legacy interrupt
1423  * Acknowledge the interrupt and schedule event queue processing.
1424  */
1425 static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1426 {
1427         struct efx_nic *efx = dev_id;
1428         efx_oword_t *int_ker = efx->irq_status.addr;
1429         irqreturn_t result = IRQ_NONE;
1430         struct efx_channel *channel;
1431         efx_dword_t reg;
1432         u32 queues;
1433         int syserr;
1434
1435         /* Could this be ours?  If interrupts are disabled then the
1436          * channel state may not be valid.
1437          */
1438         if (!efx->legacy_irq_enabled)
1439                 return result;
1440
1441         /* Read the ISR which also ACKs the interrupts */
1442         efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1443         queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1444
1445         /* Check to see if we have a serious error condition */
1446         if (queues & (1U << efx->fatal_irq_level)) {
1447                 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1448                 if (unlikely(syserr))
1449                         return efx_nic_fatal_interrupt(efx);
1450         }
1451
1452         if (queues != 0) {
1453                 if (EFX_WORKAROUND_15783(efx))
1454                         efx->irq_zero_count = 0;
1455
1456                 /* Schedule processing of any interrupting queues */
1457                 efx_for_each_channel(channel, efx) {
1458                         if (queues & 1)
1459                                 efx_schedule_channel(channel);
1460                         queues >>= 1;
1461                 }
1462                 result = IRQ_HANDLED;
1463
1464         } else if (EFX_WORKAROUND_15783(efx)) {
1465                 efx_qword_t *event;
1466
1467                 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1468                  * because this might be a shared interrupt. */
1469                 if (efx->irq_zero_count++ == 0)
1470                         result = IRQ_HANDLED;
1471
1472                 /* Ensure we schedule or rearm all event queues */
1473                 efx_for_each_channel(channel, efx) {
1474                         event = efx_event(channel, channel->eventq_read_ptr);
1475                         if (efx_event_present(event))
1476                                 efx_schedule_channel(channel);
1477                         else
1478                                 efx_nic_eventq_read_ack(channel);
1479                 }
1480         }
1481
1482         if (result == IRQ_HANDLED) {
1483                 efx->last_irq_cpu = raw_smp_processor_id();
1484                 netif_vdbg(efx, intr, efx->net_dev,
1485                            "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1486                            irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1487         }
1488
1489         return result;
1490 }
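
/* Worked example for the bitmask walk above (not driver code): the ISR
 * sets bit N when event queue N has fired, so queues == 0x5 schedules
 * channels 0 and 2:
 *
 *	queues = 0x5;				// 0b101
 *	channel 0: queues & 1 -> schedule; queues >>= 1;   // now 0b10
 *	channel 1: bit clear  -> skip;     queues >>= 1;   // now 0b01
 *	channel 2: queues & 1 -> schedule
 */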
1491
1492 /* Handle an MSI interrupt
1493  *
1494  * Handle an MSI hardware interrupt.  This routine schedules event
1495  * queue processing.  No interrupt acknowledgement cycle is necessary.
1496  * Also, we never need to check that the interrupt is for us, since
1497  * MSI interrupts cannot be shared.
1498  */
1499 static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1500 {
1501         struct efx_channel *channel = *(struct efx_channel **)dev_id;
1502         struct efx_nic *efx = channel->efx;
1503         efx_oword_t *int_ker = efx->irq_status.addr;
1504         int syserr;
1505
1506         efx->last_irq_cpu = raw_smp_processor_id();
1507         netif_vdbg(efx, intr, efx->net_dev,
1508                    "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1509                    irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1510
1511         /* Check to see if we have a serious error condition */
1512         if (channel->channel == efx->fatal_irq_level) {
1513                 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1514                 if (unlikely(syserr))
1515                         return efx_nic_fatal_interrupt(efx);
1516         }
1517
1518         /* Schedule processing of the channel */
1519         efx_schedule_channel(channel);
1520
1521         return IRQ_HANDLED;
1522 }
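
/* Note on the dev_id cast above: efx_nic_init_interrupt() below passes
 * &efx->channel[n] -- the address of a slot in an array of channel
 * pointers -- as dev_id, so the handler receives a struct efx_channel **
 * and must dereference it once to reach the channel.
 */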
1523
1524
1525 /* Set up the RSS indirection table.
1526  * This maps the hash value of a packet to an RX queue.
1527  */
1528 void efx_nic_push_rx_indir_table(struct efx_nic *efx)
1529 {
1530         size_t i = 0;
1531         efx_dword_t dword;
1532
1533         if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1534                 return;
1535
1536         BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1537                      FR_BZ_RX_INDIRECTION_TBL_ROWS);
1538
1539         for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1540                 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1541                                      efx->rx_indir_table[i]);
1542                 efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
1543         }
1544 }
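
/* A minimal sketch of how efx->rx_indir_table is typically filled before
 * being pushed (the driver populates it elsewhere; "n_rx_queues" is a
 * stand-in for the actual RX queue count):
 *
 *	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++)
 *		efx->rx_indir_table[i] = i % n_rx_queues;
 *
 * The NIC indexes the table with the low-order bits of the RX hash, so a
 * round-robin fill spreads flows evenly across the queues.
 */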
1545
1546 /* Hook interrupt handler(s)
1547  * Try MSI and then legacy interrupts.
1548  */
1549 int efx_nic_init_interrupt(struct efx_nic *efx)
1550 {
1551         struct efx_channel *channel;
1552         int rc;
1553
1554         if (!EFX_INT_MODE_USE_MSI(efx)) {
1555                 irq_handler_t handler;
1556                 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1557                         handler = efx_legacy_interrupt;
1558                 else
1559                         handler = falcon_legacy_interrupt_a1;
1560
1561                 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1562                                  efx->name, efx);
1563                 if (rc) {
1564                         netif_err(efx, drv, efx->net_dev,
1565                                   "failed to hook legacy IRQ %d\n",
1566                                   efx->pci_dev->irq);
1567                         goto fail1;
1568                 }
1569                 return 0;
1570         }
1571
1572         /* Hook MSI or MSI-X interrupt */
1573         efx_for_each_channel(channel, efx) {
1574                 rc = request_irq(channel->irq, efx_msi_interrupt,
1575                                  IRQF_PROBE_SHARED, /* Not shared */
1576                                  efx->channel_name[channel->channel],
1577                                  &efx->channel[channel->channel]);
1578                 if (rc) {
1579                         netif_err(efx, drv, efx->net_dev,
1580                                   "failed to hook IRQ %d\n", channel->irq);
1581                         goto fail2;
1582                 }
1583         }
1584
1585         return 0;
1586
1587  fail2:
1588         efx_for_each_channel(channel, efx)
1589                 free_irq(channel->irq, &efx->channel[channel->channel]);
1590  fail1:
1591         return rc;
1592 }
1593
1594 void efx_nic_fini_interrupt(struct efx_nic *efx)
1595 {
1596         struct efx_channel *channel;
1597         efx_oword_t reg;
1598
1599         /* Disable MSI/MSI-X interrupts */
1600         efx_for_each_channel(channel, efx) {
1601                 if (channel->irq)
1602                         free_irq(channel->irq, &efx->channel[channel->channel]);
1603         }
1604
1605         /* ACK legacy interrupt */
1606         if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1607                 efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1608         else
1609                 falcon_irq_ack_a1(efx);
1610
1611         /* Disable legacy interrupt */
1612         if (efx->legacy_irq)
1613                 free_irq(efx->legacy_irq, efx);
1614 }
1615
1616 u32 efx_nic_fpga_ver(struct efx_nic *efx)
1617 {
1618         efx_oword_t altera_build;
1619         efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1620         return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1621 }
1622
1623 void efx_nic_init_common(struct efx_nic *efx)
1624 {
1625         efx_oword_t temp;
1626
1627         /* Set positions of descriptor caches in SRAM. */
1628         EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
1629                              efx->type->tx_dc_base / 8);
1630         efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1631         EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
1632                              efx->type->rx_dc_base / 8);
1633         efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1634
1635         /* Set TX descriptor cache size. */
1636         BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1637         EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1638         efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1639
1640         /* Set RX descriptor cache size.  Set low watermark to size-8, as
1641          * this allows most efficient prefetching.
1642          */
1643         BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1644         EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1645         efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1646         EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1647         efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
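        /* Worked example for the sizes above: RX_DC_ENTRIES == 64 is
         * encoded as order 3 (8 << 3 == 64), and the prefetch low
         * watermark is programmed as 64 - 8 == 56 entries.
         */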
1648
1649         /* Program INT_KER address */
1650         EFX_POPULATE_OWORD_2(temp,
1651                              FRF_AZ_NORM_INT_VEC_DIS_KER,
1652                              EFX_INT_MODE_USE_MSI(efx),
1653                              FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1654         efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1655
1656         if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1657                 /* Use an interrupt level unused by event queues */
1658                 efx->fatal_irq_level = 0x1f;
1659         else
1660                 /* Use a valid MSI-X vector */
1661                 efx->fatal_irq_level = 0;
1662
1663         /* Enable all the genuinely fatal interrupts.  (They are still
1664          * masked by the overall interrupt mask, controlled by
1665          * falcon_interrupts()).
1666          *
1667          * Note: All other fatal interrupts are enabled
1668          */
1669         EFX_POPULATE_OWORD_3(temp,
1670                              FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1671                              FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1672                              FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1673         if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1674                 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1675         EFX_INVERT_OWORD(temp);
1676         efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1677
1678         efx_nic_push_rx_indir_table(efx);
1679
1680         /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1681          * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1682          */
1683         efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1684         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1685         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1686         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1687         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1688         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1689         /* Enable SW_EV to inherit in char driver - assume harmless here */
1690         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1691         /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1692         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1693         /* Disable hardware watchdog which can misfire */
1694         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1695         /* Squash TX of packets of 16 bytes or less */
1696         if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1697                 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1698         efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1699
1700         if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1701                 EFX_POPULATE_OWORD_4(temp,
1702                                      /* Default values */
1703                                      FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1704                                      FRF_BZ_TX_PACE_SB_AF, 0xb,
1705                                      FRF_BZ_TX_PACE_FB_BASE, 0,
1706                                      /* Allow large pace values in the
1707                                       * fast bin. */
1708                                      FRF_BZ_TX_PACE_BIN_TH,
1709                                      FFE_BZ_TX_PACE_RESERVED);
1710                 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1711         }
1712 }
1713
1714 /* Register dump */
1715
1716 #define REGISTER_REVISION_A     1
1717 #define REGISTER_REVISION_B     2
1718 #define REGISTER_REVISION_C     3
1719 #define REGISTER_REVISION_Z     3       /* latest revision */
1720
1721 struct efx_nic_reg {
1722         u32 offset:24;
1723         u32 min_revision:2, max_revision:2;
1724 };
1725
1726 #define REGISTER(name, min_rev, max_rev) {                              \
1727         FR_ ## min_rev ## max_rev ## _ ## name,                         \
1728         REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev    \
1729 }
1730 #define REGISTER_AA(name) REGISTER(name, A, A)
1731 #define REGISTER_AB(name) REGISTER(name, A, B)
1732 #define REGISTER_AZ(name) REGISTER(name, A, Z)
1733 #define REGISTER_BB(name) REGISTER(name, B, B)
1734 #define REGISTER_BZ(name) REGISTER(name, B, Z)
1735 #define REGISTER_CZ(name) REGISTER(name, C, Z)
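
/* Illustrative expansion (not in the original source): with the macros
 * above, REGISTER_BZ(RX_FILTER_CTL) pastes to
 *
 *	{ FR_BZ_RX_FILTER_CTL, REGISTER_REVISION_B, REGISTER_REVISION_Z }
 *
 * i.e. { offset, 2, 3 }, so that register is included in dumps from
 * revision B onwards.
 */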
1736
1737 static const struct efx_nic_reg efx_nic_regs[] = {
1738         REGISTER_AZ(ADR_REGION),
1739         REGISTER_AZ(INT_EN_KER),
1740         REGISTER_BZ(INT_EN_CHAR),
1741         REGISTER_AZ(INT_ADR_KER),
1742         REGISTER_BZ(INT_ADR_CHAR),
1743         /* INT_ACK_KER is WO */
1744         /* INT_ISR0 is RC */
1745         REGISTER_AZ(HW_INIT),
1746         REGISTER_CZ(USR_EV_CFG),
1747         REGISTER_AB(EE_SPI_HCMD),
1748         REGISTER_AB(EE_SPI_HADR),
1749         REGISTER_AB(EE_SPI_HDATA),
1750         REGISTER_AB(EE_BASE_PAGE),
1751         REGISTER_AB(EE_VPD_CFG0),
1752         /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
1753         /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
1754         /* PCIE_CORE_INDIRECT is indirect */
1755         REGISTER_AB(NIC_STAT),
1756         REGISTER_AB(GPIO_CTL),
1757         REGISTER_AB(GLB_CTL),
1758         /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
1759         REGISTER_BZ(DP_CTRL),
1760         REGISTER_AZ(MEM_STAT),
1761         REGISTER_AZ(CS_DEBUG),
1762         REGISTER_AZ(ALTERA_BUILD),
1763         REGISTER_AZ(CSR_SPARE),
1764         REGISTER_AB(PCIE_SD_CTL0123),
1765         REGISTER_AB(PCIE_SD_CTL45),
1766         REGISTER_AB(PCIE_PCS_CTL_STAT),
1767         /* DEBUG_DATA_OUT is not used */
1768         /* DRV_EV is WO */
1769         REGISTER_AZ(EVQ_CTL),
1770         REGISTER_AZ(EVQ_CNT1),
1771         REGISTER_AZ(EVQ_CNT2),
1772         REGISTER_AZ(BUF_TBL_CFG),
1773         REGISTER_AZ(SRM_RX_DC_CFG),
1774         REGISTER_AZ(SRM_TX_DC_CFG),
1775         REGISTER_AZ(SRM_CFG),
1776         /* BUF_TBL_UPD is WO */
1777         REGISTER_AZ(SRM_UPD_EVQ),
1778         REGISTER_AZ(SRAM_PARITY),
1779         REGISTER_AZ(RX_CFG),
1780         REGISTER_BZ(RX_FILTER_CTL),
1781         /* RX_FLUSH_DESCQ is WO */
1782         REGISTER_AZ(RX_DC_CFG),
1783         REGISTER_AZ(RX_DC_PF_WM),
1784         REGISTER_BZ(RX_RSS_TKEY),
1785         /* RX_NODESC_DROP is RC */
1786         REGISTER_AA(RX_SELF_RST),
1787         /* RX_DEBUG, RX_PUSH_DROP are not used */
1788         REGISTER_CZ(RX_RSS_IPV6_REG1),
1789         REGISTER_CZ(RX_RSS_IPV6_REG2),
1790         REGISTER_CZ(RX_RSS_IPV6_REG3),
1791         /* TX_FLUSH_DESCQ is WO */
1792         REGISTER_AZ(TX_DC_CFG),
1793         REGISTER_AA(TX_CHKSM_CFG),
1794         REGISTER_AZ(TX_CFG),
1795         /* TX_PUSH_DROP is not used */
1796         REGISTER_AZ(TX_RESERVED),
1797         REGISTER_BZ(TX_PACE),
1798         /* TX_PACE_DROP_QID is RC */
1799         REGISTER_BB(TX_VLAN),
1800         REGISTER_BZ(TX_IPFIL_PORTEN),
1801         REGISTER_AB(MD_TXD),
1802         REGISTER_AB(MD_RXD),
1803         REGISTER_AB(MD_CS),
1804         REGISTER_AB(MD_PHY_ADR),
1805         REGISTER_AB(MD_ID),
1806         /* MD_STAT is RC */
1807         REGISTER_AB(MAC_STAT_DMA),
1808         REGISTER_AB(MAC_CTRL),
1809         REGISTER_BB(GEN_MODE),
1810         REGISTER_AB(MAC_MC_HASH_REG0),
1811         REGISTER_AB(MAC_MC_HASH_REG1),
1812         REGISTER_AB(GM_CFG1),
1813         REGISTER_AB(GM_CFG2),
1814         /* GM_IPG and GM_HD are not used */
1815         REGISTER_AB(GM_MAX_FLEN),
1816         /* GM_TEST is not used */
1817         REGISTER_AB(GM_ADR1),
1818         REGISTER_AB(GM_ADR2),
1819         REGISTER_AB(GMF_CFG0),
1820         REGISTER_AB(GMF_CFG1),
1821         REGISTER_AB(GMF_CFG2),
1822         REGISTER_AB(GMF_CFG3),
1823         REGISTER_AB(GMF_CFG4),
1824         REGISTER_AB(GMF_CFG5),
1825         REGISTER_BB(TX_SRC_MAC_CTL),
1826         REGISTER_AB(XM_ADR_LO),
1827         REGISTER_AB(XM_ADR_HI),
1828         REGISTER_AB(XM_GLB_CFG),
1829         REGISTER_AB(XM_TX_CFG),
1830         REGISTER_AB(XM_RX_CFG),
1831         REGISTER_AB(XM_MGT_INT_MASK),
1832         REGISTER_AB(XM_FC),
1833         REGISTER_AB(XM_PAUSE_TIME),
1834         REGISTER_AB(XM_TX_PARAM),
1835         REGISTER_AB(XM_RX_PARAM),
1836         /* XM_MGT_INT_MSK (note no 'A') is RC */
1837         REGISTER_AB(XX_PWR_RST),
1838         REGISTER_AB(XX_SD_CTL),
1839         REGISTER_AB(XX_TXDRV_CTL),
1840         /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
1841         /* XX_CORE_STAT is partly RC */
1842 };
1843
1844 struct efx_nic_reg_table {
1845         u32 offset:24;
1846         u32 min_revision:2, max_revision:2;
1847         u32 step:6, rows:21;
1848 };
1849
1850 #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
1851         offset,                                                         \
1852         REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,   \
1853         step, rows                                                      \
1854 }
1855 #define REGISTER_TABLE(name, min_rev, max_rev)                          \
1856         REGISTER_TABLE_DIMENSIONS(                                      \
1857                 name, FR_ ## min_rev ## max_rev ## _ ## name,           \
1858                 min_rev, max_rev,                                       \
1859                 FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,        \
1860                 FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
1861 #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
1862 #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
1863 #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
1864 #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
1865 #define REGISTER_TABLE_BB_CZ(name)                                      \
1866         REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,           \
1867                                   FR_BZ_ ## name ## _STEP,              \
1868                                   FR_BB_ ## name ## _ROWS),             \
1869         REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,           \
1870                                   FR_BZ_ ## name ## _STEP,              \
1871                                   FR_CZ_ ## name ## _ROWS)
1872 #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
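
/* Illustrative expansion: REGISTER_TABLE_BB_CZ(TIMER_TBL) emits two
 * entries sharing the FR_BZ_ offset and step but carrying per-revision
 * row counts:
 *
 *	{ FR_BZ_TIMER_TBL, 2, 2, FR_BZ_TIMER_TBL_STEP, FR_BB_TIMER_TBL_ROWS },
 *	{ FR_BZ_TIMER_TBL, 3, 3, FR_BZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS },
 *
 * letting revision B and revision C+ NICs dump differently sized tables
 * at the same address.
 */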
1873
1874 static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1875         /* DRIVER is not used */
1876         /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
1877         REGISTER_TABLE_BB(TX_IPFIL_TBL),
1878         REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
1879         REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
1880         REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
1881         REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
1882         REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
1883         REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
1884         REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
1885         /* We can't reasonably read all of the buffer table (up to 8MB!).
1886          * However this driver will only use a few entries.  Reading
1887          * 1K entries allows for some expansion of queue count and
1888          * size before we need to change the version. */
1889         REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
1890                                   A, A, 8, 1024),
1891         REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
1892                                   B, Z, 8, 1024),
1893         REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
1894         REGISTER_TABLE_BB_CZ(TIMER_TBL),
1895         REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
1896         REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
1897         /* TX_FILTER_TBL0 is huge and not used by this driver */
1898         REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
1899         REGISTER_TABLE_CZ(MC_TREG_SMEM),
1900         /* MSIX_PBA_TABLE is not mapped */
1901         /* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
1902         REGISTER_TABLE_BZ(RX_FILTER_TBL0),
1903 };
1904
1905 size_t efx_nic_get_regs_len(struct efx_nic *efx)
1906 {
1907         const struct efx_nic_reg *reg;
1908         const struct efx_nic_reg_table *table;
1909         size_t len = 0;
1910
1911         for (reg = efx_nic_regs;
1912              reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1913              reg++)
1914                 if (efx->type->revision >= reg->min_revision &&
1915                     efx->type->revision <= reg->max_revision)
1916                         len += sizeof(efx_oword_t);
1917
1918         for (table = efx_nic_reg_tables;
1919              table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1920              table++)
1921                 if (efx->type->revision >= table->min_revision &&
1922                     efx->type->revision <= table->max_revision)
1923                         len += table->rows * min_t(size_t, table->step, 16);
1924
1925         return len;
1926 }
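
/* Worked example (sketch): a plain register always contributes
 * sizeof(efx_oword_t) == 16 bytes, while a table with step 8 and 1024
 * rows contributes 1024 * min(8, 16) == 8192 bytes.  Rows wider than
 * 16 bytes are clamped to 16 by the min_t() above, matching the
 * truncated reads in efx_nic_get_regs() below.
 */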
1927
1928 void efx_nic_get_regs(struct efx_nic *efx, void *buf)
1929 {
1930         const struct efx_nic_reg *reg;
1931         const struct efx_nic_reg_table *table;
1932
1933         for (reg = efx_nic_regs;
1934              reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1935              reg++) {
1936                 if (efx->type->revision >= reg->min_revision &&
1937                     efx->type->revision <= reg->max_revision) {
1938                         efx_reado(efx, (efx_oword_t *)buf, reg->offset);
1939                         buf += sizeof(efx_oword_t);
1940                 }
1941         }
1942
1943         for (table = efx_nic_reg_tables;
1944              table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1945              table++) {
1946                 size_t size, i;
1947
1948                 if (!(efx->type->revision >= table->min_revision &&
1949                       efx->type->revision <= table->max_revision))
1950                         continue;
1951
1952                 size = min_t(size_t, table->step, 16);
1953
1954                 for (i = 0; i < table->rows; i++) {
1955                         switch (table->step) {
1956                         case 4: /* 32-bit register or SRAM */
1957                                 efx_readd_table(efx, buf, table->offset, i);
1958                                 break;
1959                         case 8: /* 64-bit SRAM */
1960                                 efx_sram_readq(efx,
1961                                                efx->membase + table->offset,
1962                                                buf, i);
1963                                 break;
1964                         case 16: /* 128-bit register */
1965                                 efx_reado_table(efx, buf, table->offset, i);
1966                                 break;
1967                         case 32: /* 128-bit register, interleaved */
1968                                 efx_reado_table(efx, buf, table->offset, 2 * i);
1969                                 break;
1970                         default:
1971                                 WARN_ON(1);
1972                                 return;
1973                         }
1974                         buf += size;
1975                 }
1976         }
1977 }