1 /****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2011 Solarflare Communications Inc.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/interrupt.h>
14 #include <linux/pci.h>
15 #include <linux/module.h>
16 #include <linux/seq_file.h>
17 #include "net_driver.h"
18 #include "bitfield.h"
19 #include "efx.h"
20 #include "nic.h"
21 #include "regs.h"
22 #include "io.h"
23 #include "workarounds.h"
25 /**************************************************************************
27 * Configurable values
29 **************************************************************************
32 /* This is set to 16 for a good reason. In summary, if larger than
33 * 16, the descriptor cache holds more than a default socket
34 * buffer's worth of packets (for UDP we can only have at most one
35 * socket buffer's worth outstanding). This combined with the fact
36 * that we only get 1 TX event per descriptor cache means the NIC
37 * goes idle.
38 */
39 #define TX_DC_ENTRIES 16
40 #define TX_DC_ENTRIES_ORDER 1
42 #define RX_DC_ENTRIES 64
43 #define RX_DC_ENTRIES_ORDER 3
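/* Note: the _ORDER values encode the cache size as 8 << order, giving
 * the 16 TX and 64 RX entries above; efx_nic_init_common() below
 * asserts this relationship at build time via
 * BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)).
 */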
45 /* If EFX_MAX_INT_ERRORS internal errors occur within
46 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
47 * disable it.
48 */
49 #define EFX_INT_ERROR_EXPIRE 3600
50 #define EFX_MAX_INT_ERRORS 5
52 /* Depth of RX flush request fifo */
53 #define EFX_RX_FLUSH_COUNT 4
55 /* Driver generated events */
56 #define _EFX_CHANNEL_MAGIC_TEST 0x000101
57 #define _EFX_CHANNEL_MAGIC_FILL 0x000102
58 #define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
59 #define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
61 #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
62 #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
64 #define EFX_CHANNEL_MAGIC_TEST(_channel) \
65 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
66 #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
67 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
68 efx_rx_queue_index(_rx_queue))
69 #define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
71 efx_rx_queue_index(_rx_queue))
72 #define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
74 (_tx_queue)->queue)
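/* Worked example (illustrative): a TEST event for channel 3 encodes as
 * _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, 3) == (0x000101 << 8) | 3
 * == 0x00010103, from which _EFX_CHANNEL_MAGIC_CODE() recovers 0x000101.
 * A hypothetical decoder for the data byte would mirror it (sketch only;
 * the handlers below compare whole magic values instead):
 */
#define _EFX_CHANNEL_MAGIC_DATA(_magic) ((_magic) & 0xff)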
76 /**************************************************************************
78 * Solarstorm hardware access
80 **************************************************************************/
82 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
83 unsigned int index)
84 {
85 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
86 value, index);
87 }
89 /* Read the current event from the event queue */
90 static inline efx_qword_t *efx_event(struct efx_channel *channel,
91 unsigned int index)
92 {
93 return ((efx_qword_t *) (channel->eventq.addr)) +
94 (index & channel->eventq_mask);
97 /* See if an event is present
99 * We check both the high and low dword of the event for all ones. We
100 * wrote all ones when we cleared the event, and no valid event can
101 * have all ones in either its high or low dwords. This approach is
102 * robust against reordering.
104 * Note that using a single 64-bit comparison is incorrect; even
105 * though the CPU read will be atomic, the DMA write may not be.
107 static inline int efx_event_present(efx_qword_t *event)
109 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
110 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
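/* Illustrative usage sketch (an assumed caller polling for the next
 * event, much as efx_nic_process_eventq() and efx_nic_event_present()
 * below do):
 *
 *	efx_qword_t *p_event = efx_event(channel, channel->eventq_read_ptr);
 *	if (efx_event_present(p_event))
 *		handle FSF_AZ_EV_CODE of *p_event, then mark it all-ones;
 */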
113 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
114 const efx_oword_t *mask)
116 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
117 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
120 int efx_nic_test_registers(struct efx_nic *efx,
121 const struct efx_nic_register_test *regs,
122 size_t n_regs)
123 {
124 unsigned address = 0, i, j;
125 efx_oword_t mask, imask, original, reg, buf;
127 /* Falcon should be in loopback to isolate the XMAC from the PHY */
128 WARN_ON(!LOOPBACK_INTERNAL(efx));
130 for (i = 0; i < n_regs; ++i) {
131 address = regs[i].address;
132 mask = imask = regs[i].mask;
133 EFX_INVERT_OWORD(imask);
135 efx_reado(efx, &original, address);
137 /* bit sweep on and off */
138 for (j = 0; j < 128; j++) {
139 if (!EFX_EXTRACT_OWORD32(mask, j, j))
140 continue;
142 /* Test this testable bit can be set in isolation */
143 EFX_AND_OWORD(reg, original, mask);
144 EFX_SET_OWORD32(reg, j, j, 1);
146 efx_writeo(efx, &reg, address);
147 efx_reado(efx, &buf, address);
149 if (efx_masked_compare_oword(&reg, &buf, &mask))
150 goto fail;
152 /* Test this testable bit can be cleared in isolation */
153 EFX_OR_OWORD(reg, original, mask);
154 EFX_SET_OWORD32(reg, j, j, 0);
156 efx_writeo(efx, &reg, address);
157 efx_reado(efx, &buf, address);
159 if (efx_masked_compare_oword(&reg, &buf, &mask))
160 goto fail;
163 efx_writeo(efx, &original, address);
164 }
166 return 0;
168 fail:
169 netif_err(efx, hw, efx->net_dev,
170 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
171 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
172 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
173 return -EIO;
174 }
176 /**************************************************************************
178 * Special buffer handling
179 * Special buffers are used for event queues and the TX and RX
180 * descriptor rings.
182 *************************************************************************/
185 * Initialise a special buffer
187 * This will define a buffer (previously allocated via
188 * efx_alloc_special_buffer()) in the buffer table, allowing
189 * it to be used for event queues, descriptor rings etc.
191 static void
192 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
193 {
194 efx_qword_t buf_desc;
195 int index;
196 dma_addr_t dma_addr;
197 int i;
199 EFX_BUG_ON_PARANOID(!buffer->addr);
201 /* Write buffer descriptors to NIC */
202 for (i = 0; i < buffer->entries; i++) {
203 index = buffer->index + i;
204 dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
205 netif_dbg(efx, probe, efx->net_dev,
206 "mapping special buffer %d at %llx\n",
207 index, (unsigned long long)dma_addr);
208 EFX_POPULATE_QWORD_3(buf_desc,
209 FRF_AZ_BUF_ADR_REGION, 0,
210 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
211 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
212 efx_write_buf_tbl(efx, &buf_desc, index);
216 /* Unmaps a buffer and clears the buffer table entries */
217 static void
218 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
219 {
220 efx_oword_t buf_tbl_upd;
221 unsigned int start = buffer->index;
222 unsigned int end = (buffer->index + buffer->entries - 1);
224 if (!buffer->entries)
225 return;
227 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
228 buffer->index, buffer->index + buffer->entries - 1);
230 EFX_POPULATE_OWORD_4(buf_tbl_upd,
231 FRF_AZ_BUF_UPD_CMD, 0,
232 FRF_AZ_BUF_CLR_CMD, 1,
233 FRF_AZ_BUF_CLR_END_ID, end,
234 FRF_AZ_BUF_CLR_START_ID, start);
235 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
239 * Allocate a new special buffer
241 * This allocates memory for a new buffer, clears it and allocates a
242 * new buffer ID range. It does not write into the buffer table.
244 * This call will allocate 4KB buffers, since 8KB buffers can't be
245 * used for event queues and descriptor rings.
247 static int efx_alloc_special_buffer(struct efx_nic *efx,
248 struct efx_special_buffer *buffer,
249 unsigned int len)
250 {
251 len = ALIGN(len, EFX_BUF_SIZE);
253 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
254 &buffer->dma_addr, GFP_KERNEL);
255 if (!buffer->addr)
256 return -ENOMEM;
257 buffer->len = len;
258 buffer->entries = len / EFX_BUF_SIZE;
259 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
261 /* All zeros is a potentially valid event so memset to 0xff */
262 memset(buffer->addr, 0xff, len);
264 /* Select new buffer ID */
265 buffer->index = efx->next_buffer_table;
266 efx->next_buffer_table += buffer->entries;
268 netif_dbg(efx, probe, efx->net_dev,
269 "allocating special buffers %d-%d at %llx+%x "
270 "(virt %p phys %llx)\n", buffer->index,
271 buffer->index + buffer->entries - 1,
272 (u64)buffer->dma_addr, len,
273 buffer->addr, (u64)virt_to_phys(buffer->addr));
275 return 0;
276 }
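/* Worked example (illustrative): a 512-entry descriptor ring of 8-byte
 * efx_qword_t entries asks for ALIGN(512 * 8, EFX_BUF_SIZE) == 4096
 * bytes, i.e. exactly one 4KB buffer-table entry, so buffer->entries
 * == 1 and a single buffer ID is consumed from next_buffer_table.
 */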
278 static void
279 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
280 {
281 if (!buffer->addr)
282 return;
284 netif_dbg(efx, hw, efx->net_dev,
285 "deallocating special buffers %d-%d at %llx+%x "
286 "(virt %p phys %llx)\n", buffer->index,
287 buffer->index + buffer->entries - 1,
288 (u64)buffer->dma_addr, buffer->len,
289 buffer->addr, (u64)virt_to_phys(buffer->addr));
291 dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
292 buffer->dma_addr);
293 buffer->addr = NULL;
297 /**************************************************************************
299 * Generic buffer handling
300 * These buffers are used for interrupt status and MAC stats
302 **************************************************************************/
304 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
305 unsigned int len)
306 {
307 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
308 &buffer->dma_addr);
309 if (!buffer->addr)
310 return -ENOMEM;
311 buffer->len = len;
312 memset(buffer->addr, 0, len);
313 return 0;
314 }
316 void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
318 if (buffer->addr) {
319 pci_free_consistent(efx->pci_dev, buffer->len,
320 buffer->addr, buffer->dma_addr);
321 buffer->addr = NULL;
322 }
325 /**************************************************************************
327 * TX path
329 **************************************************************************/
331 /* Returns a pointer to the specified transmit descriptor in the TX
332 * descriptor queue belonging to the specified channel.
334 static inline efx_qword_t *
335 efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
337 return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
340 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
341 static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
342 {
343 unsigned write_ptr;
344 efx_dword_t reg;
346 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
347 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
348 efx_writed_page(tx_queue->efx, &reg,
349 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
352 /* Write pointer and first descriptor for TX descriptor ring */
353 static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
354 const efx_qword_t *txd)
355 {
356 unsigned write_ptr;
357 efx_oword_t reg;
359 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
360 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
362 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
363 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
364 FRF_AZ_TX_DESC_WPTR, write_ptr);
365 reg.qword[0] = *txd;
366 efx_writeo_page(tx_queue->efx, &reg,
367 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
370 static inline bool
371 efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
372 {
373 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
375 if (empty_read_count == 0)
376 return false;
378 tx_queue->empty_read_count = 0;
379 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
380 }
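/* Worked example of the test above (illustrative): empty_read_count
 * holds the write_count at which the completion path last saw the
 * queue empty, with EFX_EMPTY_COUNT_VALID set.  XORing it with the
 * current write_count and masking off the valid flag yields zero only
 * if nothing has been queued since, i.e. this descriptor is the first
 * onto an empty queue and may be pushed along with the doorbell.
 */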
382 /* For each entry inserted into the software descriptor ring, create a
383 * descriptor in the hardware TX descriptor ring (in host memory), and
384 * ring the doorbell.
385 */
386 void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
389 struct efx_tx_buffer *buffer;
390 efx_qword_t *txd;
391 unsigned write_ptr;
392 unsigned old_write_count = tx_queue->write_count;
394 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
396 do {
397 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
398 buffer = &tx_queue->buffer[write_ptr];
399 txd = efx_tx_desc(tx_queue, write_ptr);
400 ++tx_queue->write_count;
402 /* Create TX descriptor ring entry */
403 EFX_POPULATE_QWORD_4(*txd,
404 FSF_AZ_TX_KER_CONT, buffer->continuation,
405 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
406 FSF_AZ_TX_KER_BUF_REGION, 0,
407 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
408 } while (tx_queue->write_count != tx_queue->insert_count);
410 wmb(); /* Ensure descriptors are written before they are fetched */
412 if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
413 txd = efx_tx_desc(tx_queue,
414 old_write_count & tx_queue->ptr_mask);
415 efx_push_tx_desc(tx_queue, txd);
418 efx_notify_tx_desc(tx_queue);
422 /* Allocate hardware resources for a TX queue */
423 int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
425 struct efx_nic *efx = tx_queue->efx;
426 unsigned entries;
428 entries = tx_queue->ptr_mask + 1;
429 return efx_alloc_special_buffer(efx, &tx_queue->txd,
430 entries * sizeof(efx_qword_t));
433 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
435 struct efx_nic *efx = tx_queue->efx;
436 efx_oword_t reg;
438 /* Pin TX descriptor ring */
439 efx_init_special_buffer(efx, &tx_queue->txd);
441 /* Push TX descriptor ring to card */
442 EFX_POPULATE_OWORD_10(reg,
443 FRF_AZ_TX_DESCQ_EN, 1,
444 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
445 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
446 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
447 FRF_AZ_TX_DESCQ_EVQ_ID,
448 tx_queue->channel->channel,
449 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
450 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
451 FRF_AZ_TX_DESCQ_SIZE,
452 __ffs(tx_queue->txd.entries),
453 FRF_AZ_TX_DESCQ_TYPE, 0,
454 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
456 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
457 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
458 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
459 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
460 !csum);
463 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
464 tx_queue->queue);
466 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
467 /* Only 128 bits in this register */
468 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
470 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
471 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
472 clear_bit_le(tx_queue->queue, (void *)&reg);
473 else
474 set_bit_le(tx_queue->queue, (void *)&reg);
475 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
478 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
479 EFX_POPULATE_OWORD_1(reg,
480 FRF_BZ_TX_PACE,
481 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
482 FFE_BZ_TX_PACE_OFF :
483 FFE_BZ_TX_PACE_RESERVED);
484 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
485 tx_queue->queue);
489 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
491 struct efx_nic *efx = tx_queue->efx;
492 efx_oword_t tx_flush_descq;
494 EFX_POPULATE_OWORD_2(tx_flush_descq,
495 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
496 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
497 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
500 void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
502 struct efx_nic *efx = tx_queue->efx;
503 efx_oword_t tx_desc_ptr;
505 /* Remove TX descriptor ring from card */
506 EFX_ZERO_OWORD(tx_desc_ptr);
507 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
508 tx_queue->queue);
510 /* Unpin TX descriptor ring */
511 efx_fini_special_buffer(efx, &tx_queue->txd);
514 /* Free buffers backing TX queue */
515 void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
517 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
520 /**************************************************************************
522 * RX path
524 **************************************************************************/
526 /* Returns a pointer to the specified descriptor in the RX descriptor queue */
527 static inline efx_qword_t *
528 efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
530 return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
533 /* This creates an entry in the RX descriptor queue */
534 static inline void
535 efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
536 {
537 struct efx_rx_buffer *rx_buf;
538 efx_qword_t *rxd;
540 rxd = efx_rx_desc(rx_queue, index);
541 rx_buf = efx_rx_buffer(rx_queue, index);
542 EFX_POPULATE_QWORD_3(*rxd,
543 FSF_AZ_RX_KER_BUF_SIZE,
544 rx_buf->len -
545 rx_queue->efx->type->rx_buffer_padding,
546 FSF_AZ_RX_KER_BUF_REGION, 0,
547 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
550 /* This writes to the RX_DESC_WPTR register for the specified receive
553 void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
555 struct efx_nic *efx = rx_queue->efx;
556 efx_dword_t reg;
557 unsigned write_ptr;
559 while (rx_queue->notified_count != rx_queue->added_count) {
560 efx_build_rx_desc(
561 rx_queue,
562 rx_queue->notified_count & rx_queue->ptr_mask);
563 ++rx_queue->notified_count;
567 write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
568 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
569 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
570 efx_rx_queue_index(rx_queue));
573 int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
575 struct efx_nic *efx = rx_queue->efx;
576 unsigned entries;
578 entries = rx_queue->ptr_mask + 1;
579 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
580 entries * sizeof(efx_qword_t));
583 void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
585 efx_oword_t rx_desc_ptr;
586 struct efx_nic *efx = rx_queue->efx;
587 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
588 bool iscsi_digest_en = is_b0;
590 netif_dbg(efx, hw, efx->net_dev,
591 "RX queue %d ring in special buffers %d-%d\n",
592 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
593 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
595 /* Pin RX descriptor ring */
596 efx_init_special_buffer(efx, &rx_queue->rxd);
598 /* Push RX descriptor ring to card */
599 EFX_POPULATE_OWORD_10(rx_desc_ptr,
600 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
601 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
602 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
603 FRF_AZ_RX_DESCQ_EVQ_ID,
604 efx_rx_queue_channel(rx_queue)->channel,
605 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
606 FRF_AZ_RX_DESCQ_LABEL,
607 efx_rx_queue_index(rx_queue),
608 FRF_AZ_RX_DESCQ_SIZE,
609 __ffs(rx_queue->rxd.entries),
610 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
611 /* For >=B0 this is scatter so disable */
612 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
613 FRF_AZ_RX_DESCQ_EN, 1);
614 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
615 efx_rx_queue_index(rx_queue));
618 static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
620 struct efx_nic *efx = rx_queue->efx;
621 efx_oword_t rx_flush_descq;
623 EFX_POPULATE_OWORD_2(rx_flush_descq,
624 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
625 FRF_AZ_RX_FLUSH_DESCQ,
626 efx_rx_queue_index(rx_queue));
627 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
630 void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
632 efx_oword_t rx_desc_ptr;
633 struct efx_nic *efx = rx_queue->efx;
635 /* Remove RX descriptor ring from card */
636 EFX_ZERO_OWORD(rx_desc_ptr);
637 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
638 efx_rx_queue_index(rx_queue));
640 /* Unpin RX descriptor ring */
641 efx_fini_special_buffer(efx, &rx_queue->rxd);
644 /* Free buffers backing RX queue */
645 void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
647 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
650 /**************************************************************************
652 * Flush handling
654 **************************************************************************/
656 /* efx_nic_flush_queues() must be woken up when all flushes are completed,
657 * or more RX flushes can be kicked off.
659 static bool efx_flush_wake(struct efx_nic *efx)
661 /* Ensure that all updates are visible to efx_nic_flush_queues() */
662 smp_mb();
664 return (atomic_read(&efx->drain_pending) == 0 ||
665 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
666 && atomic_read(&efx->rxq_flush_pending) > 0));
669 /* Flush all the transmit queues, and continue flushing receive queues until
670 * they're all flushed. Wait for the DRAIN events to be received so that there
671 * are no more RX and TX events left on any channel. */
672 int efx_nic_flush_queues(struct efx_nic *efx)
674 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
675 struct efx_channel *channel;
676 struct efx_rx_queue *rx_queue;
677 struct efx_tx_queue *tx_queue;
678 int rc = 0;
681 efx->type->prepare_flush(efx);
683 efx_for_each_channel(channel, efx) {
684 efx_for_each_channel_tx_queue(tx_queue, channel) {
685 atomic_inc(&efx->drain_pending);
686 efx_flush_tx_queue(tx_queue);
688 efx_for_each_channel_rx_queue(rx_queue, channel) {
689 atomic_inc(&efx->drain_pending);
690 rx_queue->flush_pending = true;
691 atomic_inc(&efx->rxq_flush_pending);
695 while (timeout && atomic_read(&efx->drain_pending) > 0) {
696 /* The hardware supports four concurrent rx flushes, each of
697 * which may need to be retried if there is an outstanding
698 * descriptor fetch
699 */
700 efx_for_each_channel(channel, efx) {
701 efx_for_each_channel_rx_queue(rx_queue, channel) {
702 if (atomic_read(&efx->rxq_flush_outstanding) >=
703 EFX_RX_FLUSH_COUNT)
704 break;
706 if (rx_queue->flush_pending) {
707 rx_queue->flush_pending = false;
708 atomic_dec(&efx->rxq_flush_pending);
709 atomic_inc(&efx->rxq_flush_outstanding);
710 efx_flush_rx_queue(rx_queue);
715 timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
716 timeout);
719 if (atomic_read(&efx->drain_pending)) {
720 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
721 "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
722 atomic_read(&efx->rxq_flush_outstanding),
723 atomic_read(&efx->rxq_flush_pending));
724 rc = -ETIMEDOUT;
726 atomic_set(&efx->drain_pending, 0);
727 atomic_set(&efx->rxq_flush_pending, 0);
728 atomic_set(&efx->rxq_flush_outstanding, 0);
729 }
731 return rc;
732 }
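/* Illustrative sequence for efx_nic_flush_queues() above (assuming one
 * TX and one RX queue): drain_pending rises to 2 and both flushes are
 * requested; a successful RX flush-done event posts an RX_DRAIN magic
 * event, each DRAIN event handled on the channel decrements
 * drain_pending and wakes flush_wq, and the waiter returns 0 once the
 * count hits zero or -ETIMEDOUT after the 5s budget expires.
 */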
736 /**************************************************************************
738 * Event queue processing
739 * Event queues are processed by per-channel tasklets.
741 **************************************************************************/
743 /* Update a channel's event queue's read pointer (RPTR) register
745 * This writes the EVQ_RPTR_REG register for the specified channel's
746 * event queue.
747 */
748 void efx_nic_eventq_read_ack(struct efx_channel *channel)
750 efx_dword_t reg;
751 struct efx_nic *efx = channel->efx;
753 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
754 channel->eventq_read_ptr & channel->eventq_mask);
755 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
756 channel->channel);
759 /* Use HW to insert a SW defined event */
760 void efx_generate_event(struct efx_nic *efx, unsigned int evq,
761 efx_qword_t *event)
762 {
763 efx_oword_t drv_ev_reg;
765 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
766 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
767 drv_ev_reg.u32[0] = event->u32[0];
768 drv_ev_reg.u32[1] = event->u32[1];
769 drv_ev_reg.u32[2] = 0;
770 drv_ev_reg.u32[3] = 0;
771 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
772 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
775 static void efx_magic_event(struct efx_channel *channel, u32 magic)
776 {
777 efx_qword_t event;
779 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
780 FSE_AZ_EV_CODE_DRV_GEN_EV,
781 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
782 efx_generate_event(channel->efx, channel->channel, &event);
785 /* Handle a transmit completion event
787 * The NIC batches TX completion events; the message we receive is of
788 * the form "complete all TX events up to this index".
790 static int
791 efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
792 {
793 unsigned int tx_ev_desc_ptr;
794 unsigned int tx_ev_q_label;
795 struct efx_tx_queue *tx_queue;
796 struct efx_nic *efx = channel->efx;
797 int tx_packets = 0;
799 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
800 return 0;
802 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
803 /* Transmit completion */
804 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
805 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
806 tx_queue = efx_channel_get_tx_queue(
807 channel, tx_ev_q_label % EFX_TXQ_TYPES);
808 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
809 tx_queue->ptr_mask);
810 channel->irq_mod_score += tx_packets;
811 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
812 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
813 /* Rewrite the FIFO write pointer */
814 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
815 tx_queue = efx_channel_get_tx_queue(
816 channel, tx_ev_q_label % EFX_TXQ_TYPES);
818 netif_tx_lock(efx->net_dev);
819 efx_notify_tx_desc(tx_queue);
820 netif_tx_unlock(efx->net_dev);
821 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
822 EFX_WORKAROUND_10727(efx)) {
823 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
824 } else {
825 netif_err(efx, tx_err, efx->net_dev,
826 "channel %d unexpected TX event "
827 EFX_QWORD_FMT"\n", channel->channel,
828 EFX_QWORD_VAL(*event));
829 }
831 return tx_packets;
832 }
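/* Worked example of the completion arithmetic above (illustrative):
 * with a 1024-entry ring (ptr_mask == 1023), read_count == 1020 and an
 * event carrying tx_ev_desc_ptr == 4, (4 - 1020) & 1023 == 8, so eight
 * descriptors completed and the masking handles ring wrap-around.
 */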
834 /* Detect errors included in the rx_evt_pkt_ok bit. */
835 static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
836 const efx_qword_t *event)
838 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
839 struct efx_nic *efx = rx_queue->efx;
840 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
841 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
842 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
843 bool rx_ev_other_err, rx_ev_pause_frm;
844 bool rx_ev_hdr_type, rx_ev_mcast_pkt;
845 unsigned rx_ev_pkt_type;
847 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
848 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
849 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
850 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
851 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
852 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
853 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
854 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
855 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
856 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
857 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
858 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
859 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
860 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
861 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
863 /* Every error apart from tobe_disc and pause_frm */
864 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
865 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
866 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
868 /* Count errors that are not in MAC stats. Ignore expected
869 * checksum errors during self-test. */
870 if (rx_ev_frm_trunc)
871 ++channel->n_rx_frm_trunc;
872 else if (rx_ev_tobe_disc)
873 ++channel->n_rx_tobe_disc;
874 else if (!efx->loopback_selftest) {
875 if (rx_ev_ip_hdr_chksum_err)
876 ++channel->n_rx_ip_hdr_chksum_err;
877 else if (rx_ev_tcp_udp_chksum_err)
878 ++channel->n_rx_tcp_udp_chksum_err;
881 /* TOBE_DISC is expected on unicast mismatches; don't print out an
882 * error message. FRM_TRUNC indicates RXDP dropped the packet due
883 * to a FIFO overflow.
884 */
886 if (rx_ev_other_err && net_ratelimit()) {
887 netif_dbg(efx, rx_err, efx->net_dev,
888 " RX queue %d unexpected RX event "
889 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
890 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
891 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
892 rx_ev_ip_hdr_chksum_err ?
893 " [IP_HDR_CHKSUM_ERR]" : "",
894 rx_ev_tcp_udp_chksum_err ?
895 " [TCP_UDP_CHKSUM_ERR]" : "",
896 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
897 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
898 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
899 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
900 rx_ev_pause_frm ? " [PAUSE]" : "");
904 /* The frame must be discarded if any of these are true. */
905 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
906 rx_ev_tobe_disc | rx_ev_pause_frm) ?
907 EFX_RX_PKT_DISCARD : 0;
910 /* Handle receive events that are not in-order. */
911 static void
912 efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
913 {
914 struct efx_nic *efx = rx_queue->efx;
915 unsigned expected, dropped;
917 expected = rx_queue->removed_count & rx_queue->ptr_mask;
918 dropped = (index - expected) & rx_queue->ptr_mask;
919 netif_info(efx, rx_err, efx->net_dev,
920 "dropped %d events (index=%d expected=%d)\n",
921 dropped, index, expected);
923 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
924 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
927 /* Handle a packet received event
929 * The NIC gives a "discard" flag if it's a unicast packet with the
930 * wrong destination address
931 * Also "is multicast" and "matches multicast filter" flags can be used to
932 * discard non-matching multicast packets.
934 static void
935 efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
936 {
937 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
938 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
939 unsigned expected_ptr;
940 bool rx_ev_pkt_ok;
941 u16 flags;
942 struct efx_rx_queue *rx_queue;
943 struct efx_nic *efx = channel->efx;
945 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
946 return;
948 /* Basic packet information */
949 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
950 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
951 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
952 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
953 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
954 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
955 channel->channel);
957 rx_queue = efx_channel_get_rx_queue(channel);
959 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
960 expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
961 if (unlikely(rx_ev_desc_ptr != expected_ptr))
962 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
964 if (likely(rx_ev_pkt_ok)) {
965 /* If packet is marked as OK and packet type is TCP/IP or
966 * UDP/IP, then we can rely on the hardware checksum.
968 flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
969 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
970 EFX_RX_PKT_CSUMMED : 0;
971 } else {
972 flags = efx_handle_rx_not_ok(rx_queue, event);
973 }
975 /* Detect multicast packets that didn't match the filter */
976 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
977 if (rx_ev_mcast_pkt) {
978 unsigned int rx_ev_mcast_hash_match =
979 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
981 if (unlikely(!rx_ev_mcast_hash_match)) {
982 ++channel->n_rx_mcast_mismatch;
983 flags |= EFX_RX_PKT_DISCARD;
987 channel->irq_mod_score += 2;
989 /* Handle received packet */
990 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
993 /* If this flush done event corresponds to a &struct efx_tx_queue, then
994 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
995 * of all transmit completions.
996 */
997 static void
998 efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
999 {
1000 struct efx_tx_queue *tx_queue;
1003 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1004 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
1005 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
1006 qid % EFX_TXQ_TYPES);
1008 efx_magic_event(tx_queue->channel,
1009 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1013 /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
1014 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1015 * the RX queue back to the mask of RX queues in need of flushing.
1017 static void
1018 efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1019 {
1020 struct efx_channel *channel;
1021 struct efx_rx_queue *rx_queue;
1025 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1026 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1027 if (qid >= efx->n_channels)
1028 return;
1029 channel = efx_get_channel(efx, qid);
1030 if (!efx_channel_has_rx_queue(channel))
1031 return;
1032 rx_queue = efx_channel_get_rx_queue(channel);
1034 if (failed) {
1035 netif_info(efx, hw, efx->net_dev,
1036 "RXQ %d flush retry\n", qid);
1037 rx_queue->flush_pending = true;
1038 atomic_inc(&efx->rxq_flush_pending);
1039 } else {
1040 efx_magic_event(efx_rx_queue_channel(rx_queue),
1041 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1042 }
1043 atomic_dec(&efx->rxq_flush_outstanding);
1044 if (efx_flush_wake(efx))
1045 wake_up(&efx->flush_wq);
1048 static void
1049 efx_handle_drain_event(struct efx_channel *channel)
1050 {
1051 struct efx_nic *efx = channel->efx;
1053 WARN_ON(atomic_read(&efx->drain_pending) == 0);
1054 atomic_dec(&efx->drain_pending);
1055 if (efx_flush_wake(efx))
1056 wake_up(&efx->flush_wq);
1059 static void
1060 efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
1061 {
1062 struct efx_nic *efx = channel->efx;
1063 struct efx_rx_queue *rx_queue =
1064 efx_channel_has_rx_queue(channel) ?
1065 efx_channel_get_rx_queue(channel) : NULL;
1066 unsigned magic, code;
1068 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1069 code = _EFX_CHANNEL_MAGIC_CODE(magic);
1071 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
1073 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
1074 /* The queue must be empty, so we won't receive any rx
1075 * events, so efx_process_channel() won't refill the
1076 * queue. Refill it here */
1077 efx_fast_push_rx_descriptors(rx_queue);
1078 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1079 rx_queue->enabled = false;
1080 efx_handle_drain_event(channel);
1081 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1082 efx_handle_drain_event(channel);
1083 } else {
1084 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1085 "generated event "EFX_QWORD_FMT"\n",
1086 channel->channel, EFX_QWORD_VAL(*event));
1090 static void
1091 efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1092 {
1093 struct efx_nic *efx = channel->efx;
1094 unsigned int ev_sub_code;
1095 unsigned int ev_sub_data;
1097 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1098 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1100 switch (ev_sub_code) {
1101 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1102 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1103 channel->channel, ev_sub_data);
1104 efx_handle_tx_flush_done(efx, event);
1105 break;
1106 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1107 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1108 channel->channel, ev_sub_data);
1109 efx_handle_rx_flush_done(efx, event);
1110 break;
1111 case FSE_AZ_EVQ_INIT_DONE_EV:
1112 netif_dbg(efx, hw, efx->net_dev,
1113 "channel %d EVQ %d initialised\n",
1114 channel->channel, ev_sub_data);
1115 break;
1116 case FSE_AZ_SRM_UPD_DONE_EV:
1117 netif_vdbg(efx, hw, efx->net_dev,
1118 "channel %d SRAM update done\n", channel->channel);
1119 break;
1120 case FSE_AZ_WAKE_UP_EV:
1121 netif_vdbg(efx, hw, efx->net_dev,
1122 "channel %d RXQ %d wakeup event\n",
1123 channel->channel, ev_sub_data);
1124 break;
1125 case FSE_AZ_TIMER_EV:
1126 netif_vdbg(efx, hw, efx->net_dev,
1127 "channel %d RX queue %d timer expired\n",
1128 channel->channel, ev_sub_data);
1129 break;
1130 case FSE_AA_RX_RECOVER_EV:
1131 netif_err(efx, rx_err, efx->net_dev,
1132 "channel %d seen DRIVER RX_RESET event. "
1133 "Resetting.\n", channel->channel);
1134 atomic_inc(&efx->rx_reset);
1135 efx_schedule_reset(efx,
1136 EFX_WORKAROUND_6555(efx) ?
1137 RESET_TYPE_RX_RECOVERY :
1138 RESET_TYPE_DISABLE);
1139 break;
1140 case FSE_BZ_RX_DSC_ERROR_EV:
1141 netif_err(efx, rx_err, efx->net_dev,
1142 "RX DMA Q %d reports descriptor fetch error."
1143 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
1144 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
1145 break;
1146 case FSE_BZ_TX_DSC_ERROR_EV:
1147 netif_err(efx, tx_err, efx->net_dev,
1148 "TX DMA Q %d reports descriptor fetch error."
1149 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
1150 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
1151 break;
1152 default:
1153 netif_vdbg(efx, hw, efx->net_dev,
1154 "channel %d unknown driver event code %d "
1155 "data %04x\n", channel->channel, ev_sub_code,
1156 ev_sub_data);
1157 break;
1161 int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1163 struct efx_nic *efx = channel->efx;
1164 unsigned int read_ptr;
1165 efx_qword_t event, *p_event;
1166 int ev_code;
1167 int tx_packets = 0;
1168 int spent = 0;
1170 read_ptr = channel->eventq_read_ptr;
1172 for (;;) {
1173 p_event = efx_event(channel, read_ptr);
1174 event = *p_event;
1176 if (!efx_event_present(&event))
1177 /* End of events */
1178 break;
1180 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1181 "channel %d event is "EFX_QWORD_FMT"\n",
1182 channel->channel, EFX_QWORD_VAL(event));
1184 /* Clear this event by marking it all ones */
1185 EFX_SET_QWORD(*p_event);
1187 ++read_ptr;
1189 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1192 case FSE_AZ_EV_CODE_RX_EV:
1193 efx_handle_rx_event(channel, &event);
1194 if (++spent == budget)
1195 goto out;
1196 break;
1197 case FSE_AZ_EV_CODE_TX_EV:
1198 tx_packets += efx_handle_tx_event(channel, &event);
1199 if (tx_packets > efx->txq_entries) {
1200 spent = budget;
1201 goto out;
1202 }
1203 break;
1204 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1205 efx_handle_generated_event(channel, &event);
1206 break;
1207 case FSE_AZ_EV_CODE_DRIVER_EV:
1208 efx_handle_driver_event(channel, &event);
1209 break;
1210 case FSE_CZ_EV_CODE_MCDI_EV:
1211 efx_mcdi_process_event(channel, &event);
1212 break;
1213 case FSE_AZ_EV_CODE_GLOBAL_EV:
1214 if (efx->type->handle_global_event &&
1215 efx->type->handle_global_event(channel, &event))
1216 break;
1217 /* else fall through */
1218 default:
1219 netif_err(channel->efx, hw, channel->efx->net_dev,
1220 "channel %d unknown event type %d (data "
1221 EFX_QWORD_FMT ")\n", channel->channel,
1222 ev_code, EFX_QWORD_VAL(event));
1223 }
1224 }
1226 out:
1227 channel->eventq_read_ptr = read_ptr;
1228 return spent;
1229 }
1231 /* Check whether an event is present in the eventq at the current
1232 * read pointer. Only useful for self-test.
1234 bool efx_nic_event_present(struct efx_channel *channel)
1236 return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
1239 /* Allocate buffer table entries for event queue */
1240 int efx_nic_probe_eventq(struct efx_channel *channel)
1242 struct efx_nic *efx = channel->efx;
1243 unsigned entries;
1245 entries = channel->eventq_mask + 1;
1246 return efx_alloc_special_buffer(efx, &channel->eventq,
1247 entries * sizeof(efx_qword_t));
1250 void efx_nic_init_eventq(struct efx_channel *channel)
1252 efx_oword_t reg;
1253 struct efx_nic *efx = channel->efx;
1255 netif_dbg(efx, hw, efx->net_dev,
1256 "channel %d event queue in special buffers %d-%d\n",
1257 channel->channel, channel->eventq.index,
1258 channel->eventq.index + channel->eventq.entries - 1);
1260 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1261 EFX_POPULATE_OWORD_3(reg,
1262 FRF_CZ_TIMER_Q_EN, 1,
1263 FRF_CZ_HOST_NOTIFY_MODE, 0,
1264 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1265 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1268 /* Pin event queue buffer */
1269 efx_init_special_buffer(efx, &channel->eventq);
1271 /* Fill event queue with all ones (i.e. empty events) */
1272 memset(channel->eventq.addr, 0xff, channel->eventq.len);
1274 /* Push event queue to card */
1275 EFX_POPULATE_OWORD_3(reg,
1276 FRF_AZ_EVQ_EN, 1,
1277 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1278 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1279 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1280 channel->channel);
1282 efx->type->push_irq_moderation(channel);
1285 void efx_nic_fini_eventq(struct efx_channel *channel)
1287 efx_oword_t reg;
1288 struct efx_nic *efx = channel->efx;
1290 /* Remove event queue from card */
1291 EFX_ZERO_OWORD(reg);
1292 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1293 channel->channel);
1294 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1295 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1297 /* Unpin event queue */
1298 efx_fini_special_buffer(efx, &channel->eventq);
1301 /* Free buffers backing event queue */
1302 void efx_nic_remove_eventq(struct efx_channel *channel)
1304 efx_free_special_buffer(channel->efx, &channel->eventq);
1308 void efx_nic_generate_test_event(struct efx_channel *channel)
1310 efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
1313 void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
1315 efx_magic_event(efx_rx_queue_channel(rx_queue),
1316 EFX_CHANNEL_MAGIC_FILL(rx_queue));
1319 /**************************************************************************
1321 * Hardware interrupts
1322 * The hardware interrupt handler does very little work; all the event
1323 * queue processing is carried out by per-channel tasklets.
1325 **************************************************************************/
1327 /* Enable/disable/generate interrupts */
1328 static inline void efx_nic_interrupts(struct efx_nic *efx,
1329 bool enabled, bool force)
1331 efx_oword_t int_en_reg_ker;
1333 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1334 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1335 FRF_AZ_KER_INT_KER, force,
1336 FRF_AZ_DRV_INT_EN_KER, enabled);
1337 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1340 void efx_nic_enable_interrupts(struct efx_nic *efx)
1342 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1343 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1345 efx_nic_interrupts(efx, true, false);
1348 void efx_nic_disable_interrupts(struct efx_nic *efx)
1350 /* Disable interrupts */
1351 efx_nic_interrupts(efx, false, false);
1354 /* Generate a test interrupt
1355 * Interrupt must already have been enabled, otherwise nasty things
1356 * may happen.
1357 */
1358 void efx_nic_generate_interrupt(struct efx_nic *efx)
1360 efx_nic_interrupts(efx, true, true);
1363 /* Process a fatal interrupt
1364 * Disable bus mastering ASAP and schedule a reset
1366 irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1368 struct falcon_nic_data *nic_data = efx->nic_data;
1369 efx_oword_t *int_ker = efx->irq_status.addr;
1370 efx_oword_t fatal_intr;
1371 int error, mem_perr;
1373 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1374 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1376 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1377 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1378 EFX_OWORD_VAL(fatal_intr),
1379 error ? "disabling bus mastering" : "no recognised error");
1381 /* If this is a memory parity error dump which blocks are offending */
1382 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1383 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1384 if (mem_perr) {
1385 efx_oword_t reg;
1386 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1387 netif_err(efx, hw, efx->net_dev,
1388 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1389 EFX_OWORD_VAL(reg));
1392 /* Disable both devices */
1393 pci_clear_master(efx->pci_dev);
1394 if (efx_nic_is_dual_func(efx))
1395 pci_clear_master(nic_data->pci_dev2);
1396 efx_nic_disable_interrupts(efx);
1398 /* Count errors and reset or disable the NIC accordingly */
1399 if (efx->int_error_count == 0 ||
1400 time_after(jiffies, efx->int_error_expire)) {
1401 efx->int_error_count = 0;
1402 efx->int_error_expire =
1403 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1404 }
1405 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1406 netif_err(efx, hw, efx->net_dev,
1407 "SYSTEM ERROR - reset scheduled\n");
1408 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1410 netif_err(efx, hw, efx->net_dev,
1411 "SYSTEM ERROR - max number of errors seen. "
1412 "NIC will be disabled\n");
1413 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1414 }
1416 return IRQ_HANDLED;
1417 }
1419 /* Handle a legacy interrupt
1420 * Acknowledges the interrupt and schedules event queue processing.
1421 */
1422 static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1424 struct efx_nic *efx = dev_id;
1425 efx_oword_t *int_ker = efx->irq_status.addr;
1426 irqreturn_t result = IRQ_NONE;
1427 struct efx_channel *channel;
1428 efx_dword_t reg;
1429 u32 queues;
1430 int syserr;
1432 /* Could this be ours? If interrupts are disabled then the
1433 * channel state may not be valid.
1435 if (!efx->legacy_irq_enabled)
1436 return result;
1438 /* Read the ISR which also ACKs the interrupts */
1439 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1440 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1442 /* Handle non-event-queue sources */
1443 if (queues & (1U << efx->irq_level)) {
1444 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1445 if (unlikely(syserr))
1446 return efx_nic_fatal_interrupt(efx);
1447 efx->last_irq_cpu = raw_smp_processor_id();
1448 }
1450 if (queues != 0) {
1451 if (EFX_WORKAROUND_15783(efx))
1452 efx->irq_zero_count = 0;
1454 /* Schedule processing of any interrupting queues */
1455 efx_for_each_channel(channel, efx) {
1456 if (queues & 1)
1457 efx_schedule_channel_irq(channel);
1458 queues >>= 1;
1460 result = IRQ_HANDLED;
1462 } else if (EFX_WORKAROUND_15783(efx)) {
1463 efx_qword_t *event;
1465 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1466 * because this might be a shared interrupt. */
1467 if (efx->irq_zero_count++ == 0)
1468 result = IRQ_HANDLED;
1470 /* Ensure we schedule or rearm all event queues */
1471 efx_for_each_channel(channel, efx) {
1472 event = efx_event(channel, channel->eventq_read_ptr);
1473 if (efx_event_present(event))
1474 efx_schedule_channel_irq(channel);
1476 efx_nic_eventq_read_ack(channel);
1480 if (result == IRQ_HANDLED)
1481 netif_vdbg(efx, intr, efx->net_dev,
1482 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1483 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1485 return result;
1486 }
1488 /* Handle an MSI interrupt
1490 * Handle an MSI hardware interrupt. This routine schedules event
1491 * queue processing. No interrupt acknowledgement cycle is necessary.
1492 * Also, we never need to check that the interrupt is for us, since
1493 * MSI interrupts cannot be shared.
1495 static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1497 struct efx_channel *channel = *(struct efx_channel **)dev_id;
1498 struct efx_nic *efx = channel->efx;
1499 efx_oword_t *int_ker = efx->irq_status.addr;
1500 int syserr;
1502 netif_vdbg(efx, intr, efx->net_dev,
1503 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1504 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1506 /* Handle non-event-queue sources */
1507 if (channel->channel == efx->irq_level) {
1508 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1509 if (unlikely(syserr))
1510 return efx_nic_fatal_interrupt(efx);
1511 efx->last_irq_cpu = raw_smp_processor_id();
1514 /* Schedule processing of the channel */
1515 efx_schedule_channel_irq(channel);
1517 return IRQ_HANDLED;
1518 }
1521 /* Setup RSS indirection table.
1522 * This maps from the hash value of the packet to RXQ
1524 void efx_nic_push_rx_indir_table(struct efx_nic *efx)
1525 {
1526 size_t i;
1527 efx_dword_t dword;
1529 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1530 return;
1532 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1533 FR_BZ_RX_INDIRECTION_TBL_ROWS);
1535 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1536 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1537 efx->rx_indir_table[i]);
1538 efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
1539 }
1540 }
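/* Illustrative sketch of how the table is consumed (assumption: the
 * NIC indexes the 128-row table with the low bits of the RX flow hash,
 * so an even spread sets entry i to i modulo the RX channel count):
 *
 *	rxq = efx->rx_indir_table[hash & (FR_BZ_RX_INDIRECTION_TBL_ROWS - 1)];
 */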
1542 /* Hook interrupt handler(s)
1543 * Try MSI and then legacy interrupts.
1545 int efx_nic_init_interrupt(struct efx_nic *efx)
1547 struct efx_channel *channel;
1550 if (!EFX_INT_MODE_USE_MSI(efx)) {
1551 irq_handler_t handler;
1552 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1553 handler = efx_legacy_interrupt;
1554 else
1555 handler = falcon_legacy_interrupt_a1;
1557 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1558 efx->name, efx);
1559 if (rc) {
1560 netif_err(efx, drv, efx->net_dev,
1561 "failed to hook legacy IRQ %d\n",
1562 efx->pci_dev->irq);
1563 goto fail1;
1564 }
1565 return 0;
1566 }
1568 /* Hook MSI or MSI-X interrupt */
1569 efx_for_each_channel(channel, efx) {
1570 rc = request_irq(channel->irq, efx_msi_interrupt,
1571 IRQF_PROBE_SHARED, /* Not shared */
1572 efx->channel_name[channel->channel],
1573 &efx->channel[channel->channel]);
1574 if (rc) {
1575 netif_err(efx, drv, efx->net_dev,
1576 "failed to hook IRQ %d\n", channel->irq);
1577 goto fail2;
1578 }
1579 }
1581 return 0;
1583 fail2:
1584 efx_for_each_channel(channel, efx)
1585 free_irq(channel->irq, &efx->channel[channel->channel]);
1586 fail1:
1587 return rc;
1588 }
1590 void efx_nic_fini_interrupt(struct efx_nic *efx)
1592 struct efx_channel *channel;
1593 efx_oword_t reg;
1595 /* Disable MSI/MSI-X interrupts */
1596 efx_for_each_channel(channel, efx) {
1597 if (channel->irq)
1598 free_irq(channel->irq, &efx->channel[channel->channel]);
1601 /* ACK legacy interrupt */
1602 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1603 efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1604 else
1605 falcon_irq_ack_a1(efx);
1607 /* Disable legacy interrupt */
1608 if (efx->legacy_irq)
1609 free_irq(efx->legacy_irq, efx);
1612 u32 efx_nic_fpga_ver(struct efx_nic *efx)
1614 efx_oword_t altera_build;
1615 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1616 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1619 void efx_nic_init_common(struct efx_nic *efx)
1620 {
1621 efx_oword_t temp;
1623 /* Set positions of descriptor caches in SRAM. */
1624 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
1625 efx->type->tx_dc_base / 8);
1626 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1627 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
1628 efx->type->rx_dc_base / 8);
1629 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1631 /* Set TX descriptor cache size. */
1632 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1633 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1634 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1636 /* Set RX descriptor cache size. Set low watermark to size-8, as
1637 * this allows most efficient prefetching.
1639 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1640 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1641 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1642 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1643 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1645 /* Program INT_KER address */
1646 EFX_POPULATE_OWORD_2(temp,
1647 FRF_AZ_NORM_INT_VEC_DIS_KER,
1648 EFX_INT_MODE_USE_MSI(efx),
1649 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1650 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1652 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1653 /* Use an interrupt level unused by event queues */
1654 efx->irq_level = 0x1f;
1655 else
1656 /* Use a valid MSI-X vector */
1657 efx->irq_level = 0;
1659 /* Enable all the genuinely fatal interrupts. (They are still
1660 * masked by the overall interrupt mask, controlled by
1661 * falcon_interrupts()).
1663 * Note: All other fatal interrupts are enabled
1665 EFX_POPULATE_OWORD_3(temp,
1666 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1667 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1668 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1669 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1670 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1671 EFX_INVERT_OWORD(temp);
1672 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1674 efx_nic_push_rx_indir_table(efx);
1676 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1677 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1679 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1680 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1681 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1682 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1683 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1684 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1685 /* Enable SW_EV to inherit in char driver - assume harmless here */
1686 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1687 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1688 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1689 /* Disable hardware watchdog which can misfire */
1690 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1691 /* Squash TX of packets of 16 bytes or less */
1692 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1693 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1694 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1696 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1697 EFX_POPULATE_OWORD_4(temp,
1698 /* Default values */
1699 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1700 FRF_BZ_TX_PACE_SB_AF, 0xb,
1701 FRF_BZ_TX_PACE_FB_BASE, 0,
1702 /* Allow large pace values in the
1703 * fast bin. */
1704 FRF_BZ_TX_PACE_BIN_TH,
1705 FFE_BZ_TX_PACE_RESERVED);
1706 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1712 #define REGISTER_REVISION_A 1
1713 #define REGISTER_REVISION_B 2
1714 #define REGISTER_REVISION_C 3
1715 #define REGISTER_REVISION_Z 3 /* latest revision */
1717 struct efx_nic_reg {
1718 u32 offset:24;
1719 u32 min_revision:2, max_revision:2;
1722 #define REGISTER(name, min_rev, max_rev) { \
1723 FR_ ## min_rev ## max_rev ## _ ## name, \
1724 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
1726 #define REGISTER_AA(name) REGISTER(name, A, A)
1727 #define REGISTER_AB(name) REGISTER(name, A, B)
1728 #define REGISTER_AZ(name) REGISTER(name, A, Z)
1729 #define REGISTER_BB(name) REGISTER(name, B, B)
1730 #define REGISTER_BZ(name) REGISTER(name, B, Z)
1731 #define REGISTER_CZ(name) REGISTER(name, C, Z)
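/* Worked example (illustrative): REGISTER_BZ(RX_FILTER_CTL) expands to
 * { FR_BZ_RX_FILTER_CTL, REGISTER_REVISION_B, REGISTER_REVISION_Z },
 * i.e. the register is dumped for Falcon B0 (revision 2) onwards.
 */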
1733 static const struct efx_nic_reg efx_nic_regs[] = {
1734 REGISTER_AZ(ADR_REGION),
1735 REGISTER_AZ(INT_EN_KER),
1736 REGISTER_BZ(INT_EN_CHAR),
1737 REGISTER_AZ(INT_ADR_KER),
1738 REGISTER_BZ(INT_ADR_CHAR),
1739 /* INT_ACK_KER is WO */
1740 /* INT_ISR0 is RC */
1741 REGISTER_AZ(HW_INIT),
1742 REGISTER_CZ(USR_EV_CFG),
1743 REGISTER_AB(EE_SPI_HCMD),
1744 REGISTER_AB(EE_SPI_HADR),
1745 REGISTER_AB(EE_SPI_HDATA),
1746 REGISTER_AB(EE_BASE_PAGE),
1747 REGISTER_AB(EE_VPD_CFG0),
1748 /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
1749 /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
1750 /* PCIE_CORE_INDIRECT is indirect */
1751 REGISTER_AB(NIC_STAT),
1752 REGISTER_AB(GPIO_CTL),
1753 REGISTER_AB(GLB_CTL),
1754 /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
1755 REGISTER_BZ(DP_CTRL),
1756 REGISTER_AZ(MEM_STAT),
1757 REGISTER_AZ(CS_DEBUG),
1758 REGISTER_AZ(ALTERA_BUILD),
1759 REGISTER_AZ(CSR_SPARE),
1760 REGISTER_AB(PCIE_SD_CTL0123),
1761 REGISTER_AB(PCIE_SD_CTL45),
1762 REGISTER_AB(PCIE_PCS_CTL_STAT),
1763 /* DEBUG_DATA_OUT is not used */
1765 REGISTER_AZ(EVQ_CTL),
1766 REGISTER_AZ(EVQ_CNT1),
1767 REGISTER_AZ(EVQ_CNT2),
1768 REGISTER_AZ(BUF_TBL_CFG),
1769 REGISTER_AZ(SRM_RX_DC_CFG),
1770 REGISTER_AZ(SRM_TX_DC_CFG),
1771 REGISTER_AZ(SRM_CFG),
1772 /* BUF_TBL_UPD is WO */
1773 REGISTER_AZ(SRM_UPD_EVQ),
1774 REGISTER_AZ(SRAM_PARITY),
1775 REGISTER_AZ(RX_CFG),
1776 REGISTER_BZ(RX_FILTER_CTL),
1777 /* RX_FLUSH_DESCQ is WO */
1778 REGISTER_AZ(RX_DC_CFG),
1779 REGISTER_AZ(RX_DC_PF_WM),
1780 REGISTER_BZ(RX_RSS_TKEY),
1781 /* RX_NODESC_DROP is RC */
1782 REGISTER_AA(RX_SELF_RST),
1783 /* RX_DEBUG, RX_PUSH_DROP are not used */
1784 REGISTER_CZ(RX_RSS_IPV6_REG1),
1785 REGISTER_CZ(RX_RSS_IPV6_REG2),
1786 REGISTER_CZ(RX_RSS_IPV6_REG3),
1787 /* TX_FLUSH_DESCQ is WO */
1788 REGISTER_AZ(TX_DC_CFG),
1789 REGISTER_AA(TX_CHKSM_CFG),
1790 REGISTER_AZ(TX_CFG),
1791 /* TX_PUSH_DROP is not used */
1792 REGISTER_AZ(TX_RESERVED),
1793 REGISTER_BZ(TX_PACE),
1794 /* TX_PACE_DROP_QID is RC */
1795 REGISTER_BB(TX_VLAN),
1796 REGISTER_BZ(TX_IPFIL_PORTEN),
1797 REGISTER_AB(MD_TXD),
1798 REGISTER_AB(MD_RXD),
1800 REGISTER_AB(MD_PHY_ADR),
1803 REGISTER_AB(MAC_STAT_DMA),
1804 REGISTER_AB(MAC_CTRL),
1805 REGISTER_BB(GEN_MODE),
1806 REGISTER_AB(MAC_MC_HASH_REG0),
1807 REGISTER_AB(MAC_MC_HASH_REG1),
1808 REGISTER_AB(GM_CFG1),
1809 REGISTER_AB(GM_CFG2),
1810 /* GM_IPG and GM_HD are not used */
1811 REGISTER_AB(GM_MAX_FLEN),
1812 /* GM_TEST is not used */
1813 REGISTER_AB(GM_ADR1),
1814 REGISTER_AB(GM_ADR2),
1815 REGISTER_AB(GMF_CFG0),
1816 REGISTER_AB(GMF_CFG1),
1817 REGISTER_AB(GMF_CFG2),
1818 REGISTER_AB(GMF_CFG3),
1819 REGISTER_AB(GMF_CFG4),
1820 REGISTER_AB(GMF_CFG5),
1821 REGISTER_BB(TX_SRC_MAC_CTL),
1822 REGISTER_AB(XM_ADR_LO),
1823 REGISTER_AB(XM_ADR_HI),
1824 REGISTER_AB(XM_GLB_CFG),
1825 REGISTER_AB(XM_TX_CFG),
1826 REGISTER_AB(XM_RX_CFG),
1827 REGISTER_AB(XM_MGT_INT_MASK),
1829 REGISTER_AB(XM_PAUSE_TIME),
1830 REGISTER_AB(XM_TX_PARAM),
1831 REGISTER_AB(XM_RX_PARAM),
1832 /* XM_MGT_INT_MSK (note no 'A') is RC */
1833 REGISTER_AB(XX_PWR_RST),
1834 REGISTER_AB(XX_SD_CTL),
1835 REGISTER_AB(XX_TXDRV_CTL),
1836 /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
1837 /* XX_CORE_STAT is partly RC */
1840 struct efx_nic_reg_table {
1841 u32 offset:24;
1842 u32 min_revision:2, max_revision:2;
1843 u32 step:6, rows:21;
1846 #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
1847 offset, \
1848 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
1849 step, rows \
1850 }
1851 #define REGISTER_TABLE(name, min_rev, max_rev) \
1852 REGISTER_TABLE_DIMENSIONS( \
1853 name, FR_ ## min_rev ## max_rev ## _ ## name, \
1854 min_rev, max_rev, \
1855 FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
1856 FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
1857 #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
1858 #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
1859 #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
1860 #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
1861 #define REGISTER_TABLE_BB_CZ(name) \
1862 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
1863 FR_BZ_ ## name ## _STEP, \
1864 FR_BB_ ## name ## _ROWS), \
1865 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
1866 FR_BZ_ ## name ## _STEP, \
1867 FR_CZ_ ## name ## _ROWS)
1868 #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
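/* Worked example (illustrative): REGISTER_TABLE_BB_CZ(TX_PACE_TBL)
 * emits two entries sharing the FR_BZ_ offset and step, one with the
 * FR_BB_ row count and one with the FR_CZ_ row count, since the table
 * grew between the B and C revisions.
 */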
1870 static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1871 /* DRIVER is not used */
1872 /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
1873 REGISTER_TABLE_BB(TX_IPFIL_TBL),
1874 REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
1875 REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
1876 REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
1877 REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
1878 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
1879 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
1880 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
1881 /* We can't reasonably read all of the buffer table (up to 8MB!).
1882 * However this driver will only use a few entries. Reading
1883 * 1K entries allows for some expansion of queue count and
1884 * size before we need to change the version. */
1885 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
1886 A, A, 8, 1024),
1887 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
1888 B, Z, 8, 1024),
1889 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
1890 REGISTER_TABLE_BB_CZ(TIMER_TBL),
1891 REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
1892 REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
1893 /* TX_FILTER_TBL0 is huge and not used by this driver */
1894 REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
1895 REGISTER_TABLE_CZ(MC_TREG_SMEM),
1896 /* MSIX_PBA_TABLE is not mapped */
1897 /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
1898 REGISTER_TABLE_BZ(RX_FILTER_TBL0),
1901 size_t efx_nic_get_regs_len(struct efx_nic *efx)
1903 const struct efx_nic_reg *reg;
1904 const struct efx_nic_reg_table *table;
1905 size_t len = 0;
1907 for (reg = efx_nic_regs;
1908 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1909 reg++)
1910 if (efx->type->revision >= reg->min_revision &&
1911 efx->type->revision <= reg->max_revision)
1912 len += sizeof(efx_oword_t);
1914 for (table = efx_nic_reg_tables;
1915 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1916 table++)
1917 if (efx->type->revision >= table->min_revision &&
1918 efx->type->revision <= table->max_revision)
1919 len += table->rows * min_t(size_t, table->step, 16);
1921 return len;
1922 }
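/* Worked example (illustrative): a 1024-row, step-8 BUF_FULL_TBL
 * snapshot contributes 1024 * 8 bytes to the length, while a step-32
 * (interleaved) table contributes only 16 bytes per row, matching the
 * per-row reads in efx_nic_get_regs() below.
 */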
1924 void efx_nic_get_regs(struct efx_nic *efx, void *buf)
1926 const struct efx_nic_reg *reg;
1927 const struct efx_nic_reg_table *table;
1929 for (reg = efx_nic_regs;
1930 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1931 reg++) {
1932 if (efx->type->revision >= reg->min_revision &&
1933 efx->type->revision <= reg->max_revision) {
1934 efx_reado(efx, (efx_oword_t *)buf, reg->offset);
1935 buf += sizeof(efx_oword_t);
1939 for (table = efx_nic_reg_tables;
1940 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1941 table++) {
1942 size_t size, i;
1944 if (!(efx->type->revision >= table->min_revision &&
1945 efx->type->revision <= table->max_revision))
1946 continue;
1948 size = min_t(size_t, table->step, 16);
1950 for (i = 0; i < table->rows; i++) {
1951 switch (table->step) {
1952 case 4: /* 32-bit register or SRAM */
1953 efx_readd_table(efx, buf, table->offset, i);
1954 break;
1955 case 8: /* 64-bit SRAM */
1956 efx_sram_readq(efx,
1957 efx->membase + table->offset,
1958 buf, i);
1959 break;
1960 case 16: /* 128-bit register */
1961 efx_reado_table(efx, buf, table->offset, i);
1962 break;
1963 case 32: /* 128-bit register, interleaved */
1964 efx_reado_table(efx, buf, table->offset, 2 * i);