/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV		7

/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
/* The maximum size of a shared RSS context */
/* TODO: this should really be from the mcdi protocol export */
#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL

/* The filter table(s) are managed by firmware and we have write-only
 * access.  When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter.  Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192

#define EFX_EF10_FILTER_ID_INVALID 0xffff
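
/* In outline: the small filter IDs handed out through RX NFC/RFS are
 * indices into the software table below, while the 64-bit firmware
 * handle is kept alongside each entry and never leaves the driver;
 * EFX_EF10_FILTER_ID_INVALID marks "no such filter".
 */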

struct efx_ef10_dev_addr {
	u8 addr[ETH_ALEN];
	u16 id;
};

struct efx_ef10_filter_table {
/* The RX match field masks supported by this fw & hw, in order of priority */
	enum efx_filter_match_flags rx_match_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
	unsigned int rx_match_count;

	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* BUSY flag indicates that an update is in progress.  AUTO_OLD is
 * used to mark and sweep MAC filters for the device address lists.
 */
#define EFX_EF10_FILTER_FLAG_BUSY	1UL
#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
	wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
#define EFX_EF10_FILTER_DEV_UC_MAX	32
#define EFX_EF10_FILTER_DEV_MC_MAX	256
	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
	int dev_uc_count;
	int dev_mc_count;
/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
	u16 ucdef_id;
	u16 bcast_id;
	u16 mcdef_id;
};

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);

static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}
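
/* Note on the magic above: 0xb007 reads as "boot" and appears to act
 * as a validity marker published by the MC in EFX_WORD_1, with the
 * warm boot count itself in EFX_WORD_0.  If the marker is absent the
 * MC is presumably (re)booting and the count cannot be trusted yet,
 * hence the -EIO.
 */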

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	int bar;

	bar = efx->type->mem_bar;
	return resource_size(&efx->pci_dev->resource[bar]);
}

static bool efx_ef10_is_vf(struct efx_nic *efx)
{
	return efx->type->is_vf;
}

static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	return 0;
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}
#endif

static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf)) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	/* record the DPCPU firmware IDs to determine VEB vswitching support.
	 */
	nic_data->rx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
	nic_data->tx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
		netif_err(efx, drv, efx->net_dev,
			  "current firmware does not support TSO\n");
		return -ENODEV;
	}

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	return 0;
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}

static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
	return 0;
}

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
	size_t outlen;
	int num_addrs, rc;

	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
		return -EIO;

	num_addrs = MCDI_DWORD(outbuf,
			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

	WARN_ON(num_addrs != 1);

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

	return 0;
}

static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		       ? 1 : 0);
}

static ssize_t efx_ef10_show_primary_flag(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		       ? 1 : 0);
}

static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
		   NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);

static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	struct net_device *net_dev = efx->net_dev;
	int i, rc;

	/* We can have one VI for each 8K region.  However, until we
	 * use TX option descriptors we need two TX queues per channel.
	 */
	efx->max_channels =
		min_t(unsigned int,
		      EFX_MAX_CHANNELS,
		      efx_ef10_mem_map_size(efx) /
		      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
	if (WARN_ON(efx->max_channels == 0))
		return -EIO;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev,
				&dev_attr_link_control_flag);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	if (rc)
		goto fail4;

	rc = efx_ef10_get_pf_index(efx);
	if (rc)
		goto fail5;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail5;

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail5;
	efx->port_num = rc;
	net_dev->dev_port = rc;

	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail5;

	rc = efx_ef10_get_sysclk_freq(efx);
	if (rc < 0)
		goto fail5;
	efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
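
	/* The arithmetic above assumes GET_CLOCK reports the system
	 * clock in MHz, which is inferred from the formula itself:
	 * 1536 cycles at rc MHz is 1536 / rc us, i.e. 1536000 / rc ns.
	 * For example a 200 MHz sysclk gives a 7680 ns timer quantum.
	 */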

	/* Check whether firmware supports bug 35388 workaround.
	 * First try to enable it, then if we get EPERM, just
	 * ask if it's already enabled
	 */
	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL);
	if (rc == 0) {
		nic_data->workaround_35388 = true;
	} else if (rc == -EPERM) {
		unsigned int enabled;

		rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
		if (rc)
			goto fail5;
		nic_data->workaround_35388 = enabled &
			MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
	} else if (rc != -ENOSYS && rc != -ENOENT) {
		goto fail5;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");

	rc = efx_mcdi_mon_probe(efx);
	if (rc && rc != -EPERM)
		goto fail5;

	efx_ptp_probe(efx, NULL);

#ifdef CONFIG_SFC_SRIOV
	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
	} else
#endif
		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

	return 0;

fail5:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}

static int efx_ef10_free_vis(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF_ERR(outbuf);
	size_t outlen;
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
				    outbuf, sizeof(outbuf), &outlen);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
				       rc);
	return rc;
}

#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
	unsigned int i;
	int rc;

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc);
	}

	nic_data->n_piobufs = 0;
}

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	unsigned int i;
	size_t outlen;
	int rc = 0;

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			break;
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
			rc = -EIO;
			break;
		}
		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);
	}

	nic_data->n_piobufs = i;
	if (rc)
		efx_ef10_free_piobufs(efx);
	return rc;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	_MCDI_DECLARE_BUF(inbuf,
			  max(MC_CMD_LINK_PIOBUF_IN_LEN,
			      MC_CMD_UNLINK_PIOBUF_IN_LEN));
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	memset(inbuf, 0, sizeof(inbuf));

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / ER_DZ_TX_PIOBUF_SIZE;
			offset = offset % ER_DZ_TX_PIOBUF_SIZE;
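
			/* Illustration of the mapping above, assuming
			 * for the sake of example efx_piobuf_size = 256
			 * and ER_DZ_TX_PIOBUF_SIZE = 2048: eight queues
			 * then share each PIO buffer, the highest-
			 * numbered TX channel gets offset 0 of buffer 0
			 * (hence the reverse order), and index/offset
			 * select the buffer and the 256-byte slice
			 * within it.
			 */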

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue.  We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * EFX_VI_PAGE_SIZE + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data_pf;
	struct pci_dev *pci_dev_pf;
	struct efx_nic *efx_pf;
	struct ef10_vf *vf;

	if (efx->pci_dev->is_virtfn) {
		pci_dev_pf = efx->pci_dev->physfn;
		if (pci_dev_pf) {
			efx_pf = pci_get_drvdata(pci_dev_pf);
			nic_data_pf = efx_pf->nic_data;
			vf = nic_data_pf->vf + nic_data->vf_index;
			vf->efx = NULL;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}
#endif

	efx_ptp_remove(efx);

	efx_mcdi_mon_remove(efx);

	efx_ef10_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_ef10_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_probe_pf(struct efx_nic *efx)
{
	return efx_ef10_probe(efx);
}

int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vport_add_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

int efx_ef10_vport_del_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
	int rc;
	struct pci_dev *pci_dev_pf;

	/* If the parent PF has no VF data structure, it doesn't know about this
	 * VF so fail probe.  The VF needs to be re-created.  This can happen
	 * if the PF driver is unloaded while the VF is assigned to a guest.
	 */
	pci_dev_pf = efx->pci_dev->physfn;
	if (pci_dev_pf) {
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;

		if (!nic_data_pf->vf) {
			netif_info(efx, drv, efx->net_dev,
				   "The VF cannot link to its parent PF; "
				   "please destroy and re-create the VF\n");
			return -EBUSY;
		}
	}

	rc = efx_ef10_probe(efx);
	if (rc)
		return rc;

	rc = efx_ef10_get_vf_index(efx);
	if (rc)
		goto fail;

	if (efx->pci_dev->is_virtfn) {
		if (efx->pci_dev->physfn) {
			struct efx_nic *efx_pf =
				pci_get_drvdata(efx->pci_dev->physfn);
			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
			struct efx_ef10_nic_data *nic_data = efx->nic_data;

			nic_data_p->vf[nic_data->vf_index].efx = efx;
			nic_data_p->vf[nic_data->vf_index].pci_dev =
				efx->pci_dev;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}

	return 0;

fail:
	efx_ef10_remove(efx);
	return rc;
}
#else
static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
{
	return 0;
}
#endif

static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	unsigned int min_vis, pio_write_vi_base, max_vis;
	void __iomem *membase;
	int rc;

	min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel.  Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K).  So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 *
	 * The UC mapping contains (min_vis - 1) complete VIs and the
	 * first half of the next VI.  Then the WC mapping begins with
	 * the second half of this last VI.
	 */
	uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
				     ER_DZ_TX_PIOBUF);
	if (nic_data->n_piobufs) {
		/* pio_write_vi_base rounds down to give the number of complete
		 * VIs inside the UC mapping.
		 */
		pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
					      EFX_VI_PAGE_SIZE) -
				   uc_mem_map_size);
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
	} else {
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = min_vis;
	}
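
	/* Worked example of the mapping arithmetic above, purely
	 * illustrative and assuming EFX_VI_PAGE_SIZE = 8192,
	 * ER_DZ_TX_PIOBUF = 4096 and a 4K host PAGE_SIZE: with
	 * min_vis = 4, uc_mem_map_size = PAGE_ALIGN(3 * 8192 + 4096)
	 * = 28672.  With n_piobufs = 2, pio_write_vi_base =
	 * 28672 / 8192 = 3 (rounding down), max_vis = 5 and
	 * wc_mem_map_size = PAGE_ALIGN(5 * 8192) - 28672 = 12288,
	 * i.e. the WC mapping picks up exactly where the UC mapping
	 * ends.
	 */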

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_ef10_free_vis(efx);
	if (rc != 0)
		return rc;

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
	if (rc != 0)
		return rc;

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);
	}

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
						  uc_mem_map_size,
						  wc_mem_map_size);
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
				  wc_mem_map_size);
			return -ENOMEM;
		}
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
			 uc_mem_map_size);

		rc = efx_ef10_link_piobufs(efx);
		if (rc)
			efx_ef10_free_piobufs(efx);
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

	return 0;
}

static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (nic_data->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		nic_data->must_realloc_vis = false;
	}

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
		if (rc == 0) {
			rc = efx_ef10_link_piobufs(efx);
			if (rc)
				efx_ef10_free_piobufs(efx);
		}

		/* Log an error on failure, but this is non-fatal */
		if (rc)
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;
	}

	/* don't fail init if RSS setup doesn't work */
	efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);

	return 0;
}

static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV
	unsigned int i;
#endif

	/* All our allocations have been reset */
	nic_data->must_realloc_vis = true;
	nic_data->must_restore_filters = true;
	nic_data->must_restore_piobufs = true;
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	/* Driver-created vswitches and vports must be re-created */
	nic_data->must_probe_vswitching = true;
	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
	if (nic_data->vf)
		for (i = 0; i < efx->vf_count; i++)
			nic_data->vf[i].vport_id = 0;
#endif
}

static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
{
	if (reason == RESET_TYPE_MC_FAILURE)
		return RESET_TYPE_DATAPATH;

	return efx_mcdi_map_reset_reason(reason);
}

static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
	int rc = efx_mcdi_reset(efx, reset_type);

	/* Unprivileged functions return -EPERM, but need to return success
	 * here so that the datapath is brought back up.
	 */
	if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
		rc = 0;

	/* If it was a port reset, trigger reallocation of MC resources.
	 * Note that on an MC reset nothing needs to be done now because we'll
	 * detect the MC reset later and handle it then.
	 * For an FLR, we never get an MC reset event, but the MC has reset all
	 * resources assigned to us, so we have to trigger reallocation now.
	 */
	if ((reset_type == RESET_TYPE_ALL ||
	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
		efx_ef10_reset_mc_allocations(efx);
	return rc;
}

#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
#define GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
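
/* Each entry built above is an efx_hw_stat_desc triple of
 * { name, dma_width in bits, byte offset }: the MC_CMD_MAC_* values
 * are qword indices into the DMA'd statistics buffer, so
 * 8 * MC_CMD_MAC_x converts them to byte offsets as the generic stats
 * code consumes them.  A NULL name (EF10_DMA_INVIS_STAT) keeps the
 * counter internal rather than exposing it through ethtool, and a
 * zero dma_width marks a purely software-derived statistic.
 */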

static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
	EF10_DMA_STAT(port_tx_packets, TX_PKTS),
	EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
	EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(port_rx_good_bytes),
	EF10_OTHER_STAT(port_rx_bad_bytes),
	EF10_DMA_STAT(port_rx_packets, RX_PKTS),
	EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
	EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
	GENERIC_SW_STAT(rx_nodesc_trunc),
	GENERIC_SW_STAT(rx_noskb_drops),
	EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
	EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
	EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
	EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
	EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
	EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
	EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
	EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
	EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
	EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
	EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
	EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
	EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
	EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
	EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
	EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
	EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
	EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
	EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
	EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
	EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
	EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
	EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
	EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
};

#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |	\
			       (1ULL << EF10_STAT_port_tx_packets) |	\
			       (1ULL << EF10_STAT_port_tx_pause) |	\
			       (1ULL << EF10_STAT_port_tx_unicast) |	\
			       (1ULL << EF10_STAT_port_tx_multicast) |	\
			       (1ULL << EF10_STAT_port_tx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_bytes) |	\
			       (1ULL <<					\
				EF10_STAT_port_rx_bytes_minus_good_bytes) | \
			       (1ULL << EF10_STAT_port_rx_good_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_bad_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_packets) |	\
			       (1ULL << EF10_STAT_port_rx_good) |	\
			       (1ULL << EF10_STAT_port_rx_bad) |	\
			       (1ULL << EF10_STAT_port_rx_pause) |	\
			       (1ULL << EF10_STAT_port_rx_control) |	\
			       (1ULL << EF10_STAT_port_rx_unicast) |	\
			       (1ULL << EF10_STAT_port_rx_multicast) |	\
			       (1ULL << EF10_STAT_port_rx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_lt64) |	\
			       (1ULL << EF10_STAT_port_rx_64) |		\
			       (1ULL << EF10_STAT_port_rx_65_to_127) |	\
			       (1ULL << EF10_STAT_port_rx_128_to_255) |	\
			       (1ULL << EF10_STAT_port_rx_256_to_511) |	\
			       (1ULL << EF10_STAT_port_rx_512_to_1023) |\
			       (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
			       (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
			       (1ULL << EF10_STAT_port_rx_gtjumbo) |	\
			       (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
			       (1ULL << EF10_STAT_port_rx_overflow) |	\
			       (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
			       (1ULL << GENERIC_STAT_rx_nodesc_trunc) |	\
			       (1ULL << GENERIC_STAT_rx_noskb_drops))

/* These statistics are only provided by the 10G MAC.  For a 10G/40G
 * switchable port we do not expose these because they might not
 * include all the packets they should.
 */
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) |	\
				 (1ULL << EF10_STAT_port_tx_lt64) |	\
				 (1ULL << EF10_STAT_port_tx_64) |	\
				 (1ULL << EF10_STAT_port_tx_65_to_127) |\
				 (1ULL << EF10_STAT_port_tx_128_to_255) |\
				 (1ULL << EF10_STAT_port_tx_256_to_511) |\
				 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
				 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
				 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))

/* These statistics are only provided by the 40G MAC.  For a 10G/40G
 * switchable port we do expose these because the errors will otherwise
 * not be counted at all.
 */
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
				  (1ULL << EF10_STAT_port_rx_length_error))

/* These statistics are only provided if the firmware supports the
 * capability PM_AND_RXDP_COUNTERS.
 */
#define HUNT_PM_AND_RXDP_STAT_MASK (					\
	(1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_mapping) |		\
	(1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_streaming_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |			\
	(1ULL << EF10_STAT_port_rx_dp_hlb_wait))

static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
	u64 raw_mask = HUNT_COMMON_STAT_MASK;
	u32 port_caps = efx_mcdi_phy_get_caps(efx);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (!(efx->mcdi->fn_flags &
	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		return 0;

	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
	else
		raw_mask |= HUNT_10G_ONLY_STAT_MASK;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;

	return raw_mask;
}

static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 raw_mask[2];

	raw_mask[0] = efx_ef10_raw_stat_mask(efx);

	/* Only show vadaptor stats when EVB capability is present */
	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
		raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
		raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
	} else {
		raw_mask[1] = 0;
	}

#if BITS_PER_LONG == 64
	mask[0] = raw_mask[0];
	mask[1] = raw_mask[1];
#else
	mask[0] = raw_mask[0] & 0xffffffff;
	mask[1] = raw_mask[0] >> 32;
	mask[2] = raw_mask[1] & 0xffffffff;
	mask[3] = raw_mask[1] >> 32;
#endif
}

static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);

	efx_ef10_get_stat_mask(efx, mask);
	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
				      mask, names);
}

static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
					   struct rtnl_link_stats64 *core_stats)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	size_t stats_count = 0, index;

	efx_ef10_get_stat_mask(efx, mask);

	if (full_stats) {
		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
			if (efx_ef10_stat_desc[index].name) {
				*full_stats++ = stats[index];
				++stats_count;
			}
		}
	}

	if (!core_stats)
		return stats_count;

	if (nic_data->datapath_caps &
	    1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
		/* Use vadaptor stats. */
		core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
					 stats[EF10_STAT_rx_multicast] +
					 stats[EF10_STAT_rx_broadcast];
		core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
					 stats[EF10_STAT_tx_multicast] +
					 stats[EF10_STAT_tx_broadcast];
		core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
				       stats[EF10_STAT_rx_multicast_bytes] +
				       stats[EF10_STAT_rx_broadcast_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
				       stats[EF10_STAT_tx_multicast_bytes] +
				       stats[EF10_STAT_tx_broadcast_bytes];
		core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[EF10_STAT_rx_multicast];
		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
		core_stats->rx_errors = core_stats->rx_crc_errors;
		core_stats->tx_errors = stats[EF10_STAT_tx_bad];
	} else {
		/* Use port stats. */
		core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
		core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
		core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
		core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
					 stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
		core_stats->rx_length_errors =
			stats[EF10_STAT_port_rx_gtjumbo] +
			stats[EF10_STAT_port_rx_length_error];
		core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
		core_stats->rx_frame_errors =
			stats[EF10_STAT_port_rx_align_error];
		core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors);
	}

	return stats_count;
}

static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;
	__le64 *dma_stats;

	efx_ef10_get_stat_mask(efx, mask);

	dma_stats = efx->stats_buffer.addr;
	nic_data = efx->nic_data;

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
		return 0;
	rmb();
	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
			     stats, efx->stats_buffer.addr, false);
	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start)
		return -EAGAIN;
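
	/* In outline: the MC brackets each statistics DMA with the two
	 * generation counts, updating GENERATION_START before it
	 * rewrites the buffer and GENERATION_END once it has finished.
	 * Reading END, then the stats, then START therefore yields a
	 * consistent snapshot only when the two values match; a
	 * mismatch means a DMA raced with our reads and the caller
	 * should retry.
	 */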

	/* Update derived statistics */
	efx_nic_fix_nodesc_drop_stat(efx,
				     &stats[EF10_STAT_port_rx_nodesc_drops]);
	stats[EF10_STAT_port_rx_good_bytes] =
		stats[EF10_STAT_port_rx_bytes] -
		stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
	efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
			     stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
	efx_update_sw_stats(efx, stats);
	return 0;
}

static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
				       struct rtnl_link_stats64 *core_stats)
{
	int retry;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us)
	 */
	for (retry = 0; retry < 100; ++retry) {
		if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
			break;
		udelay(100);
	}

	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}

static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;
	u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
	struct efx_buffer stats_buf;
	__le64 *dma_stats;
	int rc;

	spin_unlock_bh(&efx->stats_lock);

	if (in_interrupt()) {
		/* If in atomic context, cannot update stats.  Just update the
		 * software stats and return so the caller can continue.
		 */
		spin_lock_bh(&efx->stats_lock);
		efx_update_sw_stats(efx, stats);
		return 0;
	}

	efx_ef10_get_stat_mask(efx, mask);

	rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
	if (rc) {
		spin_lock_bh(&efx->stats_lock);
		return rc;
	}

	dma_stats = stats_buf.addr;
	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;

	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
	MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
			      MAC_STATS_IN_DMA, 1);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
				NULL, 0, NULL);
	spin_lock_bh(&efx->stats_lock);
	if (rc) {
		/* Expect ENOENT if DMA queues have not been set up */
		if (rc != -ENOENT || atomic_read(&efx->active_queues))
			efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
					       sizeof(inbuf), NULL, 0, rc);
		goto out;
	}

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
		WARN_ON_ONCE(1);
		goto out;
	}
	rmb();
	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
			     stats, stats_buf.addr, false);
	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start) {
		rc = -EAGAIN;
		goto out;
	}

	efx_update_sw_stats(efx, stats);
out:
	efx_nic_free_buffer(efx, &stats_buf);
	return rc;
}

static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
				       struct rtnl_link_stats64 *core_stats)
{
	if (efx_ef10_try_update_nic_stats_vf(efx))
		return 0;

	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}

static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned int mode, value;
	efx_dword_t timer_cmd;

	if (channel->irq_moderation) {
		mode = 3;
		value = channel->irq_moderation - 1;
	} else {
		mode = 0;
		value = 0;
	}

	if (EFX_EF10_WORKAROUND_35388(efx)) {
		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
				     EFE_DD_EVQ_IND_TIMER_FLAGS,
				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
				     ERF_DD_EVQ_IND_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
				     ERF_DZ_TC_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
				channel->channel);
	}
}

static void efx_ef10_get_wol_vf(struct efx_nic *efx,
				struct ethtool_wolinfo *wol) {}

static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
{
	return -EOPNOTSUPP;
}

static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
{
	if (type != 0)
		return -EINVAL;
	return 0;
}

static void efx_ef10_mcdi_request(struct efx_nic *efx,
				  const efx_dword_t *hdr, size_t hdr_len,
				  const efx_dword_t *sdu, size_t sdu_len)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(pdu, hdr, hdr_len);
	memcpy(pdu + hdr_len, sdu, sdu_len);
	wmb();

	/* The hardware provides 'low' and 'high' (doorbell) registers
	 * for passing the 64-bit address of an MCDI request to
	 * firmware.  However the dwords are swapped by firmware.  The
	 * least significant bits of the doorbell are then 0 for all
	 * MCDI requests due to alignment.
	 */
	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
		    ER_DZ_MC_DB_LWRD);
	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
		    ER_DZ_MC_DB_HWRD);
}
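
/* Note the swap described above: the high dword of the DMA address
 * goes to ER_DZ_MC_DB_LWRD and the low dword to ER_DZ_MC_DB_HWRD.
 * Writing HWRD last is what rings the doorbell, which is also why
 * efx_ef10_probe() can poke a 1 into HWRD to cancel a stale request
 * left behind by a previous user of the function (e.g. after kexec).
 */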

static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;

	rmb();
	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
}

static void
efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
			    size_t offset, size_t outlen)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(outbuf, pdu + offset, outlen);
}

static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = efx_ef10_get_warm_boot_count(efx);
	if (rc < 0) {
		/* The firmware is presumably in the process of
		 * rebooting.  However, we are supposed to report each
		 * reboot just once, so we must only do that once we
		 * can read and store the updated warm boot count.
		 */
		return 0;
	}

	if (rc == nic_data->warm_boot_count)
		return 0;

	nic_data->warm_boot_count = rc;

	/* All our allocations have been reset */
	efx_ef10_reset_mc_allocations(efx);

	/* The datapath firmware might have been changed */
	nic_data->must_check_datapath_caps = true;

	/* MAC statistics have been cleared on the NIC; clear the local
	 * statistic that we update with efx_update_diff_stat().
	 */
	nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;

	return -EIO;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());

	if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
		/* Note test interrupts */
		if (context->index == efx->irq_level)
			efx->last_irq_cpu = raw_smp_processor_id();

		/* Schedule processing of the channel */
		efx_schedule_channel_irq(efx->channel[context->index]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
	queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);

	if (queues == 0)
		return IRQ_NONE;

	if (likely(soft_enabled)) {
		/* Note test interrupts */
		if (queues & (1U << efx->irq_level))
			efx->last_irq_cpu = raw_smp_processor_id();

		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
	}

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return IRQ_HANDLED;
}

static void efx_ef10_irq_test_generate(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
	(void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}

static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
{
	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
				    (tx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

/* This writes to the TX_DESC_WPTR and also pushes data */
static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
					 const efx_qword_t *txd)
{
	unsigned int write_ptr;
	efx_oword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD, tx_queue->queue);
}

static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						       EFX_BUF_SIZE));
	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t inlen;
	dma_addr_t dma_addr;
	efx_qword_t *txd;
	int rc;
	int i;

	BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
	MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
			      INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
			      INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);

	dma_addr = tx_queue->txd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
		  tx_queue->queue, entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	/* A previous user of this TX queue might have set us up the
	 * bomb by writing a descriptor to the TX push collector but
	 * not the doorbell.  (Each collector belongs to a port, not a
	 * queue or function, so cannot easily be reset.)  We must
	 * attempt to push a no-op descriptor in its place.
	 */
	tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
	tx_queue->insert_count = 1;
	txd = efx_tx_desc(tx_queue, 0);
	EFX_POPULATE_QWORD_4(*txd,
			     ESF_DZ_TX_DESC_IS_OPT, true,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
			     ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
	tx_queue->write_count = 1;
	wmb();
	efx_ef10_push_tx_desc(tx_queue, txd);

	return;

fail:
	netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
		    tx_queue->queue);
}

static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = tx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
		       tx_queue->queue);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
			       outbuf, outlen, rc);
}

static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned int write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
}

static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
{
	unsigned int old_write_count = tx_queue->write_count;
	struct efx_tx_buffer *buffer;
	unsigned int write_ptr;
	efx_qword_t *txd;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		if (buffer->flags & EFX_TX_BUF_OPTION) {
			*txd = buffer->option;
		} else {
			BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
			EFX_POPULATE_QWORD_3(
				*txd,
				ESF_DZ_TX_KER_CONT,
				buffer->flags & EFX_TX_BUF_CONT,
				ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
				ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
		}
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_ef10_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_ef10_notify_tx_desc(tx_queue);
	}
}
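
/* Two doorbell flavours are used above, chosen by
 * efx_nic_may_push_tx_desc(): a "push" writes the first new
 * descriptor together with the write pointer in a single operation,
 * saving the NIC an initial descriptor fetch on a lightly loaded
 * queue, while the plain notify only updates the write pointer and
 * lets the NIC fetch descriptors by DMA.
 */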

static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
				      bool exclusive, unsigned *context_size)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;
	u32 alloc_type = exclusive ?
				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
	unsigned rss_spread = exclusive ?
				efx->rss_spread :
				min(rounddown_pow_of_two(efx->rss_spread),
				    EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
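
	/* A shared context cannot be sized to an arbitrary rss_spread,
	 * so it gets the largest power of two not above it, capped at
	 * EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE: e.g. rss_spread = 24
	 * yields a 16-queue shared context, and anything >= 64 yields
	 * 64.
	 */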

	if (!exclusive && rss_spread == 1) {
		*context = EFX_EF10_RSS_CONTEXT_INVALID;
		if (context_size)
			*context_size = 1;
		return 0;
	}

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
		       nic_data->vport_id);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
		return -EIO;

	*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);

	if (context_size)
		*context_size = rss_spread;

	return 0;
}
1911 static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
1913 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
1916 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
1919 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
1924 static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
1925 const u32 *rx_indir_table)
1927 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
1928 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
1931 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
1933 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1934 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
1936 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
1938 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
1939 (u8) rx_indir_table[i];
1941 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
1942 sizeof(tablebuf), NULL, 0, NULL);
1946 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
1948 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
1949 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
1950 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
1951 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
1952 efx->rx_hash_key[i];
1954 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
1955 sizeof(keybuf), NULL, 0, NULL);
1958 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
1960 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1962 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
1963 efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
1964 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
1967 static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
1968 unsigned *context_size)
1970 u32 new_rx_rss_context;
1971 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1972 int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
1973 false, context_size);
1978 nic_data->rx_rss_context = new_rx_rss_context;
1979 nic_data->rx_rss_context_exclusive = false;
1980 efx_set_default_rx_indir_table(efx);
1984 static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
1985 const u32 *rx_indir_table)
1987 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1989 u32 new_rx_rss_context;
1991 if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
1992 !nic_data->rx_rss_context_exclusive) {
1993 rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
1995 if (rc == -EOPNOTSUPP)
2000 new_rx_rss_context = nic_data->rx_rss_context;
2003 rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
2008 if (nic_data->rx_rss_context != new_rx_rss_context)
2009 efx_ef10_rx_free_indir_table(efx);
2010 nic_data->rx_rss_context = new_rx_rss_context;
2011 nic_data->rx_rss_context_exclusive = true;
2012 if (rx_indir_table != efx->rx_indir_table)
2013 memcpy(efx->rx_indir_table, rx_indir_table,
2014 sizeof(efx->rx_indir_table));
2018 if (new_rx_rss_context != nic_data->rx_rss_context)
2019 efx_ef10_free_rss_context(efx, new_rx_rss_context);
2021 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2025 static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
2026 const u32 *rx_indir_table)
2030 if (efx->rss_spread == 1)
2031 return 0;
2033 rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
2035 if (rc == -ENOBUFS && !user) {
2036 unsigned context_size;
2037 bool mismatch = false;
2040 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
2041 i++)
2042 mismatch = rx_indir_table[i] !=
2043 ethtool_rxfh_indir_default(i, efx->rss_spread);
2045 rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
2047 if (context_size != efx->rss_spread)
2048 netif_warn(efx, probe, efx->net_dev,
2049 "Could not allocate an exclusive RSS"
2050 " context; allocated a shared one of"
2052 " Wanted %u, got %u.\n",
2053 efx->rss_spread, context_size);
2055 netif_warn(efx, probe, efx->net_dev,
2056 "Could not allocate an exclusive RSS"
2057 " context; allocated a shared one but"
2058 " could not apply custom"
2061 netif_info(efx, probe, efx->net_dev,
2062 "Could not allocate an exclusive RSS"
2063 " context; allocated a shared one.\n");
2069 static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
2070 const u32 *rx_indir_table
2071 __attribute__ ((unused)))
2073 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2077 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2078 return 0;
2079 return efx_ef10_rx_push_shared_rss_config(efx, NULL);
2082 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
2084 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
2085 (rx_queue->ptr_mask + 1) *
2086 sizeof(efx_qword_t),
2087 GFP_KERNEL);
2090 static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
2092 MCDI_DECLARE_BUF(inbuf,
2093 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
2094 EFX_BUF_SIZE));
2095 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
2096 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
2097 struct efx_nic *efx = rx_queue->efx;
2098 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2099 size_t inlen;
2100 dma_addr_t dma_addr;
2101 int rc;
2102 int i;
2103 BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
2105 rx_queue->scatter_n = 0;
2106 rx_queue->scatter_len = 0;
2108 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
2109 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
2110 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
2111 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
2112 efx_rx_queue_index(rx_queue));
2113 MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
2114 INIT_RXQ_IN_FLAG_PREFIX, 1,
2115 INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
2116 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
2117 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
2119 dma_addr = rx_queue->rxd.buf.dma_addr;
2121 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
2122 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
2124 for (i = 0; i < entries; ++i) {
2125 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
2126 dma_addr += EFX_BUF_SIZE;
2129 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
2131 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
2132 NULL, 0, NULL);
2133 if (rc)
2134 netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
2135 efx_rx_queue_index(rx_queue));
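/* Editor's note: a worked example for the INIT_RXQ DMA loop above,
 * assuming EFX_BUF_SIZE is the driver's 4096-byte buffer-table chunk.
 * A 512-entry ring of 8-byte descriptors occupies 512 * 8 = 4096
 * bytes, so entries == 1 and a single address is programmed; the
 * largest ring (4096 descriptors) needs 4096 * 8 / 4096 = 8 chunks,
 * which also bounds the inbuf declared at the top of the function.
 */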
2138 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
2140 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
2141 MCDI_DECLARE_BUF_ERR(outbuf);
2142 struct efx_nic *efx = rx_queue->efx;
2146 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
2147 efx_rx_queue_index(rx_queue));
2149 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
2150 outbuf, sizeof(outbuf), &outlen);
2152 if (rc && rc != -EALREADY)
2158 efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
2159 outbuf, outlen, rc);
2162 static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
2164 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
2167 /* This creates an entry in the RX descriptor queue */
2169 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
2171 struct efx_rx_buffer *rx_buf;
2174 rxd = efx_rx_desc(rx_queue, index);
2175 rx_buf = efx_rx_buffer(rx_queue, index);
2176 EFX_POPULATE_QWORD_2(*rxd,
2177 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
2178 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
2181 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
2183 struct efx_nic *efx = rx_queue->efx;
2184 unsigned int write_count;
2187 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
2188 write_count = rx_queue->added_count & ~7;
2189 if (rx_queue->notified_count == write_count)
2190 return;
2192 do
2193 efx_ef10_build_rx_desc(
2194 rx_queue,
2195 rx_queue->notified_count & rx_queue->ptr_mask);
2196 while (++rx_queue->notified_count != write_count);
2199 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
2200 write_count & rx_queue->ptr_mask);
2201 efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD,
2202 efx_rx_queue_index(rx_queue));
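/* Editor's note: worked example of the multiple-of-8 rule above.
 * With added_count == 29 and notified_count == 24, write_count =
 * 29 & ~7 = 24 equals notified_count and nothing is pushed; once
 * added_count reaches 32, descriptors 24..31 are built and the
 * doorbell write advances RX_DESC_WPTR to 32 & ptr_mask.
 */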
2205 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
2207 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
2209 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
2210 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
2211 efx_qword_t event;
2213 EFX_POPULATE_QWORD_2(event,
2214 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
2215 ESF_DZ_EV_DATA, EFX_EF10_REFILL);
2217 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
2219 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
2220 * already swapped the data to little-endian order.
2222 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
2223 sizeof(efx_qword_t));
2225 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
2226 inbuf, sizeof(inbuf), 0,
2227 efx_ef10_rx_defer_refill_complete, 0);
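/* Editor's note: the refill request above round-trips through the MC.
 * MC_CMD_DRIVER_EVENT posts the qword built with EFX_EF10_DRVGEN_EV
 * onto this channel's own event queue; it is consumed later in
 * efx_ef10_handle_driver_generated_event(), whose EFX_EF10_REFILL case
 * calls efx_fast_push_rx_descriptors() from the normal event path,
 * avoiding a refill in atomic context here.
 */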
2230 static void
2231 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
2232 int rc, efx_dword_t *outbuf,
2233 size_t outlen_actual)
2238 static int efx_ef10_ev_probe(struct efx_channel *channel)
2240 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
2241 (channel->eventq_mask + 1) *
2242 sizeof(efx_qword_t),
2243 GFP_KERNEL);
2246 static void efx_ef10_ev_fini(struct efx_channel *channel)
2248 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
2249 MCDI_DECLARE_BUF_ERR(outbuf);
2250 struct efx_nic *efx = channel->efx;
2254 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
2256 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
2257 outbuf, sizeof(outbuf), &outlen);
2259 if (rc && rc != -EALREADY)
2265 efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
2266 outbuf, outlen, rc);
2269 static int efx_ef10_ev_init(struct efx_channel *channel)
2271 MCDI_DECLARE_BUF(inbuf,
2272 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
2273 EFX_BUF_SIZE));
2274 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
2275 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
2276 struct efx_nic *efx = channel->efx;
2277 struct efx_ef10_nic_data *nic_data;
2278 bool supports_rx_merge;
2279 size_t inlen, outlen;
2280 unsigned int enabled, implemented;
2281 dma_addr_t dma_addr;
2285 nic_data = efx->nic_data;
2286 supports_rx_merge =
2287 !!(nic_data->datapath_caps &
2288 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
2290 /* Fill event queue with all ones (i.e. empty events) */
2291 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
2293 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
2294 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
2295 /* INIT_EVQ expects index in vector table, not absolute */
2296 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
2297 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
2298 INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
2299 INIT_EVQ_IN_FLAG_RX_MERGE, 1,
2300 INIT_EVQ_IN_FLAG_TX_MERGE, 1,
2301 INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
2302 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
2303 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
2304 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
2305 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
2306 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
2307 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
2308 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
2310 dma_addr = channel->eventq.buf.dma_addr;
2311 for (i = 0; i < entries; ++i) {
2312 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
2313 dma_addr += EFX_BUF_SIZE;
2316 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
2318 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
2319 outbuf, sizeof(outbuf), &outlen);
2320 /* IRQ return is ignored */
2321 if (channel->channel || rc)
2322 return rc;
2324 /* Successfully created event queue on channel 0 */
2325 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
2326 if (rc == -ENOSYS) {
2327 /* GET_WORKAROUNDS was implemented before the bug26807
2328 * workaround, thus the latter must be unavailable in this fw
2329 */
2330 nic_data->workaround_26807 = false;
2331 rc = 0;
2335 nic_data->workaround_26807 =
2336 !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
2338 if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
2339 !nic_data->workaround_26807) {
2340 unsigned int flags;
2342 rc = efx_mcdi_set_workaround(efx,
2343 MC_CMD_WORKAROUND_BUG26807,
2344 true, &flags);
2346 if (!rc) {
2347 if (flags &
2348 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
2349 netif_info(efx, drv, efx->net_dev,
2350 "other functions on NIC have been reset\n");
2351 /* MC's boot count has incremented */
2352 ++nic_data->warm_boot_count;
2353 }
2354 nic_data->workaround_26807 = true;
2355 } else if (rc == -EPERM) {
2365 efx_ef10_ev_fini(channel);
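/* Editor's note: summary of the bug26807 negotiation in
 * efx_ef10_ev_init() above.  If GET_WORKAROUNDS is unimplemented
 * (-ENOSYS) the workaround is assumed absent; if implemented but
 * disabled, SET_WORKAROUND is attempted, and an FLR_DONE flag in the
 * response means sibling functions were reset, so the local warm boot
 * count is bumped to avoid treating the MC's new boot count as an
 * unexpected reboot; -EPERM means another function owns the setting.
 */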
2369 static void efx_ef10_ev_remove(struct efx_channel *channel)
2371 efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
2374 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
2375 unsigned int rx_queue_label)
2377 struct efx_nic *efx = rx_queue->efx;
2379 netif_info(efx, hw, efx->net_dev,
2380 "rx event arrived on queue %d labeled as queue %u\n",
2381 efx_rx_queue_index(rx_queue), rx_queue_label);
2383 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
2386 static void
2387 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
2388 unsigned int actual, unsigned int expected)
2390 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
2391 struct efx_nic *efx = rx_queue->efx;
2393 netif_info(efx, hw, efx->net_dev,
2394 "dropped %d events (index=%d expected=%d)\n",
2395 dropped, actual, expected);
2397 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
2400 /* A partially received packet was aborted; clean up. */
2401 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
2403 unsigned int rx_desc_ptr;
2405 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
2406 "scattered RX aborted (dropping %u buffers)\n",
2407 rx_queue->scatter_n);
2409 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
2411 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
2412 0, EFX_RX_PKT_DISCARD);
2414 rx_queue->removed_count += rx_queue->scatter_n;
2415 rx_queue->scatter_n = 0;
2416 rx_queue->scatter_len = 0;
2417 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
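/* Editor's note: worked example for the abort path above.  If a
 * scattered packet had consumed 3 descriptors (scatter_n == 3) when
 * the abort arrived, the partial packet is delivered once with
 * EFX_RX_PKT_DISCARD, removed_count advances by 3, both scatter
 * counters reset, and the per-channel nodesc_trunc counter records
 * the drop.
 */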
2420 static int efx_ef10_handle_rx_event(struct efx_channel *channel,
2421 const efx_qword_t *event)
2423 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
2424 unsigned int n_descs, n_packets, i;
2425 struct efx_nic *efx = channel->efx;
2426 struct efx_rx_queue *rx_queue;
2427 bool rx_cont;
2428 u16 flags = 0;
2430 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
2433 /* Basic packet information */
2434 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
2435 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
2436 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
2437 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
2438 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
2440 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
2441 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
2442 EFX_QWORD_FMT "\n",
2443 EFX_QWORD_VAL(*event));
2445 rx_queue = efx_channel_get_rx_queue(channel);
2447 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
2448 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
2450 n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
2451 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
2453 if (n_descs != rx_queue->scatter_n + 1) {
2454 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2456 /* detect rx abort */
2457 if (unlikely(n_descs == rx_queue->scatter_n)) {
2458 if (rx_queue->scatter_n == 0 || rx_bytes != 0)
2459 netdev_WARN(efx->net_dev,
2460 "invalid RX abort: scatter_n=%u event="
2461 EFX_QWORD_FMT "\n",
2462 rx_queue->scatter_n,
2463 EFX_QWORD_VAL(*event));
2464 efx_ef10_handle_rx_abort(rx_queue);
2465 return 0;
2468 /* Check that RX completion merging is valid, i.e.
2469 * the current firmware supports it and this is a
2470 * non-scattered packet.
2472 if (!(nic_data->datapath_caps &
2473 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
2474 rx_queue->scatter_n != 0 || rx_cont) {
2475 efx_ef10_handle_rx_bad_lbits(
2476 rx_queue, next_ptr_lbits,
2477 (rx_queue->removed_count +
2478 rx_queue->scatter_n + 1) &
2479 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
2480 return 0;
2483 /* Merged completion for multiple non-scattered packets */
2484 rx_queue->scatter_n = 1;
2485 rx_queue->scatter_len = 0;
2486 n_packets = n_descs;
2487 ++channel->n_rx_merge_events;
2488 channel->n_rx_merge_packets += n_packets;
2489 flags |= EFX_RX_PKT_PREFIX_LEN;
2490 } else {
2491 ++rx_queue->scatter_n;
2492 rx_queue->scatter_len += rx_bytes;
2493 if (rx_cont)
2494 return 0;
2495 n_packets = 1;
2496 }
2498 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
2499 flags |= EFX_RX_PKT_DISCARD;
2501 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
2502 channel->n_rx_ip_hdr_chksum_err += n_packets;
2503 } else if (unlikely(EFX_QWORD_FIELD(*event,
2504 ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
2505 channel->n_rx_tcp_udp_chksum_err += n_packets;
2506 } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
2507 rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
2508 flags |= EFX_RX_PKT_CSUMMED;
2511 if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
2512 flags |= EFX_RX_PKT_TCP;
2514 channel->irq_mod_score += 2 * n_packets;
2516 /* Handle received packet(s) */
2517 for (i = 0; i < n_packets; i++) {
2518 efx_rx_packet(rx_queue,
2519 rx_queue->removed_count & rx_queue->ptr_mask,
2520 rx_queue->scatter_n, rx_queue->scatter_len,
2521 flags);
2522 rx_queue->removed_count += rx_queue->scatter_n;
2525 rx_queue->scatter_n = 0;
2526 rx_queue->scatter_len = 0;
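/* Editor's note: the descriptor arithmetic above in one example.
 * ESF_DZ_RX_DSC_PTR_LBITS is a narrow field (4 bits on Huntington),
 * so n_descs is computed modulo its width: with removed_count == 14
 * and next_ptr_lbits == 2, n_descs = (2 - 14) & 15 = 4.  If that
 * exceeds scatter_n + 1 and the firmware advertises RX batching, the
 * event is treated as a merged completion of n_descs
 * single-descriptor packets; otherwise the low bits are genuinely
 * wrong and the queue is failed.
 */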
2531 static int
2532 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
2534 struct efx_nic *efx = channel->efx;
2535 struct efx_tx_queue *tx_queue;
2536 unsigned int tx_ev_desc_ptr;
2537 unsigned int tx_ev_q_label;
2538 int tx_descs = 0;
2540 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
2543 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
2546 /* Transmit completion */
2547 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
2548 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
2549 tx_queue = efx_channel_get_tx_queue(channel,
2550 tx_ev_q_label % EFX_TXQ_TYPES);
2551 tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
2552 tx_queue->ptr_mask);
2553 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
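/* Editor's note: TX completions use the same modular arithmetic.
 * With read_count == 1020, tx_ev_desc_ptr == 3 and ptr_mask == 1023,
 * tx_descs = (3 + 1 - 1020) & 1023 = 8 descriptors completed, and
 * efx_xmit_done() is told the index of the last one, 3 & 1023.
 */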
2558 static void
2559 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
2561 struct efx_nic *efx = channel->efx;
2562 int subcode;
2564 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
2566 switch (subcode) {
2567 case ESE_DZ_DRV_TIMER_EV:
2568 case ESE_DZ_DRV_WAKE_UP_EV:
2569 break;
2570 case ESE_DZ_DRV_START_UP_EV:
2571 /* Event queue init complete; nothing to do. */
2572 break;
2573 default:
2574 netif_err(efx, hw, efx->net_dev,
2575 "channel %d unknown driver event type %d"
2576 " (data " EFX_QWORD_FMT ")\n",
2577 channel->channel, subcode,
2578 EFX_QWORD_VAL(*event));
2583 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
2584 efx_qword_t *event)
2586 struct efx_nic *efx = channel->efx;
2587 u32 subcode;
2589 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
2591 switch (subcode) {
2592 case EFX_EF10_TEST:
2593 channel->event_test_cpu = raw_smp_processor_id();
2594 break;
2595 case EFX_EF10_REFILL:
2596 /* The queue must be empty, so we won't receive any rx
2597 * events, so efx_process_channel() won't refill the
2598 * queue. Refill it here
2600 efx_fast_push_rx_descriptors(&channel->rx_queue, true);
2601 break;
2602 default:
2603 netif_err(efx, hw, efx->net_dev,
2604 "channel %d unknown driver event type %u"
2605 " (data " EFX_QWORD_FMT ")\n",
2606 channel->channel, (unsigned) subcode,
2607 EFX_QWORD_VAL(*event));
2611 static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
2613 struct efx_nic *efx = channel->efx;
2614 efx_qword_t event, *p_event;
2615 unsigned int read_ptr;
2616 int ev_code;
2617 int tx_descs = 0;
2618 int spent = 0;
2623 read_ptr = channel->eventq_read_ptr;
2625 for (;;) {
2626 p_event = efx_event(channel, read_ptr);
2627 event = *p_event;
2629 if (!efx_event_present(&event))
2630 break;
2632 EFX_SET_QWORD(*p_event);
2634 ++read_ptr;
2636 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
2638 netif_vdbg(efx, drv, efx->net_dev,
2639 "processing event on %d " EFX_QWORD_FMT "\n",
2640 channel->channel, EFX_QWORD_VAL(event));
2642 switch (ev_code) {
2643 case ESE_DZ_EV_CODE_MCDI_EV:
2644 efx_mcdi_process_event(channel, &event);
2645 break;
2646 case ESE_DZ_EV_CODE_RX_EV:
2647 spent += efx_ef10_handle_rx_event(channel, &event);
2648 if (spent >= quota) {
2649 /* XXX can we split a merged event to
2650 * avoid going over-quota?
2651 */
2652 spent = quota;
2653 goto out;
2654 }
2655 break;
2656 case ESE_DZ_EV_CODE_TX_EV:
2657 tx_descs += efx_ef10_handle_tx_event(channel, &event);
2658 if (tx_descs > efx->txq_entries) {
2659 spent = quota;
2660 goto out;
2661 } else if (++spent == quota) {
2662 goto out;
2663 }
2664 break;
2665 case ESE_DZ_EV_CODE_DRIVER_EV:
2666 efx_ef10_handle_driver_event(channel, &event);
2667 if (++spent == quota)
2668 goto out;
2669 break;
2670 case EFX_EF10_DRVGEN_EV:
2671 efx_ef10_handle_driver_generated_event(channel, &event);
2672 break;
2673 default:
2674 netif_err(efx, hw, efx->net_dev,
2675 "channel %d unknown event type %d"
2676 " (data " EFX_QWORD_FMT ")\n",
2677 channel->channel, ev_code,
2678 EFX_QWORD_VAL(event));
2679 }
2680 }
2682 out:
2683 channel->eventq_read_ptr = read_ptr;
2684 return spent;
2687 static void efx_ef10_ev_read_ack(struct efx_channel *channel)
2689 struct efx_nic *efx = channel->efx;
2692 if (EFX_EF10_WORKAROUND_35388(efx)) {
2693 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
2694 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
2695 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
2696 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
2698 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
2699 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
2700 ERF_DD_EVQ_IND_RPTR,
2701 (channel->eventq_read_ptr &
2702 channel->eventq_mask) >>
2703 ERF_DD_EVQ_IND_RPTR_WIDTH);
2704 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
2705 channel->channel);
2706 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
2707 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
2708 ERF_DD_EVQ_IND_RPTR,
2709 channel->eventq_read_ptr &
2710 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
2711 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
2712 channel->channel);
2713 } else {
2714 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
2715 channel->eventq_read_ptr &
2716 channel->eventq_mask);
2717 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
2721 static void efx_ef10_ev_test_generate(struct efx_channel *channel)
2723 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
2724 struct efx_nic *efx = channel->efx;
2725 efx_qword_t event;
2726 int rc;
2728 EFX_POPULATE_QWORD_2(event,
2729 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
2730 ESF_DZ_EV_DATA, EFX_EF10_TEST);
2732 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
2734 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
2735 * already swapped the data to little-endian order.
2737 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
2738 sizeof(efx_qword_t));
2740 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
2741 NULL, 0, NULL);
2749 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2752 void efx_ef10_handle_drain_event(struct efx_nic *efx)
2754 if (atomic_dec_and_test(&efx->active_queues))
2755 wake_up(&efx->flush_wq);
2757 WARN_ON(atomic_read(&efx->active_queues) < 0);
2760 static int efx_ef10_fini_dmaq(struct efx_nic *efx)
2762 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2763 struct efx_channel *channel;
2764 struct efx_tx_queue *tx_queue;
2765 struct efx_rx_queue *rx_queue;
2766 int pending;
2768 /* If the MC has just rebooted, the TX/RX queues will have already been
2769 * torn down, but efx->active_queues needs to be set to zero.
2771 if (nic_data->must_realloc_vis) {
2772 atomic_set(&efx->active_queues, 0);
2773 return 0;
2774 }
2776 /* Do not attempt to write to the NIC during EEH recovery */
2777 if (efx->state != STATE_RECOVERY) {
2778 efx_for_each_channel(channel, efx) {
2779 efx_for_each_channel_rx_queue(rx_queue, channel)
2780 efx_ef10_rx_fini(rx_queue);
2781 efx_for_each_channel_tx_queue(tx_queue, channel)
2782 efx_ef10_tx_fini(tx_queue);
2785 wait_event_timeout(efx->flush_wq,
2786 atomic_read(&efx->active_queues) == 0,
2787 msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
2788 pending = atomic_read(&efx->active_queues);
2789 if (pending) {
2790 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
2791 pending);
2799 static void efx_ef10_prepare_flr(struct efx_nic *efx)
2801 atomic_set(&efx->active_queues, 0);
2804 static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
2805 const struct efx_filter_spec *right)
2807 if ((left->match_flags ^ right->match_flags) |
2808 ((left->flags ^ right->flags) &
2809 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
2810 return false;
2812 return memcmp(&left->outer_vid, &right->outer_vid,
2813 sizeof(struct efx_filter_spec) -
2814 offsetof(struct efx_filter_spec, outer_vid)) == 0;
2817 static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
2819 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
2820 return jhash2((const u32 *)&spec->outer_vid,
2821 (sizeof(struct efx_filter_spec) -
2822 offsetof(struct efx_filter_spec, outer_vid)) / 4,
2823 0);
2824 /* XXX should we randomise the initval? */
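/* Editor's note: both helpers above treat everything in struct
 * efx_filter_spec from outer_vid onwards as the match tuple, so
 * equality is a single memcmp() and hashing is jhash2() over the same
 * bytes; the BUILD_BUG_ON above guarantees the 4-byte alignment that
 * jhash2() requires.
 */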
2827 /* Decide whether a filter should be exclusive or else should allow
2828 * delivery to additional recipients. Currently we decide that
2829 * filters for specific local unicast MAC and IP addresses are
2830 * exclusive.
2831 */
2832 static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
2834 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
2835 !is_multicast_ether_addr(spec->loc_mac))
2838 if ((spec->match_flags &
2839 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
2840 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
2841 if (spec->ether_type == htons(ETH_P_IP) &&
2842 !ipv4_is_multicast(spec->loc_host[0]))
2844 if (spec->ether_type == htons(ETH_P_IPV6) &&
2845 ((const u8 *)spec->loc_host)[0] != 0xff)
2852 static struct efx_filter_spec *
2853 efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
2854 unsigned int filter_idx)
2856 return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
2857 ~EFX_EF10_FILTER_FLAGS);
2860 static unsigned int
2861 efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
2862 unsigned int filter_idx)
2864 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
2867 static void
2868 efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
2869 unsigned int filter_idx,
2870 const struct efx_filter_spec *spec,
2871 unsigned int flags)
2873 table->entry[filter_idx].spec = (unsigned long)spec | flags;
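/* Editor's note: the three helpers above implement pointer tagging.
 * The low two bits of the kmalloc()ed spec pointer (at least 4-byte
 * aligned) carry the BUSY and AUTO_OLD flags:
 *
 *	entry.spec = (unsigned long)spec | EFX_EF10_FILTER_FLAG_BUSY;
 *	spec  = (void *)(entry.spec & ~EFX_EF10_FILTER_FLAGS);
 *	flags = entry.spec & EFX_EF10_FILTER_FLAGS;
 *
 * so flag updates and pointer reads stay a single word wide.
 */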
2876 static void efx_ef10_filter_push_prep(struct efx_nic *efx,
2877 const struct efx_filter_spec *spec,
2878 efx_dword_t *inbuf, u64 handle,
2881 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2883 memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
2885 if (replacing) {
2886 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2887 MC_CMD_FILTER_OP_IN_OP_REPLACE);
2888 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
2889 } else {
2890 u32 match_fields = 0;
2892 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2893 efx_ef10_filter_is_exclusive(spec) ?
2894 MC_CMD_FILTER_OP_IN_OP_INSERT :
2895 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
2897 /* Convert match flags and values. Unlike almost
2898 * everything else in MCDI, these fields are in
2899 * network byte order.
2901 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
2902 match_fields =
2903 is_multicast_ether_addr(spec->loc_mac) ?
2904 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
2905 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
2906 #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
2907 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
2908 match_fields |= \
2909 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
2910 mcdi_field ## _LBN; \
2911 BUILD_BUG_ON( \
2912 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
2913 sizeof(spec->gen_field)); \
2914 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
2915 &spec->gen_field, sizeof(spec->gen_field)); \
2916 }
2917 COPY_FIELD(REM_HOST, rem_host, SRC_IP);
2918 COPY_FIELD(LOC_HOST, loc_host, DST_IP);
2919 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
2920 COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
2921 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
2922 COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
2923 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
2924 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
2925 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
2926 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
2928 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
2929 match_fields);
2932 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
2933 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
2934 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
2935 MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
2936 MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
2937 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
2938 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
2939 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
2940 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
2941 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
2942 0 : spec->dmaq_id);
2943 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
2944 (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
2945 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
2946 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
2947 if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
2948 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
2949 spec->rss_context !=
2950 EFX_FILTER_RSS_CONTEXT_DEFAULT ?
2951 spec->rss_context : nic_data->rx_rss_context);
2954 static int efx_ef10_filter_push(struct efx_nic *efx,
2955 const struct efx_filter_spec *spec,
2956 u64 *handle, bool replacing)
2958 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2959 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
2962 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
2963 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2964 outbuf, sizeof(outbuf), NULL);
2965 if (rc == 0)
2966 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
2967 if (rc == -ENOSPC)
2968 rc = -EBUSY; /* to match efx_farch_filter_insert() */
2969 return rc;
2972 static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
2973 enum efx_filter_match_flags match_flags)
2975 unsigned int match_pri;
2977 for (match_pri = 0;
2978 match_pri < table->rx_match_count;
2979 match_pri++)
2980 if (table->rx_match_flags[match_pri] == match_flags)
2981 return match_pri;
2983 return -EPROTONOSUPPORT;
2986 static s32 efx_ef10_filter_insert(struct efx_nic *efx,
2987 struct efx_filter_spec *spec,
2988 bool replace_equal)
2990 struct efx_ef10_filter_table *table = efx->filter_state;
2991 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
2992 struct efx_filter_spec *saved_spec;
2993 unsigned int match_pri, hash;
2994 unsigned int priv_flags;
2995 bool replacing = false;
2996 int ins_index = -1;
2997 DEFINE_WAIT(wait);
2998 bool is_mc_recip;
2999 s32 rc;
3001 /* For now, only support RX filters */
3002 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
3003 EFX_FILTER_FLAG_RX)
3004 return -EINVAL;
3006 rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
3007 if (rc < 0)
3008 return rc;
3009 match_pri = rc;
3011 hash = efx_ef10_filter_hash(spec);
3012 is_mc_recip = efx_filter_is_mc_recipient(spec);
3014 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
3016 /* Find any existing filters with the same match tuple or
3017 * else a free slot to insert at. If any of them are busy,
3018 * we have to wait and retry.
3021 unsigned int depth = 1;
3024 spin_lock_bh(&efx->filter_lock);
3027 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3028 saved_spec = efx_ef10_filter_entry_spec(table, i);
3033 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
3034 if (table->entry[i].spec &
3035 EFX_EF10_FILTER_FLAG_BUSY)
3037 if (spec->priority < saved_spec->priority &&
3038 spec->priority != EFX_FILTER_PRI_AUTO) {
3043 /* This is the only one */
3044 if (spec->priority ==
3045 saved_spec->priority &&
3052 } else if (spec->priority >
3053 saved_spec->priority ||
3055 saved_spec->priority &&
3060 __set_bit(depth, mc_rem_map);
3064 /* Once we reach the maximum search depth, use
3065 * the first suitable slot or return -EBUSY if
3068 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
3069 if (ins_index < 0) {
3079 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
3080 spin_unlock_bh(&efx->filter_lock);
3085 /* Create a software table entry if necessary, and mark it
3086 * busy. We might yet fail to insert, but any attempt to
3087 * insert a conflicting filter while we're waiting for the
3088 * firmware must find the busy entry.
3090 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
3092 if (spec->priority == EFX_FILTER_PRI_AUTO &&
3093 saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
3094 /* Just make sure it won't be removed */
3095 if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
3096 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
3097 table->entry[ins_index].spec &=
3098 ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
3103 priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
3104 } else {
3105 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
3106 if (!saved_spec) {
3107 rc = -ENOMEM;
3108 goto out_unlock;
3109 }
3110 *saved_spec = *spec;
3111 priv_flags = 0;
3112 }
3113 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
3114 priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
3116 /* Mark lower-priority multicast recipients busy prior to removal */
3117 if (is_mc_recip) {
3118 unsigned int depth, i;
3120 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
3121 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3122 if (test_bit(depth, mc_rem_map))
3123 table->entry[i].spec |=
3124 EFX_EF10_FILTER_FLAG_BUSY;
3128 spin_unlock_bh(&efx->filter_lock);
3130 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
3131 replacing);
3133 /* Finalise the software table entry */
3134 spin_lock_bh(&efx->filter_lock);
3135 if (rc == 0) {
3136 if (replacing) {
3137 /* Update the fields that may differ */
3138 if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
3139 saved_spec->flags |=
3140 EFX_FILTER_FLAG_RX_OVER_AUTO;
3141 saved_spec->priority = spec->priority;
3142 saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
3143 saved_spec->flags |= spec->flags;
3144 saved_spec->rss_context = spec->rss_context;
3145 saved_spec->dmaq_id = spec->dmaq_id;
3147 } else if (!replacing) {
3148 kfree(saved_spec);
3149 saved_spec = NULL;
3150 }
3151 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
3153 /* Remove and finalise entries for lower-priority multicast
3154 * recipients
3155 */
3156 if (is_mc_recip) {
3157 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3158 unsigned int depth, i;
3160 memset(inbuf, 0, sizeof(inbuf));
3162 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
3163 if (!test_bit(depth, mc_rem_map))
3164 continue;
3166 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3167 saved_spec = efx_ef10_filter_entry_spec(table, i);
3168 priv_flags = efx_ef10_filter_entry_flags(table, i);
3170 if (rc == 0) {
3171 spin_unlock_bh(&efx->filter_lock);
3172 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3173 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3174 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3175 table->entry[i].handle);
3176 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
3177 inbuf, sizeof(inbuf),
3178 NULL, 0, NULL);
3179 spin_lock_bh(&efx->filter_lock);
3187 priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
3189 efx_ef10_filter_set_entry(table, i, saved_spec,
3190 priv_flags);
3194 /* If successful, return the inserted filter ID */
3196 rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
3198 wake_up_all(&table->waitq);
3199 out_unlock:
3200 spin_unlock_bh(&efx->filter_lock);
3201 finish_wait(&table->waitq, &wait);
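/* Editor's note: worked example of the filter ID encoding returned
 * above.  With HUNT_FILTER_TBL_ROWS == 8192, match_pri == 2 and
 * ins_index == 100, the user-visible ID is 2 * 8192 + 100 = 16484;
 * the remove/get paths below recover the row as 16484 % 8192 == 100
 * and the match priority as 16484 / 8192 == 2.
 */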
3205 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
3207 /* no need to do anything here on EF10 */
3211 * If !by_index, remove by ID
3212 * If by_index, remove by index
3213 * Filter ID may come from userland and must be range-checked.
3215 static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
3216 unsigned int priority_mask,
3217 u32 filter_id, bool by_index)
3219 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
3220 struct efx_ef10_filter_table *table = efx->filter_state;
3221 MCDI_DECLARE_BUF(inbuf,
3222 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
3223 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
3224 struct efx_filter_spec *spec;
3225 DEFINE_WAIT(wait);
3226 int rc;
3228 /* Find the software table entry and mark it busy. Don't
3229 * remove it yet; any attempt to update while we're waiting
3230 * for the firmware must find the busy entry.
3233 spin_lock_bh(&efx->filter_lock);
3234 if (!(table->entry[filter_idx].spec &
3235 EFX_EF10_FILTER_FLAG_BUSY))
3237 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
3238 spin_unlock_bh(&efx->filter_lock);
3242 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3243 if (!spec ||
3244 (!by_index &&
3245 efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
3246 filter_id / HUNT_FILTER_TBL_ROWS)) {
3251 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
3252 priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
3253 /* Just remove flags */
3254 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
3255 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
3260 if (!(priority_mask & (1U << spec->priority))) {
3265 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
3266 spin_unlock_bh(&efx->filter_lock);
3268 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
3269 /* Reset to an automatic filter */
3271 struct efx_filter_spec new_spec = *spec;
3273 new_spec.priority = EFX_FILTER_PRI_AUTO;
3274 new_spec.flags = (EFX_FILTER_FLAG_RX |
3275 EFX_FILTER_FLAG_RX_RSS);
3276 new_spec.dmaq_id = 0;
3277 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
3278 rc = efx_ef10_filter_push(efx, &new_spec,
3279 &table->entry[filter_idx].handle,
3280 true);
3282 spin_lock_bh(&efx->filter_lock);
3286 /* Really remove the filter */
3288 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3289 efx_ef10_filter_is_exclusive(spec) ?
3290 MC_CMD_FILTER_OP_IN_OP_REMOVE :
3291 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3292 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3293 table->entry[filter_idx].handle);
3294 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
3295 inbuf, sizeof(inbuf), NULL, 0, NULL);
3297 spin_lock_bh(&efx->filter_lock);
3300 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
3304 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
3305 wake_up_all(&table->waitq);
3307 spin_unlock_bh(&efx->filter_lock);
3308 finish_wait(&table->waitq, &wait);
3312 static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
3313 enum efx_filter_priority priority,
3316 return efx_ef10_filter_remove_internal(efx, 1U << priority,
3317 filter_id, false);
3320 static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
3322 return filter_id % HUNT_FILTER_TBL_ROWS;
3325 static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
3326 enum efx_filter_priority priority,
3329 return efx_ef10_filter_remove_internal(efx, 1U << priority,
3330 filter_id, true);
3333 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
3334 enum efx_filter_priority priority,
3335 u32 filter_id, struct efx_filter_spec *spec)
3337 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
3338 struct efx_ef10_filter_table *table = efx->filter_state;
3339 const struct efx_filter_spec *saved_spec;
3342 spin_lock_bh(&efx->filter_lock);
3343 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
3344 if (saved_spec && saved_spec->priority == priority &&
3345 efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
3346 filter_id / HUNT_FILTER_TBL_ROWS) {
3347 *spec = *saved_spec;
3352 spin_unlock_bh(&efx->filter_lock);
3356 static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
3357 enum efx_filter_priority priority)
3359 unsigned int priority_mask;
3363 priority_mask = (((1U << (priority + 1)) - 1) &
3364 ~(1U << EFX_FILTER_PRI_AUTO));
3366 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
3367 rc = efx_ef10_filter_remove_internal(efx, priority_mask,
3368 i, true);
3369 if (rc && rc != -ENOENT)
3370 return rc;
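/* Editor's note: the priority_mask above, worked through.  Assuming
 * the enum ordering HINT == 0, AUTO == 1, MANUAL == 2, clearing RX
 * filters up to MANUAL computes ((1 << 3) - 1) & ~(1 << 1) =
 * 0b111 & 0b101 = 0b101, i.e. every priority up to and including
 * MANUAL except the AUTO filters that track the address lists.
 */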
3376 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
3377 enum efx_filter_priority priority)
3379 struct efx_ef10_filter_table *table = efx->filter_state;
3380 unsigned int filter_idx;
3383 spin_lock_bh(&efx->filter_lock);
3384 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3385 if (table->entry[filter_idx].spec &&
3386 efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
3390 spin_unlock_bh(&efx->filter_lock);
3394 static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
3396 struct efx_ef10_filter_table *table = efx->filter_state;
3398 return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
3401 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
3402 enum efx_filter_priority priority,
3405 struct efx_ef10_filter_table *table = efx->filter_state;
3406 struct efx_filter_spec *spec;
3407 unsigned int filter_idx;
3410 spin_lock_bh(&efx->filter_lock);
3411 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3412 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3413 if (spec && spec->priority == priority) {
3414 if (count == size) {
3418 buf[count++] = (efx_ef10_filter_rx_match_pri(
3419 table, spec->match_flags) *
3420 HUNT_FILTER_TBL_ROWS +
3421 filter_idx);
3424 spin_unlock_bh(&efx->filter_lock);
3428 #ifdef CONFIG_RFS_ACCEL
3430 static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
3432 static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
3433 struct efx_filter_spec *spec)
3435 struct efx_ef10_filter_table *table = efx->filter_state;
3436 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3437 struct efx_filter_spec *saved_spec;
3438 unsigned int hash, i, depth = 1;
3439 bool replacing = false;
3440 int ins_index = -1;
3441 u64 cookie;
3442 s32 rc;
3444 /* Must be an RX filter without RSS and not for a multicast
3445 * destination address (RFS only works for connected sockets).
3446 * These restrictions allow us to pass only a tiny amount of
3447 * data through to the completion function.
3449 EFX_WARN_ON_PARANOID(spec->flags !=
3450 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
3451 EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
3452 EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
3454 hash = efx_ef10_filter_hash(spec);
3456 spin_lock_bh(&efx->filter_lock);
3458 /* Find any existing filter with the same match tuple or else
3459 * a free slot to insert at. If an existing filter is busy,
3460 * we have to give up.
3463 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3464 saved_spec = efx_ef10_filter_entry_spec(table, i);
3469 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
3470 if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
3474 if (spec->priority < saved_spec->priority) {
3482 /* Once we reach the maximum search depth, use the
3483 * first suitable slot or return -EBUSY if there was
3486 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
3487 if (ins_index < 0) {
3497 /* Create a software table entry if necessary, and mark it
3498 * busy. We might yet fail to insert, but any attempt to
3499 * insert a conflicting filter while we're waiting for the
3500 * firmware must find the busy entry.
3502 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
3506 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
3511 *saved_spec = *spec;
3513 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
3514 EFX_EF10_FILTER_FLAG_BUSY);
3516 spin_unlock_bh(&efx->filter_lock);
3518 /* Pack up the variables needed on completion */
3519 cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
3521 efx_ef10_filter_push_prep(efx, spec, inbuf,
3522 table->entry[ins_index].handle, replacing);
3523 efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3524 MC_CMD_FILTER_OP_OUT_LEN,
3525 efx_ef10_filter_rfs_insert_complete, cookie);
3530 spin_unlock_bh(&efx->filter_lock);
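/* Editor's note: layout of the completion cookie packed above and
 * unpacked in efx_ef10_filter_rfs_insert_complete() below: bit 31 is
 * 'replacing', bits 16 and up hold ins_index (masked back to the 8K
 * table), bits 15:0 hold dmaq_id.  For example replacing == 1,
 * ins_index == 5, dmaq_id == 3 packs to
 * 1 << 31 | 5 << 16 | 3 == 0x80050003.
 */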
3534 static void
3535 efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
3536 int rc, efx_dword_t *outbuf,
3537 size_t outlen_actual)
3539 struct efx_ef10_filter_table *table = efx->filter_state;
3540 unsigned int ins_index, dmaq_id;
3541 struct efx_filter_spec *spec;
3542 bool replacing;
3544 /* Unpack the cookie */
3545 replacing = cookie >> 31;
3546 ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
3547 dmaq_id = cookie & 0xffff;
3549 spin_lock_bh(&efx->filter_lock);
3550 spec = efx_ef10_filter_entry_spec(table, ins_index);
3552 table->entry[ins_index].handle =
3553 MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
3555 spec->dmaq_id = dmaq_id;
3556 } else if (!replacing) {
3560 efx_ef10_filter_set_entry(table, ins_index, spec, 0);
3561 spin_unlock_bh(&efx->filter_lock);
3563 wake_up_all(&table->waitq);
3566 static void
3567 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
3568 unsigned long filter_idx,
3569 int rc, efx_dword_t *outbuf,
3570 size_t outlen_actual);
3572 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
3573 unsigned int filter_idx)
3575 struct efx_ef10_filter_table *table = efx->filter_state;
3576 struct efx_filter_spec *spec =
3577 efx_ef10_filter_entry_spec(table, filter_idx);
3578 MCDI_DECLARE_BUF(inbuf,
3579 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
3580 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
3582 if (!spec ||
3583 (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
3584 spec->priority != EFX_FILTER_PRI_HINT ||
3585 !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
3586 flow_id, filter_idx))
3587 return false;
3589 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3590 MC_CMD_FILTER_OP_IN_OP_REMOVE);
3591 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3592 table->entry[filter_idx].handle);
3593 if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
3594 efx_ef10_filter_rfs_expire_complete, filter_idx))
3597 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
3598 return true;
3601 static void
3602 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
3603 unsigned long filter_idx,
3604 int rc, efx_dword_t *outbuf,
3605 size_t outlen_actual)
3607 struct efx_ef10_filter_table *table = efx->filter_state;
3608 struct efx_filter_spec *spec =
3609 efx_ef10_filter_entry_spec(table, filter_idx);
3611 spin_lock_bh(&efx->filter_lock);
3614 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
3616 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
3617 wake_up_all(&table->waitq);
3618 spin_unlock_bh(&efx->filter_lock);
3621 #endif /* CONFIG_RFS_ACCEL */
3623 static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
3625 int match_flags = 0;
3627 #define MAP_FLAG(gen_flag, mcdi_field) { \
3628 u32 old_mcdi_flags = mcdi_flags; \
3629 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
3630 mcdi_field ## _LBN); \
3631 if (mcdi_flags != old_mcdi_flags) \
3632 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
3634 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
3635 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
3636 MAP_FLAG(REM_HOST, SRC_IP);
3637 MAP_FLAG(LOC_HOST, DST_IP);
3638 MAP_FLAG(REM_MAC, SRC_MAC);
3639 MAP_FLAG(REM_PORT, SRC_PORT);
3640 MAP_FLAG(LOC_MAC, DST_MAC);
3641 MAP_FLAG(LOC_PORT, DST_PORT);
3642 MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
3643 MAP_FLAG(INNER_VID, INNER_VLAN);
3644 MAP_FLAG(OUTER_VID, OUTER_VLAN);
3645 MAP_FLAG(IP_PROTO, IP_PROTO);
3648 /* Did we map them all? */
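/* Editor's note: each MAP_FLAG invocation above clears one MCDI match
 * bit and records the corresponding generic flag, so any bit still
 * set in mcdi_flags at this point is a match type the driver does not
 * understand and the translation fails rather than mis-advertising
 * support for it.
 */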
3655 static int efx_ef10_filter_table_probe(struct efx_nic *efx)
3657 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
3658 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
3659 unsigned int pd_match_pri, pd_match_count;
3660 struct efx_ef10_filter_table *table;
3661 size_t outlen;
3662 int rc;
3664 table = kzalloc(sizeof(*table), GFP_KERNEL);
3665 if (!table)
3666 return -ENOMEM;
3668 /* Find out which RX filter types are supported, and their priorities */
3669 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
3670 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
3671 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
3672 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
3673 &outlen);
3674 if (rc)
3675 goto fail;
3676 pd_match_count = MCDI_VAR_ARRAY_LEN(
3677 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
3678 table->rx_match_count = 0;
3680 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
3681 u32 mcdi_flags =
3682 MCDI_ARRAY_DWORD(
3683 outbuf,
3684 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
3685 pd_match_pri);
3686 rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
3688 netif_dbg(efx, probe, efx->net_dev,
3689 "%s: fw flags %#x pri %u not supported in driver\n",
3690 __func__, mcdi_flags, pd_match_pri);
3692 netif_dbg(efx, probe, efx->net_dev,
3693 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
3694 __func__, mcdi_flags, pd_match_pri,
3695 rc, table->rx_match_count);
3696 table->rx_match_flags[table->rx_match_count++] = rc;
3700 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
3701 if (!table->entry) {
3702 rc = -ENOMEM;
3703 goto fail;
3704 }
3706 table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
3707 table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
3708 table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
3710 efx->filter_state = table;
3711 init_waitqueue_head(&table->waitq);
3712 return 0;
3714 fail:
3715 kfree(table);
3716 return rc;
3719 /* Caller must hold efx->filter_sem for read if race against
3720 * efx_ef10_filter_table_remove() is possible
3722 static void efx_ef10_filter_table_restore(struct efx_nic *efx)
3724 struct efx_ef10_filter_table *table = efx->filter_state;
3725 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3726 struct efx_filter_spec *spec;
3727 unsigned int filter_idx;
3728 bool failed = false;
3731 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
3733 if (!nic_data->must_restore_filters)
3734 return;
3736 if (!table)
3737 return;
3739 spin_lock_bh(&efx->filter_lock);
3741 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3742 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3743 if (!spec)
3744 continue;
3746 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
3747 spin_unlock_bh(&efx->filter_lock);
3749 rc = efx_ef10_filter_push(efx, spec,
3750 &table->entry[filter_idx].handle,
3751 false);
3752 if (rc)
3753 failed = true;
3755 spin_lock_bh(&efx->filter_lock);
3756 if (rc) {
3757 kfree(spec);
3758 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
3759 } else {
3760 table->entry[filter_idx].spec &=
3761 ~EFX_EF10_FILTER_FLAG_BUSY;
3762 }
3763 }
3765 spin_unlock_bh(&efx->filter_lock);
3767 if (failed)
3768 netif_err(efx, hw, efx->net_dev,
3769 "unable to restore all filters\n");
3770 else
3771 nic_data->must_restore_filters = false;
3774 /* Caller must hold efx->filter_sem for write */
3775 static void efx_ef10_filter_table_remove(struct efx_nic *efx)
3777 struct efx_ef10_filter_table *table = efx->filter_state;
3778 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3779 struct efx_filter_spec *spec;
3780 unsigned int filter_idx;
3783 efx->filter_state = NULL;
3787 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3788 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3789 if (!spec)
3790 continue;
3792 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3793 efx_ef10_filter_is_exclusive(spec) ?
3794 MC_CMD_FILTER_OP_IN_OP_REMOVE :
3795 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3796 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3797 table->entry[filter_idx].handle);
3798 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3799 NULL, 0, NULL);
3800 if (rc)
3801 netdev_WARN(efx->net_dev,
3802 "filter_idx=%#x handle=%#llx\n",
3803 filter_idx,
3804 table->entry[filter_idx].handle);
3805 kfree(spec);
3806 }
3808 vfree(table->entry);
3809 kfree(table);
3812 #define EFX_EF10_FILTER_DO_MARK_OLD(id) \
3813 if (id != EFX_EF10_FILTER_ID_INVALID) { \
3814 filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
3815 WARN_ON(!table->entry[filter_idx].spec); \
3816 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \
3818 static void efx_ef10_filter_mark_old(struct efx_nic *efx)
3820 struct efx_ef10_filter_table *table = efx->filter_state;
3821 unsigned int filter_idx, i;
3823 if (!table)
3824 return;
3826 /* Mark old filters that may need to be removed */
3827 spin_lock_bh(&efx->filter_lock);
3828 for (i = 0; i < table->dev_uc_count; i++)
3829 EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id);
3830 for (i = 0; i < table->dev_mc_count; i++)
3831 EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id);
3832 EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id);
3833 EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id);
3834 EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id);
3835 spin_unlock_bh(&efx->filter_lock);
3837 #undef EFX_EF10_FILTER_DO_MARK_OLD
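/* Editor's note: this is the mark phase of a mark-and-sweep scheme
 * for the address-list filters.  Everything referenced by the current
 * unicast/multicast lists (plus the ucdef/bcast/mcdef defaults) is
 * tagged AUTO_OLD here; re-inserting a still-wanted filter clears the
 * tag, and efx_ef10_filter_remove_old() below sweeps whatever remains
 * tagged after the lists have been re-applied.
 */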
3839 static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
3841 struct efx_ef10_filter_table *table = efx->filter_state;
3842 struct net_device *net_dev = efx->net_dev;
3843 struct netdev_hw_addr *uc;
3847 table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
3848 addr_count = netdev_uc_count(net_dev);
3849 if (net_dev->flags & IFF_PROMISC)
3850 *promisc = true;
3851 table->dev_uc_count = 1 + addr_count;
3852 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
3853 i = 1;
3854 netdev_for_each_uc_addr(uc, net_dev) {
3855 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
3856 *promisc = true;
3857 break;
3858 }
3859 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
3860 table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
3861 i++;
3865 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
3867 struct efx_ef10_filter_table *table = efx->filter_state;
3868 struct net_device *net_dev = efx->net_dev;
3869 struct netdev_hw_addr *mc;
3870 unsigned int i, addr_count;
3872 table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
3873 table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
3874 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
3875 *promisc = true;
3877 addr_count = netdev_mc_count(net_dev);
3878 i = 0;
3879 netdev_for_each_mc_addr(mc, net_dev) {
3880 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
3881 *promisc = true;
3882 break;
3883 }
3884 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
3885 table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
3886 i++;
3889 table->dev_mc_count = i;
3892 static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3893 bool multicast, bool rollback)
3895 struct efx_ef10_filter_table *table = efx->filter_state;
3896 struct efx_ef10_dev_addr *addr_list;
3897 struct efx_filter_spec spec;
3898 u8 baddr[ETH_ALEN];
3899 unsigned int i, j;
3900 int addr_count;
3901 int rc;
3903 if (multicast) {
3904 addr_list = table->dev_mc_list;
3905 addr_count = table->dev_mc_count;
3906 } else {
3907 addr_list = table->dev_uc_list;
3908 addr_count = table->dev_uc_count;
3909 }
3911 /* Insert/renew filters */
3912 for (i = 0; i < addr_count; i++) {
3913 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
3914 EFX_FILTER_FLAG_RX_RSS,
3915 0);
3916 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
3917 addr_list[i].addr);
3918 rc = efx_ef10_filter_insert(efx, &spec, true);
3919 if (rc < 0) {
3920 if (rollback) {
3921 netif_info(efx, drv, efx->net_dev,
3922 "efx_ef10_filter_insert failed rc=%d\n",
3924 /* Fall back to promiscuous */
3925 for (j = 0; j < i; j++) {
3926 if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
3927 continue;
3928 efx_ef10_filter_remove_unsafe(
3929 efx, EFX_FILTER_PRI_AUTO,
3930 addr_list[j].id);
3931 addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
3932 }
3933 return rc;
3934 } else {
3935 /* mark as not inserted, and carry on */
3936 rc = EFX_EF10_FILTER_ID_INVALID;
3937 }
3938 }
3939 addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc);
3942 if (multicast && rollback) {
3943 /* Also need an Ethernet broadcast filter */
3944 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
3945 EFX_FILTER_FLAG_RX_RSS,
3946 0);
3947 eth_broadcast_addr(baddr);
3948 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
3949 rc = efx_ef10_filter_insert(efx, &spec, true);
3950 if (rc < 0) {
3951 netif_warn(efx, drv, efx->net_dev,
3952 "Broadcast filter insert failed rc=%d\n", rc);
3953 /* Fall back to promiscuous */
3954 for (j = 0; j < i; j++) {
3955 if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
3956 continue;
3957 efx_ef10_filter_remove_unsafe(
3958 efx, EFX_FILTER_PRI_AUTO,
3959 addr_list[j].id);
3960 addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
3961 }
3962 return rc;
3963 } else {
3964 table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
3971 static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
3974 struct efx_ef10_filter_table *table = efx->filter_state;
3975 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3976 struct efx_filter_spec spec;
3977 u8 baddr[ETH_ALEN];
3978 int rc;
3980 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
3981 EFX_FILTER_FLAG_RX_RSS,
3982 0);
3984 if (multicast)
3985 efx_filter_set_mc_def(&spec);
3986 else
3987 efx_filter_set_uc_def(&spec);
3989 rc = efx_ef10_filter_insert(efx, &spec, true);
3990 if (rc < 0) {
3991 netif_warn(efx, drv, efx->net_dev,
3992 "%scast mismatch filter insert failed rc=%d\n",
3993 multicast ? "Multi" : "Uni", rc);
3994 } else if (multicast) {
3995 table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
3996 if (!nic_data->workaround_26807) {
3997 /* Also need an Ethernet broadcast filter */
3998 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
3999 EFX_FILTER_FLAG_RX_RSS,
4000 0);
4001 eth_broadcast_addr(baddr);
4002 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
4003 baddr);
4004 rc = efx_ef10_filter_insert(efx, &spec, true);
4005 if (rc < 0) {
4006 netif_warn(efx, drv, efx->net_dev,
4007 "Broadcast filter insert failed rc=%d\n",
4010 /* Roll back the mc_def filter */
4011 efx_ef10_filter_remove_unsafe(
4012 efx, EFX_FILTER_PRI_AUTO,
4013 table->mcdef_id);
4014 table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
4015 return rc;
4016 }
4017 } else {
4018 table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
4023 table->ucdef_id = rc;
4029 /* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD
4030 * flag or removes these filters, we don't need to hold the filter_lock while
4031 * scanning for these filters.
4033 static void efx_ef10_filter_remove_old(struct efx_nic *efx)
4035 struct efx_ef10_filter_table *table = efx->filter_state;
4036 bool remove_failed = false;
4037 int i;
4039 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
4040 if (ACCESS_ONCE(table->entry[i].spec) &
4041 EFX_EF10_FILTER_FLAG_AUTO_OLD) {
4042 if (efx_ef10_filter_remove_internal(
4043 efx, 1U << EFX_FILTER_PRI_AUTO,
4044 i, true) < 0)
4045 remove_failed = true;
4048 WARN_ON(remove_failed);
4051 static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
4053 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4054 u8 mac_old[ETH_ALEN];
4055 int rc, rc2;
4057 /* Only reconfigure a PF-created vport */
4058 if (is_zero_ether_addr(nic_data->vport_mac))
4059 return 0;
4061 efx_device_detach_sync(efx);
4062 efx_net_stop(efx->net_dev);
4063 down_write(&efx->filter_sem);
4064 efx_ef10_filter_table_remove(efx);
4065 up_write(&efx->filter_sem);
4067 rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
4068 if (rc)
4069 goto restore_filters;
4071 ether_addr_copy(mac_old, nic_data->vport_mac);
4072 rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
4073 nic_data->vport_mac);
4074 if (rc)
4075 goto restore_vadaptor;
4077 rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
4078 efx->net_dev->dev_addr);
4079 if (!rc) {
4080 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
4081 } else {
4082 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
4083 if (rc2) {
4084 /* Failed to add original MAC, so clear vport_mac */
4085 eth_zero_addr(nic_data->vport_mac);
4086 goto reset_nic;
4087 }
4088 }
4090 restore_vadaptor:
4091 rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
4092 if (rc2)
4093 goto reset_nic;
4094 restore_filters:
4095 down_write(&efx->filter_sem);
4096 rc2 = efx_ef10_filter_table_probe(efx);
4097 up_write(&efx->filter_sem);
4098 if (rc2)
4099 goto reset_nic;
4101 rc2 = efx_net_open(efx->net_dev);
4102 if (rc2)
4103 goto reset_nic;
4105 netif_device_attach(efx->net_dev);
4107 return rc;
4109 reset_nic:
4110 netif_err(efx, drv, efx->net_dev,
4111 "Failed to restore when changing MAC address - scheduling reset\n");
4112 efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
4114 return rc ? rc : rc2;
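/* Editor's note: the error handling above unwinds in reverse order of
 * teardown: restore_vadaptor reallocates the vadaptor, restore_filters
 * re-probes the filter table and reopens the device, and any failure
 * while unwinding falls through to a RESET_TYPE_DATAPATH reset as the
 * last resort.
 */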
4117 /* Caller must hold efx->filter_sem for read if race against
4118 * efx_ef10_filter_table_remove() is possible
4120 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
4122 struct efx_ef10_filter_table *table = efx->filter_state;
4123 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4124 struct net_device *net_dev = efx->net_dev;
4125 bool uc_promisc = false, mc_promisc = false;
4127 if (!efx_dev_registered(efx))
4128 return;
4130 if (!table)
4131 return;
4133 efx_ef10_filter_mark_old(efx);
4135 /* Copy/convert the address lists; add the primary station
4136 * address and broadcast address
4138 netif_addr_lock_bh(net_dev);
4139 efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
4140 efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
4141 netif_addr_unlock_bh(net_dev);
4143 /* Insert/renew unicast filters */
4144 if (uc_promisc) {
4145 efx_ef10_filter_insert_def(efx, false, false);
4146 efx_ef10_filter_insert_addr_list(efx, false, false);
4147 } else {
4148 /* If any of the filters failed to insert, fall back to
4149 * promiscuous mode - add in the uc_def filter. But keep
4150 * our individual unicast filters.
4152 if (efx_ef10_filter_insert_addr_list(efx, false, false))
4153 efx_ef10_filter_insert_def(efx, false, false);
4154 }
4156 /* Insert/renew multicast filters */
4157 /* If changing promiscuous state with cascaded multicast filters, remove
4158 * old filters first, so that packets are dropped rather than duplicated
4160 if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc)
4161 efx_ef10_filter_remove_old(efx);
4162 if (mc_promisc) {
4163 if (nic_data->workaround_26807) {
4164 /* If we failed to insert promiscuous filters, rollback
4165 * and fall back to individual multicast filters
4167 if (efx_ef10_filter_insert_def(efx, true, true)) {
4168 /* Changing promisc state, so remove old filters */
4169 efx_ef10_filter_remove_old(efx);
4170 efx_ef10_filter_insert_addr_list(efx, true, false);
4171 }
4172 } else {
4173 /* If we failed to insert promiscuous filters, don't
4174 * rollback. Regardless, also insert the mc_list
4176 efx_ef10_filter_insert_def(efx, true, false);
4177 efx_ef10_filter_insert_addr_list(efx, true, false);
4178 }
4179 } else {
4180 /* If any filters failed to insert, rollback and fall back to
4181 * promiscuous mode - mc_def filter and maybe broadcast. If
4182 * that fails, roll back again and insert as many of our
4183 * individual multicast filters as we can.
4185 if (efx_ef10_filter_insert_addr_list(efx, true, true)) {
4186 /* Changing promisc state, so remove old filters */
4187 if (nic_data->workaround_26807)
4188 efx_ef10_filter_remove_old(efx);
4189 if (efx_ef10_filter_insert_def(efx, true, true))
4190 efx_ef10_filter_insert_addr_list(efx, true, false);
4191 }
4192 }
4194 efx_ef10_filter_remove_old(efx);
4195 efx->mc_promisc = mc_promisc;
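/* Editor's note: the whole reconfiguration above is therefore
 * mark, copy the lists under netif_addr_lock, insert/renew, sweep.
 * The workaround_26807 branch removes old multicast filters before
 * inserting promiscuous ones so that cascaded multicast filters drop
 * packets rather than duplicate them while the promiscuous state is
 * changing.
 */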
4198 static int efx_ef10_set_mac_address(struct efx_nic *efx)
4200 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
4201 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4202 bool was_enabled = efx->port_enabled;
4203 int rc;
4205 efx_device_detach_sync(efx);
4206 efx_net_stop(efx->net_dev);
4207 down_write(&efx->filter_sem);
4208 efx_ef10_filter_table_remove(efx);
4210 ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
4211 efx->net_dev->dev_addr);
4212 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
4213 nic_data->vport_id);
4214 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
4215 sizeof(inbuf), NULL, 0, NULL);
4217 efx_ef10_filter_table_probe(efx);
4218 up_write(&efx->filter_sem);
4219 if (was_enabled)
4220 efx_net_open(efx->net_dev);
4221 netif_device_attach(efx->net_dev);
4223 #ifdef CONFIG_SFC_SRIOV
4224 if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
4225 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
4228 struct efx_nic *efx_pf;
4230 /* Switch to PF and change MAC address on vport */
4231 efx_pf = pci_get_drvdata(pci_dev_pf);
4233 rc = efx_ef10_sriov_set_vf_mac(efx_pf,
4235 efx->net_dev->dev_addr);
4237 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
4238 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
4241 /* MAC address successfully changed by VF (with MAC
4242 * spoofing) so update the parent PF if possible.
4244 for (i = 0; i < efx_pf->vf_count; ++i) {
4245 struct ef10_vf *vf = nic_data->vf + i;
4247 if (vf->efx == efx) {
4248 ether_addr_copy(vf->mac,
4249 efx->net_dev->dev_addr);
4257 netif_err(efx, drv, efx->net_dev,
4258 "Cannot change MAC address; use sfboot to enable"
4259 " mac-spoofing on this interface\n");
4260 } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
4261 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
4262 * fall-back to the method of changing the MAC address on the
4263 * vport. This only applies to PFs because such versions of
4264 * MCFW do not support VFs.
4266 rc = efx_ef10_vport_set_mac_address(efx);
4268 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
4269 sizeof(inbuf), NULL, 0, rc);
static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
{
	efx_ef10_filter_sync_rx_mode(efx);

	return efx_mcdi_set_mac(efx);
}

static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
{
	/* VFs are not permitted to reconfigure the port MAC with
	 * MC_CMD_SET_MAC, so only refresh the RX filters here.
	 */
	efx_ef10_filter_sync_rx_mode(efx);

	return 0;
}
static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);

	MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
	return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}
/* MC BISTs follow a different poll mechanism to phy BISTs.
 * The BIST is done in the poll handler on the MC, and the MCDI command
 * will block until the BIST is done.
 */
static int efx_ef10_poll_bist(struct efx_nic *efx)
{
	int rc;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
	size_t outlen;
	u32 result;

	rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
		return -EIO;

	result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
	switch (result) {
	case MC_CMD_POLL_BIST_PASSED:
		netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
		return 0;
	case MC_CMD_POLL_BIST_TIMEOUT:
		netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
		return -EIO;
	case MC_CMD_POLL_BIST_FAILED:
		netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
		return -EIO;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "BIST returned unknown result %u", result);
		return -EIO;
	}
}
static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);

	rc = efx_ef10_start_bist(efx, bist_type);
	if (rc != 0)
		return rc;

	return efx_ef10_poll_bist(efx);
}
static int
efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc, rc2;

	efx_reset_down(efx, RESET_TYPE_WORLD);

	rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
			  NULL, 0, NULL, 0, NULL);
	if (rc != 0)
		goto out;

	tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
	tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;

	rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);

out:
	rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
	return rc ? rc : rc2;
}
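/* Illustrative sketch only, not built: the efx_self_tests fields set
 * above follow the driver convention of 1 = passed, -1 = failed, so a
 * hypothetical reporting helper could consume them like this.
 */
#if 0
static void example_report_bist(struct efx_nic *efx,
				const struct efx_self_tests *tests)
{
	netif_info(efx, drv, efx->net_dev, "memory BIST %s\n",
		   tests->memory < 0 ? "FAILED" : "passed");
	netif_info(efx, drv, efx->net_dev, "register BIST %s\n",
		   tests->registers < 0 ? "FAILED" : "passed");
}
#endif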
#ifdef CONFIG_SFC_MTD

struct efx_ef10_nvram_type_info {
	u16 type, type_mask;
	u8 port;
	const char *name;
};

static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE,	    0,    0, "sfc_mcfw" },
	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP,  0,    0, "sfc_mcfw_backup" },
	{ NVRAM_PARTITION_TYPE_EXPANSION_ROM,	    0,    0, "sfc_exp_rom" },
	{ NVRAM_PARTITION_TYPE_STATIC_CONFIG,	    0,    0, "sfc_static_cfg" },
	{ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	    0,    0, "sfc_dynamic_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0,    0, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0,    1, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0,    2, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0,    3, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_LICENSE,		    0,    0, "sfc_license" },
	{ NVRAM_PARTITION_TYPE_PHY_MIN,		    0xff, 0, "sfc_phy_fw" },
};
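/* Illustrative sketch only, not built: table entries are matched by
 * efx_ef10_mtd_probe_partition() below with
 * (type & ~info->type_mask) == info->type, so a non-zero type_mask
 * lets one entry cover a range - e.g. the 0xff mask on "sfc_phy_fw"
 * matches every type from NVRAM_PARTITION_TYPE_PHY_MIN through
 * NVRAM_PARTITION_TYPE_PHY_MIN + 0xff.  The helper name is
 * hypothetical.
 */
#if 0
static bool example_nvram_type_matches(
	const struct efx_ef10_nvram_type_info *info, unsigned int type)
{
	return (type & ~info->type_mask) == info->type;
}
#endif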
static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
					struct efx_mcdi_mtd_partition *part,
					unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
	const struct efx_ef10_nvram_type_info *info;
	size_t size, erase_size, outlen;
	bool protected;
	int rc;

	for (info = efx_ef10_nvram_types; ; info++) {
		if (info ==
		    efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
			return -ENODEV;
		if ((type & ~info->type_mask) == info->type)
			break;
	}
	if (info->port != efx_port_num(efx))
		return -ENODEV;

	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
	if (rc)
		return rc;
	if (protected)
		return -ENODEV; /* hide it */

	part->nvram_type = type;

	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
		return -EIO;
	if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
	    (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
		part->fw_subtype = MCDI_DWORD(outbuf,
					      NVRAM_METADATA_OUT_SUBTYPE);

	part->common.dev_type_name = "EF10 NVRAM manager";
	part->common.type_name = info->name;

	part->common.mtd.type = MTD_NORFLASH;
	part->common.mtd.flags = MTD_CAP_NORFLASH;
	part->common.mtd.size = size;
	part->common.mtd.erasesize = erase_size;

	return 0;
}
static int efx_ef10_mtd_probe(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
	struct efx_mcdi_mtd_partition *parts;
	size_t outlen, n_parts_total, i, n_parts;
	unsigned int type;
	int rc;

	ASSERT_RTNL();

	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
		return -EIO;

	n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
	if (n_parts_total >
	    MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
		return -EIO;

	parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	n_parts = 0;
	for (i = 0; i < n_parts_total; i++) {
		type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
					i);
		rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
		if (rc == 0)
			n_parts++;
		else if (rc != -ENODEV)
			goto fail;
	}

	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
	if (rc)
		kfree(parts);
	return rc;
}

#endif /* CONFIG_SFC_MTD */
static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
{
	_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}

static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
					    u32 host_time) {}
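/* Editorial note, a best-effort reading of the helpers below: each
 * channel's sync_events_state follows roughly this state machine,
 * where temp marks a temporary change (e.g. around a reset) that
 * parks the state in QUIESCENT instead of DISABLED:
 *
 *   DISABLED --enable--> REQUESTED --(MC sync event)--> VALID
 *   REQUESTED/VALID --disable--> DISABLED (QUIESCENT if temp)
 *   QUIESCENT --enable--> REQUESTED
 */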
static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
					   bool temp)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
	int rc;

	if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
	    channel->sync_events_state == SYNC_EVENTS_VALID ||
	    (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
		return 0;
	channel->sync_events_state = SYNC_EVENTS_REQUESTED;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
		       channel->channel);

	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);

	if (rc != 0)
		channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
						    SYNC_EVENTS_DISABLED;

	return rc;
}
static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
					    bool temp)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
	int rc;

	if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
	    (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
		return 0;
	if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
		channel->sync_events_state = SYNC_EVENTS_DISABLED;
		return 0;
	}
	channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
					    SYNC_EVENTS_DISABLED;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
		       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
		       channel->channel);

	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);

	return rc;
}
static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
					   bool temp)
{
	int (*set)(struct efx_channel *channel, bool temp);
	struct efx_channel *channel;

	set = en ?
	      efx_ef10_rx_enable_timestamping :
	      efx_ef10_rx_disable_timestamping;

	efx_for_each_channel(channel, efx) {
		int rc = set(channel, temp);
		if (en && rc != 0) {
			/* Roll back any channels already enabled */
			efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
			return rc;
		}
	}

	return 0;
}
static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
					 struct hwtstamp_config *init)
{
	return -EOPNOTSUPP;
}
static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
				      struct hwtstamp_config *init)
{
	int rc;

	switch (init->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		efx_ef10_ptp_set_ts_sync_events(efx, false, false);
		/* if TX timestamping is still requested then leave PTP on */
		return efx_ptp_change_mode(efx,
					   init->tx_type != HWTSTAMP_TX_OFF, 0);
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		init->rx_filter = HWTSTAMP_FILTER_ALL;
		rc = efx_ptp_change_mode(efx, true, 0);
		if (!rc)
			rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
		if (rc)
			efx_ptp_change_mode(efx, false, 0);
		return rc;
	default:
		return -ERANGE;
	}
}
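/* Illustrative sketch only, not built: the hwtstamp_config handled
 * above arrives from userspace via the SIOCSHWTSTAMP ioctl; a request
 * equivalent to "timestamp everything" would reach this function as
 * shown below.  The helper name is hypothetical.
 */
#if 0
static int example_enable_all_timestamping(struct efx_nic *efx)
{
	struct hwtstamp_config init = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};

	return efx_ef10_ptp_set_ts_config(efx, &init);
}
#endif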
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
	.is_vf = true,
	.mem_bar = EFX_MEM_VF_BAR,
	.mem_map_size = efx_ef10_mem_map_size,
	.probe = efx_ef10_probe_vf,
	.remove = efx_ef10_remove,
	.dimension_resources = efx_ef10_dimension_resources,
	.init = efx_ef10_init_nic,
	.fini = efx_port_dummy_op_void,
	.map_reset_reason = efx_ef10_map_reset_reason,
	.map_reset_flags = efx_ef10_map_reset_flags,
	.reset = efx_ef10_reset,
	.probe_port = efx_mcdi_port_probe,
	.remove_port = efx_mcdi_port_remove,
	.fini_dmaq = efx_ef10_fini_dmaq,
	.prepare_flr = efx_ef10_prepare_flr,
	.finish_flr = efx_port_dummy_op_void,
	.describe_stats = efx_ef10_describe_stats,
	.update_stats = efx_ef10_update_stats_vf,
	.start_stats = efx_port_dummy_op_void,
	.pull_stats = efx_port_dummy_op_void,
	.stop_stats = efx_port_dummy_op_void,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = efx_ef10_push_irq_moderation,
	.reconfigure_mac = efx_ef10_mac_reconfigure_vf,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.get_wol = efx_ef10_get_wol_vf,
	.set_wol = efx_ef10_set_wol_vf,
	.resume_wol = efx_port_dummy_op_void,
	.mcdi_request = efx_ef10_mcdi_request,
	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
	.mcdi_read_response = efx_ef10_mcdi_read_response,
	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef10_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.irq_handle_msi = efx_ef10_msi_interrupt,
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
	.tx_remove = efx_ef10_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
	.rx_probe = efx_ef10_rx_probe,
	.rx_init = efx_ef10_rx_init,
	.rx_remove = efx_ef10_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
	.ev_probe = efx_ef10_ev_probe,
	.ev_init = efx_ef10_ev_init,
	.ev_fini = efx_ef10_ev_fini,
	.ev_remove = efx_ef10_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
	.filter_table_probe = efx_ef10_filter_table_probe,
	.filter_table_restore = efx_ef10_filter_table_restore,
	.filter_table_remove = efx_ef10_filter_table_remove,
	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
	.filter_insert = efx_ef10_filter_insert,
	.filter_remove_safe = efx_ef10_filter_remove_safe,
	.filter_get_safe = efx_ef10_filter_get_safe,
	.filter_clear_rx = efx_ef10_filter_clear_rx,
	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
	.mtd_probe = efx_port_dummy_op_int,
#endif
	.ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
#ifdef CONFIG_SFC_SRIOV
	.vswitching_probe = efx_ef10_vswitching_probe_vf,
	.vswitching_restore = efx_ef10_vswitching_restore_vf,
	.vswitching_remove = efx_ef10_vswitching_remove_vf,
	.sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
#endif
	.get_mac_address = efx_ef10_get_mac_address_vf,
	.set_mac_address = efx_ef10_set_mac_address,

	.revision = EFX_REV_HUNT_A0,
	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
	.can_rx_scatter = true,
	.always_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
	.mcdi_max_ver = 2,
	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
			    1 << HWTSTAMP_FILTER_ALL,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
	.is_vf = false,
	.mem_bar = EFX_MEM_BAR,
	.mem_map_size = efx_ef10_mem_map_size,
	.probe = efx_ef10_probe_pf,
	.remove = efx_ef10_remove,
	.dimension_resources = efx_ef10_dimension_resources,
	.init = efx_ef10_init_nic,
	.fini = efx_port_dummy_op_void,
	.map_reset_reason = efx_ef10_map_reset_reason,
	.map_reset_flags = efx_ef10_map_reset_flags,
	.reset = efx_ef10_reset,
	.probe_port = efx_mcdi_port_probe,
	.remove_port = efx_mcdi_port_remove,
	.fini_dmaq = efx_ef10_fini_dmaq,
	.prepare_flr = efx_ef10_prepare_flr,
	.finish_flr = efx_port_dummy_op_void,
	.describe_stats = efx_ef10_describe_stats,
	.update_stats = efx_ef10_update_stats_pf,
	.start_stats = efx_mcdi_mac_start_stats,
	.pull_stats = efx_mcdi_mac_pull_stats,
	.stop_stats = efx_mcdi_mac_stop_stats,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = efx_ef10_push_irq_moderation,
	.reconfigure_mac = efx_ef10_mac_reconfigure,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.get_wol = efx_ef10_get_wol,
	.set_wol = efx_ef10_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_chip = efx_ef10_test_chip,
	.test_nvram = efx_mcdi_nvram_test_all,
	.mcdi_request = efx_ef10_mcdi_request,
	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
	.mcdi_read_response = efx_ef10_mcdi_read_response,
	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef10_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.irq_handle_msi = efx_ef10_msi_interrupt,
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
	.tx_remove = efx_ef10_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
	.rx_probe = efx_ef10_rx_probe,
	.rx_init = efx_ef10_rx_init,
	.rx_remove = efx_ef10_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
	.ev_probe = efx_ef10_ev_probe,
	.ev_init = efx_ef10_ev_init,
	.ev_fini = efx_ef10_ev_fini,
	.ev_remove = efx_ef10_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
	.filter_table_probe = efx_ef10_filter_table_probe,
	.filter_table_restore = efx_ef10_filter_table_restore,
	.filter_table_remove = efx_ef10_filter_table_remove,
	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
	.filter_insert = efx_ef10_filter_insert,
	.filter_remove_safe = efx_ef10_filter_remove_safe,
	.filter_get_safe = efx_ef10_filter_get_safe,
	.filter_clear_rx = efx_ef10_filter_clear_rx,
	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
	.mtd_probe = efx_ef10_mtd_probe,
	.mtd_rename = efx_mcdi_mtd_rename,
	.mtd_read = efx_mcdi_mtd_read,
	.mtd_erase = efx_mcdi_mtd_erase,
	.mtd_write = efx_mcdi_mtd_write,
	.mtd_sync = efx_mcdi_mtd_sync,
#endif
	.ptp_write_host_time = efx_ef10_ptp_write_host_time,
	.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
#ifdef CONFIG_SFC_SRIOV
	.sriov_configure = efx_ef10_sriov_configure,
	.sriov_init = efx_ef10_sriov_init,
	.sriov_fini = efx_ef10_sriov_fini,
	.sriov_wanted = efx_ef10_sriov_wanted,
	.sriov_reset = efx_ef10_sriov_reset,
	.sriov_flr = efx_ef10_sriov_flr,
	.sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
	.sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
	.sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
	.sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
	.sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
	.vswitching_probe = efx_ef10_vswitching_probe_pf,
	.vswitching_restore = efx_ef10_vswitching_restore_pf,
	.vswitching_remove = efx_ef10_vswitching_remove_pf,
#endif
	.get_mac_address = efx_ef10_get_mac_address_pf,
	.set_mac_address = efx_ef10_set_mac_address,

	.revision = EFX_REV_HUNT_A0,
	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
	.can_rx_scatter = true,
	.always_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
	.mcdi_max_ver = 2,
	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
			    1 << HWTSTAMP_FILTER_ALL,
};