2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/delay.h>
38 #include "t4_values.h"
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
56 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
60 u32 val = t4_read_reg(adapter, reg);
62 if (!!(val & mask) == polarity) {
74 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
88 * Sets a register field specified by the supplied mask to the
91 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
101 * t4_read_indirect - read indirectly addressed registers
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
109 * Reads registers that are accessed indirectly through an address/data
112 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
124 * t4_write_indirect - write indirectly addressed registers
126 * @addr_reg: register holding the indirect addresses
127 * @data_reg: register holding the value for the indirect registers
128 * @vals: values to write
129 * @nregs: how many indirect registers to write
130 * @start_idx: address of first indirect register to write
132 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair.
135 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 unsigned int data_reg, const u32 *vals,
137 unsigned int nregs, unsigned int start_idx)
140 t4_write_reg(adap, addr_reg, start_idx++);
141 t4_write_reg(adap, data_reg, *vals++);
146 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
147 * mechanism. This guarantees that we get the real value even if we're
148 * operating within a Virtual Machine and the Hypervisor is trapping our
149 * Configuration Space accesses.
151 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
153 u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
155 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
160 if (is_t4(adap->params.chip))
163 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
164 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
166 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
167 * Configuration Space read. (None of the other fields matter when
168 * ENABLE is 0 so a simple register write is easier than a
169 * read-modify-write via t4_set_reg_field().)
171 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
175 * t4_report_fw_error - report firmware error
178 * The adapter firmware can indicate error conditions to the host.
179 * If the firmware has indicated an error, print out the reason for
180 * the firmware error.
182 static void t4_report_fw_error(struct adapter *adap)
184 static const char *const reason[] = {
185 "Crash", /* PCIE_FW_EVAL_CRASH */
186 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
187 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
188 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
189 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
190 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
191 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
192 "Reserved", /* reserved */
196 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
197 if (pcie_fw & PCIE_FW_ERR_F)
198 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
199 reason[PCIE_FW_EVAL_G(pcie_fw)]);
203 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
205 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
208 for ( ; nflit; nflit--, mbox_addr += 8)
209 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
213 * Handle a FW assertion reported in a mailbox.
215 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
217 struct fw_debug_cmd asrt;
219 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
220 dev_alert(adap->pdev_dev,
221 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
222 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
223 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
226 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
228 dev_err(adap->pdev_dev,
229 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
230 (unsigned long long)t4_read_reg64(adap, data_reg),
231 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
232 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
233 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
234 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
235 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
236 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
237 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
241 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
243 * @mbox: index of the mailbox to use
244 * @cmd: the command to write
245 * @size: command length in bytes
246 * @rpl: where to optionally store the reply
247 * @sleep_ok: if true we may sleep while awaiting command completion
248 * @timeout: time to wait for command to finish before timing out
250 * Sends the given command to FW through the selected mailbox and waits
251 * for the FW to execute the command. If @rpl is not %NULL it is used to
252 * store the FW's reply to the command. The command and its optional
253 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
254 * to respond. @sleep_ok determines whether we may sleep while awaiting
255 * the response. If sleeping is allowed we use progressive backoff
258 * The return value is 0 on success or a negative errno on failure. A
259 * failure can happen either because we are not able to execute the
260 * command or FW executes it but signals an error. In the latter case
261 * the return value is the error code indicated by FW (negated).
263 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
264 int size, void *rpl, bool sleep_ok, int timeout)
266 static const int delay[] = {
267 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
272 int i, ms, delay_idx;
273 const __be64 *p = cmd;
274 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
275 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
277 if ((size & 15) || size > MBOX_LEN)
281 * If the device is off-line, as in EEH, commands will time out.
282 * Fail them early so we don't waste time waiting.
284 if (adap->pdev->error_state != pci_channel_io_normal)
287 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
288 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
289 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
291 if (v != MBOX_OWNER_DRV)
292 return v ? -EBUSY : -ETIMEDOUT;
294 for (i = 0; i < size; i += 8)
295 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
297 t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
298 t4_read_reg(adap, ctl_reg); /* flush write */
303 for (i = 0; i < timeout; i += ms) {
305 ms = delay[delay_idx]; /* last element may repeat */
306 if (delay_idx < ARRAY_SIZE(delay) - 1)
312 v = t4_read_reg(adap, ctl_reg);
313 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
314 if (!(v & MBMSGVALID_F)) {
315 t4_write_reg(adap, ctl_reg, 0);
319 res = t4_read_reg64(adap, data_reg);
320 if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
321 fw_asrt(adap, data_reg);
322 res = FW_CMD_RETVAL_V(EIO);
324 get_mbox_rpl(adap, rpl, size / 8, data_reg);
327 if (FW_CMD_RETVAL_G((int)res))
328 dump_mbox(adap, mbox, data_reg);
329 t4_write_reg(adap, ctl_reg, 0);
330 return -FW_CMD_RETVAL_G((int)res);
334 dump_mbox(adap, mbox, data_reg);
335 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
336 *(const u8 *)cmd, mbox);
337 t4_report_fw_error(adap);
341 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
342 void *rpl, bool sleep_ok)
344 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
349 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
351 * @win: PCI-E Memory Window to use
352 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
353 * @addr: address within indicated memory type
354 * @len: amount of memory to transfer
355 * @hbuf: host memory buffer
356 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
358 * Reads/writes an [almost] arbitrary memory region in the firmware: the
359 * firmware memory address and host buffer must be aligned on 32-bit
360 * boudaries; the length may be arbitrary. The memory is transferred as
361 * a raw byte sequence from/to the firmware's memory. If this memory
362 * contains data structures which contain multi-byte integers, it's the
363 * caller's responsibility to perform appropriate byte order conversions.
365 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
366 u32 len, void *hbuf, int dir)
368 u32 pos, offset, resid, memoffset;
369 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
372 /* Argument sanity checks ...
374 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
378 /* It's convenient to be able to handle lengths which aren't a
379 * multiple of 32-bits because we often end up transferring files to
380 * the firmware. So we'll handle that by normalizing the length here
381 * and then handling any residual transfer at the end.
386 /* Offset into the region of memory which is being accessed
389 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
390 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
392 edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
393 if (mtype != MEM_MC1)
394 memoffset = (mtype * (edc_size * 1024 * 1024));
396 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
397 MA_EXT_MEMORY0_BAR_A));
398 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
401 /* Determine the PCIE_MEM_ACCESS_OFFSET */
402 addr = addr + memoffset;
404 /* Each PCI-E Memory Window is programmed with a window size -- or
405 * "aperture" -- which controls the granularity of its mapping onto
406 * adapter memory. We need to grab that aperture in order to know
407 * how to use the specified window. The window is also programmed
408 * with the base address of the Memory Window in BAR0's address
409 * space. For T4 this is an absolute PCI-E Bus Address. For T5
410 * the address is relative to BAR0.
412 mem_reg = t4_read_reg(adap,
413 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
415 mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
416 mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
417 if (is_t4(adap->params.chip))
418 mem_base -= adap->t4_bar0;
419 win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
421 /* Calculate our initial PCI-E Memory Window Position and Offset into
424 pos = addr & ~(mem_aperture-1);
427 /* Set up initial PCI-E Memory Window to cover the start of our
428 * transfer. (Read it back to ensure that changes propagate before we
429 * attempt to use the new value.)
432 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
435 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
437 /* Transfer data to/from the adapter as long as there's an integral
438 * number of 32-bit transfers to complete.
440 * A note on Endianness issues:
442 * The "register" reads and writes below from/to the PCI-E Memory
443 * Window invoke the standard adapter Big-Endian to PCI-E Link
444 * Little-Endian "swizzel." As a result, if we have the following
445 * data in adapter memory:
447 * Memory: ... | b0 | b1 | b2 | b3 | ...
448 * Address: i+0 i+1 i+2 i+3
450 * Then a read of the adapter memory via the PCI-E Memory Window
455 * [ b3 | b2 | b1 | b0 ]
457 * If this value is stored into local memory on a Little-Endian system
458 * it will show up correctly in local memory as:
460 * ( ..., b0, b1, b2, b3, ... )
462 * But on a Big-Endian system, the store will show up in memory
463 * incorrectly swizzled as:
465 * ( ..., b3, b2, b1, b0, ... )
467 * So we need to account for this in the reads and writes to the
468 * PCI-E Memory Window below by undoing the register read/write
472 if (dir == T4_MEMORY_READ)
473 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
476 t4_write_reg(adap, mem_base + offset,
477 (__force u32)cpu_to_le32(*buf++));
478 offset += sizeof(__be32);
479 len -= sizeof(__be32);
481 /* If we've reached the end of our current window aperture,
482 * move the PCI-E Memory Window on to the next. Note that
483 * doing this here after "len" may be 0 allows us to set up
484 * the PCI-E Memory Window for a possible final residual
487 if (offset == mem_aperture) {
491 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
494 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
499 /* If the original transfer had a length which wasn't a multiple of
500 * 32-bits, now's where we need to finish off the transfer of the
501 * residual amount. The PCI-E Memory Window has already been moved
502 * above (if necessary) to cover this final transfer.
512 if (dir == T4_MEMORY_READ) {
513 last.word = le32_to_cpu(
514 (__force __le32)t4_read_reg(adap,
516 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
517 bp[i] = last.byte[i];
520 for (i = resid; i < 4; i++)
522 t4_write_reg(adap, mem_base + offset,
523 (__force u32)cpu_to_le32(last.word));
530 /* Return the specified PCI-E Configuration Space register from our Physical
531 * Function. We try first via a Firmware LDST Command since we prefer to let
532 * the firmware own all of these registers, but if that fails we go for it
533 * directly ourselves.
535 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
537 u32 val, ldst_addrspace;
539 /* If fw_attach != 0, construct and send the Firmware LDST Command to
540 * retrieve the specified PCI-E Configuration Space register.
542 struct fw_ldst_cmd ldst_cmd;
545 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
546 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
547 ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
551 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
552 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
553 ldst_cmd.u.pcie.ctrl_to_fn =
554 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
555 ldst_cmd.u.pcie.r = reg;
557 /* If the LDST Command succeeds, return the result, otherwise
558 * fall through to reading it directly ourselves ...
560 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
563 val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
565 /* Read the desired Configuration Space register via the PCI-E
566 * Backdoor mechanism.
568 t4_hw_pci_read_cfg4(adap, reg, &val);
572 /* Get the window based on base passed to it.
573 * Window aperture is currently unhandled, but there is no use case for it
576 static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
581 if (is_t4(adap->params.chip)) {
584 /* Truncation intentional: we only read the bottom 32-bits of
585 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
586 * mechanism to read BAR0 instead of using
587 * pci_resource_start() because we could be operating from
588 * within a Virtual Machine which is trapping our accesses to
589 * our Configuration Space and we need to set up the PCI-E
590 * Memory Window decoders with the actual addresses which will
591 * be coming across the PCI-E link.
593 bar0 = t4_read_pcie_cfg4(adap, pci_base);
595 adap->t4_bar0 = bar0;
597 ret = bar0 + memwin_base;
599 /* For T5, only relative offset inside the PCIe BAR is passed */
605 /* Get the default utility window (win0) used by everyone */
606 u32 t4_get_util_window(struct adapter *adap)
608 return t4_get_window(adap, PCI_BASE_ADDRESS_0,
609 PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
612 /* Set up memory window for accessing adapter memory ranges. (Read
613 * back MA register to ensure that changes propagate before we attempt
614 * to use the new values.)
616 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
619 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
620 memwin_base | BIR_V(0) |
621 WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
623 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
627 * t4_get_regs_len - return the size of the chips register set
628 * @adapter: the adapter
630 * Returns the size of the chip's BAR0 register space.
632 unsigned int t4_get_regs_len(struct adapter *adapter)
634 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
636 switch (chip_version) {
638 return T4_REGMAP_SIZE;
642 return T5_REGMAP_SIZE;
645 dev_err(adapter->pdev_dev,
646 "Unsupported chip version %d\n", chip_version);
651 * t4_get_regs - read chip registers into provided buffer
653 * @buf: register buffer
654 * @buf_size: size (in bytes) of register buffer
656 * If the provided register buffer isn't large enough for the chip's
657 * full register range, the register dump will be truncated to the
658 * register buffer's size.
660 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
/* NOTE(review): the three register-range tables below each span hundreds of
 * lines of {start, end} register-address pairs that are elided from this
 * excerpt; they cannot be reconstructed here and must be restored from the
 * original file.  The tables are consumed pairwise by the loop at the bottom.
 */
662 static const unsigned int t4_reg_ranges[] = {
884 static const unsigned int t5_reg_ranges[] = {
1324 static const unsigned int t6_reg_ranges[] = {
1662 u32 *buf_end = (u32 *)((char *)buf + buf_size);
1663 const unsigned int *reg_ranges;
1664 int reg_ranges_size, range;
1665 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
1667 /* Select the right set of register ranges to dump depending on the
1668 * adapter chip type.
1670 switch (chip_version) {
/* presumably CHELSIO_T4 case — case labels dropped from this excerpt */
1672 reg_ranges = t4_reg_ranges;
1673 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
/* presumably CHELSIO_T5 case */
1677 reg_ranges = t5_reg_ranges;
1678 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
/* presumably CHELSIO_T6 case */
1682 reg_ranges = t6_reg_ranges;
1683 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
/* unknown chip version: report and dump nothing */
1687 dev_err(adap->pdev_dev,
1688 "Unsupported chip version %d\n", chip_version);
1692 /* Clear the register buffer and insert the appropriate register
1693 * values selected by the above register ranges.
1695 memset(buf, 0, buf_size);
1696 for (range = 0; range < reg_ranges_size; range += 2) {
1697 unsigned int reg = reg_ranges[range];
1698 unsigned int last_reg = reg_ranges[range + 1];
/* each register lands at its own BAR0 offset inside the dump buffer */
1699 u32 *bufp = (u32 *)((char *)buf + reg);
1701 /* Iterate across the register range filling in the register
1702 * buffer but don't write past the end of the register buffer.
1704 while (reg <= last_reg && bufp < buf_end) {
1705 *bufp++ = t4_read_reg(adap, reg);
1711 #define EEPROM_STAT_ADDR 0x7bfc
1712 #define VPD_BASE 0x400
1713 #define VPD_BASE_OLD 0
1714 #define VPD_LEN 1024
1715 #define CHELSIO_VPD_UNIQUE_ID 0x82
1718 * t4_seeprom_wp - enable/disable EEPROM write protection
1719 * @adapter: the adapter
1720 * @enable: whether to enable or disable write protection
1722 * Enables or disables write protection on the serial EEPROM.
1724 int t4_seeprom_wp(struct adapter *adapter, bool enable)
1726 unsigned int v = enable ? 0xc : 0;
1727 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
1728 return ret < 0 ? ret : 0;
1732 * get_vpd_params - read VPD parameters from VPD EEPROM
1733 * @adapter: adapter to read
1734 * @p: where to store the parameters
1736 * Reads card parameters stored in VPD EEPROM.
1738 int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
1740 u32 cclk_param, cclk_val;
1744 unsigned int vpdr_len, kw_offset, id_len;
1746 vpd = vmalloc(VPD_LEN);
1750 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
1754 /* The VPD shall have a unique identifier specified by the PCI SIG.
1755 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
1756 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
1757 * is expected to automatically put this entry at the
1758 * beginning of the VPD.
1760 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
1762 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
1766 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
1767 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
1772 id_len = pci_vpd_lrdt_size(vpd);
1773 if (id_len > ID_LEN)
1776 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
1778 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
1783 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
1784 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
1785 if (vpdr_len + kw_offset > VPD_LEN) {
1786 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
1791 #define FIND_VPD_KW(var, name) do { \
1792 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
1794 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
1798 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
1801 FIND_VPD_KW(i, "RV");
1802 for (csum = 0; i >= 0; i--)
1806 dev_err(adapter->pdev_dev,
1807 "corrupted VPD EEPROM, actual csum %u\n", csum);
1812 FIND_VPD_KW(ec, "EC");
1813 FIND_VPD_KW(sn, "SN");
1814 FIND_VPD_KW(pn, "PN");
1817 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
1819 memcpy(p->ec, vpd + ec, EC_LEN);
1821 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
1822 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
1824 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
1825 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
1829 * Ask firmware for the Core Clock since it knows how to translate the
1830 * Reference Clock ('V2') VPD field into a Core Clock value ...
1832 cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1833 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
1834 ret = t4_query_params(adapter, adapter->mbox, 0, 0,
1835 1, &cclk_param, &cclk_val);
1846 /* serial flash and firmware constants */
/* NOTE(review): the `enum {` opener and several members (e.g. SF_PAGE_SIZE,
 * SF_SEC_SIZE used by FW_MAX_SIZE below, and the closing `};`) appear to have
 * been dropped from this excerpt — restore from the original file.
 */
1848 SF_ATTEMPTS = 10, /* max retries for SF operations */
1850 /* flash command opcodes */
1851 SF_PROG_PAGE = 2, /* program page */
1852 SF_WR_DISABLE = 4, /* disable writes */
1853 SF_RD_STATUS = 5, /* read status register */
1854 SF_WR_ENABLE = 6, /* enable writes */
1855 SF_RD_DATA_FAST = 0xb, /* read flash */
1856 SF_RD_ID = 0x9f, /* read ID */
1857 SF_ERASE_SECTOR = 0xd8, /* erase sector */
/* maximum firmware image size: 16 flash sectors */
1859 FW_MAX_SIZE = 16 * SF_SEC_SIZE,
1863 * sf1_read - read data from the serial flash
1864 * @adapter: the adapter
1865 * @byte_cnt: number of bytes to read
1866 * @cont: whether another operation will be chained
1867 * @lock: whether to lock SF for PL access only
1868 * @valp: where to store the read data
1870 * Reads up to 4 bytes of data from the serial flash. The location of
1871 * the read needs to be specified prior to calling this by issuing the
1872 * appropriate commands to the serial flash.
1874 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
1875 int lock, u32 *valp)
1879 if (!byte_cnt || byte_cnt > 4)
1881 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
1883 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
1884 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
1885 ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
1887 *valp = t4_read_reg(adapter, SF_DATA_A);
1892 * sf1_write - write data to the serial flash
1893 * @adapter: the adapter
1894 * @byte_cnt: number of bytes to write
1895 * @cont: whether another operation will be chained
1896 * @lock: whether to lock SF for PL access only
1897 * @val: value to write
1899 * Writes up to 4 bytes of data to the serial flash. The location of
1900 * the write needs to be specified prior to calling this by issuing the
1901 * appropriate commands to the serial flash.
1903 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
1906 if (!byte_cnt || byte_cnt > 4)
1908 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
1910 t4_write_reg(adapter, SF_DATA_A, val);
1911 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
1912 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
1913 return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
1917 * flash_wait_op - wait for a flash operation to complete
1918 * @adapter: the adapter
1919 * @attempts: max number of polls of the status register
1920 * @delay: delay between polls in ms
1922 * Wait for a flash operation to complete by polling the status register.
1924 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
1930 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
1931 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
1935 if (--attempts == 0)
1943 * t4_read_flash - read words from serial flash
1944 * @adapter: the adapter
1945 * @addr: the start address for the read
1946 * @nwords: how many 32-bit words to read
1947 * @data: where to store the read data
1948 * @byte_oriented: whether to store data as bytes or as words
1950 * Read the specified number of 32-bit words from the serial flash.
1951 * If @byte_oriented is set the read data is stored as a byte array
1952 * (i.e., big-endian), otherwise as 32-bit words in the platform's
1953 * natural endianness.
1955 int t4_read_flash(struct adapter *adapter, unsigned int addr,
1956 unsigned int nwords, u32 *data, int byte_oriented)
1960 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
1963 addr = swab32(addr) | SF_RD_DATA_FAST;
1965 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
1966 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
1969 for ( ; nwords; nwords--, data++) {
1970 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
1972 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
1976 *data = (__force __u32)(cpu_to_be32(*data));
1982 * t4_write_flash - write up to a page of data to the serial flash
1983 * @adapter: the adapter
1984 * @addr: the start address to write
1985 * @n: length of data to write in bytes
1986 * @data: the data to write
1988 * Writes up to a page of data (256 bytes) to the serial flash starting
1989 * at the given address. All the data must be written to the same page.
1991 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
1992 unsigned int n, const u8 *data)
1996 unsigned int i, c, left, val, offset = addr & 0xff;
1998 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
2001 val = swab32(addr) | SF_PROG_PAGE;
2003 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
2004 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
2007 for (left = n; left; left -= c) {
2009 for (val = 0, i = 0; i < c; ++i)
2010 val = (val << 8) + *data++;
2012 ret = sf1_write(adapter, c, c != left, 1, val);
2016 ret = flash_wait_op(adapter, 8, 1);
2020 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
2022 /* Read the page to verify the write succeeded */
2023 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
2027 if (memcmp(data - n, (u8 *)buf + offset, n)) {
2028 dev_err(adapter->pdev_dev,
2029 "failed to correctly write the flash page at %#x\n",
2036 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
2041 * t4_get_fw_version - read the firmware version
2042 * @adapter: the adapter
2043 * @vers: where to place the version
2045 * Reads the FW version from flash.
2047 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
2049 return t4_read_flash(adapter, FLASH_FW_START +
2050 offsetof(struct fw_hdr, fw_ver), 1,
2055 * t4_get_tp_version - read the TP microcode version
2056 * @adapter: the adapter
2057 * @vers: where to place the version
2059 * Reads the TP microcode version from flash.
2061 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
2063 return t4_read_flash(adapter, FLASH_FW_START +
2064 offsetof(struct fw_hdr, tp_microcode_ver),
2069 * t4_get_exprom_version - return the Expansion ROM version (if any)
2070 * @adapter: the adapter
2071 * @vers: where to place the version
2073 * Reads the Expansion ROM header from FLASH and returns the version
2074 * number (if present) through the @vers return value pointer. We return
2075 * this in the Firmware Version Format since it's convenient. Return
2076 * 0 on success, -ENOENT if no Expansion ROM is present.
2078 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
2080 struct exprom_header {
2081 unsigned char hdr_arr[16]; /* must start with 0x55aa */
2082 unsigned char hdr_ver[4]; /* Expansion ROM version */
2084 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
2088 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
2089 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
2094 hdr = (struct exprom_header *)exprom_header_buf;
2095 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
2098 *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
2099 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
2100 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
2101 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
2105 /* Is the given firmware API compatible with the one the driver was compiled
2108 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
2111 /* short circuit if it's the exact same firmware version */
2112 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
2115 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
2116 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
2117 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
2124 /* The firmware in the filesystem is usable, but should it be installed?
2125 * This routine explains itself in detail if it indicates the filesystem
2126 * firmware should be installed.
2128 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
2133 if (!card_fw_usable) {
2134 reason = "incompatible or unusable";
2139 reason = "older than the version supported with this driver";
2146 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
2147 "installing firmware %u.%u.%u.%u on card.\n",
2148 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
2149 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
2150 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
2151 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
2156 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
2157 const u8 *fw_data, unsigned int fw_size,
2158 struct fw_hdr *card_fw, enum dev_state state,
2161 int ret, card_fw_usable, fs_fw_usable;
2162 const struct fw_hdr *fs_fw;
2163 const struct fw_hdr *drv_fw;
2165 drv_fw = &fw_info->fw_hdr;
2167 /* Read the header of the firmware on the card */
2168 ret = -t4_read_flash(adap, FLASH_FW_START,
2169 sizeof(*card_fw) / sizeof(uint32_t),
2170 (uint32_t *)card_fw, 1);
2172 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
2174 dev_err(adap->pdev_dev,
2175 "Unable to read card's firmware header: %d\n", ret);
2179 if (fw_data != NULL) {
2180 fs_fw = (const void *)fw_data;
2181 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
2187 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2188 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
2189 /* Common case: the firmware on the card is an exact match and
2190 * the filesystem one is an exact match too, or the filesystem
2191 * one is absent/incompatible.
2193 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
2194 should_install_fs_fw(adap, card_fw_usable,
2195 be32_to_cpu(fs_fw->fw_ver),
2196 be32_to_cpu(card_fw->fw_ver))) {
2197 ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
2200 dev_err(adap->pdev_dev,
2201 "failed to install firmware: %d\n", ret);
2205 /* Installed successfully, update the cached header too. */
2208 *reset = 0; /* already reset as part of load_fw */
2211 if (!card_fw_usable) {
2214 d = be32_to_cpu(drv_fw->fw_ver);
2215 c = be32_to_cpu(card_fw->fw_ver);
2216 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
2218 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
2220 "driver compiled with %d.%d.%d.%d, "
2221 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
2223 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
2224 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
2225 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
2226 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
2227 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
2228 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
2233 /* We're using whatever's on the card and it's known to be good. */
2234 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
2235 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
2242 * t4_flash_erase_sectors - erase a range of flash sectors
2243 * @adapter: the adapter
2244 * @start: the first sector to erase
2245 * @end: the last sector to erase
2247 * Erases the sectors in the given inclusive range.
2249 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
2253 if (end >= adapter->params.sf_nsec)
2256 while (start <= end) {
2257 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
2258 (ret = sf1_write(adapter, 4, 0, 1,
2259 SF_ERASE_SECTOR | (start << 8))) != 0 ||
2260 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
2261 dev_err(adapter->pdev_dev,
2262 "erase of flash sector %d failed, error %d\n",
2268 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
2273 * t4_flash_cfg_addr - return the address of the flash configuration file
2274 * @adapter: the adapter
2276 * Return the address within the flash where the Firmware Configuration
2279 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
2281 if (adapter->params.sf_size == 0x100000)
2282 return FLASH_FPGA_CFG_START;
2284 return FLASH_CFG_START;
2287 /* Return TRUE if the specified firmware matches the adapter. I.e. T4
2288 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
2289 * and emit an error message for mismatched firmware to save our caller the
2292 static bool t4_fw_matches_chip(const struct adapter *adap,
2293 const struct fw_hdr *hdr)
2295 /* The expression below will return FALSE for any unsupported adapter
2296 * which will keep us "honest" in the future ...
2298 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
2299 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
2300 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
2303 dev_err(adap->pdev_dev,
2304 "FW image (%d) is not suitable for this adapter (%d)\n",
2305 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
2310 * t4_load_fw - download firmware
2311 * @adap: the adapter
2312 * @fw_data: the firmware image to write
2315 * Write the supplied firmware image to the card's serial flash.
2317 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
2322 u8 first_page[SF_PAGE_SIZE];
2323 const __be32 *p = (const __be32 *)fw_data;
2324 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
2325 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
2326 unsigned int fw_img_start = adap->params.sf_fw_start;
2327 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
2330 dev_err(adap->pdev_dev, "FW image has no data\n");
2334 dev_err(adap->pdev_dev,
2335 "FW image size not multiple of 512 bytes\n");
2338 if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
2339 dev_err(adap->pdev_dev,
2340 "FW image size differs from size in FW header\n");
2343 if (size > FW_MAX_SIZE) {
2344 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
2348 if (!t4_fw_matches_chip(adap, hdr))
2351 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
2352 csum += be32_to_cpu(p[i]);
2354 if (csum != 0xffffffff) {
2355 dev_err(adap->pdev_dev,
2356 "corrupted firmware image, checksum %#x\n", csum);
2360 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
2361 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
2366 * We write the correct version at the end so the driver can see a bad
2367 * version if the FW write fails. Start by writing a copy of the
2368 * first page with a bad version.
2370 memcpy(first_page, fw_data, SF_PAGE_SIZE);
2371 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
2372 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
2376 addr = fw_img_start;
2377 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
2378 addr += SF_PAGE_SIZE;
2379 fw_data += SF_PAGE_SIZE;
2380 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
2385 ret = t4_write_flash(adap,
2386 fw_img_start + offsetof(struct fw_hdr, fw_ver),
2387 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
2390 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
2393 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
2398 * t4_phy_fw_ver - return current PHY firmware version
2399 * @adap: the adapter
2400 * @phy_fw_ver: return value buffer for PHY firmware version
2402 * Returns the current version of external PHY firmware on the
2405 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
2410 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2411 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
2412 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
2413 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
2414 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
2423 * t4_load_phy_fw - download port PHY firmware
2424 * @adap: the adapter
2425 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
2426 * @win_lock: the lock to use to guard the memory copy
2427 * @phy_fw_version: function to check PHY firmware versions
2428 * @phy_fw_data: the PHY firmware image to write
2429 * @phy_fw_size: image size
2431 * Transfer the specified PHY firmware to the adapter. If a non-NULL
2432 * @phy_fw_version is supplied, then it will be used to determine if
2433 * it's necessary to perform the transfer by comparing the version
2434 * of any existing adapter PHY firmware with that of the passed in
2435 * PHY firmware image. If @win_lock is non-NULL then it will be used
2436 * around the call to t4_memory_rw() which transfers the PHY firmware
2439 * A negative error number will be returned if an error occurs. If
2440 * version number support is available and there's no need to upgrade
2441 * the firmware, 0 will be returned. If firmware is successfully
2442 * transferred to the adapter, 1 will be retured.
2444 * NOTE: some adapters only have local RAM to store the PHY firmware. As
2445 * a result, a RESET of the adapter would cause that RAM to lose its
2446 * contents. Thus, loading PHY firmware on such adapters must happen
2447 * after any FW_RESET_CMDs ...
2449 int t4_load_phy_fw(struct adapter *adap,
2450 int win, spinlock_t *win_lock,
2451 int (*phy_fw_version)(const u8 *, size_t),
2452 const u8 *phy_fw_data, size_t phy_fw_size)
2454 unsigned long mtype = 0, maddr = 0;
2456 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
2459 /* If we have version number support, then check to see if the adapter
2460 * already has up-to-date PHY firmware loaded.
2462 if (phy_fw_version) {
2463 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
2464 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
2468 if (cur_phy_fw_ver >= new_phy_fw_vers) {
2469 CH_WARN(adap, "PHY Firmware already up-to-date, "
2470 "version %#x\n", cur_phy_fw_ver);
2475 /* Ask the firmware where it wants us to copy the PHY firmware image.
2476 * The size of the file requires a special version of the READ coommand
2477 * which will pass the file size via the values field in PARAMS_CMD and
2478 * retrieve the return value from firmware and place it in the same
2481 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2482 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
2483 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
2484 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
2486 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
2491 maddr = (val & 0xff) << 16;
2493 /* Copy the supplied PHY Firmware image to the adapter memory location
2494 * allocated by the adapter firmware.
2497 spin_lock_bh(win_lock);
2498 ret = t4_memory_rw(adap, win, mtype, maddr,
2499 phy_fw_size, (__be32 *)phy_fw_data,
2502 spin_unlock_bh(win_lock);
2506 /* Tell the firmware that the PHY firmware image has been written to
2507 * RAM and it can now start copying it over to the PHYs. The chip
2508 * firmware will RESET the affected PHYs as part of this operation
2509 * leaving them running the new PHY firmware image.
2511 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2512 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
2513 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
2514 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
2515 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
2516 ¶m, &val, 30000);
2518 /* If we have version number support, then check to see that the new
2519 * firmware got loaded properly.
2521 if (phy_fw_version) {
2522 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
2526 if (cur_phy_fw_ver != new_phy_fw_vers) {
2527 CH_WARN(adap, "PHY Firmware did not update: "
2528 "version on adapter %#x, "
2529 "version flashed %#x\n",
2530 cur_phy_fw_ver, new_phy_fw_vers);
2539 * t4_fwcache - firmware cache operation
2540 * @adap: the adapter
2541 * @op : the operation (flush or flush and invalidate)
2543 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
2545 struct fw_params_cmd c;
2547 memset(&c, 0, sizeof(c));
2549 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
2550 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
2551 FW_PARAMS_CMD_PFN_V(adap->pf) |
2552 FW_PARAMS_CMD_VFN_V(0));
2553 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2555 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2556 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
2557 c.param[0].val = (__force __be32)op;
2559 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
2562 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
2566 for (i = 0; i < 8; i++) {
2567 u32 *p = la_buf + i;
2569 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
2570 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
2571 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
2572 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
2573 *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
/* Port capabilities we may advertise to a link partner: all supported link
 * speeds plus autonegotiation.
 */
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)
2582 * t4_link_start - apply link configuration to MAC/PHY
2583 * @phy: the PHY to setup
2584 * @mac: the MAC to setup
2585 * @lc: the requested link configuration
2587 * Set up a port's MAC and PHY according to a desired link configuration.
2588 * - If the PHY can auto-negotiate first decide what to advertise, then
2589 * enable/disable auto-negotiation as desired, and reset.
2590 * - If the PHY does not auto-negotiate just reset it.
2591 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
2592 * otherwise do it later based on the outcome of auto-negotiation.
2594 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
2595 struct link_config *lc)
2597 struct fw_port_cmd c;
2598 unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
2601 if (lc->requested_fc & PAUSE_RX)
2602 fc |= FW_PORT_CAP_FC_RX;
2603 if (lc->requested_fc & PAUSE_TX)
2604 fc |= FW_PORT_CAP_FC_TX;
2606 memset(&c, 0, sizeof(c));
2607 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
2608 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
2609 FW_PORT_CMD_PORTID_V(port));
2611 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
2614 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2615 c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
2617 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
2618 } else if (lc->autoneg == AUTONEG_DISABLE) {
2619 c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
2620 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
2622 c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
2624 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2628 * t4_restart_aneg - restart autonegotiation
2629 * @adap: the adapter
2630 * @mbox: mbox to use for the FW command
2631 * @port: the port id
2633 * Restarts autonegotiation for the selected port.
2635 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
2637 struct fw_port_cmd c;
2639 memset(&c, 0, sizeof(c));
2640 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
2641 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
2642 FW_PORT_CMD_PORTID_V(port));
2644 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
2646 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
2647 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
typedef void (*int_handler_t)(struct adapter *adap);

/* One entry of an interrupt-cause decode table used by
 * t4_handle_intr_status(); a table is terminated by an entry with mask 0.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
2661 * t4_handle_intr_status - table driven interrupt handler
2662 * @adapter: the adapter that generated the interrupt
2663 * @reg: the interrupt status register to process
2664 * @acts: table of interrupt actions
2666 * A table driven interrupt handler that applies a set of masks to an
2667 * interrupt status word and performs the corresponding actions if the
2668 * interrupts described by the mask have occurred. The actions include
2669 * optionally emitting a warning or alert message. The table is terminated
2670 * by an entry specifying mask 0. Returns the number of fatal interrupt
2673 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
2674 const struct intr_info *acts)
2677 unsigned int mask = 0;
2678 unsigned int status = t4_read_reg(adapter, reg);
2680 for ( ; acts->mask; ++acts) {
2681 if (!(status & acts->mask))
2685 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
2686 status & acts->mask);
2687 } else if (acts->msg && printk_ratelimit())
2688 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
2689 status & acts->mask);
2690 if (acts->int_handler)
2691 acts->int_handler(adapter);
2695 if (status) /* clear processed interrupts */
2696 t4_write_reg(adapter, reg, status);
2701 * Interrupt handler for the PCIE module.
2703 static void pcie_intr_handler(struct adapter *adapter)
2705 static const struct intr_info sysbus_intr_info[] = {
2706 { RNPP_F, "RXNP array parity error", -1, 1 },
2707 { RPCP_F, "RXPC array parity error", -1, 1 },
2708 { RCIP_F, "RXCIF array parity error", -1, 1 },
2709 { RCCP_F, "Rx completions control array parity error", -1, 1 },
2710 { RFTP_F, "RXFT array parity error", -1, 1 },
2713 static const struct intr_info pcie_port_intr_info[] = {
2714 { TPCP_F, "TXPC array parity error", -1, 1 },
2715 { TNPP_F, "TXNP array parity error", -1, 1 },
2716 { TFTP_F, "TXFT array parity error", -1, 1 },
2717 { TCAP_F, "TXCA array parity error", -1, 1 },
2718 { TCIP_F, "TXCIF array parity error", -1, 1 },
2719 { RCAP_F, "RXCA array parity error", -1, 1 },
2720 { OTDD_F, "outbound request TLP discarded", -1, 1 },
2721 { RDPE_F, "Rx data parity error", -1, 1 },
2722 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
2725 static const struct intr_info pcie_intr_info[] = {
2726 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
2727 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
2728 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
2729 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
2730 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
2731 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
2732 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
2733 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
2734 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
2735 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
2736 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
2737 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
2738 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
2739 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
2740 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
2741 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
2742 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
2743 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
2744 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
2745 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
2746 { FIDPERR_F, "PCI FID parity error", -1, 1 },
2747 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
2748 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
2749 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
2750 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
2751 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
2752 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
2753 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
2754 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
2755 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
2760 static struct intr_info t5_pcie_intr_info[] = {
2761 { MSTGRPPERR_F, "Master Response Read Queue parity error",
2763 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
2764 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
2765 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
2766 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
2767 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
2768 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
2769 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
2771 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
2773 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
2774 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
2775 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
2776 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
2777 { DREQWRPERR_F, "PCI DMA channel write request parity error",
2779 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
2780 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
2781 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
2782 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
2783 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
2784 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
2785 { FIDPERR_F, "PCI FID parity error", -1, 1 },
2786 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
2787 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
2788 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
2789 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
2791 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
2793 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
2794 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
2795 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2796 { READRSPERR_F, "Outbound read error", -1, 0 },
2802 if (is_t4(adapter->params.chip))
2803 fat = t4_handle_intr_status(adapter,
2804 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
2806 t4_handle_intr_status(adapter,
2807 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
2808 pcie_port_intr_info) +
2809 t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
2812 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
2816 t4_fatal_err(adapter);
2820 * TP interrupt handler.
2822 static void tp_intr_handler(struct adapter *adapter)
2824 static const struct intr_info tp_intr_info[] = {
2825 { 0x3fffffff, "TP parity error", -1, 1 },
2826 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
2830 if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
2831 t4_fatal_err(adapter);
2835 * SGE interrupt handler.
2837 static void sge_intr_handler(struct adapter *adapter)
2842 static const struct intr_info sge_intr_info[] = {
2843 { ERR_CPL_EXCEED_IQE_SIZE_F,
2844 "SGE received CPL exceeding IQE size", -1, 1 },
2845 { ERR_INVALID_CIDX_INC_F,
2846 "SGE GTS CIDX increment too large", -1, 0 },
2847 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
2848 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
2849 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
2850 "SGE IQID > 1023 received CPL for FL", -1, 0 },
2851 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
2853 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
2855 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
2857 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
2859 { ERR_ING_CTXT_PRIO_F,
2860 "SGE too many priority ingress contexts", -1, 0 },
2861 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
2862 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
2866 static struct intr_info t4t5_sge_intr_info[] = {
2867 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
2868 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
2869 { ERR_EGR_CTXT_PRIO_F,
2870 "SGE too many priority egress contexts", -1, 0 },
2874 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
2875 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
2877 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
2878 (unsigned long long)v);
2879 t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
2880 t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
2883 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
2884 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
2885 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
2886 t4t5_sge_intr_info);
2888 err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
2889 if (err & ERROR_QID_VALID_F) {
2890 dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
2892 if (err & UNCAPTURED_ERROR_F)
2893 dev_err(adapter->pdev_dev,
2894 "SGE UNCAPTURED_ERROR set (clearing)\n");
2895 t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
2896 UNCAPTURED_ERROR_F);
2900 t4_fatal_err(adapter);
/* Aggregate masks for CIM outbound/inbound queue parity errors. */
#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
2909 * CIM interrupt handler.
2911 static void cim_intr_handler(struct adapter *adapter)
2913 static const struct intr_info cim_intr_info[] = {
2914 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
2915 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2916 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2917 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
2918 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
2919 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
2920 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
2923 static const struct intr_info cim_upintr_info[] = {
2924 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
2925 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
2926 { ILLWRINT_F, "CIM illegal write", -1, 1 },
2927 { ILLRDINT_F, "CIM illegal read", -1, 1 },
2928 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
2929 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
2930 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
2931 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
2932 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
2933 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
2934 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
2935 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
2936 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
2937 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
2938 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
2939 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
2940 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
2941 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
2942 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
2943 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
2944 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
2945 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
2946 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
2947 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
2948 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
2949 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
2950 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
2951 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
2957 if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
2958 t4_report_fw_error(adapter);
2960 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
2962 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
2965 t4_fatal_err(adapter);
2969 * ULP RX interrupt handler.
2971 static void ulprx_intr_handler(struct adapter *adapter)
2973 static const struct intr_info ulprx_intr_info[] = {
2974 { 0x1800000, "ULPRX context error", -1, 1 },
2975 { 0x7fffff, "ULPRX parity error", -1, 1 },
2979 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
2980 t4_fatal_err(adapter);
2984 * ULP TX interrupt handler.
2986 static void ulptx_intr_handler(struct adapter *adapter)
2988 static const struct intr_info ulptx_intr_info[] = {
2989 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
2991 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
2993 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
2995 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
2997 { 0xfffffff, "ULPTX parity error", -1, 1 },
3001 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
3002 t4_fatal_err(adapter);
3006 * PM TX interrupt handler.
3008 static void pmtx_intr_handler(struct adapter *adapter)
3010 static const struct intr_info pmtx_intr_info[] = {
3011 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
3012 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
3013 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
3014 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
3015 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
3016 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
3017 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
3019 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
3020 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
3024 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
3025 t4_fatal_err(adapter);
3029 * PM RX interrupt handler.
3031 static void pmrx_intr_handler(struct adapter *adapter)
3033 static const struct intr_info pmrx_intr_info[] = {
3034 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
3035 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
3036 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
3037 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
3039 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
3040 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
3044 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
3045 t4_fatal_err(adapter);
3049 * CPL switch interrupt handler.
3051 static void cplsw_intr_handler(struct adapter *adapter)
3053 static const struct intr_info cplsw_intr_info[] = {
3054 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
3055 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
3056 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
3057 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
3058 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
3059 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
3063 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
3064 t4_fatal_err(adapter);
3068 * LE interrupt handler.
3070 static void le_intr_handler(struct adapter *adap)
3072 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
3073 static const struct intr_info le_intr_info[] = {
3074 { LIPMISS_F, "LE LIP miss", -1, 0 },
3075 { LIP0_F, "LE 0 LIP error", -1, 0 },
3076 { PARITYERR_F, "LE parity error", -1, 1 },
3077 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
3078 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
3082 static struct intr_info t6_le_intr_info[] = {
3083 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
3084 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
3085 { TCAMINTPERR_F, "LE parity error", -1, 1 },
3086 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
3087 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
3091 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
3092 (chip <= CHELSIO_T5) ?
3093 le_intr_info : t6_le_intr_info))
3098 * MPS interrupt handler.
/* Decodes all MPS (Multi-Port Switch) interrupt sub-causes — Rx/Tx parity,
 * trace (TRC), statistics SRAM/FIFOs, and classifier — accumulating into
 * 'fat' the count of fatal causes, then clears and flushes MPS_INT_CAUSE
 * and escalates via t4_fatal_err().  NOTE(review): numbered listing with
 * blank/brace lines dropped; the 'fat' declaration and some continuation
 * lines are not visible in this excerpt. */
3100 static void mps_intr_handler(struct adapter *adapter)
3102 static const struct intr_info mps_rx_intr_info[] = {
3103 { 0xffffff, "MPS Rx parity error", -1, 1 },
3106 static const struct intr_info mps_tx_intr_info[] = {
3107 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
3108 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
3109 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
3111 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
3113 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
3114 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
3115 { FRMERR_F, "MPS Tx framing error", -1, 1 },
3118 static const struct intr_info mps_trc_intr_info[] = {
3119 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
3120 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
3122 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
3125 static const struct intr_info mps_stat_sram_intr_info[] = {
3126 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
3129 static const struct intr_info mps_stat_tx_intr_info[] = {
3130 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
3133 static const struct intr_info mps_stat_rx_intr_info[] = {
3134 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
3137 static const struct intr_info mps_cls_intr_info[] = {
3138 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
3139 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
3140 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
/* Sum fatal-cause counts from every MPS sub-block cause register. */
3146 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
3148 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
3150 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
3151 mps_trc_intr_info) +
3152 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
3153 mps_stat_sram_intr_info) +
3154 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
3155 mps_stat_tx_intr_info) +
3156 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
3157 mps_stat_rx_intr_info) +
3158 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
/* Clear the top-level cause, read back to flush the posted write. */
3161 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
3162 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
3164 t4_fatal_err(adapter);
3167 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
3171 * EDC/MC interrupt handler.
/* Services an interrupt from one memory controller: idx selects EDC0/EDC1,
 * MC (MC0 on T5+), or MC1.  Reports FIFO parity errors and correctable /
 * uncorrectable ECC errors, clears the cause bits, and treats parity or
 * uncorrectable ECC as fatal.  NOTE(review): numbered listing; brace-only
 * and blank lines were dropped in extraction. */
3173 static void mem_intr_handler(struct adapter *adapter, int idx)
3175 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
3177 unsigned int addr, cnt_addr, v;
/* Pick the INT_CAUSE and ECC_STATUS register pair for this memory. */
3179 if (idx <= MEM_EDC1) {
3180 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
3181 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
3182 } else if (idx == MEM_MC) {
3183 if (is_t4(adapter->params.chip)) {
3184 addr = MC_INT_CAUSE_A;
3185 cnt_addr = MC_ECC_STATUS_A;
3187 addr = MC_P_INT_CAUSE_A;
3188 cnt_addr = MC_P_ECC_STATUS_A;
3191 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
3192 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
3195 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
3196 if (v & PERR_INT_CAUSE_F)
3197 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
3199 if (v & ECC_CE_INT_CAUSE_F) {
3200 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
/* Reset the correctable-error counter after sampling it. */
3202 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
3203 if (printk_ratelimit())
3204 dev_warn(adapter->pdev_dev,
3205 "%u %s correctable ECC data error%s\n",
3206 cnt, name[idx], cnt > 1 ? "s" : "");
3208 if (v & ECC_UE_INT_CAUSE_F)
3209 dev_alert(adapter->pdev_dev,
3210 "%s uncorrectable ECC data error\n", name[idx]);
/* Ack the handled causes; parity / uncorrectable ECC is fatal. */
3212 t4_write_reg(adapter, addr, v);
3213 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
3214 t4_fatal_err(adapter);
3218 * MA interrupt handler.
/* Handles Memory Arbiter (MA) interrupts: logs parity errors (T5 reads a
 * second parity-status register) and address wrap-around errors, then
 * clears the cause register.  NOTE(review): numbered listing; the fatal
 * escalation, if any, is outside this excerpt. */
3220 static void ma_intr_handler(struct adapter *adap)
3222 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
3224 if (status & MEM_PERR_INT_CAUSE_F) {
3225 dev_alert(adap->pdev_dev,
3226 "MA parity error, parity status %#x\n",
3227 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
/* T5 has a second parity status register worth reporting. */
3228 if (is_t5(adap->params.chip))
3229 dev_alert(adap->pdev_dev,
3230 "MA parity error, parity status %#x\n",
3232 MA_PARITY_ERROR_STATUS2_A));
3234 if (status & MEM_WRAP_INT_CAUSE_F) {
3235 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
3236 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
3237 "client %u to address %#x\n",
3238 MEM_WRAP_CLIENT_NUM_G(v),
3239 MEM_WRAP_ADDRESS_G(v) << 4);
3241 t4_write_reg(adap, MA_INT_CAUSE_A, status);
3246 * SMB interrupt handler.
/* Reports SMBus FIFO parity errors; a nonzero return from
 * t4_handle_intr_status() indicates a fatal cause (escalation call is
 * outside this excerpt — NOTE(review): numbered listing, lines dropped). */
3248 static void smb_intr_handler(struct adapter *adap)
3250 static const struct intr_info smb_intr_info[] = {
3251 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
3252 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
3253 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
3257 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
3262 * NC-SI interrupt handler.
/* Reports NC-SI parity errors (CIM/MPS interfaces and Tx/Rx FIFOs); a
 * nonzero return marks a fatal cause.  NOTE(review): numbered listing,
 * escalation/closing lines not visible. */
3264 static void ncsi_intr_handler(struct adapter *adap)
3266 static const struct intr_info ncsi_intr_info[] = {
3267 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
3268 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
3269 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
3270 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
3274 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
3279 * XGMAC interrupt handler.
/* Handles a per-port MAC interrupt: reads the generation-specific cause
 * register (T4 vs. T5+ layout), reports Tx/Rx FIFO parity errors, and acks
 * the handled bits.  NOTE(review): the final write uses the T4 PORT_REG
 * address unconditionally even though T5+ reads from T5_PORT_REG — confirm
 * against upstream t4_hw.c before changing. */
3281 static void xgmac_intr_handler(struct adapter *adap, int port)
3283 u32 v, int_cause_reg;
3285 if (is_t4(adap->params.chip))
3286 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
3288 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
3290 v = t4_read_reg(adap, int_cause_reg);
/* Only the FIFO parity bits are serviced here. */
3292 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
3296 if (v & TXFIFO_PRTY_ERR_F)
3297 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
3299 if (v & RXFIFO_PRTY_ERR_F)
3300 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
3302 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
3307 * PL interrupt handler.
/* Reports PL (top-level) parity errors; a nonzero return marks a fatal
 * cause.  NOTE(review): numbered listing; escalation call not visible. */
3309 static void pl_intr_handler(struct adapter *adap)
3311 static const struct intr_info pl_intr_info[] = {
3312 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
3313 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
3317 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
3321 #define PF_INTR_MASK (PFSW_F)
3322 #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
3323 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
3324 CPL_SWITCH_F | SGE_F | ULP_TX_F)
3327 * t4_slow_intr_handler - control path interrupt handler
3328 * @adapter: the adapter
3330 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
3331 * The designation 'slow' is because it involves register reads, while
3332 * data interrupts typically don't involve any MMIOs.
/* Reads PL_INT_CAUSE once and dispatches every asserted module bit to its
 * module-specific handler, then acks only the bits this function owns
 * (GLBL_INTR_MASK) and flushes the write.  NOTE(review): numbered listing —
 * several single-line "if (cause & X_F)" guards and the return statements
 * were dropped in extraction; each bare handler call below is guarded by
 * such a test in the full source. */
3334 int t4_slow_intr_handler(struct adapter *adapter)
3336 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
/* Nothing for us to do if no global cause bit is set. */
3338 if (!(cause & GLBL_INTR_MASK))
3341 cim_intr_handler(adapter);
3343 mps_intr_handler(adapter);
3345 ncsi_intr_handler(adapter);
3347 pl_intr_handler(adapter);
3349 smb_intr_handler(adapter);
3350 if (cause & XGMAC0_F)
3351 xgmac_intr_handler(adapter, 0);
3352 if (cause & XGMAC1_F)
3353 xgmac_intr_handler(adapter, 1);
3354 if (cause & XGMAC_KR0_F)
3355 xgmac_intr_handler(adapter, 2);
3356 if (cause & XGMAC_KR1_F)
3357 xgmac_intr_handler(adapter, 3);
3359 pcie_intr_handler(adapter);
3361 mem_intr_handler(adapter, MEM_MC);
/* MC1 exists only on T5. */
3362 if (is_t5(adapter->params.chip) && (cause & MC1_F))
3363 mem_intr_handler(adapter, MEM_MC1);
3365 mem_intr_handler(adapter, MEM_EDC0);
3367 mem_intr_handler(adapter, MEM_EDC1);
3369 le_intr_handler(adapter);
3371 tp_intr_handler(adapter);
3373 ma_intr_handler(adapter);
3374 if (cause & PM_TX_F)
3375 pmtx_intr_handler(adapter);
3376 if (cause & PM_RX_F)
3377 pmrx_intr_handler(adapter);
3378 if (cause & ULP_RX_F)
3379 ulprx_intr_handler(adapter);
3380 if (cause & CPL_SWITCH_F)
3381 cplsw_intr_handler(adapter);
3383 sge_intr_handler(adapter);
3384 if (cause & ULP_TX_F)
3385 ulptx_intr_handler(adapter);
3387 /* Clear the interrupts just processed for which we are the master. */
3388 t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
3389 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
3394 * t4_intr_enable - enable interrupts
3395 * @adapter: the adapter whose interrupts should be enabled
3397 * Enable PF-specific interrupts for the calling function and the top-level
3398 * interrupt concentrator for global interrupts. Interrupts are already
3399 * enabled at each module, here we just enable the roots of the interrupt
3402 * Note: this function should be called only when the driver manages
3403 * non PF-specific interrupts from the various HW modules. Only one PCI
3404 * function at a time should be doing this.
/* Programs SGE_INT_ENABLE3, the per-PF PL enable, and routes global
 * interrupts to this PF via PL_INT_MAP0.  NOTE(review): 'val' is declared
 * on a line dropped from this numbered listing; it augments the SGE enable
 * mask with T4/T5-only doorbell error bits. */
3406 void t4_intr_enable(struct adapter *adapter)
3409 u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
/* Doorbell-drop/HP-doorbell causes exist only through T5. */
3411 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
3412 val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
3413 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
3414 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
3415 ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
3416 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
3417 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
3418 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
3419 DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
3420 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
/* Claim mastery of the global interrupts for this PF. */
3421 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
3425 * t4_intr_disable - disable interrupts
3426 * @adapter: the adapter whose interrupts should be disabled
3428 * Disable interrupts. We only disable the top-level interrupt
3429 * concentrators. The caller must be a PCI function managing global
/* Mirror image of t4_intr_enable(): clears the per-PF enable and removes
 * this PF from the global interrupt map. */
3432 void t4_intr_disable(struct adapter *adapter)
3434 u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
3436 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
3437 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
3441 * hash_mac_addr - return the hash value of a MAC address
3442 * @addr: the 48-bit Ethernet MAC address
3444 * Hashes a MAC address according to the hash function used by HW inexact
3445 * (hash) address matching.
/* Packs the 6 MAC bytes into two 24-bit words, a (high) and b (low).
 * NOTE(review): the folding/return lines (XOR and shift reduction to a
 * 6-bit value, per the hardware hash) were dropped from this listing. */
3447 static int hash_mac_addr(const u8 *addr)
3449 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
3450 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
3458 * t4_config_rss_range - configure a portion of the RSS mapping table
3459 * @adapter: the adapter
3460 * @mbox: mbox to use for the FW command
3461 * @viid: virtual interface whose RSS subtable is to be written
3462 * @start: start entry in the table to write
3463 * @n: how many table entries to write
3464 * @rspq: values for the response queue lookup table
3465 * @nrspq: number of values in @rspq
3467 * Programs the selected part of the VI's RSS mapping table with the
3468 * provided values. If @nrspq < @n the supplied values are used repeatedly
3469 * until the full table range is populated.
3471 * The caller must ensure the values in @rspq are in the range allowed for
/* Issues FW_RSS_IND_TBL_CMD mailbox commands in chunks of up to 32 table
 * entries, packing three 10-bit queue ids per 32-bit word and wrapping the
 * @rspq cursor back to the start when it runs out.  NOTE(review): numbered
 * listing — the while-loop header, the wrap-around resets after each
 * "if (++rsp >= rsp_end)" test, and the loop bookkeeping lines were
 * dropped in extraction. */
3474 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
3475 int start, int n, const u16 *rspq, unsigned int nrspq)
3478 const u16 *rsp = rspq;
3479 const u16 *rsp_end = rspq + nrspq;
3480 struct fw_rss_ind_tbl_cmd cmd;
3482 memset(&cmd, 0, sizeof(cmd));
3483 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
3484 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3485 FW_RSS_IND_TBL_CMD_VIID_V(viid));
3486 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
3488 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
3490 int nq = min(n, 32);
3491 __be32 *qp = &cmd.iq0_to_iq2;
3493 cmd.niqid = cpu_to_be16(nq);
3494 cmd.startidx = cpu_to_be16(start);
/* Pack three queue ids per word; wrap @rsp when the source runs out. */
3502 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
3503 if (++rsp >= rsp_end)
3505 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
3506 if (++rsp >= rsp_end)
3508 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
3509 if (++rsp >= rsp_end)
3512 *qp++ = cpu_to_be32(v);
3516 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
3524 * t4_config_glbl_rss - configure the global RSS mode
3525 * @adapter: the adapter
3526 * @mbox: mbox to use for the FW command
3527 * @mode: global RSS mode
3528 * @flags: mode-specific flags
3530 * Sets the global RSS mode.
/* Builds a FW_RSS_GLB_CONFIG_CMD for the MANUAL or BASICVIRTUAL mode
 * (other modes are rejected — the error return falls on a line dropped
 * from this listing) and issues it over the given mailbox. */
3532 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
3535 struct fw_rss_glb_config_cmd c;
3537 memset(&c, 0, sizeof(c));
3538 c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
3539 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3540 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3541 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
3542 c.u.manual.mode_pkd =
3543 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
3544 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
3545 c.u.basicvirtual.mode_pkd =
3546 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
/* BASICVIRTUAL additionally carries the hashing/synmap flags. */
3547 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
3550 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
3554 * t4_config_vi_rss - configure per VI RSS settings
3555 * @adapter: the adapter
3556 * @mbox: mbox to use for the FW command
3559 * @defq: id of the default RSS queue for the VI.
3561 * Configures VI-specific RSS properties.
/* Issues a FW_RSS_VI_CONFIG_CMD carrying the caller's flags plus the
 * default queue id for the given VI (@viid documented on a dropped line). */
3563 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
3564 unsigned int flags, unsigned int defq)
3566 struct fw_rss_vi_config_cmd c;
3568 memset(&c, 0, sizeof(c));
3569 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
3570 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3571 FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
3572 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3573 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
3574 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
3575 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
3578 /* Read an RSS table row */
/* Triggers a lookup of @row in the TP RSS LKP table, then polls for the
 * row-valid bit and stores the result in *val. */
3579 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
3581 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
3582 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
3587 * t4_read_rss - read the contents of the RSS mapping table
3588 * @adapter: the adapter
3589 * @map: holds the contents of the RSS mapping table
3591 * Reads the contents of the RSS hash->queue mapping table.
/* Each hardware row packs two queue ids, so RSS_NENTRIES/2 row reads fill
 * the whole @map.  The early-return on a failed row read sits on a line
 * dropped from this listing. */
3593 int t4_read_rss(struct adapter *adapter, u16 *map)
3598 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
3599 ret = rd_rss_row(adapter, i, &val);
3602 *map++ = LKPTBLQUEUE0_G(val);
3603 *map++ = LKPTBLQUEUE1_G(val);
3609 * t4_read_rss_key - read the global RSS key
3610 * @adap: the adapter
3611 * @key: 10-entry array holding the 320-bit RSS key
3613 * Reads the global 320-bit RSS key.
/* Ten 32-bit indirect TP reads starting at TP_RSS_SECRET_KEY0. */
3615 void t4_read_rss_key(struct adapter *adap, u32 *key)
3617 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
3618 TP_RSS_SECRET_KEY0_A);
3622 * t4_write_rss_key - program one of the RSS keys
3623 * @adap: the adapter
3624 * @key: 10-entry array holding the 320-bit RSS key
3625 * @idx: which RSS key to write
3627 * Writes one of the RSS keys with the given 320-bit value. If @idx is
3628 * 0..15 the corresponding entry in the RSS key table is written,
3629 * otherwise the global RSS key is written.
/* Writes the 10-word key via indirect TP access, then latches it into the
 * per-index key slot through TP_RSS_CONFIG_VRT.  On T6+ with KeyMode 3 the
 * addressable range doubles to 32 and the index is split across
 * KEYWRADDRX/T6_VFWRADDR. */
3631 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
3633 u8 rss_key_addr_cnt = 16;
3634 u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
3636 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
3637 * allows access to key addresses 16-63 by using KeyWrAddrX
3638 * as index[5:4](upper 2) into key table
3640 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
3641 (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
3642 rss_key_addr_cnt = 32;
3644 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
3645 TP_RSS_SECRET_KEY0_A);
/* Out-of-range idx silently leaves only the global key written. */
3647 if (idx >= 0 && idx < rss_key_addr_cnt) {
3648 if (rss_key_addr_cnt > 16)
3649 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
3650 KEYWRADDRX_V(idx >> 4) |
3651 T6_VFWRADDR_V(idx) | KEYWREN_F);
3653 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
3654 KEYWRADDR_V(idx) | KEYWREN_F);
3659 * t4_read_rss_pf_config - read PF RSS Configuration Table
3660 * @adapter: the adapter
3661 * @index: the entry in the PF RSS table to read
3662 * @valp: where to store the returned value
3664 * Reads the PF RSS Configuration Table at the specified index and returns
3665 * the value found there.
/* One indirect TP read at TP_RSS_PF0_CONFIG + index. */
3667 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
3670 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3671 valp, 1, TP_RSS_PF0_CONFIG_A + index);
3675 * t4_read_rss_vf_config - read VF RSS Configuration Table
3676 * @adapter: the adapter
3677 * @index: the entry in the VF RSS table to read
3678 * @vfl: where to store the returned VFL
3679 * @vfh: where to store the returned VFH
3681 * Reads the VF RSS Configuration Table at the specified index and returns
3682 * the (VFL, VFH) values found there.
/* The VF write-address field layout differs between T4/T5 (VFWRADDR) and
 * T6+ (T6_VFWRADDR); select the right mask/value pair first. */
3684 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
3687 u32 vrt, mask, data;
3689 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
3690 mask = VFWRADDR_V(VFWRADDR_M);
3691 data = VFWRADDR_V(index);
3693 mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
3694 data = T6_VFWRADDR_V(index);
3697 /* Request that the index'th VF Table values be read into VFL/VFH.
3699 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
3700 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
3701 vrt |= data | VFRDEN_F;
3702 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
3704 /* Grab the VFL/VFH values ...
3706 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3707 vfl, 1, TP_RSS_VFL_CONFIG_A);
3708 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3709 vfh, 1, TP_RSS_VFH_CONFIG_A);
3713 * t4_read_rss_pf_map - read PF RSS Map
3714 * @adapter: the adapter
3716 * Reads the PF RSS Map register and returns its value.
/* Single indirect TP read; the 'return pfmap;' falls on a dropped line. */
3718 u32 t4_read_rss_pf_map(struct adapter *adapter)
3722 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3723 &pfmap, 1, TP_RSS_PF_MAP_A);
3728 * t4_read_rss_pf_mask - read PF RSS Mask
3729 * @adapter: the adapter
3731 * Reads the PF RSS Mask register and returns its value.
/* Single indirect TP read; the 'return pfmask;' falls on a dropped line. */
3733 u32 t4_read_rss_pf_mask(struct adapter *adapter)
3737 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3738 &pfmask, 1, TP_RSS_PF_MSK_A);
3743 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
3744 * @adap: the adapter
3745 * @v4: holds the TCP/IP counter values
3746 * @v6: holds the TCP/IPv6 counter values
3748 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3749 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
/* Bulk-reads the contiguous OUT_RST..RXT_SEG_LO MIB window once per
 * address family, then picks individual counters out of the buffer via the
 * STAT/STAT64 index macros.  NOTE(review): the "if (v4)" / "if (v6)"
 * guards sit on lines dropped from this listing. */
3751 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3752 struct tp_tcp_stats *v6)
3754 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
3756 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
3757 #define STAT(x) val[STAT_IDX(x)]
3758 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3761 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
3762 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
3763 v4->tcp_out_rsts = STAT(OUT_RST);
3764 v4->tcp_in_segs = STAT64(IN_SEG);
3765 v4->tcp_out_segs = STAT64(OUT_SEG);
3766 v4->tcp_retrans_segs = STAT64(RXT_SEG);
/* Same window layout, IPv6 base address. */
3769 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
3770 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
3771 v6->tcp_out_rsts = STAT(OUT_RST);
3772 v6->tcp_in_segs = STAT64(IN_SEG);
3773 v6->tcp_out_segs = STAT64(OUT_SEG);
3774 v6->tcp_retrans_segs = STAT64(RXT_SEG);
3782 * t4_tp_get_err_stats - read TP's error MIB counters
3783 * @adap: the adapter
3784 * @st: holds the counter values
3786 * Returns the values of TP's error counters.
/* Chips with the full NCHAN channel count read the wide per-channel
 * counter arrays; 2-channel chips (T6+) read the narrow layout.  Both
 * paths finish with the shared ofld_no_neigh / ARP-drop read. */
3788 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3790 /* T6 and later has 2 channels */
3791 if (adap->params.arch.nchan == NCHAN) {
3792 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3793 st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
3794 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3795 st->tnl_cong_drops, 8,
3796 TP_MIB_TNL_CNG_DROP_0_A);
3797 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3798 st->tnl_tx_drops, 4,
3799 TP_MIB_TNL_DROP_0_A);
3800 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3801 st->ofld_vlan_drops, 4,
3802 TP_MIB_OFD_VLN_DROP_0_A);
3803 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3804 st->tcp6_in_errs, 4,
3805 TP_MIB_TCP_V6IN_ERR_0_A);
/* 2-channel layout: each counter family read separately, 2 words each. */
3807 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3808 st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
3809 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3810 st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
3811 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3812 st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
3813 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3814 st->tnl_cong_drops, 2,
3815 TP_MIB_TNL_CNG_DROP_0_A);
3816 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3817 st->ofld_chan_drops, 2,
3818 TP_MIB_OFD_CHN_DROP_0_A);
3819 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3820 st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
3821 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3822 st->ofld_vlan_drops, 2,
3823 TP_MIB_OFD_VLN_DROP_0_A);
3824 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3825 st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
3827 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3828 &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
3832 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
3833 * @adap: the adapter
3834 * @st: holds the counter values
3836 * Returns the values of TP's CPL counters.
/* Full-NCHAN chips read requests and responses as one contiguous 8-word
 * window; 2-channel chips read req and rsp separately, 2 words each. */
3838 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3840 /* T6 and later has 2 channels */
3841 if (adap->params.arch.nchan == NCHAN) {
3842 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
3843 8, TP_MIB_CPL_IN_REQ_0_A);
3845 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
3846 2, TP_MIB_CPL_IN_REQ_0_A);
3847 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
3848 2, TP_MIB_CPL_OUT_RSP_0_A);
3853 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3854 * @adap: the adapter
3855 * @st: holds the counter values
3857 * Returns the values of TP's RDMA counters.
/* Two consecutive MIB words starting at RQE_DFR_PKT fill the stats struct. */
3859 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3861 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
3862 2, TP_MIB_RQE_DFR_PKT_A);
3866 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3867 * @adap: the adapter
3868 * @idx: the port index
3869 * @st: holds the counter values
3871 * Returns the values of TP's FCoE counters for the selected port.
/* DDP'd and dropped frame counters are per-port single words; the byte
 * counter is a 64-bit HI/LO pair, 2 words per port.  'val' is declared on
 * a line dropped from this listing. */
3873 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3874 struct tp_fcoe_stats *st)
3878 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
3879 1, TP_MIB_FCOE_DDP_0_A + idx);
3880 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
3881 1, TP_MIB_FCOE_DROP_0_A + idx);
3882 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
3883 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
/* val[0] is the HI word, val[1] the LO word. */
3884 st->octets_ddp = ((u64)val[0] << 32) | val[1];
3888 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3889 * @adap: the adapter
3890 * @st: holds the counter values
3892 * Returns the values of TP's counters for non-TCP directly-placed packets.
/* Four-word MIB window: frames, (val[1] unused here or assigned on a
 * dropped line — NOTE(review)), then a 64-bit octet count in val[2..3]. */
3894 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3898 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
3900 st->frames = val[0];
3902 st->octets = ((u64)val[2] << 32) | val[3];
3906 * t4_read_mtu_tbl - returns the values in the HW path MTU table
3907 * @adap: the adapter
3908 * @mtus: where to store the MTU values
3909 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
3911 * Reads the HW path MTU table.
/* Writing MTUINDEX=0xff with a row index latches that row so the same
 * register read returns the packed MTU value and width. */
3913 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3918 for (i = 0; i < NMTUS; ++i) {
3919 t4_write_reg(adap, TP_MTU_TABLE_A,
3920 MTUINDEX_V(0xff) | MTUVALUE_V(i));
3921 v = t4_read_reg(adap, TP_MTU_TABLE_A);
3922 mtus[i] = MTUVALUE_G(v);
/* The NULL check on mtu_log sits on a line dropped from this listing. */
3924 mtu_log[i] = MTUWIDTH_G(v);
3929 * t4_read_cong_tbl - reads the congestion control table
3930 * @adap: the adapter
3931 * @incr: where to store the alpha values
3933 * Reads the additive increments programmed into the HW congestion
/* For every (mtu, window) cell: ROWINDEX=0xffff selects read-back mode,
 * the row address packs mtu<<5|w, and the low 13 bits of the register are
 * the additive increment. */
3936 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3938 unsigned int mtu, w;
3940 for (mtu = 0; mtu < NMTUS; ++mtu)
3941 for (w = 0; w < NCCTRL_WIN; ++w) {
3942 t4_write_reg(adap, TP_CCTRL_TABLE_A,
3943 ROWINDEX_V(0xffff) | (mtu << 5) | w);
3944 incr[mtu][w] = (u16)t4_read_reg(adap,
3945 TP_CCTRL_TABLE_A) & 0x1fff;
3950 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3951 * @adap: the adapter
3952 * @addr: the indirect TP register address
3953 * @mask: specifies the field within the register to modify
3954 * @val: new value for the field
3956 * Sets a field of an indirect TP register to the given value.
/* Classic read-modify-write through the TP_PIO_ADDR/DATA indirection. */
3958 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3959 unsigned int mask, unsigned int val)
3961 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
3962 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
3963 t4_write_reg(adap, TP_PIO_DATA_A, val);
3967 * init_cong_ctrl - initialize congestion control parameters
3968 * @a: the alpha values for congestion control
3969 * @b: the beta values for congestion control
3971 * Initialize the congestion control parameters.
/* Fills the NCCTRL_WIN-sized alpha/beta tables with the driver's default
 * curve.  NOTE(review): lines 3976-3999 and 4001-4002 (the a[9..] and
 * b[9..12] assignments) were dropped from this listing. */
3973 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
3975 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
4000 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
4003 b[13] = b[14] = b[15] = b[16] = 3;
4004 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
4005 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
4010 /* The minimum additive increment value for the congestion control table */
4011 #define CC_MIN_INCR 2U
4014 * t4_load_mtus - write the MTU and congestion control HW tables
4015 * @adap: the adapter
4016 * @mtus: the values for the MTU table
4017 * @alpha: the values for the congestion control alpha parameter
4018 * @beta: the values for the congestion control beta parameter
4020 * Write the HW MTU table with the supplied MTUs and the high-speed
4021 * congestion control table with the supplied alpha, beta, and MTUs.
4022 * We write the two tables together because the additive increments
4023 * depend on the MTUs.
/* For each MTU entry: compute a (rounded-up) base-2 log width, program the
 * MTU table row, then for every congestion window derive the additive
 * increment from mtu, alpha and the window's average packet count,
 * clamped below by CC_MIN_INCR.  NOTE(review): the log2 round-up
 * adjustment and the 'inc' declaration fall on dropped lines. */
4025 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
4026 const unsigned short *alpha, const unsigned short *beta)
4028 static const unsigned int avg_pkts[NCCTRL_WIN] = {
4029 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
4030 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
4031 28672, 40960, 57344, 81920, 114688, 163840, 229376
4036 for (i = 0; i < NMTUS; ++i) {
4037 unsigned int mtu = mtus[i];
4038 unsigned int log2 = fls(mtu);
4040 if (!(mtu & ((1 << log2) >> 2))) /* round */
4042 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
4043 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
4045 for (w = 0; w < NCCTRL_WIN; ++w) {
/* 40 bytes of TCP/IP header are excluded from the payload term. */
4048 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
4051 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
4052 (w << 16) | (beta[w] << 13) | inc);
4058 * t4_pmtx_get_stats - returns the HW stats from PMTX
4059 * @adap: the adapter
4060 * @cnt: where to store the count statistics
4061 * @cycles: where to store the cycle statistics
4063 * Returns performance statistics from PMTX.
/* Selects each statistic via PM_TX_STAT_CONFIG (1-based), reads its count,
 * and reads the 64-bit cycle value either directly (T4) or through the
 * debug-register indirection (T5+). */
4065 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
4070 for (i = 0; i < PM_NSTATS; i++) {
4071 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
4072 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
4073 if (is_t4(adap->params.chip)) {
4074 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
4076 t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
4077 PM_TX_DBG_DATA_A, data, 2,
4078 PM_TX_DBG_STAT_MSB_A);
4079 cycles[i] = (((u64)data[0] << 32) | data[1]);
4085 * t4_pmrx_get_stats - returns the HW stats from PMRX
4086 * @adap: the adapter
4087 * @cnt: where to store the count statistics
4088 * @cycles: where to store the cycle statistics
4090 * Returns performance statistics from PMRX.
/* Mirror of t4_pmtx_get_stats() for the PM Rx block: per-stat select,
 * count read, and direct (T4) vs. indirect debug-register (T5+) 64-bit
 * cycle read. */
4092 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
4097 for (i = 0; i < PM_NSTATS; i++) {
4098 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
4099 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
4100 if (is_t4(adap->params.chip)) {
4101 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
4103 t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
4104 PM_RX_DBG_DATA_A, data, 2,
4105 PM_RX_DBG_STAT_MSB_A);
4106 cycles[i] = (((u64)data[0] << 32) | data[1]);
4112 * t4_get_mps_bg_map - return the buffer groups associated with a port
4113 * @adap: the adapter
4114 * @idx: the port index
4116 * Returns a bitmap indicating which MPS buffer groups are associated
4117 * with the given port. Bit i is set if buffer group i is used by the
/* The NUMPORTS field determines the partitioning: one port owns all four
 * groups, two ports get two each.  NOTE(review): the "if (n == ...)"
 * guards and the final per-port fallthrough return were dropped from this
 * listing. */
4120 unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
4122 u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
4125 return idx == 0 ? 0xf : 0;
4127 return idx < 2 ? (3 << (2 * idx)) : 0;
4132 * t4_get_port_type_description - return Port Type string description
4133 * @port_type: firmware Port Type enumeration
/* Bounds-checked table lookup; the table entries and out-of-range fallback
 * string sit on lines dropped from this listing. */
4135 const char *t4_get_port_type_description(enum fw_port_type port_type)
4137 static const char *const port_type_description[] = {
4156 if (port_type < ARRAY_SIZE(port_type_description))
4157 return port_type_description[port_type];
4162 * t4_get_port_stats_offset - collect port stats relative to a previous
4164 * @adap: The adapter
4166 * @stats: Current stats to fill
4167 * @offset: Previous stats snapshot
/* Reads the live port stats, then subtracts the snapshot field-by-field by
 * treating both structs as flat u64 arrays (the subtraction statement in
 * the loop body falls on a dropped line — NOTE(review)). */
4169 void t4_get_port_stats_offset(struct adapter *adap, int idx,
4170 struct port_stats *stats,
4171 struct port_stats *offset)
4176 t4_get_port_stats(adap, idx, stats);
4177 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
4178 i < (sizeof(struct port_stats) / sizeof(u64));
4184 * t4_get_port_stats - collect port statistics
4185 * @adap: the adapter
4186 * @idx: the port index
4187 * @p: the stats structure to fill
4189 * Collect statistics related to the given port from HW.
/* Reads every MPS per-port Tx/Rx statistic via 64-bit register reads,
 * using the T4 or T5+ per-port register layout, then fills the per-buffer-
 * group overflow/truncate counters only for groups this port owns
 * (bgmap).  The #undef of the local GET_STAT macros falls on dropped
 * lines past this excerpt — NOTE(review). */
4191 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
4193 u32 bgmap = t4_get_mps_bg_map(adap, idx);
4195 #define GET_STAT(name) \
4196 t4_read_reg64(adap, \
4197 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
4198 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
4199 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
4201 p->tx_octets = GET_STAT(TX_PORT_BYTES);
4202 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
4203 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
4204 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
4205 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
4206 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
4207 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
4208 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
4209 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
4210 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
4211 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
4212 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
4213 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
4214 p->tx_drop = GET_STAT(TX_PORT_DROP);
4215 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
4216 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
4217 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
4218 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
4219 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
4220 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
4221 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
4222 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
4223 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
4225 p->rx_octets = GET_STAT(RX_PORT_BYTES);
4226 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
4227 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
4228 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
4229 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
4230 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
4231 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
4232 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
4233 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
4234 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
4235 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
4236 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
4237 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
4238 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
4239 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
4240 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
4241 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
4242 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
4243 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
4244 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
4245 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
4246 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
4247 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
4248 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
4249 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
4250 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
4251 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
/* Buffer-group counters are shared; only report groups this port owns. */
4253 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
4254 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
4255 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
4256 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
4257 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
4258 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
4259 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
4260 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
4267 * t4_get_lb_stats - collect loopback port statistics
4268 * @adap: the adapter
4269 * @idx: the loopback port index
4270 * @p: the stats structure to fill
4272 * Return HW statistics for the given loopback port.
/* Reads the per-loopback-port MAC statistics registers into @p.
 * NOTE(review): this extraction appears to have dropped the function's
 * opening/closing braces and the trailing #undef GET_STAT/#undef
 * GET_STAT_COM lines — verify against the upstream t4_hw.c.
 */
4274 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
4276 u32 bgmap = t4_get_mps_bg_map(adap, idx);
/* Per-port 64-bit stat read; register layout moved between T4 (PORT_REG)
 * and T5+ (T5_PORT_REG), selected at runtime from the chip revision.
 */
4278 #define GET_STAT(name) \
4279 t4_read_reg64(adap, \
4280 (is_t4(adap->params.chip) ? \
4281 PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
4282 T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
/* Chip-wide (non-per-port) MPS statistic. */
4283 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
4285 p->octets = GET_STAT(BYTES);
4286 p->frames = GET_STAT(FRAMES);
4287 p->bcast_frames = GET_STAT(BCAST);
4288 p->mcast_frames = GET_STAT(MCAST);
4289 p->ucast_frames = GET_STAT(UCAST);
4290 p->error_frames = GET_STAT(ERROR);
4292 p->frames_64 = GET_STAT(64B);
4293 p->frames_65_127 = GET_STAT(65B_127B);
4294 p->frames_128_255 = GET_STAT(128B_255B);
4295 p->frames_256_511 = GET_STAT(256B_511B);
4296 p->frames_512_1023 = GET_STAT(512B_1023B);
4297 p->frames_1024_1518 = GET_STAT(1024B_1518B);
4298 p->frames_1519_max = GET_STAT(1519B_MAX);
4299 p->drop = GET_STAT(DROP_FRAMES);
/* Buffer-group overflow/truncation counters only exist for the buffer
 * groups this port maps to (bgmap bit set); report 0 otherwise.
 */
4301 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
4302 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
4303 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
4304 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
4305 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
4306 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
4307 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
4308 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
4314 /* t4_mk_filtdelwr - create a delete filter WR
4315 * @ftid: the filter ID
4316 * @wr: the filter work request to populate
4317 * @qid: ingress queue to receive the delete notification
4319 * Creates a filter work request to delete the supplied filter. If @qid is
4320 * negative the delete notification is suppressed.
/* Builds (does not send) a FW_FILTER_WR that deletes filter @ftid.
 * NOTE(review): the original-line-number gap at 4330 suggests a dropped
 * "if (qid >= 0)" guard before the RX_RPL_IQ assignment (matching the
 * NOREPLY flag set for qid < 0 above) — confirm against upstream.
 */
4322 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
4324 memset(wr, 0, sizeof(*wr));
4325 wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
4326 wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
/* Negative @qid means "no delete notification": set NOREPLY. */
4327 wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
4328 FW_FILTER_WR_NOREPLY_V(qid < 0));
4329 wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
4331 wr->rx_chan_rx_rpl_iq =
4332 cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
/* Fill in the common header of firmware command @var: the FW_<cmd>_CMD
 * opcode, the REQUEST flag, the READ or WRITE flag, and the command
 * length in 16-byte units.
 * NOTE(review): the closing "} while (0)" line of this do/while macro
 * appears to have been lost in extraction.
 */
4335 #define INIT_CMD(var, cmd, rd_wr) do { \
4336 (var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
4337 FW_CMD_REQUEST_F | \
4338 FW_CMD_##rd_wr##_F); \
4339 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
/* Writes @val to address @addr in the firmware's address space via an
 * FW_LDST command on mailbox @mbox; returns the mailbox result.
 * NOTE(review): several lines (remaining parameters, opening brace,
 * local declarations) were dropped by the extraction here.
 */
4342 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
4346 struct fw_ldst_cmd c;
4348 memset(&c, 0, sizeof(c));
/* Target the FIRMWARE load/store address space. */
4349 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
4350 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4354 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4355 c.u.addrval.addr = cpu_to_be32(addr);
4356 c.u.addrval.val = cpu_to_be32(val);
4358 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4362 * t4_mdio_rd - read a PHY register through MDIO
4363 * @adap: the adapter
4364 * @mbox: mailbox to use for the FW command
4365 * @phy_addr: the PHY address
4366 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
4367 * @reg: the register to read
4368 * @valp: where to store the value
4370 * Issues a FW command through the given mailbox to read a PHY register.
/* Reads PHY register @reg (PHY @phy_addr, MMD @mmd) over MDIO by way of
 * an FW_LDST command; on success the value lands in *@valp.
 * NOTE(review): the opening brace, "int ret;" declaration, the
 * "if (ret == 0)" guard before *valp, and the trailing return appear to
 * have been dropped by the extraction.
 */
4372 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4373 unsigned int mmd, unsigned int reg, u16 *valp)
4377 struct fw_ldst_cmd c;
4379 memset(&c, 0, sizeof(c));
4380 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
4381 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4382 FW_CMD_REQUEST_F | FW_CMD_READ_F |
4384 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4385 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
4386 FW_LDST_CMD_MMD_V(mmd));
4387 c.u.mdio.raddr = cpu_to_be16(reg);
/* The reply is written back into c; extract the read value from it. */
4389 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4391 *valp = be16_to_cpu(c.u.mdio.rval);
4396 * t4_mdio_wr - write a PHY register through MDIO
4397 * @adap: the adapter
4398 * @mbox: mailbox to use for the FW command
4399 * @phy_addr: the PHY address
4400 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
4401 * @reg: the register to write
4402 * @valp: value to write
4404 * Issues a FW command through the given mailbox to write a PHY register.
/* Writes @val to PHY register @reg (PHY @phy_addr, MMD @mmd) over MDIO
 * by way of an FW_LDST command; returns the mailbox result.
 * NOTE(review): the opening brace and local declarations between the
 * visible lines were dropped by the extraction.
 */
4406 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4407 unsigned int mmd, unsigned int reg, u16 val)
4410 struct fw_ldst_cmd c;
4412 memset(&c, 0, sizeof(c));
4413 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
4414 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4415 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4417 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4418 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
4419 FW_LDST_CMD_MMD_V(mmd));
4420 c.u.mdio.raddr = cpu_to_be16(reg);
4421 c.u.mdio.rval = cpu_to_be16(val);
/* No reply payload needed for a write. */
4423 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4427 * t4_sge_decode_idma_state - decode the idma state
4428 * @adap: the adapter
4429 * @state: the state idma is stuck in
/* Logs a human-readable name for a stuck SGE ingress-DMA state machine
 * state, then dumps the SGE debug registers that aid diagnosis.  The
 * state-name table differs between T4 and T5+ chips.
 * NOTE(review): some table entries and the function's braces appear to
 * have been dropped by the extraction (original line-number gaps).
 */
4431 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
/* State number -> name, for T4. */
4433 static const char * const t4_decode[] = {
4435 "IDMA_PUSH_MORE_CPL_FIFO",
4436 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
4438 "IDMA_PHYSADDR_SEND_PCIEHDR",
4439 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
4440 "IDMA_PHYSADDR_SEND_PAYLOAD",
4441 "IDMA_SEND_FIFO_TO_IMSG",
4442 "IDMA_FL_REQ_DATA_FL_PREP",
4443 "IDMA_FL_REQ_DATA_FL",
4445 "IDMA_FL_H_REQ_HEADER_FL",
4446 "IDMA_FL_H_SEND_PCIEHDR",
4447 "IDMA_FL_H_PUSH_CPL_FIFO",
4448 "IDMA_FL_H_SEND_CPL",
4449 "IDMA_FL_H_SEND_IP_HDR_FIRST",
4450 "IDMA_FL_H_SEND_IP_HDR",
4451 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
4452 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
4453 "IDMA_FL_H_SEND_IP_HDR_PADDING",
4454 "IDMA_FL_D_SEND_PCIEHDR",
4455 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
4456 "IDMA_FL_D_REQ_NEXT_DATA_FL",
4457 "IDMA_FL_SEND_PCIEHDR",
4458 "IDMA_FL_PUSH_CPL_FIFO",
4460 "IDMA_FL_SEND_PAYLOAD_FIRST",
4461 "IDMA_FL_SEND_PAYLOAD",
4462 "IDMA_FL_REQ_NEXT_DATA_FL",
4463 "IDMA_FL_SEND_NEXT_PCIEHDR",
4464 "IDMA_FL_SEND_PADDING",
4465 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
4466 "IDMA_FL_SEND_FIFO_TO_IMSG",
4467 "IDMA_FL_REQ_DATAFL_DONE",
4468 "IDMA_FL_REQ_HEADERFL_DONE",
/* State number -> name, for T5 and later. */
4470 static const char * const t5_decode[] = {
4473 "IDMA_PUSH_MORE_CPL_FIFO",
4474 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
4475 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
4476 "IDMA_PHYSADDR_SEND_PCIEHDR",
4477 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
4478 "IDMA_PHYSADDR_SEND_PAYLOAD",
4479 "IDMA_SEND_FIFO_TO_IMSG",
4480 "IDMA_FL_REQ_DATA_FL",
4482 "IDMA_FL_DROP_SEND_INC",
4483 "IDMA_FL_H_REQ_HEADER_FL",
4484 "IDMA_FL_H_SEND_PCIEHDR",
4485 "IDMA_FL_H_PUSH_CPL_FIFO",
4486 "IDMA_FL_H_SEND_CPL",
4487 "IDMA_FL_H_SEND_IP_HDR_FIRST",
4488 "IDMA_FL_H_SEND_IP_HDR",
4489 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
4490 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
4491 "IDMA_FL_H_SEND_IP_HDR_PADDING",
4492 "IDMA_FL_D_SEND_PCIEHDR",
4493 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
4494 "IDMA_FL_D_REQ_NEXT_DATA_FL",
4495 "IDMA_FL_SEND_PCIEHDR",
4496 "IDMA_FL_PUSH_CPL_FIFO",
4498 "IDMA_FL_SEND_PAYLOAD_FIRST",
4499 "IDMA_FL_SEND_PAYLOAD",
4500 "IDMA_FL_REQ_NEXT_DATA_FL",
4501 "IDMA_FL_SEND_NEXT_PCIEHDR",
4502 "IDMA_FL_SEND_PADDING",
4503 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
/* SGE debug registers dumped after the state name. */
4505 static const u32 sge_regs[] = {
4506 SGE_DEBUG_DATA_LOW_INDEX_2_A,
4507 SGE_DEBUG_DATA_LOW_INDEX_3_A,
4508 SGE_DEBUG_DATA_HIGH_INDEX_10_A,
4510 const char **sge_idma_decode;
4511 int sge_idma_decode_nstates;
/* Select the decode table matching this chip generation. */
4514 if (is_t4(adapter->params.chip)) {
4515 sge_idma_decode = (const char **)t4_decode;
4516 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
4518 sge_idma_decode = (const char **)t5_decode;
4519 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
/* Out-of-range states are reported numerically. */
4522 if (state < sge_idma_decode_nstates)
4523 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
4525 CH_WARN(adapter, "idma state %d unknown\n", state);
4527 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
4528 CH_WARN(adapter, "SGE register %#x value %#x\n",
4529 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
4533 * t4_sge_ctxt_flush - flush the SGE context cache
4534 * @adap: the adapter
4535 * @mbox: mailbox to use for the FW command
4537 * Issues a FW command through the given mailbox to flush the
4538 * SGE context cache.
/* Issues an FW_LDST command (egress-context address space) with the
 * CTXTFLUSH flag set to flush the SGE context cache.
 * NOTE(review): opening brace, local declarations and the trailing
 * "return ret;"/closing brace were dropped by the extraction.
 */
4540 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4544 struct fw_ldst_cmd c;
4546 memset(&c, 0, sizeof(c));
4547 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
4548 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4549 FW_CMD_REQUEST_F | FW_CMD_READ_F |
4551 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4552 c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
4554 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4559 * t4_fw_hello - establish communication with FW
4560 * @adap: the adapter
4561 * @mbox: mailbox to use for the FW command
4562 * @evt_mbox: mailbox to receive async FW events
4563 * @master: specifies the caller's willingness to be the device master
4564 * @state: returns the current device state (if non-NULL)
4566 * Issues a command to establish communication with FW. Returns either
4567 * an error (negative integer) or the mailbox of the Master PF.
/* Establishes communication with the firmware via a HELLO command,
 * negotiating which PF becomes Master and reporting device state.
 * NOTE(review): this extraction dropped several lines (braces, the
 * "retry:" label and goto, the wait/retry loop body, the final return)
 * — verify the full control flow against upstream t4_hw.c.
 */
4569 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4570 enum dev_master master, enum dev_state *state)
4573 struct fw_hello_cmd c;
4575 unsigned int master_mbox;
4576 int retries = FW_CMD_HELLO_RETRIES;
4579 memset(&c, 0, sizeof(c));
4580 INIT_CMD(c, HELLO, WRITE);
/* Encode the caller's mastership preference and our event mailbox. */
4581 c.err_to_clearinit = cpu_to_be32(
4582 FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
4583 FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
4584 FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
4585 mbox : FW_HELLO_CMD_MBMASTER_M) |
4586 FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
4587 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
4588 FW_HELLO_CMD_CLEARINIT_F);
4591 * Issue the HELLO command to the firmware. If it's not successful
4592 * but indicates that we got a "busy" or "timeout" condition, retry
4593 * the HELLO until we exhaust our retry limit. If we do exceed our
4594 * retry limit, check to see if the firmware left us any error
4595 * information and report that if so.
4597 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4599 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4601 if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
4602 t4_report_fw_error(adap);
/* Decode the reply: who is Master, and what state the device is in. */
4606 v = be32_to_cpu(c.err_to_clearinit);
4607 master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
4609 if (v & FW_HELLO_CMD_ERR_F)
4610 *state = DEV_STATE_ERR;
4611 else if (v & FW_HELLO_CMD_INIT_F)
4612 *state = DEV_STATE_INIT;
4614 *state = DEV_STATE_UNINIT;
4618 * If we're not the Master PF then we need to wait around for the
4619 * Master PF Driver to finish setting up the adapter.
4621 * Note that we also do this wait if we're a non-Master-capable PF and
4622 * there is no current Master PF; a Master PF may show up momentarily
4623 * and we wouldn't want to fail pointlessly. (This can happen when an
4624 * OS loads lots of different drivers rapidly at the same time). In
4625 * this case, the Master PF returned by the firmware will be
4626 * PCIE_FW_MASTER_M so the test below will work ...
4628 if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
4629 master_mbox != mbox) {
4630 int waiting = FW_CMD_HELLO_TIMEOUT;
4633 * Wait for the firmware to either indicate an error or
4634 * initialized state. If we see either of these we bail out
4635 * and report the issue to the caller. If we exhaust the
4636 * "hello timeout" and we haven't exhausted our retries, try
4637 * again. Otherwise bail with a timeout error.
4646 * If neither Error nor Initialized are indicated
4647 * by the firmware keep waiting till we exhaust our
4648 * timeout ... and then retry if we haven't exhausted
4651 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
4652 if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
4663 * We either have an Error or Initialized condition
4664 * report errors preferentially.
4667 if (pcie_fw & PCIE_FW_ERR_F)
4668 *state = DEV_STATE_ERR;
4669 else if (pcie_fw & PCIE_FW_INIT_F)
4670 *state = DEV_STATE_INIT;
4674 * If we arrived before a Master PF was selected and
4675 * there's not a valid Master PF, grab its identity
4678 if (master_mbox == PCIE_FW_MASTER_M &&
4679 (pcie_fw & PCIE_FW_MASTER_VLD_F))
4680 master_mbox = PCIE_FW_MASTER_G(pcie_fw);
4689 * t4_fw_bye - end communication with FW
4690 * @adap: the adapter
4691 * @mbox: mailbox to use for the FW command
4693 * Issues a command to terminate communication with FW.
/* Sends a BYE command to cleanly end communication with the firmware;
 * returns the mailbox result.
 */
4695 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4697 struct fw_bye_cmd c;
4699 memset(&c, 0, sizeof(c));
4700 INIT_CMD(c, BYE, WRITE);
4701 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4705 * t4_init_cmd - ask FW to initialize the device
4706 * @adap: the adapter
4707 * @mbox: mailbox to use for the FW command
4709 * Issues a command to FW to partially initialize the device. This
4710 * performs initialization that generally doesn't depend on user input.
/* Sends an INITIALIZE command to the firmware to perform partial,
 * user-input-independent device initialization; returns the mailbox
 * result.  (Same command body as t4_fw_initialize() below.)
 */
4712 int t4_early_init(struct adapter *adap, unsigned int mbox)
4714 struct fw_initialize_cmd c;
4716 memset(&c, 0, sizeof(c));
4717 INIT_CMD(c, INITIALIZE, WRITE);
4718 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4722 * t4_fw_reset - issue a reset to FW
4723 * @adap: the adapter
4724 * @mbox: mailbox to use for the FW command
4725 * @reset: specifies the type of reset to perform
4727 * Issues a reset command of the specified type to FW.
/* Sends a RESET command of type @reset to the firmware; returns the
 * mailbox result.
 */
4729 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4731 struct fw_reset_cmd c;
4733 memset(&c, 0, sizeof(c));
4734 INIT_CMD(c, RESET, WRITE);
4735 c.val = cpu_to_be32(reset);
4736 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4740 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4741 * @adap: the adapter
4742 * @mbox: mailbox to use for the FW RESET command (if desired)
4743 * @force: force uP into RESET even if FW RESET command fails
4745 * Issues a RESET command to firmware (if desired) with a HALT indication
4746 * and then puts the microprocessor into RESET state. The RESET command
4747 * will only be issued if a legitimate mailbox is provided (mbox <=
4748 * PCIE_FW_MASTER_M).
4750 * This is generally used in order for the host to safely manipulate the
4751 * adapter without fear of conflicting with whatever the firmware might
4752 * be doing. The only way out of this state is to RESTART the firmware
/* Halts the firmware: optionally sends RESET+HALT via @mbox, then (on
 * success, or unconditionally when @force) puts the uP into RESET and
 * latches PCIE_FW.HALT.  Returns the RESET command's result.
 * NOTE(review): the "int ret;" declaration/initialization and closing
 * braces were dropped by the extraction.
 */
4755 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4760 * If a legitimate mailbox is provided, issue a RESET command
4761 * with a HALT indication.
4763 if (mbox <= PCIE_FW_MASTER_M) {
4764 struct fw_reset_cmd c;
4766 memset(&c, 0, sizeof(c));
4767 INIT_CMD(c, RESET, WRITE);
4768 c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
4769 c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
4770 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4774 * Normally we won't complete the operation if the firmware RESET
4775 * command fails but if our caller insists we'll go ahead and put the
4776 * uP into RESET. This can be useful if the firmware is hung or even
4777 * missing ... We'll have to take the risk of putting the uP into
4778 * RESET without the cooperation of firmware in that case.
4780 * We also force the firmware's HALT flag to be on in case we bypassed
4781 * the firmware RESET command above or we're dealing with old firmware
4782 * which doesn't have the HALT capability. This will serve as a flag
4783 * for the incoming firmware to know that it's coming out of a HALT
4784 * rather than a RESET ... if it's new enough to understand that ...
4786 if (ret == 0 || force) {
4787 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
4788 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
4793 * And we always return the result of the firmware RESET command
4794 * even when we force the uP into RESET ...
4800 * t4_fw_restart - restart the firmware by taking the uP out of RESET
4801 * @adap: the adapter
4802 * @reset: if we want to do a RESET to restart things
4804 * Restart firmware previously halted by t4_fw_halt(). On successful
4805 * return the previous PF Master remains as the new PF Master and there
4806 * is no need to issue a new HELLO command, etc.
4808 * We do this in two ways:
4810 * 1. If we're dealing with newer firmware we'll simply want to take
4811 * the chip's microprocessor out of RESET. This will cause the
4812 * firmware to start up from its start vector. And then we'll loop
4813 * until the firmware indicates it's started again (PCIE_FW.HALT
4814 * reset to 0) or we timeout.
4816 * 2. If we're dealing with older firmware then we'll need to RESET
4817 * the chip since older firmware won't recognize the PCIE_FW.HALT
4818 * flag and automatically RESET itself on startup.
/* Restarts firmware previously halted by t4_fw_halt().  With @reset it
 * performs a full chip RESET (via firmware if @mbox is valid, else by
 * hammering PL_RST); otherwise it clears PCIE_FW.HALT, releases the uP
 * from RESET and polls for the firmware to come back up.
 * NOTE(review): the "if (reset) { ... } else { ... }" scaffolding, the
 * poll-loop delay/return lines and closing braces were dropped by the
 * extraction.
 */
4820 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4824 * Since we're directing the RESET instead of the firmware
4825 * doing it automatically, we need to clear the PCIE_FW.HALT
4828 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
4831 * If we've been given a valid mailbox, first try to get the
4832 * firmware to do the RESET. If that works, great and we can
4833 * return success. Otherwise, if we haven't been given a
4834 * valid mailbox or the RESET command failed, fall back to
4835 * hitting the chip with a hammer.
4837 if (mbox <= PCIE_FW_MASTER_M) {
4838 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
4840 if (t4_fw_reset(adap, mbox,
4841 PIORST_F | PIORSTMODE_F) == 0)
/* Fallback: direct PIO reset through the PL_RST register. */
4845 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
/* Non-reset path: release the uP and wait for HALT to clear. */
4850 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
4851 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4852 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
4863 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4864 * @adap: the adapter
4865 * @mbox: mailbox to use for the FW RESET command (if desired)
4866 * @fw_data: the firmware image to write
4868 * @force: force upgrade even if firmware doesn't cooperate
4870 * Perform all of the steps necessary for upgrading an adapter's
4871 * firmware image. Normally this requires the cooperation of the
4872 * existing firmware in order to halt all existing activities
4873 * but if an invalid mailbox token is passed in we skip that step
4874 * (though we'll still put the adapter microprocessor into RESET in
4877 * On successful return the new firmware will have been loaded and
4878 * the adapter will have been fully RESET losing all previous setup
4879 * state. On unsuccessful return the adapter may be completely hosed ...
4880 * positive errno indicates that the adapter is ~probably~ intact, a
4881 * negative errno indicates that things are looking bad ...
/* Upgrades the adapter firmware: halt the running firmware, load the
 * new image, then restart — forcing a chip RESET for older images that
 * don't advertise FW_HDR_FLAGS_RESET_HALT.
 * NOTE(review): the opening brace, "int reset, ret;" declaration, the
 * early "return" statements after the halt/load error checks and the
 * closing brace were dropped by the extraction.
 */
4883 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4884 const u8 *fw_data, unsigned int size, int force)
4886 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
/* Refuse an image built for a different chip. */
4889 if (!t4_fw_matches_chip(adap, fw_hdr))
4892 ret = t4_fw_halt(adap, mbox, force);
4893 if (ret < 0 && !force)
4896 ret = t4_load_fw(adap, fw_data, size);
4901 * Older versions of the firmware don't understand the new
4902 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4903 * restart. So for newly loaded older firmware we'll have to do the
4904 * RESET for it so it starts up on a clean slate. We can tell if
4905 * the newly loaded firmware will handle this right by checking
4906 * its header flags to see if it advertises the capability.
4908 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4909 return t4_fw_restart(adap, mbox, reset);
4913 * t4_fixup_host_params - fix up host-dependent parameters
4914 * @adap: the adapter
4915 * @page_size: the host's Base Page Size
4916 * @cache_line_size: the host's Cache Line Size
4918 * Various registers in T4 contain values which are dependent on the
4919 * host's Base Page and Cache Line Sizes. This function will fix all of
4920 * those registers with the appropriate values as passed in ...
/* Programs SGE registers whose correct values depend on the host's page
 * size and cache line size: host page size per PF, ingress padding and
 * (T5+) packing boundaries, egress status page size, and the free-list
 * host buffer sizes.
 * NOTE(review): the opening brace, the "} else {" joining the T4/T5
 * branches, the fl_align adjustment inside the "fl_align <= 32" case,
 * the shift operators on the buffer-size rounding lines, and the final
 * "return 0;" appear to have been dropped by the extraction.
 */
4922 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
4923 unsigned int cache_line_size)
4925 unsigned int page_shift = fls(page_size) - 1;
4926 unsigned int sge_hps = page_shift - 10;
4927 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
4928 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
4929 unsigned int fl_align_log = fls(fl_align) - 1;
/* Same host page size for all eight PFs. */
4931 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
4932 HOSTPAGESIZEPF0_V(sge_hps) |
4933 HOSTPAGESIZEPF1_V(sge_hps) |
4934 HOSTPAGESIZEPF2_V(sge_hps) |
4935 HOSTPAGESIZEPF3_V(sge_hps) |
4936 HOSTPAGESIZEPF4_V(sge_hps) |
4937 HOSTPAGESIZEPF5_V(sge_hps) |
4938 HOSTPAGESIZEPF6_V(sge_hps) |
4939 HOSTPAGESIZEPF7_V(sge_hps));
4941 if (is_t4(adap->params.chip)) {
4942 t4_set_reg_field(adap, SGE_CONTROL_A,
4943 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
4944 EGRSTATUSPAGESIZE_F,
4945 INGPADBOUNDARY_V(fl_align_log -
4946 INGPADBOUNDARY_SHIFT_X) |
4947 EGRSTATUSPAGESIZE_V(stat_len != 64));
4949 /* T5 introduced the separation of the Free List Padding and
4950 * Packing Boundaries. Thus, we can select a smaller Padding
4951 * Boundary to avoid uselessly chewing up PCIe Link and Memory
4952 * Bandwidth, and use a Packing Boundary which is large enough
4953 * to avoid false sharing between CPUs, etc.
4955 * For the PCI Link, the smaller the Padding Boundary the
4956 * better. For the Memory Controller, a smaller Padding
4957 * Boundary is better until we cross under the Memory Line
4958 * Size (the minimum unit of transfer to/from Memory). If we
4959 * have a Padding Boundary which is smaller than the Memory
4960 * Line Size, that'll involve a Read-Modify-Write cycle on the
4961 * Memory Controller which is never good. For T5 the smallest
4962 * Padding Boundary which we can select is 32 bytes which is
4963 * larger than any known Memory Controller Line Size so we'll
4966 * T5 has a different interpretation of the "0" value for the
4967 * Packing Boundary. This corresponds to 16 bytes instead of
4968 * the expected 32 bytes. We never have a Packing Boundary
4969 * less than 32 bytes so we can't use that special value but
4970 * on the other hand, if we wanted 32 bytes, the best we can
4971 * really do is 64 bytes.
4973 if (fl_align <= 32) {
4977 t4_set_reg_field(adap, SGE_CONTROL_A,
4978 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
4979 EGRSTATUSPAGESIZE_F,
4980 INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
4981 EGRSTATUSPAGESIZE_V(stat_len != 64));
4982 t4_set_reg_field(adap, SGE_CONTROL2_A,
4983 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
4984 INGPACKBOUNDARY_V(fl_align_log -
4985 INGPACKBOUNDARY_SHIFT_X));
4988 * Adjust various SGE Free List Host Buffer Sizes.
4990 * This is something of a crock since we're using fixed indices into
4991 * the array which are also known by the sge.c code and the T4
4992 * Firmware Configuration File. We need to come up with a much better
4993 * approach to managing this array. For now, the first four entries
4998 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
4999 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
5001 * For the single-MTU buffers in unpacked mode we need to include
5002 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
5003 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
5004 * Padding boundary. All of these are accommodated in the Factory
5005 * Default Firmware Configuration File but we need to adjust it for
5006 * this host's cache line size.
5008 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
/* Round the configured single-MTU buffer sizes up to fl_align. */
5009 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
5010 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
5012 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
5013 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
/* ULP RX TDDP page size derives from the host page shift. */
5016 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
5022 * t4_fw_initialize - ask FW to initialize the device
5023 * @adap: the adapter
5024 * @mbox: mailbox to use for the FW command
5026 * Issues a command to FW to partially initialize the device. This
5027 * performs initialization that generally doesn't depend on user input.
/* Sends an INITIALIZE command to the firmware to perform partial,
 * user-input-independent device initialization; returns the mailbox
 * result.
 */
5029 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
5031 struct fw_initialize_cmd c;
5033 memset(&c, 0, sizeof(c));
5034 INIT_CMD(c, INITIALIZE, WRITE);
5035 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5039 * t4_query_params_rw - query FW or device parameters
5040 * @adap: the adapter
5041 * @mbox: mailbox to use for the FW command
5044 * @nparams: the number of parameters
5045 * @params: the parameter names
5046 * @val: the parameter values
5047 * @rw: Write and read flag
5049 * Reads the value of FW or device parameters. Up to 7 parameters can be
/* Queries up to 7 firmware/device parameters for PF @pf / VF @vf,
 * storing results in @val.  With @rw set, the current @val entries are
 * also sent in the request (write-and-read).
 * NOTE(review): the opening brace, "int i, ret;", the nparams bound
 * check, the "if (rw)" guard inside the loop, the p increment between
 * mnem and val slots, and the trailing "return ret;" were dropped by
 * the extraction.
 */
5052 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
5053 unsigned int vf, unsigned int nparams, const u32 *params,
5057 struct fw_params_cmd c;
5058 __be32 *p = &c.param[0].mnem;
5063 memset(&c, 0, sizeof(c));
5064 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
5065 FW_CMD_REQUEST_F | FW_CMD_READ_F |
5066 FW_PARAMS_CMD_PFN_V(pf) |
5067 FW_PARAMS_CMD_VFN_V(vf));
5068 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Pack each (mnem, val) pair into the command body. */
5070 for (i = 0; i < nparams; i++) {
5071 *p++ = cpu_to_be32(*params++);
5073 *p = cpu_to_be32(*(val + i));
/* Reply is written back into c; unpack the returned values. */
5077 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5079 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
5080 *val++ = be32_to_cpu(*p);
/* Read-only convenience wrapper around t4_query_params_rw() (rw = 0). */
5084 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
5085 unsigned int vf, unsigned int nparams, const u32 *params,
5088 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
5092 * t4_set_params_timeout - sets FW or device parameters
5093 * @adap: the adapter
5094 * @mbox: mailbox to use for the FW command
5097 * @nparams: the number of parameters
5098 * @params: the parameter names
5099 * @val: the parameter values
5100 * @timeout: the timeout time
5102 * Sets the value of FW or device parameters. Up to 7 parameters can be
5103 * specified at once.
/* Sets up to 7 firmware/device parameters for PF @pf / VF @vf, using a
 * caller-supplied mailbox timeout; returns the mailbox result.
 * NOTE(review): the opening brace, the nparams bound check, and the
 * "while (nparams--)" loop header appear to have been dropped by the
 * extraction.
 */
5105 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
5106 unsigned int pf, unsigned int vf,
5107 unsigned int nparams, const u32 *params,
5108 const u32 *val, int timeout)
5110 struct fw_params_cmd c;
5111 __be32 *p = &c.param[0].mnem;
5116 memset(&c, 0, sizeof(c));
5117 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
5118 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5119 FW_PARAMS_CMD_PFN_V(pf) |
5120 FW_PARAMS_CMD_VFN_V(vf));
5121 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Pack each (mnem, value) pair into the command body. */
5124 *p++ = cpu_to_be32(*params++);
5125 *p++ = cpu_to_be32(*val++);
5128 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
5132 * t4_set_params - sets FW or device parameters
5133 * @adap: the adapter
5134 * @mbox: mailbox to use for the FW command
5137 * @nparams: the number of parameters
5138 * @params: the parameter names
5139 * @val: the parameter values
5141 * Sets the value of FW or device parameters. Up to 7 parameters can be
5142 * specified at once.
/* Convenience wrapper around t4_set_params_timeout() using the default
 * FW_CMD_MAX_TIMEOUT.
 */
5144 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
5145 unsigned int vf, unsigned int nparams, const u32 *params,
5148 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
5149 FW_CMD_MAX_TIMEOUT);
5153 * t4_cfg_pfvf - configure PF/VF resource limits
5154 * @adap: the adapter
5155 * @mbox: mailbox to use for the FW command
5156 * @pf: the PF being configured
5157 * @vf: the VF being configured
5158 * @txq: the max number of egress queues
5159 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
5160 * @rxqi: the max number of interrupt-capable ingress queues
5161 * @rxq: the max number of interruptless ingress queues
5162 * @tc: the PCI traffic class
5163 * @vi: the max number of virtual interfaces
5164 * @cmask: the channel access rights mask for the PF/VF
5165 * @pmask: the port access rights mask for the PF/VF
5166 * @nexact: the maximum number of exact MPS filters
5167 * @rcaps: read capabilities
5168 * @wxcaps: write/execute capabilities
5170 * Configures resource limits and capabilities for a physical or virtual
/* Sends an FW_PFVF command configuring queue/interface/filter resource
 * limits and capabilities for PF @pf / VF @vf; returns the mailbox
 * result.  See the kernel-doc above for per-parameter meanings.
 */
5173 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
5174 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
5175 unsigned int rxqi, unsigned int rxq, unsigned int tc,
5176 unsigned int vi, unsigned int cmask, unsigned int pmask,
5177 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
5179 struct fw_pfvf_cmd c;
5181 memset(&c, 0, sizeof(c));
5182 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
5183 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
5184 FW_PFVF_CMD_VFN_V(vf));
5185 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Ingress queue limits: interrupt-capable (rxqi) vs interruptless. */
5186 c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
5187 FW_PFVF_CMD_NIQ_V(rxq));
5188 c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
5189 FW_PFVF_CMD_PMASK_V(pmask) |
5190 FW_PFVF_CMD_NEQ_V(txq));
5191 c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
5192 FW_PFVF_CMD_NVI_V(vi) |
5193 FW_PFVF_CMD_NEXACTF_V(nexact));
5194 c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
5195 FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
5196 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
5197 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5201 * t4_alloc_vi - allocate a virtual interface
5202 * @adap: the adapter
5203 * @mbox: mailbox to use for the FW command
5204 * @port: physical port associated with the VI
5205 * @pf: the PF owning the VI
5206 * @vf: the VF owning the VI
5207 * @nmac: number of MAC addresses needed (1 to 5)
5208 * @mac: the MAC addresses of the VI
5209 * @rss_size: size of RSS table slice associated with this VI
5211 * Allocates a virtual interface for the given physical port. If @mac is
5212 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
5213 * @mac should be large enough to hold @nmac Ethernet addresses, they are
5214 * stored consecutively so the space needed is @nmac * 6 bytes.
5215 * Returns a negative error number or the non-negative VI id.
/* Allocates a virtual interface on physical port @port via an FW_VI
 * command; copies up to @nmac assigned MAC addresses into @mac and the
 * RSS slice size into *rss_size, then returns the new VI id.
 * NOTE(review): the extraction dropped the opening brace, the
 * "struct fw_vi_cmd c;" / "int ret;" declarations, the NMAC encoding
 * into the command, the error-return check after t4_wr_mbox(), the
 * "if (mac)" guard and the switch-on-nmac scaffolding around the
 * fallthrough memcpy cases — verify against upstream.
 */
5217 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
5218 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
5219 unsigned int *rss_size)
5224 memset(&c, 0, sizeof(c));
5225 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
5226 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
5227 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
5228 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
5229 c.portid_pkd = FW_VI_CMD_PORTID_V(port);
5232 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Copy out the firmware-assigned MAC addresses, 6 bytes each. */
5237 memcpy(mac, c.mac, sizeof(c.mac));
5240 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
5242 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
5244 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
5246 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
5250 *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
5251 return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
5255 * t4_free_vi - free a virtual interface
5256 * @adap: the adapter
5257 * @mbox: mailbox to use for the FW command
5258 * @pf: the PF owning the VI
5259 * @vf: the VF owning the VI
5260 * @viid: virtual interface identifier
5262 * Free a previously allocated virtual interface.
/* Frees virtual interface @viid via an FW_VI command with the FREE flag
 * set; returns the mailbox result.
 * NOTE(review): the opening brace, the flag lines inside op_to_vfn
 * (REQUEST/EXEC) and the closing brace were dropped by the extraction.
 */
5264 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
5265 unsigned int vf, unsigned int viid)
5269 memset(&c, 0, sizeof(c));
5270 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
5273 FW_VI_CMD_PFN_V(pf) |
5274 FW_VI_CMD_VFN_V(vf));
5275 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
5276 c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
5278 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5282 * t4_set_rxmode - set Rx properties of a virtual interface
5283 * @adap: the adapter
5284 * @mbox: mailbox to use for the FW command
5286 * @mtu: the new MTU or -1
5287 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
5288 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
5289 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
5290 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
5291 * @sleep_ok: if true we may sleep while awaiting command completion
5293 * Sets Rx properties of a virtual interface.
5295 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
5296 int mtu, int promisc, int all_multi, int bcast, int vlanex,
5299 struct fw_vi_rxmode_cmd c;
5301 /* convert to FW values */
/* A caller-supplied -1 means "no change"; the firmware encodes that as the
 * all-ones field mask (the _M macro) for each respective field.
 */
5303 mtu = FW_RXMODE_MTU_NO_CHG;
5305 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
5307 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
5309 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
5311 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
5313 memset(&c, 0, sizeof(c));
5314 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
5315 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5316 FW_VI_RXMODE_CMD_VIID_V(viid));
5317 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5319 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
5320 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
5321 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
5322 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
5323 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
/* t4_wr_mbox_meat() lets the caller choose sleeping vs. busy-wait. */
5324 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5328 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
5329 * @adap: the adapter
5330 * @mbox: mailbox to use for the FW command
5332 * @free: if true any existing filters for this VI id are first removed
5333 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
5334 * @addr: the MAC address(es)
5335 * @idx: where to store the index of each allocated filter
5336 * @hash: pointer to hash address filter bitmap
5337 * @sleep_ok: call is allowed to sleep
5339 * Allocates an exact-match filter for each of the supplied addresses and
5340 * sets it to the corresponding address. If @idx is not %NULL it should
5341 * have at least @naddr entries, each of which will be set to the index of
5342 * the filter allocated for the corresponding MAC address. If a filter
5343 * could not be allocated for an address its index is set to 0xffff.
5344 * If @hash is not %NULL addresses that fail to allocate an exact filter
5345 * are hashed and update the hash filter bitmap pointed at by @hash.
5347 * Returns a negative error number or the number of filters allocated.
5349 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
5350 unsigned int viid, bool free, unsigned int naddr,
5351 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
5353 int offset, ret = 0;
5354 struct fw_vi_mac_cmd c;
5355 unsigned int nfilters = 0;
/* MPS TCAM size is chip-generation dependent (T4 vs T5/T6). */
5356 unsigned int max_naddr = adap->params.arch.mps_tcam_size;
5357 unsigned int rem = naddr;
5359 if (naddr > max_naddr)
/* Process the addresses in batches limited by how many exact-match slots
 * fit in one FW_VI_MAC_CMD (ARRAY_SIZE(c.u.exact)).
 */
5362 for (offset = 0; offset < naddr ; /**/) {
5363 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
5364 rem : ARRAY_SIZE(c.u.exact));
/* Command length covers only the exact[] entries actually used. */
5365 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
5366 u.exact[fw_naddr]), 16);
5367 struct fw_vi_mac_exact *p;
5370 memset(&c, 0, sizeof(c));
5371 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
5374 FW_CMD_EXEC_V(free) |
5375 FW_VI_MAC_CMD_VIID_V(viid));
5376 c.freemacs_to_len16 =
5377 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
5378 FW_CMD_LEN16_V(len16));
/* FW_VI_MAC_ADD_MAC asks firmware to pick a free filter index. */
5380 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5382 cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
5383 FW_VI_MAC_CMD_IDX_V(
5384 FW_VI_MAC_ADD_MAC));
5385 memcpy(p->macaddr, addr[offset + i],
5386 sizeof(p->macaddr));
5389 /* It's okay if we run out of space in our MAC address arena.
5390 * Some of the addresses we submit may get stored so we need
5391 * to run through the reply to see what the results were ...
5393 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
5394 if (ret && ret != -FW_ENOMEM)
/* Walk the reply: an index >= max_naddr means the exact filter could
 * not be allocated; such addresses fall back to the hash filter.
 */
5397 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5398 u16 index = FW_VI_MAC_CMD_IDX_G(
5399 be16_to_cpu(p->valid_to_idx));
5402 idx[offset + i] = (index >= max_naddr ?
5404 if (index < max_naddr)
5408 hash_mac_addr(addr[offset + i]));
/* -FW_ENOMEM is a partial success: report how many filters stuck. */
5416 if (ret == 0 || ret == -FW_ENOMEM)
5422 * t4_change_mac - modifies the exact-match filter for a MAC address
5423 * @adap: the adapter
5424 * @mbox: mailbox to use for the FW command
5426 * @idx: index of existing filter for old value of MAC address, or -1
5427 * @addr: the new MAC address value
5428 * @persist: whether a new MAC allocation should be persistent
5429 * @add_smt: if true also add the address to the HW SMT
5431 * Modifies an exact-match filter and sets it to the new MAC address.
5432 * Note that in general it is not possible to modify the value of a given
5433 * filter so the generic way to modify an address filter is to free the one
5434 * being used by the old address value and allocate a new filter for the
5435 * new address value. @idx can be -1 if the address is a new addition.
5437 * Returns a negative error number or the index of the filter with the new
5440 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
5441 int idx, const u8 *addr, bool persist, bool add_smt)
5444 struct fw_vi_mac_cmd c;
5445 struct fw_vi_mac_exact *p = c.u.exact;
5446 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
/* idx < 0 requests a new filter; the special ADD_* index values tell
 * firmware to allocate one (persistently if @persist).
 */
5448 if (idx < 0) /* new allocation */
5449 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
5450 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
5452 memset(&c, 0, sizeof(c));
5453 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
5454 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5455 FW_VI_MAC_CMD_VIID_V(viid));
/* Exactly one exact-match entry follows the command header. */
5456 c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
5457 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
5458 FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
5459 FW_VI_MAC_CMD_IDX_V(idx));
5460 memcpy(p->macaddr, addr, sizeof(p->macaddr));
5462 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success the reply carries the filter index actually used; an index
 * >= the TCAM size signals the allocation failed.
 */
5464 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
5465 if (ret >= max_mac_addr)
5472 * t4_set_addr_hash - program the MAC inexact-match hash filter
5473 * @adap: the adapter
5474 * @mbox: mailbox to use for the FW command
5476 * @ucast: whether the hash filter should also match unicast addresses
5477 * @vec: the value to be written to the hash filter
5478 * @sleep_ok: call is allowed to sleep
5480 * Sets the 64-bit inexact-match hash filter for a virtual interface.
5482 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
5483 bool ucast, u64 vec, bool sleep_ok)
5485 struct fw_vi_mac_cmd c;
5487 memset(&c, 0, sizeof(c));
/* This is a FW_VI_MAC_CMD, so use the matching VIID macro (the field
 * occupies the same bits as in FW_VI_ENABLE_CMD, but the MAC_CMD macro
 * is the one this file uses for FW_VI_MAC_CMD everywhere else).
 */
5488 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
5489 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5490 FW_VI_MAC_CMD_VIID_V(viid));
/* HASHVECEN selects hash-filter programming; HASHUNIEN extends the hash
 * match to unicast addresses when @ucast is set.
 */
5491 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
5492 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
5494 c.u.hash.hashvec = cpu_to_be64(vec);
5495 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5499 * t4_enable_vi_params - enable/disable a virtual interface
5500 * @adap: the adapter
5501 * @mbox: mailbox to use for the FW command
5503 * @rx_en: 1=enable Rx, 0=disable Rx
5504 * @tx_en: 1=enable Tx, 0=disable Tx
5505 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
5507 * Enables/disables a virtual interface. Note that setting DCB Enable
5508 * only makes sense when enabling a Virtual Interface ...
5510 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
5511 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
5513 struct fw_vi_enable_cmd c;
5515 memset(&c, 0, sizeof(c));
5516 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
5517 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5518 FW_VI_ENABLE_CMD_VIID_V(viid));
/* IEN/EEN gate ingress (Rx) and egress (Tx); DCB_INFO requests delivery
 * of Data Center Bridging messages for this VI.
 */
5519 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
5520 FW_VI_ENABLE_CMD_EEN_V(tx_en) |
5521 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
/* _ns variant: issued without sleeping. */
5523 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
5527 * t4_enable_vi - enable/disable a virtual interface
5528 * @adap: the adapter
5529 * @mbox: mailbox to use for the FW command
5531 * @rx_en: 1=enable Rx, 0=disable Rx
5532 * @tx_en: 1=enable Tx, 0=disable Tx
5534 * Enables/disables a virtual interface.
5536 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
5537 bool rx_en, bool tx_en)
/* Convenience wrapper: same as t4_enable_vi_params() with DCB disabled. */
5539 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
5543 * t4_identify_port - identify a VI's port by blinking its LED
5544 * @adap: the adapter
5545 * @mbox: mailbox to use for the FW command
5547 * @nblinks: how many times to blink LED at 2.5 Hz
5549 * Identifies a VI's port by blinking its LED.
5551 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
5552 unsigned int nblinks)
5554 struct fw_vi_enable_cmd c;
5556 memset(&c, 0, sizeof(c));
5557 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
5558 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5559 FW_VI_ENABLE_CMD_VIID_V(viid));
/* LED flag turns the enable command into a "blink the port LED" request;
 * blinkdur is the number of blinks (at 2.5 Hz per the kerneldoc above).
 */
5560 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
5561 c.blinkdur = cpu_to_be16(nblinks);
5562 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5566 * t4_iq_free - free an ingress queue and its FLs
5567 * @adap: the adapter
5568 * @mbox: mailbox to use for the FW command
5569 * @pf: the PF owning the queues
5570 * @vf: the VF owning the queues
5571 * @iqtype: the ingress queue type
5572 * @iqid: ingress queue id
5573 * @fl0id: FL0 queue id or 0xffff if no attached FL0
5574 * @fl1id: FL1 queue id or 0xffff if no attached FL1
5576 * Frees an ingress queue and its associated FLs, if any.
5578 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5579 unsigned int vf, unsigned int iqtype, unsigned int iqid,
5580 unsigned int fl0id, unsigned int fl1id)
5584 memset(&c, 0, sizeof(c));
5585 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
5586 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
5587 FW_IQ_CMD_VFN_V(vf));
5588 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
5589 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
/* FL ids of 0xffff mean "no attached free list" per the kerneldoc. */
5590 c.iqid = cpu_to_be16(iqid);
5591 c.fl0id = cpu_to_be16(fl0id);
5592 c.fl1id = cpu_to_be16(fl1id);
5593 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5597 * t4_eth_eq_free - free an Ethernet egress queue
5598 * @adap: the adapter
5599 * @mbox: mailbox to use for the FW command
5600 * @pf: the PF owning the queue
5601 * @vf: the VF owning the queue
5602 * @eqid: egress queue id
5604 * Frees an Ethernet egress queue.
5606 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5607 unsigned int vf, unsigned int eqid)
5609 struct fw_eq_eth_cmd c;
5611 memset(&c, 0, sizeof(c));
5612 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
5613 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5614 FW_EQ_ETH_CMD_PFN_V(pf) |
5615 FW_EQ_ETH_CMD_VFN_V(vf));
5616 c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
5617 c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
5618 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5622 * t4_ctrl_eq_free - free a control egress queue
5623 * @adap: the adapter
5624 * @mbox: mailbox to use for the FW command
5625 * @pf: the PF owning the queue
5626 * @vf: the VF owning the queue
5627 * @eqid: egress queue id
5629 * Frees a control egress queue.
5631 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5632 unsigned int vf, unsigned int eqid)
5634 struct fw_eq_ctrl_cmd c;
5636 memset(&c, 0, sizeof(c));
5637 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
5638 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5639 FW_EQ_CTRL_CMD_PFN_V(pf) |
5640 FW_EQ_CTRL_CMD_VFN_V(vf));
5641 c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
5642 c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
5643 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5647 * t4_ofld_eq_free - free an offload egress queue
5648 * @adap: the adapter
5649 * @mbox: mailbox to use for the FW command
5650 * @pf: the PF owning the queue
5651 * @vf: the VF owning the queue
5652 * @eqid: egress queue id
5654 * Frees an offload egress queue.
5656 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5657 unsigned int vf, unsigned int eqid)
5659 struct fw_eq_ofld_cmd c;
5661 memset(&c, 0, sizeof(c));
5662 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
5663 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5664 FW_EQ_OFLD_CMD_PFN_V(pf) |
5665 FW_EQ_OFLD_CMD_VFN_V(vf));
5666 c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
5667 c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
5668 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5672 * t4_handle_fw_rpl - process a FW reply message
5673 * @adap: the adapter
5674 * @rpl: start of the FW message
5676 * Processes a FW message, such as link state change messages.
5678 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
/* First byte of any FW message is its opcode. */
5680 u8 opcode = *(const u8 *)rpl;
5682 if (opcode == FW_PORT_CMD) { /* link/module state change message */
5683 int speed = 0, fc = 0;
5684 const struct fw_port_cmd *p = (void *)rpl;
/* Map the HW channel in the message to the logical port index. */
5685 int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
5686 int port = adap->chan_map[chan];
5687 struct port_info *pi = adap2pinfo(adap, port);
5688 struct link_config *lc = &pi->link_cfg;
5689 u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
5690 int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
5691 u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
/* Decode flow control and link speed from the status word. */
5693 if (stat & FW_PORT_CMD_RXPAUSE_F)
5695 if (stat & FW_PORT_CMD_TXPAUSE_F)
5697 if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
5699 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
5701 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
5703 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
/* Only notify the OS layer when link state actually changed. */
5706 if (link_ok != lc->link_ok || speed != lc->speed ||
5707 fc != lc->fc) { /* something changed */
5708 lc->link_ok = link_ok;
5711 lc->supported = be16_to_cpu(p->u.info.pcap);
5712 t4_os_link_changed(adap, port, link_ok);
/* Separately report transceiver module insert/remove events. */
5714 if (mod != pi->mod_type) {
5716 t4_os_portmod_changed(adap, port);
5722 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
/* Capture the negotiated PCIe link speed and width from LNKSTA. */
5726 if (pci_is_pcie(adapter->pdev)) {
5727 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
5728 p->speed = val & PCI_EXP_LNKSTA_CLS;
/* NLW field starts at bit 4 of the Link Status register. */
5729 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5734 * init_link_config - initialize a link's SW state
5735 * @lc: structure holding the link state
5736 * @caps: link capabilities
5738 * Initializes the SW state maintained for each link, including the link's
5739 * capabilities and default speed/flow-control/autonegotiation settings.
5741 static void init_link_config(struct link_config *lc, unsigned int caps)
5743 lc->supported = caps;
5744 lc->requested_speed = 0;
/* Default to symmetric pause (Rx+Tx flow control). */
5746 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
/* If the port can autonegotiate, advertise the supported modes and let
 * autoneg also resolve pause settings; otherwise force them off.
 */
5747 if (lc->supported & FW_PORT_CAP_ANEG) {
5748 lc->advertising = lc->supported & ADVERT_MASK;
5749 lc->autoneg = AUTONEG_ENABLE;
5750 lc->requested_fc |= PAUSE_AUTONEG;
5752 lc->advertising = 0;
5753 lc->autoneg = AUTONEG_DISABLE;
5757 #define CIM_PF_NOACCESS 0xeeeeeeee
5759 int t4_wait_dev_ready(void __iomem *regs)
/* Read PL_WHOAMI: all-ones means the device isn't responding on the bus,
 * CIM_PF_NOACCESS means the CIM is refusing PF access; retry once.
 */
5763 whoami = readl(regs + PL_WHOAMI_A);
5764 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
5768 whoami = readl(regs + PL_WHOAMI_A);
5769 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
5773 u32 vendor_and_model_id;
5777 static int get_flash_params(struct adapter *adap)
5779 /* Table for non-Numonix supported flash parts. Numonix parts are left
5780 * to the preexisting code. All flash parts have 64KB sectors.
5782 static struct flash_desc supported_flash[] = {
5783 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
/* Issue the serial-flash READ ID command and fetch the 3-byte JEDEC id. */
5789 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
5791 ret = sf1_read(adap, 3, 0, 1, &info);
5792 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
/* Check the explicit table first; ret doubles as the loop index here. */
5796 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
5797 if (supported_flash[ret].vendor_and_model_id == info) {
5798 adap->params.sf_size = supported_flash[ret].size_mb;
5799 adap->params.sf_nsec =
5800 adap->params.sf_size / SF_SEC_SIZE;
/* Otherwise only Numonix (manufacturer id 0x20) parts are handled. */
5804 if ((info & 0xff) != 0x20) /* not a Numonix flash */
5806 info >>= 16; /* log2 of size */
5807 if (info >= 0x14 && info < 0x18)
5808 adap->params.sf_nsec = 1 << (info - 16)
5809 else if (info == 0x18)
5810 adap->params.sf_nsec = 64;
5813 adap->params.sf_size = 1 << info;
5814 adap->params.sf_fw_start =
5815 t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
/* Warn (but don't fail) if the part is smaller than we expect. */
5817 if (adap->params.sf_size < FLASH_MIN_SIZE)
5818 dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
5819 adap->params.sf_size, FLASH_MIN_SIZE);
5824 * t4_prep_adapter - prepare SW and HW for operation
5825 * @adapter: the adapter
5826 * @reset: if true perform a HW reset
5828 * Initialize adapter SW state for the various HW modules, set initial
5829 * values for some adapter tunables, take PHYs out of reset, and
5830 * initialize the MDIO interface.
5832 int t4_prep_adapter(struct adapter *adapter)
5838 get_pci_mode(adapter, &adapter->params.pci);
5839 pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
5841 ret = get_flash_params(adapter);
5843 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
5847 /* Retrieve adapter's device ID
5849 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
/* Top nibble of the PCI device id encodes the chip generation. */
5850 ver = device_id >> 12;
5851 adapter->params.chip = 0;
/* Per-generation architecture parameters: doorbell flags, MPS TCAM and
 * replication table sizes, channel and VF counts.
 */
5854 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
5855 adapter->params.arch.sge_fl_db = DBPRIO_F;
5856 adapter->params.arch.mps_tcam_size =
5857 NUM_MPS_CLS_SRAM_L_INSTANCES;
5858 adapter->params.arch.mps_rplc_size = 128;
5859 adapter->params.arch.nchan = NCHAN;
5860 adapter->params.arch.vfcount = 128;
5863 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
5864 adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
5865 adapter->params.arch.mps_tcam_size =
5866 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5867 adapter->params.arch.mps_rplc_size = 128;
5868 adapter->params.arch.nchan = NCHAN;
5869 adapter->params.arch.vfcount = 128;
5872 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
5873 adapter->params.arch.sge_fl_db = 0;
5874 adapter->params.arch.mps_tcam_size =
5875 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5876 adapter->params.arch.mps_rplc_size = 256;
5877 adapter->params.arch.nchan = 2;
5878 adapter->params.arch.vfcount = 256;
5881 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
5886 adapter->params.cim_la_size = CIMLA_SIZE;
5887 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5890 * Default port for debugging in case we can't reach FW.
5892 adapter->params.nports = 1;
5893 adapter->params.portvec = 1;
/* 50 MHz core clock default until the VPD is read. */
5894 adapter->params.vpd.cclk = 50000;
5899 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
5900 * @adapter: the adapter
5901 * @qid: the Queue ID
5902 * @qtype: the Ingress or Egress type for @qid
5903 * @pbar2_qoffset: BAR2 Queue Offset
5904 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
5906 * Returns the BAR2 SGE Queue Registers information associated with the
5907 * indicated Absolute Queue ID. These are passed back in return value
5908 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
5909 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
5911 * This may return an error which indicates that BAR2 SGE Queue
5912 * registers aren't available. If an error is not returned, then the
5913 * following values are returned:
5915 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
5916 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
5918 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
5919 * require the "Inferred Queue ID" ability may be used. E.g. the
5920 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
5921 * then these "Inferred Queue ID" register may not be used.
5923 int t4_bar2_sge_qregs(struct adapter *adapter,
5925 enum t4_bar2_qtype qtype,
5927 unsigned int *pbar2_qid)
5929 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
5930 u64 bar2_page_offset, bar2_qoffset;
5931 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
5933 /* T4 doesn't support BAR2 SGE Queue registers.
5935 if (is_t4(adapter->params.chip))
5938 /* Get our SGE Page Size parameters.
/* hps is a log2 encoding; +10 converts to a byte shift (1KB base). */
5940 page_shift = adapter->params.sge.hps + 10;
5941 page_size = 1 << page_shift;
5943 /* Get the right Queues per Page parameters for our Queue.
5945 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
5946 ? adapter->params.sge.eq_qpp
5947 : adapter->params.sge.iq_qpp)
5948 qpp_mask = (1 << qpp_shift) - 1;
5950 /* Calculate the basics of the BAR2 SGE Queue register area:
5951 * o The BAR2 page the Queue registers will be in.
5952 * o The BAR2 Queue ID.
5953 * o The BAR2 Queue ID Offset into the BAR2 page.
5955 bar2_page_offset = ((qid >> qpp_shift) << page_shift);
5956 bar2_qid = qid & qpp_mask;
5957 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
5959 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
5960 * hardware will infer the Absolute Queue ID simply from the writes to
5961 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
5962 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
5963 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
5964 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
5965 * from the BAR2 Page and BAR2 Queue ID.
5967 * One important consequence of this is that some BAR2 SGE registers
5968 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
5969 * there. But other registers synthesize the SGE Queue ID purely
5970 * from the writes to the registers -- the Write Combined Doorbell
5971 * Buffer is a good example. These BAR2 SGE Registers are only
5972 * available for those BAR2 SGE Register areas where the SGE Absolute
5973 * Queue ID can be inferred from simple writes.
5975 bar2_qoffset = bar2_page_offset;
5976 bar2_qinferred = (bar2_qid_offset < page_size);
5977 if (bar2_qinferred) {
5978 bar2_qoffset += bar2_qid_offset;
/* Return results through the caller's out-pointers. */
5982 *pbar2_qoffset = bar2_qoffset;
5983 *pbar2_qid = bar2_qid;
5988 * t4_init_devlog_params - initialize adapter->params.devlog
5989 * @adap: the adapter
5991 * Initialize various fields of the adapter's Firmware Device Log
5992 * Parameters structure.
5994 int t4_init_devlog_params(struct adapter *adap)
5996 struct devlog_params *dparams = &adap->params.devlog;
5998 unsigned int devlog_meminfo;
5999 struct fw_devlog_cmd devlog_cmd;
6002 /* If we're dealing with newer firmware, the Device Log Parameters
6003 * are stored in a designated register which allows us to access the
6004 * Device Log even if we can't talk to the firmware.
6007 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
6009 unsigned int nentries, nentries128;
6011 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
/* Address is stored in 16-byte units; <<4 converts to bytes. */
6012 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
/* Entry count is stored in units of 128 entries, minus one. */
6014 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
6015 nentries = (nentries128 + 1) * 128;
6016 dparams->size = nentries * sizeof(struct fw_devlog_e);
6021 /* Otherwise, ask the firmware for its Device Log Parameters.
6023 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
6024 devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
6025 FW_CMD_REQUEST_F | FW_CMD_READ_F);
6026 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
6027 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
6033 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
6034 dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
6035 dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
6036 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
6042 * t4_init_sge_params - initialize adap->params.sge
6043 * @adapter: the adapter
6045 * Initialize various fields of the adapter's SGE Parameters structure.
6047 int t4_init_sge_params(struct adapter *adapter)
6049 struct sge_params *sge_params = &adapter->params.sge;
6051 unsigned int s_hps, s_qpp;
6053 /* Extract the SGE Page Size for our PF.
/* Each PF's field has the same width; compute our PF's shift position. */
6055 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
6056 s_hps = (HOSTPAGESIZEPF0_S +
6057 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
6058 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
6060 /* Extract the SGE Egress and Ingress Queues Per Page for our PF.
6062 s_qpp = (QUEUESPERPAGEPF0_S +
6063 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
6064 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
6065 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
6066 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
6067 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
6073 * t4_init_tp_params - initialize adap->params.tp
6074 * @adap: the adapter
6076 * Initialize various fields of the adapter's TP Parameters structure.
6078 int t4_init_tp_params(struct adapter *adap)
6083 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
6084 adap->params.tp.tre = TIMERRESOLUTION_G(v);
6085 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
6087 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
6088 for (chan = 0; chan < NCHAN; chan++)
6089 adap->params.tp.tx_modq[chan] = chan;
6091 /* Cache the adapter's Compressed Filter Mode and global Ingress
6094 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
6095 &adap->params.tp.vlan_pri_map, 1,
6097 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
6098 &adap->params.tp.ingress_config, 1,
6099 TP_INGRESS_CONFIG_A);
6101 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
6102 * shift positions of several elements of the Compressed Filter Tuple
6103 * for this adapter which we need frequently ...
6105 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
6106 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
6107 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
6108 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
6111 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
6112 * represents the presence of an Outer VLAN instead of a VNIC ID.
6114 if ((adap->params.tp.ingress_config & VNIC_F) == 0)
6115 adap->params.tp.vnic_shift = -1;
6121 * t4_filter_field_shift - calculate filter field shift
6122 * @adap: the adapter
6123 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
6125 * Return the shift position of a filter field within the Compressed
6126 * Filter Tuple. The filter field is specified via its selection bit
6127 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
6129 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
6131 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
/* A field not enabled in the filter mode has no shift position. */
6135 if ((filter_mode & filter_sel) == 0)
/* Walk every selection bit below @filter_sel; each enabled field below
 * it pushes the target field left by that field's width.
 */
6138 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
6139 switch (filter_mode & sel) {
6141 field_shift += FT_FCOE_W;
6144 field_shift += FT_PORT_W;
6147 field_shift += FT_VNIC_ID_W;
6150 field_shift += FT_VLAN_W;
6153 field_shift += FT_TOS_W;
6156 field_shift += FT_PROTOCOL_W;
6159 field_shift += FT_ETHERTYPE_W;
6162 field_shift += FT_MACMATCH_W;
6165 field_shift += FT_MPSHITTYPE_W;
6167 case FRAGMENTATION_F:
6168 field_shift += FT_FRAGMENTATION_W;
6175 int t4_init_rss_mode(struct adapter *adap, int mbox)
6178 struct fw_rss_vi_config_cmd rvc;
6180 memset(&rvc, 0, sizeof(rvc));
/* Read each port's RSS VI configuration from firmware and cache the
 * basic-virtual RSS mode word in the port_info.
 */
6182 for_each_port(adap, i) {
6183 struct port_info *p = adap2pinfo(adap, i);
6186 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
6187 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6188 FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
6189 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
6190 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
6193 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
6198 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
6202 struct fw_port_cmd c;
6203 struct fw_rss_vi_config_cmd rvc;
6205 memset(&c, 0, sizeof(c));
6206 memset(&rvc, 0, sizeof(rvc));
6208 for_each_port(adap, i) {
6209 unsigned int rss_size;
6210 struct port_info *p = adap2pinfo(adap, i);
/* Skip channel numbers not present in the port bitmap. */
6212 while ((adap->params.portvec & (1 << j)) == 0)
/* Query the port's capabilities and current state from firmware. */
6215 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
6216 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6217 FW_PORT_CMD_PORTID_V(j));
6218 c.action_to_len16 = cpu_to_be32(
6219 FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
6221 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Allocate one VI (with one MAC address) on this port. */
6225 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
6232 p->rss_size = rss_size;
6233 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
6234 adap->port[i]->dev_port = j;
6236 ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
6237 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
6238 FW_PORT_CMD_MDIOADDR_G(ret) : -1;
6239 p->port_type = FW_PORT_CMD_PTYPE_G(ret);
6240 p->mod_type = FW_PORT_MOD_TYPE_NA;
/* Fetch the VI's RSS mode; use the _V macro form consistently with
 * t4_init_rss_mode() (the bare FW_RSS_VI_CONFIG_CMD_VIID() form is
 * not part of the _S/_M/_V/_G macro family).
 */
6243 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
6244 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6245 FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
6246 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
6247 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
6250 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
/* Seed the SW link state from the port capabilities we just read. */
6252 init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
6259 * t4_read_cimq_cfg - read CIM queue configuration
6260 * @adap: the adapter
6261 * @base: holds the queue base addresses in bytes
6262 * @size: holds the queue sizes in bytes
6263 * @thres: holds the queue full thresholds in bytes
6265 * Returns the current configuration of the CIM queues, starting with
6266 * the IBQs, then the OBQs.
6268 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
/* T5+ parts have more outbound queues than T4. */
6271 int cim_num_obq = is_t4(adap->params.chip) ?
6272 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
/* Select each IBQ in turn and read back its base/size/threshold. */
6274 for (i = 0; i < CIM_NUM_IBQ; i++) {
6275 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
6277 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
6278 /* value is in 256-byte units */
6279 *base++ = CIMQBASE_G(v) * 256;
6280 *size++ = CIMQSIZE_G(v) * 256;
6281 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
/* Then the OBQs (no full threshold is reported for these). */
6283 for (i = 0; i < cim_num_obq; i++) {
6284 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
6286 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
6287 /* value is in 256-byte units */
6288 *base++ = CIMQBASE_G(v) * 256;
6289 *size++ = CIMQSIZE_G(v) * 256;
6294 * t4_read_cim_ibq - read the contents of a CIM inbound queue
6295 * @adap: the adapter
6296 * @qid: the queue index
6297 * @data: where to store the queue contents
6298 * @n: capacity of @data in 32-bit words
6300 * Reads the contents of the selected CIM queue starting at address 0 up
6301 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
6302 * error and the number of 32-bit words actually read on success.
6304 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
6306 int i, err, attempts;
6308 const unsigned int nwords = CIM_IBQ_SIZE * 4;
/* Only IBQs 0-5 exist; @n must be a multiple of 4 words. */
6310 if (qid > 5 || (n & 3))
6313 addr = qid * nwords;
6317 /* It might take 3-10ms before the IBQ debug read access is allowed.
6318 * Wait for 1 Sec with a delay of 1 usec.
/* Read one word at a time through the IBQ debug interface, waiting for
 * the BUSY flag to clear after each request.
 */
6322 for (i = 0; i < n; i++, addr++) {
6323 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
6325 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
6329 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
/* Disable the debug interface when done. */
6331 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
6336 * t4_read_cim_obq - read the contents of a CIM outbound queue
6337 * @adap: the adapter
6338 * @qid: the queue index
6339 * @data: where to store the queue contents
6340 * @n: capacity of @data in 32-bit words
6342 * Reads the contents of the selected CIM queue starting at address 0 up
6343 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
6344 * error and the number of 32-bit words actually read on success.
6346 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
6349 unsigned int addr, v, nwords;
6350 int cim_num_obq = is_t4(adap->params.chip) ?
6351 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
6353 if ((qid > (cim_num_obq - 1)) || (n & 3))
6356 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
6357 QUENUMSELECT_V(qid));
6358 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
6360 addr = CIMQBASE_G(v) * 64; /* muliple of 256 -> muliple of 4 */
6361 nwords = CIMQSIZE_G(v) * 64; /* same */
6365 for (i = 0; i < n; i++, addr++) {
6366 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
6368 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
6372 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
6374 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
6379 * t4_cim_read - read a block from CIM internal address space
6380 * @adap: the adapter
6381 * @addr: the start address within the CIM address space
6382 * @n: number of words to read
6383 * @valp: where to store the result
6385 * Reads a block of 4-byte words from the CIM intenal address space.
6387 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
6392 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
6395 for ( ; !ret && n--; addr += 4) {
6396 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
6397 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
6400 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
6406 * t4_cim_write - write a block into CIM internal address space
6407 * @adap: the adapter
6408 * @addr: the start address within the CIM address space
6409 * @n: number of words to write
6410 * @valp: set of values to write
6412 * Writes a block of 4-byte words into the CIM intenal address space.
6414 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
6415 const unsigned int *valp)
6419 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
6422 for ( ; !ret && n--; addr += 4) {
6423 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
6424 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
6425 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
/* Convenience wrapper: write a single word into CIM internal address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
6438 * t4_cim_read_la - read CIM LA capture buffer
6439 * @adap: the adapter
6440 * @la_buf: where to store the LA data
6441 * @wrptr: the HW write pointer within the capture buffer
6443 * Reads the contents of the CIM LA buffer with the most recent entry at
6444 * the end of the returned data and with the entry at @wrptr first.
6445 * We try to leave the LA in the running state we find it in.
6447 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
6450 unsigned int cfg, val, idx;
6452 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
6456 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
6457 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
6462 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
6466 idx = UPDBGLAWRPTR_G(val);
6470 for (i = 0; i < adap->params.cim_la_size; i++) {
6471 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
6472 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
6475 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
6478 if (val & UPDBGLARDEN_F) {
6482 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
6485 idx = (idx + 1) & UPDBGLARDPTR_M;
6488 if (cfg & UPDBGLAEN_F) {
6489 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
6490 cfg & ~UPDBGLARDEN_F);
6498 * t4_tp_read_la - read TP LA capture buffer
6499 * @adap: the adapter
6500 * @la_buf: where to store the LA data
6501 * @wrptr: the HW write pointer within the capture buffer
6503 * Reads the contents of the TP LA buffer with the most recent entry at
6504 * the end of the returned data and with the entry at @wrptr first.
6505 * We leave the LA in the running state we find it in.
6507 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
6509 bool last_incomplete;
6510 unsigned int i, cfg, val, idx;
6512 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
6513 if (cfg & DBGLAENABLE_F) /* freeze LA */
6514 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
6515 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
6517 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
6518 idx = DBGLAWPTR_G(val);
6519 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
6520 if (last_incomplete)
6521 idx = (idx + 1) & DBGLARPTR_M;
6526 val &= ~DBGLARPTR_V(DBGLARPTR_M);
6527 val |= adap->params.tp.la_mask;
6529 for (i = 0; i < TPLA_SIZE; i++) {
6530 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
6531 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
6532 idx = (idx + 1) & DBGLARPTR_M;
6535 /* Wipe out last entry if it isn't valid */
6536 if (last_incomplete)
6537 la_buf[TPLA_SIZE - 1] = ~0ULL;
6539 if (cfg & DBGLAENABLE_F) /* restore running state */
6540 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
6541 cfg | adap->params.tp.la_mask);
/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1		/* seconds in same state before warning */
#define SGE_IDMA_WARN_REPEAT 300	/* seconds between repeated warnings */
6555 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
6556 * @adapter: the adapter
6557 * @idma: the adapter IDMA Monitor state
6559 * Initialize the state of an SGE Ingress DMA Monitor.
6561 void t4_idma_monitor_init(struct adapter *adapter,
6562 struct sge_idma_monitor_state *idma)
6564 /* Initialize the state variables for detecting an SGE Ingress DMA
6565 * hang. The SGE has internal counters which count up on each clock
6566 * tick whenever the SGE finds its Ingress DMA State Engines in the
6567 * same state they were on the previous clock tick. The clock used is
6568 * the Core Clock so we have a limit on the maximum "time" they can
6569 * record; typically a very small number of seconds. For instance,
6570 * with a 600MHz Core Clock, we can only count up to a bit more than
6571 * 7s. So we'll synthesize a larger counter in order to not run the
6572 * risk of having the "timers" overflow and give us the flexibility to
6573 * maintain a Hung SGE State Machine of our own which operates across
6574 * a longer time frame.
6576 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
6577 idma->idma_stalled[0] = 0;
6578 idma->idma_stalled[1] = 0;
/**
 * t4_idma_monitor - monitor SGE Ingress DMA state
 * @adapter: the adapter
 * @idma: the adapter IDMA Monitor state
 * @hz: number of ticks/second
 * @ticks: number of ticks since the last IDMA Monitor call
 */
6588 void t4_idma_monitor(struct adapter *adapter,
6589 struct sge_idma_monitor_state *idma,
6592 int i, idma_same_state_cnt[2];
6594 /* Read the SGE Debug Ingress DMA Same State Count registers. These
6595 * are counters inside the SGE which count up on each clock when the
6596 * SGE finds its Ingress DMA State Engines in the same states they
6597 * were in the previous clock. The counters will peg out at
6598 * 0xffffffff without wrapping around so once they pass the 1s
6599 * threshold they'll stay above that till the IDMA state changes.
6601 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
6602 idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
6603 idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
6605 for (i = 0; i < 2; i++) {
6606 u32 debug0, debug11;
6608 /* If the Ingress DMA Same State Counter ("timer") is less
6609 * than 1s, then we can reset our synthesized Stall Timer and
6610 * continue. If we have previously emitted warnings about a
6611 * potential stalled Ingress Queue, issue a note indicating
6612 * that the Ingress Queue has resumed forward progress.
6614 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
6615 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
6616 dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
6617 "resumed after %d seconds\n",
6618 i, idma->idma_qid[i],
6619 idma->idma_stalled[i] / hz);
6620 idma->idma_stalled[i] = 0;
6624 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
6625 * domain. The first time we get here it'll be because we
6626 * passed the 1s Threshold; each additional time it'll be
6627 * because the RX Timer Callback is being fired on its regular
6630 * If the stall is below our Potential Hung Ingress Queue
6631 * Warning Threshold, continue.
6633 if (idma->idma_stalled[i] == 0) {
6634 idma->idma_stalled[i] = hz;
6635 idma->idma_warn[i] = 0;
6637 idma->idma_stalled[i] += ticks;
6638 idma->idma_warn[i] -= ticks;
6641 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
6644 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
6646 if (idma->idma_warn[i] > 0)
6648 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
6650 /* Read and save the SGE IDMA State and Queue ID information.
6651 * We do this every time in case it changes across time ...
6652 * can't be too careful ...
6654 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
6655 debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
6656 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
6658 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
6659 debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
6660 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
6662 dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
6663 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
6664 i, idma->idma_qid[i], idma->idma_state[i],
6665 idma->idma_stalled[i] / hz,
6667 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);