2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/init.h>
36 #include <linux/delay.h>
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
56 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
60 u32 val = t4_read_reg(adapter, reg);
/* !! collapses the masked field to 0/1 so it compares against @polarity */
62 if (!!(val & mask) == polarity) {
/*
 * Convenience wrapper around t4_wait_op_done_val() for callers that do
 * not need the register value observed at completion time.
 */
74 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
88 * Sets a register field specified by the supplied mask to the
 * given value (read-modify-write); bits outside @mask are preserved.
91 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush: read back to force the write out */
101 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
109 * Reads registers that are accessed indirectly through an address/data
 * register pair.
112 static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
 /* select the indirect register via @addr_reg, then latch its value */
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
125 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
127 * @addr_reg: register holding the indirect addresses
128 * @data_reg: register holding the value for the indirect registers
129 * @vals: values to write
130 * @nregs: how many indirect registers to write
131 * @start_idx: address of first indirect register to write
133 * Writes a sequential block of registers that are accessed indirectly
134 * through an address/data register pair.
136 static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 unsigned int data_reg, const u32 *vals,
138 unsigned int nregs, unsigned int start_idx)
 /* select the target register, then supply its new value */
141 t4_write_reg(adap, addr_reg, start_idx++);
142 t4_write_reg(adap, data_reg, *vals++);
148 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 * @nflit counts 64-bit flits; @mbox_addr advances 8 bytes per flit.
150 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
153 for ( ; nflit; nflit--, mbox_addr += 8)
154 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
158 * Handle a FW assertion reported in a mailbox: decode the FW_DEBUG_CMD
 * assert payload and log the source file, line and values.
160 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
162 struct fw_debug_cmd asrt;
 /* copy the debug command out of the mailbox, sizeof(asrt)/8 flits */
164 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
165 dev_alert(adap->pdev_dev,
166 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
167 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
168 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
/*
 * Log the full 64-byte contents of mailbox @mbox as eight 64-bit words,
 * used when a command fails or times out.
 */
171 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
173 dev_err(adap->pdev_dev,
174 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
175 (unsigned long long)t4_read_reg64(adap, data_reg),
176 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
177 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
178 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
179 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
180 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
181 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
182 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
186 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
188 * @mbox: index of the mailbox to use
189 * @cmd: the command to write
190 * @size: command length in bytes
191 * @rpl: where to optionally store the reply
192 * @sleep_ok: if true we may sleep while awaiting command completion
194 * Sends the given command to FW through the selected mailbox and waits
195 * for the FW to execute the command. If @rpl is not %NULL it is used to
196 * store the FW's reply to the command. The command and its optional
197 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
198 * to respond. @sleep_ok determines whether we may sleep while awaiting
199 * the response. If sleeping is allowed we use progressive backoff
202 * The return value is 0 on success or a negative errno on failure. A
203 * failure can happen either because we are not able to execute the
204 * command or FW executes it but signals an error. In the latter case
205 * the return value is the error code indicated by FW (negated).
207 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
208 void *rpl, bool sleep_ok)
 /* progressive backoff delays in ms; the last entry may be reused */
210 static int delay[] = {
211 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
216 int i, ms, delay_idx;
217 const __be64 *p = cmd;
218 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
219 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
 /* commands must be a multiple of 16 bytes and fit the mailbox */
221 if ((size & 15) || size > MBOX_LEN)
 /* retry briefly to claim the mailbox if nobody owns it yet */
224 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
225 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
226 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
228 if (v != MBOX_OWNER_DRV)
229 return v ? -EBUSY : -ETIMEDOUT;
 /* copy the command into the mailbox, 8 bytes at a time */
231 for (i = 0; i < size; i += 8)
232 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
 /* hand the mailbox to FW and mark the message valid */
234 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
235 t4_read_reg(adap, ctl_reg); /* flush write */
240 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
242 ms = delay[delay_idx]; /* last element may repeat */
243 if (delay_idx < ARRAY_SIZE(delay) - 1)
249 v = t4_read_reg(adap, ctl_reg);
250 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
251 if (!(v & MBMSGVALID)) {
252 t4_write_reg(adap, ctl_reg, 0);
256 res = t4_read_reg64(adap, data_reg);
 /* FW may report an assertion instead of a reply */
257 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
258 fw_asrt(adap, data_reg);
259 res = FW_CMD_RETVAL(EIO);
261 get_mbox_rpl(adap, rpl, size / 8, data_reg);
263 if (FW_CMD_RETVAL_GET((int)res))
264 dump_mbox(adap, mbox, data_reg);
265 t4_write_reg(adap, ctl_reg, 0);
 /* FW retval is positive; negate to return a -errno style code */
266 return -FW_CMD_RETVAL_GET((int)res);
 /* timed out waiting for ownership to return to the driver */
270 dump_mbox(adap, mbox, data_reg);
271 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
272 *(const u8 *)cmd, mbox);
277 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
279 * @addr: address of first byte requested
280 * @data: 64 bytes of data containing the requested address
281 * @ecc: where to store the corresponding 64-bit ECC word
283 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
284 * that covers the requested address @addr. If @ecc is not %NULL it
285 * is assigned the 64-bit ECC word for the read data.
287 int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
 /* refuse to start a new BIST read while one is already in flight */
291 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
293 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
294 t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
295 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
296 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
298 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
302 #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
 /* read the 16 32-bit data words high-index first, stored big-endian */
304 for (i = 15; i >= 0; i--)
305 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
307 *ecc = t4_read_reg64(adap, MC_DATA(16));
313 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
315 * @idx: which EDC to access
316 * @addr: address of first byte requested
317 * @data: 64 bytes of data containing the requested address
318 * @ecc: where to store the corresponding 64-bit ECC word
320 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
321 * that covers the requested address @addr. If @ecc is not %NULL it
322 * is assigned the 64-bit ECC word for the read data.
324 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
 /* refuse to start a new BIST read while one is already in flight */
329 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
331 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
332 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
333 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
334 t4_write_reg(adap, EDC_BIST_CMD + idx,
335 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
336 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
340 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
 /* read the 16 32-bit data words high-index first, stored big-endian */
342 for (i = 15; i >= 0; i--)
343 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
345 *ecc = t4_read_reg64(adap, EDC_DATA(16));
/* Expands to one VPD keyword entry: 2-byte keyword, 1-byte length, data. */
350 #define VPD_ENTRY(name, len) \
351 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
354 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * the keyword entries the driver consumes below.
363 VPD_ENTRY(pn, 16); /* part number */
364 VPD_ENTRY(ec, EC_LEN); /* EC level */
365 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
366 VPD_ENTRY(na, 12); /* MAC address base */
367 VPD_ENTRY(port_type, 8); /* port types */
368 VPD_ENTRY(gpio, 14); /* GPIO usage */
369 VPD_ENTRY(cclk, 6); /* core clock */
370 VPD_ENTRY(port_addr, 8); /* port MDIO addresses */
371 VPD_ENTRY(rv, 1); /* csum */
372 u32 pad; /* for multiple-of-4 sizing and alignment */
375 #define EEPROM_STAT_ADDR 0x7bfc
379 * t4_seeprom_wp - enable/disable EEPROM write protection
380 * @adapter: the adapter
381 * @enable: whether to enable or disable write protection
383 * Enables or disables write protection on the serial EEPROM.
 * Returns 0 on success or a negative error from pci_write_vpd().
385 int t4_seeprom_wp(struct adapter *adapter, bool enable)
 /* 0xc presumably sets the WP bits in the EEPROM status word -- verify */
387 unsigned int v = enable ? 0xc : 0;
388 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
389 return ret < 0 ? ret : 0;
393 * get_vpd_params - read VPD parameters from VPD EEPROM
394 * @adapter: adapter to read
395 * @p: where to store the parameters
397 * Reads card parameters stored in VPD EEPROM.
 * Returns 0 on success or a negative error code.
399 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
403 u8 *q = (u8 *)&vpd, csum;
405 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd);
 /* sum all bytes up to and including the stored checksum byte */
409 for (csum = 0; q <= vpd.rv_data; q++)
413 dev_err(adapter->pdev_dev,
414 "corrupted VPD EEPROM, actual csum %u\n", csum);
 /* core clock is stored as a decimal string */
418 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
419 memcpy(p->id, vpd.id_data, sizeof(vpd.id_data));
421 memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data));
423 memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data));
428 /* serial flash and firmware constants */
430 SF_ATTEMPTS = 10, /* max retries for SF operations */
432 /* flash command opcodes */
433 SF_PROG_PAGE = 2, /* program page */
434 SF_WR_DISABLE = 4, /* disable writes */
435 SF_RD_STATUS = 5, /* read status register */
436 SF_WR_ENABLE = 6, /* enable writes */
437 SF_RD_DATA_FAST = 0xb, /* read flash */
438 SF_ERASE_SECTOR = 0xd8, /* erase sector */
 /* firmware image region within the flash, expressed in sectors */
440 FW_START_SEC = 8, /* first flash sector for FW */
441 FW_END_SEC = 15, /* last flash sector for FW */
442 FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
443 FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
447 * sf1_read - read data from the serial flash
448 * @adapter: the adapter
449 * @byte_cnt: number of bytes to read
450 * @cont: whether another operation will be chained
451 * @lock: whether to lock SF for PL access only
452 * @valp: where to store the read data
454 * Reads up to 4 bytes of data from the serial flash. The location of
455 * the read needs to be specified prior to calling this by issuing the
456 * appropriate commands to the serial flash.
 * Returns 0 on success or a negative error code.
458 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
463 if (!byte_cnt || byte_cnt > 4)
 /* bail out if the SF interface is still busy with a prior op */
465 if (t4_read_reg(adapter, SF_OP) & BUSY)
 /* convert the boolean flags into their SF_OP bit encodings */
467 cont = cont ? SF_CONT : 0;
468 lock = lock ? SF_LOCK : 0;
469 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
470 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
472 *valp = t4_read_reg(adapter, SF_DATA);
477 * sf1_write - write data to the serial flash
478 * @adapter: the adapter
479 * @byte_cnt: number of bytes to write
480 * @cont: whether another operation will be chained
481 * @lock: whether to lock SF for PL access only
482 * @val: value to write
484 * Writes up to 4 bytes of data to the serial flash. The location of
485 * the write needs to be specified prior to calling this by issuing the
486 * appropriate commands to the serial flash.
 * Returns 0 on success or a negative error code.
488 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
491 if (!byte_cnt || byte_cnt > 4)
 /* bail out if the SF interface is still busy with a prior op */
493 if (t4_read_reg(adapter, SF_OP) & BUSY)
 /* convert the boolean flags into their SF_OP bit encodings */
495 cont = cont ? SF_CONT : 0;
496 lock = lock ? SF_LOCK : 0;
497 t4_write_reg(adapter, SF_DATA, val)
498 t4_write_reg(adapter, SF_OP, lock |
499 cont | BYTECNT(byte_cnt - 1) | OP_WR);
500 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
504 * flash_wait_op - wait for a flash operation to complete
505 * @adapter: the adapter
506 * @attempts: max number of polls of the status register
507 * @delay: delay between polls in ms
509 * Wait for a flash operation to complete by polling the status register.
 * Returns 0 when the flash reports idle or a negative error code.
511 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
 /* issue RD_STATUS (chained) then read back one status byte */
517 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
518 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
530 * t4_read_flash - read words from serial flash
531 * @adapter: the adapter
532 * @addr: the start address for the read
533 * @nwords: how many 32-bit words to read
534 * @data: where to store the read data
535 * @byte_oriented: whether to store data as bytes or as words
537 * Read the specified number of 32-bit words from the serial flash.
538 * If @byte_oriented is set the read data is stored as a byte array
539 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * native endianness.
542 static int t4_read_flash(struct adapter *adapter, unsigned int addr,
543 unsigned int nwords, u32 *data, int byte_oriented)
 /* reads must stay within the flash and be 32-bit aligned */
547 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
 /* build the FAST READ command: byte-swapped address + opcode byte */
550 addr = swab32(addr) | SF_RD_DATA_FAST;
552 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
553 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
556 for ( ; nwords; nwords--, data++) {
 /* keep chaining until the last word, then lock/unlock accordingly */
557 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
559 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
563 *data = htonl(*data);
569 * t4_write_flash - write up to a page of data to the serial flash
570 * @adapter: the adapter
571 * @addr: the start address to write
572 * @n: length of data to write in bytes
573 * @data: the data to write
575 * Writes up to a page of data (256 bytes) to the serial flash starting
576 * at the given address. All the data must be written to the same page.
 * Returns 0 on success or a negative error code.
578 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
579 unsigned int n, const u8 *data)
583 unsigned int i, c, left, val, offset = addr & 0xff;
 /* the write must lie within the flash and within a single page */
585 if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE)
 /* build PROG_PAGE command: byte-swapped address + opcode byte */
588 val = swab32(addr) | SF_PROG_PAGE;
590 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
591 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
 /* stream the payload up to 4 bytes per SF write */
594 for (left = n; left; left -= c) {
596 for (val = 0, i = 0; i < c; ++i)
597 val = (val << 8) + *data++;
599 ret = sf1_write(adapter, c, c != left, 1, val);
603 ret = flash_wait_op(adapter, 5, 1);
607 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
609 /* Read the page to verify the write succeeded */
610 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
 /* @data was advanced past the written bytes; back up to compare */
614 if (memcmp(data - n, (u8 *)buf + offset, n)) {
615 dev_err(adapter->pdev_dev,
616 "failed to correctly write the flash page at %#x\n",
623 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
628 * get_fw_version - read the firmware version
629 * @adapter: the adapter
630 * @vers: where to place the version
632 * Reads the FW version from flash.
 * The version lives in the fw_hdr at the start of the FW flash region.
634 static int get_fw_version(struct adapter *adapter, u32 *vers)
636 return t4_read_flash(adapter,
637 FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
642 * get_tp_version - read the TP microcode version
643 * @adapter: the adapter
644 * @vers: where to place the version
646 * Reads the TP microcode version from flash.
 * Like get_fw_version(), this reads a field of the flash-resident fw_hdr.
648 static int get_tp_version(struct adapter *adapter, u32 *vers)
650 return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
656 * t4_check_fw_version - check if the FW is compatible with this driver
657 * @adapter: the adapter
659 * Checks if an adapter's FW is compatible with the driver. Returns 0
660 * if there's exact match, a negative error if the version could not be
661 * read or there's a major version mismatch, and a positive value if the
662 * expected major version is found but there's a minor version mismatch.
664 int t4_check_fw_version(struct adapter *adapter)
667 int ret, major, minor, micro;
 /* cache FW and TP versions in adapter->params as a side effect */
669 ret = get_fw_version(adapter, &adapter->params.fw_vers);
671 ret = get_tp_version(adapter, &adapter->params.tp_vers);
673 ret = t4_read_flash(adapter,
674 FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
679 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
680 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
681 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
682 memcpy(adapter->params.api_vers, api_vers,
683 sizeof(adapter->params.api_vers));
685 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
686 dev_err(adapter->pdev_dev,
687 "card FW has major version %u, driver wants %u\n",
688 major, FW_VERSION_MAJOR);
692 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
693 return 0; /* perfect match */
695 /* Minor/micro version mismatch. Report it but often it's OK. */
700 * t4_flash_erase_sectors - erase a range of flash sectors
701 * @adapter: the adapter
702 * @start: the first sector to erase
703 * @end: the last sector to erase
705 * Erases the sectors in the given inclusive range.
 * Returns 0 on success or a negative error code.
707 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
711 while (start <= end) {
 /* enable writes, issue ERASE_SECTOR with the sector number, wait */
712 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
713 (ret = sf1_write(adapter, 4, 0, 1,
714 SF_ERASE_SECTOR | (start << 8))) != 0 ||
715 (ret = flash_wait_op(adapter, 5, 500)) != 0) {
716 dev_err(adapter->pdev_dev,
717 "erase of flash sector %d failed, error %d\n",
723 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
728 * t4_load_fw - download firmware
 * @adap: the adapter
730 * @fw_data: the firmware image to write
 * @size: image size in bytes
733 * Write the supplied firmware image to the card's serial flash.
 * Validates size, header and checksum before touching the flash.
735 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
740 u8 first_page[SF_PAGE_SIZE];
741 const u32 *p = (const u32 *)fw_data;
742 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
745 dev_err(adap->pdev_dev, "FW image has no data\n");
749 dev_err(adap->pdev_dev,
750 "FW image size not multiple of 512 bytes\n");
 /* the header records the image length in 512-byte units */
753 if (ntohs(hdr->len512) * 512 != size) {
754 dev_err(adap->pdev_dev,
755 "FW image size differs from size in FW header\n");
758 if (size > FW_MAX_SIZE) {
759 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
 /* a valid image sums (32-bit words) to 0xffffffff */
764 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
767 if (csum != 0xffffffff) {
768 dev_err(adap->pdev_dev,
769 "corrupted firmware image, checksum %#x\n", csum);
773 i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */
774 ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
779 * We write the correct version at the end so the driver can see a bad
780 * version if the FW write fails. Start by writing a copy of the
781 * first page with a bad version.
783 memcpy(first_page, fw_data, SF_PAGE_SIZE);
784 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
785 ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
 /* write the remaining pages of the image */
790 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
791 addr += SF_PAGE_SIZE;
792 fw_data += SF_PAGE_SIZE;
793 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
 /* finally write the real version to mark the image good */
798 ret = t4_write_flash(adap,
799 FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
800 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
803 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
/* capabilities we are willing to advertise during autonegotiation */
808 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
809 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
812 * t4_link_start - apply link configuration to MAC/PHY
813 * @phy: the PHY to setup
814 * @mac: the MAC to setup
815 * @lc: the requested link configuration
817 * Set up a port's MAC and PHY according to a desired link configuration.
818 * - If the PHY can auto-negotiate first decide what to advertise, then
819 * enable/disable auto-negotiation as desired, and reset.
820 * - If the PHY does not auto-negotiate just reset it.
821 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
822 * otherwise do it later based on the outcome of auto-negotiation.
824 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
825 struct link_config *lc)
827 struct fw_port_cmd c;
828 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
 /* translate the requested pause settings into FW capability bits */
831 if (lc->requested_fc & PAUSE_RX)
832 fc |= FW_PORT_CAP_FC_RX;
833 if (lc->requested_fc & PAUSE_TX)
834 fc |= FW_PORT_CAP_FC_TX;
836 memset(&c, 0, sizeof(c));
837 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
838 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
839 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
 /* no autoneg support: program the fixed capabilities directly */
842 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
843 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
844 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
845 } else if (lc->autoneg == AUTONEG_DISABLE) {
846 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
847 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
 /* autoneg enabled: advertise and let negotiation pick the rest */
849 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
851 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
855 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
857 * @mbox: mbox to use for the FW command
 * @port: the port to operate on
860 * Restarts autonegotiation for the selected port.
 * Issues an L1_CFG port command advertising only the ANEG capability.
862 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
864 struct fw_port_cmd c;
866 memset(&c, 0, sizeof(c));
867 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
868 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
869 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
871 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
872 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
876 * t4_set_vlan_accel - configure HW VLAN extraction
 * @adap: the adapter
878 * @ports: bitmap of adapter ports to operate on
879 * @on: enable (1) or disable (0) HW VLAN extraction
881 * Enables or disables HW extraction of VLAN tags for the ports specified
882 * by @ports. @ports is a bitmap with the ith bit designating the port
883 * associated with the ith adapter channel.
885 void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on)
 /* shift the port bitmap into the VLANEXTENABLE field position */
887 ports <<= VLANEXTENABLE_SHIFT;
888 t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0);
 /*
  * Describes one interrupt condition for the table-driven handler:
  * which bits to test, what to log, which stat to bump, and severity.
  */
892 unsigned int mask; /* bits to check in interrupt status */
893 const char *msg; /* message to print or NULL */
894 short stat_idx; /* stat counter to increment or -1 */
895 unsigned short fatal; /* whether the condition reported is fatal */
899 * t4_handle_intr_status - table driven interrupt handler
900 * @adapter: the adapter that generated the interrupt
901 * @reg: the interrupt status register to process
902 * @acts: table of interrupt actions
904 * A table driven interrupt handler that applies a set of masks to an
905 * interrupt status word and performs the corresponding actions if the
906 * interrupts described by the mask have occurred. The actions include
907 * optionally emitting a warning or alert message. The table is terminated
908 * by an entry specifying mask 0. Returns the number of fatal interrupt
 * conditions encountered.
911 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
912 const struct intr_info *acts)
915 unsigned int mask = 0;
916 unsigned int status = t4_read_reg(adapter, reg);
918 for ( ; acts->mask; ++acts) {
919 if (!(status & acts->mask))
 /* fatal conditions get an unconditional alert */
923 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
924 status & acts->mask);
 /* non-fatal messages are rate limited to avoid log floods */
925 } else if (acts->msg && printk_ratelimit())
926 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
927 status & acts->mask);
931 if (status) /* clear processed interrupts */
932 t4_write_reg(adapter, reg, status);
937 * Interrupt handler for the PCIE module. Processes three cause
 * registers (system bus agent, PCIe port, PCIe core) via the
 * table-driven handler and escalates any fatal condition.
939 static void pcie_intr_handler(struct adapter *adapter)
 /* UTL system bus agent status conditions */
941 static struct intr_info sysbus_intr_info[] = {
942 { RNPP, "RXNP array parity error", -1, 1 },
943 { RPCP, "RXPC array parity error", -1, 1 },
944 { RCIP, "RXCIF array parity error", -1, 1 },
945 { RCCP, "Rx completions control array parity error", -1, 1 },
946 { RFTP, "RXFT array parity error", -1, 1 },
 /* UTL PCI Express port status conditions */
949 static struct intr_info pcie_port_intr_info[] = {
950 { TPCP, "TXPC array parity error", -1, 1 },
951 { TNPP, "TXNP array parity error", -1, 1 },
952 { TFTP, "TXFT array parity error", -1, 1 },
953 { TCAP, "TXCA array parity error", -1, 1 },
954 { TCIP, "TXCIF array parity error", -1, 1 },
955 { RCAP, "RXCA array parity error", -1, 1 },
956 { OTDD, "outbound request TLP discarded", -1, 1 },
957 { RDPE, "Rx data parity error", -1, 1 },
958 { TDUE, "Tx uncorrectable data error", -1, 1 },
 /* PCIe core interrupt cause conditions */
961 static struct intr_info pcie_intr_info[] = {
962 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
963 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
964 { MSIDATAPERR, "MSI data parity error", -1, 1 },
965 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
966 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
967 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
968 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
969 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
970 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
971 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
972 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
973 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
974 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
975 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
976 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
977 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
978 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
979 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
980 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
981 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
982 { FIDPERR, "PCI FID parity error", -1, 1 },
983 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
984 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
985 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
986 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
987 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
988 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
989 { PCIESINT, "PCI core secondary fault", -1, 1 },
990 { PCIEPINT, "PCI core primary fault", -1, 1 },
991 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
 /* accumulate fatal counts from all three cause registers */
997 fat = t4_handle_intr_status(adapter,
998 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1000 t4_handle_intr_status(adapter,
1001 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1002 pcie_port_intr_info) +
1003 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
1005 t4_fatal_err(adapter);
1009 * TP interrupt handler. Any reported condition is treated as fatal.
1011 static void tp_intr_handler(struct adapter *adapter)
1013 static struct intr_info tp_intr_info[] = {
1014 { 0x3fffffff, "TP parity error", -1, 1 },
1015 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1019 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1020 t4_fatal_err(adapter);
1024 * SGE interrupt handler. Handles parity errors reported via
 * SGE_INT_CAUSE1/2 separately from the tabled SGE_INT_CAUSE3 conditions.
1026 static void sge_intr_handler(struct adapter *adapter)
1030 static struct intr_info sge_intr_info[] = {
1031 { ERR_CPL_EXCEED_IQE_SIZE,
1032 "SGE received CPL exceeding IQE size", -1, 1 },
1033 { ERR_INVALID_CIDX_INC,
1034 "SGE GTS CIDX increment too large", -1, 0 },
1035 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1036 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1037 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1038 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1039 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1041 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1043 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1045 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1047 { ERR_ING_CTXT_PRIO,
1048 "SGE too many priority ingress contexts", -1, 0 },
1049 { ERR_EGR_CTXT_PRIO,
1050 "SGE too many priority egress contexts", -1, 0 },
1051 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1052 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
 /* combine the two 32-bit parity-error cause registers into one 64-bit word */
1056 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1057 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1059 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1060 (unsigned long long)v);
 /* acknowledge the parity errors by writing the causes back */
1061 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1062 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1065 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1067 t4_fatal_err(adapter);
1071 * CIM interrupt handler. Covers both the host cause register and the
 * uP access cause register; any reported condition is fatal.
1073 static void cim_intr_handler(struct adapter *adapter)
1075 static struct intr_info cim_intr_info[] = {
1076 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1077 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1078 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1079 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1080 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1081 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1082 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1085 static struct intr_info cim_upintr_info[] = {
1086 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1087 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1088 { ILLWRINT, "CIM illegal write", -1, 1 },
1089 { ILLRDINT, "CIM illegal read", -1, 1 },
1090 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1091 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1092 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1093 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1094 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1095 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1096 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1097 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1098 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1099 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1100 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1101 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1102 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1103 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1104 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1105 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1106 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1107 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1108 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1109 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1110 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1111 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1112 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1113 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
 /* sum fatal counts from the host and uP-access cause registers */
1119 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1121 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1124 t4_fatal_err(adapter);
1128 * ULP RX interrupt handler.
/* All visible ULP-RX causes (low 23 bits) are treated as parity errors. */
1130 static void ulprx_intr_handler(struct adapter *adapter)
1132 static struct intr_info ulprx_intr_info[] = {
1133 { 0x7fffff, "ULPRX parity error", -1, 1 },
/* A reported fatal cause brings the adapter down. */
1137 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1138 t4_fatal_err(adapter);
1142 * ULP TX interrupt handler.
/* Per-channel PBL bound violations plus a catch-all parity mask. */
1144 static void ulptx_intr_handler(struct adapter *adapter)
1146 static struct intr_info ulptx_intr_info[] = {
1147 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1149 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1151 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1153 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1155 { 0xfffffff, "ULPTX parity error", -1, 1 },
/* Fatal causes escalate to t4_fatal_err(). */
1159 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1160 t4_fatal_err(adapter);
1164 * PM TX interrupt handler.
/* PM-TX causes: oversized/zero-length pcmds, framing and parity errors. */
1166 static void pmtx_intr_handler(struct adapter *adapter)
1168 static struct intr_info pmtx_intr_info[] = {
1169 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1170 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1171 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1172 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1173 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1174 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1175 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1176 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1177 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1181 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1182 t4_fatal_err(adapter);
1186 * PM RX interrupt handler.
/* PM-RX causes mirror the PM-TX set for the receive direction. */
1188 static void pmrx_intr_handler(struct adapter *adapter)
1190 static struct intr_info pmrx_intr_info[] = {
1191 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1192 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1193 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1194 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1195 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1196 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1200 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1201 t4_fatal_err(adapter);
1205 * CPL switch interrupt handler.
/* CPL-switch causes: overflow, framing, and parity conditions. */
1207 static void cplsw_intr_handler(struct adapter *adapter)
1209 static struct intr_info cplsw_intr_info[] = {
1210 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1211 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1212 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1213 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1214 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1215 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1219 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1220 t4_fatal_err(adapter);
1224 * LE interrupt handler.
/* Lookup Engine causes; LIP miss/error entries are non-fatal (last field 0),
 * parity and command errors are fatal. */
1226 static void le_intr_handler(struct adapter *adap)
1228 static struct intr_info le_intr_info[] = {
1229 { LIPMISS, "LE LIP miss", -1, 0 },
1230 { LIP0, "LE 0 LIP error", -1, 0 },
1231 { PARITYERR, "LE parity error", -1, 1 },
1232 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1233 { REQQPARERR, "LE request queue parity error", -1, 1 },
/* NOTE(review): the action taken on a fatal status is not visible in this
 * gapped listing — confirm against the full source. */
1237 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1242 * MPS interrupt handler.
/*
 * Decodes every MPS sub-block cause register (Rx, Tx, TRC, statistics
 * SRAM/FIFOs, classifier), then acks the top-level MPS_INT_CAUSE bits.
 */
1244 static void mps_intr_handler(struct adapter *adapter)
1246 static struct intr_info mps_rx_intr_info[] = {
1247 { 0xffffff, "MPS Rx parity error", -1, 1 },
1250 static struct intr_info mps_tx_intr_info[] = {
1251 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1252 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1253 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1254 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1255 { BUBBLE, "MPS Tx underflow", -1, 1 },
1256 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1257 { FRMERR, "MPS Tx framing error", -1, 1 },
1260 static struct intr_info mps_trc_intr_info[] = {
1261 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1262 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1263 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1266 static struct intr_info mps_stat_sram_intr_info[] = {
1267 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1270 static struct intr_info mps_stat_tx_intr_info[] = {
1271 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1274 static struct intr_info mps_stat_rx_intr_info[] = {
1275 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1278 static struct intr_info mps_cls_intr_info[] = {
1279 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1280 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1281 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
/* Sum fatal hits across all sub-block cause registers. */
1287 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1289 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1291 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1292 mps_trc_intr_info) +
1293 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1294 mps_stat_sram_intr_info) +
1295 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1296 mps_stat_tx_intr_info) +
1297 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1298 mps_stat_rx_intr_info) +
1299 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
/* Ack the aggregated MPS cause bits; the read-back flushes the write. */
1302 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1303 RXINT | TXINT | STATINT);
1304 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1306 t4_fatal_err(adapter);
/* Memory-controller interrupt bits of interest: parity plus ECC CE/UE. */
1309 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1312 * EDC/MC interrupt handler.
/*
 * @idx selects the memory: MEM_EDC0/MEM_EDC1 use per-EDC registers, anything
 * larger falls through to the MC registers.
 */
1314 static void mem_intr_handler(struct adapter *adapter, int idx)
1316 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1318 unsigned int addr, cnt_addr, v;
1320 if (idx <= MEM_EDC1) {
1321 addr = EDC_REG(EDC_INT_CAUSE, idx);
1322 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1324 addr = MC_INT_CAUSE;
1325 cnt_addr = MC_ECC_STATUS;
1328 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1329 if (v & PERR_INT_CAUSE)
1330 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
/* Correctable ECC: report (rate-limited) and clear the error counter. */
1332 if (v & ECC_CE_INT_CAUSE) {
1333 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1335 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1336 if (printk_ratelimit())
1337 dev_warn(adapter->pdev_dev,
1338 "%u %s correctable ECC data error%s\n",
1339 cnt, name[idx], cnt > 1 ? "s" : "");
1341 if (v & ECC_UE_INT_CAUSE)
1342 dev_alert(adapter->pdev_dev,
1343 "%s uncorrectable ECC data error\n", name[idx]);
/* Ack causes; parity or uncorrectable ECC is fatal. */
1345 t4_write_reg(adapter, addr, v);
1346 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1347 t4_fatal_err(adapter);
1351 * MA interrupt handler.
/* Reports MA parity and address wrap-around errors, then acks the causes.
 * NOTE(review): listing is gapped; the fatal path is not visible here. */
1353 static void ma_intr_handler(struct adapter *adap)
1355 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1357 if (status & MEM_PERR_INT_CAUSE)
1358 dev_alert(adap->pdev_dev,
1359 "MA parity error, parity status %#x\n",
1360 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1361 if (status & MEM_WRAP_INT_CAUSE) {
1362 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1363 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1364 "client %u to address %#x\n",
1365 MEM_WRAP_CLIENT_NUM_GET(v),
1366 MEM_WRAP_ADDRESS_GET(v) << 4);
1368 t4_write_reg(adap, MA_INT_CAUSE, status);
1373 * SMB interrupt handler.
/* SMBus FIFO parity causes; action on fatal status not visible in this
 * gapped listing. */
1375 static void smb_intr_handler(struct adapter *adap)
1377 static struct intr_info smb_intr_info[] = {
1378 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1379 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1380 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1384 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1389 * NC-SI interrupt handler.
/* NC-SI parity causes; action on fatal status not visible in this
 * gapped listing. */
1391 static void ncsi_intr_handler(struct adapter *adap)
1393 static struct intr_info ncsi_intr_info[] = {
1394 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1395 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1396 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1397 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1401 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1406 * XGMAC interrupt handler.
/* Per-port MAC handler: reads the port's cause register, reports Tx/Rx FIFO
 * parity errors, and acks only the bits it handled. */
1408 static void xgmac_intr_handler(struct adapter *adap, int port)
1410 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1412 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1416 if (v & TXFIFO_PRTY_ERR)
1417 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1419 if (v & RXFIFO_PRTY_ERR)
1420 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1422 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1427 * PL interrupt handler.
/* Top-level PL causes; action on fatal status not visible in this
 * gapped listing. */
1429 static void pl_intr_handler(struct adapter *adap)
1431 static struct intr_info pl_intr_info[] = {
1432 { FATALPERR, "T4 fatal parity error", -1, 1 },
1433 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1437 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
/* PF-local interrupt sources vs. the full set of global module sources. */
1441 #define PF_INTR_MASK (PFSW | PFCIM)
1442 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1443 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1444 CPL_SWITCH | SGE | ULP_TX)
1447 * t4_slow_intr_handler - control path interrupt handler
1448 * @adapter: the adapter
1450 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1451 * The designation 'slow' is because it involves register reads, while
1452 * data interrupts typically don't involve any MMIOs.
1454 int t4_slow_intr_handler(struct adapter *adapter)
1456 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
/* Nothing of ours pending — bail out early. */
1458 if (!(cause & GLBL_INTR_MASK))
/* Dispatch each pending module to its dedicated handler.  NOTE(review):
 * the gapped listing hides most of the "if (cause & ...)" guard lines. */
1461 cim_intr_handler(adapter);
1463 mps_intr_handler(adapter);
1465 ncsi_intr_handler(adapter);
1467 pl_intr_handler(adapter);
1469 smb_intr_handler(adapter);
1471 xgmac_intr_handler(adapter, 0);
1473 xgmac_intr_handler(adapter, 1);
1474 if (cause & XGMAC_KR0)
1475 xgmac_intr_handler(adapter, 2);
1476 if (cause & XGMAC_KR1)
1477 xgmac_intr_handler(adapter, 3);
1479 pcie_intr_handler(adapter);
1481 mem_intr_handler(adapter, MEM_MC);
1483 mem_intr_handler(adapter, MEM_EDC0);
1485 mem_intr_handler(adapter, MEM_EDC1);
1487 le_intr_handler(adapter);
1489 tp_intr_handler(adapter);
1491 ma_intr_handler(adapter);
1493 pmtx_intr_handler(adapter);
1495 pmrx_intr_handler(adapter);
1497 ulprx_intr_handler(adapter);
1498 if (cause & CPL_SWITCH)
1499 cplsw_intr_handler(adapter);
1501 sge_intr_handler(adapter);
1503 ulptx_intr_handler(adapter);
1505 /* Clear the interrupts just processed for which we are the master. */
1506 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1507 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1512 * t4_intr_enable - enable interrupts
1513 * @adapter: the adapter whose interrupts should be enabled
1515 * Enable PF-specific interrupts for the calling function and the top-level
1516 * interrupt concentrator for global interrupts. Interrupts are already
1517 * enabled at each module, here we just enable the roots of the interrupt
1520 * Note: this function should be called only when the driver manages
1521 * non PF-specific interrupts from the various HW modules. Only one PCI
1522 * function at a time should be doing this.
1524 void t4_intr_enable(struct adapter *adapter)
/* PL_WHOAMI tells us which PF we are running as. */
1526 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
/* Select which SGE error conditions may raise interrupts. */
1528 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1529 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1530 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1531 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1532 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1533 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1534 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
/* Enable the PF-local root and route global interrupts to this PF. */
1536 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1537 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1541 * t4_intr_disable - disable interrupts
1542 * @adapter: the adapter whose interrupts should be disabled
1544 * Disable interrupts. We only disable the top-level interrupt
1545 * concentrators. The caller must be a PCI function managing global
1548 void t4_intr_disable(struct adapter *adapter)
1550 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
/* Mirror of t4_intr_enable(): clear the PF root and our PL_INT_MAP0 bit. */
1552 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1553 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1557 * t4_intr_clear - clear all interrupts
1558 * @adapter: the adapter whose interrupts should be cleared
1560 * Clears all interrupts. The caller must be a PCI function managing
1561 * global interrupts.
1563 void t4_intr_clear(struct adapter *adapter)
/* Every per-module cause register to be write-1-cleared in one sweep. */
1565 static const unsigned int cause_reg[] = {
1566 SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
1567 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1568 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1569 PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
1571 MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
1572 EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
1573 CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
1574 MYPF_REG(CIM_PF_HOST_INT_CAUSE),
1576 ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
1577 PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
1578 MPS_RX_PERR_INT_CAUSE,
1580 MYPF_REG(PL_PF_INT_CAUSE),
1587 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
1588 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
/* Finally clear the top-level concentrator and flush with a read-back. */
1590 t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
1591 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1595 * hash_mac_addr - return the hash value of a MAC address
1596 * @addr: the 48-bit Ethernet MAC address
1598 * Hashes a MAC address according to the hash function used by HW inexact
1599 * (hash) address matching.
1601 static int hash_mac_addr(const u8 *addr)
/* Split the 48-bit address into two 24-bit halves; the folding of a and b
 * into the final hash is not visible in this gapped listing. */
1603 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1604 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1612 * t4_config_rss_range - configure a portion of the RSS mapping table
1613 * @adapter: the adapter
1614 * @mbox: mbox to use for the FW command
1615 * @viid: virtual interface whose RSS subtable is to be written
1616 * @start: start entry in the table to write
1617 * @n: how many table entries to write
1618 * @rspq: values for the response queue lookup table
1619 * @nrspq: number of values in @rspq
1621 * Programs the selected part of the VI's RSS mapping table with the
1622 * provided values. If @nrspq < @n the supplied values are used repeatedly
1623 * until the full table range is populated.
1625 * The caller must ensure the values in @rspq are in the range allowed for
1628 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1629 int start, int n, const u16 *rspq, unsigned int nrspq)
/* rsp walks @rspq and wraps at rsp_end to repeat values (see kernel-doc). */
1632 const u16 *rsp = rspq;
1633 const u16 *rsp_end = rspq + nrspq;
1634 struct fw_rss_ind_tbl_cmd cmd;
/* Build the FW_RSS_IND_TBL_CMD header once; per-chunk fields set below. */
1636 memset(&cmd, 0, sizeof(cmd));
1637 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1638 FW_CMD_REQUEST | FW_CMD_WRITE |
1639 FW_RSS_IND_TBL_CMD_VIID(viid));
1640 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1642 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1644 int nq = min(n, 32);
1645 __be32 *qp = &cmd.iq0_to_iq2;
1647 cmd.niqid = htons(nq);
1648 cmd.startidx = htons(start);
/* Pack three queue ids per 32-bit word, wrapping rsp as needed. */
1656 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1657 if (++rsp >= rsp_end)
1659 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1660 if (++rsp >= rsp_end)
1662 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1663 if (++rsp >= rsp_end)
/* Issue the chunk to the firmware via the mailbox. */
1670 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1678 * t4_config_glbl_rss - configure the global RSS mode
1679 * @adapter: the adapter
1680 * @mbox: mbox to use for the FW command
1681 * @mode: global RSS mode
1682 * @flags: mode-specific flags
1684 * Sets the global RSS mode.
1686 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1689 struct fw_rss_glb_config_cmd c;
1691 memset(&c, 0, sizeof(c));
1692 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1693 FW_CMD_REQUEST | FW_CMD_WRITE);
1694 c.retval_len16 = htonl(FW_LEN16(c));
/* Only MANUAL and BASICVIRTUAL modes are populated here; BASICVIRTUAL
 * additionally carries @flags. Other modes fall through (handling of that
 * case is not visible in this gapped listing). */
1695 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1696 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1697 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1698 c.u.basicvirtual.mode_pkd =
1699 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1700 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1703 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1706 /* Read an RSS table row */
/* Triggers a lookup of @row via TP_RSS_LKP_TABLE, then polls for
 * LKPTBLROWVLD; the result lands in *val. */
1707 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
1709 t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
1710 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
1715 * t4_read_rss - read the contents of the RSS mapping table
1716 * @adapter: the adapter
1717 * @map: holds the contents of the RSS mapping table
1719 * Reads the contents of the RSS hash->queue mapping table.
1721 int t4_read_rss(struct adapter *adapter, u16 *map)
/* Each HW row packs two queue entries, hence RSS_NENTRIES / 2 reads. */
1726 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
1727 ret = rd_rss_row(adapter, i, &val);
1730 *map++ = LKPTBLQUEUE0_GET(val);
1731 *map++ = LKPTBLQUEUE1_GET(val);
1737 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1738 * @adap: the adapter
1739 * @v4: holds the TCP/IP counter values
1740 * @v6: holds the TCP/IPv6 counter values
1742 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1743 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1745 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1746 struct tp_tcp_stats *v6)
/* Scratch buffer sized to cover the OUT_RST..RXT_SEG_LO MIB index range. */
1748 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
/* Helpers to index val[] by MIB name and to join HI/LO 32-bit halves. */
1750 #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1751 #define STAT(x) val[STAT_IDX(x)]
1752 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
/* IPv4 counters, read indirectly starting at TP_MIB_TCP_OUT_RST. */
1755 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1756 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1757 v4->tcpOutRsts = STAT(OUT_RST);
1758 v4->tcpInSegs = STAT64(IN_SEG);
1759 v4->tcpOutSegs = STAT64(OUT_SEG);
1760 v4->tcpRetransSegs = STAT64(RXT_SEG);
/* IPv6 counters, same layout starting at TP_MIB_TCP_V6OUT_RST. */
1763 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1764 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1765 v6->tcpOutRsts = STAT(OUT_RST);
1766 v6->tcpInSegs = STAT64(IN_SEG);
1767 v6->tcpOutSegs = STAT64(OUT_SEG);
1768 v6->tcpRetransSegs = STAT64(RXT_SEG);
1776 * t4_tp_get_err_stats - read TP's error MIB counters
1777 * @adap: the adapter
1778 * @st: holds the counter values
1780 * Returns the values of TP's error counters.
1782 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
/* Each indirect read pulls a fixed-size run of MIB counters directly into
 * the corresponding tp_err_stats array. */
1784 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
1785 12, TP_MIB_MAC_IN_ERR_0);
1786 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
1787 8, TP_MIB_TNL_CNG_DROP_0);
1788 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
1789 4, TP_MIB_TNL_DROP_0);
1790 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
1791 4, TP_MIB_OFD_VLN_DROP_0);
1792 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
1793 4, TP_MIB_TCP_V6IN_ERR_0);
1794 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
1795 2, TP_MIB_OFD_ARP_DROP);
1799 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1800 * @adap: the adapter
1801 * @mtus: where to store the MTU values
1802 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1804 * Reads the HW path MTU table.
1806 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
/* Writing MTUINDEX(0xff) with entry i selects that entry for read-back;
 * the subsequent read returns the value (and width) fields. */
1811 for (i = 0; i < NMTUS; ++i) {
1812 t4_write_reg(adap, TP_MTU_TABLE,
1813 MTUINDEX(0xff) | MTUVALUE(i));
1814 v = t4_read_reg(adap, TP_MTU_TABLE);
1815 mtus[i] = MTUVALUE_GET(v);
/* mtu_log is only filled when the caller supplied it (guard line is not
 * visible in this gapped listing — see the kernel-doc above). */
1817 mtu_log[i] = MTUWIDTH_GET(v);
1822 * init_cong_ctrl - initialize congestion control parameters
1823 * @a: the alpha values for congestion control
1824 * @b: the beta values for congestion control
1826 * Initialize the congestion control parameters.
1828 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
/* Fixed alpha/beta tables; intermediate index assignments fall in gaps of
 * this listing. */
1830 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1855 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1858 b[13] = b[14] = b[15] = b[16] = 3;
1859 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1860 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1865 /* The minimum additive increment value for the congestion control table */
1866 #define CC_MIN_INCR 2U
1869 * t4_load_mtus - write the MTU and congestion control HW tables
1870 * @adap: the adapter
1871 * @mtus: the values for the MTU table
1872 * @alpha: the values for the congestion control alpha parameter
1873 * @beta: the values for the congestion control beta parameter
1875 * Write the HW MTU table with the supplied MTUs and the high-speed
1876 * congestion control table with the supplied alpha, beta, and MTUs.
1877 * We write the two tables together because the additive increments
1878 * depend on the MTUs.
1880 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1881 const unsigned short *alpha, const unsigned short *beta)
/* Average packet counts per congestion-control window index. */
1883 static const unsigned int avg_pkts[NCCTRL_WIN] = {
1884 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
1885 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
1886 28672, 40960, 57344, 81920, 114688, 163840, 229376
1891 for (i = 0; i < NMTUS; ++i) {
1892 unsigned int mtu = mtus[i];
1893 unsigned int log2 = fls(mtu);
/* Round the width to the nearest power of two for the HW log field. */
1895 if (!(mtu & ((1 << log2) >> 2))) /* round */
1897 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
1898 MTUWIDTH(log2) | MTUVALUE(mtu));
/* Per-window additive increment, floored elsewhere by CC_MIN_INCR;
 * the 40 subtracted from mtu is presumably header overhead — confirm. */
1900 for (w = 0; w < NCCTRL_WIN; ++w) {
1903 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
1906 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
1907 (w << 16) | (beta[w] << 13) | inc);
1913 * t4_set_trace_filter - configure one of the tracing filters
1914 * @adap: the adapter
1915 * @tp: the desired trace filter parameters
1916 * @idx: which filter to configure
1917 * @enable: whether to enable or disable the filter
1919 * Configures one of the tracing filters available in HW. If @enable is
1920 * %0 @tp is not examined and may be %NULL.
1922 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
1923 int idx, int enable)
1925 int i, ofst = idx * 4;
1926 u32 data_reg, mask_reg, cfg;
1927 u32 multitrc = TRCMULTIFILTER;
/* Disable path: clearing the match-control register turns the tracer off. */
1930 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
/* Validate all filter parameters against their HW field widths; snap_len
 * above 256 is only allowed on tracer 0. */
1934 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
1935 tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
1936 tp->snap_len > 9600 || (idx && tp->snap_len > 256))
1939 if (tp->snap_len > 256) { /* must be tracer 0 */
1940 if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
1941 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
1942 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
1943 return -EINVAL; /* other tracers are enabled */
/* Conversely, a large capture already active on tracer 0 blocks others. */
1946 i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
1947 if (TFCAPTUREMAX_GET(i) > 256 &&
1948 (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
1952 /* stop the tracer we'll be changing */
1953 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1955 /* disable tracing globally if running in the wrong single/multi mode */
1956 cfg = t4_read_reg(adap, MPS_TRC_CFG);
1957 if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
1958 t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
1959 t4_read_reg(adap, MPS_TRC_CFG); /* flush */
/* Wait for the trace FIFO to drain before reprogramming (loop body falls
 * in a gap of this listing). */
1961 if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
1965 * At this point either the tracing is enabled and in the right mode or
/* Program the per-filter match data and don't-care masks. */
1969 idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
1970 data_reg = MPS_TRC_FILTER0_MATCH + idx;
1971 mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
1973 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
1974 t4_write_reg(adap, data_reg, tp->data[i]);
1975 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
1977 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
1978 TFCAPTUREMAX(tp->snap_len) |
1979 TFMINPKTSIZE(tp->min_len));
1980 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
1981 TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
1982 TFPORT(tp->port) | TFEN |
1983 (tp->invert ? TFINVERTMATCH : 0));
/* Re-enable tracing in the requested single/multi filter mode. */
1985 cfg &= ~TRCMULTIFILTER;
1986 t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
1987 out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1992 * t4_get_trace_filter - query one of the tracing filters
1993 * @adap: the adapter
1994 * @tp: the current trace filter parameters
1995 * @idx: which trace filter to query
1996 * @enabled: non-zero if the filter is enabled
1998 * Returns the current settings of one of the HW tracing filters.
2000 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
2004 int i, ofst = idx * 4;
2005 u32 data_reg, mask_reg;
/* Read back the two per-filter control registers and unpack their fields —
 * the exact inverse of the programming done in t4_set_trace_filter(). */
2007 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
2008 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
2010 *enabled = !!(ctla & TFEN);
2011 tp->snap_len = TFCAPTUREMAX_GET(ctlb);
2012 tp->min_len = TFMINPKTSIZE_GET(ctlb);
2013 tp->skip_ofst = TFOFFSET_GET(ctla);
2014 tp->skip_len = TFLENGTH_GET(ctla);
2015 tp->invert = !!(ctla & TFINVERTMATCH);
2016 tp->port = TFPORT_GET(ctla);
/* Recover the match data; HW stores the don't-care mask inverted. */
2018 ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
2019 data_reg = MPS_TRC_FILTER0_MATCH + ofst;
2020 mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
2022 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
2023 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
2024 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
2029 * get_mps_bg_map - return the buffer groups associated with a port
2030 * @adap: the adapter
2031 * @idx: the port index
2033 * Returns a bitmap indicating which MPS buffer groups are associated
2034 * with the given port. Bit i is set if buffer group i is used by the
2037 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
/* The split of the 4 buffer groups depends on how many ports are
 * configured (NUMPORTS field of MPS_CMN_CTL). */
2039 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2042 return idx == 0 ? 0xf : 0;
/* Two-port case: ports 0/1 get groups {0,1} and {2,3} respectively. */
2044 return idx < 2 ? (3 << (2 * idx)) : 0;
2049 * t4_get_port_stats - collect port statistics
2050 * @adap: the adapter
2051 * @idx: the port index
2052 * @p: the stats structure to fill
2054 * Collect statistics related to the given port from HW.
2056 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2058 u32 bgmap = get_mps_bg_map(adap, idx)
/* GET_STAT reads a 64-bit per-port MPS counter; GET_STAT_COM reads a
 * common (shared) MPS counter. */
2060 #define GET_STAT(name) \
2061 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
2062 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2064 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2065 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2066 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2067 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2068 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2069 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2070 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2071 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2072 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2073 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2074 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2075 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2076 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2077 p->tx_drop = GET_STAT(TX_PORT_DROP);
2078 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2079 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2080 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2081 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2082 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2083 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2084 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2085 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2086 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2088 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2089 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2090 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2091 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2092 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2093 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2094 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2095 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2096 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2097 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2098 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2099 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2100 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2101 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2102 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2103 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2104 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2105 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2106 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2107 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2108 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2109 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2110 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2111 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2112 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2113 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2114 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
/* Buffer-group counters only apply to groups owned by this port. */
2116 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2117 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2118 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2119 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2120 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2121 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2122 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2123 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2130 * t4_get_lb_stats - collect loopback port statistics
2131 * @adap: the adapter
2132 * @idx: the loopback port index
2133 * @p: the stats structure to fill
2135 * Return HW statistics for the given loopback port.
2137 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
2139 u32 bgmap = get_mps_bg_map(adap, idx);
/* Same accessor pattern as t4_get_port_stats(), but over the LB_PORT
 * counter block. */
2141 #define GET_STAT(name) \
2142 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
2143 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2145 p->octets = GET_STAT(BYTES);
2146 p->frames = GET_STAT(FRAMES);
2147 p->bcast_frames = GET_STAT(BCAST);
2148 p->mcast_frames = GET_STAT(MCAST);
2149 p->ucast_frames = GET_STAT(UCAST);
2150 p->error_frames = GET_STAT(ERROR);
2152 p->frames_64 = GET_STAT(64B);
2153 p->frames_65_127 = GET_STAT(65B_127B);
2154 p->frames_128_255 = GET_STAT(128B_255B);
2155 p->frames_256_511 = GET_STAT(256B_511B);
2156 p->frames_512_1023 = GET_STAT(512B_1023B);
2157 p->frames_1024_1518 = GET_STAT(1024B_1518B);
2158 p->frames_1519_max = GET_STAT(1519B_MAX);
2159 p->drop = t4_read_reg(adap, PORT_REG(idx,
2160 MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
/* Buffer-group counters only apply to groups owned by this port. */
2162 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
2163 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
2164 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
2165 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
2166 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
2167 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
2168 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
2169 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
2176 * t4_wol_magic_enable - enable/disable magic packet WoL
2177 * @adap: the adapter
2178 * @port: the physical port index
2179 * @addr: MAC address expected in magic packets, %NULL to disable
2181 * Enables/disables magic packet wake-on-LAN for the selected port.
2183 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
/* NOTE(review): original lines 2184-2186 are missing from this listing;
 * they presumably hold the `const u8 *addr)` parameter, the opening brace
 * and an `if (addr) {` guard.  Without that guard the addr[] dereferences
 * below would NULL-deref when WoL is being disabled — confirm against the
 * full file before assuming a bug. */
/* Program the expected MAC: bytes 2-5 in the LO register, 0-1 in HI. */
2187 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2188 (addr[2] << 24) | (addr[3] << 16) |
2189 (addr[4] << 8) | addr[5]);
2190 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2191 (addr[0] << 8) | addr[1]);
/* Set or clear the MAGICEN bit depending on whether an address was given. */
2193 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2194 addr ? MAGICEN : 0);
2198 * t4_wol_pat_enable - enable/disable pattern-based WoL
2199 * @adap: the adapter
2200 * @port: the physical port index
2201 * @map: bitmap of which HW pattern filters to set
2202 * @mask0: byte mask for bytes 0-63 of a packet
2203 * @mask1: byte mask for bytes 64-127 of a packet
2204 * @crc: Ethernet CRC for selected bytes
2205 * @enable: enable/disable switch
2207 * Sets the pattern filters indicated in @map to mask out the bytes
2208 * specified in @mask0/@mask1 in received packets and compare the CRC of
2209 * the resulting packet against @crc. If @enable is %true pattern-based
2210 * WoL is enabled, otherwise disabled.
2212 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2213 u64 mask0, u64 mask1, unsigned int crc, bool enable)
/* NOTE(review): several original lines (2214-2217, 2219-2224, 2230,
 * 2232-2234, 2240-2242, 2247-2250, 2252-2253) are absent from this
 * listing — presumably the locals, the early "disable" path, the
 * `if (!(map & 1)) continue;` skip, the error returns on BUSY and the
 * final return.  Extraction artifact; confirm against the full file. */
2218 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
2225 #define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
/* DATA1-3 hold the upper mask words; they are identical for every pattern
 * so they are written once, outside the per-pattern loop. */
2227 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2228 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2229 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
/* Program each requested pattern filter; @map is consumed one bit per
 * iteration. */
2231 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2235 /* write byte masks */
2236 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2237 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2238 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2239 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
/* write CRC: CRC slots start at EPIO address 32. */
2243 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2244 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2245 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2246 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
/* All patterns programmed: turn on pattern-match WoL for the port. */
2251 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
/* Initialize the common header of a FW mailbox command: opcode, the
 * REQUEST flag, the READ/WRITE direction, and the command length in
 * 16-byte units.  @cmd is the short command name (e.g. HELLO), @rd_wr is
 * READ or WRITE.  NOTE(review): line 2259 is missing from this listing;
 * it presumably closes the macro with `} while (0)`. */
2255 #define INIT_CMD(var, cmd, rd_wr) do { \
2256 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2257 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2258 (var).retval_len16 = htonl(FW_LEN16(var)); \
2262 * t4_mdio_rd - read a PHY register through MDIO
2263 * @adap: the adapter
2264 * @mbox: mailbox to use for the FW command
2265 * @phy_addr: the PHY address
2266 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2267 * @reg: the register to read
2268 * @valp: where to store the value
2270 * Issues a FW command through the given mailbox to read a PHY register.
2272 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2273 unsigned int mmd, unsigned int reg, u16 *valp)
2276 struct fw_ldst_cmd c;
/* Build a load/store command targeting the MDIO address space. */
2278 memset(&c, 0, sizeof(c));
2279 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2280 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2281 c.cycles_to_len16 = htonl(FW_LEN16(c));
2282 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2283 FW_LDST_CMD_MMD(mmd));
2284 c.u.mdio.raddr = htons(reg);
/* Execute synchronously; the FW writes the reply back into c. */
2286 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* NOTE(review): lines 2285, 2287 and 2289-2290 are missing here —
 * presumably `int ret;`, `if (ret == 0)` and `return ret;`. */
2288 *valp = ntohs(c.u.mdio.rval);
2293 * t4_mdio_wr - write a PHY register through MDIO
2294 * @adap: the adapter
2295 * @mbox: mailbox to use for the FW command
2296 * @phy_addr: the PHY address
2297 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2298 * @reg: the register to write
2299 * @valp: value to write
2301 * Issues a FW command through the given mailbox to write a PHY register.
2303 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2304 unsigned int mmd, unsigned int reg, u16 val)
2306 struct fw_ldst_cmd c;
/* Mirror of t4_mdio_rd but with FW_CMD_WRITE and the value to store. */
2308 memset(&c, 0, sizeof(c));
2309 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2310 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2311 c.cycles_to_len16 = htonl(FW_LEN16(c));
2312 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2313 FW_LDST_CMD_MMD(mmd));
2314 c.u.mdio.raddr = htons(reg);
2315 c.u.mdio.rval = htons(val);
/* No reply payload needed, hence the NULL response buffer. */
2317 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2321 * t4_fw_hello - establish communication with FW
2322 * @adap: the adapter
2323 * @mbox: mailbox to use for the FW command
2324 * @evt_mbox: mailbox to receive async FW events
2325 * @master: specifies the caller's willingness to be the device master
2326 * @state: returns the current device state
2328 * Issues a command to establish communication with FW.
2330 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2331 enum dev_master master, enum dev_state *state)
2334 struct fw_hello_cmd c;
2336 INIT_CMD(c, HELLO, WRITE);
/* Encode mastership: forbid it (MASTER_CANT), force it (MASTER_MUST,
 * claiming @mbox as the master mailbox), or let FW pick (0xff). */
2337 c.err_to_mbasyncnot = htonl(
2338 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2339 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2340 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2341 FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2343 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success optionally decode the device state from the reply flags. */
2344 if (ret == 0 && state) {
2345 u32 v = ntohl(c.err_to_mbasyncnot);
2346 if (v & FW_HELLO_CMD_INIT)
2347 *state = DEV_STATE_INIT;
2348 else if (v & FW_HELLO_CMD_ERR)
2349 *state = DEV_STATE_ERR;
/* NOTE(review): line 2350 (`else`) and 2352-2354 (closing braces and
 * `return ret;`) are missing from this listing — extraction artifact. */
2351 *state = DEV_STATE_UNINIT;
2357 * t4_fw_bye - end communication with FW
2358 * @adap: the adapter
2359 * @mbox: mailbox to use for the FW command
2361 * Issues a command to terminate communication with FW.
2363 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2365 struct fw_bye_cmd c;
/* INIT_CMD fully populates the BYE command; no extra fields needed. */
2367 INIT_CMD(c, BYE, WRITE);
2368 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2372 * t4_init_cmd - ask FW to initialize the device
2373 * @adap: the adapter
2374 * @mbox: mailbox to use for the FW command
2376 * Issues a command to FW to partially initialize the device. This
2377 * performs initialization that generally doesn't depend on user input.
2379 int t4_early_init(struct adapter *adap, unsigned int mbox)
2381 struct fw_initialize_cmd c;
/* NOTE(review): the kernel-doc name above says t4_init_cmd but the
 * function is t4_early_init — the doc header should be renamed to match. */
2383 INIT_CMD(c, INITIALIZE, WRITE);
2384 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2388 * t4_fw_reset - issue a reset to FW
2389 * @adap: the adapter
2390 * @mbox: mailbox to use for the FW command
2391 * @reset: specifies the type of reset to perform
2393 * Issues a reset command of the specified type to FW.
2395 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2397 struct fw_reset_cmd c;
2399 INIT_CMD(c, RESET, WRITE);
/* @reset is passed through verbatim as the reset-type value. */
2400 c.val = htonl(reset);
2401 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2405 * t4_query_params - query FW or device parameters
2406 * @adap: the adapter
2407 * @mbox: mailbox to use for the FW command
2410 * @nparams: the number of parameters
2411 * @params: the parameter names
2412 * @val: the parameter values
2414 * Reads the value of FW or device parameters. Up to 7 parameters can be
2417 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2418 unsigned int vf, unsigned int nparams, const u32 *params,
2422 struct fw_params_cmd c;
/* The command body is an array of {mnem, val} 32-bit pairs; p walks it
 * in steps of 2 (mnem slots on the way out, val slots on the way back). */
2423 __be32 *p = &c.param[0].mnem;
/* NOTE(review): lines 2424-2427 are missing — presumably the
 * `if (nparams > 7) return -EINVAL;` bound check and locals. */
2428 memset(&c, 0, sizeof(c));
2429 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2430 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2431 FW_PARAMS_CMD_VFN(vf));
2432 c.retval_len16 = htonl(FW_LEN16(c));
2433 for (i = 0; i < nparams; i++, p += 2)
2434 *p = htonl(*params++);
2436 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success, harvest each returned value from the val slots. */
2438 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2444 * t4_set_params - sets FW or device parameters
2445 * @adap: the adapter
2446 * @mbox: mailbox to use for the FW command
2449 * @nparams: the number of parameters
2450 * @params: the parameter names
2451 * @val: the parameter values
2453 * Sets the value of FW or device parameters. Up to 7 parameters can be
2454 * specified at once.
2456 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2457 unsigned int vf, unsigned int nparams, const u32 *params,
2460 struct fw_params_cmd c;
/* p walks the {mnem, val} pair array; both slots are filled here since
 * this is a write. */
2461 __be32 *p = &c.param[0].mnem;
/* NOTE(review): lines 2462-2465 and 2471 are missing — presumably the
 * nparams bound check and the `while (nparams--)` loop header. */
2466 memset(&c, 0, sizeof(c));
2467 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2468 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2469 FW_PARAMS_CMD_VFN(vf));
2470 c.retval_len16 = htonl(FW_LEN16(c));
2472 *p++ = htonl(*params++);
2473 *p++ = htonl(*val++);
2476 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2480 * t4_cfg_pfvf - configure PF/VF resource limits
2481 * @adap: the adapter
2482 * @mbox: mailbox to use for the FW command
2483 * @pf: the PF being configured
2484 * @vf: the VF being configured
2485 * @txq: the max number of egress queues
2486 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2487 * @rxqi: the max number of interrupt-capable ingress queues
2488 * @rxq: the max number of interruptless ingress queues
2489 * @tc: the PCI traffic class
2490 * @vi: the max number of virtual interfaces
2491 * @cmask: the channel access rights mask for the PF/VF
2492 * @pmask: the port access rights mask for the PF/VF
2493 * @nexact: the maximum number of exact MPS filters
2494 * @rcaps: read capabilities
2495 * @wxcaps: write/execute capabilities
2497 * Configures resource limits and capabilities for a physical or virtual
2500 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2501 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2502 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2503 unsigned int vi, unsigned int cmask, unsigned int pmask,
2504 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2506 struct fw_pfvf_cmd c;
/* Pack all resource limits into the PFVF command's bit fields. */
2508 memset(&c, 0, sizeof(c));
2509 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2510 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2511 FW_PFVF_CMD_VFN(vf));
2512 c.retval_len16 = htonl(FW_LEN16(c));
2513 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2514 FW_PFVF_CMD_NIQ(rxq));
2515 c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
2516 FW_PFVF_CMD_PMASK(pmask) |
2517 FW_PFVF_CMD_NEQ(txq));
2518 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2519 FW_PFVF_CMD_NEXACTF(nexact));
2520 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2521 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2522 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2523 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2527 * t4_alloc_vi - allocate a virtual interface
2528 * @adap: the adapter
2529 * @mbox: mailbox to use for the FW command
2530 * @port: physical port associated with the VI
2531 * @pf: the PF owning the VI
2532 * @vf: the VF owning the VI
2533 * @nmac: number of MAC addresses needed (1 to 5)
2534 * @mac: the MAC addresses of the VI
2535 * @rss_size: size of RSS table slice associated with this VI
2537 * Allocates a virtual interface for the given physical port. If @mac is
2538 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2539 * @mac should be large enough to hold @nmac Ethernet addresses, they are
2540 * stored consecutively so the space needed is @nmac * 6 bytes.
2541 * Returns a negative error number or the non-negative VI id.
2543 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2544 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2545 unsigned int *rss_size)
2550 memset(&c, 0, sizeof(c));
2551 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2552 FW_CMD_WRITE | FW_CMD_EXEC |
2553 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2554 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2555 c.portid_pkd = FW_VI_CMD_PORTID(port);
2558 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Copy the FW-assigned MAC addresses out of the reply.  The switch-style
 * fall-through copies nmac3/nmac2/nmac1 for higher @nmac values, then the
 * primary MAC; each address is 6 bytes, laid out consecutively in @mac.
 * NOTE(review): the interleaving case/fallthrough lines (2560-2575) are
 * partly missing from this listing — confirm against the full file. */
2563 memcpy(mac, c.mac, sizeof(c.mac));
2566 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
2568 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
2570 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
2572 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
/* Report the RSS slice size if the caller asked for it, then return the
 * new VI id from the reply. */
2576 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
2577 return ntohs(c.viid_pkd);
2581 * t4_free_vi - free a virtual interface
2582 * @adap: the adapter
2583 * @mbox: mailbox to use for the FW command
2584 * @pf: the PF owning the VI
2585 * @vf: the VF owning the VI
2586 * @viid: virtual interface identifiler
2588 * Free a previously allocated virtual interface.
2590 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2591 unsigned int vf, unsigned int viid)
2595 memset(&c, 0, sizeof(c));
2596 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2597 FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
/* NOTE(review): line 2598 (presumably `FW_VI_CMD_VFN(vf));`) is missing
 * from this listing — extraction artifact. */
2599 c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
2600 c.viid_pkd = htons(FW_VI_CMD_VIID(viid));
2601 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2605 * t4_set_rxmode - set Rx properties of a virtual interface
2606 * @adap: the adapter
2607 * @mbox: mailbox to use for the FW command
2609 * @mtu: the new MTU or -1
2610 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2611 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2612 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
2613 * @sleep_ok: if true we may sleep while awaiting command completion
2615 * Sets Rx properties of a virtual interface.
2617 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2618 int mtu, int promisc, int all_multi, int bcast, bool sleep_ok)
2620 struct fw_vi_rxmode_cmd c;
/* convert to FW values */
/* -1 ("no change") maps to each field's all-ones mask / NO_CHG value.
 * NOTE(review): the `if (x < 0)` guard lines (2623, 2625, 2627, 2629)
 * are missing from this listing — extraction artifact. */
2624 mtu = FW_RXMODE_MTU_NO_CHG;
2626 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2628 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2630 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
2632 memset(&c, 0, sizeof(c));
2633 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2634 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2635 c.retval_len16 = htonl(FW_LEN16(c));
2636 c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2637 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2638 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2639 FW_VI_RXMODE_CMD_BROADCASTEN(bcast));
/* _meat variant so the caller controls whether we may sleep. */
2640 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2644 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2645 * @adap: the adapter
2646 * @mbox: mailbox to use for the FW command
2648 * @free: if true any existing filters for this VI id are first removed
2649 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2650 * @addr: the MAC address(es)
2651 * @idx: where to store the index of each allocated filter
2652 * @hash: pointer to hash address filter bitmap
2653 * @sleep_ok: call is allowed to sleep
2655 * Allocates an exact-match filter for each of the supplied addresses and
2656 * sets it to the corresponding address. If @idx is not %NULL it should
2657 * have at least @naddr entries, each of which will be set to the index of
2658 * the filter allocated for the corresponding MAC address. If a filter
2659 * could not be allocated for an address its index is set to 0xffff.
2660 * If @hash is not %NULL addresses that fail to allocate an exact filter
2661 * are hashed and update the hash filter bitmap pointed at by @hash.
2663 * Returns a negative error number or the number of filters allocated.
2665 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2666 unsigned int viid, bool free, unsigned int naddr,
2667 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2670 struct fw_vi_mac_cmd c;
2671 struct fw_vi_mac_exact *p;
/* NOTE(review): the naddr bound check and locals (lines ~2672-2675)
 * are missing from this listing. */
2676 memset(&c, 0, sizeof(c));
2677 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2678 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2679 FW_VI_MAC_CMD_VIID(viid));
/* Command length: header plus one 16-byte unit per two exact entries. */
2680 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2681 FW_CMD_LEN16((naddr + 2) / 2));
/* Request an ADD_MAC allocation for each supplied address. */
2683 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2684 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2685 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2686 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2689 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
/* Walk the reply: indices >= NEXACT_MAC mean allocation failed for that
 * address; report 0xffff and optionally fold it into the hash bitmap. */
2693 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2694 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2697 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2698 if (index < NEXACT_MAC)
2701 *hash |= (1 << hash_mac_addr(addr[i]));
2707 * t4_change_mac - modifies the exact-match filter for a MAC address
2708 * @adap: the adapter
2709 * @mbox: mailbox to use for the FW command
2711 * @idx: index of existing filter for old value of MAC address, or -1
2712 * @addr: the new MAC address value
2713 * @persist: whether a new MAC allocation should be persistent
2714 * @add_smt: if true also add the address to the HW SMT
2716 * Modifies an exact-match filter and sets it to the new MAC address.
2717 * Note that in general it is not possible to modify the value of a given
2718 * filter so the generic way to modify an address filter is to free the one
2719 * being used by the old address value and allocate a new filter for the
2720 * new address value. @idx can be -1 if the address is a new addition.
2722 * Returns a negative error number or the index of the filter with the new
2725 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2726 int idx, const u8 *addr, bool persist, bool add_smt)
2729 struct fw_vi_mac_cmd c;
/* NOTE(review): p aliases c.u.exact before the memset below clears c —
 * fine because p is only dereferenced after the memset. */
2730 struct fw_vi_mac_exact *p = c.u.exact;
2732 if (idx < 0) /* new allocation */
2733 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2734 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2736 memset(&c, 0, sizeof(c));
2737 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2738 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2739 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2740 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2741 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2742 FW_VI_MAC_CMD_IDX(idx));
2743 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2745 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success the reply carries the (possibly newly allocated) filter
 * index; >= NEXACT_MAC indicates the allocation failed. */
2747 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2748 if (ret >= NEXACT_MAC)
2755 * t4_set_addr_hash - program the MAC inexact-match hash filter
2756 * @adap: the adapter
2757 * @mbox: mailbox to use for the FW command
2759 * @ucast: whether the hash filter should also match unicast addresses
2760 * @vec: the value to be written to the hash filter
2761 * @sleep_ok: call is allowed to sleep
2763 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2765 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2766 bool ucast, u64 vec, bool sleep_ok)
2768 struct fw_vi_mac_cmd c;
2770 memset(&c, 0, sizeof(c));
/* NOTE(review): FW_VI_ENABLE_CMD_VIID is used here inside a
 * FW_VI_MAC_CMD — every sibling in this file uses FW_VI_MAC_CMD_VIID.
 * Harmless only if the two macros share the same field layout; confirm
 * against the fw definitions and make it consistent. */
2771 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2772 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
2773 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2774 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
/* NOTE(review): line 2775 (presumably `FW_CMD_LEN16(1));`) is missing
 * from this listing — extraction artifact. */
2776 c.u.hash.hashvec = cpu_to_be64(vec);
2777 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2781 * t4_enable_vi - enable/disable a virtual interface
2782 * @adap: the adapter
2783 * @mbox: mailbox to use for the FW command
2785 * @rx_en: 1=enable Rx, 0=disable Rx
2786 * @tx_en: 1=enable Tx, 0=disable Tx
2788 * Enables/disables a virtual interface.
2790 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2791 bool rx_en, bool tx_en)
2793 struct fw_vi_enable_cmd c;
2795 memset(&c, 0, sizeof(c));
2796 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2797 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
/* IEN/EEN are ingress(Rx)/egress(Tx) enable bits. */
2798 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2799 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2800 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2804 * t4_identify_port - identify a VI's port by blinking its LED
2805 * @adap: the adapter
2806 * @mbox: mailbox to use for the FW command
2808 * @nblinks: how many times to blink LED at 2.5 Hz
2810 * Identifies a VI's port by blinking its LED.
2812 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2813 unsigned int nblinks)
2815 struct fw_vi_enable_cmd c;
/* NOTE(review): unlike the sibling VI commands, no memset(&c, 0, ...)
 * is visible before the fields are filled; if it is not on a dropped
 * line, uninitialized stack bytes are sent to FW — confirm against the
 * full file. */
2817 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2818 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2819 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2820 c.blinkdur = htons(nblinks);
2821 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2825 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
2826 * @adap: the adapter
2827 * @mbox: mailbox to use for the FW command
2828 * @start: %true to enable the queues, %false to disable them
2829 * @pf: the PF owning the queues
2830 * @vf: the VF owning the queues
2831 * @iqid: ingress queue id
2832 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2833 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2835 * Starts or stops an ingress queue and its associated FLs, if any.
2837 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
2838 unsigned int pf, unsigned int vf, unsigned int iqid,
2839 unsigned int fl0id, unsigned int fl1id)
2843 memset(&c, 0, sizeof(c));
2844 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2845 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
/* Exactly one of IQSTART/IQSTOP is set, driven by @start. */
2847 c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
2848 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
2849 c.iqid = htons(iqid);
2850 c.fl0id = htons(fl0id);
2851 c.fl1id = htons(fl1id);
2852 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2856 * t4_iq_free - free an ingress queue and its FLs
2857 * @adap: the adapter
2858 * @mbox: mailbox to use for the FW command
2859 * @pf: the PF owning the queues
2860 * @vf: the VF owning the queues
2861 * @iqtype: the ingress queue type
2862 * @iqid: ingress queue id
2863 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2864 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2866 * Frees an ingress queue and its associated FLs, if any.
2868 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2869 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2870 unsigned int fl0id, unsigned int fl1id)
2874 memset(&c, 0, sizeof(c));
2875 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2876 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2878 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
/* The type field distinguishes e.g. freelist-interrupt queues. */
2879 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2880 c.iqid = htons(iqid);
2881 c.fl0id = htons(fl0id);
2882 c.fl1id = htons(fl1id);
2883 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2887 * t4_eth_eq_free - free an Ethernet egress queue
2888 * @adap: the adapter
2889 * @mbox: mailbox to use for the FW command
2890 * @pf: the PF owning the queue
2891 * @vf: the VF owning the queue
2892 * @eqid: egress queue id
2894 * Frees an Ethernet egress queue.
2896 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2897 unsigned int vf, unsigned int eqid)
2899 struct fw_eq_eth_cmd c;
2901 memset(&c, 0, sizeof(c));
2902 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2903 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2904 FW_EQ_ETH_CMD_VFN(vf));
2905 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2906 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2907 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2911 * t4_ctrl_eq_free - free a control egress queue
2912 * @adap: the adapter
2913 * @mbox: mailbox to use for the FW command
2914 * @pf: the PF owning the queue
2915 * @vf: the VF owning the queue
2916 * @eqid: egress queue id
2918 * Frees a control egress queue.
2920 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2921 unsigned int vf, unsigned int eqid)
2923 struct fw_eq_ctrl_cmd c;
2925 memset(&c, 0, sizeof(c));
2926 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2927 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2928 FW_EQ_CTRL_CMD_VFN(vf));
2929 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2930 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2931 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2935 * t4_ofld_eq_free - free an offload egress queue
2936 * @adap: the adapter
2937 * @mbox: mailbox to use for the FW command
2938 * @pf: the PF owning the queue
2939 * @vf: the VF owning the queue
2940 * @eqid: egress queue id
2942 * Frees an offload egress queue.
2944 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2945 unsigned int vf, unsigned int eqid)
2947 struct fw_eq_ofld_cmd c;
2949 memset(&c, 0, sizeof(c));
2950 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2951 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2952 FW_EQ_OFLD_CMD_VFN(vf));
2953 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2954 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2955 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2959 * t4_handle_fw_rpl - process a FW reply message
2960 * @adap: the adapter
2961 * @rpl: start of the FW message
2963 * Processes a FW message, such as link state change messages.
2965 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
/* The FW opcode is the first byte of the message. */
2967 u8 opcode = *(const u8 *)rpl;
2969 if (opcode == FW_PORT_CMD) { /* link/module state change message */
2970 int speed = 0, fc = 0;
2971 const struct fw_port_cmd *p = (void *)rpl;
/* Translate the FW channel number into our port index/state. */
2972 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
2973 int port = adap->chan_map[chan];
2974 struct port_info *pi = adap2pinfo(adap, port);
2975 struct link_config *lc = &pi->link_cfg;
2976 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
2977 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
2978 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
/* Decode pause settings and negotiated speed from the status word.
 * NOTE(review): the fc |= PAUSE_RX/PAUSE_TX and speed = SPEED_100/1000
 * assignment lines (2981, 2983, 2985, 2987) are missing from this
 * listing — extraction artifact. */
2980 if (stat & FW_PORT_CMD_RXPAUSE)
2982 if (stat & FW_PORT_CMD_TXPAUSE)
2984 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
2986 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
2988 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
2989 speed = SPEED_10000;
/* Notify the OS layer only on an actual change of link state or of the
 * attached module type. */
2991 if (link_ok != lc->link_ok || speed != lc->speed ||
2992 fc != lc->fc) { /* something changed */
2993 lc->link_ok = link_ok;
2996 t4_os_link_changed(adap, port, link_ok);
2998 if (mod != pi->mod_type) {
3000 t4_os_portmod_changed(adap, port);
/* Read the negotiated PCIe link speed and width into @p from the PCIe
 * capability's Link Status register.  NOTE(review): lines 3008-3009 and
 * 3011-3012 are missing from this listing — presumably `u16 val;`, the
 * opening brace and an `if (pcie_cap) {` guard; without the guard a
 * device lacking a PCIe capability would read from offset PCI_EXP_LNKSTA
 * of config space 0 — confirm against the full file. */
3006 static void __devinit get_pci_mode(struct adapter *adapter,
3007 struct pci_params *p)
3010 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
3013 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3015 p->speed = val & PCI_EXP_LNKSTA_CLS;
/* Negotiated link width lives in bits 9:4 of LNKSTA. */
3016 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3021 * init_link_config - initialize a link's SW state
3022 * @lc: structure holding the link state
3023 * @caps: link capabilities
3025 * Initializes the SW state maintained for each link, including the link's
3026 * capabilities and default speed/flow-control/autonegotiation settings.
3028 static void __devinit init_link_config(struct link_config *lc,
3031 lc->supported = caps;
3032 lc->requested_speed = 0;
/* Default flow control: both directions on. */
3034 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
/* If the PHY can autonegotiate, advertise everything we support and let
 * autoneg also govern pause; otherwise run fixed. */
3035 if (lc->supported & FW_PORT_CAP_ANEG) {
3036 lc->advertising = lc->supported & ADVERT_MASK;
3037 lc->autoneg = AUTONEG_ENABLE;
3038 lc->requested_fc |= PAUSE_AUTONEG;
3040 lc->advertising = 0;
3041 lc->autoneg = AUTONEG_DISABLE;
/* Probe whether the chip responds on its register interface: PL_WHOAMI
 * reads as all-ones while the device is not ready.  NOTE(review): lines
 * 3048-3049 are missing from this listing — presumably `return 0;` for
 * the fast path and a delay (e.g. msleep) before the retry read, which
 * is what makes the second read meaningful — confirm against the full
 * file. */
3045 static int __devinit wait_dev_ready(struct adapter *adap)
3047 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3050 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3054 * t4_prep_adapter - prepare SW and HW for operation
3055 * @adapter: the adapter
3056 * @reset: if true perform a HW reset
3058 * Initialize adapter SW state for the various HW modules, set initial
3059 * values for some adapter tunables, take PHYs out of reset, and
3060 * initialize the MDIO interface.
3062 int __devinit t4_prep_adapter(struct adapter *adapter)
/* NOTE(review): the kernel-doc lists a @reset parameter the function
 * does not take — stale doc, should be removed.  The `if (ret) return`
 * error-check lines after each step (3067-3069, 3074-3076) and the final
 * `return 0;` are on lines this listing drops. */
3066 ret = wait_dev_ready(adapter);
3070 get_pci_mode(adapter, &adapter->params.pci);
3071 adapter->params.rev = t4_read_reg(adapter, PL_REV);
3073 ret = get_vpd_params(adapter, &adapter->params.vpd);
3077 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3080 * Default port for debugging in case we can't reach FW.
3082 adapter->params.nports = 1;
3083 adapter->params.portvec = 1;
3087 int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3091 struct fw_port_cmd c;
3093 memset(&c, 0, sizeof(c));
3095 for_each_port(adap, i) {
3096 unsigned int rss_size;
3097 struct port_info *p = adap2pinfo(adap, i);
3099 while ((adap->params.portvec & (1 << j)) == 0)
3102 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
3103 FW_CMD_REQUEST | FW_CMD_READ |
3104 FW_PORT_CMD_PORTID(j));
3105 c.action_to_len16 = htonl(
3106 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
3108 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3112 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
3119 p->rss_size = rss_size;
3120 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3121 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
3123 ret = ntohl(c.u.info.lstatus_to_modtype);
3124 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
3125 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
3126 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
3127 p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret);
3129 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));