/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/init.h>
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
56 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
60 u32 val = t4_read_reg(adapter, reg);
62 if (!!(val & mask) == polarity) {
74 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
88 * Sets a register field specified by the supplied mask to the
91 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
101 * t4_read_indirect - read indirectly addressed registers
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
109 * Reads registers that are accessed indirectly through an address/data
112 static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
125 * t4_write_indirect - write indirectly addressed registers
127 * @addr_reg: register holding the indirect addresses
128 * @data_reg: register holding the value for the indirect registers
129 * @vals: values to write
130 * @nregs: how many indirect registers to write
131 * @start_idx: address of first indirect register to write
133 * Writes a sequential block of registers that are accessed indirectly
134 * through an address/data register pair.
136 static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 unsigned int data_reg, const u32 *vals,
138 unsigned int nregs, unsigned int start_idx)
141 t4_write_reg(adap, addr_reg, start_idx++);
142 t4_write_reg(adap, data_reg, *vals++);
148 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
150 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
153 for ( ; nflit; nflit--, mbox_addr += 8)
154 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
158 * Handle a FW assertion reported in a mailbox.
160 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
162 struct fw_debug_cmd asrt;
164 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
165 dev_alert(adap->pdev_dev,
166 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
167 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
168 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
171 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
173 dev_err(adap->pdev_dev,
174 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
175 (unsigned long long)t4_read_reg64(adap, data_reg),
176 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
177 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
178 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
179 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
180 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
181 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
182 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
186 * t4_wr_mbox_meat - send a command to FW through the given mailbox
188 * @mbox: index of the mailbox to use
189 * @cmd: the command to write
190 * @size: command length in bytes
191 * @rpl: where to optionally store the reply
192 * @sleep_ok: if true we may sleep while awaiting command completion
194 * Sends the given command to FW through the selected mailbox and waits
195 * for the FW to execute the command. If @rpl is not %NULL it is used to
196 * store the FW's reply to the command. The command and its optional
197 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
198 * to respond. @sleep_ok determines whether we may sleep while awaiting
199 * the response. If sleeping is allowed we use progressive backoff
202 * The return value is 0 on success or a negative errno on failure. A
203 * failure can happen either because we are not able to execute the
204 * command or FW executes it but signals an error. In the latter case
205 * the return value is the error code indicated by FW (negated).
207 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
208 void *rpl, bool sleep_ok)
210 static int delay[] = {
211 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
216 int i, ms, delay_idx;
217 const __be64 *p = cmd;
218 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
219 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
221 if ((size & 15) || size > MBOX_LEN)
224 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
225 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
226 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
228 if (v != MBOX_OWNER_DRV)
229 return v ? -EBUSY : -ETIMEDOUT;
231 for (i = 0; i < size; i += 8)
232 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
234 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
235 t4_read_reg(adap, ctl_reg); /* flush write */
240 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
242 ms = delay[delay_idx]; /* last element may repeat */
243 if (delay_idx < ARRAY_SIZE(delay) - 1)
249 v = t4_read_reg(adap, ctl_reg);
250 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
251 if (!(v & MBMSGVALID)) {
252 t4_write_reg(adap, ctl_reg, 0);
256 res = t4_read_reg64(adap, data_reg);
257 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
258 fw_asrt(adap, data_reg);
259 res = FW_CMD_RETVAL(EIO);
261 get_mbox_rpl(adap, rpl, size / 8, data_reg);
263 if (FW_CMD_RETVAL_GET((int)res))
264 dump_mbox(adap, mbox, data_reg);
265 t4_write_reg(adap, ctl_reg, 0);
266 return -FW_CMD_RETVAL_GET((int)res);
270 dump_mbox(adap, mbox, data_reg);
271 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
272 *(const u8 *)cmd, mbox);
277 * t4_mc_read - read from MC through backdoor accesses
279 * @addr: address of first byte requested
280 * @data: 64 bytes of data containing the requested address
281 * @ecc: where to store the corresponding 64-bit ECC word
283 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
284 * that covers the requested address @addr. If @parity is not %NULL it
285 * is assigned the 64-bit ECC word for the read data.
287 int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
291 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
293 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
294 t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
295 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
296 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
298 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
302 #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
304 for (i = 15; i >= 0; i--)
305 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
307 *ecc = t4_read_reg64(adap, MC_DATA(16));
313 * t4_edc_read - read from EDC through backdoor accesses
315 * @idx: which EDC to access
316 * @addr: address of first byte requested
317 * @data: 64 bytes of data containing the requested address
318 * @ecc: where to store the corresponding 64-bit ECC word
320 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
321 * that covers the requested address @addr. If @parity is not %NULL it
322 * is assigned the 64-bit ECC word for the read data.
324 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
329 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
331 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
332 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
333 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
334 t4_write_reg(adap, EDC_BIST_CMD + idx,
335 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
336 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
340 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
342 for (i = 15; i >= 0; i--)
343 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
345 *ecc = t4_read_reg64(adap, EDC_DATA(16));
351 * Partial EEPROM Vital Product Data structure. Includes only the ID and
362 #define EEPROM_STAT_ADDR 0x7bfc
367 * t4_seeprom_wp - enable/disable EEPROM write protection
368 * @adapter: the adapter
369 * @enable: whether to enable or disable write protection
371 * Enables or disables write protection on the serial EEPROM.
373 int t4_seeprom_wp(struct adapter *adapter, bool enable)
375 unsigned int v = enable ? 0xc : 0;
376 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
377 return ret < 0 ? ret : 0;
381 * get_vpd_params - read VPD parameters from VPD EEPROM
382 * @adapter: adapter to read
383 * @p: where to store the parameters
385 * Reads card parameters stored in VPD EEPROM.
387 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
391 u8 vpd[VPD_LEN], csum;
392 unsigned int vpdr_len;
393 const struct t4_vpd_hdr *v;
395 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
399 v = (const struct t4_vpd_hdr *)vpd;
400 vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
401 if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
402 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
406 #define FIND_VPD_KW(var, name) do { \
407 var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
410 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
413 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
416 FIND_VPD_KW(i, "RV");
417 for (csum = 0; i >= 0; i--)
421 dev_err(adapter->pdev_dev,
422 "corrupted VPD EEPROM, actual csum %u\n", csum);
426 FIND_VPD_KW(ec, "EC");
427 FIND_VPD_KW(sn, "SN");
428 FIND_VPD_KW(v2, "V2");
431 p->cclk = simple_strtoul(vpd + v2, NULL, 10);
432 memcpy(p->id, v->id_data, ID_LEN);
434 memcpy(p->ec, vpd + ec, EC_LEN);
436 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
437 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
442 /* serial flash and firmware constants */
444 SF_ATTEMPTS = 10, /* max retries for SF operations */
446 /* flash command opcodes */
447 SF_PROG_PAGE = 2, /* program page */
448 SF_WR_DISABLE = 4, /* disable writes */
449 SF_RD_STATUS = 5, /* read status register */
450 SF_WR_ENABLE = 6, /* enable writes */
451 SF_RD_DATA_FAST = 0xb, /* read flash */
452 SF_ERASE_SECTOR = 0xd8, /* erase sector */
454 FW_START_SEC = 8, /* first flash sector for FW */
455 FW_END_SEC = 15, /* last flash sector for FW */
456 FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
457 FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
461 * sf1_read - read data from the serial flash
462 * @adapter: the adapter
463 * @byte_cnt: number of bytes to read
464 * @cont: whether another operation will be chained
465 * @lock: whether to lock SF for PL access only
466 * @valp: where to store the read data
468 * Reads up to 4 bytes of data from the serial flash. The location of
469 * the read needs to be specified prior to calling this by issuing the
470 * appropriate commands to the serial flash.
472 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
477 if (!byte_cnt || byte_cnt > 4)
479 if (t4_read_reg(adapter, SF_OP) & BUSY)
481 cont = cont ? SF_CONT : 0;
482 lock = lock ? SF_LOCK : 0;
483 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
484 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
486 *valp = t4_read_reg(adapter, SF_DATA);
491 * sf1_write - write data to the serial flash
492 * @adapter: the adapter
493 * @byte_cnt: number of bytes to write
494 * @cont: whether another operation will be chained
495 * @lock: whether to lock SF for PL access only
496 * @val: value to write
498 * Writes up to 4 bytes of data to the serial flash. The location of
499 * the write needs to be specified prior to calling this by issuing the
500 * appropriate commands to the serial flash.
502 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
505 if (!byte_cnt || byte_cnt > 4)
507 if (t4_read_reg(adapter, SF_OP) & BUSY)
509 cont = cont ? SF_CONT : 0;
510 lock = lock ? SF_LOCK : 0;
511 t4_write_reg(adapter, SF_DATA, val);
512 t4_write_reg(adapter, SF_OP, lock |
513 cont | BYTECNT(byte_cnt - 1) | OP_WR);
514 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
518 * flash_wait_op - wait for a flash operation to complete
519 * @adapter: the adapter
520 * @attempts: max number of polls of the status register
521 * @delay: delay between polls in ms
523 * Wait for a flash operation to complete by polling the status register.
525 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
531 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
532 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
544 * t4_read_flash - read words from serial flash
545 * @adapter: the adapter
546 * @addr: the start address for the read
547 * @nwords: how many 32-bit words to read
548 * @data: where to store the read data
549 * @byte_oriented: whether to store data as bytes or as words
551 * Read the specified number of 32-bit words from the serial flash.
552 * If @byte_oriented is set the read data is stored as a byte array
553 * (i.e., big-endian), otherwise as 32-bit words in the platform's
556 static int t4_read_flash(struct adapter *adapter, unsigned int addr,
557 unsigned int nwords, u32 *data, int byte_oriented)
561 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
564 addr = swab32(addr) | SF_RD_DATA_FAST;
566 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
567 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
570 for ( ; nwords; nwords--, data++) {
571 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
573 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
577 *data = htonl(*data);
583 * t4_write_flash - write up to a page of data to the serial flash
584 * @adapter: the adapter
585 * @addr: the start address to write
586 * @n: length of data to write in bytes
587 * @data: the data to write
589 * Writes up to a page of data (256 bytes) to the serial flash starting
590 * at the given address. All the data must be written to the same page.
592 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
593 unsigned int n, const u8 *data)
597 unsigned int i, c, left, val, offset = addr & 0xff;
599 if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE)
602 val = swab32(addr) | SF_PROG_PAGE;
604 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
605 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
608 for (left = n; left; left -= c) {
610 for (val = 0, i = 0; i < c; ++i)
611 val = (val << 8) + *data++;
613 ret = sf1_write(adapter, c, c != left, 1, val);
617 ret = flash_wait_op(adapter, 5, 1);
621 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
623 /* Read the page to verify the write succeeded */
624 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
628 if (memcmp(data - n, (u8 *)buf + offset, n)) {
629 dev_err(adapter->pdev_dev,
630 "failed to correctly write the flash page at %#x\n",
637 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
642 * get_fw_version - read the firmware version
643 * @adapter: the adapter
644 * @vers: where to place the version
646 * Reads the FW version from flash.
648 static int get_fw_version(struct adapter *adapter, u32 *vers)
650 return t4_read_flash(adapter,
651 FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
656 * get_tp_version - read the TP microcode version
657 * @adapter: the adapter
658 * @vers: where to place the version
660 * Reads the TP microcode version from flash.
662 static int get_tp_version(struct adapter *adapter, u32 *vers)
664 return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
670 * t4_check_fw_version - check if the FW is compatible with this driver
671 * @adapter: the adapter
673 * Checks if an adapter's FW is compatible with the driver. Returns 0
674 * if there's exact match, a negative error if the version could not be
675 * read or there's a major version mismatch, and a positive value if the
676 * expected major version is found but there's a minor version mismatch.
678 int t4_check_fw_version(struct adapter *adapter)
681 int ret, major, minor, micro;
683 ret = get_fw_version(adapter, &adapter->params.fw_vers);
685 ret = get_tp_version(adapter, &adapter->params.tp_vers);
687 ret = t4_read_flash(adapter,
688 FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
693 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
694 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
695 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
696 memcpy(adapter->params.api_vers, api_vers,
697 sizeof(adapter->params.api_vers));
699 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
700 dev_err(adapter->pdev_dev,
701 "card FW has major version %u, driver wants %u\n",
702 major, FW_VERSION_MAJOR);
706 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
707 return 0; /* perfect match */
709 /* Minor/micro version mismatch. Report it but often it's OK. */
714 * t4_flash_erase_sectors - erase a range of flash sectors
715 * @adapter: the adapter
716 * @start: the first sector to erase
717 * @end: the last sector to erase
719 * Erases the sectors in the given inclusive range.
721 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
725 while (start <= end) {
726 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
727 (ret = sf1_write(adapter, 4, 0, 1,
728 SF_ERASE_SECTOR | (start << 8))) != 0 ||
729 (ret = flash_wait_op(adapter, 5, 500)) != 0) {
730 dev_err(adapter->pdev_dev,
731 "erase of flash sector %d failed, error %d\n",
737 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
742 * t4_load_fw - download firmware
744 * @fw_data: the firmware image to write
747 * Write the supplied firmware image to the card's serial flash.
749 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
754 u8 first_page[SF_PAGE_SIZE];
755 const u32 *p = (const u32 *)fw_data;
756 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
759 dev_err(adap->pdev_dev, "FW image has no data\n");
763 dev_err(adap->pdev_dev,
764 "FW image size not multiple of 512 bytes\n");
767 if (ntohs(hdr->len512) * 512 != size) {
768 dev_err(adap->pdev_dev,
769 "FW image size differs from size in FW header\n");
772 if (size > FW_MAX_SIZE) {
773 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
778 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
781 if (csum != 0xffffffff) {
782 dev_err(adap->pdev_dev,
783 "corrupted firmware image, checksum %#x\n", csum);
787 i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */
788 ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
793 * We write the correct version at the end so the driver can see a bad
794 * version if the FW write fails. Start by writing a copy of the
795 * first page with a bad version.
797 memcpy(first_page, fw_data, SF_PAGE_SIZE);
798 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
799 ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
804 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
805 addr += SF_PAGE_SIZE;
806 fw_data += SF_PAGE_SIZE;
807 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
812 ret = t4_write_flash(adap,
813 FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
814 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
817 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
822 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
823 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
826 * t4_link_start - apply link configuration to MAC/PHY
827 * @phy: the PHY to setup
828 * @mac: the MAC to setup
829 * @lc: the requested link configuration
831 * Set up a port's MAC and PHY according to a desired link configuration.
832 * - If the PHY can auto-negotiate first decide what to advertise, then
833 * enable/disable auto-negotiation as desired, and reset.
834 * - If the PHY does not auto-negotiate just reset it.
835 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
836 * otherwise do it later based on the outcome of auto-negotiation.
838 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
839 struct link_config *lc)
841 struct fw_port_cmd c;
842 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
845 if (lc->requested_fc & PAUSE_RX)
846 fc |= FW_PORT_CAP_FC_RX;
847 if (lc->requested_fc & PAUSE_TX)
848 fc |= FW_PORT_CAP_FC_TX;
850 memset(&c, 0, sizeof(c));
851 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
852 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
853 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
856 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
857 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
858 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
859 } else if (lc->autoneg == AUTONEG_DISABLE) {
860 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
861 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
863 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
865 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
869 * t4_restart_aneg - restart autonegotiation
871 * @mbox: mbox to use for the FW command
874 * Restarts autonegotiation for the selected port.
876 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
878 struct fw_port_cmd c;
880 memset(&c, 0, sizeof(c));
881 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
882 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
883 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
885 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
886 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
890 * t4_set_vlan_accel - configure HW VLAN extraction
892 * @ports: bitmap of adapter ports to operate on
893 * @on: enable (1) or disable (0) HW VLAN extraction
895 * Enables or disables HW extraction of VLAN tags for the ports specified
896 * by @ports. @ports is a bitmap with the ith bit designating the port
897 * associated with the ith adapter channel.
899 void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on)
901 ports <<= VLANEXTENABLE_SHIFT;
902 t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0);
/* One entry of an interrupt-status decoding table; terminated by mask == 0 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};
913 * t4_handle_intr_status - table driven interrupt handler
914 * @adapter: the adapter that generated the interrupt
915 * @reg: the interrupt status register to process
916 * @acts: table of interrupt actions
918 * A table driven interrupt handler that applies a set of masks to an
919 * interrupt status word and performs the corresponding actions if the
920 * interrupts described by the mask have occured. The actions include
921 * optionally emitting a warning or alert message. The table is terminated
922 * by an entry specifying mask 0. Returns the number of fatal interrupt
925 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
926 const struct intr_info *acts)
929 unsigned int mask = 0;
930 unsigned int status = t4_read_reg(adapter, reg);
932 for ( ; acts->mask; ++acts) {
933 if (!(status & acts->mask))
937 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
938 status & acts->mask);
939 } else if (acts->msg && printk_ratelimit())
940 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
941 status & acts->mask);
945 if (status) /* clear processed interrupts */
946 t4_write_reg(adapter, reg, status);
951 * Interrupt handler for the PCIE module.
953 static void pcie_intr_handler(struct adapter *adapter)
955 static struct intr_info sysbus_intr_info[] = {
956 { RNPP, "RXNP array parity error", -1, 1 },
957 { RPCP, "RXPC array parity error", -1, 1 },
958 { RCIP, "RXCIF array parity error", -1, 1 },
959 { RCCP, "Rx completions control array parity error", -1, 1 },
960 { RFTP, "RXFT array parity error", -1, 1 },
963 static struct intr_info pcie_port_intr_info[] = {
964 { TPCP, "TXPC array parity error", -1, 1 },
965 { TNPP, "TXNP array parity error", -1, 1 },
966 { TFTP, "TXFT array parity error", -1, 1 },
967 { TCAP, "TXCA array parity error", -1, 1 },
968 { TCIP, "TXCIF array parity error", -1, 1 },
969 { RCAP, "RXCA array parity error", -1, 1 },
970 { OTDD, "outbound request TLP discarded", -1, 1 },
971 { RDPE, "Rx data parity error", -1, 1 },
972 { TDUE, "Tx uncorrectable data error", -1, 1 },
975 static struct intr_info pcie_intr_info[] = {
976 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
977 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
978 { MSIDATAPERR, "MSI data parity error", -1, 1 },
979 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
980 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
981 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
982 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
983 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
984 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
985 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
986 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
987 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
988 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
989 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
990 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
991 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
992 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
993 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
994 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
995 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
996 { FIDPERR, "PCI FID parity error", -1, 1 },
997 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
998 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
999 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1000 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1001 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1002 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1003 { PCIESINT, "PCI core secondary fault", -1, 1 },
1004 { PCIEPINT, "PCI core primary fault", -1, 1 },
1005 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1011 fat = t4_handle_intr_status(adapter,
1012 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1014 t4_handle_intr_status(adapter,
1015 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1016 pcie_port_intr_info) +
1017 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
1019 t4_fatal_err(adapter);
1023 * TP interrupt handler.
1025 static void tp_intr_handler(struct adapter *adapter)
1027 static struct intr_info tp_intr_info[] = {
1028 { 0x3fffffff, "TP parity error", -1, 1 },
1029 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1033 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1034 t4_fatal_err(adapter);
1038 * SGE interrupt handler.
1040 static void sge_intr_handler(struct adapter *adapter)
1044 static struct intr_info sge_intr_info[] = {
1045 { ERR_CPL_EXCEED_IQE_SIZE,
1046 "SGE received CPL exceeding IQE size", -1, 1 },
1047 { ERR_INVALID_CIDX_INC,
1048 "SGE GTS CIDX increment too large", -1, 0 },
1049 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1050 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1051 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1052 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1053 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1055 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1057 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1059 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1061 { ERR_ING_CTXT_PRIO,
1062 "SGE too many priority ingress contexts", -1, 0 },
1063 { ERR_EGR_CTXT_PRIO,
1064 "SGE too many priority egress contexts", -1, 0 },
1065 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1066 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1070 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1071 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1073 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1074 (unsigned long long)v);
1075 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1076 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1079 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1081 t4_fatal_err(adapter);
1085 * CIM interrupt handler.
1087 static void cim_intr_handler(struct adapter *adapter)
1089 static struct intr_info cim_intr_info[] = {
1090 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1091 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1092 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1093 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1094 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1095 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1096 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1099 static struct intr_info cim_upintr_info[] = {
1100 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1101 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1102 { ILLWRINT, "CIM illegal write", -1, 1 },
1103 { ILLRDINT, "CIM illegal read", -1, 1 },
1104 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1105 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1106 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1107 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1108 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1109 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1110 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1111 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1112 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1113 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1114 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1115 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1116 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1117 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1118 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1119 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1120 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1121 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1122 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1123 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1124 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1125 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1126 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1127 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1133 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1135 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1138 t4_fatal_err(adapter);
1142 * ULP RX interrupt handler.
1144 static void ulprx_intr_handler(struct adapter *adapter)
1146 static struct intr_info ulprx_intr_info[] = {
1147 { 0x7fffff, "ULPRX parity error", -1, 1 },
1151 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1152 t4_fatal_err(adapter);
1156 * ULP TX interrupt handler.
1158 static void ulptx_intr_handler(struct adapter *adapter)
1160 static struct intr_info ulptx_intr_info[] = {
1161 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1163 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1165 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1167 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1169 { 0xfffffff, "ULPTX parity error", -1, 1 },
1173 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1174 t4_fatal_err(adapter);
1178 * PM TX interrupt handler.
1180 static void pmtx_intr_handler(struct adapter *adapter)
1182 static struct intr_info pmtx_intr_info[] = {
1183 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1184 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1185 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1186 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1187 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1188 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1189 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1190 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1191 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1195 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1196 t4_fatal_err(adapter);
1200 * PM RX interrupt handler.
1202 static void pmrx_intr_handler(struct adapter *adapter)
1204 static struct intr_info pmrx_intr_info[] = {
1205 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1206 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1207 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1208 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1209 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1210 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1214 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1215 t4_fatal_err(adapter);
1219 * CPL switch interrupt handler.
1221 static void cplsw_intr_handler(struct adapter *adapter)
1223 static struct intr_info cplsw_intr_info[] = {
1224 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1225 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1226 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1227 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1228 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1229 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1233 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1234 t4_fatal_err(adapter);
1238 * LE interrupt handler.
1240 static void le_intr_handler(struct adapter *adap)
1242 static struct intr_info le_intr_info[] = {
1243 { LIPMISS, "LE LIP miss", -1, 0 },
1244 { LIP0, "LE 0 LIP error", -1, 0 },
1245 { PARITYERR, "LE parity error", -1, 1 },
1246 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1247 { REQQPARERR, "LE request queue parity error", -1, 1 },
1251 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1256 * MPS interrupt handler.
1258 static void mps_intr_handler(struct adapter *adapter)
1260 static struct intr_info mps_rx_intr_info[] = {
1261 { 0xffffff, "MPS Rx parity error", -1, 1 },
1264 static struct intr_info mps_tx_intr_info[] = {
1265 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1266 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1267 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1268 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1269 { BUBBLE, "MPS Tx underflow", -1, 1 },
1270 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1271 { FRMERR, "MPS Tx framing error", -1, 1 },
1274 static struct intr_info mps_trc_intr_info[] = {
1275 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1276 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1277 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1280 static struct intr_info mps_stat_sram_intr_info[] = {
1281 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1284 static struct intr_info mps_stat_tx_intr_info[] = {
1285 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1288 static struct intr_info mps_stat_rx_intr_info[] = {
1289 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1292 static struct intr_info mps_cls_intr_info[] = {
1293 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1294 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1295 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1301 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1303 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1305 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1306 mps_trc_intr_info) +
1307 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1308 mps_stat_sram_intr_info) +
1309 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1310 mps_stat_tx_intr_info) +
1311 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1312 mps_stat_rx_intr_info) +
1313 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1316 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1317 RXINT | TXINT | STATINT);
1318 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1320 t4_fatal_err(adapter);
1323 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1326 * EDC/MC interrupt handler.
1328 static void mem_intr_handler(struct adapter *adapter, int idx)
1330 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1332 unsigned int addr, cnt_addr, v;
1334 if (idx <= MEM_EDC1) {
1335 addr = EDC_REG(EDC_INT_CAUSE, idx);
1336 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1338 addr = MC_INT_CAUSE;
1339 cnt_addr = MC_ECC_STATUS;
1342 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1343 if (v & PERR_INT_CAUSE)
1344 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1346 if (v & ECC_CE_INT_CAUSE) {
1347 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1349 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1350 if (printk_ratelimit())
1351 dev_warn(adapter->pdev_dev,
1352 "%u %s correctable ECC data error%s\n",
1353 cnt, name[idx], cnt > 1 ? "s" : "");
1355 if (v & ECC_UE_INT_CAUSE)
1356 dev_alert(adapter->pdev_dev,
1357 "%s uncorrectable ECC data error\n", name[idx]);
1359 t4_write_reg(adapter, addr, v);
1360 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1361 t4_fatal_err(adapter);
1365 * MA interrupt handler.
1367 static void ma_intr_handler(struct adapter *adap)
1369 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1371 if (status & MEM_PERR_INT_CAUSE)
1372 dev_alert(adap->pdev_dev,
1373 "MA parity error, parity status %#x\n",
1374 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1375 if (status & MEM_WRAP_INT_CAUSE) {
1376 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1377 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1378 "client %u to address %#x\n",
1379 MEM_WRAP_CLIENT_NUM_GET(v),
1380 MEM_WRAP_ADDRESS_GET(v) << 4);
1382 t4_write_reg(adap, MA_INT_CAUSE, status);
1387 * SMB interrupt handler.
1389 static void smb_intr_handler(struct adapter *adap)
1391 static struct intr_info smb_intr_info[] = {
1392 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1393 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1394 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1398 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1403 * NC-SI interrupt handler.
1405 static void ncsi_intr_handler(struct adapter *adap)
1407 static struct intr_info ncsi_intr_info[] = {
1408 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1409 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1410 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1411 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1415 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1420 * XGMAC interrupt handler.
1422 static void xgmac_intr_handler(struct adapter *adap, int port)
1424 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1426 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1430 if (v & TXFIFO_PRTY_ERR)
1431 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1433 if (v & RXFIFO_PRTY_ERR)
1434 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1436 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1441 * PL interrupt handler.
1443 static void pl_intr_handler(struct adapter *adap)
1445 static struct intr_info pl_intr_info[] = {
1446 { FATALPERR, "T4 fatal parity error", -1, 1 },
1447 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1451 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1455 #define PF_INTR_MASK (PFSW | PFCIM)
1456 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1457 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1458 CPL_SWITCH | SGE | ULP_TX)
1461 * t4_slow_intr_handler - control path interrupt handler
1462 * @adapter: the adapter
1464 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1465 * The designation 'slow' is because it involves register reads, while
1466 * data interrupts typically don't involve any MMIOs.
1468 int t4_slow_intr_handler(struct adapter *adapter)
1470 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1472 if (!(cause & GLBL_INTR_MASK))
1475 cim_intr_handler(adapter);
1477 mps_intr_handler(adapter);
1479 ncsi_intr_handler(adapter);
1481 pl_intr_handler(adapter);
1483 smb_intr_handler(adapter);
1485 xgmac_intr_handler(adapter, 0);
1487 xgmac_intr_handler(adapter, 1);
1488 if (cause & XGMAC_KR0)
1489 xgmac_intr_handler(adapter, 2);
1490 if (cause & XGMAC_KR1)
1491 xgmac_intr_handler(adapter, 3);
1493 pcie_intr_handler(adapter);
1495 mem_intr_handler(adapter, MEM_MC);
1497 mem_intr_handler(adapter, MEM_EDC0);
1499 mem_intr_handler(adapter, MEM_EDC1);
1501 le_intr_handler(adapter);
1503 tp_intr_handler(adapter);
1505 ma_intr_handler(adapter);
1507 pmtx_intr_handler(adapter);
1509 pmrx_intr_handler(adapter);
1511 ulprx_intr_handler(adapter);
1512 if (cause & CPL_SWITCH)
1513 cplsw_intr_handler(adapter);
1515 sge_intr_handler(adapter);
1517 ulptx_intr_handler(adapter);
1519 /* Clear the interrupts just processed for which we are the master. */
1520 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1521 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1526 * t4_intr_enable - enable interrupts
1527 * @adapter: the adapter whose interrupts should be enabled
1529 * Enable PF-specific interrupts for the calling function and the top-level
1530 * interrupt concentrator for global interrupts. Interrupts are already
1531 * enabled at each module, here we just enable the roots of the interrupt
1534 * Note: this function should be called only when the driver manages
1535 * non PF-specific interrupts from the various HW modules. Only one PCI
1536 * function at a time should be doing this.
1538 void t4_intr_enable(struct adapter *adapter)
1540 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1542 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1543 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1544 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1545 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1546 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1547 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1548 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
1550 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1551 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1555 * t4_intr_disable - disable interrupts
1556 * @adapter: the adapter whose interrupts should be disabled
1558 * Disable interrupts. We only disable the top-level interrupt
1559 * concentrators. The caller must be a PCI function managing global
1562 void t4_intr_disable(struct adapter *adapter)
1564 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1566 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1567 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1571 * t4_intr_clear - clear all interrupts
1572 * @adapter: the adapter whose interrupts should be cleared
1574 * Clears all interrupts. The caller must be a PCI function managing
1575 * global interrupts.
1577 void t4_intr_clear(struct adapter *adapter)
1579 static const unsigned int cause_reg[] = {
1580 SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
1581 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1582 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1583 PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
1585 MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
1586 EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
1587 CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
1588 MYPF_REG(CIM_PF_HOST_INT_CAUSE),
1590 ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
1591 PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
1592 MPS_RX_PERR_INT_CAUSE,
1594 MYPF_REG(PL_PF_INT_CAUSE),
1601 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
1602 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
1604 t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
1605 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1609 * hash_mac_addr - return the hash value of a MAC address
1610 * @addr: the 48-bit Ethernet MAC address
1612 * Hashes a MAC address according to the hash function used by HW inexact
1613 * (hash) address matching.
1615 static int hash_mac_addr(const u8 *addr)
1617 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1618 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1626 * t4_config_rss_range - configure a portion of the RSS mapping table
1627 * @adapter: the adapter
1628 * @mbox: mbox to use for the FW command
1629 * @viid: virtual interface whose RSS subtable is to be written
1630 * @start: start entry in the table to write
1631 * @n: how many table entries to write
1632 * @rspq: values for the response queue lookup table
1633 * @nrspq: number of values in @rspq
1635 * Programs the selected part of the VI's RSS mapping table with the
1636 * provided values. If @nrspq < @n the supplied values are used repeatedly
1637 * until the full table range is populated.
1639 * The caller must ensure the values in @rspq are in the range allowed for
1642 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1643 int start, int n, const u16 *rspq, unsigned int nrspq)
1646 const u16 *rsp = rspq;
1647 const u16 *rsp_end = rspq + nrspq;
1648 struct fw_rss_ind_tbl_cmd cmd;
1650 memset(&cmd, 0, sizeof(cmd));
1651 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1652 FW_CMD_REQUEST | FW_CMD_WRITE |
1653 FW_RSS_IND_TBL_CMD_VIID(viid));
1654 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1656 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1658 int nq = min(n, 32);
1659 __be32 *qp = &cmd.iq0_to_iq2;
1661 cmd.niqid = htons(nq);
1662 cmd.startidx = htons(start);
1670 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1671 if (++rsp >= rsp_end)
1673 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1674 if (++rsp >= rsp_end)
1676 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1677 if (++rsp >= rsp_end)
1684 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1692 * t4_config_glbl_rss - configure the global RSS mode
1693 * @adapter: the adapter
1694 * @mbox: mbox to use for the FW command
1695 * @mode: global RSS mode
1696 * @flags: mode-specific flags
1698 * Sets the global RSS mode.
1700 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1703 struct fw_rss_glb_config_cmd c;
1705 memset(&c, 0, sizeof(c));
1706 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1707 FW_CMD_REQUEST | FW_CMD_WRITE);
1708 c.retval_len16 = htonl(FW_LEN16(c));
1709 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1710 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1711 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1712 c.u.basicvirtual.mode_pkd =
1713 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1714 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1717 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1720 /* Read an RSS table row */
1721 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
1723 t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
1724 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
1729 * t4_read_rss - read the contents of the RSS mapping table
1730 * @adapter: the adapter
1731 * @map: holds the contents of the RSS mapping table
1733 * Reads the contents of the RSS hash->queue mapping table.
1735 int t4_read_rss(struct adapter *adapter, u16 *map)
1740 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
1741 ret = rd_rss_row(adapter, i, &val);
1744 *map++ = LKPTBLQUEUE0_GET(val);
1745 *map++ = LKPTBLQUEUE1_GET(val);
1751 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1752 * @adap: the adapter
1753 * @v4: holds the TCP/IP counter values
1754 * @v6: holds the TCP/IPv6 counter values
1756 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1757 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1759 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1760 struct tp_tcp_stats *v6)
1762 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1764 #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1765 #define STAT(x) val[STAT_IDX(x)]
1766 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1769 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1770 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1771 v4->tcpOutRsts = STAT(OUT_RST);
1772 v4->tcpInSegs = STAT64(IN_SEG);
1773 v4->tcpOutSegs = STAT64(OUT_SEG);
1774 v4->tcpRetransSegs = STAT64(RXT_SEG);
1777 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1778 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1779 v6->tcpOutRsts = STAT(OUT_RST);
1780 v6->tcpInSegs = STAT64(IN_SEG);
1781 v6->tcpOutSegs = STAT64(OUT_SEG);
1782 v6->tcpRetransSegs = STAT64(RXT_SEG);
1790 * t4_tp_get_err_stats - read TP's error MIB counters
1791 * @adap: the adapter
1792 * @st: holds the counter values
1794 * Returns the values of TP's error counters.
1796 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
1798 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
1799 12, TP_MIB_MAC_IN_ERR_0);
1800 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
1801 8, TP_MIB_TNL_CNG_DROP_0);
1802 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
1803 4, TP_MIB_TNL_DROP_0);
1804 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
1805 4, TP_MIB_OFD_VLN_DROP_0);
1806 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
1807 4, TP_MIB_TCP_V6IN_ERR_0);
1808 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
1809 2, TP_MIB_OFD_ARP_DROP);
1813 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1814 * @adap: the adapter
1815 * @mtus: where to store the MTU values
1816 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1818 * Reads the HW path MTU table.
1820 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1825 for (i = 0; i < NMTUS; ++i) {
1826 t4_write_reg(adap, TP_MTU_TABLE,
1827 MTUINDEX(0xff) | MTUVALUE(i));
1828 v = t4_read_reg(adap, TP_MTU_TABLE);
1829 mtus[i] = MTUVALUE_GET(v);
1831 mtu_log[i] = MTUWIDTH_GET(v);
1836 * init_cong_ctrl - initialize congestion control parameters
1837 * @a: the alpha values for congestion control
1838 * @b: the beta values for congestion control
1840 * Initialize the congestion control parameters.
1842 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1844 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1869 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1872 b[13] = b[14] = b[15] = b[16] = 3;
1873 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1874 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1879 /* The minimum additive increment value for the congestion control table */
1880 #define CC_MIN_INCR 2U
1883 * t4_load_mtus - write the MTU and congestion control HW tables
1884 * @adap: the adapter
1885 * @mtus: the values for the MTU table
1886 * @alpha: the values for the congestion control alpha parameter
1887 * @beta: the values for the congestion control beta parameter
1889 * Write the HW MTU table with the supplied MTUs and the high-speed
1890 * congestion control table with the supplied alpha, beta, and MTUs.
1891 * We write the two tables together because the additive increments
1892 * depend on the MTUs.
1894 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1895 const unsigned short *alpha, const unsigned short *beta)
1897 static const unsigned int avg_pkts[NCCTRL_WIN] = {
1898 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
1899 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
1900 28672, 40960, 57344, 81920, 114688, 163840, 229376
1905 for (i = 0; i < NMTUS; ++i) {
1906 unsigned int mtu = mtus[i];
1907 unsigned int log2 = fls(mtu);
1909 if (!(mtu & ((1 << log2) >> 2))) /* round */
1911 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
1912 MTUWIDTH(log2) | MTUVALUE(mtu));
1914 for (w = 0; w < NCCTRL_WIN; ++w) {
1917 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
1920 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
1921 (w << 16) | (beta[w] << 13) | inc);
1927 * t4_set_trace_filter - configure one of the tracing filters
1928 * @adap: the adapter
1929 * @tp: the desired trace filter parameters
1930 * @idx: which filter to configure
1931 * @enable: whether to enable or disable the filter
1933 * Configures one of the tracing filters available in HW. If @enable is
1934 * %0 @tp is not examined and may be %NULL.
1936 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
1937 int idx, int enable)
1939 int i, ofst = idx * 4;
1940 u32 data_reg, mask_reg, cfg;
1941 u32 multitrc = TRCMULTIFILTER;
1944 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1948 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
1949 tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
1950 tp->snap_len > 9600 || (idx && tp->snap_len > 256))
1953 if (tp->snap_len > 256) { /* must be tracer 0 */
1954 if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
1955 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
1956 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
1957 return -EINVAL; /* other tracers are enabled */
1960 i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
1961 if (TFCAPTUREMAX_GET(i) > 256 &&
1962 (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
1966 /* stop the tracer we'll be changing */
1967 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1969 /* disable tracing globally if running in the wrong single/multi mode */
1970 cfg = t4_read_reg(adap, MPS_TRC_CFG);
1971 if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
1972 t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
1973 t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1975 if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
1979 * At this point either the tracing is enabled and in the right mode or
1983 idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
1984 data_reg = MPS_TRC_FILTER0_MATCH + idx;
1985 mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
1987 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
1988 t4_write_reg(adap, data_reg, tp->data[i]);
1989 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
1991 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
1992 TFCAPTUREMAX(tp->snap_len) |
1993 TFMINPKTSIZE(tp->min_len));
1994 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
1995 TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
1996 TFPORT(tp->port) | TFEN |
1997 (tp->invert ? TFINVERTMATCH : 0));
1999 cfg &= ~TRCMULTIFILTER;
2000 t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
2001 out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */
2006 * t4_get_trace_filter - query one of the tracing filters
2007 * @adap: the adapter
2008 * @tp: the current trace filter parameters
2009 * @idx: which trace filter to query
2010 * @enabled: non-zero if the filter is enabled
2012 * Returns the current settings of one of the HW tracing filters.
2014 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
2018 int i, ofst = idx * 4;
2019 u32 data_reg, mask_reg;
2021 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
2022 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
2024 *enabled = !!(ctla & TFEN);
2025 tp->snap_len = TFCAPTUREMAX_GET(ctlb);
2026 tp->min_len = TFMINPKTSIZE_GET(ctlb);
2027 tp->skip_ofst = TFOFFSET_GET(ctla);
2028 tp->skip_len = TFLENGTH_GET(ctla);
2029 tp->invert = !!(ctla & TFINVERTMATCH);
2030 tp->port = TFPORT_GET(ctla);
2032 ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
2033 data_reg = MPS_TRC_FILTER0_MATCH + ofst;
2034 mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
2036 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
2037 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
2038 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
2043 * get_mps_bg_map - return the buffer groups associated with a port
2044 * @adap: the adapter
2045 * @idx: the port index
2047 * Returns a bitmap indicating which MPS buffer groups are associated
2048 * with the given port. Bit i is set if buffer group i is used by the
2051 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2053 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2056 return idx == 0 ? 0xf : 0;
2058 return idx < 2 ? (3 << (2 * idx)) : 0;
2063 * t4_get_port_stats - collect port statistics
2064 * @adap: the adapter
2065 * @idx: the port index
2066 * @p: the stats structure to fill
2068 * Collect statistics related to the given port from HW.
2070 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2072 u32 bgmap = get_mps_bg_map(adap, idx);
2074 #define GET_STAT(name) \
2075 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
2076 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2078 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2079 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2080 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2081 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2082 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2083 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2084 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2085 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2086 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2087 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2088 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2089 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2090 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2091 p->tx_drop = GET_STAT(TX_PORT_DROP);
2092 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2093 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2094 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2095 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2096 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2097 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2098 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2099 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2100 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2102 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2103 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2104 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2105 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2106 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2107 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2108 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2109 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2110 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2111 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2112 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2113 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2114 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2115 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2116 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2117 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2118 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2119 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2120 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2121 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2122 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2123 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2124 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2125 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2126 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2127 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2128 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2130 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2131 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2132 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2133 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2134 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2135 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2136 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2137 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2144 * t4_get_lb_stats - collect loopback port statistics
2145 * @adap: the adapter
2146 * @idx: the loopback port index
2147 * @p: the stats structure to fill
2149 * Return HW statistics for the given loopback port.
/* Read the per-port loopback statistics registers for loopback port @idx
 * into @p.  The per-buffer-group drop/truncate counters at the bottom are
 * read only for buffer groups mapped to this port (bgmap bits 0-3);
 * unmapped groups report 0.
 */
2151 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
2153 u32 bgmap = get_mps_bg_map(adap, idx);
/* GET_STAT reads a 64-bit per-port LB counter; GET_STAT_COM reads an
 * adapter-wide MPS counter.  Both name the _L register of an L/H pair
 * and rely on t4_read_reg64 to fetch the full 64-bit value.
 */
2155 #define GET_STAT(name) \
2156 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
2157 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2159 p->octets = GET_STAT(BYTES);
2160 p->frames = GET_STAT(FRAMES);
2161 p->bcast_frames = GET_STAT(BCAST);
2162 p->mcast_frames = GET_STAT(MCAST);
2163 p->ucast_frames = GET_STAT(UCAST);
2164 p->error_frames = GET_STAT(ERROR);
/* Frame-size histogram buckets. */
2166 p->frames_64 = GET_STAT(64B);
2167 p->frames_65_127 = GET_STAT(65B_127B);
2168 p->frames_128_255 = GET_STAT(128B_255B);
2169 p->frames_256_511 = GET_STAT(256B_511B);
2170 p->frames_512_1023 = GET_STAT(512B_1023B);
2171 p->frames_1024_1518 = GET_STAT(1024B_1518B);
2172 p->frames_1519_max = GET_STAT(1519B_MAX);
/* Drop count is a plain 32-bit register, hence t4_read_reg. */
2173 p->drop = t4_read_reg(adap, PORT_REG(idx,
2174 MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
2176 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
2177 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
2178 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
2179 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
2180 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
2181 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
2182 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
2183 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
2190 * t4_wol_magic_enable - enable/disable magic packet WoL
2191 * @adap: the adapter
2192 * @port: the physical port index
2193 * @addr: MAC address expected in magic packets, %NULL to disable
2195 * Enables/disables magic packet wake-on-LAN for the selected port.
2197 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
/* Program the MAC address expected in magic packets: bytes 2-5 go into
 * the LO register, bytes 0-1 into the HI register.
 * NOTE(review): these writes dereference @addr, yet @addr may be NULL
 * per the kernel-doc; presumably an "if (addr)" guard sits on an elided
 * line -- confirm against the full source.
 */
2201 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2202 (addr[2] << 24) | (addr[3] << 16) |
2203 (addr[4] << 8) | addr[5]);
2204 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2205 (addr[0] << 8) | addr[1]);
/* Set MAGICEN when an address was supplied, clear it otherwise. */
2207 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2208 addr ? MAGICEN : 0);
2212 * t4_wol_pat_enable - enable/disable pattern-based WoL
2213 * @adap: the adapter
2214 * @port: the physical port index
2215 * @map: bitmap of which HW pattern filters to set
2216 * @mask0: byte mask for bytes 0-63 of a packet
2217 * @mask1: byte mask for bytes 64-127 of a packet
2218 * @crc: Ethernet CRC for selected bytes
2219 * @enable: enable/disable switch
2221 * Sets the pattern filters indicated in @map to mask out the bytes
2222 * specified in @mask0/@mask1 in received packets and compare the CRC of
2223 * the resulting packet against @crc. If @enable is %true pattern-based
2224 * WoL is enabled, otherwise disabled.
2226 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2227 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2232 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
/* EPIO_REG: shorthand for this port's XGMAC EPIO registers, used to
 * program the pattern-match byte masks and CRCs below.
 */
2239 #define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
/* Stage the upper half of mask0 and both halves of mask1; DATA0 is
 * written per-pattern inside the loop.
 */
2241 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2242 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2243 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
/* One iteration per HW pattern filter; bit 0 of @map selects whether
 * the current pattern is programmed.
 */
2245 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2249 /* write byte masks */
2250 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2251 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2252 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
/* NOTE(review): the error-return path for BUSY is on an elided line. */
2253 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
/* write CRC -- CRC entries live at EPIO addresses i + 32 */
2257 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2258 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2259 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2260 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
/* Finally turn on pattern-based WoL for the port. */
2265 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
/* INIT_CMD: fill in the common FW command header of @var for command
 * FW_<cmd>_CMD with direction <rd_wr> (READ or WRITE), and derive the
 * length-in-16-byte-units field from the struct's size via FW_LEN16.
 */
2269 #define INIT_CMD(var, cmd, rd_wr) do { \
2270 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2271 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2272 (var).retval_len16 = htonl(FW_LEN16(var)); \
2276 * t4_mdio_rd - read a PHY register through MDIO
2277 * @adap: the adapter
2278 * @mbox: mailbox to use for the FW command
2279 * @phy_addr: the PHY address
2280 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2281 * @reg: the register to read
2282 * @valp: where to store the value
2284 * Issues a FW command through the given mailbox to read a PHY register.
2286 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2287 unsigned int mmd, unsigned int reg, u16 *valp)
2290 struct fw_ldst_cmd c;
/* Build an LDST command targeting the MDIO address space. */
2292 memset(&c, 0, sizeof(c));
2293 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2294 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2295 c.cycles_to_len16 = htonl(FW_LEN16(c));
2296 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2297 FW_LDST_CMD_MMD(mmd));
2298 c.u.mdio.raddr = htons(reg);
/* Passing &c as the reply buffer lets FW return the read value in
 * place; NOTE(review): the *valp store is presumably guarded by a
 * "ret == 0" check on an elided line -- confirm.
 */
2300 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2302 *valp = ntohs(c.u.mdio.rval);
2307 * t4_mdio_wr - write a PHY register through MDIO
2308 * @adap: the adapter
2309 * @mbox: mailbox to use for the FW command
2310 * @phy_addr: the PHY address
2311 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2312 * @reg: the register to write
2313 * @valp: value to write
2315 * Issues a FW command through the given mailbox to write a PHY register.
2317 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2318 unsigned int mmd, unsigned int reg, u16 val)
2320 struct fw_ldst_cmd c;
/* Same LDST/MDIO command as t4_mdio_rd but with WRITE direction and
 * the value to write carried in rval; no reply payload is needed.
 */
2322 memset(&c, 0, sizeof(c));
2323 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2324 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2325 c.cycles_to_len16 = htonl(FW_LEN16(c));
2326 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2327 FW_LDST_CMD_MMD(mmd));
2328 c.u.mdio.raddr = htons(reg);
2329 c.u.mdio.rval = htons(val);
2331 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2335 * t4_fw_hello - establish communication with FW
2336 * @adap: the adapter
2337 * @mbox: mailbox to use for the FW command
2338 * @evt_mbox: mailbox to receive async FW events
2339 * @master: specifies the caller's willingness to be the device master
2340 * @state: returns the current device state
2342 * Issues a command to establish communication with FW.
2344 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2345 enum dev_master master, enum dev_state *state)
2348 struct fw_hello_cmd c;
2350 INIT_CMD(c, HELLO, WRITE);
/* Encode the caller's mastership preference: MASTERDIS when it cannot
 * be master, MASTERFORCE when it must be, and the master mailbox (or
 * 0xff when not forcing).  evt_mbox is where async FW events go.
 */
2351 c.err_to_mbasyncnot = htonl(
2352 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2353 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2354 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2355 FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2357 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success FW echoes device state flags back in err_to_mbasyncnot. */
2358 if (ret == 0 && state) {
2359 u32 v = ntohl(c.err_to_mbasyncnot);
2360 if (v & FW_HELLO_CMD_INIT)
2361 *state = DEV_STATE_INIT;
2362 else if (v & FW_HELLO_CMD_ERR)
2363 *state = DEV_STATE_ERR;
/* Neither INIT nor ERR set: device is still uninitialized. */
2365 *state = DEV_STATE_UNINIT;
2371 * t4_fw_bye - end communication with FW
2372 * @adap: the adapter
2373 * @mbox: mailbox to use for the FW command
2375 * Issues a command to terminate communication with FW.
2377 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2379 struct fw_bye_cmd c;
/* Simple fire-and-check command; no reply payload is expected. */
2381 INIT_CMD(c, BYE, WRITE);
2382 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2386 * t4_early_init - ask FW to initialize the device
2387 * @adap: the adapter
2388 * @mbox: mailbox to use for the FW command
2390 * Issues a command to FW to partially initialize the device. This
2391 * performs initialization that generally doesn't depend on user input.
2393 int t4_early_init(struct adapter *adap, unsigned int mbox)
2395 struct fw_initialize_cmd c;
/* Ask FW to perform user-input-independent device initialization. */
2397 INIT_CMD(c, INITIALIZE, WRITE);
2398 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2402 * t4_fw_reset - issue a reset to FW
2403 * @adap: the adapter
2404 * @mbox: mailbox to use for the FW command
2405 * @reset: specifies the type of reset to perform
2407 * Issues a reset command of the specified type to FW.
2409 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2411 struct fw_reset_cmd c;
2413 INIT_CMD(c, RESET, WRITE);
/* @reset selects the reset type; it is passed through to FW verbatim. */
2414 c.val = htonl(reset);
2415 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2419 * t4_query_params - query FW or device parameters
2420 * @adap: the adapter
2421 * @mbox: mailbox to use for the FW command
2424 * @nparams: the number of parameters
2425 * @params: the parameter names
2426 * @val: the parameter values
2428 * Reads the value of FW or device parameters. Up to 7 parameters can be
2431 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2432 unsigned int vf, unsigned int nparams, const u32 *params,
2436 struct fw_params_cmd c;
/* Each c.param[] entry is a (mnem, val) pair of __be32, hence the
 * stride of 2 when walking with p.
 */
2437 __be32 *p = &c.param[0].mnem;
2442 memset(&c, 0, sizeof(c));
2443 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2444 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2445 FW_PARAMS_CMD_VFN(vf));
2446 c.retval_len16 = htonl(FW_LEN16(c));
/* Load the parameter names into the mnem slots. */
2447 for (i = 0; i < nparams; i++, p += 2)
2448 *p = htonl(*params++);
2450 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success, harvest the returned values from the val slots.
 * NOTE(review): the loop body storing into the caller's val array is
 * on an elided line -- confirm against the full source.
 */
2452 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2458 * t4_set_params - sets FW or device parameters
2459 * @adap: the adapter
2460 * @mbox: mailbox to use for the FW command
2463 * @nparams: the number of parameters
2464 * @params: the parameter names
2465 * @val: the parameter values
2467 * Sets the value of FW or device parameters. Up to 7 parameters can be
2468 * specified at once.
2470 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2471 unsigned int vf, unsigned int nparams, const u32 *params,
2474 struct fw_params_cmd c;
/* p walks the interleaved (mnem, val) __be32 pairs in c.param[]. */
2475 __be32 *p = &c.param[0].mnem;
2480 memset(&c, 0, sizeof(c));
2481 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2482 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2483 FW_PARAMS_CMD_VFN(vf));
2484 c.retval_len16 = htonl(FW_LEN16(c));
/* Write name/value pairs back-to-back.  NOTE(review): the loop header
 * driving these two statements is on an elided line -- confirm.
 */
2486 *p++ = htonl(*params++);
2487 *p++ = htonl(*val++);
2490 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2494 * t4_cfg_pfvf - configure PF/VF resource limits
2495 * @adap: the adapter
2496 * @mbox: mailbox to use for the FW command
2497 * @pf: the PF being configured
2498 * @vf: the VF being configured
2499 * @txq: the max number of egress queues
2500 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2501 * @rxqi: the max number of interrupt-capable ingress queues
2502 * @rxq: the max number of interruptless ingress queues
2503 * @tc: the PCI traffic class
2504 * @vi: the max number of virtual interfaces
2505 * @cmask: the channel access rights mask for the PF/VF
2506 * @pmask: the port access rights mask for the PF/VF
2507 * @nexact: the maximum number of exact MPS filters
2508 * @rcaps: read capabilities
2509 * @wxcaps: write/execute capabilities
2511 * Configures resource limits and capabilities for a physical or virtual
2514 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2515 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2516 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2517 unsigned int vi, unsigned int cmask, unsigned int pmask,
2518 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2520 struct fw_pfvf_cmd c;
/* Pack all the per-PF/VF resource limits into one PFVF command. */
2522 memset(&c, 0, sizeof(c));
2523 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2524 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2525 FW_PFVF_CMD_VFN(vf));
2526 c.retval_len16 = htonl(FW_LEN16(c));
/* Ingress queue limits: interrupt-capable (rxqi) vs interruptless (rxq). */
2527 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2528 FW_PFVF_CMD_NIQ(rxq));
/* Channel/port access masks plus the egress queue limit. */
2529 c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
2530 FW_PFVF_CMD_PMASK(pmask) |
2531 FW_PFVF_CMD_NEQ(txq));
2532 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2533 FW_PFVF_CMD_NEXACTF(nexact));
2534 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2535 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2536 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2537 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2541 * t4_alloc_vi - allocate a virtual interface
2542 * @adap: the adapter
2543 * @mbox: mailbox to use for the FW command
2544 * @port: physical port associated with the VI
2545 * @pf: the PF owning the VI
2546 * @vf: the VF owning the VI
2547 * @nmac: number of MAC addresses needed (1 to 5)
2548 * @mac: the MAC addresses of the VI
2549 * @rss_size: size of RSS table slice associated with this VI
2551 * Allocates a virtual interface for the given physical port. If @mac is
2552 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2553 * @mac should be large enough to hold @nmac Ethernet addresses, they are
2554 * stored consecutively so the space needed is @nmac * 6 bytes.
2555 * Returns a negative error number or the non-negative VI id.
2557 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2558 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2559 unsigned int *rss_size)
2564 memset(&c, 0, sizeof(c));
2565 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2566 FW_CMD_WRITE | FW_CMD_EXEC |
2567 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2568 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2569 c.portid_pkd = FW_VI_CMD_PORTID(port);
2572 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Copy back the FW-assigned primary MAC address. */
2577 memcpy(mac, c.mac, sizeof(c.mac));
/* Copy back the additional addresses at consecutive 6-byte offsets.
 * NOTE(review): these look like fall-through arms of a switch on
 * @nmac (highest count first); the switch header and case labels are
 * on elided lines -- confirm against the full source.
 */
2580 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
2582 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
2584 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
2586 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
/* Report the VI's RSS slice size and return the new VI id. */
2590 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
2591 return ntohs(c.viid_pkd);
2595 * t4_free_vi - free a virtual interface
2596 * @adap: the adapter
2597 * @mbox: mailbox to use for the FW command
2598 * @pf: the PF owning the VI
2599 * @vf: the VF owning the VI
2600 * @viid: virtual interface identifier
2602 * Free a previously allocated virtual interface.
2604 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2605 unsigned int vf, unsigned int viid)
2609 memset(&c, 0, sizeof(c));
2610 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2611 FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
/* FREE (rather than ALLOC) releases the VI identified by @viid. */
2613 c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
2614 c.viid_pkd = htons(FW_VI_CMD_VIID(viid));
2615 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2619 * t4_set_rxmode - set Rx properties of a virtual interface
2620 * @adap: the adapter
2621 * @mbox: mailbox to use for the FW command
2623 * @mtu: the new MTU or -1
2624 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2625 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2626 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
2627 * @sleep_ok: if true we may sleep while awaiting command completion
2629 * Sets Rx properties of a virtual interface.
2631 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2632 int mtu, int promisc, int all_multi, int bcast, bool sleep_ok)
2634 struct fw_vi_rxmode_cmd c;
2636 /* convert to FW values */
/* Callers pass -1 for "no change"; each assignment below replaces -1
 * with the FW "no change" sentinel.  NOTE(review): the "if (x < 0)"
 * guards for these assignments are on elided lines -- confirm.
 */
2638 mtu = FW_RXMODE_MTU_NO_CHG;
2640 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2642 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2644 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
2646 memset(&c, 0, sizeof(c));
2647 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2648 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2649 c.retval_len16 = htonl(FW_LEN16(c));
2650 c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2651 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2652 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2653 FW_VI_RXMODE_CMD_BROADCASTEN(bcast));
/* _meat variant lets the caller choose sleeping vs. polling waits. */
2654 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2658 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2659 * @adap: the adapter
2660 * @mbox: mailbox to use for the FW command
2662 * @free: if true any existing filters for this VI id are first removed
2663 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2664 * @addr: the MAC address(es)
2665 * @idx: where to store the index of each allocated filter
2666 * @hash: pointer to hash address filter bitmap
2667 * @sleep_ok: call is allowed to sleep
2669 * Allocates an exact-match filter for each of the supplied addresses and
2670 * sets it to the corresponding address. If @idx is not %NULL it should
2671 * have at least @naddr entries, each of which will be set to the index of
2672 * the filter allocated for the corresponding MAC address. If a filter
2673 * could not be allocated for an address its index is set to 0xffff.
2674 * If @hash is not %NULL addresses that fail to allocate an exact filter
2675 * are hashed and update the hash filter bitmap pointed at by @hash.
2677 * Returns a negative error number or the number of filters allocated.
2679 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2680 unsigned int viid, bool free, unsigned int naddr,
2681 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2684 struct fw_vi_mac_cmd c;
2685 struct fw_vi_mac_exact *p;
2690 memset(&c, 0, sizeof(c));
/* EXEC is needed only when existing filters are freed first. */
2691 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2692 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2693 FW_VI_MAC_CMD_VIID(viid));
/* Command length scales with the number of exact-match entries:
 * two entries per 16-byte unit, rounded up, plus the header.
 */
2694 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2695 FW_CMD_LEN16((naddr + 2) / 2));
/* Ask FW to pick a free filter slot (ADD_MAC) for each address. */
2697 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2698 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2699 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2700 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2703 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
/* Harvest per-address results: a returned index >= NEXACT_MAC means
 * no exact filter was allocated; such addresses fall back to the
 * inexact hash filter bitmap.
 */
2707 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2708 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2711 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2712 if (index < NEXACT_MAC)
2715 *hash |= (1 << hash_mac_addr(addr[i]));
2721 * t4_change_mac - modifies the exact-match filter for a MAC address
2722 * @adap: the adapter
2723 * @mbox: mailbox to use for the FW command
2725 * @idx: index of existing filter for old value of MAC address, or -1
2726 * @addr: the new MAC address value
2727 * @persist: whether a new MAC allocation should be persistent
2728 * @add_smt: if true also add the address to the HW SMT
2730 * Modifies an exact-match filter and sets it to the new MAC address.
2731 * Note that in general it is not possible to modify the value of a given
2732 * filter so the generic way to modify an address filter is to free the one
2733 * being used by the old address value and allocate a new filter for the
2734 * new address value. @idx can be -1 if the address is a new addition.
2736 * Returns a negative error number or the index of the filter with the new
2739 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2740 int idx, const u8 *addr, bool persist, bool add_smt)
2743 struct fw_vi_mac_cmd c;
2744 struct fw_vi_mac_exact *p = c.u.exact;
/* idx < 0 requests a new filter; FW picks the slot (optionally a
 * persistent one).
 */
2746 if (idx < 0) /* new allocation */
2747 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2748 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2750 memset(&c, 0, sizeof(c));
2751 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2752 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
/* Exactly one exact-match entry follows the header. */
2753 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2754 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2755 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2756 FW_VI_MAC_CMD_IDX(idx));
2757 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2759 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success return the filter index FW actually used.
 * NOTE(review): the handling for ret >= NEXACT_MAC (allocation
 * failure) is on an elided line -- confirm.
 */
2761 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2762 if (ret >= NEXACT_MAC)
2769 * t4_set_addr_hash - program the MAC inexact-match hash filter
2770 * @adap: the adapter
2771 * @mbox: mailbox to use for the FW command
2773 * @ucast: whether the hash filter should also match unicast addresses
2774 * @vec: the value to be written to the hash filter
2775 * @sleep_ok: call is allowed to sleep
2777 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2779 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2780 bool ucast, u64 vec, bool sleep_ok)
2782 struct fw_vi_mac_cmd c;
2784 memset(&c, 0, sizeof(c));
/* NOTE(review): FW_VI_ENABLE_CMD_VIID is used inside a FW_VI_MAC_CMD;
 * looks like a copy-paste from t4_enable_vi.  Presumably the VIID
 * field layout matches FW_VI_MAC_CMD_VIID so this works -- confirm.
 */
2785 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2786 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
2787 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2788 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
/* The 64-bit hash vector itself rides in the command payload. */
2790 c.u.hash.hashvec = cpu_to_be64(vec);
2791 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2795 * t4_enable_vi - enable/disable a virtual interface
2796 * @adap: the adapter
2797 * @mbox: mailbox to use for the FW command
2799 * @rx_en: 1=enable Rx, 0=disable Rx
2800 * @tx_en: 1=enable Tx, 0=disable Tx
2802 * Enables/disables a virtual interface.
2804 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2805 bool rx_en, bool tx_en)
2807 struct fw_vi_enable_cmd c;
2809 memset(&c, 0, sizeof(c));
2810 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2811 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
/* IEN = ingress (Rx) enable, EEN = egress (Tx) enable. */
2812 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2813 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2814 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2818 * t4_identify_port - identify a VI's port by blinking its LED
2819 * @adap: the adapter
2820 * @mbox: mailbox to use for the FW command
2822 * @nblinks: how many times to blink LED at 2.5 Hz
2824 * Identifies a VI's port by blinking its LED.
2826 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2827 unsigned int nblinks)
2829 struct fw_vi_enable_cmd c;
/* NOTE(review): unlike the sibling commands, @c is not memset before
 * the fields below are written -- remaining bytes hold stack garbage.
 * Confirm whether FW ignores the unused fields of this command.
 */
2831 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2832 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2833 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
/* Number of LED blinks; per kernel-doc these occur at 2.5 Hz. */
2834 c.blinkdur = htons(nblinks);
2835 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2839 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
2840 * @adap: the adapter
2841 * @mbox: mailbox to use for the FW command
2842 * @start: %true to enable the queues, %false to disable them
2843 * @pf: the PF owning the queues
2844 * @vf: the VF owning the queues
2845 * @iqid: ingress queue id
2846 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2847 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2849 * Starts or stops an ingress queue and its associated FLs, if any.
2851 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
2852 unsigned int pf, unsigned int vf, unsigned int iqid,
2853 unsigned int fl0id, unsigned int fl1id)
2857 memset(&c, 0, sizeof(c));
2858 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2859 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
/* Exactly one of IQSTART/IQSTOP is set, driven by @start. */
2861 c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
2862 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
/* Free-list ids of 0xffff mean "no FL attached" (per kernel-doc). */
2863 c.iqid = htons(iqid);
2864 c.fl0id = htons(fl0id);
2865 c.fl1id = htons(fl1id);
2866 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2870 * t4_iq_free - free an ingress queue and its FLs
2871 * @adap: the adapter
2872 * @mbox: mailbox to use for the FW command
2873 * @pf: the PF owning the queues
2874 * @vf: the VF owning the queues
2875 * @iqtype: the ingress queue type
2876 * @iqid: ingress queue id
2877 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2878 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2880 * Frees an ingress queue and its associated FLs, if any.
2882 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2883 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2884 unsigned int fl0id, unsigned int fl1id)
2888 memset(&c, 0, sizeof(c));
2889 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2890 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2892 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
/* Unlike start/stop, freeing needs the ingress queue type as well. */
2893 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2894 c.iqid = htons(iqid);
2895 c.fl0id = htons(fl0id);
2896 c.fl1id = htons(fl1id);
2897 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2901 * t4_eth_eq_free - free an Ethernet egress queue
2902 * @adap: the adapter
2903 * @mbox: mailbox to use for the FW command
2904 * @pf: the PF owning the queue
2905 * @vf: the VF owning the queue
2906 * @eqid: egress queue id
2908 * Frees an Ethernet egress queue.
2910 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2911 unsigned int vf, unsigned int eqid)
2913 struct fw_eq_eth_cmd c;
2915 memset(&c, 0, sizeof(c));
2916 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2917 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2918 FW_EQ_ETH_CMD_VFN(vf));
/* FREE releases the Ethernet egress queue identified by @eqid. */
2919 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2920 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2921 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2925 * t4_ctrl_eq_free - free a control egress queue
2926 * @adap: the adapter
2927 * @mbox: mailbox to use for the FW command
2928 * @pf: the PF owning the queue
2929 * @vf: the VF owning the queue
2930 * @eqid: egress queue id
2932 * Frees a control egress queue.
2934 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2935 unsigned int vf, unsigned int eqid)
2937 struct fw_eq_ctrl_cmd c;
2939 memset(&c, 0, sizeof(c));
2940 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2941 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2942 FW_EQ_CTRL_CMD_VFN(vf));
/* FREE releases the control egress queue identified by @eqid. */
2943 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2944 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2945 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2949 * t4_ofld_eq_free - free an offload egress queue
2950 * @adap: the adapter
2951 * @mbox: mailbox to use for the FW command
2952 * @pf: the PF owning the queue
2953 * @vf: the VF owning the queue
2954 * @eqid: egress queue id
2956 * Frees an offload egress queue.
2958 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2959 unsigned int vf, unsigned int eqid)
2961 struct fw_eq_ofld_cmd c;
2963 memset(&c, 0, sizeof(c));
2964 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2965 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2966 FW_EQ_OFLD_CMD_VFN(vf));
/* FREE releases the offload egress queue identified by @eqid. */
2967 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2968 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2969 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2973 * t4_handle_fw_rpl - process a FW reply message
2974 * @adap: the adapter
2975 * @rpl: start of the FW message
2977 * Processes a FW message, such as link state change messages.
2979 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
/* The opcode is the first byte of any FW message. */
2981 u8 opcode = *(const u8 *)rpl;
2983 if (opcode == FW_PORT_CMD) { /* link/module state change message */
2984 int speed = 0, fc = 0;
2985 const struct fw_port_cmd *p = (void *)rpl;
/* Map the FW channel id to the driver's port index. */
2986 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
2987 int port = adap->chan_map[chan];
2988 struct port_info *pi = adap2pinfo(adap, port);
2989 struct link_config *lc = &pi->link_cfg;
2990 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
2991 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
2992 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
/* Decode flow control and link speed from the status word.
 * NOTE(review): the assignments under these conditionals (fc |= ...,
 * speed = ...) are on elided lines except the 10G case -- confirm.
 */
2994 if (stat & FW_PORT_CMD_RXPAUSE)
2996 if (stat & FW_PORT_CMD_TXPAUSE)
2998 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
3000 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
3002 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
3003 speed = SPEED_10000;
/* Notify the OS layer only when link state actually changed. */
3005 if (link_ok != lc->link_ok || speed != lc->speed ||
3006 fc != lc->fc) { /* something changed */
3007 lc->link_ok = link_ok;
3010 t4_os_link_changed(adap, port, link_ok);
/* Separately report transceiver module changes. */
3012 if (mod != pi->mod_type) {
3014 t4_os_portmod_changed(adap, port);
/* Read the negotiated PCIe link parameters (speed and lane width) from
 * the Link Status register into @p.
 */
3020 static void __devinit get_pci_mode(struct adapter *adapter,
3021 struct pci_params *p)
3024 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
3027 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
/* CLS = current link speed; NLW = negotiated link width (bits 9:4). */
3029 p->speed = val & PCI_EXP_LNKSTA_CLS;
3030 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3035 * init_link_config - initialize a link's SW state
3036 * @lc: structure holding the link state
3037 * @caps: link capabilities
3039 * Initializes the SW state maintained for each link, including the link's
3040 * capabilities and default speed/flow-control/autonegotiation settings.
3042 static void __devinit init_link_config(struct link_config *lc,
3045 lc->supported = caps;
3046 lc->requested_speed = 0;
/* Default to symmetric pause (Rx+Tx flow control). */
3048 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
/* With autoneg capability: advertise what we support and let pause
 * settings be autonegotiated too; otherwise force everything off.
 */
3049 if (lc->supported & FW_PORT_CAP_ANEG) {
3050 lc->advertising = lc->supported & ADVERT_MASK;
3051 lc->autoneg = AUTONEG_ENABLE;
3052 lc->requested_fc |= PAUSE_AUTONEG;
3054 lc->advertising = 0;
3055 lc->autoneg = AUTONEG_DISABLE;
/* Probe whether the chip responds on its register interface: PL_WHOAMI
 * reads as all-ones while the device is not ready.  Returns 0 when the
 * device answers, -EIO otherwise.  NOTE(review): presumably a delay
 * sits between the two reads on an elided line -- confirm.
 */
3059 static int __devinit wait_dev_ready(struct adapter *adap)
3061 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3064 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3068 * t4_prep_adapter - prepare SW and HW for operation
3069 * @adapter: the adapter
3070 * @reset: if true perform a HW reset
3072 * Initialize adapter SW state for the various HW modules, set initial
3073 * values for some adapter tunables, take PHYs out of reset, and
3074 * initialize the MDIO interface.
3076 int __devinit t4_prep_adapter(struct adapter *adapter)
/* Make sure the chip answers register reads before touching it. */
3080 ret = wait_dev_ready(adapter);
3084 get_pci_mode(adapter, &adapter->params.pci);
3085 adapter->params.rev = t4_read_reg(adapter, PL_REV);
/* Pull adapter identity/config out of the VPD. */
3087 ret = get_vpd_params(adapter, &adapter->params.vpd);
3091 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3094 * Default port for debugging in case we can't reach FW.
/* Single-port defaults; overwritten later once FW reports the real
 * port vector (see t4_port_init).
 */
3096 adapter->params.nports = 1;
3097 adapter->params.portvec = 1;
3101 int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3105 struct fw_port_cmd c;
3107 memset(&c, 0, sizeof(c));
3109 for_each_port(adap, i) {
3110 unsigned int rss_size;
3111 struct port_info *p = adap2pinfo(adap, i);
3113 while ((adap->params.portvec & (1 << j)) == 0)
3116 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
3117 FW_CMD_REQUEST | FW_CMD_READ |
3118 FW_PORT_CMD_PORTID(j));
3119 c.action_to_len16 = htonl(
3120 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
3122 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3126 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
3133 p->rss_size = rss_size;
3134 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3135 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
3137 ret = ntohl(c.u.info.lstatus_to_modtype);
3138 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
3139 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
3140 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
3141 p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret);
3143 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));