cxgb4: Set MAC address from VPD when we can't contact firmware
[pandora-kernel.git] / drivers / net / ethernet / chelsio / cxgb4 / t4_hw.c
1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34
35 #include <linux/delay.h>
36 #include "cxgb4.h"
37 #include "t4_regs.h"
38 #include "t4_values.h"
39 #include "t4fw_api.h"
40
41 /**
42  *      t4_wait_op_done_val - wait until an operation is completed
43  *      @adapter: the adapter performing the operation
44  *      @reg: the register to check for completion
45  *      @mask: a single-bit field within @reg that indicates completion
46  *      @polarity: the value of the field when the operation is completed
47  *      @attempts: number of check iterations
48  *      @delay: delay in usecs between iterations
49  *      @valp: where to store the value of the register at completion time
50  *
51  *      Wait until an operation is completed by checking a bit in a register
52  *      up to @attempts times.  If @valp is not NULL the value of the register
53  *      at the time it indicated completion is stored there.  Returns 0 if the
54  *      operation completes and -EAGAIN otherwise.
55  */
56 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57                                int polarity, int attempts, int delay, u32 *valp)
58 {
59         while (1) {
60                 u32 val = t4_read_reg(adapter, reg);
61
62                 if (!!(val & mask) == polarity) {
63                         if (valp)
64                                 *valp = val;
65                         return 0;
66                 }
67                 if (--attempts == 0)
68                         return -EAGAIN;
69                 if (delay)
70                         udelay(delay);
71         }
72 }
73
74 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75                                   int polarity, int attempts, int delay)
76 {
77         return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78                                    delay, NULL);
79 }
80
81 /**
82  *      t4_set_reg_field - set a register field to a value
83  *      @adapter: the adapter to program
84  *      @addr: the register address
85  *      @mask: specifies the portion of the register to modify
86  *      @val: the new value for the register field
87  *
88  *      Sets a register field specified by the supplied mask to the
89  *      given value.
90  */
91 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92                       u32 val)
93 {
94         u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96         t4_write_reg(adapter, addr, v | val);
97         (void) t4_read_reg(adapter, addr);      /* flush */
98 }
99
100 /**
101  *      t4_read_indirect - read indirectly addressed registers
102  *      @adap: the adapter
103  *      @addr_reg: register holding the indirect address
104  *      @data_reg: register holding the value of the indirect register
105  *      @vals: where the read register values are stored
106  *      @nregs: how many indirect registers to read
107  *      @start_idx: index of first indirect register to read
108  *
109  *      Reads registers that are accessed indirectly through an address/data
110  *      register pair.
111  */
112 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113                              unsigned int data_reg, u32 *vals,
114                              unsigned int nregs, unsigned int start_idx)
115 {
116         while (nregs--) {
117                 t4_write_reg(adap, addr_reg, start_idx);
118                 *vals++ = t4_read_reg(adap, data_reg);
119                 start_idx++;
120         }
121 }
122
123 /**
124  *      t4_write_indirect - write indirectly addressed registers
125  *      @adap: the adapter
126  *      @addr_reg: register holding the indirect addresses
127  *      @data_reg: register holding the value for the indirect registers
128  *      @vals: values to write
129  *      @nregs: how many indirect registers to write
130  *      @start_idx: address of first indirect register to write
131  *
132  *      Writes a sequential block of registers that are accessed indirectly
133  *      through an address/data register pair.
134  */
135 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136                        unsigned int data_reg, const u32 *vals,
137                        unsigned int nregs, unsigned int start_idx)
138 {
139         while (nregs--) {
140                 t4_write_reg(adap, addr_reg, start_idx++);
141                 t4_write_reg(adap, data_reg, *vals++);
142         }
143 }
144
145 /*
146  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
147  * mechanism.  This guarantees that we get the real value even if we're
148  * operating within a Virtual Machine and the Hypervisor is trapping our
149  * Configuration Space accesses.
150  */
151 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
152 {
153         u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
154
155         if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
156                 req |= ENABLE_F;
157         else
158                 req |= T6_ENABLE_F;
159
160         if (is_t4(adap->params.chip))
161                 req |= LOCALCFG_F;
162
163         t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
164         *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
165
166         /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
167          * Configuration Space read.  (None of the other fields matter when
168          * ENABLE is 0 so a simple register write is easier than a
169          * read-modify-write via t4_set_reg_field().)
170          */
171         t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
172 }
173
174 /*
175  * t4_report_fw_error - report firmware error
176  * @adap: the adapter
177  *
178  * The adapter firmware can indicate error conditions to the host.
179  * If the firmware has indicated an error, print out the reason for
180  * the firmware error.
181  */
182 static void t4_report_fw_error(struct adapter *adap)
183 {
184         static const char *const reason[] = {
185                 "Crash",                        /* PCIE_FW_EVAL_CRASH */
186                 "During Device Preparation",    /* PCIE_FW_EVAL_PREP */
187                 "During Device Configuration",  /* PCIE_FW_EVAL_CONF */
188                 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
189                 "Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
190                 "Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
191                 "Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
192                 "Reserved",                     /* reserved */
193         };
194         u32 pcie_fw;
195
196         pcie_fw = t4_read_reg(adap, PCIE_FW_A);
197         if (pcie_fw & PCIE_FW_ERR_F)
198                 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
199                         reason[PCIE_FW_EVAL_G(pcie_fw)]);
200 }
201
202 /*
203  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
204  */
205 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
206                          u32 mbox_addr)
207 {
208         for ( ; nflit; nflit--, mbox_addr += 8)
209                 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
210 }
211
212 /*
213  * Handle a FW assertion reported in a mailbox.
214  */
215 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
216 {
217         struct fw_debug_cmd asrt;
218
219         get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
220         dev_alert(adap->pdev_dev,
221                   "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
222                   asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
223                   be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
224 }
225
226 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
227 {
228         dev_err(adap->pdev_dev,
229                 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
230                 (unsigned long long)t4_read_reg64(adap, data_reg),
231                 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
232                 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
233                 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
234                 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
235                 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
236                 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
237                 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
238 }
239
/**
 *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *	@timeout: time to wait for command to finish before timing out
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff
 *	otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/* Progressive backoff schedule, in milliseconds; once exhausted the
	 * last entry is reused for all remaining iterations.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);

	/* Commands must be a multiple of 16 bytes and fit in the mailbox */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	/* Try a few times to acquire ownership of the mailbox; the control
	 * register reports who currently owns it.
	 */
	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	/* Copy the command into the mailbox data registers, 64 bits at a
	 * time, then hand ownership to the firmware.
	 */
	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	/* Poll until the firmware hands the mailbox back to us or @timeout
	 * milliseconds elapse.  With @sleep_ok we back off progressively;
	 * otherwise we busy-wait in fixed steps.
	 */
	for (i = 0; i < timeout; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			/* Ownership returned without a valid message: clear
			 * the control register and keep waiting.
			 */
			if (!(v & MBMSGVALID_F)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			/* First flit of the reply carries the FW opcode and
			 * return value.  A FW_DEBUG_CMD here is an unsolicited
			 * firmware assertion, reported and turned into -EIO.
			 */
			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL_V(EIO);
			} else if (rpl) {
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			}

			if (FW_CMD_RETVAL_G((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_G((int)res);
		}
	}

	/* Timed out: dump the mailbox for diagnosis and report any error the
	 * firmware may have posted.
	 */
	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
340
341 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
342                     void *rpl, bool sleep_ok)
343 {
344         return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
345                                        FW_CMD_MAX_TIMEOUT);
346 }
347
348 /**
349  *      t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
350  *      @adap: the adapter
351  *      @win: PCI-E Memory Window to use
352  *      @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
353  *      @addr: address within indicated memory type
354  *      @len: amount of memory to transfer
355  *      @hbuf: host memory buffer
356  *      @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
357  *
358  *      Reads/writes an [almost] arbitrary memory region in the firmware: the
359  *      firmware memory address and host buffer must be aligned on 32-bit
360  *      boudaries; the length may be arbitrary.  The memory is transferred as
361  *      a raw byte sequence from/to the firmware's memory.  If this memory
362  *      contains data structures which contain multi-byte integers, it's the
363  *      caller's responsibility to perform appropriate byte order conversions.
364  */
365 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
366                  u32 len, void *hbuf, int dir)
367 {
368         u32 pos, offset, resid, memoffset;
369         u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
370         u32 *buf;
371
372         /* Argument sanity checks ...
373          */
374         if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
375                 return -EINVAL;
376         buf = (u32 *)hbuf;
377
378         /* It's convenient to be able to handle lengths which aren't a
379          * multiple of 32-bits because we often end up transferring files to
380          * the firmware.  So we'll handle that by normalizing the length here
381          * and then handling any residual transfer at the end.
382          */
383         resid = len & 0x3;
384         len -= resid;
385
386         /* Offset into the region of memory which is being accessed
387          * MEM_EDC0 = 0
388          * MEM_EDC1 = 1
389          * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
390          * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
391          */
392         edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
393         if (mtype != MEM_MC1)
394                 memoffset = (mtype * (edc_size * 1024 * 1024));
395         else {
396                 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
397                                                       MA_EXT_MEMORY0_BAR_A));
398                 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
399         }
400
401         /* Determine the PCIE_MEM_ACCESS_OFFSET */
402         addr = addr + memoffset;
403
404         /* Each PCI-E Memory Window is programmed with a window size -- or
405          * "aperture" -- which controls the granularity of its mapping onto
406          * adapter memory.  We need to grab that aperture in order to know
407          * how to use the specified window.  The window is also programmed
408          * with the base address of the Memory Window in BAR0's address
409          * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
410          * the address is relative to BAR0.
411          */
412         mem_reg = t4_read_reg(adap,
413                               PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
414                                                   win));
415         mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
416         mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
417         if (is_t4(adap->params.chip))
418                 mem_base -= adap->t4_bar0;
419         win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
420
421         /* Calculate our initial PCI-E Memory Window Position and Offset into
422          * that Window.
423          */
424         pos = addr & ~(mem_aperture-1);
425         offset = addr - pos;
426
427         /* Set up initial PCI-E Memory Window to cover the start of our
428          * transfer.  (Read it back to ensure that changes propagate before we
429          * attempt to use the new value.)
430          */
431         t4_write_reg(adap,
432                      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
433                      pos | win_pf);
434         t4_read_reg(adap,
435                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
436
437         /* Transfer data to/from the adapter as long as there's an integral
438          * number of 32-bit transfers to complete.
439          *
440          * A note on Endianness issues:
441          *
442          * The "register" reads and writes below from/to the PCI-E Memory
443          * Window invoke the standard adapter Big-Endian to PCI-E Link
444          * Little-Endian "swizzel."  As a result, if we have the following
445          * data in adapter memory:
446          *
447          *     Memory:  ... | b0 | b1 | b2 | b3 | ...
448          *     Address:      i+0  i+1  i+2  i+3
449          *
450          * Then a read of the adapter memory via the PCI-E Memory Window
451          * will yield:
452          *
453          *     x = readl(i)
454          *         31                  0
455          *         [ b3 | b2 | b1 | b0 ]
456          *
457          * If this value is stored into local memory on a Little-Endian system
458          * it will show up correctly in local memory as:
459          *
460          *     ( ..., b0, b1, b2, b3, ... )
461          *
462          * But on a Big-Endian system, the store will show up in memory
463          * incorrectly swizzled as:
464          *
465          *     ( ..., b3, b2, b1, b0, ... )
466          *
467          * So we need to account for this in the reads and writes to the
468          * PCI-E Memory Window below by undoing the register read/write
469          * swizzels.
470          */
471         while (len > 0) {
472                 if (dir == T4_MEMORY_READ)
473                         *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
474                                                 mem_base + offset));
475                 else
476                         t4_write_reg(adap, mem_base + offset,
477                                      (__force u32)cpu_to_le32(*buf++));
478                 offset += sizeof(__be32);
479                 len -= sizeof(__be32);
480
481                 /* If we've reached the end of our current window aperture,
482                  * move the PCI-E Memory Window on to the next.  Note that
483                  * doing this here after "len" may be 0 allows us to set up
484                  * the PCI-E Memory Window for a possible final residual
485                  * transfer below ...
486                  */
487                 if (offset == mem_aperture) {
488                         pos += mem_aperture;
489                         offset = 0;
490                         t4_write_reg(adap,
491                                 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
492                                                     win), pos | win_pf);
493                         t4_read_reg(adap,
494                                 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
495                                                     win));
496                 }
497         }
498
499         /* If the original transfer had a length which wasn't a multiple of
500          * 32-bits, now's where we need to finish off the transfer of the
501          * residual amount.  The PCI-E Memory Window has already been moved
502          * above (if necessary) to cover this final transfer.
503          */
504         if (resid) {
505                 union {
506                         u32 word;
507                         char byte[4];
508                 } last;
509                 unsigned char *bp;
510                 int i;
511
512                 if (dir == T4_MEMORY_READ) {
513                         last.word = le32_to_cpu(
514                                         (__force __le32)t4_read_reg(adap,
515                                                 mem_base + offset));
516                         for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
517                                 bp[i] = last.byte[i];
518                 } else {
519                         last.word = *buf;
520                         for (i = resid; i < 4; i++)
521                                 last.byte[i] = 0;
522                         t4_write_reg(adap, mem_base + offset,
523                                      (__force u32)cpu_to_le32(last.word));
524                 }
525         }
526
527         return 0;
528 }
529
530 /* Return the specified PCI-E Configuration Space register from our Physical
531  * Function.  We try first via a Firmware LDST Command since we prefer to let
532  * the firmware own all of these registers, but if that fails we go for it
533  * directly ourselves.
534  */
535 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
536 {
537         u32 val, ldst_addrspace;
538
539         /* If fw_attach != 0, construct and send the Firmware LDST Command to
540          * retrieve the specified PCI-E Configuration Space register.
541          */
542         struct fw_ldst_cmd ldst_cmd;
543         int ret;
544
545         memset(&ldst_cmd, 0, sizeof(ldst_cmd));
546         ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
547         ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
548                                                FW_CMD_REQUEST_F |
549                                                FW_CMD_READ_F |
550                                                ldst_addrspace);
551         ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
552         ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
553         ldst_cmd.u.pcie.ctrl_to_fn =
554                 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
555         ldst_cmd.u.pcie.r = reg;
556
557         /* If the LDST Command succeeds, return the result, otherwise
558          * fall through to reading it directly ourselves ...
559          */
560         ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
561                          &ldst_cmd);
562         if (ret == 0)
563                 val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
564         else
565                 /* Read the desired Configuration Space register via the PCI-E
566                  * Backdoor mechanism.
567                  */
568                 t4_hw_pci_read_cfg4(adap, reg, &val);
569         return val;
570 }
571
572 /* Get the window based on base passed to it.
573  * Window aperture is currently unhandled, but there is no use case for it
574  * right now
575  */
576 static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
577                          u32 memwin_base)
578 {
579         u32 ret;
580
581         if (is_t4(adap->params.chip)) {
582                 u32 bar0;
583
584                 /* Truncation intentional: we only read the bottom 32-bits of
585                  * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
586                  * mechanism to read BAR0 instead of using
587                  * pci_resource_start() because we could be operating from
588                  * within a Virtual Machine which is trapping our accesses to
589                  * our Configuration Space and we need to set up the PCI-E
590                  * Memory Window decoders with the actual addresses which will
591                  * be coming across the PCI-E link.
592                  */
593                 bar0 = t4_read_pcie_cfg4(adap, pci_base);
594                 bar0 &= pci_mask;
595                 adap->t4_bar0 = bar0;
596
597                 ret = bar0 + memwin_base;
598         } else {
599                 /* For T5, only relative offset inside the PCIe BAR is passed */
600                 ret = memwin_base;
601         }
602         return ret;
603 }
604
605 /* Get the default utility window (win0) used by everyone */
606 u32 t4_get_util_window(struct adapter *adap)
607 {
608         return t4_get_window(adap, PCI_BASE_ADDRESS_0,
609                              PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
610 }
611
612 /* Set up memory window for accessing adapter memory ranges.  (Read
613  * back MA register to ensure that changes propagate before we attempt
614  * to use the new values.)
615  */
616 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
617 {
618         t4_write_reg(adap,
619                      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
620                      memwin_base | BIR_V(0) |
621                      WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
622         t4_read_reg(adap,
623                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
624 }
625
626 /**
627  *      t4_get_regs_len - return the size of the chips register set
628  *      @adapter: the adapter
629  *
630  *      Returns the size of the chip's BAR0 register space.
631  */
632 unsigned int t4_get_regs_len(struct adapter *adapter)
633 {
634         unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
635
636         switch (chip_version) {
637         case CHELSIO_T4:
638                 return T4_REGMAP_SIZE;
639
640         case CHELSIO_T5:
641         case CHELSIO_T6:
642                 return T5_REGMAP_SIZE;
643         }
644
645         dev_err(adapter->pdev_dev,
646                 "Unsupported chip version %d\n", chip_version);
647         return 0;
648 }
649
650 /**
651  *      t4_get_regs - read chip registers into provided buffer
652  *      @adap: the adapter
653  *      @buf: register buffer
654  *      @buf_size: size (in bytes) of register buffer
655  *
656  *      If the provided register buffer isn't large enough for the chip's
657  *      full register range, the register dump will be truncated to the
658  *      register buffer's size.
659  */
660 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
661 {
662         static const unsigned int t4_reg_ranges[] = {
663                 0x1008, 0x1108,
664                 0x1180, 0x11b4,
665                 0x11fc, 0x123c,
666                 0x1300, 0x173c,
667                 0x1800, 0x18fc,
668                 0x3000, 0x305c,
669                 0x3068, 0x30d8,
670                 0x30e0, 0x5924,
671                 0x5960, 0x59d4,
672                 0x5a00, 0x5af8,
673                 0x6000, 0x6098,
674                 0x6100, 0x6150,
675                 0x6200, 0x6208,
676                 0x6240, 0x6248,
677                 0x6280, 0x6338,
678                 0x6370, 0x638c,
679                 0x6400, 0x643c,
680                 0x6500, 0x6524,
681                 0x6a00, 0x6a38,
682                 0x6a60, 0x6a78,
683                 0x6b00, 0x6b84,
684                 0x6bf0, 0x6c84,
685                 0x6cf0, 0x6d84,
686                 0x6df0, 0x6e84,
687                 0x6ef0, 0x6f84,
688                 0x6ff0, 0x7084,
689                 0x70f0, 0x7184,
690                 0x71f0, 0x7284,
691                 0x72f0, 0x7384,
692                 0x73f0, 0x7450,
693                 0x7500, 0x7530,
694                 0x7600, 0x761c,
695                 0x7680, 0x76cc,
696                 0x7700, 0x7798,
697                 0x77c0, 0x77fc,
698                 0x7900, 0x79fc,
699                 0x7b00, 0x7c38,
700                 0x7d00, 0x7efc,
701                 0x8dc0, 0x8e1c,
702                 0x8e30, 0x8e78,
703                 0x8ea0, 0x8f6c,
704                 0x8fc0, 0x9074,
705                 0x90fc, 0x90fc,
706                 0x9400, 0x9458,
707                 0x9600, 0x96bc,
708                 0x9800, 0x9808,
709                 0x9820, 0x983c,
710                 0x9850, 0x9864,
711                 0x9c00, 0x9c6c,
712                 0x9c80, 0x9cec,
713                 0x9d00, 0x9d6c,
714                 0x9d80, 0x9dec,
715                 0x9e00, 0x9e6c,
716                 0x9e80, 0x9eec,
717                 0x9f00, 0x9f6c,
718                 0x9f80, 0x9fec,
719                 0xd004, 0xd03c,
720                 0xdfc0, 0xdfe0,
721                 0xe000, 0xea7c,
722                 0xf000, 0x11110,
723                 0x11118, 0x11190,
724                 0x19040, 0x1906c,
725                 0x19078, 0x19080,
726                 0x1908c, 0x19124,
727                 0x19150, 0x191b0,
728                 0x191d0, 0x191e8,
729                 0x19238, 0x1924c,
730                 0x193f8, 0x19474,
731                 0x19490, 0x194f8,
732                 0x19800, 0x19f4c,
733                 0x1a000, 0x1a06c,
734                 0x1a0b0, 0x1a120,
735                 0x1a128, 0x1a138,
736                 0x1a190, 0x1a1c4,
737                 0x1a1fc, 0x1a1fc,
738                 0x1e040, 0x1e04c,
739                 0x1e284, 0x1e28c,
740                 0x1e2c0, 0x1e2c0,
741                 0x1e2e0, 0x1e2e0,
742                 0x1e300, 0x1e384,
743                 0x1e3c0, 0x1e3c8,
744                 0x1e440, 0x1e44c,
745                 0x1e684, 0x1e68c,
746                 0x1e6c0, 0x1e6c0,
747                 0x1e6e0, 0x1e6e0,
748                 0x1e700, 0x1e784,
749                 0x1e7c0, 0x1e7c8,
750                 0x1e840, 0x1e84c,
751                 0x1ea84, 0x1ea8c,
752                 0x1eac0, 0x1eac0,
753                 0x1eae0, 0x1eae0,
754                 0x1eb00, 0x1eb84,
755                 0x1ebc0, 0x1ebc8,
756                 0x1ec40, 0x1ec4c,
757                 0x1ee84, 0x1ee8c,
758                 0x1eec0, 0x1eec0,
759                 0x1eee0, 0x1eee0,
760                 0x1ef00, 0x1ef84,
761                 0x1efc0, 0x1efc8,
762                 0x1f040, 0x1f04c,
763                 0x1f284, 0x1f28c,
764                 0x1f2c0, 0x1f2c0,
765                 0x1f2e0, 0x1f2e0,
766                 0x1f300, 0x1f384,
767                 0x1f3c0, 0x1f3c8,
768                 0x1f440, 0x1f44c,
769                 0x1f684, 0x1f68c,
770                 0x1f6c0, 0x1f6c0,
771                 0x1f6e0, 0x1f6e0,
772                 0x1f700, 0x1f784,
773                 0x1f7c0, 0x1f7c8,
774                 0x1f840, 0x1f84c,
775                 0x1fa84, 0x1fa8c,
776                 0x1fac0, 0x1fac0,
777                 0x1fae0, 0x1fae0,
778                 0x1fb00, 0x1fb84,
779                 0x1fbc0, 0x1fbc8,
780                 0x1fc40, 0x1fc4c,
781                 0x1fe84, 0x1fe8c,
782                 0x1fec0, 0x1fec0,
783                 0x1fee0, 0x1fee0,
784                 0x1ff00, 0x1ff84,
785                 0x1ffc0, 0x1ffc8,
786                 0x20000, 0x2002c,
787                 0x20100, 0x2013c,
788                 0x20190, 0x201c8,
789                 0x20200, 0x20318,
790                 0x20400, 0x20528,
791                 0x20540, 0x20614,
792                 0x21000, 0x21040,
793                 0x2104c, 0x21060,
794                 0x210c0, 0x210ec,
795                 0x21200, 0x21268,
796                 0x21270, 0x21284,
797                 0x212fc, 0x21388,
798                 0x21400, 0x21404,
799                 0x21500, 0x21518,
800                 0x2152c, 0x2153c,
801                 0x21550, 0x21554,
802                 0x21600, 0x21600,
803                 0x21608, 0x21628,
804                 0x21630, 0x2163c,
805                 0x21700, 0x2171c,
806                 0x21780, 0x2178c,
807                 0x21800, 0x21c38,
808                 0x21c80, 0x21d7c,
809                 0x21e00, 0x21e04,
810                 0x22000, 0x2202c,
811                 0x22100, 0x2213c,
812                 0x22190, 0x221c8,
813                 0x22200, 0x22318,
814                 0x22400, 0x22528,
815                 0x22540, 0x22614,
816                 0x23000, 0x23040,
817                 0x2304c, 0x23060,
818                 0x230c0, 0x230ec,
819                 0x23200, 0x23268,
820                 0x23270, 0x23284,
821                 0x232fc, 0x23388,
822                 0x23400, 0x23404,
823                 0x23500, 0x23518,
824                 0x2352c, 0x2353c,
825                 0x23550, 0x23554,
826                 0x23600, 0x23600,
827                 0x23608, 0x23628,
828                 0x23630, 0x2363c,
829                 0x23700, 0x2371c,
830                 0x23780, 0x2378c,
831                 0x23800, 0x23c38,
832                 0x23c80, 0x23d7c,
833                 0x23e00, 0x23e04,
834                 0x24000, 0x2402c,
835                 0x24100, 0x2413c,
836                 0x24190, 0x241c8,
837                 0x24200, 0x24318,
838                 0x24400, 0x24528,
839                 0x24540, 0x24614,
840                 0x25000, 0x25040,
841                 0x2504c, 0x25060,
842                 0x250c0, 0x250ec,
843                 0x25200, 0x25268,
844                 0x25270, 0x25284,
845                 0x252fc, 0x25388,
846                 0x25400, 0x25404,
847                 0x25500, 0x25518,
848                 0x2552c, 0x2553c,
849                 0x25550, 0x25554,
850                 0x25600, 0x25600,
851                 0x25608, 0x25628,
852                 0x25630, 0x2563c,
853                 0x25700, 0x2571c,
854                 0x25780, 0x2578c,
855                 0x25800, 0x25c38,
856                 0x25c80, 0x25d7c,
857                 0x25e00, 0x25e04,
858                 0x26000, 0x2602c,
859                 0x26100, 0x2613c,
860                 0x26190, 0x261c8,
861                 0x26200, 0x26318,
862                 0x26400, 0x26528,
863                 0x26540, 0x26614,
864                 0x27000, 0x27040,
865                 0x2704c, 0x27060,
866                 0x270c0, 0x270ec,
867                 0x27200, 0x27268,
868                 0x27270, 0x27284,
869                 0x272fc, 0x27388,
870                 0x27400, 0x27404,
871                 0x27500, 0x27518,
872                 0x2752c, 0x2753c,
873                 0x27550, 0x27554,
874                 0x27600, 0x27600,
875                 0x27608, 0x27628,
876                 0x27630, 0x2763c,
877                 0x27700, 0x2771c,
878                 0x27780, 0x2778c,
879                 0x27800, 0x27c38,
880                 0x27c80, 0x27d7c,
881                 0x27e00, 0x27e04,
882         };
883
/*
 * T5 register address ranges, stored as flat {first, last} pairs.
 * Both bounds appear to be inclusive — see the single-register
 * entries such as (0x35ec, 0x35ec) and (0x50400, 0x50400).
 * Pairs are listed in ascending, non-overlapping address order.
 * NOTE(review): presumably consumed by the register-dump code that
 * selects a table per chip revision (cf. the sibling t4_reg_ranges
 * and t6_reg_ranges tables) — confirm the consumer treats the upper
 * bound as inclusive before adding or trimming entries here.
 */
884         static const unsigned int t5_reg_ranges[] = {
885                 0x1008, 0x1148,
886                 0x1180, 0x11b4,
887                 0x11fc, 0x123c,
888                 0x1280, 0x173c,
889                 0x1800, 0x18fc,
890                 0x3000, 0x3028,
891                 0x3068, 0x30d8,
892                 0x30e0, 0x30fc,
893                 0x3140, 0x357c,
894                 0x35a8, 0x35cc,
895                 0x35ec, 0x35ec,
896                 0x3600, 0x5624,
897                 0x56cc, 0x575c,
898                 0x580c, 0x5814,
899                 0x5890, 0x58bc,
900                 0x5940, 0x59dc,
901                 0x59fc, 0x5a18,
902                 0x5a60, 0x5a9c,
903                 0x5b94, 0x5bfc,
904                 0x6000, 0x6040,
905                 0x6058, 0x614c,
906                 0x7700, 0x7798,
907                 0x77c0, 0x78fc,
908                 0x7b00, 0x7c54,
909                 0x7d00, 0x7efc,
910                 0x8dc0, 0x8de0,
911                 0x8df8, 0x8e84,
912                 0x8ea0, 0x8f84,
913                 0x8fc0, 0x90f8,
914                 0x9400, 0x9470,
915                 0x9600, 0x96f4,
916                 0x9800, 0x9808,
917                 0x9820, 0x983c,
918                 0x9850, 0x9864,
919                 0x9c00, 0x9c6c,
920                 0x9c80, 0x9cec,
921                 0x9d00, 0x9d6c,
922                 0x9d80, 0x9dec,
923                 0x9e00, 0x9e6c,
924                 0x9e80, 0x9eec,
925                 0x9f00, 0x9f6c,
926                 0x9f80, 0xa020,
927                 0xd004, 0xd03c,
928                 0xdfc0, 0xdfe0,
929                 0xe000, 0x11088,
930                 0x1109c, 0x11110,
931                 0x11118, 0x1117c,
932                 0x11190, 0x11204,
933                 0x19040, 0x1906c,
934                 0x19078, 0x19080,
935                 0x1908c, 0x19124,
936                 0x19150, 0x191b0,
937                 0x191d0, 0x191e8,
938                 0x19238, 0x19290,
939                 0x193f8, 0x19474,
940                 0x19490, 0x194cc,
941                 0x194f0, 0x194f8,
942                 0x19c00, 0x19c60,
943                 0x19c94, 0x19e10,
944                 0x19e50, 0x19f34,
945                 0x19f40, 0x19f50,
946                 0x19f90, 0x19fe4,
947                 0x1a000, 0x1a06c,
948                 0x1a0b0, 0x1a120,
949                 0x1a128, 0x1a138,
950                 0x1a190, 0x1a1c4,
951                 0x1a1fc, 0x1a1fc,
952                 0x1e008, 0x1e00c,
953                 0x1e040, 0x1e04c,
954                 0x1e284, 0x1e290,
955                 0x1e2c0, 0x1e2c0,
956                 0x1e2e0, 0x1e2e0,
957                 0x1e300, 0x1e384,
958                 0x1e3c0, 0x1e3c8,
959                 0x1e408, 0x1e40c,
960                 0x1e440, 0x1e44c,
961                 0x1e684, 0x1e690,
962                 0x1e6c0, 0x1e6c0,
963                 0x1e6e0, 0x1e6e0,
964                 0x1e700, 0x1e784,
965                 0x1e7c0, 0x1e7c8,
966                 0x1e808, 0x1e80c,
967                 0x1e840, 0x1e84c,
968                 0x1ea84, 0x1ea90,
969                 0x1eac0, 0x1eac0,
970                 0x1eae0, 0x1eae0,
971                 0x1eb00, 0x1eb84,
972                 0x1ebc0, 0x1ebc8,
973                 0x1ec08, 0x1ec0c,
974                 0x1ec40, 0x1ec4c,
975                 0x1ee84, 0x1ee90,
976                 0x1eec0, 0x1eec0,
977                 0x1eee0, 0x1eee0,
978                 0x1ef00, 0x1ef84,
979                 0x1efc0, 0x1efc8,
980                 0x1f008, 0x1f00c,
981                 0x1f040, 0x1f04c,
982                 0x1f284, 0x1f290,
983                 0x1f2c0, 0x1f2c0,
984                 0x1f2e0, 0x1f2e0,
985                 0x1f300, 0x1f384,
986                 0x1f3c0, 0x1f3c8,
987                 0x1f408, 0x1f40c,
988                 0x1f440, 0x1f44c,
989                 0x1f684, 0x1f690,
990                 0x1f6c0, 0x1f6c0,
991                 0x1f6e0, 0x1f6e0,
992                 0x1f700, 0x1f784,
993                 0x1f7c0, 0x1f7c8,
994                 0x1f808, 0x1f80c,
995                 0x1f840, 0x1f84c,
996                 0x1fa84, 0x1fa90,
997                 0x1fac0, 0x1fac0,
998                 0x1fae0, 0x1fae0,
999                 0x1fb00, 0x1fb84,
1000                 0x1fbc0, 0x1fbc8,
1001                 0x1fc08, 0x1fc0c,
1002                 0x1fc40, 0x1fc4c,
1003                 0x1fe84, 0x1fe90,
1004                 0x1fec0, 0x1fec0,
1005                 0x1fee0, 0x1fee0,
1006                 0x1ff00, 0x1ff84,
1007                 0x1ffc0, 0x1ffc8,
1008                 0x30000, 0x30030,
1009                 0x30100, 0x30144,
1010                 0x30190, 0x301d0,
1011                 0x30200, 0x30318,
1012                 0x30400, 0x3052c,
1013                 0x30540, 0x3061c,
1014                 0x30800, 0x30834,
1015                 0x308c0, 0x30908,
1016                 0x30910, 0x309ac,
1017                 0x30a00, 0x30a2c,
1018                 0x30a44, 0x30a50,
1019                 0x30a74, 0x30c24,
1020                 0x30d00, 0x30d00,
1021                 0x30d08, 0x30d14,
1022                 0x30d1c, 0x30d20,
1023                 0x30d3c, 0x30d50,
1024                 0x31200, 0x3120c,
1025                 0x31220, 0x31220,
1026                 0x31240, 0x31240,
1027                 0x31600, 0x3160c,
1028                 0x31a00, 0x31a1c,
1029                 0x31e00, 0x31e20,
1030                 0x31e38, 0x31e3c,
1031                 0x31e80, 0x31e80,
1032                 0x31e88, 0x31ea8,
1033                 0x31eb0, 0x31eb4,
1034                 0x31ec8, 0x31ed4,
1035                 0x31fb8, 0x32004,
1036                 0x32200, 0x32200,
1037                 0x32208, 0x32240,
1038                 0x32248, 0x32280,
1039                 0x32288, 0x322c0,
1040                 0x322c8, 0x322fc,
1041                 0x32600, 0x32630,
1042                 0x32a00, 0x32abc,
1043                 0x32b00, 0x32b70,
1044                 0x33000, 0x33048,
1045                 0x33060, 0x3309c,
1046                 0x330f0, 0x33148,
1047                 0x33160, 0x3319c,
1048                 0x331f0, 0x332e4,
1049                 0x332f8, 0x333e4,
1050                 0x333f8, 0x33448,
1051                 0x33460, 0x3349c,
1052                 0x334f0, 0x33548,
1053                 0x33560, 0x3359c,
1054                 0x335f0, 0x336e4,
1055                 0x336f8, 0x337e4,
1056                 0x337f8, 0x337fc,
1057                 0x33814, 0x33814,
1058                 0x3382c, 0x3382c,
1059                 0x33880, 0x3388c,
1060                 0x338e8, 0x338ec,
1061                 0x33900, 0x33948,
1062                 0x33960, 0x3399c,
1063                 0x339f0, 0x33ae4,
1064                 0x33af8, 0x33b10,
1065                 0x33b28, 0x33b28,
1066                 0x33b3c, 0x33b50,
1067                 0x33bf0, 0x33c10,
1068                 0x33c28, 0x33c28,
1069                 0x33c3c, 0x33c50,
1070                 0x33cf0, 0x33cfc,
1071                 0x34000, 0x34030,
1072                 0x34100, 0x34144,
1073                 0x34190, 0x341d0,
1074                 0x34200, 0x34318,
1075                 0x34400, 0x3452c,
1076                 0x34540, 0x3461c,
1077                 0x34800, 0x34834,
1078                 0x348c0, 0x34908,
1079                 0x34910, 0x349ac,
1080                 0x34a00, 0x34a2c,
1081                 0x34a44, 0x34a50,
1082                 0x34a74, 0x34c24,
1083                 0x34d00, 0x34d00,
1084                 0x34d08, 0x34d14,
1085                 0x34d1c, 0x34d20,
1086                 0x34d3c, 0x34d50,
1087                 0x35200, 0x3520c,
1088                 0x35220, 0x35220,
1089                 0x35240, 0x35240,
1090                 0x35600, 0x3560c,
1091                 0x35a00, 0x35a1c,
1092                 0x35e00, 0x35e20,
1093                 0x35e38, 0x35e3c,
1094                 0x35e80, 0x35e80,
1095                 0x35e88, 0x35ea8,
1096                 0x35eb0, 0x35eb4,
1097                 0x35ec8, 0x35ed4,
1098                 0x35fb8, 0x36004,
1099                 0x36200, 0x36200,
1100                 0x36208, 0x36240,
1101                 0x36248, 0x36280,
1102                 0x36288, 0x362c0,
1103                 0x362c8, 0x362fc,
1104                 0x36600, 0x36630,
1105                 0x36a00, 0x36abc,
1106                 0x36b00, 0x36b70,
1107                 0x37000, 0x37048,
1108                 0x37060, 0x3709c,
1109                 0x370f0, 0x37148,
1110                 0x37160, 0x3719c,
1111                 0x371f0, 0x372e4,
1112                 0x372f8, 0x373e4,
1113                 0x373f8, 0x37448,
1114                 0x37460, 0x3749c,
1115                 0x374f0, 0x37548,
1116                 0x37560, 0x3759c,
1117                 0x375f0, 0x376e4,
1118                 0x376f8, 0x377e4,
1119                 0x377f8, 0x377fc,
1120                 0x37814, 0x37814,
1121                 0x3782c, 0x3782c,
1122                 0x37880, 0x3788c,
1123                 0x378e8, 0x378ec,
1124                 0x37900, 0x37948,
1125                 0x37960, 0x3799c,
1126                 0x379f0, 0x37ae4,
1127                 0x37af8, 0x37b10,
1128                 0x37b28, 0x37b28,
1129                 0x37b3c, 0x37b50,
1130                 0x37bf0, 0x37c10,
1131                 0x37c28, 0x37c28,
1132                 0x37c3c, 0x37c50,
1133                 0x37cf0, 0x37cfc,
1134                 0x38000, 0x38030,
1135                 0x38100, 0x38144,
1136                 0x38190, 0x381d0,
1137                 0x38200, 0x38318,
1138                 0x38400, 0x3852c,
1139                 0x38540, 0x3861c,
1140                 0x38800, 0x38834,
1141                 0x388c0, 0x38908,
1142                 0x38910, 0x389ac,
1143                 0x38a00, 0x38a2c,
1144                 0x38a44, 0x38a50,
1145                 0x38a74, 0x38c24,
1146                 0x38d00, 0x38d00,
1147                 0x38d08, 0x38d14,
1148                 0x38d1c, 0x38d20,
1149                 0x38d3c, 0x38d50,
1150                 0x39200, 0x3920c,
1151                 0x39220, 0x39220,
1152                 0x39240, 0x39240,
1153                 0x39600, 0x3960c,
1154                 0x39a00, 0x39a1c,
1155                 0x39e00, 0x39e20,
1156                 0x39e38, 0x39e3c,
1157                 0x39e80, 0x39e80,
1158                 0x39e88, 0x39ea8,
1159                 0x39eb0, 0x39eb4,
1160                 0x39ec8, 0x39ed4,
1161                 0x39fb8, 0x3a004,
1162                 0x3a200, 0x3a200,
1163                 0x3a208, 0x3a240,
1164                 0x3a248, 0x3a280,
1165                 0x3a288, 0x3a2c0,
1166                 0x3a2c8, 0x3a2fc,
1167                 0x3a600, 0x3a630,
1168                 0x3aa00, 0x3aabc,
1169                 0x3ab00, 0x3ab70,
1170                 0x3b000, 0x3b048,
1171                 0x3b060, 0x3b09c,
1172                 0x3b0f0, 0x3b148,
1173                 0x3b160, 0x3b19c,
1174                 0x3b1f0, 0x3b2e4,
1175                 0x3b2f8, 0x3b3e4,
1176                 0x3b3f8, 0x3b448,
1177                 0x3b460, 0x3b49c,
1178                 0x3b4f0, 0x3b548,
1179                 0x3b560, 0x3b59c,
1180                 0x3b5f0, 0x3b6e4,
1181                 0x3b6f8, 0x3b7e4,
1182                 0x3b7f8, 0x3b7fc,
1183                 0x3b814, 0x3b814,
1184                 0x3b82c, 0x3b82c,
1185                 0x3b880, 0x3b88c,
1186                 0x3b8e8, 0x3b8ec,
1187                 0x3b900, 0x3b948,
1188                 0x3b960, 0x3b99c,
1189                 0x3b9f0, 0x3bae4,
1190                 0x3baf8, 0x3bb10,
1191                 0x3bb28, 0x3bb28,
1192                 0x3bb3c, 0x3bb50,
1193                 0x3bbf0, 0x3bc10,
1194                 0x3bc28, 0x3bc28,
1195                 0x3bc3c, 0x3bc50,
1196                 0x3bcf0, 0x3bcfc,
1197                 0x3c000, 0x3c030,
1198                 0x3c100, 0x3c144,
1199                 0x3c190, 0x3c1d0,
1200                 0x3c200, 0x3c318,
1201                 0x3c400, 0x3c52c,
1202                 0x3c540, 0x3c61c,
1203                 0x3c800, 0x3c834,
1204                 0x3c8c0, 0x3c908,
1205                 0x3c910, 0x3c9ac,
1206                 0x3ca00, 0x3ca2c,
1207                 0x3ca44, 0x3ca50,
1208                 0x3ca74, 0x3cc24,
1209                 0x3cd00, 0x3cd00,
1210                 0x3cd08, 0x3cd14,
1211                 0x3cd1c, 0x3cd20,
1212                 0x3cd3c, 0x3cd50,
1213                 0x3d200, 0x3d20c,
1214                 0x3d220, 0x3d220,
1215                 0x3d240, 0x3d240,
1216                 0x3d600, 0x3d60c,
1217                 0x3da00, 0x3da1c,
1218                 0x3de00, 0x3de20,
1219                 0x3de38, 0x3de3c,
1220                 0x3de80, 0x3de80,
1221                 0x3de88, 0x3dea8,
1222                 0x3deb0, 0x3deb4,
1223                 0x3dec8, 0x3ded4,
1224                 0x3dfb8, 0x3e004,
1225                 0x3e200, 0x3e200,
1226                 0x3e208, 0x3e240,
1227                 0x3e248, 0x3e280,
1228                 0x3e288, 0x3e2c0,
1229                 0x3e2c8, 0x3e2fc,
1230                 0x3e600, 0x3e630,
1231                 0x3ea00, 0x3eabc,
1232                 0x3eb00, 0x3eb70,
1233                 0x3f000, 0x3f048,
1234                 0x3f060, 0x3f09c,
1235                 0x3f0f0, 0x3f148,
1236                 0x3f160, 0x3f19c,
1237                 0x3f1f0, 0x3f2e4,
1238                 0x3f2f8, 0x3f3e4,
1239                 0x3f3f8, 0x3f448,
1240                 0x3f460, 0x3f49c,
1241                 0x3f4f0, 0x3f548,
1242                 0x3f560, 0x3f59c,
1243                 0x3f5f0, 0x3f6e4,
1244                 0x3f6f8, 0x3f7e4,
1245                 0x3f7f8, 0x3f7fc,
1246                 0x3f814, 0x3f814,
1247                 0x3f82c, 0x3f82c,
1248                 0x3f880, 0x3f88c,
1249                 0x3f8e8, 0x3f8ec,
1250                 0x3f900, 0x3f948,
1251                 0x3f960, 0x3f99c,
1252                 0x3f9f0, 0x3fae4,
1253                 0x3faf8, 0x3fb10,
1254                 0x3fb28, 0x3fb28,
1255                 0x3fb3c, 0x3fb50,
1256                 0x3fbf0, 0x3fc10,
1257                 0x3fc28, 0x3fc28,
1258                 0x3fc3c, 0x3fc50,
1259                 0x3fcf0, 0x3fcfc,
1260                 0x40000, 0x4000c,
1261                 0x40040, 0x40068,
1262                 0x4007c, 0x40144,
1263                 0x40180, 0x4018c,
1264                 0x40200, 0x40298,
1265                 0x402ac, 0x4033c,
1266                 0x403f8, 0x403fc,
1267                 0x41304, 0x413c4,
1268                 0x41400, 0x4141c,
1269                 0x41480, 0x414d0,
1270                 0x44000, 0x44078,
1271                 0x440c0, 0x44278,
1272                 0x442c0, 0x44478,
1273                 0x444c0, 0x44678,
1274                 0x446c0, 0x44878,
1275                 0x448c0, 0x449fc,
1276                 0x45000, 0x45068,
1277                 0x45080, 0x45084,
1278                 0x450a0, 0x450b0,
1279                 0x45200, 0x45268,
1280                 0x45280, 0x45284,
1281                 0x452a0, 0x452b0,
1282                 0x460c0, 0x460e4,
1283                 0x47000, 0x4708c,
1284                 0x47200, 0x47250,
1285                 0x47400, 0x47420,
1286                 0x47600, 0x47618,
1287                 0x47800, 0x47814,
1288                 0x48000, 0x4800c,
1289                 0x48040, 0x48068,
1290                 0x4807c, 0x48144,
1291                 0x48180, 0x4818c,
1292                 0x48200, 0x48298,
1293                 0x482ac, 0x4833c,
1294                 0x483f8, 0x483fc,
1295                 0x49304, 0x493c4,
1296                 0x49400, 0x4941c,
1297                 0x49480, 0x494d0,
1298                 0x4c000, 0x4c078,
1299                 0x4c0c0, 0x4c278,
1300                 0x4c2c0, 0x4c478,
1301                 0x4c4c0, 0x4c678,
1302                 0x4c6c0, 0x4c878,
1303                 0x4c8c0, 0x4c9fc,
1304                 0x4d000, 0x4d068,
1305                 0x4d080, 0x4d084,
1306                 0x4d0a0, 0x4d0b0,
1307                 0x4d200, 0x4d268,
1308                 0x4d280, 0x4d284,
1309                 0x4d2a0, 0x4d2b0,
1310                 0x4e0c0, 0x4e0e4,
1311                 0x4f000, 0x4f08c,
1312                 0x4f200, 0x4f250,
1313                 0x4f400, 0x4f420,
1314                 0x4f600, 0x4f618,
1315                 0x4f800, 0x4f814,
1316                 0x50000, 0x500cc,
1317                 0x50400, 0x50400,
1318                 0x50800, 0x508cc,
1319                 0x50c00, 0x50c00,
1320                 0x51000, 0x5101c,
1321                 0x51300, 0x51308,
1322         };
1323
1324         static const unsigned int t6_reg_ranges[] = {
1325                 0x1008, 0x114c,
1326                 0x1180, 0x11b4,
1327                 0x11fc, 0x1250,
1328                 0x1280, 0x133c,
1329                 0x1800, 0x18fc,
1330                 0x3000, 0x302c,
1331                 0x3060, 0x30d8,
1332                 0x30e0, 0x30fc,
1333                 0x3140, 0x357c,
1334                 0x35a8, 0x35cc,
1335                 0x35ec, 0x35ec,
1336                 0x3600, 0x5624,
1337                 0x56cc, 0x575c,
1338                 0x580c, 0x5814,
1339                 0x5890, 0x58bc,
1340                 0x5940, 0x595c,
1341                 0x5980, 0x598c,
1342                 0x59b0, 0x59dc,
1343                 0x59fc, 0x5a18,
1344                 0x5a60, 0x5a6c,
1345                 0x5a80, 0x5a9c,
1346                 0x5b94, 0x5bfc,
1347                 0x5c10, 0x5ec0,
1348                 0x5ec8, 0x5ec8,
1349                 0x6000, 0x6040,
1350                 0x6058, 0x6154,
1351                 0x7700, 0x7798,
1352                 0x77c0, 0x7880,
1353                 0x78cc, 0x78fc,
1354                 0x7b00, 0x7c54,
1355                 0x7d00, 0x7efc,
1356                 0x8dc0, 0x8de0,
1357                 0x8df8, 0x8e84,
1358                 0x8ea0, 0x8f88,
1359                 0x8fb8, 0x911c,
1360                 0x9400, 0x9470,
1361                 0x9600, 0x971c,
1362                 0x9800, 0x9808,
1363                 0x9820, 0x983c,
1364                 0x9850, 0x9864,
1365                 0x9c00, 0x9c6c,
1366                 0x9c80, 0x9cec,
1367                 0x9d00, 0x9d6c,
1368                 0x9d80, 0x9dec,
1369                 0x9e00, 0x9e6c,
1370                 0x9e80, 0x9eec,
1371                 0x9f00, 0x9f6c,
1372                 0x9f80, 0xa020,
1373                 0xd004, 0xd03c,
1374                 0xdfc0, 0xdfe0,
1375                 0xe000, 0xf008,
1376                 0x11000, 0x11014,
1377                 0x11048, 0x11110,
1378                 0x11118, 0x1117c,
1379                 0x11190, 0x11260,
1380                 0x11300, 0x1130c,
1381                 0x12000, 0x1205c,
1382                 0x19040, 0x1906c,
1383                 0x19078, 0x19080,
1384                 0x1908c, 0x19124,
1385                 0x19150, 0x191b0,
1386                 0x191d0, 0x191e8,
1387                 0x19238, 0x192b8,
1388                 0x193f8, 0x19474,
1389                 0x19490, 0x194cc,
1390                 0x194f0, 0x194f8,
1391                 0x19c00, 0x19c80,
1392                 0x19c94, 0x19cbc,
1393                 0x19ce4, 0x19d28,
1394                 0x19d50, 0x19d78,
1395                 0x19d94, 0x19dc8,
1396                 0x19df0, 0x19e10,
1397                 0x19e50, 0x19e6c,
1398                 0x19ea0, 0x19f34,
1399                 0x19f40, 0x19f50,
1400                 0x19f90, 0x19fac,
1401                 0x19fc4, 0x19fe4,
1402                 0x1a000, 0x1a06c,
1403                 0x1a0b0, 0x1a120,
1404                 0x1a128, 0x1a138,
1405                 0x1a190, 0x1a1c4,
1406                 0x1a1fc, 0x1a1fc,
1407                 0x1e008, 0x1e00c,
1408                 0x1e040, 0x1e04c,
1409                 0x1e284, 0x1e290,
1410                 0x1e2c0, 0x1e2c0,
1411                 0x1e2e0, 0x1e2e0,
1412                 0x1e300, 0x1e384,
1413                 0x1e3c0, 0x1e3c8,
1414                 0x1e408, 0x1e40c,
1415                 0x1e440, 0x1e44c,
1416                 0x1e684, 0x1e690,
1417                 0x1e6c0, 0x1e6c0,
1418                 0x1e6e0, 0x1e6e0,
1419                 0x1e700, 0x1e784,
1420                 0x1e7c0, 0x1e7c8,
1421                 0x1e808, 0x1e80c,
1422                 0x1e840, 0x1e84c,
1423                 0x1ea84, 0x1ea90,
1424                 0x1eac0, 0x1eac0,
1425                 0x1eae0, 0x1eae0,
1426                 0x1eb00, 0x1eb84,
1427                 0x1ebc0, 0x1ebc8,
1428                 0x1ec08, 0x1ec0c,
1429                 0x1ec40, 0x1ec4c,
1430                 0x1ee84, 0x1ee90,
1431                 0x1eec0, 0x1eec0,
1432                 0x1eee0, 0x1eee0,
1433                 0x1ef00, 0x1ef84,
1434                 0x1efc0, 0x1efc8,
1435                 0x1f008, 0x1f00c,
1436                 0x1f040, 0x1f04c,
1437                 0x1f284, 0x1f290,
1438                 0x1f2c0, 0x1f2c0,
1439                 0x1f2e0, 0x1f2e0,
1440                 0x1f300, 0x1f384,
1441                 0x1f3c0, 0x1f3c8,
1442                 0x1f408, 0x1f40c,
1443                 0x1f440, 0x1f44c,
1444                 0x1f684, 0x1f690,
1445                 0x1f6c0, 0x1f6c0,
1446                 0x1f6e0, 0x1f6e0,
1447                 0x1f700, 0x1f784,
1448                 0x1f7c0, 0x1f7c8,
1449                 0x1f808, 0x1f80c,
1450                 0x1f840, 0x1f84c,
1451                 0x1fa84, 0x1fa90,
1452                 0x1fac0, 0x1fac0,
1453                 0x1fae0, 0x1fae0,
1454                 0x1fb00, 0x1fb84,
1455                 0x1fbc0, 0x1fbc8,
1456                 0x1fc08, 0x1fc0c,
1457                 0x1fc40, 0x1fc4c,
1458                 0x1fe84, 0x1fe90,
1459                 0x1fec0, 0x1fec0,
1460                 0x1fee0, 0x1fee0,
1461                 0x1ff00, 0x1ff84,
1462                 0x1ffc0, 0x1ffc8,
1463                 0x30000, 0x30070,
1464                 0x30100, 0x3015c,
1465                 0x30190, 0x301d0,
1466                 0x30200, 0x30318,
1467                 0x30400, 0x3052c,
1468                 0x30540, 0x3061c,
1469                 0x30800, 0x3088c,
1470                 0x308c0, 0x30908,
1471                 0x30910, 0x309b8,
1472                 0x30a00, 0x30a04,
1473                 0x30a0c, 0x30a2c,
1474                 0x30a44, 0x30a50,
1475                 0x30a74, 0x30c24,
1476                 0x30d00, 0x30d3c,
1477                 0x30d44, 0x30d7c,
1478                 0x30de0, 0x30de0,
1479                 0x30e00, 0x30ed4,
1480                 0x30f00, 0x30fa4,
1481                 0x30fc0, 0x30fc4,
1482                 0x31000, 0x31004,
1483                 0x31080, 0x310fc,
1484                 0x31208, 0x31220,
1485                 0x3123c, 0x31254,
1486                 0x31300, 0x31300,
1487                 0x31308, 0x3131c,
1488                 0x31338, 0x3133c,
1489                 0x31380, 0x31380,
1490                 0x31388, 0x313a8,
1491                 0x313b4, 0x313b4,
1492                 0x31400, 0x31420,
1493                 0x31438, 0x3143c,
1494                 0x31480, 0x31480,
1495                 0x314a8, 0x314a8,
1496                 0x314b0, 0x314b4,
1497                 0x314c8, 0x314d4,
1498                 0x31a40, 0x31a4c,
1499                 0x31af0, 0x31b20,
1500                 0x31b38, 0x31b3c,
1501                 0x31b80, 0x31b80,
1502                 0x31ba8, 0x31ba8,
1503                 0x31bb0, 0x31bb4,
1504                 0x31bc8, 0x31bd4,
1505                 0x32140, 0x3218c,
1506                 0x321f0, 0x32200,
1507                 0x32218, 0x32218,
1508                 0x32400, 0x32400,
1509                 0x32408, 0x3241c,
1510                 0x32618, 0x32620,
1511                 0x32664, 0x32664,
1512                 0x326a8, 0x326a8,
1513                 0x326ec, 0x326ec,
1514                 0x32a00, 0x32abc,
1515                 0x32b00, 0x32b78,
1516                 0x32c00, 0x32c00,
1517                 0x32c08, 0x32c3c,
1518                 0x32e00, 0x32e2c,
1519                 0x32f00, 0x32f2c,
1520                 0x33000, 0x330ac,
1521                 0x330c0, 0x331ac,
1522                 0x331c0, 0x332c4,
1523                 0x332e4, 0x333c4,
1524                 0x333e4, 0x334ac,
1525                 0x334c0, 0x335ac,
1526                 0x335c0, 0x336c4,
1527                 0x336e4, 0x337c4,
1528                 0x337e4, 0x337fc,
1529                 0x33814, 0x33814,
1530                 0x33854, 0x33868,
1531                 0x33880, 0x3388c,
1532                 0x338c0, 0x338d0,
1533                 0x338e8, 0x338ec,
1534                 0x33900, 0x339ac,
1535                 0x339c0, 0x33ac4,
1536                 0x33ae4, 0x33b10,
1537                 0x33b24, 0x33b50,
1538                 0x33bf0, 0x33c10,
1539                 0x33c24, 0x33c50,
1540                 0x33cf0, 0x33cfc,
1541                 0x34000, 0x34070,
1542                 0x34100, 0x3415c,
1543                 0x34190, 0x341d0,
1544                 0x34200, 0x34318,
1545                 0x34400, 0x3452c,
1546                 0x34540, 0x3461c,
1547                 0x34800, 0x3488c,
1548                 0x348c0, 0x34908,
1549                 0x34910, 0x349b8,
1550                 0x34a00, 0x34a04,
1551                 0x34a0c, 0x34a2c,
1552                 0x34a44, 0x34a50,
1553                 0x34a74, 0x34c24,
1554                 0x34d00, 0x34d3c,
1555                 0x34d44, 0x34d7c,
1556                 0x34de0, 0x34de0,
1557                 0x34e00, 0x34ed4,
1558                 0x34f00, 0x34fa4,
1559                 0x34fc0, 0x34fc4,
1560                 0x35000, 0x35004,
1561                 0x35080, 0x350fc,
1562                 0x35208, 0x35220,
1563                 0x3523c, 0x35254,
1564                 0x35300, 0x35300,
1565                 0x35308, 0x3531c,
1566                 0x35338, 0x3533c,
1567                 0x35380, 0x35380,
1568                 0x35388, 0x353a8,
1569                 0x353b4, 0x353b4,
1570                 0x35400, 0x35420,
1571                 0x35438, 0x3543c,
1572                 0x35480, 0x35480,
1573                 0x354a8, 0x354a8,
1574                 0x354b0, 0x354b4,
1575                 0x354c8, 0x354d4,
1576                 0x35a40, 0x35a4c,
1577                 0x35af0, 0x35b20,
1578                 0x35b38, 0x35b3c,
1579                 0x35b80, 0x35b80,
1580                 0x35ba8, 0x35ba8,
1581                 0x35bb0, 0x35bb4,
1582                 0x35bc8, 0x35bd4,
1583                 0x36140, 0x3618c,
1584                 0x361f0, 0x36200,
1585                 0x36218, 0x36218,
1586                 0x36400, 0x36400,
1587                 0x36408, 0x3641c,
1588                 0x36618, 0x36620,
1589                 0x36664, 0x36664,
1590                 0x366a8, 0x366a8,
1591                 0x366ec, 0x366ec,
1592                 0x36a00, 0x36abc,
1593                 0x36b00, 0x36b78,
1594                 0x36c00, 0x36c00,
1595                 0x36c08, 0x36c3c,
1596                 0x36e00, 0x36e2c,
1597                 0x36f00, 0x36f2c,
1598                 0x37000, 0x370ac,
1599                 0x370c0, 0x371ac,
1600                 0x371c0, 0x372c4,
1601                 0x372e4, 0x373c4,
1602                 0x373e4, 0x374ac,
1603                 0x374c0, 0x375ac,
1604                 0x375c0, 0x376c4,
1605                 0x376e4, 0x377c4,
1606                 0x377e4, 0x377fc,
1607                 0x37814, 0x37814,
1608                 0x37854, 0x37868,
1609                 0x37880, 0x3788c,
1610                 0x378c0, 0x378d0,
1611                 0x378e8, 0x378ec,
1612                 0x37900, 0x379ac,
1613                 0x379c0, 0x37ac4,
1614                 0x37ae4, 0x37b10,
1615                 0x37b24, 0x37b50,
1616                 0x37bf0, 0x37c10,
1617                 0x37c24, 0x37c50,
1618                 0x37cf0, 0x37cfc,
1619                 0x40040, 0x40040,
1620                 0x40080, 0x40084,
1621                 0x40100, 0x40100,
1622                 0x40140, 0x401bc,
1623                 0x40200, 0x40214,
1624                 0x40228, 0x40228,
1625                 0x40240, 0x40258,
1626                 0x40280, 0x40280,
1627                 0x40304, 0x40304,
1628                 0x40330, 0x4033c,
1629                 0x41304, 0x413dc,
1630                 0x41400, 0x4141c,
1631                 0x41480, 0x414d0,
1632                 0x44000, 0x4407c,
1633                 0x440c0, 0x4427c,
1634                 0x442c0, 0x4447c,
1635                 0x444c0, 0x4467c,
1636                 0x446c0, 0x4487c,
1637                 0x448c0, 0x44a7c,
1638                 0x44ac0, 0x44c7c,
1639                 0x44cc0, 0x44e7c,
1640                 0x44ec0, 0x4507c,
1641                 0x450c0, 0x451fc,
1642                 0x45800, 0x45868,
1643                 0x45880, 0x45884,
1644                 0x458a0, 0x458b0,
1645                 0x45a00, 0x45a68,
1646                 0x45a80, 0x45a84,
1647                 0x45aa0, 0x45ab0,
1648                 0x460c0, 0x460e4,
1649                 0x47000, 0x4708c,
1650                 0x47200, 0x47250,
1651                 0x47400, 0x47420,
1652                 0x47600, 0x47618,
1653                 0x47800, 0x4782c,
1654                 0x50000, 0x500cc,
1655                 0x50400, 0x50400,
1656                 0x50800, 0x508cc,
1657                 0x50c00, 0x50c00,
1658                 0x51000, 0x510b0,
1659                 0x51300, 0x51324,
1660         };
1661
1662         u32 *buf_end = (u32 *)((char *)buf + buf_size);
1663         const unsigned int *reg_ranges;
1664         int reg_ranges_size, range;
1665         unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
1666
1667         /* Select the right set of register ranges to dump depending on the
1668          * adapter chip type.
1669          */
1670         switch (chip_version) {
1671         case CHELSIO_T4:
1672                 reg_ranges = t4_reg_ranges;
1673                 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
1674                 break;
1675
1676         case CHELSIO_T5:
1677                 reg_ranges = t5_reg_ranges;
1678                 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
1679                 break;
1680
1681         case CHELSIO_T6:
1682                 reg_ranges = t6_reg_ranges;
1683                 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
1684                 break;
1685
1686         default:
1687                 dev_err(adap->pdev_dev,
1688                         "Unsupported chip version %d\n", chip_version);
1689                 return;
1690         }
1691
1692         /* Clear the register buffer and insert the appropriate register
1693          * values selected by the above register ranges.
1694          */
1695         memset(buf, 0, buf_size);
1696         for (range = 0; range < reg_ranges_size; range += 2) {
1697                 unsigned int reg = reg_ranges[range];
1698                 unsigned int last_reg = reg_ranges[range + 1];
1699                 u32 *bufp = (u32 *)((char *)buf + reg);
1700
1701                 /* Iterate across the register range filling in the register
1702                  * buffer but don't write past the end of the register buffer.
1703                  */
1704                 while (reg <= last_reg && bufp < buf_end) {
1705                         *bufp++ = t4_read_reg(adap, reg);
1706                         reg += sizeof(u32);
1707                 }
1708         }
1709 }
1710
1711 #define EEPROM_STAT_ADDR   0x7bfc
1712 #define VPD_BASE           0x400
1713 #define VPD_BASE_OLD       0
1714 #define VPD_LEN            1024
1715 #define CHELSIO_VPD_UNIQUE_ID 0x82
1716
1717 /**
1718  *      t4_seeprom_wp - enable/disable EEPROM write protection
1719  *      @adapter: the adapter
1720  *      @enable: whether to enable or disable write protection
1721  *
1722  *      Enables or disables write protection on the serial EEPROM.
1723  */
1724 int t4_seeprom_wp(struct adapter *adapter, bool enable)
1725 {
1726         unsigned int v = enable ? 0xc : 0;
1727         int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
1728         return ret < 0 ? ret : 0;
1729 }
1730
1731 /**
1732  *      t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
1733  *      @adapter: adapter to read
1734  *      @p: where to store the parameters
1735  *
1736  *      Reads card parameters stored in VPD EEPROM.
1737  */
1738 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
1739 {
1740         int i, ret = 0, addr;
1741         int ec, sn, pn, na;
1742         u8 *vpd, csum;
1743         unsigned int vpdr_len, kw_offset, id_len;
1744
1745         vpd = vmalloc(VPD_LEN);
1746         if (!vpd)
1747                 return -ENOMEM;
1748
1749         /* Card information normally starts at VPD_BASE but early cards had
1750          * it at 0.
1751          */
1752         ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
1753         if (ret < 0)
1754                 goto out;
1755
1756         /* The VPD shall have a unique identifier specified by the PCI SIG.
1757          * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
1758          * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
1759          * is expected to automatically put this entry at the
1760          * beginning of the VPD.
1761          */
1762         addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
1763
1764         ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
1765         if (ret < 0)
1766                 goto out;
1767
1768         if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
1769                 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
1770                 ret = -EINVAL;
1771                 goto out;
1772         }
1773
1774         id_len = pci_vpd_lrdt_size(vpd);
1775         if (id_len > ID_LEN)
1776                 id_len = ID_LEN;
1777
1778         i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
1779         if (i < 0) {
1780                 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
1781                 ret = -EINVAL;
1782                 goto out;
1783         }
1784
1785         vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
1786         kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
1787         if (vpdr_len + kw_offset > VPD_LEN) {
1788                 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
1789                 ret = -EINVAL;
1790                 goto out;
1791         }
1792
1793 #define FIND_VPD_KW(var, name) do { \
1794         var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
1795         if (var < 0) { \
1796                 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
1797                 ret = -EINVAL; \
1798                 goto out; \
1799         } \
1800         var += PCI_VPD_INFO_FLD_HDR_SIZE; \
1801 } while (0)
1802
1803         FIND_VPD_KW(i, "RV");
1804         for (csum = 0; i >= 0; i--)
1805                 csum += vpd[i];
1806
1807         if (csum) {
1808                 dev_err(adapter->pdev_dev,
1809                         "corrupted VPD EEPROM, actual csum %u\n", csum);
1810                 ret = -EINVAL;
1811                 goto out;
1812         }
1813
1814         FIND_VPD_KW(ec, "EC");
1815         FIND_VPD_KW(sn, "SN");
1816         FIND_VPD_KW(pn, "PN");
1817         FIND_VPD_KW(na, "NA");
1818 #undef FIND_VPD_KW
1819
1820         memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
1821         strim(p->id);
1822         memcpy(p->ec, vpd + ec, EC_LEN);
1823         strim(p->ec);
1824         i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
1825         memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
1826         strim(p->sn);
1827         i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
1828         memcpy(p->pn, vpd + pn, min(i, PN_LEN));
1829         strim(p->pn);
1830         memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
1831         strim((char *)p->na);
1832
1833 out:
1834         vfree(vpd);
1835         return ret;
1836 }
1837
1838 /**
1839  *      t4_get_vpd_params - read VPD parameters & retrieve Core Clock
1840  *      @adapter: adapter to read
1841  *      @p: where to store the parameters
1842  *
1843  *      Reads card parameters stored in VPD EEPROM and retrieves the Core
1844  *      Clock.  This can only be called after a connection to the firmware
1845  *      is established.
1846  */
1847 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
1848 {
1849         u32 cclk_param, cclk_val;
1850         int ret;
1851
1852         /* Grab the raw VPD parameters.
1853          */
1854         ret = t4_get_raw_vpd_params(adapter, p);
1855         if (ret)
1856                 return ret;
1857
1858         /* Ask firmware for the Core Clock since it knows how to translate the
1859          * Reference Clock ('V2') VPD field into a Core Clock value ...
1860          */
1861         cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1862                       FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
1863         ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
1864                               1, &cclk_param, &cclk_val);
1865
1866         if (ret)
1867                 return ret;
1868         p->cclk = cclk_val;
1869
1870         return 0;
1871 }
1872
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes (sent as the first byte of an SF access) */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 16 * SF_SEC_SIZE,	/* max firmware image size */
};
1888
1889 /**
1890  *      sf1_read - read data from the serial flash
1891  *      @adapter: the adapter
1892  *      @byte_cnt: number of bytes to read
1893  *      @cont: whether another operation will be chained
1894  *      @lock: whether to lock SF for PL access only
1895  *      @valp: where to store the read data
1896  *
1897  *      Reads up to 4 bytes of data from the serial flash.  The location of
1898  *      the read needs to be specified prior to calling this by issuing the
1899  *      appropriate commands to the serial flash.
1900  */
1901 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
1902                     int lock, u32 *valp)
1903 {
1904         int ret;
1905
1906         if (!byte_cnt || byte_cnt > 4)
1907                 return -EINVAL;
1908         if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
1909                 return -EBUSY;
1910         t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
1911                      SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
1912         ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
1913         if (!ret)
1914                 *valp = t4_read_reg(adapter, SF_DATA_A);
1915         return ret;
1916 }
1917
1918 /**
1919  *      sf1_write - write data to the serial flash
1920  *      @adapter: the adapter
1921  *      @byte_cnt: number of bytes to write
1922  *      @cont: whether another operation will be chained
1923  *      @lock: whether to lock SF for PL access only
1924  *      @val: value to write
1925  *
1926  *      Writes up to 4 bytes of data to the serial flash.  The location of
1927  *      the write needs to be specified prior to calling this by issuing the
1928  *      appropriate commands to the serial flash.
1929  */
1930 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
1931                      int lock, u32 val)
1932 {
1933         if (!byte_cnt || byte_cnt > 4)
1934                 return -EINVAL;
1935         if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
1936                 return -EBUSY;
1937         t4_write_reg(adapter, SF_DATA_A, val);
1938         t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
1939                      SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
1940         return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
1941 }
1942
1943 /**
1944  *      flash_wait_op - wait for a flash operation to complete
1945  *      @adapter: the adapter
1946  *      @attempts: max number of polls of the status register
1947  *      @delay: delay between polls in ms
1948  *
1949  *      Wait for a flash operation to complete by polling the status register.
1950  */
1951 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
1952 {
1953         int ret;
1954         u32 status;
1955
1956         while (1) {
1957                 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
1958                     (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
1959                         return ret;
1960                 if (!(status & 1))
1961                         return 0;
1962                 if (--attempts == 0)
1963                         return -EAGAIN;
1964                 if (delay)
1965                         msleep(delay);
1966         }
1967 }
1968
1969 /**
1970  *      t4_read_flash - read words from serial flash
1971  *      @adapter: the adapter
1972  *      @addr: the start address for the read
1973  *      @nwords: how many 32-bit words to read
1974  *      @data: where to store the read data
1975  *      @byte_oriented: whether to store data as bytes or as words
1976  *
1977  *      Read the specified number of 32-bit words from the serial flash.
1978  *      If @byte_oriented is set the read data is stored as a byte array
1979  *      (i.e., big-endian), otherwise as 32-bit words in the platform's
1980  *      natural endianness.
1981  */
1982 int t4_read_flash(struct adapter *adapter, unsigned int addr,
1983                   unsigned int nwords, u32 *data, int byte_oriented)
1984 {
1985         int ret;
1986
1987         if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
1988                 return -EINVAL;
1989
1990         addr = swab32(addr) | SF_RD_DATA_FAST;
1991
1992         if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
1993             (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
1994                 return ret;
1995
1996         for ( ; nwords; nwords--, data++) {
1997                 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
1998                 if (nwords == 1)
1999                         t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
2000                 if (ret)
2001                         return ret;
2002                 if (byte_oriented)
2003                         *data = (__force __u32)(cpu_to_be32(*data));
2004         }
2005         return 0;
2006 }
2007
/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	After writing, the page is read back and compared against @data to
 *	verify the write; returns -EIO on mismatch.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* Reject writes beyond the flash or spanning a page boundary. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Page-program command byte followed by the byte-swapped address. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload in chunks of up to 4 bytes, big-endian packed;
	 * the last chunk ends the chained SF operation (cont == 0).
	 */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* @data was advanced past the payload above; rewind for the compare. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
	return ret;
}
2066
2067 /**
2068  *      t4_get_fw_version - read the firmware version
2069  *      @adapter: the adapter
2070  *      @vers: where to place the version
2071  *
2072  *      Reads the FW version from flash.
2073  */
2074 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
2075 {
2076         return t4_read_flash(adapter, FLASH_FW_START +
2077                              offsetof(struct fw_hdr, fw_ver), 1,
2078                              vers, 0);
2079 }
2080
2081 /**
2082  *      t4_get_tp_version - read the TP microcode version
2083  *      @adapter: the adapter
2084  *      @vers: where to place the version
2085  *
2086  *      Reads the TP microcode version from flash.
2087  */
2088 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
2089 {
2090         return t4_read_flash(adapter, FLASH_FW_START +
2091                              offsetof(struct fw_hdr, tp_microcode_ver),
2092                              1, vers, 0);
2093 }
2094
2095 /**
2096  *      t4_get_exprom_version - return the Expansion ROM version (if any)
2097  *      @adapter: the adapter
2098  *      @vers: where to place the version
2099  *
2100  *      Reads the Expansion ROM header from FLASH and returns the version
2101  *      number (if present) through the @vers return value pointer.  We return
2102  *      this in the Firmware Version Format since it's convenient.  Return
2103  *      0 on success, -ENOENT if no Expansion ROM is present.
2104  */
2105 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
2106 {
2107         struct exprom_header {
2108                 unsigned char hdr_arr[16];      /* must start with 0x55aa */
2109                 unsigned char hdr_ver[4];       /* Expansion ROM version */
2110         } *hdr;
2111         u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
2112                                            sizeof(u32))];
2113         int ret;
2114
2115         ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
2116                             ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
2117                             0);
2118         if (ret)
2119                 return ret;
2120
2121         hdr = (struct exprom_header *)exprom_header_buf;
2122         if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
2123                 return -ENOENT;
2124
2125         *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
2126                  FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
2127                  FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
2128                  FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
2129         return 0;
2130 }
2131
2132 /* Is the given firmware API compatible with the one the driver was compiled
2133  * with?
2134  */
2135 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
2136 {
2137
2138         /* short circuit if it's the exact same firmware version */
2139         if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
2140                 return 1;
2141
2142 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
2143         if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
2144             SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
2145                 return 1;
2146 #undef SAME_INTF
2147
2148         return 0;
2149 }
2150
2151 /* The firmware in the filesystem is usable, but should it be installed?
2152  * This routine explains itself in detail if it indicates the filesystem
2153  * firmware should be installed.
2154  */
2155 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
2156                                 int k, int c)
2157 {
2158         const char *reason;
2159
2160         if (!card_fw_usable) {
2161                 reason = "incompatible or unusable";
2162                 goto install;
2163         }
2164
2165         if (k > c) {
2166                 reason = "older than the version supported with this driver";
2167                 goto install;
2168         }
2169
2170         return 0;
2171
2172 install:
2173         dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
2174                 "installing firmware %u.%u.%u.%u on card.\n",
2175                 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
2176                 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
2177                 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
2178                 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
2179
2180         return 1;
2181 }
2182
/* Decide which firmware to run with: the image already on the card, or the
 * filesystem image @fw_data (installed if it should be).  On success the
 * adapter's cached fw/tp version parameters are updated from @card_fw.
 *
 * NOTE(review): this function uses POSITIVE errno values internally (note
 * the negations of t4_read_flash()/t4_fw_upgrade() below and "ret = EINVAL")
 * — callers are expected to handle that convention; confirm before changing.
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			    sizeof(*card_fw) / sizeof(uint32_t),
			    (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	/* An absent filesystem image is treated as unusable. */
	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		/* Only the first PF to attach (DEV_STATE_UNINIT) may flash
		 * a newer filesystem image onto the card.
		 */
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		*card_fw = *fs_fw;
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}
2267
2268 /**
2269  *      t4_flash_erase_sectors - erase a range of flash sectors
2270  *      @adapter: the adapter
2271  *      @start: the first sector to erase
2272  *      @end: the last sector to erase
2273  *
2274  *      Erases the sectors in the given inclusive range.
2275  */
2276 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
2277 {
2278         int ret = 0;
2279
2280         if (end >= adapter->params.sf_nsec)
2281                 return -EINVAL;
2282
2283         while (start <= end) {
2284                 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
2285                     (ret = sf1_write(adapter, 4, 0, 1,
2286                                      SF_ERASE_SECTOR | (start << 8))) != 0 ||
2287                     (ret = flash_wait_op(adapter, 14, 500)) != 0) {
2288                         dev_err(adapter->pdev_dev,
2289                                 "erase of flash sector %d failed, error %d\n",
2290                                 start, ret);
2291                         break;
2292                 }
2293                 start++;
2294         }
2295         t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
2296         return ret;
2297 }
2298
2299 /**
2300  *      t4_flash_cfg_addr - return the address of the flash configuration file
2301  *      @adapter: the adapter
2302  *
2303  *      Return the address within the flash where the Firmware Configuration
2304  *      File is stored.
2305  */
2306 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
2307 {
2308         if (adapter->params.sf_size == 0x100000)
2309                 return FLASH_FPGA_CFG_START;
2310         else
2311                 return FLASH_CFG_START;
2312 }
2313
2314 /* Return TRUE if the specified firmware matches the adapter.  I.e. T4
2315  * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
2316  * and emit an error message for mismatched firmware to save our caller the
2317  * effort ...
2318  */
2319 static bool t4_fw_matches_chip(const struct adapter *adap,
2320                                const struct fw_hdr *hdr)
2321 {
2322         /* The expression below will return FALSE for any unsupported adapter
2323          * which will keep us "honest" in the future ...
2324          */
2325         if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
2326             (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
2327             (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
2328                 return true;
2329
2330         dev_err(adap->pdev_dev,
2331                 "FW image (%d) is not suitable for this adapter (%d)\n",
2332                 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
2333         return false;
2334 }
2335
2336 /**
2337  *      t4_load_fw - download firmware
2338  *      @adap: the adapter
2339  *      @fw_data: the firmware image to write
2340  *      @size: image size
2341  *
2342  *      Write the supplied firmware image to the card's serial flash.
2343  */
2344 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
2345 {
2346         u32 csum;
2347         int ret, addr;
2348         unsigned int i;
2349         u8 first_page[SF_PAGE_SIZE];
2350         const __be32 *p = (const __be32 *)fw_data;
2351         const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
2352         unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
2353         unsigned int fw_img_start = adap->params.sf_fw_start;
2354         unsigned int fw_start_sec = fw_img_start / sf_sec_size;
2355
2356         if (!size) {
2357                 dev_err(adap->pdev_dev, "FW image has no data\n");
2358                 return -EINVAL;
2359         }
2360         if (size & 511) {
2361                 dev_err(adap->pdev_dev,
2362                         "FW image size not multiple of 512 bytes\n");
2363                 return -EINVAL;
2364         }
2365         if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
2366                 dev_err(adap->pdev_dev,
2367                         "FW image size differs from size in FW header\n");
2368                 return -EINVAL;
2369         }
2370         if (size > FW_MAX_SIZE) {
2371                 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
2372                         FW_MAX_SIZE);
2373                 return -EFBIG;
2374         }
2375         if (!t4_fw_matches_chip(adap, hdr))
2376                 return -EINVAL;
2377
2378         for (csum = 0, i = 0; i < size / sizeof(csum); i++)
2379                 csum += be32_to_cpu(p[i]);
2380
2381         if (csum != 0xffffffff) {
2382                 dev_err(adap->pdev_dev,
2383                         "corrupted firmware image, checksum %#x\n", csum);
2384                 return -EINVAL;
2385         }
2386
2387         i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
2388         ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
2389         if (ret)
2390                 goto out;
2391
2392         /*
2393          * We write the correct version at the end so the driver can see a bad
2394          * version if the FW write fails.  Start by writing a copy of the
2395          * first page with a bad version.
2396          */
2397         memcpy(first_page, fw_data, SF_PAGE_SIZE);
2398         ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
2399         ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
2400         if (ret)
2401                 goto out;
2402
2403         addr = fw_img_start;
2404         for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
2405                 addr += SF_PAGE_SIZE;
2406                 fw_data += SF_PAGE_SIZE;
2407                 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
2408                 if (ret)
2409                         goto out;
2410         }
2411
2412         ret = t4_write_flash(adap,
2413                              fw_img_start + offsetof(struct fw_hdr, fw_ver),
2414                              sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
2415 out:
2416         if (ret)
2417                 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
2418                         ret);
2419         else
2420                 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
2421         return ret;
2422 }
2423
2424 /**
2425  *      t4_phy_fw_ver - return current PHY firmware version
2426  *      @adap: the adapter
2427  *      @phy_fw_ver: return value buffer for PHY firmware version
2428  *
2429  *      Returns the current version of external PHY firmware on the
2430  *      adapter.
2431  */
2432 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
2433 {
2434         u32 param, val;
2435         int ret;
2436
2437         param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2438                  FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
2439                  FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
2440                  FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
2441         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
2442                               &param, &val);
2443         if (ret < 0)
2444                 return ret;
2445         *phy_fw_ver = val;
2446         return 0;
2447 }
2448
2449 /**
2450  *      t4_load_phy_fw - download port PHY firmware
2451  *      @adap: the adapter
2452  *      @win: the PCI-E Memory Window index to use for t4_memory_rw()
2453  *      @win_lock: the lock to use to guard the memory copy
2454  *      @phy_fw_version: function to check PHY firmware versions
2455  *      @phy_fw_data: the PHY firmware image to write
2456  *      @phy_fw_size: image size
2457  *
2458  *      Transfer the specified PHY firmware to the adapter.  If a non-NULL
2459  *      @phy_fw_version is supplied, then it will be used to determine if
2460  *      it's necessary to perform the transfer by comparing the version
2461  *      of any existing adapter PHY firmware with that of the passed in
2462  *      PHY firmware image.  If @win_lock is non-NULL then it will be used
2463  *      around the call to t4_memory_rw() which transfers the PHY firmware
2464  *      to the adapter.
2465  *
2466  *      A negative error number will be returned if an error occurs.  If
2467  *      version number support is available and there's no need to upgrade
2468  *      the firmware, 0 will be returned.  If firmware is successfully
2469  *      transferred to the adapter, 1 will be retured.
2470  *
2471  *      NOTE: some adapters only have local RAM to store the PHY firmware.  As
2472  *      a result, a RESET of the adapter would cause that RAM to lose its
2473  *      contents.  Thus, loading PHY firmware on such adapters must happen
2474  *      after any FW_RESET_CMDs ...
2475  */
2476 int t4_load_phy_fw(struct adapter *adap,
2477                    int win, spinlock_t *win_lock,
2478                    int (*phy_fw_version)(const u8 *, size_t),
2479                    const u8 *phy_fw_data, size_t phy_fw_size)
2480 {
2481         unsigned long mtype = 0, maddr = 0;
2482         u32 param, val;
2483         int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
2484         int ret;
2485
2486         /* If we have version number support, then check to see if the adapter
2487          * already has up-to-date PHY firmware loaded.
2488          */
2489          if (phy_fw_version) {
2490                 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
2491                 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
2492                 if (ret < 0)
2493                         return ret;
2494
2495                 if (cur_phy_fw_ver >= new_phy_fw_vers) {
2496                         CH_WARN(adap, "PHY Firmware already up-to-date, "
2497                                 "version %#x\n", cur_phy_fw_ver);
2498                         return 0;
2499                 }
2500         }
2501
2502         /* Ask the firmware where it wants us to copy the PHY firmware image.
2503          * The size of the file requires a special version of the READ coommand
2504          * which will pass the file size via the values field in PARAMS_CMD and
2505          * retrieve the return value from firmware and place it in the same
2506          * buffer values
2507          */
2508         param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2509                  FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
2510                  FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
2511                  FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
2512         val = phy_fw_size;
2513         ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
2514                                  &param, &val, 1);
2515         if (ret < 0)
2516                 return ret;
2517         mtype = val >> 8;
2518         maddr = (val & 0xff) << 16;
2519
2520         /* Copy the supplied PHY Firmware image to the adapter memory location
2521          * allocated by the adapter firmware.
2522          */
2523         if (win_lock)
2524                 spin_lock_bh(win_lock);
2525         ret = t4_memory_rw(adap, win, mtype, maddr,
2526                            phy_fw_size, (__be32 *)phy_fw_data,
2527                            T4_MEMORY_WRITE);
2528         if (win_lock)
2529                 spin_unlock_bh(win_lock);
2530         if (ret)
2531                 return ret;
2532
2533         /* Tell the firmware that the PHY firmware image has been written to
2534          * RAM and it can now start copying it over to the PHYs.  The chip
2535          * firmware will RESET the affected PHYs as part of this operation
2536          * leaving them running the new PHY firmware image.
2537          */
2538         param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2539                  FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
2540                  FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
2541                  FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
2542         ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
2543                                     &param, &val, 30000);
2544
2545         /* If we have version number support, then check to see that the new
2546          * firmware got loaded properly.
2547          */
2548         if (phy_fw_version) {
2549                 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
2550                 if (ret < 0)
2551                         return ret;
2552
2553                 if (cur_phy_fw_ver != new_phy_fw_vers) {
2554                         CH_WARN(adap, "PHY Firmware did not update: "
2555                                 "version on adapter %#x, "
2556                                 "version flashed %#x\n",
2557                                 cur_phy_fw_ver, new_phy_fw_vers);
2558                         return -ENXIO;
2559                 }
2560         }
2561
2562         return 1;
2563 }
2564
2565 /**
2566  *      t4_fwcache - firmware cache operation
2567  *      @adap: the adapter
2568  *      @op  : the operation (flush or flush and invalidate)
2569  */
2570 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
2571 {
2572         struct fw_params_cmd c;
2573
2574         memset(&c, 0, sizeof(c));
2575         c.op_to_vfn =
2576                 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
2577                             FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
2578                             FW_PARAMS_CMD_PFN_V(adap->pf) |
2579                             FW_PARAMS_CMD_VFN_V(0));
2580         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2581         c.param[0].mnem =
2582                 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2583                             FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
2584         c.param[0].val = (__force __be32)op;
2585
2586         return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
2587 }
2588
2589 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
2590 {
2591         unsigned int i, j;
2592
2593         for (i = 0; i < 8; i++) {
2594                 u32 *p = la_buf + i;
2595
2596                 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
2597                 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
2598                 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
2599                 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
2600                         *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
2601         }
2602 }
2603
/* Link capabilities the driver will pass to the firmware in L1 configure
 * requests: all supported speeds plus autonegotiation.
 */
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)
2607
2608 /**
2609  *      t4_link_l1cfg - apply link configuration to MAC/PHY
2610  *      @phy: the PHY to setup
2611  *      @mac: the MAC to setup
2612  *      @lc: the requested link configuration
2613  *
2614  *      Set up a port's MAC and PHY according to a desired link configuration.
2615  *      - If the PHY can auto-negotiate first decide what to advertise, then
2616  *        enable/disable auto-negotiation as desired, and reset.
2617  *      - If the PHY does not auto-negotiate just reset it.
2618  *      - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
2619  *        otherwise do it later based on the outcome of auto-negotiation.
2620  */
2621 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
2622                   struct link_config *lc)
2623 {
2624         struct fw_port_cmd c;
2625         unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
2626
2627         lc->link_ok = 0;
2628         if (lc->requested_fc & PAUSE_RX)
2629                 fc |= FW_PORT_CAP_FC_RX;
2630         if (lc->requested_fc & PAUSE_TX)
2631                 fc |= FW_PORT_CAP_FC_TX;
2632
2633         memset(&c, 0, sizeof(c));
2634         c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
2635                                      FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
2636                                      FW_PORT_CMD_PORTID_V(port));
2637         c.action_to_len16 =
2638                 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
2639                             FW_LEN16(c));
2640
2641         if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2642                 c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
2643                                              fc);
2644                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
2645         } else if (lc->autoneg == AUTONEG_DISABLE) {
2646                 c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
2647                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
2648         } else
2649                 c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
2650
2651         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2652 }
2653
2654 /**
2655  *      t4_restart_aneg - restart autonegotiation
2656  *      @adap: the adapter
2657  *      @mbox: mbox to use for the FW command
2658  *      @port: the port id
2659  *
2660  *      Restarts autonegotiation for the selected port.
2661  */
2662 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
2663 {
2664         struct fw_port_cmd c;
2665
2666         memset(&c, 0, sizeof(c));
2667         c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
2668                                      FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
2669                                      FW_PORT_CMD_PORTID_V(port));
2670         c.action_to_len16 =
2671                 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
2672                             FW_LEN16(c));
2673         c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
2674         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2675 }
2676
/* Optional callback run when an interrupt condition in a table matches. */
typedef void (*int_handler_t)(struct adapter *adap);

/* One entry in a table-driven interrupt decode; tables are terminated by
 * an entry with mask == 0 (see t4_handle_intr_status()).
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
2686
2687 /**
2688  *      t4_handle_intr_status - table driven interrupt handler
2689  *      @adapter: the adapter that generated the interrupt
2690  *      @reg: the interrupt status register to process
2691  *      @acts: table of interrupt actions
2692  *
2693  *      A table driven interrupt handler that applies a set of masks to an
2694  *      interrupt status word and performs the corresponding actions if the
2695  *      interrupts described by the mask have occurred.  The actions include
2696  *      optionally emitting a warning or alert message.  The table is terminated
2697  *      by an entry specifying mask 0.  Returns the number of fatal interrupt
2698  *      conditions.
2699  */
2700 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
2701                                  const struct intr_info *acts)
2702 {
2703         int fatal = 0;
2704         unsigned int mask = 0;
2705         unsigned int status = t4_read_reg(adapter, reg);
2706
2707         for ( ; acts->mask; ++acts) {
2708                 if (!(status & acts->mask))
2709                         continue;
2710                 if (acts->fatal) {
2711                         fatal++;
2712                         dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
2713                                   status & acts->mask);
2714                 } else if (acts->msg && printk_ratelimit())
2715                         dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
2716                                  status & acts->mask);
2717                 if (acts->int_handler)
2718                         acts->int_handler(adapter);
2719                 mask |= acts->mask;
2720         }
2721         status &= mask;
2722         if (status)                           /* clear processed interrupts */
2723                 t4_write_reg(adapter, reg, status);
2724         return fatal;
2725 }
2726
2727 /*
2728  * Interrupt handler for the PCIE module.
2729  */
2730 static void pcie_intr_handler(struct adapter *adapter)
2731 {
2732         static const struct intr_info sysbus_intr_info[] = {
2733                 { RNPP_F, "RXNP array parity error", -1, 1 },
2734                 { RPCP_F, "RXPC array parity error", -1, 1 },
2735                 { RCIP_F, "RXCIF array parity error", -1, 1 },
2736                 { RCCP_F, "Rx completions control array parity error", -1, 1 },
2737                 { RFTP_F, "RXFT array parity error", -1, 1 },
2738                 { 0 }
2739         };
2740         static const struct intr_info pcie_port_intr_info[] = {
2741                 { TPCP_F, "TXPC array parity error", -1, 1 },
2742                 { TNPP_F, "TXNP array parity error", -1, 1 },
2743                 { TFTP_F, "TXFT array parity error", -1, 1 },
2744                 { TCAP_F, "TXCA array parity error", -1, 1 },
2745                 { TCIP_F, "TXCIF array parity error", -1, 1 },
2746                 { RCAP_F, "RXCA array parity error", -1, 1 },
2747                 { OTDD_F, "outbound request TLP discarded", -1, 1 },
2748                 { RDPE_F, "Rx data parity error", -1, 1 },
2749                 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
2750                 { 0 }
2751         };
2752         static const struct intr_info pcie_intr_info[] = {
2753                 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
2754                 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
2755                 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
2756                 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
2757                 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
2758                 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
2759                 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
2760                 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
2761                 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
2762                 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
2763                 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
2764                 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
2765                 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
2766                 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
2767                 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
2768                 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
2769                 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
2770                 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
2771                 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
2772                 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
2773                 { FIDPERR_F, "PCI FID parity error", -1, 1 },
2774                 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
2775                 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
2776                 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
2777                 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
2778                 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
2779                 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
2780                 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
2781                 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
2782                 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
2783                   -1, 0 },
2784                 { 0 }
2785         };
2786
2787         static struct intr_info t5_pcie_intr_info[] = {
2788                 { MSTGRPPERR_F, "Master Response Read Queue parity error",
2789                   -1, 1 },
2790                 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
2791                 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
2792                 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
2793                 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
2794                 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
2795                 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
2796                 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
2797                   -1, 1 },
2798                 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
2799                   -1, 1 },
2800                 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
2801                 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
2802                 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
2803                 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
2804                 { DREQWRPERR_F, "PCI DMA channel write request parity error",
2805                   -1, 1 },
2806                 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
2807                 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
2808                 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
2809                 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
2810                 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
2811                 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
2812                 { FIDPERR_F, "PCI FID parity error", -1, 1 },
2813                 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
2814                 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
2815                 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
2816                 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
2817                   -1, 1 },
2818                 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
2819                   -1, 1 },
2820                 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
2821                 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
2822                 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2823                 { READRSPERR_F, "Outbound read error", -1, 0 },
2824                 { 0 }
2825         };
2826
2827         int fat;
2828
2829         if (is_t4(adapter->params.chip))
2830                 fat = t4_handle_intr_status(adapter,
2831                                 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
2832                                 sysbus_intr_info) +
2833                         t4_handle_intr_status(adapter,
2834                                         PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
2835                                         pcie_port_intr_info) +
2836                         t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
2837                                               pcie_intr_info);
2838         else
2839                 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
2840                                             t5_pcie_intr_info);
2841
2842         if (fat)
2843                 t4_fatal_err(adapter);
2844 }
2845
2846 /*
2847  * TP interrupt handler.
2848  */
2849 static void tp_intr_handler(struct adapter *adapter)
2850 {
2851         static const struct intr_info tp_intr_info[] = {
2852                 { 0x3fffffff, "TP parity error", -1, 1 },
2853                 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
2854                 { 0 }
2855         };
2856
2857         if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
2858                 t4_fatal_err(adapter);
2859 }
2860
2861 /*
2862  * SGE interrupt handler.
2863  */
2864 static void sge_intr_handler(struct adapter *adapter)
2865 {
2866         u64 v;
2867         u32 err;
2868
2869         static const struct intr_info sge_intr_info[] = {
2870                 { ERR_CPL_EXCEED_IQE_SIZE_F,
2871                   "SGE received CPL exceeding IQE size", -1, 1 },
2872                 { ERR_INVALID_CIDX_INC_F,
2873                   "SGE GTS CIDX increment too large", -1, 0 },
2874                 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
2875                 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
2876                 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
2877                   "SGE IQID > 1023 received CPL for FL", -1, 0 },
2878                 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
2879                   0 },
2880                 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
2881                   0 },
2882                 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
2883                   0 },
2884                 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
2885                   0 },
2886                 { ERR_ING_CTXT_PRIO_F,
2887                   "SGE too many priority ingress contexts", -1, 0 },
2888                 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
2889                 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
2890                 { 0 }
2891         };
2892
2893         static struct intr_info t4t5_sge_intr_info[] = {
2894                 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
2895                 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
2896                 { ERR_EGR_CTXT_PRIO_F,
2897                   "SGE too many priority egress contexts", -1, 0 },
2898                 { 0 }
2899         };
2900
2901         v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
2902                 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
2903         if (v) {
2904                 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
2905                                 (unsigned long long)v);
2906                 t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
2907                 t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
2908         }
2909
2910         v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
2911         if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
2912                 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
2913                                            t4t5_sge_intr_info);
2914
2915         err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
2916         if (err & ERROR_QID_VALID_F) {
2917                 dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
2918                         ERROR_QID_G(err));
2919                 if (err & UNCAPTURED_ERROR_F)
2920                         dev_err(adapter->pdev_dev,
2921                                 "SGE UNCAPTURED_ERROR set (clearing)\n");
2922                 t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
2923                              UNCAPTURED_ERROR_F);
2924         }
2925
2926         if (v != 0)
2927                 t4_fatal_err(adapter);
2928 }
2929
/* Aggregate masks of the per-queue CIM OBQ/IBQ parity-error cause bits,
 * used by the cim_intr_handler() decode table below.
 */
#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
2934
2935 /*
2936  * CIM interrupt handler.
2937  */
2938 static void cim_intr_handler(struct adapter *adapter)
2939 {
2940         static const struct intr_info cim_intr_info[] = {
2941                 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
2942                 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2943                 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2944                 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
2945                 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
2946                 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
2947                 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
2948                 { 0 }
2949         };
2950         static const struct intr_info cim_upintr_info[] = {
2951                 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
2952                 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
2953                 { ILLWRINT_F, "CIM illegal write", -1, 1 },
2954                 { ILLRDINT_F, "CIM illegal read", -1, 1 },
2955                 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
2956                 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
2957                 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
2958                 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
2959                 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
2960                 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
2961                 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
2962                 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
2963                 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
2964                 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
2965                 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
2966                 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
2967                 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
2968                 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
2969                 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
2970                 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
2971                 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
2972                 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
2973                 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
2974                 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
2975                 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
2976                 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
2977                 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
2978                 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
2979                 { 0 }
2980         };
2981
2982         int fat;
2983
2984         if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
2985                 t4_report_fw_error(adapter);
2986
2987         fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
2988                                     cim_intr_info) +
2989               t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
2990                                     cim_upintr_info);
2991         if (fat)
2992                 t4_fatal_err(adapter);
2993 }
2994
2995 /*
2996  * ULP RX interrupt handler.
2997  */
2998 static void ulprx_intr_handler(struct adapter *adapter)
2999 {
3000         static const struct intr_info ulprx_intr_info[] = {
3001                 { 0x1800000, "ULPRX context error", -1, 1 },
3002                 { 0x7fffff, "ULPRX parity error", -1, 1 },
3003                 { 0 }
3004         };
3005
3006         if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
3007                 t4_fatal_err(adapter);
3008 }
3009
3010 /*
3011  * ULP TX interrupt handler.
3012  */
3013 static void ulptx_intr_handler(struct adapter *adapter)
3014 {
3015         static const struct intr_info ulptx_intr_info[] = {
3016                 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
3017                   0 },
3018                 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
3019                   0 },
3020                 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
3021                   0 },
3022                 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
3023                   0 },
3024                 { 0xfffffff, "ULPTX parity error", -1, 1 },
3025                 { 0 }
3026         };
3027
3028         if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
3029                 t4_fatal_err(adapter);
3030 }
3031
3032 /*
3033  * PM TX interrupt handler.
3034  */
3035 static void pmtx_intr_handler(struct adapter *adapter)
3036 {
3037         static const struct intr_info pmtx_intr_info[] = {
3038                 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
3039                 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
3040                 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
3041                 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
3042                 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
3043                 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
3044                 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
3045                   -1, 1 },
3046                 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
3047                 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
3048                 { 0 }
3049         };
3050
3051         if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
3052                 t4_fatal_err(adapter);
3053 }
3054
3055 /*
3056  * PM RX interrupt handler.
3057  */
3058 static void pmrx_intr_handler(struct adapter *adapter)
3059 {
3060         static const struct intr_info pmrx_intr_info[] = {
3061                 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
3062                 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
3063                 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
3064                 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
3065                   -1, 1 },
3066                 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
3067                 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
3068                 { 0 }
3069         };
3070
3071         if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
3072                 t4_fatal_err(adapter);
3073 }
3074
3075 /*
3076  * CPL switch interrupt handler.
3077  */
3078 static void cplsw_intr_handler(struct adapter *adapter)
3079 {
3080         static const struct intr_info cplsw_intr_info[] = {
3081                 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
3082                 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
3083                 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
3084                 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
3085                 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
3086                 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
3087                 { 0 }
3088         };
3089
3090         if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
3091                 t4_fatal_err(adapter);
3092 }
3093
3094 /*
3095  * LE interrupt handler.
3096  */
3097 static void le_intr_handler(struct adapter *adap)
3098 {
3099         enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
3100         static const struct intr_info le_intr_info[] = {
3101                 { LIPMISS_F, "LE LIP miss", -1, 0 },
3102                 { LIP0_F, "LE 0 LIP error", -1, 0 },
3103                 { PARITYERR_F, "LE parity error", -1, 1 },
3104                 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
3105                 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
3106                 { 0 }
3107         };
3108
3109         static struct intr_info t6_le_intr_info[] = {
3110                 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
3111                 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
3112                 { TCAMINTPERR_F, "LE parity error", -1, 1 },
3113                 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
3114                 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
3115                 { 0 }
3116         };
3117
3118         if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
3119                                   (chip <= CHELSIO_T5) ?
3120                                   le_intr_info : t6_le_intr_info))
3121                 t4_fatal_err(adap);
3122 }
3123
3124 /*
3125  * MPS interrupt handler.
3126  */
3127 static void mps_intr_handler(struct adapter *adapter)
3128 {
3129         static const struct intr_info mps_rx_intr_info[] = {
3130                 { 0xffffff, "MPS Rx parity error", -1, 1 },
3131                 { 0 }
3132         };
3133         static const struct intr_info mps_tx_intr_info[] = {
3134                 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
3135                 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
3136                 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
3137                   -1, 1 },
3138                 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
3139                   -1, 1 },
3140                 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
3141                 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
3142                 { FRMERR_F, "MPS Tx framing error", -1, 1 },
3143                 { 0 }
3144         };
3145         static const struct intr_info mps_trc_intr_info[] = {
3146                 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
3147                 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
3148                   -1, 1 },
3149                 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
3150                 { 0 }
3151         };
3152         static const struct intr_info mps_stat_sram_intr_info[] = {
3153                 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
3154                 { 0 }
3155         };
3156         static const struct intr_info mps_stat_tx_intr_info[] = {
3157                 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
3158                 { 0 }
3159         };
3160         static const struct intr_info mps_stat_rx_intr_info[] = {
3161                 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
3162                 { 0 }
3163         };
3164         static const struct intr_info mps_cls_intr_info[] = {
3165                 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
3166                 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
3167                 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
3168                 { 0 }
3169         };
3170
3171         int fat;
3172
3173         fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
3174                                     mps_rx_intr_info) +
3175               t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
3176                                     mps_tx_intr_info) +
3177               t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
3178                                     mps_trc_intr_info) +
3179               t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
3180                                     mps_stat_sram_intr_info) +
3181               t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
3182                                     mps_stat_tx_intr_info) +
3183               t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
3184                                     mps_stat_rx_intr_info) +
3185               t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
3186                                     mps_cls_intr_info);
3187
3188         t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
3189         t4_read_reg(adapter, MPS_INT_CAUSE_A);                    /* flush */
3190         if (fat)
3191                 t4_fatal_err(adapter);
3192 }
3193
/* EDC/MC interrupt-cause bits handled by mem_intr_handler() */
#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
		      ECC_UE_INT_CAUSE_F)
3196
3197 /*
3198  * EDC/MC interrupt handler.
3199  */
3200 static void mem_intr_handler(struct adapter *adapter, int idx)
3201 {
3202         static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
3203
3204         unsigned int addr, cnt_addr, v;
3205
3206         if (idx <= MEM_EDC1) {
3207                 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
3208                 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
3209         } else if (idx == MEM_MC) {
3210                 if (is_t4(adapter->params.chip)) {
3211                         addr = MC_INT_CAUSE_A;
3212                         cnt_addr = MC_ECC_STATUS_A;
3213                 } else {
3214                         addr = MC_P_INT_CAUSE_A;
3215                         cnt_addr = MC_P_ECC_STATUS_A;
3216                 }
3217         } else {
3218                 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
3219                 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
3220         }
3221
3222         v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
3223         if (v & PERR_INT_CAUSE_F)
3224                 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
3225                           name[idx]);
3226         if (v & ECC_CE_INT_CAUSE_F) {
3227                 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
3228
3229                 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
3230                 if (printk_ratelimit())
3231                         dev_warn(adapter->pdev_dev,
3232                                  "%u %s correctable ECC data error%s\n",
3233                                  cnt, name[idx], cnt > 1 ? "s" : "");
3234         }
3235         if (v & ECC_UE_INT_CAUSE_F)
3236                 dev_alert(adapter->pdev_dev,
3237                           "%s uncorrectable ECC data error\n", name[idx]);
3238
3239         t4_write_reg(adapter, addr, v);
3240         if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
3241                 t4_fatal_err(adapter);
3242 }
3243
3244 /*
3245  * MA interrupt handler.
3246  */
3247 static void ma_intr_handler(struct adapter *adap)
3248 {
3249         u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
3250
3251         if (status & MEM_PERR_INT_CAUSE_F) {
3252                 dev_alert(adap->pdev_dev,
3253                           "MA parity error, parity status %#x\n",
3254                           t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
3255                 if (is_t5(adap->params.chip))
3256                         dev_alert(adap->pdev_dev,
3257                                   "MA parity error, parity status %#x\n",
3258                                   t4_read_reg(adap,
3259                                               MA_PARITY_ERROR_STATUS2_A));
3260         }
3261         if (status & MEM_WRAP_INT_CAUSE_F) {
3262                 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
3263                 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
3264                           "client %u to address %#x\n",
3265                           MEM_WRAP_CLIENT_NUM_G(v),
3266                           MEM_WRAP_ADDRESS_G(v) << 4);
3267         }
3268         t4_write_reg(adap, MA_INT_CAUSE_A, status);
3269         t4_fatal_err(adap);
3270 }
3271
3272 /*
3273  * SMB interrupt handler.
3274  */
3275 static void smb_intr_handler(struct adapter *adap)
3276 {
3277         static const struct intr_info smb_intr_info[] = {
3278                 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
3279                 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
3280                 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
3281                 { 0 }
3282         };
3283
3284         if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
3285                 t4_fatal_err(adap);
3286 }
3287
3288 /*
3289  * NC-SI interrupt handler.
3290  */
3291 static void ncsi_intr_handler(struct adapter *adap)
3292 {
3293         static const struct intr_info ncsi_intr_info[] = {
3294                 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
3295                 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
3296                 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
3297                 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
3298                 { 0 }
3299         };
3300
3301         if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
3302                 t4_fatal_err(adap);
3303 }
3304
3305 /*
3306  * XGMAC interrupt handler.
3307  */
3308 static void xgmac_intr_handler(struct adapter *adap, int port)
3309 {
3310         u32 v, int_cause_reg;
3311
3312         if (is_t4(adap->params.chip))
3313                 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
3314         else
3315                 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
3316
3317         v = t4_read_reg(adap, int_cause_reg);
3318
3319         v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
3320         if (!v)
3321                 return;
3322
3323         if (v & TXFIFO_PRTY_ERR_F)
3324                 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
3325                           port);
3326         if (v & RXFIFO_PRTY_ERR_F)
3327                 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
3328                           port);
3329         t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
3330         t4_fatal_err(adap);
3331 }
3332
3333 /*
3334  * PL interrupt handler.
3335  */
3336 static void pl_intr_handler(struct adapter *adap)
3337 {
3338         static const struct intr_info pl_intr_info[] = {
3339                 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
3340                 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
3341                 { 0 }
3342         };
3343
3344         if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
3345                 t4_fatal_err(adap);
3346 }
3347
/* Per-PF interrupt enable and the set of global module interrupts that
 * t4_slow_intr_handler() dispatches on and clears.
 */
#define PF_INTR_MASK (PFSW_F)
#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
		EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
		CPL_SWITCH_F | SGE_F | ULP_TX_F)
3352
3353 /**
3354  *      t4_slow_intr_handler - control path interrupt handler
3355  *      @adapter: the adapter
3356  *
3357  *      T4 interrupt handler for non-data global interrupt events, e.g., errors.
3358  *      The designation 'slow' is because it involves register reads, while
3359  *      data interrupts typically don't involve any MMIOs.
3360  */
3361 int t4_slow_intr_handler(struct adapter *adapter)
3362 {
3363         u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
3364
3365         if (!(cause & GLBL_INTR_MASK))
3366                 return 0;
3367         if (cause & CIM_F)
3368                 cim_intr_handler(adapter);
3369         if (cause & MPS_F)
3370                 mps_intr_handler(adapter);
3371         if (cause & NCSI_F)
3372                 ncsi_intr_handler(adapter);
3373         if (cause & PL_F)
3374                 pl_intr_handler(adapter);
3375         if (cause & SMB_F)
3376                 smb_intr_handler(adapter);
3377         if (cause & XGMAC0_F)
3378                 xgmac_intr_handler(adapter, 0);
3379         if (cause & XGMAC1_F)
3380                 xgmac_intr_handler(adapter, 1);
3381         if (cause & XGMAC_KR0_F)
3382                 xgmac_intr_handler(adapter, 2);
3383         if (cause & XGMAC_KR1_F)
3384                 xgmac_intr_handler(adapter, 3);
3385         if (cause & PCIE_F)
3386                 pcie_intr_handler(adapter);
3387         if (cause & MC_F)
3388                 mem_intr_handler(adapter, MEM_MC);
3389         if (is_t5(adapter->params.chip) && (cause & MC1_F))
3390                 mem_intr_handler(adapter, MEM_MC1);
3391         if (cause & EDC0_F)
3392                 mem_intr_handler(adapter, MEM_EDC0);
3393         if (cause & EDC1_F)
3394                 mem_intr_handler(adapter, MEM_EDC1);
3395         if (cause & LE_F)
3396                 le_intr_handler(adapter);
3397         if (cause & TP_F)
3398                 tp_intr_handler(adapter);
3399         if (cause & MA_F)
3400                 ma_intr_handler(adapter);
3401         if (cause & PM_TX_F)
3402                 pmtx_intr_handler(adapter);
3403         if (cause & PM_RX_F)
3404                 pmrx_intr_handler(adapter);
3405         if (cause & ULP_RX_F)
3406                 ulprx_intr_handler(adapter);
3407         if (cause & CPL_SWITCH_F)
3408                 cplsw_intr_handler(adapter);
3409         if (cause & SGE_F)
3410                 sge_intr_handler(adapter);
3411         if (cause & ULP_TX_F)
3412                 ulptx_intr_handler(adapter);
3413
3414         /* Clear the interrupts just processed for which we are the master. */
3415         t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
3416         (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
3417         return 1;
3418 }
3419
3420 /**
3421  *      t4_intr_enable - enable interrupts
3422  *      @adapter: the adapter whose interrupts should be enabled
3423  *
3424  *      Enable PF-specific interrupts for the calling function and the top-level
3425  *      interrupt concentrator for global interrupts.  Interrupts are already
3426  *      enabled at each module, here we just enable the roots of the interrupt
3427  *      hierarchies.
3428  *
3429  *      Note: this function should be called only when the driver manages
3430  *      non PF-specific interrupts from the various HW modules.  Only one PCI
3431  *      function at a time should be doing this.
3432  */
3433 void t4_intr_enable(struct adapter *adapter)
3434 {
3435         u32 val = 0;
3436         u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
3437
3438         if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
3439                 val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
3440         t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
3441                      ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
3442                      ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
3443                      ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
3444                      ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
3445                      ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
3446                      DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
3447         t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
3448         t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
3449 }
3450
3451 /**
3452  *      t4_intr_disable - disable interrupts
3453  *      @adapter: the adapter whose interrupts should be disabled
3454  *
3455  *      Disable interrupts.  We only disable the top-level interrupt
3456  *      concentrators.  The caller must be a PCI function managing global
3457  *      interrupts.
3458  */
3459 void t4_intr_disable(struct adapter *adapter)
3460 {
3461         u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
3462
3463         t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
3464         t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
3465 }
3466
3467 /**
3468  *      hash_mac_addr - return the hash value of a MAC address
3469  *      @addr: the 48-bit Ethernet MAC address
3470  *
3471  *      Hashes a MAC address according to the hash function used by HW inexact
3472  *      (hash) address matching.
3473  */
3474 static int hash_mac_addr(const u8 *addr)
3475 {
3476         u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
3477         u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
3478         a ^= b;
3479         a ^= (a >> 12);
3480         a ^= (a >> 6);
3481         return a & 0x3f;
3482 }
3483
3484 /**
3485  *      t4_config_rss_range - configure a portion of the RSS mapping table
3486  *      @adapter: the adapter
3487  *      @mbox: mbox to use for the FW command
3488  *      @viid: virtual interface whose RSS subtable is to be written
3489  *      @start: start entry in the table to write
3490  *      @n: how many table entries to write
3491  *      @rspq: values for the response queue lookup table
3492  *      @nrspq: number of values in @rspq
3493  *
3494  *      Programs the selected part of the VI's RSS mapping table with the
3495  *      provided values.  If @nrspq < @n the supplied values are used repeatedly
3496  *      until the full table range is populated.
3497  *
3498  *      The caller must ensure the values in @rspq are in the range allowed for
3499  *      @viid.
3500  */
3501 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
3502                         int start, int n, const u16 *rspq, unsigned int nrspq)
3503 {
3504         int ret;
3505         const u16 *rsp = rspq;
3506         const u16 *rsp_end = rspq + nrspq;
3507         struct fw_rss_ind_tbl_cmd cmd;
3508
3509         memset(&cmd, 0, sizeof(cmd));
3510         cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
3511                                FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3512                                FW_RSS_IND_TBL_CMD_VIID_V(viid));
3513         cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
3514
3515         /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
3516         while (n > 0) {
3517                 int nq = min(n, 32);
3518                 __be32 *qp = &cmd.iq0_to_iq2;
3519
3520                 cmd.niqid = cpu_to_be16(nq);
3521                 cmd.startidx = cpu_to_be16(start);
3522
3523                 start += nq;
3524                 n -= nq;
3525
3526                 while (nq > 0) {
3527                         unsigned int v;
3528
3529                         v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
3530                         if (++rsp >= rsp_end)
3531                                 rsp = rspq;
3532                         v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
3533                         if (++rsp >= rsp_end)
3534                                 rsp = rspq;
3535                         v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
3536                         if (++rsp >= rsp_end)
3537                                 rsp = rspq;
3538
3539                         *qp++ = cpu_to_be32(v);
3540                         nq -= 3;
3541                 }
3542
3543                 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
3544                 if (ret)
3545                         return ret;
3546         }
3547         return 0;
3548 }
3549
3550 /**
3551  *      t4_config_glbl_rss - configure the global RSS mode
3552  *      @adapter: the adapter
3553  *      @mbox: mbox to use for the FW command
3554  *      @mode: global RSS mode
3555  *      @flags: mode-specific flags
3556  *
3557  *      Sets the global RSS mode.
3558  */
3559 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
3560                        unsigned int flags)
3561 {
3562         struct fw_rss_glb_config_cmd c;
3563
3564         memset(&c, 0, sizeof(c));
3565         c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
3566                                     FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3567         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3568         if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
3569                 c.u.manual.mode_pkd =
3570                         cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
3571         } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
3572                 c.u.basicvirtual.mode_pkd =
3573                         cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
3574                 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
3575         } else
3576                 return -EINVAL;
3577         return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
3578 }
3579
3580 /**
3581  *      t4_config_vi_rss - configure per VI RSS settings
3582  *      @adapter: the adapter
3583  *      @mbox: mbox to use for the FW command
3584  *      @viid: the VI id
3585  *      @flags: RSS flags
3586  *      @defq: id of the default RSS queue for the VI.
3587  *
3588  *      Configures VI-specific RSS properties.
3589  */
3590 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
3591                      unsigned int flags, unsigned int defq)
3592 {
3593         struct fw_rss_vi_config_cmd c;
3594
3595         memset(&c, 0, sizeof(c));
3596         c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
3597                                    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3598                                    FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
3599         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3600         c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
3601                                         FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
3602         return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
3603 }
3604
/* Read an RSS table row.  Writes the row request (upper bits select the
 * read operation per the register layout) and polls for the valid bit;
 * the row contents are returned through @val.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
				   5, 0, val);
}
3612
3613 /**
3614  *      t4_read_rss - read the contents of the RSS mapping table
3615  *      @adapter: the adapter
3616  *      @map: holds the contents of the RSS mapping table
3617  *
3618  *      Reads the contents of the RSS hash->queue mapping table.
3619  */
3620 int t4_read_rss(struct adapter *adapter, u16 *map)
3621 {
3622         u32 val;
3623         int i, ret;
3624
3625         for (i = 0; i < RSS_NENTRIES / 2; ++i) {
3626                 ret = rd_rss_row(adapter, i, &val);
3627                 if (ret)
3628                         return ret;
3629                 *map++ = LKPTBLQUEUE0_G(val);
3630                 *map++ = LKPTBLQUEUE1_G(val);
3631         }
3632         return 0;
3633 }
3634
3635 /**
3636  *      t4_read_rss_key - read the global RSS key
3637  *      @adap: the adapter
3638  *      @key: 10-entry array holding the 320-bit RSS key
3639  *
3640  *      Reads the global 320-bit RSS key.
3641  */
3642 void t4_read_rss_key(struct adapter *adap, u32 *key)
3643 {
3644         t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
3645                          TP_RSS_SECRET_KEY0_A);
3646 }
3647
3648 /**
3649  *      t4_write_rss_key - program one of the RSS keys
3650  *      @adap: the adapter
3651  *      @key: 10-entry array holding the 320-bit RSS key
3652  *      @idx: which RSS key to write
3653  *
3654  *      Writes one of the RSS keys with the given 320-bit value.  If @idx is
3655  *      0..15 the corresponding entry in the RSS key table is written,
3656  *      otherwise the global RSS key is written.
3657  */
3658 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
3659 {
3660         u8 rss_key_addr_cnt = 16;
3661         u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
3662
3663         /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
3664          * allows access to key addresses 16-63 by using KeyWrAddrX
3665          * as index[5:4](upper 2) into key table
3666          */
3667         if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
3668             (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
3669                 rss_key_addr_cnt = 32;
3670
3671         t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
3672                           TP_RSS_SECRET_KEY0_A);
3673
3674         if (idx >= 0 && idx < rss_key_addr_cnt) {
3675                 if (rss_key_addr_cnt > 16)
3676                         t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
3677                                      KEYWRADDRX_V(idx >> 4) |
3678                                      T6_VFWRADDR_V(idx) | KEYWREN_F);
3679                 else
3680                         t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
3681                                      KEYWRADDR_V(idx) | KEYWREN_F);
3682         }
3683 }
3684
3685 /**
3686  *      t4_read_rss_pf_config - read PF RSS Configuration Table
3687  *      @adapter: the adapter
3688  *      @index: the entry in the PF RSS table to read
3689  *      @valp: where to store the returned value
3690  *
3691  *      Reads the PF RSS Configuration Table at the specified index and returns
3692  *      the value found there.
3693  */
3694 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
3695                            u32 *valp)
3696 {
3697         t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3698                          valp, 1, TP_RSS_PF0_CONFIG_A + index);
3699 }
3700
3701 /**
3702  *      t4_read_rss_vf_config - read VF RSS Configuration Table
3703  *      @adapter: the adapter
3704  *      @index: the entry in the VF RSS table to read
3705  *      @vfl: where to store the returned VFL
3706  *      @vfh: where to store the returned VFH
3707  *
3708  *      Reads the VF RSS Configuration Table at the specified index and returns
3709  *      the (VFL, VFH) values found there.
3710  */
3711 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
3712                            u32 *vfl, u32 *vfh)
3713 {
3714         u32 vrt, mask, data;
3715
3716         if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
3717                 mask = VFWRADDR_V(VFWRADDR_M);
3718                 data = VFWRADDR_V(index);
3719         } else {
3720                  mask =  T6_VFWRADDR_V(T6_VFWRADDR_M);
3721                  data = T6_VFWRADDR_V(index);
3722         }
3723
3724         /* Request that the index'th VF Table values be read into VFL/VFH.
3725          */
3726         vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
3727         vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
3728         vrt |= data | VFRDEN_F;
3729         t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
3730
3731         /* Grab the VFL/VFH values ...
3732          */
3733         t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3734                          vfl, 1, TP_RSS_VFL_CONFIG_A);
3735         t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3736                          vfh, 1, TP_RSS_VFH_CONFIG_A);
3737 }
3738
3739 /**
3740  *      t4_read_rss_pf_map - read PF RSS Map
3741  *      @adapter: the adapter
3742  *
3743  *      Reads the PF RSS Map register and returns its value.
3744  */
3745 u32 t4_read_rss_pf_map(struct adapter *adapter)
3746 {
3747         u32 pfmap;
3748
3749         t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3750                          &pfmap, 1, TP_RSS_PF_MAP_A);
3751         return pfmap;
3752 }
3753
3754 /**
3755  *      t4_read_rss_pf_mask - read PF RSS Mask
3756  *      @adapter: the adapter
3757  *
3758  *      Reads the PF RSS Mask register and returns its value.
3759  */
3760 u32 t4_read_rss_pf_mask(struct adapter *adapter)
3761 {
3762         u32 pfmask;
3763
3764         t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3765                          &pfmask, 1, TP_RSS_PF_MSK_A);
3766         return pfmask;
3767 }
3768
3769 /**
3770  *      t4_tp_get_tcp_stats - read TP's TCP MIB counters
3771  *      @adap: the adapter
3772  *      @v4: holds the TCP/IP counter values
3773  *      @v6: holds the TCP/IPv6 counter values
3774  *
3775  *      Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3776  *      Either @v4 or @v6 may be %NULL to skip the corresponding stats.
3777  */
3778 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3779                          struct tp_tcp_stats *v6)
3780 {
3781         u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
3782
3783 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
3784 #define STAT(x)     val[STAT_IDX(x)]
3785 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3786
3787         if (v4) {
3788                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
3789                                  ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
3790                 v4->tcp_out_rsts = STAT(OUT_RST);
3791                 v4->tcp_in_segs  = STAT64(IN_SEG);
3792                 v4->tcp_out_segs = STAT64(OUT_SEG);
3793                 v4->tcp_retrans_segs = STAT64(RXT_SEG);
3794         }
3795         if (v6) {
3796                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
3797                                  ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
3798                 v6->tcp_out_rsts = STAT(OUT_RST);
3799                 v6->tcp_in_segs  = STAT64(IN_SEG);
3800                 v6->tcp_out_segs = STAT64(OUT_SEG);
3801                 v6->tcp_retrans_segs = STAT64(RXT_SEG);
3802         }
3803 #undef STAT64
3804 #undef STAT
3805 #undef STAT_IDX
3806 }
3807
3808 /**
3809  *      t4_tp_get_err_stats - read TP's error MIB counters
3810  *      @adap: the adapter
3811  *      @st: holds the counter values
3812  *
3813  *      Returns the values of TP's error counters.
3814  */
3815 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3816 {
3817         /* T6 and later has 2 channels */
3818         if (adap->params.arch.nchan == NCHAN) {
3819                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3820                                  st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
3821                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3822                                  st->tnl_cong_drops, 8,
3823                                  TP_MIB_TNL_CNG_DROP_0_A);
3824                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3825                                  st->tnl_tx_drops, 4,
3826                                  TP_MIB_TNL_DROP_0_A);
3827                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3828                                  st->ofld_vlan_drops, 4,
3829                                  TP_MIB_OFD_VLN_DROP_0_A);
3830                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3831                                  st->tcp6_in_errs, 4,
3832                                  TP_MIB_TCP_V6IN_ERR_0_A);
3833         } else {
3834                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3835                                  st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
3836                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3837                                  st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
3838                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3839                                  st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
3840                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3841                                  st->tnl_cong_drops, 2,
3842                                  TP_MIB_TNL_CNG_DROP_0_A);
3843                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3844                                  st->ofld_chan_drops, 2,
3845                                  TP_MIB_OFD_CHN_DROP_0_A);
3846                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3847                                  st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
3848                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3849                                  st->ofld_vlan_drops, 2,
3850                                  TP_MIB_OFD_VLN_DROP_0_A);
3851                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3852                                  st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
3853         }
3854         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
3855                          &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
3856 }
3857
3858 /**
3859  *      t4_tp_get_cpl_stats - read TP's CPL MIB counters
3860  *      @adap: the adapter
3861  *      @st: holds the counter values
3862  *
3863  *      Returns the values of TP's CPL counters.
3864  */
3865 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3866 {
3867         /* T6 and later has 2 channels */
3868         if (adap->params.arch.nchan == NCHAN) {
3869                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
3870                                  8, TP_MIB_CPL_IN_REQ_0_A);
3871         } else {
3872                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
3873                                  2, TP_MIB_CPL_IN_REQ_0_A);
3874                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
3875                                  2, TP_MIB_CPL_OUT_RSP_0_A);
3876         }
3877 }
3878
3879 /**
3880  *      t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3881  *      @adap: the adapter
3882  *      @st: holds the counter values
3883  *
3884  *      Returns the values of TP's RDMA counters.
3885  */
3886 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3887 {
3888         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
3889                          2, TP_MIB_RQE_DFR_PKT_A);
3890 }
3891
3892 /**
3893  *      t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3894  *      @adap: the adapter
3895  *      @idx: the port index
3896  *      @st: holds the counter values
3897  *
3898  *      Returns the values of TP's FCoE counters for the selected port.
3899  */
3900 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3901                        struct tp_fcoe_stats *st)
3902 {
3903         u32 val[2];
3904
3905         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
3906                          1, TP_MIB_FCOE_DDP_0_A + idx);
3907         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
3908                          1, TP_MIB_FCOE_DROP_0_A + idx);
3909         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
3910                          2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
3911         st->octets_ddp = ((u64)val[0] << 32) | val[1];
3912 }
3913
3914 /**
3915  *      t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3916  *      @adap: the adapter
3917  *      @st: holds the counter values
3918  *
3919  *      Returns the values of TP's counters for non-TCP directly-placed packets.
3920  */
3921 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3922 {
3923         u32 val[4];
3924
3925         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
3926                          TP_MIB_USM_PKTS_A);
3927         st->frames = val[0];
3928         st->drops = val[1];
3929         st->octets = ((u64)val[2] << 32) | val[3];
3930 }
3931
3932 /**
3933  *      t4_read_mtu_tbl - returns the values in the HW path MTU table
3934  *      @adap: the adapter
3935  *      @mtus: where to store the MTU values
3936  *      @mtu_log: where to store the MTU base-2 log (may be %NULL)
3937  *
3938  *      Reads the HW path MTU table.
3939  */
3940 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3941 {
3942         u32 v;
3943         int i;
3944
3945         for (i = 0; i < NMTUS; ++i) {
3946                 t4_write_reg(adap, TP_MTU_TABLE_A,
3947                              MTUINDEX_V(0xff) | MTUVALUE_V(i));
3948                 v = t4_read_reg(adap, TP_MTU_TABLE_A);
3949                 mtus[i] = MTUVALUE_G(v);
3950                 if (mtu_log)
3951                         mtu_log[i] = MTUWIDTH_G(v);
3952         }
3953 }
3954
3955 /**
3956  *      t4_read_cong_tbl - reads the congestion control table
3957  *      @adap: the adapter
3958  *      @incr: where to store the alpha values
3959  *
3960  *      Reads the additive increments programmed into the HW congestion
3961  *      control table.
3962  */
3963 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3964 {
3965         unsigned int mtu, w;
3966
3967         for (mtu = 0; mtu < NMTUS; ++mtu)
3968                 for (w = 0; w < NCCTRL_WIN; ++w) {
3969                         t4_write_reg(adap, TP_CCTRL_TABLE_A,
3970                                      ROWINDEX_V(0xffff) | (mtu << 5) | w);
3971                         incr[mtu][w] = (u16)t4_read_reg(adap,
3972                                                 TP_CCTRL_TABLE_A) & 0x1fff;
3973                 }
3974 }
3975
3976 /**
3977  *      t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3978  *      @adap: the adapter
3979  *      @addr: the indirect TP register address
3980  *      @mask: specifies the field within the register to modify
3981  *      @val: new value for the field
3982  *
3983  *      Sets a field of an indirect TP register to the given value.
3984  */
3985 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3986                             unsigned int mask, unsigned int val)
3987 {
3988         t4_write_reg(adap, TP_PIO_ADDR_A, addr);
3989         val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
3990         t4_write_reg(adap, TP_PIO_DATA_A, val);
3991 }
3992
3993 /**
3994  *      init_cong_ctrl - initialize congestion control parameters
3995  *      @a: the alpha values for congestion control
3996  *      @b: the beta values for congestion control
3997  *
3998  *      Initialize the congestion control parameters.
3999  */
4000 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
4001 {
4002         a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
4003         a[9] = 2;
4004         a[10] = 3;
4005         a[11] = 4;
4006         a[12] = 5;
4007         a[13] = 6;
4008         a[14] = 7;
4009         a[15] = 8;
4010         a[16] = 9;
4011         a[17] = 10;
4012         a[18] = 14;
4013         a[19] = 17;
4014         a[20] = 21;
4015         a[21] = 25;
4016         a[22] = 30;
4017         a[23] = 35;
4018         a[24] = 45;
4019         a[25] = 60;
4020         a[26] = 80;
4021         a[27] = 100;
4022         a[28] = 200;
4023         a[29] = 300;
4024         a[30] = 400;
4025         a[31] = 500;
4026
4027         b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
4028         b[9] = b[10] = 1;
4029         b[11] = b[12] = 2;
4030         b[13] = b[14] = b[15] = b[16] = 3;
4031         b[17] = b[18] = b[19] = b[20] = b[21] = 4;
4032         b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
4033         b[28] = b[29] = 6;
4034         b[30] = b[31] = 7;
4035 }
4036
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Approximate average packet size for each congestion control
	 * window, used to scale the additive increments below.
	 */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/* NOTE(review): adjusts the log2 width based on the bit two
		 * below the MSB — effectively round-to-nearest rather than
		 * round-down; confirm against the MTUWIDTH field spec.
		 */
		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
			     MTUWIDTH_V(log2) | MTUVALUE_V(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* Increment scales with the payload (MTU minus 40
			 * bytes, presumably the TCP/IP header — confirm) but
			 * never drops below CC_MIN_INCR.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			/* NOTE(review): packs {MTU index, window, beta,
			 * increment} into one TP_CCTRL_TABLE word — confirm
			 * the shift widths against the register spec.
			 */
			t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
4083
4084 /**
4085  *      t4_pmtx_get_stats - returns the HW stats from PMTX
4086  *      @adap: the adapter
4087  *      @cnt: where to store the count statistics
4088  *      @cycles: where to store the cycle statistics
4089  *
4090  *      Returns performance statistics from PMTX.
4091  */
4092 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
4093 {
4094         int i;
4095         u32 data[2];
4096
4097         for (i = 0; i < PM_NSTATS; i++) {
4098                 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
4099                 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
4100                 if (is_t4(adap->params.chip)) {
4101                         cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
4102                 } else {
4103                         t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
4104                                          PM_TX_DBG_DATA_A, data, 2,
4105                                          PM_TX_DBG_STAT_MSB_A);
4106                         cycles[i] = (((u64)data[0] << 32) | data[1]);
4107                 }
4108         }
4109 }
4110
4111 /**
4112  *      t4_pmrx_get_stats - returns the HW stats from PMRX
4113  *      @adap: the adapter
4114  *      @cnt: where to store the count statistics
4115  *      @cycles: where to store the cycle statistics
4116  *
4117  *      Returns performance statistics from PMRX.
4118  */
4119 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
4120 {
4121         int i;
4122         u32 data[2];
4123
4124         for (i = 0; i < PM_NSTATS; i++) {
4125                 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
4126                 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
4127                 if (is_t4(adap->params.chip)) {
4128                         cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
4129                 } else {
4130                         t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
4131                                          PM_RX_DBG_DATA_A, data, 2,
4132                                          PM_RX_DBG_STAT_MSB_A);
4133                         cycles[i] = (((u64)data[0] << 32) | data[1]);
4134                 }
4135         }
4136 }
4137
4138 /**
4139  *      t4_get_mps_bg_map - return the buffer groups associated with a port
4140  *      @adap: the adapter
4141  *      @idx: the port index
4142  *
4143  *      Returns a bitmap indicating which MPS buffer groups are associated
4144  *      with the given port.  Bit i is set if buffer group i is used by the
4145  *      port.
4146  */
4147 unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
4148 {
4149         u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
4150
4151         if (n == 0)
4152                 return idx == 0 ? 0xf : 0;
4153         if (n == 1)
4154                 return idx < 2 ? (3 << (2 * idx)) : 0;
4155         return 1 << idx;
4156 }
4157
4158 /**
4159  *      t4_get_port_type_description - return Port Type string description
4160  *      @port_type: firmware Port Type enumeration
4161  */
4162 const char *t4_get_port_type_description(enum fw_port_type port_type)
4163 {
4164         static const char *const port_type_description[] = {
4165                 "R XFI",
4166                 "R XAUI",
4167                 "T SGMII",
4168                 "T XFI",
4169                 "T XAUI",
4170                 "KX4",
4171                 "CX4",
4172                 "KX",
4173                 "KR",
4174                 "R SFP+",
4175                 "KR/KX",
4176                 "KR/KX/KX4",
4177                 "R QSFP_10G",
4178                 "R QSA",
4179                 "R QSFP",
4180                 "R BP40_BA",
4181         };
4182
4183         if (port_type < ARRAY_SIZE(port_type_description))
4184                 return port_type_description[port_type];
4185         return "UNKNOWN";
4186 }
4187
4188 /**
4189  *      t4_get_port_stats_offset - collect port stats relative to a previous
4190  *                                 snapshot
4191  *      @adap: The adapter
4192  *      @idx: The port
4193  *      @stats: Current stats to fill
4194  *      @offset: Previous stats snapshot
4195  */
4196 void t4_get_port_stats_offset(struct adapter *adap, int idx,
4197                               struct port_stats *stats,
4198                               struct port_stats *offset)
4199 {
4200         u64 *s, *o;
4201         int i;
4202
4203         t4_get_port_stats(adap, idx, stats);
4204         for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
4205                         i < (sizeof(struct port_stats) / sizeof(u64));
4206                         i++, s++, o++)
4207                 *s -= *o;
4208 }
4209
4210 /**
4211  *      t4_get_port_stats - collect port statistics
4212  *      @adap: the adapter
4213  *      @idx: the port index
4214  *      @p: the stats structure to fill
4215  *
4216  *      Collect statistics related to the given port from HW.
4217  */
4218 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
4219 {
4220         u32 bgmap = t4_get_mps_bg_map(adap, idx);
4221
4222 #define GET_STAT(name) \
4223         t4_read_reg64(adap, \
4224         (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
4225         T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
4226 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
4227
4228         p->tx_octets           = GET_STAT(TX_PORT_BYTES);
4229         p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
4230         p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
4231         p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
4232         p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
4233         p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
4234         p->tx_frames_64        = GET_STAT(TX_PORT_64B);
4235         p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
4236         p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
4237         p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
4238         p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
4239         p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
4240         p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
4241         p->tx_drop             = GET_STAT(TX_PORT_DROP);
4242         p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
4243         p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
4244         p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
4245         p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
4246         p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
4247         p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
4248         p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
4249         p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
4250         p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
4251
4252         p->rx_octets           = GET_STAT(RX_PORT_BYTES);
4253         p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
4254         p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
4255         p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
4256         p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
4257         p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
4258         p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
4259         p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
4260         p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
4261         p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
4262         p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
4263         p->rx_frames_64        = GET_STAT(RX_PORT_64B);
4264         p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
4265         p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
4266         p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
4267         p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
4268         p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
4269         p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
4270         p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
4271         p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
4272         p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
4273         p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
4274         p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
4275         p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
4276         p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
4277         p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
4278         p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
4279
4280         p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
4281         p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
4282         p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
4283         p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
4284         p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
4285         p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
4286         p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
4287         p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
4288
4289 #undef GET_STAT
4290 #undef GET_STAT_COM
4291 }
4292
4293 /**
4294  *      t4_get_lb_stats - collect loopback port statistics
4295  *      @adap: the adapter
4296  *      @idx: the loopback port index
4297  *      @p: the stats structure to fill
4298  *
4299  *      Return HW statistics for the given loopback port.
4300  */
4301 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
4302 {
4303         u32 bgmap = t4_get_mps_bg_map(adap, idx);
4304
4305 #define GET_STAT(name) \
4306         t4_read_reg64(adap, \
4307         (is_t4(adap->params.chip) ? \
4308         PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
4309         T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
4310 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
4311
4312         p->octets           = GET_STAT(BYTES);
4313         p->frames           = GET_STAT(FRAMES);
4314         p->bcast_frames     = GET_STAT(BCAST);
4315         p->mcast_frames     = GET_STAT(MCAST);
4316         p->ucast_frames     = GET_STAT(UCAST);
4317         p->error_frames     = GET_STAT(ERROR);
4318
4319         p->frames_64        = GET_STAT(64B);
4320         p->frames_65_127    = GET_STAT(65B_127B);
4321         p->frames_128_255   = GET_STAT(128B_255B);
4322         p->frames_256_511   = GET_STAT(256B_511B);
4323         p->frames_512_1023  = GET_STAT(512B_1023B);
4324         p->frames_1024_1518 = GET_STAT(1024B_1518B);
4325         p->frames_1519_max  = GET_STAT(1519B_MAX);
4326         p->drop             = GET_STAT(DROP_FRAMES);
4327
4328         p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
4329         p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
4330         p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
4331         p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
4332         p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
4333         p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
4334         p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
4335         p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
4336
4337 #undef GET_STAT
4338 #undef GET_STAT_COM
4339 }
4340
/**
 *	t4_mk_filtdelwr - create a delete filter WR
 *	@ftid: the filter ID
 *	@wr: the filter work request to populate
 *	@qid: ingress queue to receive the delete notification
 *
 *	Creates a filter work request to delete the supplied filter.  If @qid is
 *	negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
	/* NOREPLY suppresses the firmware's delete notification */
	wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
				    FW_FILTER_WR_NOREPLY_V(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
			cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
}
4361
/* Initialize the common header of firmware command @var: sets the
 * FW_<cmd>_CMD opcode, the REQUEST flag, the READ or WRITE flag per @rd_wr,
 * and the encoded 16-byte-unit length of the command.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
					FW_CMD_REQUEST_F | \
					FW_CMD_##rd_wr##_F); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
4368
4369 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
4370                           u32 addr, u32 val)
4371 {
4372         u32 ldst_addrspace;
4373         struct fw_ldst_cmd c;
4374
4375         memset(&c, 0, sizeof(c));
4376         ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
4377         c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4378                                         FW_CMD_REQUEST_F |
4379                                         FW_CMD_WRITE_F |
4380                                         ldst_addrspace);
4381         c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4382         c.u.addrval.addr = cpu_to_be32(addr);
4383         c.u.addrval.val = cpu_to_be32(val);
4384
4385         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4386 }
4387
4388 /**
4389  *      t4_mdio_rd - read a PHY register through MDIO
4390  *      @adap: the adapter
4391  *      @mbox: mailbox to use for the FW command
4392  *      @phy_addr: the PHY address
4393  *      @mmd: the PHY MMD to access (0 for clause 22 PHYs)
4394  *      @reg: the register to read
4395  *      @valp: where to store the value
4396  *
4397  *      Issues a FW command through the given mailbox to read a PHY register.
4398  */
4399 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4400                unsigned int mmd, unsigned int reg, u16 *valp)
4401 {
4402         int ret;
4403         u32 ldst_addrspace;
4404         struct fw_ldst_cmd c;
4405
4406         memset(&c, 0, sizeof(c));
4407         ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
4408         c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4409                                         FW_CMD_REQUEST_F | FW_CMD_READ_F |
4410                                         ldst_addrspace);
4411         c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4412         c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
4413                                          FW_LDST_CMD_MMD_V(mmd));
4414         c.u.mdio.raddr = cpu_to_be16(reg);
4415
4416         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4417         if (ret == 0)
4418                 *valp = be16_to_cpu(c.u.mdio.rval);
4419         return ret;
4420 }
4421
4422 /**
4423  *      t4_mdio_wr - write a PHY register through MDIO
4424  *      @adap: the adapter
4425  *      @mbox: mailbox to use for the FW command
4426  *      @phy_addr: the PHY address
4427  *      @mmd: the PHY MMD to access (0 for clause 22 PHYs)
4428  *      @reg: the register to write
4429  *      @valp: value to write
4430  *
4431  *      Issues a FW command through the given mailbox to write a PHY register.
4432  */
4433 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4434                unsigned int mmd, unsigned int reg, u16 val)
4435 {
4436         u32 ldst_addrspace;
4437         struct fw_ldst_cmd c;
4438
4439         memset(&c, 0, sizeof(c));
4440         ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
4441         c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4442                                         FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4443                                         ldst_addrspace);
4444         c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4445         c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
4446                                          FW_LDST_CMD_MMD_V(mmd));
4447         c.u.mdio.raddr = cpu_to_be16(reg);
4448         c.u.mdio.rval = cpu_to_be16(val);
4449
4450         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4451 }
4452
4453 /**
4454  *      t4_sge_decode_idma_state - decode the idma state
4455  *      @adap: the adapter
4456  *      @state: the state idma is stuck in
4457  */
4458 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
4459 {
4460         static const char * const t4_decode[] = {
4461                 "IDMA_IDLE",
4462                 "IDMA_PUSH_MORE_CPL_FIFO",
4463                 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
4464                 "Not used",
4465                 "IDMA_PHYSADDR_SEND_PCIEHDR",
4466                 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
4467                 "IDMA_PHYSADDR_SEND_PAYLOAD",
4468                 "IDMA_SEND_FIFO_TO_IMSG",
4469                 "IDMA_FL_REQ_DATA_FL_PREP",
4470                 "IDMA_FL_REQ_DATA_FL",
4471                 "IDMA_FL_DROP",
4472                 "IDMA_FL_H_REQ_HEADER_FL",
4473                 "IDMA_FL_H_SEND_PCIEHDR",
4474                 "IDMA_FL_H_PUSH_CPL_FIFO",
4475                 "IDMA_FL_H_SEND_CPL",
4476                 "IDMA_FL_H_SEND_IP_HDR_FIRST",
4477                 "IDMA_FL_H_SEND_IP_HDR",
4478                 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
4479                 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
4480                 "IDMA_FL_H_SEND_IP_HDR_PADDING",
4481                 "IDMA_FL_D_SEND_PCIEHDR",
4482                 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
4483                 "IDMA_FL_D_REQ_NEXT_DATA_FL",
4484                 "IDMA_FL_SEND_PCIEHDR",
4485                 "IDMA_FL_PUSH_CPL_FIFO",
4486                 "IDMA_FL_SEND_CPL",
4487                 "IDMA_FL_SEND_PAYLOAD_FIRST",
4488                 "IDMA_FL_SEND_PAYLOAD",
4489                 "IDMA_FL_REQ_NEXT_DATA_FL",
4490                 "IDMA_FL_SEND_NEXT_PCIEHDR",
4491                 "IDMA_FL_SEND_PADDING",
4492                 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
4493                 "IDMA_FL_SEND_FIFO_TO_IMSG",
4494                 "IDMA_FL_REQ_DATAFL_DONE",
4495                 "IDMA_FL_REQ_HEADERFL_DONE",
4496         };
4497         static const char * const t5_decode[] = {
4498                 "IDMA_IDLE",
4499                 "IDMA_ALMOST_IDLE",
4500                 "IDMA_PUSH_MORE_CPL_FIFO",
4501                 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
4502                 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
4503                 "IDMA_PHYSADDR_SEND_PCIEHDR",
4504                 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
4505                 "IDMA_PHYSADDR_SEND_PAYLOAD",
4506                 "IDMA_SEND_FIFO_TO_IMSG",
4507                 "IDMA_FL_REQ_DATA_FL",
4508                 "IDMA_FL_DROP",
4509                 "IDMA_FL_DROP_SEND_INC",
4510                 "IDMA_FL_H_REQ_HEADER_FL",
4511                 "IDMA_FL_H_SEND_PCIEHDR",
4512                 "IDMA_FL_H_PUSH_CPL_FIFO",
4513                 "IDMA_FL_H_SEND_CPL",
4514                 "IDMA_FL_H_SEND_IP_HDR_FIRST",
4515                 "IDMA_FL_H_SEND_IP_HDR",
4516                 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
4517                 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
4518                 "IDMA_FL_H_SEND_IP_HDR_PADDING",
4519                 "IDMA_FL_D_SEND_PCIEHDR",
4520                 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
4521                 "IDMA_FL_D_REQ_NEXT_DATA_FL",
4522                 "IDMA_FL_SEND_PCIEHDR",
4523                 "IDMA_FL_PUSH_CPL_FIFO",
4524                 "IDMA_FL_SEND_CPL",
4525                 "IDMA_FL_SEND_PAYLOAD_FIRST",
4526                 "IDMA_FL_SEND_PAYLOAD",
4527                 "IDMA_FL_REQ_NEXT_DATA_FL",
4528                 "IDMA_FL_SEND_NEXT_PCIEHDR",
4529                 "IDMA_FL_SEND_PADDING",
4530                 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
4531         };
4532         static const u32 sge_regs[] = {
4533                 SGE_DEBUG_DATA_LOW_INDEX_2_A,
4534                 SGE_DEBUG_DATA_LOW_INDEX_3_A,
4535                 SGE_DEBUG_DATA_HIGH_INDEX_10_A,
4536         };
4537         const char **sge_idma_decode;
4538         int sge_idma_decode_nstates;
4539         int i;
4540
4541         if (is_t4(adapter->params.chip)) {
4542                 sge_idma_decode = (const char **)t4_decode;
4543                 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
4544         } else {
4545                 sge_idma_decode = (const char **)t5_decode;
4546                 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
4547         }
4548
4549         if (state < sge_idma_decode_nstates)
4550                 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
4551         else
4552                 CH_WARN(adapter, "idma state %d unknown\n", state);
4553
4554         for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
4555                 CH_WARN(adapter, "SGE register %#x value %#x\n",
4556                         sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
4557 }
4558
4559 /**
4560  *      t4_sge_ctxt_flush - flush the SGE context cache
4561  *      @adap: the adapter
4562  *      @mbox: mailbox to use for the FW command
4563  *
4564  *      Issues a FW command through the given mailbox to flush the
4565  *      SGE context cache.
4566  */
4567 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4568 {
4569         int ret;
4570         u32 ldst_addrspace;
4571         struct fw_ldst_cmd c;
4572
4573         memset(&c, 0, sizeof(c));
4574         ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
4575         c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4576                                         FW_CMD_REQUEST_F | FW_CMD_READ_F |
4577                                         ldst_addrspace);
4578         c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4579         c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
4580
4581         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4582         return ret;
4583 }
4584
4585 /**
4586  *      t4_fw_hello - establish communication with FW
4587  *      @adap: the adapter
4588  *      @mbox: mailbox to use for the FW command
4589  *      @evt_mbox: mailbox to receive async FW events
4590  *      @master: specifies the caller's willingness to be the device master
4591  *      @state: returns the current device state (if non-NULL)
4592  *
4593  *      Issues a command to establish communication with FW.  Returns either
4594  *      an error (negative integer) or the mailbox of the Master PF.
4595  */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	/* Advertise our mastership preference: MASTER_CANT disables
	 * mastership, MASTER_MUST forces it (naming our own mailbox as the
	 * Master mailbox); otherwise we let the firmware pick.
	 */
	c.err_to_clearinit = cpu_to_be32(
		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
					mbox : FW_HELLO_CMD_MBMASTER_M) |
		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT_F);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret < 0) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
			t4_report_fw_error(adap);
		return ret;
	}

	/* Extract the Master mailbox and, if requested, the device state
	 * from the firmware's reply (errors take precedence over "init").
	 */
	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
	if (state) {
		if (v & FW_HELLO_CMD_ERR_F)
			*state = DEV_STATE_ERR;
		else if (v & FW_HELLO_CMD_INIT_F)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * PCIE_FW_MASTER_M so the test below will work ...
	 */
	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll every 50ms until the budget is spent. */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized is indicated
			 * by the firmware, keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & PCIE_FW_INIT_F)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
	}

	/* On success the return value is the Master PF's mailbox. */
	return master_mbox;
}
4714
4715 /**
4716  *      t4_fw_bye - end communication with FW
4717  *      @adap: the adapter
4718  *      @mbox: mailbox to use for the FW command
4719  *
4720  *      Issues a command to terminate communication with FW.
4721  */
4722 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4723 {
4724         struct fw_bye_cmd c;
4725
4726         memset(&c, 0, sizeof(c));
4727         INIT_CMD(c, BYE, WRITE);
4728         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4729 }
4730
/**
 *	t4_early_init - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 */
4739 int t4_early_init(struct adapter *adap, unsigned int mbox)
4740 {
4741         struct fw_initialize_cmd c;
4742
4743         memset(&c, 0, sizeof(c));
4744         INIT_CMD(c, INITIALIZE, WRITE);
4745         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4746 }
4747
4748 /**
4749  *      t4_fw_reset - issue a reset to FW
4750  *      @adap: the adapter
4751  *      @mbox: mailbox to use for the FW command
4752  *      @reset: specifies the type of reset to perform
4753  *
4754  *      Issues a reset command of the specified type to FW.
4755  */
4756 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4757 {
4758         struct fw_reset_cmd c;
4759
4760         memset(&c, 0, sizeof(c));
4761         INIT_CMD(c, RESET, WRITE);
4762         c.val = cpu_to_be32(reset);
4763         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4764 }
4765
4766 /**
4767  *      t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4768  *      @adap: the adapter
4769  *      @mbox: mailbox to use for the FW RESET command (if desired)
4770  *      @force: force uP into RESET even if FW RESET command fails
4771  *
4772  *      Issues a RESET command to firmware (if desired) with a HALT indication
4773  *      and then puts the microprocessor into RESET state.  The RESET command
4774  *      will only be issued if a legitimate mailbox is provided (mbox <=
4775  *      PCIE_FW_MASTER_M).
4776  *
4777  *      This is generally used in order for the host to safely manipulate the
4778  *      adapter without fear of conflicting with whatever the firmware might
4779  *      be doing.  The only way out of this state is to RESTART the firmware
4780  *      ...
4781  */
static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.  (Mailbox values above PCIE_FW_MASTER_M
	 * are treated as "no mailbox" and skip the firmware command.)
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
		c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		/* Assert uP reset, then latch the HALT flag in PCIE_FW. */
		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
				 PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}
4825
/**
 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@reset: if we want to do a RESET to restart things
 *
4831  *      Restart firmware previously halted by t4_fw_halt().  On successful
4832  *      return the previous PF Master remains as the new PF Master and there
4833  *      is no need to issue a new HELLO command, etc.
4834  *
4835  *      We do this in two ways:
4836  *
4837  *       1. If we're dealing with newer firmware we'll simply want to take
4838  *          the chip's microprocessor out of RESET.  This will cause the
4839  *          firmware to start up from its start vector.  And then we'll loop
4840  *          until the firmware indicates it's started again (PCIE_FW.HALT
4841  *          reset to 0) or we timeout.
4842  *
4843  *       2. If we're dealing with older firmware then we'll need to RESET
4844  *          the chip since older firmware won't recognize the PCIE_FW.HALT
4845  *          flag and automatically RESET itself on startup.
4846  */
static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			/* Take the uP out of RESET so it can see the cmd. */
			t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					PIORST_F | PIORSTMODE_F) == 0)
				return 0;
		}

		/* The hammer: a full chip-level PIO reset. */
		t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
		msleep(2000);
	} else {
		int ms;

		/* Release the uP from RESET and poll until the firmware
		 * clears PCIE_FW.HALT (i.e. has restarted), giving up after
		 * FW_CMD_MAX_TIMEOUT milliseconds.
		 */
		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
4888
4889 /**
4890  *      t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4891  *      @adap: the adapter
4892  *      @mbox: mailbox to use for the FW RESET command (if desired)
4893  *      @fw_data: the firmware image to write
4894  *      @size: image size
4895  *      @force: force upgrade even if firmware doesn't cooperate
4896  *
4897  *      Perform all of the steps necessary for upgrading an adapter's
4898  *      firmware image.  Normally this requires the cooperation of the
4899  *      existing firmware in order to halt all existing activities
4900  *      but if an invalid mailbox token is passed in we skip that step
4901  *      (though we'll still put the adapter microprocessor into RESET in
4902  *      that case).
4903  *
4904  *      On successful return the new firmware will have been loaded and
4905  *      the adapter will have been fully RESET losing all previous setup
4906  *      state.  On unsuccessful return the adapter may be completely hosed ...
4907  *      positive errno indicates that the adapter is ~probably~ intact, a
4908  *      negative errno indicates that things are looking bad ...
4909  */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	/* Refuse to load an image built for a different chip. */
	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	/* Halt the running firmware first; with @force we press on even if
	 * the halt fails (the uP still gets put into RESET in that case).
	 */
	ret = t4_fw_halt(adap, mbox, force);
	if (ret < 0 && !force)
		return ret;

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return t4_fw_restart(adap, mbox, reset);
}
4938
4939 /**
4940  *      t4_fixup_host_params - fix up host-dependent parameters
4941  *      @adap: the adapter
4942  *      @page_size: the host's Base Page Size
4943  *      @cache_line_size: the host's Cache Line Size
4944  *
4945  *      Various registers in T4 contain values which are dependent on the
4946  *      host's Base Page and Cache Line Sizes.  This function will fix all of
4947  *      those registers with the appropriate values as passed in ...
4948  */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	unsigned int page_shift = fls(page_size) - 1;
	/* SGE encodes the host page size relative to a 1KB base, i.e. as
	 * log2(page_size) - 10 -- presumably per the SGE register spec;
	 * NOTE(review): confirm against the T4 register documentation.
	 */
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	/* Program the same host page size for all eight PFs. */
	t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
		     HOSTPAGESIZEPF0_V(sge_hps) |
		     HOSTPAGESIZEPF1_V(sge_hps) |
		     HOSTPAGESIZEPF2_V(sge_hps) |
		     HOSTPAGESIZEPF3_V(sge_hps) |
		     HOSTPAGESIZEPF4_V(sge_hps) |
		     HOSTPAGESIZEPF5_V(sge_hps) |
		     HOSTPAGESIZEPF6_V(sge_hps) |
		     HOSTPAGESIZEPF7_V(sge_hps));

	if (is_t4(adap->params.chip)) {
		/* On T4 a single boundary controls both padding and packing,
		 * so it must be the full cache-line-derived alignment.
		 */
		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(fl_align_log -
						  INGPADBOUNDARY_SHIFT_X) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
	} else {
		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.  For T5 the smallest
		 * Padding Boundary which we can select is 32 bytes which is
		 * larger than any known Memory Controller Line Size so we'll
		 * use that.
		 *
		 * T5 has a different interpretation of the "0" value for the
		 * Packing Boundary.  This corresponds to 16 bytes instead of
		 * the expected 32 bytes.  We never have a Packing Boundary
		 * less than 32 bytes so we can't use that special value but
		 * on the other hand, if we wanted 32 bytes, the best we can
		 * really do is 64 bytes.
		 */
		if (fl_align <= 32) {
			fl_align = 64;
			fl_align_log = 6;
		}
		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
		t4_set_reg_field(adap, SGE_CONTROL2_A,
				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
				 INGPACKBOUNDARY_V(fl_align_log -
						   INGPACKBOUNDARY_SHIFT_X));
	}
	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
	/* Round the firmware-configured sizes up to fl_align multiples. */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
		     & ~(fl_align-1));
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
		     & ~(fl_align-1));

	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));

	return 0;
}
5047
5048 /**
5049  *      t4_fw_initialize - ask FW to initialize the device
5050  *      @adap: the adapter
5051  *      @mbox: mailbox to use for the FW command
5052  *
5053  *      Issues a command to FW to partially initialize the device.  This
5054  *      performs initialization that generally doesn't depend on user input.
5055  */
5056 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
5057 {
5058         struct fw_initialize_cmd c;
5059
5060         memset(&c, 0, sizeof(c));
5061         INIT_CMD(c, INITIALIZE, WRITE);
5062         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5063 }
5064
5065 /**
5066  *      t4_query_params_rw - query FW or device parameters
5067  *      @adap: the adapter
5068  *      @mbox: mailbox to use for the FW command
5069  *      @pf: the PF
5070  *      @vf: the VF
5071  *      @nparams: the number of parameters
5072  *      @params: the parameter names
5073  *      @val: the parameter values
5074  *      @rw: Write and read flag
5075  *
5076  *      Reads the value of FW or device parameters.  Up to 7 parameters can be
5077  *      queried at once.
5078  */
5079 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
5080                        unsigned int vf, unsigned int nparams, const u32 *params,
5081                        u32 *val, int rw)
5082 {
5083         int i, ret;
5084         struct fw_params_cmd c;
5085         __be32 *p = &c.param[0].mnem;
5086
5087         if (nparams > 7)
5088                 return -EINVAL;
5089
5090         memset(&c, 0, sizeof(c));
5091         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
5092                                   FW_CMD_REQUEST_F | FW_CMD_READ_F |
5093                                   FW_PARAMS_CMD_PFN_V(pf) |
5094                                   FW_PARAMS_CMD_VFN_V(vf));
5095         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5096
5097         for (i = 0; i < nparams; i++) {
5098                 *p++ = cpu_to_be32(*params++);
5099                 if (rw)
5100                         *p = cpu_to_be32(*(val + i));
5101                 p++;
5102         }
5103
5104         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5105         if (ret == 0)
5106                 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
5107                         *val++ = be32_to_cpu(*p);
5108         return ret;
5109 }
5110
/* Convenience wrapper around t4_query_params_rw() for the common
 * read-only case (rw == 0).
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}
5117
5118 /**
5119  *      t4_set_params_timeout - sets FW or device parameters
5120  *      @adap: the adapter
5121  *      @mbox: mailbox to use for the FW command
5122  *      @pf: the PF
5123  *      @vf: the VF
5124  *      @nparams: the number of parameters
5125  *      @params: the parameter names
5126  *      @val: the parameter values
5127  *      @timeout: the timeout time
5128  *
5129  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
5130  *      specified at once.
5131  */
5132 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
5133                           unsigned int pf, unsigned int vf,
5134                           unsigned int nparams, const u32 *params,
5135                           const u32 *val, int timeout)
5136 {
5137         struct fw_params_cmd c;
5138         __be32 *p = &c.param[0].mnem;
5139
5140         if (nparams > 7)
5141                 return -EINVAL;
5142
5143         memset(&c, 0, sizeof(c));
5144         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
5145                                   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5146                                   FW_PARAMS_CMD_PFN_V(pf) |
5147                                   FW_PARAMS_CMD_VFN_V(vf));
5148         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5149
5150         while (nparams--) {
5151                 *p++ = cpu_to_be32(*params++);
5152                 *p++ = cpu_to_be32(*val++);
5153         }
5154
5155         return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
5156 }
5157
5158 /**
5159  *      t4_set_params - sets FW or device parameters
5160  *      @adap: the adapter
5161  *      @mbox: mailbox to use for the FW command
5162  *      @pf: the PF
5163  *      @vf: the VF
5164  *      @nparams: the number of parameters
5165  *      @params: the parameter names
5166  *      @val: the parameter values
5167  *
5168  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
5169  *      specified at once.
5170  */
/* Convenience wrapper around t4_set_params_timeout() using the default
 * firmware command timeout.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
				     FW_CMD_MAX_TIMEOUT);
}
5178
5179 /**
5180  *      t4_cfg_pfvf - configure PF/VF resource limits
5181  *      @adap: the adapter
5182  *      @mbox: mailbox to use for the FW command
5183  *      @pf: the PF being configured
5184  *      @vf: the VF being configured
5185  *      @txq: the max number of egress queues
5186  *      @txq_eth_ctrl: the max number of egress Ethernet or control queues
5187  *      @rxqi: the max number of interrupt-capable ingress queues
5188  *      @rxq: the max number of interruptless ingress queues
5189  *      @tc: the PCI traffic class
5190  *      @vi: the max number of virtual interfaces
5191  *      @cmask: the channel access rights mask for the PF/VF
5192  *      @pmask: the port access rights mask for the PF/VF
5193  *      @nexact: the maximum number of exact MPS filters
5194  *      @rcaps: read capabilities
5195  *      @wxcaps: write/execute capabilities
5196  *
5197  *      Configures resource limits and capabilities for a physical or virtual
5198  *      function.
5199  */
5200 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
5201                 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
5202                 unsigned int rxqi, unsigned int rxq, unsigned int tc,
5203                 unsigned int vi, unsigned int cmask, unsigned int pmask,
5204                 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
5205 {
5206         struct fw_pfvf_cmd c;
5207
5208         memset(&c, 0, sizeof(c));
5209         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
5210                                   FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
5211                                   FW_PFVF_CMD_VFN_V(vf));
5212         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5213         c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
5214                                      FW_PFVF_CMD_NIQ_V(rxq));
5215         c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
5216                                     FW_PFVF_CMD_PMASK_V(pmask) |
5217                                     FW_PFVF_CMD_NEQ_V(txq));
5218         c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
5219                                       FW_PFVF_CMD_NVI_V(vi) |
5220                                       FW_PFVF_CMD_NEXACTF_V(nexact));
5221         c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
5222                                         FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
5223                                         FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
5224         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5225 }
5226
/**
 *	t4_alloc_vi - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
				  FW_CMD_WRITE_F | FW_CMD_EXEC_F |
				  FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID_V(port);
	/* The command encodes the number of *additional* MAC addresses
	 * beyond the primary one returned in c.mac, hence nmac - 1.
	 */
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		/* Copy the primary MAC first; the switch then deliberately
		 * falls through so that requesting N addresses copies the
		 * first N reply slots at consecutive 6-byte offsets.
		 */
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
	/* The allocated VI id comes back in the reply's type_viid field */
	return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
}
5280
5281 /**
5282  *      t4_free_vi - free a virtual interface
5283  *      @adap: the adapter
5284  *      @mbox: mailbox to use for the FW command
5285  *      @pf: the PF owning the VI
5286  *      @vf: the VF owning the VI
5287  *      @viid: virtual interface identifiler
5288  *
5289  *      Free a previously allocated virtual interface.
5290  */
5291 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
5292                unsigned int vf, unsigned int viid)
5293 {
5294         struct fw_vi_cmd c;
5295
5296         memset(&c, 0, sizeof(c));
5297         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
5298                                   FW_CMD_REQUEST_F |
5299                                   FW_CMD_EXEC_F |
5300                                   FW_VI_CMD_PFN_V(pf) |
5301                                   FW_VI_CMD_VFN_V(vf));
5302         c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
5303         c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
5304
5305         return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5306 }
5307
5308 /**
5309  *      t4_set_rxmode - set Rx properties of a virtual interface
5310  *      @adap: the adapter
5311  *      @mbox: mailbox to use for the FW command
5312  *      @viid: the VI id
5313  *      @mtu: the new MTU or -1
5314  *      @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
5315  *      @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
5316  *      @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
5317  *      @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
5318  *      @sleep_ok: if true we may sleep while awaiting command completion
5319  *
5320  *      Sets Rx properties of a virtual interface.
5321  */
5322 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
5323                   int mtu, int promisc, int all_multi, int bcast, int vlanex,
5324                   bool sleep_ok)
5325 {
5326         struct fw_vi_rxmode_cmd c;
5327
5328         /* convert to FW values */
5329         if (mtu < 0)
5330                 mtu = FW_RXMODE_MTU_NO_CHG;
5331         if (promisc < 0)
5332                 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
5333         if (all_multi < 0)
5334                 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
5335         if (bcast < 0)
5336                 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
5337         if (vlanex < 0)
5338                 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
5339
5340         memset(&c, 0, sizeof(c));
5341         c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
5342                                    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5343                                    FW_VI_RXMODE_CMD_VIID_V(viid));
5344         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5345         c.mtu_to_vlanexen =
5346                 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
5347                             FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
5348                             FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
5349                             FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
5350                             FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
5351         return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5352 }
5353
/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/* A single FW_VI_MAC_CMD holds at most ARRAY_SIZE(c.u.exact) exact
	 * entries, so walk the address list in chunks of that size.
	 */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
					 rem : ARRAY_SIZE(c.u.exact));
		/* Command length covers only the entry slots actually used,
		 * expressed in 16-byte units.
		 */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					   FW_CMD_REQUEST_F |
					   FW_CMD_WRITE_F |
					   FW_CMD_EXEC_V(free) |
					   FW_VI_MAC_CMD_VIID_V(viid));
		c.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
				    FW_CMD_LEN16_V(len16));

		/* FW_VI_MAC_ADD_MAC asks the firmware to choose a free
		 * filter index for each address.
		 */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
				cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
					    FW_VI_MAC_CMD_IDX_V(
						    FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset + i],
			       sizeof(p->macaddr));
		}

		/* It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		/* The reply echoes each entry with its assigned index; an
		 * index >= max_naddr means no exact filter was allocated, in
		 * which case the address is optionally folded into @hash.
		 */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
					be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset + i] = (index >= max_naddr ?
						   0xffff : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL <<
					  hash_mac_addr(addr[offset + i]));
		}

		/* Only the first chunk may free pre-existing filters */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* Running out of filter slots is not a hard error; report how many
	 * filters were actually installed.
	 */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}
5447
5448 /**
5449  *      t4_change_mac - modifies the exact-match filter for a MAC address
5450  *      @adap: the adapter
5451  *      @mbox: mailbox to use for the FW command
5452  *      @viid: the VI id
5453  *      @idx: index of existing filter for old value of MAC address, or -1
5454  *      @addr: the new MAC address value
5455  *      @persist: whether a new MAC allocation should be persistent
5456  *      @add_smt: if true also add the address to the HW SMT
5457  *
5458  *      Modifies an exact-match filter and sets it to the new MAC address.
5459  *      Note that in general it is not possible to modify the value of a given
5460  *      filter so the generic way to modify an address filter is to free the one
5461  *      being used by the old address value and allocate a new filter for the
5462  *      new address value.  @idx can be -1 if the address is a new addition.
5463  *
5464  *      Returns a negative error number or the index of the filter with the new
5465  *      MAC value.
5466  */
5467 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
5468                   int idx, const u8 *addr, bool persist, bool add_smt)
5469 {
5470         int ret, mode;
5471         struct fw_vi_mac_cmd c;
5472         struct fw_vi_mac_exact *p = c.u.exact;
5473         unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
5474
5475         if (idx < 0)                             /* new allocation */
5476                 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
5477         mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
5478
5479         memset(&c, 0, sizeof(c));
5480         c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
5481                                    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5482                                    FW_VI_MAC_CMD_VIID_V(viid));
5483         c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
5484         p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
5485                                       FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
5486                                       FW_VI_MAC_CMD_IDX_V(idx));
5487         memcpy(p->macaddr, addr, sizeof(p->macaddr));
5488
5489         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5490         if (ret == 0) {
5491                 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
5492                 if (ret >= max_mac_addr)
5493                         ret = -ENOMEM;
5494         }
5495         return ret;
5496 }
5497
5498 /**
5499  *      t4_set_addr_hash - program the MAC inexact-match hash filter
5500  *      @adap: the adapter
5501  *      @mbox: mailbox to use for the FW command
5502  *      @viid: the VI id
5503  *      @ucast: whether the hash filter should also match unicast addresses
5504  *      @vec: the value to be written to the hash filter
5505  *      @sleep_ok: call is allowed to sleep
5506  *
5507  *      Sets the 64-bit inexact-match hash filter for a virtual interface.
5508  */
5509 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
5510                      bool ucast, u64 vec, bool sleep_ok)
5511 {
5512         struct fw_vi_mac_cmd c;
5513
5514         memset(&c, 0, sizeof(c));
5515         c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
5516                                    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5517                                    FW_VI_ENABLE_CMD_VIID_V(viid));
5518         c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
5519                                           FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
5520                                           FW_CMD_LEN16_V(1));
5521         c.u.hash.hashvec = cpu_to_be64(vec);
5522         return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5523 }
5524
5525 /**
5526  *      t4_enable_vi_params - enable/disable a virtual interface
5527  *      @adap: the adapter
5528  *      @mbox: mailbox to use for the FW command
5529  *      @viid: the VI id
5530  *      @rx_en: 1=enable Rx, 0=disable Rx
5531  *      @tx_en: 1=enable Tx, 0=disable Tx
5532  *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
5533  *
5534  *      Enables/disables a virtual interface.  Note that setting DCB Enable
5535  *      only makes sense when enabling a Virtual Interface ...
5536  */
5537 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
5538                         unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
5539 {
5540         struct fw_vi_enable_cmd c;
5541
5542         memset(&c, 0, sizeof(c));
5543         c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
5544                                    FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5545                                    FW_VI_ENABLE_CMD_VIID_V(viid));
5546         c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
5547                                      FW_VI_ENABLE_CMD_EEN_V(tx_en) |
5548                                      FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
5549                                      FW_LEN16(c));
5550         return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
5551 }
5552
5553 /**
5554  *      t4_enable_vi - enable/disable a virtual interface
5555  *      @adap: the adapter
5556  *      @mbox: mailbox to use for the FW command
5557  *      @viid: the VI id
5558  *      @rx_en: 1=enable Rx, 0=disable Rx
5559  *      @tx_en: 1=enable Tx, 0=disable Tx
5560  *
5561  *      Enables/disables a virtual interface.
5562  */
5563 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
5564                  bool rx_en, bool tx_en)
5565 {
5566         return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
5567 }
5568
5569 /**
5570  *      t4_identify_port - identify a VI's port by blinking its LED
5571  *      @adap: the adapter
5572  *      @mbox: mailbox to use for the FW command
5573  *      @viid: the VI id
5574  *      @nblinks: how many times to blink LED at 2.5 Hz
5575  *
5576  *      Identifies a VI's port by blinking its LED.
5577  */
5578 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
5579                      unsigned int nblinks)
5580 {
5581         struct fw_vi_enable_cmd c;
5582
5583         memset(&c, 0, sizeof(c));
5584         c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
5585                                    FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5586                                    FW_VI_ENABLE_CMD_VIID_V(viid));
5587         c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
5588         c.blinkdur = cpu_to_be16(nblinks);
5589         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5590 }
5591
5592 /**
5593  *      t4_iq_free - free an ingress queue and its FLs
5594  *      @adap: the adapter
5595  *      @mbox: mailbox to use for the FW command
5596  *      @pf: the PF owning the queues
5597  *      @vf: the VF owning the queues
5598  *      @iqtype: the ingress queue type
5599  *      @iqid: ingress queue id
5600  *      @fl0id: FL0 queue id or 0xffff if no attached FL0
5601  *      @fl1id: FL1 queue id or 0xffff if no attached FL1
5602  *
5603  *      Frees an ingress queue and its associated FLs, if any.
5604  */
5605 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5606                unsigned int vf, unsigned int iqtype, unsigned int iqid,
5607                unsigned int fl0id, unsigned int fl1id)
5608 {
5609         struct fw_iq_cmd c;
5610
5611         memset(&c, 0, sizeof(c));
5612         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
5613                                   FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
5614                                   FW_IQ_CMD_VFN_V(vf));
5615         c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
5616         c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
5617         c.iqid = cpu_to_be16(iqid);
5618         c.fl0id = cpu_to_be16(fl0id);
5619         c.fl1id = cpu_to_be16(fl1id);
5620         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5621 }
5622
5623 /**
5624  *      t4_eth_eq_free - free an Ethernet egress queue
5625  *      @adap: the adapter
5626  *      @mbox: mailbox to use for the FW command
5627  *      @pf: the PF owning the queue
5628  *      @vf: the VF owning the queue
5629  *      @eqid: egress queue id
5630  *
5631  *      Frees an Ethernet egress queue.
5632  */
5633 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5634                    unsigned int vf, unsigned int eqid)
5635 {
5636         struct fw_eq_eth_cmd c;
5637
5638         memset(&c, 0, sizeof(c));
5639         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
5640                                   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5641                                   FW_EQ_ETH_CMD_PFN_V(pf) |
5642                                   FW_EQ_ETH_CMD_VFN_V(vf));
5643         c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
5644         c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
5645         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5646 }
5647
5648 /**
5649  *      t4_ctrl_eq_free - free a control egress queue
5650  *      @adap: the adapter
5651  *      @mbox: mailbox to use for the FW command
5652  *      @pf: the PF owning the queue
5653  *      @vf: the VF owning the queue
5654  *      @eqid: egress queue id
5655  *
5656  *      Frees a control egress queue.
5657  */
5658 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5659                     unsigned int vf, unsigned int eqid)
5660 {
5661         struct fw_eq_ctrl_cmd c;
5662
5663         memset(&c, 0, sizeof(c));
5664         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
5665                                   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
5666                                   FW_EQ_CTRL_CMD_PFN_V(pf) |
5667                                   FW_EQ_CTRL_CMD_VFN_V(vf));
5668         c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
5669         c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
5670         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5671 }
5672
/**
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
				  FW_EQ_OFLD_CMD_PFN_V(pf) |
				  FW_EQ_OFLD_CMD_VFN_V(vf));
	c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
5697
/**
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;	/* opcode is the first byte */

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
		/* translate HW channel to the adapter's port index */
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_G(stat);

		/* Decode pause configuration and link speed (Mb/s) from the
		 * status word.
		 */
		if (stat & FW_PORT_CMD_RXPAUSE_F)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE_F)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
			speed = 40000;

		/* Only update cached state and notify the OS when link
		 * status, speed or flow control actually changed.
		 */
		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = be16_to_cpu(p->u.info.pcap);
			t4_os_link_changed(adap, port, link_ok);
		}
		/* Separately report transceiver module insertion/removal */
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	/* Messages of other types are silently ignored */
	return 0;
}
5748
5749 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
5750 {
5751         u16 val;
5752
5753         if (pci_is_pcie(adapter->pdev)) {
5754                 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
5755                 p->speed = val & PCI_EXP_LNKSTA_CLS;
5756                 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5757         }
5758 }
5759
5760 /**
5761  *      init_link_config - initialize a link's SW state
5762  *      @lc: structure holding the link state
5763  *      @caps: link capabilities
5764  *
5765  *      Initializes the SW state maintained for each link, including the link's
5766  *      capabilities and default speed/flow-control/autonegotiation settings.
5767  */
5768 static void init_link_config(struct link_config *lc, unsigned int caps)
5769 {
5770         lc->supported = caps;
5771         lc->requested_speed = 0;
5772         lc->speed = 0;
5773         lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5774         if (lc->supported & FW_PORT_CAP_ANEG) {
5775                 lc->advertising = lc->supported & ADVERT_MASK;
5776                 lc->autoneg = AUTONEG_ENABLE;
5777                 lc->requested_fc |= PAUSE_AUTONEG;
5778         } else {
5779                 lc->advertising = 0;
5780                 lc->autoneg = AUTONEG_DISABLE;
5781         }
5782 }
5783
5784 #define CIM_PF_NOACCESS 0xeeeeeeee
5785
5786 int t4_wait_dev_ready(void __iomem *regs)
5787 {
5788         u32 whoami;
5789
5790         whoami = readl(regs + PL_WHOAMI_A);
5791         if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
5792                 return 0;
5793
5794         msleep(500);
5795         whoami = readl(regs + PL_WHOAMI_A);
5796         return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
5797 }
5798
5799 struct flash_desc {
5800         u32 vendor_and_model_id;
5801         u32 size_mb;
5802 };
5803
5804 static int get_flash_params(struct adapter *adap)
5805 {
5806         /* Table for non-Numonix supported flash parts.  Numonix parts are left
5807          * to the preexisting code.  All flash parts have 64KB sectors.
5808          */
5809         static struct flash_desc supported_flash[] = {
5810                 { 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
5811         };
5812
5813         int ret;
5814         u32 info;
5815
5816         ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
5817         if (!ret)
5818                 ret = sf1_read(adap, 3, 0, 1, &info);
5819         t4_write_reg(adap, SF_OP_A, 0);                    /* unlock SF */
5820         if (ret)
5821                 return ret;
5822
5823         for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
5824                 if (supported_flash[ret].vendor_and_model_id == info) {
5825                         adap->params.sf_size = supported_flash[ret].size_mb;
5826                         adap->params.sf_nsec =
5827                                 adap->params.sf_size / SF_SEC_SIZE;
5828                         return 0;
5829                 }
5830
5831         if ((info & 0xff) != 0x20)             /* not a Numonix flash */
5832                 return -EINVAL;
5833         info >>= 16;                           /* log2 of size */
5834         if (info >= 0x14 && info < 0x18)
5835                 adap->params.sf_nsec = 1 << (info - 16);
5836         else if (info == 0x18)
5837                 adap->params.sf_nsec = 64;
5838         else
5839                 return -EINVAL;
5840         adap->params.sf_size = 1 << info;
5841         adap->params.sf_fw_start =
5842                 t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
5843
5844         if (adap->params.sf_size < FLASH_MIN_SIZE)
5845                 dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
5846                          adap->params.sf_size, FLASH_MIN_SIZE);
5847         return 0;
5848 }
5849
/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;
	u32 pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);
	pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));

	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	/* Retrieve adapter's device ID; the chip generation is encoded in
	 * its top nibble (decoded into ver below).
	 */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	adapter->params.chip = 0;
	/* Per-generation architecture parameters: doorbell flags, MPS TCAM
	 * and replication table sizes, channel and VF counts.
	 */
	switch (ver) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		adapter->params.arch.sge_fl_db = DBPRIO_F;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.vfcount = 128;
		break;
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.vfcount = 128;
		break;
	case CHELSIO_T6:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 256;
		adapter->params.arch.nchan = 2;
		adapter->params.arch.vfcount = 256;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}

	adapter->params.cim_la_size = CIMLA_SIZE;
	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;
	return 0;
}
5924
5925 /**
5926  *      t4_bar2_sge_qregs - return BAR2 SGE Queue register information
5927  *      @adapter: the adapter
5928  *      @qid: the Queue ID
5929  *      @qtype: the Ingress or Egress type for @qid
5930  *      @pbar2_qoffset: BAR2 Queue Offset
5931  *      @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
5932  *
5933  *      Returns the BAR2 SGE Queue Registers information associated with the
5934  *      indicated Absolute Queue ID.  These are passed back in return value
5935  *      pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
5936  *      and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
5937  *
5938  *      This may return an error which indicates that BAR2 SGE Queue
5939  *      registers aren't available.  If an error is not returned, then the
5940  *      following values are returned:
5941  *
5942  *        *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
5943  *        *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
5944  *
5945  *      If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
5946  *      require the "Inferred Queue ID" ability may be used.  E.g. the
5947  *      Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
5948  *      then these "Inferred Queue ID" register may not be used.
5949  */
5950 int t4_bar2_sge_qregs(struct adapter *adapter,
5951                       unsigned int qid,
5952                       enum t4_bar2_qtype qtype,
5953                       u64 *pbar2_qoffset,
5954                       unsigned int *pbar2_qid)
5955 {
5956         unsigned int page_shift, page_size, qpp_shift, qpp_mask;
5957         u64 bar2_page_offset, bar2_qoffset;
5958         unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
5959
5960         /* T4 doesn't support BAR2 SGE Queue registers.
5961          */
5962         if (is_t4(adapter->params.chip))
5963                 return -EINVAL;
5964
5965         /* Get our SGE Page Size parameters.
5966          */
5967         page_shift = adapter->params.sge.hps + 10;
5968         page_size = 1 << page_shift;
5969
5970         /* Get the right Queues per Page parameters for our Queue.
5971          */
5972         qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
5973                      ? adapter->params.sge.eq_qpp
5974                      : adapter->params.sge.iq_qpp);
5975         qpp_mask = (1 << qpp_shift) - 1;
5976
5977         /*  Calculate the basics of the BAR2 SGE Queue register area:
5978          *  o The BAR2 page the Queue registers will be in.
5979          *  o The BAR2 Queue ID.
5980          *  o The BAR2 Queue ID Offset into the BAR2 page.
5981          */
5982         bar2_page_offset = ((qid >> qpp_shift) << page_shift);
5983         bar2_qid = qid & qpp_mask;
5984         bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
5985
5986         /* If the BAR2 Queue ID Offset is less than the Page Size, then the
5987          * hardware will infer the Absolute Queue ID simply from the writes to
5988          * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
5989          * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
5990          * write to the first BAR2 SGE Queue Area within the BAR2 Page with
5991          * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
5992          * from the BAR2 Page and BAR2 Queue ID.
5993          *
5994          * One important censequence of this is that some BAR2 SGE registers
5995          * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
5996          * there.  But other registers synthesize the SGE Queue ID purely
5997          * from the writes to the registers -- the Write Combined Doorbell
5998          * Buffer is a good example.  These BAR2 SGE Registers are only
5999          * available for those BAR2 SGE Register areas where the SGE Absolute
6000          * Queue ID can be inferred from simple writes.
6001          */
6002         bar2_qoffset = bar2_page_offset;
6003         bar2_qinferred = (bar2_qid_offset < page_size);
6004         if (bar2_qinferred) {
6005                 bar2_qoffset += bar2_qid_offset;
6006                 bar2_qid = 0;
6007         }
6008
6009         *pbar2_qoffset = bar2_qoffset;
6010         *pbar2_qid = bar2_qid;
6011         return 0;
6012 }
6013
6014 /**
6015  *      t4_init_devlog_params - initialize adapter->params.devlog
6016  *      @adap: the adapter
6017  *
6018  *      Initialize various fields of the adapter's Firmware Device Log
6019  *      Parameters structure.
6020  */
6021 int t4_init_devlog_params(struct adapter *adap)
6022 {
6023         struct devlog_params *dparams = &adap->params.devlog;
6024         u32 pf_dparams;
6025         unsigned int devlog_meminfo;
6026         struct fw_devlog_cmd devlog_cmd;
6027         int ret;
6028
6029         /* If we're dealing with newer firmware, the Device Log Paramerters
6030          * are stored in a designated register which allows us to access the
6031          * Device Log even if we can't talk to the firmware.
6032          */
6033         pf_dparams =
6034                 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
6035         if (pf_dparams) {
6036                 unsigned int nentries, nentries128;
6037
6038                 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
6039                 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
6040
6041                 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
6042                 nentries = (nentries128 + 1) * 128;
6043                 dparams->size = nentries * sizeof(struct fw_devlog_e);
6044
6045                 return 0;
6046         }
6047
6048         /* Otherwise, ask the firmware for it's Device Log Parameters.
6049          */
6050         memset(&devlog_cmd, 0, sizeof(devlog_cmd));
6051         devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
6052                                              FW_CMD_REQUEST_F | FW_CMD_READ_F);
6053         devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
6054         ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
6055                          &devlog_cmd);
6056         if (ret)
6057                 return ret;
6058
6059         devlog_meminfo =
6060                 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
6061         dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
6062         dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
6063         dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
6064
6065         return 0;
6066 }
6067
6068 /**
6069  *      t4_init_sge_params - initialize adap->params.sge
6070  *      @adapter: the adapter
6071  *
6072  *      Initialize various fields of the adapter's SGE Parameters structure.
6073  */
6074 int t4_init_sge_params(struct adapter *adapter)
6075 {
6076         struct sge_params *sge_params = &adapter->params.sge;
6077         u32 hps, qpp;
6078         unsigned int s_hps, s_qpp;
6079
6080         /* Extract the SGE Page Size for our PF.
6081          */
6082         hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
6083         s_hps = (HOSTPAGESIZEPF0_S +
6084                  (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
6085         sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
6086
6087         /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
6088          */
6089         s_qpp = (QUEUESPERPAGEPF0_S +
6090                 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
6091         qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
6092         sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
6093         qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
6094         sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
6095
6096         return 0;
6097 }
6098
6099 /**
6100  *      t4_init_tp_params - initialize adap->params.tp
6101  *      @adap: the adapter
6102  *
6103  *      Initialize various fields of the adapter's TP Parameters structure.
6104  */
6105 int t4_init_tp_params(struct adapter *adap)
6106 {
6107         int chan;
6108         u32 v;
6109
6110         v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
6111         adap->params.tp.tre = TIMERRESOLUTION_G(v);
6112         adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
6113
6114         /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
6115         for (chan = 0; chan < NCHAN; chan++)
6116                 adap->params.tp.tx_modq[chan] = chan;
6117
6118         /* Cache the adapter's Compressed Filter Mode and global Incress
6119          * Configuration.
6120          */
6121         t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
6122                          &adap->params.tp.vlan_pri_map, 1,
6123                          TP_VLAN_PRI_MAP_A);
6124         t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
6125                          &adap->params.tp.ingress_config, 1,
6126                          TP_INGRESS_CONFIG_A);
6127
6128         /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
6129          * shift positions of several elements of the Compressed Filter Tuple
6130          * for this adapter which we need frequently ...
6131          */
6132         adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
6133         adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
6134         adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
6135         adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
6136                                                                PROTOCOL_F);
6137
6138         /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
6139          * represents the presence of an Outer VLAN instead of a VNIC ID.
6140          */
6141         if ((adap->params.tp.ingress_config & VNIC_F) == 0)
6142                 adap->params.tp.vnic_shift = -1;
6143
6144         return 0;
6145 }
6146
6147 /**
6148  *      t4_filter_field_shift - calculate filter field shift
6149  *      @adap: the adapter
6150  *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
6151  *
6152  *      Return the shift position of a filter field within the Compressed
6153  *      Filter Tuple.  The filter field is specified via its selection bit
6154  *      within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
6155  */
6156 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
6157 {
6158         unsigned int filter_mode = adap->params.tp.vlan_pri_map;
6159         unsigned int sel;
6160         int field_shift;
6161
6162         if ((filter_mode & filter_sel) == 0)
6163                 return -1;
6164
6165         for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
6166                 switch (filter_mode & sel) {
6167                 case FCOE_F:
6168                         field_shift += FT_FCOE_W;
6169                         break;
6170                 case PORT_F:
6171                         field_shift += FT_PORT_W;
6172                         break;
6173                 case VNIC_ID_F:
6174                         field_shift += FT_VNIC_ID_W;
6175                         break;
6176                 case VLAN_F:
6177                         field_shift += FT_VLAN_W;
6178                         break;
6179                 case TOS_F:
6180                         field_shift += FT_TOS_W;
6181                         break;
6182                 case PROTOCOL_F:
6183                         field_shift += FT_PROTOCOL_W;
6184                         break;
6185                 case ETHERTYPE_F:
6186                         field_shift += FT_ETHERTYPE_W;
6187                         break;
6188                 case MACMATCH_F:
6189                         field_shift += FT_MACMATCH_W;
6190                         break;
6191                 case MPSHITTYPE_F:
6192                         field_shift += FT_MPSHITTYPE_W;
6193                         break;
6194                 case FRAGMENTATION_F:
6195                         field_shift += FT_FRAGMENTATION_W;
6196                         break;
6197                 }
6198         }
6199         return field_shift;
6200 }
6201
/**
 *	t4_init_rss_mode - initialize rss_mode for all the ports
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Queries the firmware for the RSS Virtual Interface configuration
 *	of each port and caches the result in the port's rss_mode field.
 *	Returns 0 on success, a negative error otherwise.
 */
int t4_init_rss_mode(struct adapter *adap, int mbox)
{
	int i, ret;
	struct fw_rss_vi_config_cmd rvc;

	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		rvc.op_to_viid =
			cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				    FW_CMD_REQUEST_F | FW_CMD_READ_F |
				    FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
	}
	return 0;
}
6224
6225 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
6226 {
6227         u8 addr[6];
6228         int ret, i, j = 0;
6229         struct fw_port_cmd c;
6230         struct fw_rss_vi_config_cmd rvc;
6231
6232         memset(&c, 0, sizeof(c));
6233         memset(&rvc, 0, sizeof(rvc));
6234
6235         for_each_port(adap, i) {
6236                 unsigned int rss_size;
6237                 struct port_info *p = adap2pinfo(adap, i);
6238
6239                 while ((adap->params.portvec & (1 << j)) == 0)
6240                         j++;
6241
6242                 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
6243                                              FW_CMD_REQUEST_F | FW_CMD_READ_F |
6244                                              FW_PORT_CMD_PORTID_V(j));
6245                 c.action_to_len16 = cpu_to_be32(
6246                         FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
6247                         FW_LEN16(c));
6248                 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6249                 if (ret)
6250                         return ret;
6251
6252                 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
6253                 if (ret < 0)
6254                         return ret;
6255
6256                 p->viid = ret;
6257                 p->tx_chan = j;
6258                 p->lport = j;
6259                 p->rss_size = rss_size;
6260                 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
6261                 adap->port[i]->dev_port = j;
6262
6263                 ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
6264                 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
6265                         FW_PORT_CMD_MDIOADDR_G(ret) : -1;
6266                 p->port_type = FW_PORT_CMD_PTYPE_G(ret);
6267                 p->mod_type = FW_PORT_MOD_TYPE_NA;
6268
6269                 rvc.op_to_viid =
6270                         cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
6271                                     FW_CMD_REQUEST_F | FW_CMD_READ_F |
6272                                     FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
6273                 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
6274                 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
6275                 if (ret)
6276                         return ret;
6277                 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
6278
6279                 init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
6280                 j++;
6281         }
6282         return 0;
6283 }
6284
6285 /**
6286  *      t4_read_cimq_cfg - read CIM queue configuration
6287  *      @adap: the adapter
6288  *      @base: holds the queue base addresses in bytes
6289  *      @size: holds the queue sizes in bytes
6290  *      @thres: holds the queue full thresholds in bytes
6291  *
6292  *      Returns the current configuration of the CIM queues, starting with
6293  *      the IBQs, then the OBQs.
6294  */
6295 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
6296 {
6297         unsigned int i, v;
6298         int cim_num_obq = is_t4(adap->params.chip) ?
6299                                 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
6300
6301         for (i = 0; i < CIM_NUM_IBQ; i++) {
6302                 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
6303                              QUENUMSELECT_V(i));
6304                 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
6305                 /* value is in 256-byte units */
6306                 *base++ = CIMQBASE_G(v) * 256;
6307                 *size++ = CIMQSIZE_G(v) * 256;
6308                 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
6309         }
6310         for (i = 0; i < cim_num_obq; i++) {
6311                 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
6312                              QUENUMSELECT_V(i));
6313                 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
6314                 /* value is in 256-byte units */
6315                 *base++ = CIMQBASE_G(v) * 256;
6316                 *size++ = CIMQSIZE_G(v) * 256;
6317         }
6318 }
6319
6320 /**
6321  *      t4_read_cim_ibq - read the contents of a CIM inbound queue
6322  *      @adap: the adapter
6323  *      @qid: the queue index
6324  *      @data: where to store the queue contents
6325  *      @n: capacity of @data in 32-bit words
6326  *
6327  *      Reads the contents of the selected CIM queue starting at address 0 up
6328  *      to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
6329  *      error and the number of 32-bit words actually read on success.
6330  */
6331 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
6332 {
6333         int i, err, attempts;
6334         unsigned int addr;
6335         const unsigned int nwords = CIM_IBQ_SIZE * 4;
6336
6337         if (qid > 5 || (n & 3))
6338                 return -EINVAL;
6339
6340         addr = qid * nwords;
6341         if (n > nwords)
6342                 n = nwords;
6343
6344         /* It might take 3-10ms before the IBQ debug read access is allowed.
6345          * Wait for 1 Sec with a delay of 1 usec.
6346          */
6347         attempts = 1000000;
6348
6349         for (i = 0; i < n; i++, addr++) {
6350                 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
6351                              IBQDBGEN_F);
6352                 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
6353                                       attempts, 1);
6354                 if (err)
6355                         return err;
6356                 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
6357         }
6358         t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
6359         return i;
6360 }
6361
6362 /**
6363  *      t4_read_cim_obq - read the contents of a CIM outbound queue
6364  *      @adap: the adapter
6365  *      @qid: the queue index
6366  *      @data: where to store the queue contents
6367  *      @n: capacity of @data in 32-bit words
6368  *
6369  *      Reads the contents of the selected CIM queue starting at address 0 up
6370  *      to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
6371  *      error and the number of 32-bit words actually read on success.
6372  */
6373 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
6374 {
6375         int i, err;
6376         unsigned int addr, v, nwords;
6377         int cim_num_obq = is_t4(adap->params.chip) ?
6378                                 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
6379
6380         if ((qid > (cim_num_obq - 1)) || (n & 3))
6381                 return -EINVAL;
6382
6383         t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
6384                      QUENUMSELECT_V(qid));
6385         v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
6386
6387         addr = CIMQBASE_G(v) * 64;    /* muliple of 256 -> muliple of 4 */
6388         nwords = CIMQSIZE_G(v) * 64;  /* same */
6389         if (n > nwords)
6390                 n = nwords;
6391
6392         for (i = 0; i < n; i++, addr++) {
6393                 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
6394                              OBQDBGEN_F);
6395                 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
6396                                       2, 1);
6397                 if (err)
6398                         return err;
6399                 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
6400         }
6401         t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
6402         return i;
6403 }
6404
6405 /**
6406  *      t4_cim_read - read a block from CIM internal address space
6407  *      @adap: the adapter
6408  *      @addr: the start address within the CIM address space
6409  *      @n: number of words to read
6410  *      @valp: where to store the result
6411  *
6412  *      Reads a block of 4-byte words from the CIM intenal address space.
6413  */
6414 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
6415                 unsigned int *valp)
6416 {
6417         int ret = 0;
6418
6419         if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
6420                 return -EBUSY;
6421
6422         for ( ; !ret && n--; addr += 4) {
6423                 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
6424                 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
6425                                       0, 5, 2);
6426                 if (!ret)
6427                         *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
6428         }
6429         return ret;
6430 }
6431
6432 /**
6433  *      t4_cim_write - write a block into CIM internal address space
6434  *      @adap: the adapter
6435  *      @addr: the start address within the CIM address space
6436  *      @n: number of words to write
6437  *      @valp: set of values to write
6438  *
6439  *      Writes a block of 4-byte words into the CIM intenal address space.
6440  */
6441 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
6442                  const unsigned int *valp)
6443 {
6444         int ret = 0;
6445
6446         if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
6447                 return -EBUSY;
6448
6449         for ( ; !ret && n--; addr += 4) {
6450                 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
6451                 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
6452                 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
6453                                       0, 5, 2);
6454         }
6455         return ret;
6456 }
6457
/* Convenience wrapper: write a single 4-byte word @val at CIM address
 * @addr via t4_cim_write().
 */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
6463
6464 /**
6465  *      t4_cim_read_la - read CIM LA capture buffer
6466  *      @adap: the adapter
6467  *      @la_buf: where to store the LA data
6468  *      @wrptr: the HW write pointer within the capture buffer
6469  *
6470  *      Reads the contents of the CIM LA buffer with the most recent entry at
6471  *      the end of the returned data and with the entry at @wrptr first.
6472  *      We try to leave the LA in the running state we find it in.
6473  */
6474 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
6475 {
6476         int i, ret;
6477         unsigned int cfg, val, idx;
6478
6479         ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
6480         if (ret)
6481                 return ret;
6482
6483         if (cfg & UPDBGLAEN_F) {        /* LA is running, freeze it */
6484                 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
6485                 if (ret)
6486                         return ret;
6487         }
6488
6489         ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
6490         if (ret)
6491                 goto restart;
6492
6493         idx = UPDBGLAWRPTR_G(val);
6494         if (wrptr)
6495                 *wrptr = idx;
6496
6497         for (i = 0; i < adap->params.cim_la_size; i++) {
6498                 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
6499                                     UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
6500                 if (ret)
6501                         break;
6502                 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
6503                 if (ret)
6504                         break;
6505                 if (val & UPDBGLARDEN_F) {
6506                         ret = -ETIMEDOUT;
6507                         break;
6508                 }
6509                 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
6510                 if (ret)
6511                         break;
6512                 idx = (idx + 1) & UPDBGLARDPTR_M;
6513         }
6514 restart:
6515         if (cfg & UPDBGLAEN_F) {
6516                 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
6517                                       cfg & ~UPDBGLARDEN_F);
6518                 if (!ret)
6519                         ret = r;
6520         }
6521         return ret;
6522 }
6523
6524 /**
6525  *      t4_tp_read_la - read TP LA capture buffer
6526  *      @adap: the adapter
6527  *      @la_buf: where to store the LA data
6528  *      @wrptr: the HW write pointer within the capture buffer
6529  *
6530  *      Reads the contents of the TP LA buffer with the most recent entry at
6531  *      the end of the returned data and with the entry at @wrptr first.
6532  *      We leave the LA in the running state we find it in.
6533  */
6534 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
6535 {
6536         bool last_incomplete;
6537         unsigned int i, cfg, val, idx;
6538
6539         cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
6540         if (cfg & DBGLAENABLE_F)                        /* freeze LA */
6541                 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
6542                              adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
6543
6544         val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
6545         idx = DBGLAWPTR_G(val);
6546         last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
6547         if (last_incomplete)
6548                 idx = (idx + 1) & DBGLARPTR_M;
6549         if (wrptr)
6550                 *wrptr = idx;
6551
6552         val &= 0xffff;
6553         val &= ~DBGLARPTR_V(DBGLARPTR_M);
6554         val |= adap->params.tp.la_mask;
6555
6556         for (i = 0; i < TPLA_SIZE; i++) {
6557                 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
6558                 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
6559                 idx = (idx + 1) & DBGLARPTR_M;
6560         }
6561
6562         /* Wipe out last entry if it isn't valid */
6563         if (last_incomplete)
6564                 la_buf[TPLA_SIZE - 1] = ~0ULL;
6565
6566         if (cfg & DBGLAENABLE_F)                    /* restore running state */
6567                 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
6568                              cfg | adap->params.tp.la_mask);
6569 }
6570
6571 /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
6572  * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
6573  * state for more than the Warning Threshold then we'll issue a warning about
6574  * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
6575  * appears to be hung every Warning Repeat second till the situation clears.
6576  * If the situation clears, we'll note that as well.
6577  */
6578 #define SGE_IDMA_WARN_THRESH 1
6579 #define SGE_IDMA_WARN_REPEAT 300
6580
6581 /**
6582  *      t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
6583  *      @adapter: the adapter
6584  *      @idma: the adapter IDMA Monitor state
6585  *
6586  *      Initialize the state of an SGE Ingress DMA Monitor.
6587  */
6588 void t4_idma_monitor_init(struct adapter *adapter,
6589                           struct sge_idma_monitor_state *idma)
6590 {
6591         /* Initialize the state variables for detecting an SGE Ingress DMA
6592          * hang.  The SGE has internal counters which count up on each clock
6593          * tick whenever the SGE finds its Ingress DMA State Engines in the
6594          * same state they were on the previous clock tick.  The clock used is
6595          * the Core Clock so we have a limit on the maximum "time" they can
6596          * record; typically a very small number of seconds.  For instance,
6597          * with a 600MHz Core Clock, we can only count up to a bit more than
6598          * 7s.  So we'll synthesize a larger counter in order to not run the
6599          * risk of having the "timers" overflow and give us the flexibility to
6600          * maintain a Hung SGE State Machine of our own which operates across
6601          * a longer time frame.
6602          */
6603         idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
6604         idma->idma_stalled[0] = 0;
6605         idma->idma_stalled[1] = 0;
6606 }
6607
6608 /**
6609  *      t4_idma_monitor - monitor SGE Ingress DMA state
6610  *      @adapter: the adapter
6611  *      @idma: the adapter IDMA Monitor state
6612  *      @hz: number of ticks/second
6613  *      @ticks: number of ticks since the last IDMA Monitor call
6614  */
6615 void t4_idma_monitor(struct adapter *adapter,
6616                      struct sge_idma_monitor_state *idma,
6617                      int hz, int ticks)
6618 {
6619         int i, idma_same_state_cnt[2];
6620
6621          /* Read the SGE Debug Ingress DMA Same State Count registers.  These
6622           * are counters inside the SGE which count up on each clock when the
6623           * SGE finds its Ingress DMA State Engines in the same states they
6624           * were in the previous clock.  The counters will peg out at
6625           * 0xffffffff without wrapping around so once they pass the 1s
6626           * threshold they'll stay above that till the IDMA state changes.
6627           */
6628         t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
6629         idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
6630         idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
6631
6632         for (i = 0; i < 2; i++) {
6633                 u32 debug0, debug11;
6634
6635                 /* If the Ingress DMA Same State Counter ("timer") is less
6636                  * than 1s, then we can reset our synthesized Stall Timer and
6637                  * continue.  If we have previously emitted warnings about a
6638                  * potential stalled Ingress Queue, issue a note indicating
6639                  * that the Ingress Queue has resumed forward progress.
6640                  */
6641                 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
6642                         if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
6643                                 dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
6644                                          "resumed after %d seconds\n",
6645                                          i, idma->idma_qid[i],
6646                                          idma->idma_stalled[i] / hz);
6647                         idma->idma_stalled[i] = 0;
6648                         continue;
6649                 }
6650
6651                 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
6652                  * domain.  The first time we get here it'll be because we
6653                  * passed the 1s Threshold; each additional time it'll be
6654                  * because the RX Timer Callback is being fired on its regular
6655                  * schedule.
6656                  *
6657                  * If the stall is below our Potential Hung Ingress Queue
6658                  * Warning Threshold, continue.
6659                  */
6660                 if (idma->idma_stalled[i] == 0) {
6661                         idma->idma_stalled[i] = hz;
6662                         idma->idma_warn[i] = 0;
6663                 } else {
6664                         idma->idma_stalled[i] += ticks;
6665                         idma->idma_warn[i] -= ticks;
6666                 }
6667
6668                 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
6669                         continue;
6670
6671                 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
6672                  */
6673                 if (idma->idma_warn[i] > 0)
6674                         continue;
6675                 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
6676
6677                 /* Read and save the SGE IDMA State and Queue ID information.
6678                  * We do this every time in case it changes across time ...
6679                  * can't be too careful ...
6680                  */
6681                 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
6682                 debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
6683                 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
6684
6685                 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
6686                 debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
6687                 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
6688
6689                 dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
6690                          "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
6691                          i, idma->idma_qid[i], idma->idma_state[i],
6692                          idma->idma_stalled[i] / hz,
6693                          debug0, debug11);
6694                 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
6695         }
6696 }