1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include <linux/delay.h>
9 #include <linux/pci.h>
10 #include <scsi/scsi_tcq.h>
11
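/*
 * Window-select encodings: MN_WIN/OCM_WIN/MS_WIN fold a 64-bit agent address
 * into the value programmed into the DDR (MN), on-chip memory (OCM) and QDR
 * (MS) window registers; see qla82xx_pci_set_window() below. The exact bit
 * layout is assumed from that usage and from the hardware window registers.
 */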
12 #define MASK(n)                 ((1ULL<<(n))-1)
13 #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
14         ((addr >> 25) & 0x3ff))
15 #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
16         ((addr >> 25) & 0x3ff))
17 #define MS_WIN(addr) (addr & 0x0ffc0000)
18 #define QLA82XX_PCI_MN_2M   (0)
19 #define QLA82XX_PCI_MS_2M   (0x80000)
20 #define QLA82XX_PCI_OCM0_2M (0xc0000)
21 #define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
22 #define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
23 #define BLOCK_PROTECT_BITS 0x0F
24
25 /* CRB window related */
26 #define CRB_BLK(off)    ((off >> 20) & 0x3f)
27 #define CRB_SUBBLK(off) ((off >> 16) & 0xf)
28 #define CRB_WINDOW_2M   (0x130060)
29 #define QLA82XX_PCI_CAMQM_2M_END        (0x04800800UL)
30 #define CRB_HI(off)     ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
31                         ((off) & 0xf0000))
32 #define QLA82XX_PCI_CAMQM_2M_BASE       (0x000ff800UL)
33 #define CRB_INDIRECT_2M (0x1e0000UL)
34
35 #define MAX_CRB_XFORM 60
36 static unsigned long crb_addr_xform[MAX_CRB_XFORM];
37 int qla82xx_crb_table_initialized;
38
39 #define qla82xx_crb_addr_transform(name) \
40         (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
41         QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
42
43 static void qla82xx_crb_addr_transform_setup(void)
44 {
45         qla82xx_crb_addr_transform(XDMA);
46         qla82xx_crb_addr_transform(TIMR);
47         qla82xx_crb_addr_transform(SRE);
48         qla82xx_crb_addr_transform(SQN3);
49         qla82xx_crb_addr_transform(SQN2);
50         qla82xx_crb_addr_transform(SQN1);
51         qla82xx_crb_addr_transform(SQN0);
52         qla82xx_crb_addr_transform(SQS3);
53         qla82xx_crb_addr_transform(SQS2);
54         qla82xx_crb_addr_transform(SQS1);
55         qla82xx_crb_addr_transform(SQS0);
56         qla82xx_crb_addr_transform(RPMX7);
57         qla82xx_crb_addr_transform(RPMX6);
58         qla82xx_crb_addr_transform(RPMX5);
59         qla82xx_crb_addr_transform(RPMX4);
60         qla82xx_crb_addr_transform(RPMX3);
61         qla82xx_crb_addr_transform(RPMX2);
62         qla82xx_crb_addr_transform(RPMX1);
63         qla82xx_crb_addr_transform(RPMX0);
64         qla82xx_crb_addr_transform(ROMUSB);
65         qla82xx_crb_addr_transform(SN);
66         qla82xx_crb_addr_transform(QMN);
67         qla82xx_crb_addr_transform(QMS);
68         qla82xx_crb_addr_transform(PGNI);
69         qla82xx_crb_addr_transform(PGND);
70         qla82xx_crb_addr_transform(PGN3);
71         qla82xx_crb_addr_transform(PGN2);
72         qla82xx_crb_addr_transform(PGN1);
73         qla82xx_crb_addr_transform(PGN0);
74         qla82xx_crb_addr_transform(PGSI);
75         qla82xx_crb_addr_transform(PGSD);
76         qla82xx_crb_addr_transform(PGS3);
77         qla82xx_crb_addr_transform(PGS2);
78         qla82xx_crb_addr_transform(PGS1);
79         qla82xx_crb_addr_transform(PGS0);
80         qla82xx_crb_addr_transform(PS);
81         qla82xx_crb_addr_transform(PH);
82         qla82xx_crb_addr_transform(NIU);
83         qla82xx_crb_addr_transform(I2Q);
84         qla82xx_crb_addr_transform(EG);
85         qla82xx_crb_addr_transform(MN);
86         qla82xx_crb_addr_transform(MS);
87         qla82xx_crb_addr_transform(CAS2);
88         qla82xx_crb_addr_transform(CAS1);
89         qla82xx_crb_addr_transform(CAS0);
90         qla82xx_crb_addr_transform(CAM);
91         qla82xx_crb_addr_transform(C2C1);
92         qla82xx_crb_addr_transform(C2C0);
93         qla82xx_crb_addr_transform(SMB);
94         qla82xx_crb_addr_transform(OCM0);
95         /*
96          * Used only in P3; just define it for P2 also.
97          */
98         qla82xx_crb_addr_transform(I2C0);
99
100         qla82xx_crb_table_initialized = 1;
101 }
102
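/*
 * Direct map of the legacy 128M CRB space onto the 2M BAR. Each of the 64
 * 1M blocks is split into sixteen 64K sub-blocks described as
 * {valid, start_128M, end_128M, start_2M}; qla82xx_pci_get_crb_addr_2M()
 * indexes this table with CRB_BLK()/CRB_SUBBLK() to translate offsets that
 * have a fixed 2M-window mapping.
 */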
103 struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
104         {{{0, 0,         0,         0} } },
105         {{{1, 0x0100000, 0x0102000, 0x120000},
106         {1, 0x0110000, 0x0120000, 0x130000},
107         {1, 0x0120000, 0x0122000, 0x124000},
108         {1, 0x0130000, 0x0132000, 0x126000},
109         {1, 0x0140000, 0x0142000, 0x128000},
110         {1, 0x0150000, 0x0152000, 0x12a000},
111         {1, 0x0160000, 0x0170000, 0x110000},
112         {1, 0x0170000, 0x0172000, 0x12e000},
113         {0, 0x0000000, 0x0000000, 0x000000},
114         {0, 0x0000000, 0x0000000, 0x000000},
115         {0, 0x0000000, 0x0000000, 0x000000},
116         {0, 0x0000000, 0x0000000, 0x000000},
117         {0, 0x0000000, 0x0000000, 0x000000},
118         {0, 0x0000000, 0x0000000, 0x000000},
119         {1, 0x01e0000, 0x01e0800, 0x122000},
120         {0, 0x0000000, 0x0000000, 0x000000} } } ,
121         {{{1, 0x0200000, 0x0210000, 0x180000} } },
122         {{{0, 0,         0,         0} } },
123         {{{1, 0x0400000, 0x0401000, 0x169000} } },
124         {{{1, 0x0500000, 0x0510000, 0x140000} } },
125         {{{1, 0x0600000, 0x0610000, 0x1c0000} } },
126         {{{1, 0x0700000, 0x0704000, 0x1b8000} } },
127         {{{1, 0x0800000, 0x0802000, 0x170000},
128         {0, 0x0000000, 0x0000000, 0x000000},
129         {0, 0x0000000, 0x0000000, 0x000000},
130         {0, 0x0000000, 0x0000000, 0x000000},
131         {0, 0x0000000, 0x0000000, 0x000000},
132         {0, 0x0000000, 0x0000000, 0x000000},
133         {0, 0x0000000, 0x0000000, 0x000000},
134         {0, 0x0000000, 0x0000000, 0x000000},
135         {0, 0x0000000, 0x0000000, 0x000000},
136         {0, 0x0000000, 0x0000000, 0x000000},
137         {0, 0x0000000, 0x0000000, 0x000000},
138         {0, 0x0000000, 0x0000000, 0x000000},
139         {0, 0x0000000, 0x0000000, 0x000000},
140         {0, 0x0000000, 0x0000000, 0x000000},
141         {0, 0x0000000, 0x0000000, 0x000000},
142         {1, 0x08f0000, 0x08f2000, 0x172000} } },
143         {{{1, 0x0900000, 0x0902000, 0x174000},
144         {0, 0x0000000, 0x0000000, 0x000000},
145         {0, 0x0000000, 0x0000000, 0x000000},
146         {0, 0x0000000, 0x0000000, 0x000000},
147         {0, 0x0000000, 0x0000000, 0x000000},
148         {0, 0x0000000, 0x0000000, 0x000000},
149         {0, 0x0000000, 0x0000000, 0x000000},
150         {0, 0x0000000, 0x0000000, 0x000000},
151         {0, 0x0000000, 0x0000000, 0x000000},
152         {0, 0x0000000, 0x0000000, 0x000000},
153         {0, 0x0000000, 0x0000000, 0x000000},
154         {0, 0x0000000, 0x0000000, 0x000000},
155         {0, 0x0000000, 0x0000000, 0x000000},
156         {0, 0x0000000, 0x0000000, 0x000000},
157         {0, 0x0000000, 0x0000000, 0x000000},
158         {1, 0x09f0000, 0x09f2000, 0x176000} } },
159         {{{0, 0x0a00000, 0x0a02000, 0x178000},
160         {0, 0x0000000, 0x0000000, 0x000000},
161         {0, 0x0000000, 0x0000000, 0x000000},
162         {0, 0x0000000, 0x0000000, 0x000000},
163         {0, 0x0000000, 0x0000000, 0x000000},
164         {0, 0x0000000, 0x0000000, 0x000000},
165         {0, 0x0000000, 0x0000000, 0x000000},
166         {0, 0x0000000, 0x0000000, 0x000000},
167         {0, 0x0000000, 0x0000000, 0x000000},
168         {0, 0x0000000, 0x0000000, 0x000000},
169         {0, 0x0000000, 0x0000000, 0x000000},
170         {0, 0x0000000, 0x0000000, 0x000000},
171         {0, 0x0000000, 0x0000000, 0x000000},
172         {0, 0x0000000, 0x0000000, 0x000000},
173         {0, 0x0000000, 0x0000000, 0x000000},
174         {1, 0x0af0000, 0x0af2000, 0x17a000} } },
175         {{{0, 0x0b00000, 0x0b02000, 0x17c000},
176         {0, 0x0000000, 0x0000000, 0x000000},
177         {0, 0x0000000, 0x0000000, 0x000000},
178         {0, 0x0000000, 0x0000000, 0x000000},
179         {0, 0x0000000, 0x0000000, 0x000000},
180         {0, 0x0000000, 0x0000000, 0x000000},
181         {0, 0x0000000, 0x0000000, 0x000000},
182         {0, 0x0000000, 0x0000000, 0x000000},
183         {0, 0x0000000, 0x0000000, 0x000000},
184         {0, 0x0000000, 0x0000000, 0x000000},
185         {0, 0x0000000, 0x0000000, 0x000000},
186         {0, 0x0000000, 0x0000000, 0x000000},
187         {0, 0x0000000, 0x0000000, 0x000000},
188         {0, 0x0000000, 0x0000000, 0x000000},
189         {0, 0x0000000, 0x0000000, 0x000000},
190         {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
191         {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
192         {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
193         {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
194         {{{1, 0x0f00000, 0x0f01000, 0x164000} } },
195         {{{0, 0x1000000, 0x1004000, 0x1a8000} } },
196         {{{1, 0x1100000, 0x1101000, 0x160000} } },
197         {{{1, 0x1200000, 0x1201000, 0x161000} } },
198         {{{1, 0x1300000, 0x1301000, 0x162000} } },
199         {{{1, 0x1400000, 0x1401000, 0x163000} } },
200         {{{1, 0x1500000, 0x1501000, 0x165000} } },
201         {{{1, 0x1600000, 0x1601000, 0x166000} } },
202         {{{0, 0,         0,         0} } },
203         {{{0, 0,         0,         0} } },
204         {{{0, 0,         0,         0} } },
205         {{{0, 0,         0,         0} } },
206         {{{0, 0,         0,         0} } },
207         {{{0, 0,         0,         0} } },
208         {{{1, 0x1d00000, 0x1d10000, 0x190000} } },
209         {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
210         {{{1, 0x1f00000, 0x1f10000, 0x150000} } },
211         {{{0} } },
212         {{{1, 0x2100000, 0x2102000, 0x120000},
213         {1, 0x2110000, 0x2120000, 0x130000},
214         {1, 0x2120000, 0x2122000, 0x124000},
215         {1, 0x2130000, 0x2132000, 0x126000},
216         {1, 0x2140000, 0x2142000, 0x128000},
217         {1, 0x2150000, 0x2152000, 0x12a000},
218         {1, 0x2160000, 0x2170000, 0x110000},
219         {1, 0x2170000, 0x2172000, 0x12e000},
220         {0, 0x0000000, 0x0000000, 0x000000},
221         {0, 0x0000000, 0x0000000, 0x000000},
222         {0, 0x0000000, 0x0000000, 0x000000},
223         {0, 0x0000000, 0x0000000, 0x000000},
224         {0, 0x0000000, 0x0000000, 0x000000},
225         {0, 0x0000000, 0x0000000, 0x000000},
226         {0, 0x0000000, 0x0000000, 0x000000},
227         {0, 0x0000000, 0x0000000, 0x000000} } },
228         {{{1, 0x2200000, 0x2204000, 0x1b0000} } },
229         {{{0} } },
230         {{{0} } },
231         {{{0} } },
232         {{{0} } },
233         {{{0} } },
234         {{{1, 0x2800000, 0x2804000, 0x1a4000} } },
235         {{{1, 0x2900000, 0x2901000, 0x16b000} } },
236         {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
237         {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
238         {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
239         {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
240         {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
241         {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
242         {{{1, 0x3000000, 0x3000400, 0x1adc00} } },
243         {{{0, 0x3100000, 0x3104000, 0x1a8000} } },
244         {{{1, 0x3200000, 0x3204000, 0x1d4000} } },
245         {{{1, 0x3300000, 0x3304000, 0x1a0000} } },
246         {{{0} } },
247         {{{1, 0x3500000, 0x3500400, 0x1ac000} } },
248         {{{1, 0x3600000, 0x3600400, 0x1ae000} } },
249         {{{1, 0x3700000, 0x3700400, 0x1ae400} } },
250         {{{1, 0x3800000, 0x3804000, 0x1d0000} } },
251         {{{1, 0x3900000, 0x3904000, 0x1b4000} } },
252         {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
253         {{{0} } },
254         {{{0} } },
255         {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
256         {{{1, 0x3e00000, 0x3e01000, 0x167000} } },
257         {{{1, 0x3f00000, 0x3f01000, 0x168000} } }
258 };
259
260 /*
261  * top 12 bits of crb internal address (hub, agent)
262  */
263 unsigned qla82xx_crb_hub_agt[64] = {
264         0,
265         QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
266         QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
267         QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
268         0,
269         QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
270         QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
271         QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
272         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
273         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
274         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
275         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
276         QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
277         QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
278         QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
279         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
280         QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
281         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
282         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
283         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
284         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
285         QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
286         QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
287         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
288         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
289         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
290         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
291         0,
292         QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
293         QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
294         0,
295         QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
296         0,
297         QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
298         QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
299         0,
300         0,
301         0,
302         0,
303         0,
304         QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
305         0,
306         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
307         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
308         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
309         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
310         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
311         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
312         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
313         QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
314         QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
315         QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
316         0,
317         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
318         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
319         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
320         QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
321         0,
322         QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
323         QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
324         QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
325         0,
326         QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
327         0,
328 };
329
330 /* Device states */
331 char *qdev_state[] = {
332         "Unknown",
333         "Cold",
334         "Initializing",
335         "Ready",
336         "Need Reset",
337         "Need Quiescent",
338         "Failed",
339         "Quiescent",
340 };
341
342 /*
343  * In: 'off' is offset from CRB space in 128M pci map
344  * Out: 'off' is 2M pci map addr
345  * side effect: lock crb window
346  */
347 static void
348 qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
349 {
350         u32 win_read;
351         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
352
353         ha->crb_win = CRB_HI(*off);
354         writel(ha->crb_win,
355                 (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
356
357         /* Read back value to make sure write has gone through before trying
358          * to use it.
359          */
360         win_read = RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
361         if (win_read != ha->crb_win) {
362                 ql_dbg(ql_dbg_p3p, vha, 0xb000,
363                     "%s: Written crbwin (0x%x) "
364                     "!= Read crbwin (0x%x), off=0x%lx.\n",
365                     __func__, ha->crb_win, win_read, *off);
366         }
367         *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
368 }
369
370 static inline unsigned long
371 qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
372 {
373         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
374         /* See if we are currently pointing to the region we want to use next */
375         if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
376                 /* No need to change window. PCIX and PCIE regs are
377                  * in both windows.
378                  */
379                 return off;
380         }
381
382         if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
383                 /* We are in first CRB window */
384                 if (ha->curr_window != 0)
385                         WARN_ON(1);
386                 return off;
387         }
388
389         if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
390                 /* We are in second CRB window */
391                 off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
392
393                 if (ha->curr_window != 1)
394                         return off;
395
396                 /* We are in the QM or direct access
397                  * register region - do nothing
398                  */
399                 if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
400                         (off < QLA82XX_PCI_CAMQM_MAX))
401                         return off;
402         }
403         /* strange address given */
404         ql_dbg(ql_dbg_p3p, vha, 0xb001,
405             "%s: Warning: unm_nic_pci_set_crbwindow "
406             "called with an unknown address(%llx).\n",
407             QLA2XXX_DRIVER_NAME, off);
408         return off;
409 }
410
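/*
 * Translate a CRB offset into an address usable through the 2M BAR.
 * Returns -1 for an out-of-range offset, 0 when *off has been converted via
 * the CAM/QM aperture or the direct map above, and 1 when the caller must
 * program the sliding CRB window (qla82xx_pci_set_crbwindow_2M) instead.
 */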
411 static int
412 qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
413 {
414         struct crb_128M_2M_sub_block_map *m;
415
416         if (*off >= QLA82XX_CRB_MAX)
417                 return -1;
418
419         if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
420                 *off = (*off - QLA82XX_PCI_CAMQM) +
421                     QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
422                 return 0;
423         }
424
425         if (*off < QLA82XX_PCI_CRBSPACE)
426                 return -1;
427
428         *off -= QLA82XX_PCI_CRBSPACE;
429
430         /* Try direct map */
431         m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
432
433         if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
434                 *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
435                 return 0;
436         }
437         /* Not in direct map, use crb window */
438         return 1;
439 }
440
441 #define CRB_WIN_LOCK_TIMEOUT 100000000
442 static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
443 {
444         int done = 0, timeout = 0;
445
446         while (!done) {
447                 /* acquire semaphore3 from PCI HW block */
448                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
449                 if (done == 1)
450                         break;
451                 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
452                         return -1;
453                 timeout++;
454         }
455         qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
456         return 0;
457 }
458
459 int
460 qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
461 {
462         unsigned long flags = 0;
463         int rv;
464
465         rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
466
467         BUG_ON(rv == -1);
468
469         if (rv == 1) {
470                 write_lock_irqsave(&ha->hw_lock, flags);
471                 qla82xx_crb_win_lock(ha);
472                 qla82xx_pci_set_crbwindow_2M(ha, &off);
473         }
474
475         writel(data, (void __iomem *)off);
476
477         if (rv == 1) {
478                 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
479                 write_unlock_irqrestore(&ha->hw_lock, flags);
480         }
481         return 0;
482 }
483
484 int
485 qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
486 {
487         unsigned long flags = 0;
488         int rv;
489         u32 data;
490
491         rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
492
493         BUG_ON(rv == -1);
494
495         if (rv == 1) {
496                 write_lock_irqsave(&ha->hw_lock, flags);
497                 qla82xx_crb_win_lock(ha);
498                 qla82xx_pci_set_crbwindow_2M(ha, &off);
499         }
500         data = RD_REG_DWORD((void __iomem *)off);
501
502         if (rv == 1) {
503                 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
504                 write_unlock_irqrestore(&ha->hw_lock, flags);
505         }
506         return data;
507 }
508
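/*
 * IDC (inter-driver communication) lock, backed by hardware semaphore 5;
 * it serializes device-state transitions between the PCI functions sharing
 * the adapter. The expansion of "IDC" is an assumption from common qla2xxx
 * usage, not stated in this file.
 */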
509 #define IDC_LOCK_TIMEOUT 100000000
510 int qla82xx_idc_lock(struct qla_hw_data *ha)
511 {
512         int i;
513         int done = 0, timeout = 0;
514
515         while (!done) {
516                 /* acquire semaphore5 from PCI HW block */
517                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
518                 if (done == 1)
519                         break;
520                 if (timeout >= IDC_LOCK_TIMEOUT)
521                         return -1;
522
523                 timeout++;
524
525                 /* Yield CPU */
526                 if (!in_interrupt())
527                         schedule();
528                 else {
529                         for (i = 0; i < 20; i++)
530                                 cpu_relax();
531                 }
532         }
533
534         return 0;
535 }
536
537 void qla82xx_idc_unlock(struct qla_hw_data *ha)
538 {
539         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
540 }
541
542 /*  PCI Windowing for DDR regions.  */
543 #define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
544         (((addr) <= (high)) && ((addr) >= (low)))
545 /*
546  * check memory access boundary.
547  * used by test agent. support ddr access only for now
548  */
549 static unsigned long
550 qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
551         unsigned long long addr, int size)
552 {
553         if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
554                 QLA82XX_ADDR_DDR_NET_MAX) ||
555                 !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
556                 QLA82XX_ADDR_DDR_NET_MAX) ||
557                 ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
558                         return 0;
559         else
560                 return 1;
561 }
562
563 int qla82xx_pci_set_window_warning_count;
564
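/*
 * Program the DDR/OCM/QDR memory window that covers 'addr' and return the
 * matching offset inside the 2M BAR, or -1UL if the address falls outside
 * the handled ranges.
 */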
565 static unsigned long
566 qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
567 {
568         int window;
569         u32 win_read;
570         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
571
572         if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
573                 QLA82XX_ADDR_DDR_NET_MAX)) {
574                 /* DDR network side */
575                 window = MN_WIN(addr);
576                 ha->ddr_mn_window = window;
577                 qla82xx_wr_32(ha,
578                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
579                 win_read = qla82xx_rd_32(ha,
580                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
581                 if ((win_read << 17) != window) {
582                         ql_dbg(ql_dbg_p3p, vha, 0xb003,
583                             "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n",
584                             __func__, window, win_read);
585                 }
586                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
587         } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
588                 QLA82XX_ADDR_OCM0_MAX)) {
589                 unsigned int temp1;
590                 if ((addr & 0x00ff800) == 0xff800) {
591                         ql_log(ql_log_warn, vha, 0xb004,
592                             "%s: QM access not handled.\n", __func__);
593                         addr = -1UL;
594                 }
595                 window = OCM_WIN(addr);
596                 ha->ddr_mn_window = window;
597                 qla82xx_wr_32(ha,
598                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
599                 win_read = qla82xx_rd_32(ha,
600                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
601                 temp1 = ((window & 0x1FF) << 7) |
602                     ((window & 0x0FFFE0000) >> 17);
603                 if (win_read != temp1) {
604                         ql_log(ql_log_warn, vha, 0xb005,
605                             "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n",
606                             __func__, temp1, win_read);
607                 }
608                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
609
610         } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
611                 QLA82XX_P3_ADDR_QDR_NET_MAX)) {
612                 /* QDR network side */
613                 window = MS_WIN(addr);
614                 ha->qdr_sn_window = window;
615                 qla82xx_wr_32(ha,
616                         ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
617                 win_read = qla82xx_rd_32(ha,
618                         ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
619                 if (win_read != window) {
620                         ql_log(ql_log_warn, vha, 0xb006,
621                             "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n",
622                             __func__, window, win_read);
623                 }
624                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
625         } else {
626                 /*
627                  * peg gdb frequently accesses memory that doesn't exist;
628                  * this limits the chatter so debugging isn't slowed down.
629                  */
630                 if ((qla82xx_pci_set_window_warning_count++ < 8) ||
631                     (qla82xx_pci_set_window_warning_count%64 == 0)) {
632                         ql_log(ql_log_warn, vha, 0xb007,
633                             "%s: Warning: %s unknown address range!\n",
634                             __func__, QLA2XXX_DRIVER_NAME);
635                 }
636                 addr = -1UL;
637         }
638         return addr;
639 }
640
641 /* check if address is in the same windows as the previous access */
642 static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
643         unsigned long long addr)
644 {
645         int                     window;
646         unsigned long long      qdr_max;
647
648         qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
649
650         /* DDR network side */
651         if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
652                 QLA82XX_ADDR_DDR_NET_MAX))
653                 BUG();
654         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
655                 QLA82XX_ADDR_OCM0_MAX))
656                 return 1;
657         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
658                 QLA82XX_ADDR_OCM1_MAX))
659                 return 1;
660         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
661                 /* QDR network side */
662                 window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
663                 if (ha->qdr_sn_window == window)
664                         return 1;
665         }
666         return 0;
667 }
668
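/*
 * Direct (windowed) memory read: program the window for 'off', refuse the
 * access if it would straddle hw windows, ioremap() the BAR-0 page(s) backing
 * the windowed address and read 1/2/4/8 bytes with readb/w/l/q.
 */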
669 static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
670         u64 off, void *data, int size)
671 {
672         unsigned long   flags;
673         void           *addr = NULL;
674         int             ret = 0;
675         u64             start;
676         uint8_t         *mem_ptr = NULL;
677         unsigned long   mem_base;
678         unsigned long   mem_page;
679         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
680
681         write_lock_irqsave(&ha->hw_lock, flags);
682
683         /*
684          * If attempting to access unknown address or straddle hw windows,
685          * do not access.
686          */
687         start = qla82xx_pci_set_window(ha, off);
688         if ((start == -1UL) ||
689                 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
690                 write_unlock_irqrestore(&ha->hw_lock, flags);
691                 ql_log(ql_log_fatal, vha, 0xb008,
692                     "%s out of bound pci memory "
693                     "access, offset is 0x%llx.\n",
694                     QLA2XXX_DRIVER_NAME, off);
695                 return -1;
696         }
697
698         write_unlock_irqrestore(&ha->hw_lock, flags);
699         mem_base = pci_resource_start(ha->pdev, 0);
700         mem_page = start & PAGE_MASK;
701         /* Map two pages whenever user tries to access addresses in two
702          * consecutive pages.
703          */
704         if (mem_page != ((start + size - 1) & PAGE_MASK))
705                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
706         else
707                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
708         if (mem_ptr == NULL) {
709                 *(u8  *)data = 0;
710                 return -1;
711         }
712         addr = mem_ptr;
713         addr += start & (PAGE_SIZE - 1);
714         write_lock_irqsave(&ha->hw_lock, flags);
715
716         switch (size) {
717         case 1:
718                 *(u8  *)data = readb(addr);
719                 break;
720         case 2:
721                 *(u16 *)data = readw(addr);
722                 break;
723         case 4:
724                 *(u32 *)data = readl(addr);
725                 break;
726         case 8:
727                 *(u64 *)data = readq(addr);
728                 break;
729         default:
730                 ret = -1;
731                 break;
732         }
733         write_unlock_irqrestore(&ha->hw_lock, flags);
734
735         if (mem_ptr)
736                 iounmap(mem_ptr);
737         return ret;
738 }
739
740 static int
741 qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
742         u64 off, void *data, int size)
743 {
744         unsigned long   flags;
745         void           *addr = NULL;
746         int             ret = 0;
747         u64             start;
748         uint8_t         *mem_ptr = NULL;
749         unsigned long   mem_base;
750         unsigned long   mem_page;
751         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
752
753         write_lock_irqsave(&ha->hw_lock, flags);
754
755         /*
756          * If attempting to access unknown address or straddle hw windows,
757          * do not access.
758          */
759         start = qla82xx_pci_set_window(ha, off);
760         if ((start == -1UL) ||
761                 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
762                 write_unlock_irqrestore(&ha->hw_lock, flags);
763                 ql_log(ql_log_fatal, vha, 0xb009,
764                     "%s out of bound memory "
765                     "access, offset is 0x%llx.\n",
766                     QLA2XXX_DRIVER_NAME, off);
767                 return -1;
768         }
769
770         write_unlock_irqrestore(&ha->hw_lock, flags);
771         mem_base = pci_resource_start(ha->pdev, 0);
772         mem_page = start & PAGE_MASK;
773         /* Map two pages whenever user tries to access addresses in two
774          * consecutive pages.
775          */
776         if (mem_page != ((start + size - 1) & PAGE_MASK))
777                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
778         else
779                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
780         if (mem_ptr == NULL)
781                 return -1;
782
783         addr = mem_ptr;
784         addr += start & (PAGE_SIZE - 1);
785         write_lock_irqsave(&ha->hw_lock, flags);
786
787         switch (size) {
788         case 1:
789                 writeb(*(u8  *)data, addr);
790                 break;
791         case 2:
792                 writew(*(u16 *)data, addr);
793                 break;
794         case 4:
795                 writel(*(u32 *)data, addr);
796                 break;
797         case 8:
798                 writeq(*(u64 *)data, addr);
799                 break;
800         default:
801                 ret = -1;
802                 break;
803         }
804         write_unlock_irqrestore(&ha->hw_lock, flags);
805         if (mem_ptr)
806                 iounmap(mem_ptr);
807         return ret;
808 }
809
810 #define MTU_FUDGE_FACTOR 100
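/*
 * Convert a "transformed" CRB address from the flash CRB-init table (hub/agent
 * ID in the top bits, as set up by qla82xx_crb_addr_transform_setup()) back
 * into a 128M-map PCI CRB offset; returns ADDR_ERROR for an unknown hub/agent.
 */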
811 static unsigned long
812 qla82xx_decode_crb_addr(unsigned long addr)
813 {
814         int i;
815         unsigned long base_addr, offset, pci_base;
816
817         if (!qla82xx_crb_table_initialized)
818                 qla82xx_crb_addr_transform_setup();
819
820         pci_base = ADDR_ERROR;
821         base_addr = addr & 0xfff00000;
822         offset = addr & 0x000fffff;
823
824         for (i = 0; i < MAX_CRB_XFORM; i++) {
825                 if (crb_addr_xform[i] == base_addr) {
826                         pci_base = i << 20;
827                         break;
828                 }
829         }
830         if (pci_base == ADDR_ERROR)
831                 return pci_base;
832         return pci_base + offset;
833 }
834
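/*
 * Flash ROM access is serialized with hardware semaphore 2 (PCIE_SEM2);
 * QLA82XX_ROM_LOCK_ID records the current owner.
 */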
835 static long rom_max_timeout = 100;
836 static long qla82xx_rom_lock_timeout = 100;
837
838 static int
839 qla82xx_rom_lock(struct qla_hw_data *ha)
840 {
841         int done = 0, timeout = 0;
842
843         while (!done) {
844                 /* acquire semaphore2 from PCI HW block */
845                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
846                 if (done == 1)
847                         break;
848                 if (timeout >= qla82xx_rom_lock_timeout)
849                         return -1;
850                 timeout++;
851         }
852         qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
853         return 0;
854 }
855
856 static void
857 qla82xx_rom_unlock(struct qla_hw_data *ha)
858 {
859         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
860 }
861
862 static int
863 qla82xx_wait_rom_busy(struct qla_hw_data *ha)
864 {
865         long timeout = 0;
866         long done = 0;
867         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
868
869         while (done == 0) {
870                 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
871                 done &= 4;
872                 timeout++;
873                 if (timeout >= rom_max_timeout) {
874                         ql_dbg(ql_dbg_p3p, vha, 0xb00a,
875                             "%s: Timeout reached waiting for rom busy.\n",
876                             QLA2XXX_DRIVER_NAME);
877                         return -1;
878                 }
879         }
880         return 0;
881 }
882
883 static int
884 qla82xx_wait_rom_done(struct qla_hw_data *ha)
885 {
886         long timeout = 0;
887         long done = 0;
888         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
889
890         while (done == 0) {
891                 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
892                 done &= 2;
893                 timeout++;
894                 if (timeout >= rom_max_timeout) {
895                         ql_dbg(ql_dbg_p3p, vha, 0xb00b,
896                             "%s: Timeout reached waiting for rom done.\n",
897                             QLA2XXX_DRIVER_NAME);
898                         return -1;
899                 }
900         }
901         return 0;
902 }
903
904 static int
905 qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
906 {
907         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
908
909         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
910         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
911         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
912         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
913         qla82xx_wait_rom_busy(ha);
914         if (qla82xx_wait_rom_done(ha)) {
915                 ql_log(ql_log_fatal, vha, 0x00ba,
916                     "Error waiting for rom done.\n");
917                 return -1;
918         }
919         /* Reset abyte_cnt and dummy_byte_cnt */
920         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
921         udelay(10);
922         cond_resched();
923         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
924         *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
925         return 0;
926 }
927
928 static int
929 qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
930 {
931         int ret, loops = 0;
932         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
933
934         while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
935                 udelay(100);
936                 schedule();
937                 loops++;
938         }
939         if (loops >= 50000) {
940                 ql_log(ql_log_fatal, vha, 0x00b9,
941                     "Failed to acquire SEM2 lock.\n");
942                 return -1;
943         }
944         ret = qla82xx_do_rom_fast_read(ha, addr, valp);
945         qla82xx_rom_unlock(ha);
946         return ret;
947 }
948
949 static int
950 qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
951 {
952         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
953         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
954         qla82xx_wait_rom_busy(ha);
955         if (qla82xx_wait_rom_done(ha)) {
956                 ql_log(ql_log_warn, vha, 0xb00c,
957                     "Error waiting for rom done.\n");
958                 return -1;
959         }
960         *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
961         return 0;
962 }
963
964 static int
965 qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
966 {
967         long timeout = 0;
968         uint32_t done = 1;
969         uint32_t val;
970         int ret = 0;
971         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
972
973         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
974         while ((done != 0) && (ret == 0)) {
975                 ret = qla82xx_read_status_reg(ha, &val);
976                 done = val & 1;
977                 timeout++;
978                 udelay(10);
979                 cond_resched();
980                 if (timeout >= 50000) {
981                         ql_log(ql_log_warn, vha, 0xb00d,
982                             "Timeout reached waiting for write finish.\n");
983                         return -1;
984                 }
985         }
986         return ret;
987 }
988
989 static int
990 qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
991 {
992         uint32_t val;
993         qla82xx_wait_rom_busy(ha);
994         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
995         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
996         qla82xx_wait_rom_busy(ha);
997         if (qla82xx_wait_rom_done(ha))
998                 return -1;
999         if (qla82xx_read_status_reg(ha, &val) != 0)
1000                 return -1;
1001         if ((val & 2) != 2)
1002                 return -1;
1003         return 0;
1004 }
1005
1006 static int
1007 qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
1008 {
1009         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1010         if (qla82xx_flash_set_write_enable(ha))
1011                 return -1;
1012         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
1013         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
1014         if (qla82xx_wait_rom_done(ha)) {
1015                 ql_log(ql_log_warn, vha, 0xb00e,
1016                     "Error waiting for rom done.\n");
1017                 return -1;
1018         }
1019         return qla82xx_flash_wait_write_finish(ha);
1020 }
1021
1022 static int
1023 qla82xx_write_disable_flash(struct qla_hw_data *ha)
1024 {
1025         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1026         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
1027         if (qla82xx_wait_rom_done(ha)) {
1028                 ql_log(ql_log_warn, vha, 0xb00f,
1029                     "Error waiting for rom done.\n");
1030                 return -1;
1031         }
1032         return 0;
1033 }
1034
1035 static int
1036 ql82xx_rom_lock_d(struct qla_hw_data *ha)
1037 {
1038         int loops = 0;
1039         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1040
1041         while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1042                 udelay(100);
1043                 cond_resched();
1044                 loops++;
1045         }
1046         if (loops >= 50000) {
1047                 ql_log(ql_log_warn, vha, 0xb010,
1048                     "ROM lock failed.\n");
1049                 return -1;
1050         }
1051         return 0;
1052 }
1053
1054 static int
1055 qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1056         uint32_t data)
1057 {
1058         int ret = 0;
1059         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1060
1061         ret = ql82xx_rom_lock_d(ha);
1062         if (ret < 0) {
1063                 ql_log(ql_log_warn, vha, 0xb011,
1064                     "ROM lock failed.\n");
1065                 return ret;
1066         }
1067
1068         if (qla82xx_flash_set_write_enable(ha))
1069                 goto done_write;
1070
1071         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
1072         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
1073         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
1074         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
1075         qla82xx_wait_rom_busy(ha);
1076         if (qla82xx_wait_rom_done(ha)) {
1077                 ql_log(ql_log_warn, vha, 0xb012,
1078                     "Error waiting for rom done.\n");
1079                 ret = -1;
1080                 goto done_write;
1081         }
1082
1083         ret = qla82xx_flash_wait_write_finish(ha);
1084
1085 done_write:
1086         qla82xx_rom_unlock(ha);
1087         return ret;
1088 }
1089
1090 /* This routine does CRB initialize sequence
1091  *  to put the ISP into operational state
1092  */
1093 static int
1094 qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1095 {
1096         int addr, val;
1097         int i;
1098         struct crb_addr_pair *buf;
1099         unsigned long off;
1100         unsigned offset, n;
1101         struct qla_hw_data *ha = vha->hw;
1102
1103         struct crb_addr_pair {
1104                 long addr;
1105                 long data;
1106         };
1107
1108         /* Halt all the individual PEGs and other blocks of the ISP */
1109         qla82xx_rom_lock(ha);
1110
1111         /* disable all I2Q */
1112         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
1113         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
1114         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
1115         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
1116         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
1117         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
1118
1119         /* disable all niu interrupts */
1120         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
1121         /* disable xge rx/tx */
1122         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
1123         /* disable xg1 rx/tx */
1124         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
1125         /* disable sideband mac */
1126         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
1127         /* disable ap0 mac */
1128         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
1129         /* disable ap1 mac */
1130         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
1131
1132         /* halt sre */
1133         val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
1134         qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
1135
1136         /* halt epg */
1137         qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
1138
1139         /* halt timers */
1140         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
1141         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
1142         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
1143         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
1144         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
1145         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
1146
1147         /* halt pegs */
1148         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
1149         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
1150         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
1151         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
1152         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
1153         msleep(20);
1154
1155         /* big hammer */
1156         if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
1157                 /* don't reset CAM block on reset */
1158                 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
1159         else
1160                 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
1161
1162         /* reset ms */
1163         val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
1164         val |= (1 << 1);
1165         qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1166         msleep(20);
1167
1168         /* unreset ms */
1169         val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
1170         val &= ~(1 << 1);
1171         qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1172         msleep(20);
1173
1174         qla82xx_rom_unlock(ha);
1175
1176         /* Read the signature value from the flash.
1177          * Offset 0: Contains the signature (0xcafecafe)
1178          * Offset 4: Offset and number of addr/value pairs
1179          * that are present in the CRB initialize sequence
1180          */
1181         if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1182             qla82xx_rom_fast_read(ha, 4, &n) != 0) {
1183                 ql_log(ql_log_fatal, vha, 0x006e,
1184                     "Error Reading crb_init area: n: %08x.\n", n);
1185                 return -1;
1186         }
1187
1188         /* Offset in flash = lower 16 bits
1189          * Number of entries = upper 16 bits
1190          */
1191         offset = n & 0xffffU;
1192         n = (n >> 16) & 0xffffU;
1193
1194         /* number of addr/value pairs should not exceed 1024 entries */
1195         if (n  >= 1024) {
1196                 ql_log(ql_log_fatal, vha, 0x0071,
1197                     "Card flash not initialized: n=0x%x.\n", n);
1198                 return -1;
1199         }
1200
1201         ql_log(ql_log_info, vha, 0x0072,
1202             "%d CRB init values found in ROM.\n", n);
1203
1204         buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
1205         if (buf == NULL) {
1206                 ql_log(ql_log_fatal, vha, 0x010c,
1207                     "Unable to allocate memory.\n");
1208                 return -1;
1209         }
1210
1211         for (i = 0; i < n; i++) {
1212                 if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
1213                     qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
1214                         kfree(buf);
1215                         return -1;
1216                 }
1217
1218                 buf[i].addr = addr;
1219                 buf[i].data = val;
1220         }
1221
1222         for (i = 0; i < n; i++) {
1223                 /* Translate internal CRB initialization
1224                  * address to PCI bus address
1225                  */
1226                 off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
1227                     QLA82XX_PCI_CRBSPACE;
1228                 /* Not all CRB addr/value pairs are to be written;
1229                  * some of them are skipped
1230                  */
1231
1232                 /* skipping cold reboot MAGIC */
1233                 if (off == QLA82XX_CAM_RAM(0x1fc))
1234                         continue;
1235
1236                 /* do not reset PCI */
1237                 if (off == (ROMUSB_GLB + 0xbc))
1238                         continue;
1239
1240                 /* skip core clock, so that firmware can increase the clock */
1241                 if (off == (ROMUSB_GLB + 0xc8))
1242                         continue;
1243
1244                 /* skip the function enable register */
1245                 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
1246                         continue;
1247
1248                 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
1249                         continue;
1250
1251                 if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
1252                         continue;
1253
1254                 if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
1255                         continue;
1256
1257                 if (off == ADDR_ERROR) {
1258                         ql_log(ql_log_fatal, vha, 0x0116,
1259                             "Unknown addr: 0x%08lx.\n", buf[i].addr);
1260                         continue;
1261                 }
1262
1263                 qla82xx_wr_32(ha, off, buf[i].data);
1264
1265                 /* ISP requires a much bigger delay to settle down,
1266                  * else crb_window returns 0xffffffff
1267                  */
1268                 if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
1269                         msleep(1000);
1270
1271                 /* ISP requires a millisecond delay between
1272                  * successive CRB register updates
1273                  */
1274                 msleep(1);
1275         }
1276
1277         kfree(buf);
1278
1279         /* Resetting the data and instruction cache */
1280         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
1281         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
1282         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
1283
1284         /* Clear all protocol processing engines */
1285         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
1286         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
1287         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
1288         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
1289         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
1290         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
1291         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
1292         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
1293         return 0;
1294 }
1295
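/*
 * Write to adapter memory through the MIU test agent. The access is widened
 * to 16-byte aligned chunks: the surrounding data is first fetched with
 * qla82xx_pci_mem_read_2M(), the caller's 1/2/4/8 bytes are merged in, and
 * each chunk is written back via the MIU_TEST_AGT_* registers while polling
 * MIU_TA_CTL_BUSY for completion. DDR addresses that pass the bound check
 * are written directly instead.
 */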
1296 static int
1297 qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1298                 u64 off, void *data, int size)
1299 {
1300         int i, j, ret = 0, loop, sz[2], off0;
1301         int scale, shift_amount, startword;
1302         uint32_t temp;
1303         uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
1304
1305         /*
1306          * If not MN, go check for MS or invalid.
1307          */
1308         if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1309                 mem_crb = QLA82XX_CRB_QDR_NET;
1310         else {
1311                 mem_crb = QLA82XX_CRB_DDR_NET;
1312                 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1313                         return qla82xx_pci_mem_write_direct(ha,
1314                             off, data, size);
1315         }
1316
1317         off0 = off & 0x7;
1318         sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1319         sz[1] = size - sz[0];
1320
1321         off8 = off & 0xfffffff0;
1322         loop = (((off & 0xf) + size - 1) >> 4) + 1;
1323         shift_amount = 4;
1324         scale = 2;
1325         startword = (off & 0xf)/8;
1326
1327         for (i = 0; i < loop; i++) {
1328                 if (qla82xx_pci_mem_read_2M(ha, off8 +
1329                     (i << shift_amount), &word[i * scale], 8))
1330                         return -1;
1331         }
1332
1333         switch (size) {
1334         case 1:
1335                 tmpw = *((uint8_t *)data);
1336                 break;
1337         case 2:
1338                 tmpw = *((uint16_t *)data);
1339                 break;
1340         case 4:
1341                 tmpw = *((uint32_t *)data);
1342                 break;
1343         case 8:
1344         default:
1345                 tmpw = *((uint64_t *)data);
1346                 break;
1347         }
1348
1349         if (sz[0] == 8) {
1350                 word[startword] = tmpw;
1351         } else {
1352                 word[startword] &=
1353                         ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1354                 word[startword] |= tmpw << (off0 * 8);
1355         }
1356         if (sz[1] != 0) {
1357                 word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1358                 word[startword+1] |= tmpw >> (sz[0] * 8);
1359         }
1360
1361         for (i = 0; i < loop; i++) {
1362                 temp = off8 + (i << shift_amount);
1363                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1364                 temp = 0;
1365                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1366                 temp = word[i * scale] & 0xffffffff;
1367                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1368                 temp = (word[i * scale] >> 32) & 0xffffffff;
1369                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1370                 temp = word[i*scale + 1] & 0xffffffff;
1371                 qla82xx_wr_32(ha, mem_crb +
1372                     MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
1373                 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1374                 qla82xx_wr_32(ha, mem_crb +
1375                     MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
1376
1377                 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1378                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1379                 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1380                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1381
1382                 for (j = 0; j < MAX_CTL_CHECK; j++) {
1383                         temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1384                         if ((temp & MIU_TA_CTL_BUSY) == 0)
1385                                 break;
1386                 }
1387
1388                 if (j >= MAX_CTL_CHECK) {
1389                         if (printk_ratelimit())
1390                                 dev_err(&ha->pdev->dev,
1391                                     "failed to write through agent.\n");
1392                         ret = -1;
1393                         break;
1394                 }
1395         }
1396
1397         return ret;
1398 }
1399
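/*
 * Copy the bootloader from flash (flt_region_bootload, a word address, hence
 * the << 2) into adapter memory between BOOTLD_START and IMAGE_START, eight
 * bytes at a time, then kick it off by writing the peg 0 control register
 * and the global software reset register.
 */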
1400 static int
1401 qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
1402 {
1403         int  i;
1404         long size = 0;
1405         long flashaddr = ha->flt_region_bootload << 2;
1406         long memaddr = BOOTLD_START;
1407         u64 data;
1408         u32 high, low;
1409         size = (IMAGE_START - BOOTLD_START) / 8;
1410
1411         for (i = 0; i < size; i++) {
1412                 if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
1413                     (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
1414                         return -1;
1415                 }
1416                 data = ((u64)high << 32) | low ;
1417                 qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
1418                 flashaddr += 8;
1419                 memaddr += 8;
1420
1421                 if (i % 0x1000 == 0)
1422                         msleep(1);
1423         }
1424         udelay(100);
1425         read_lock(&ha->hw_lock);
1426         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1427         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1428         read_unlock(&ha->hw_lock);
1429         return 0;
1430 }
1431
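/*
 * Read from adapter memory through the MIU test agent in 16-byte chunks and
 * extract the requested 1/2/4/8 bytes; DDR addresses that pass the bound
 * check are read directly instead.
 */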
1432 int
1433 qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1434                 u64 off, void *data, int size)
1435 {
1436         int i, j = 0, k, start, end, loop, sz[2], off0[2];
1437         int           shift_amount;
1438         uint32_t      temp;
1439         uint64_t      off8, val, mem_crb, word[2] = {0, 0};
1440
1441         /*
1442          * If not MN, go check for MS or invalid.
1443          */
1444
1445         if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1446                 mem_crb = QLA82XX_CRB_QDR_NET;
1447         else {
1448                 mem_crb = QLA82XX_CRB_DDR_NET;
1449                 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1450                         return qla82xx_pci_mem_read_direct(ha,
1451                             off, data, size);
1452         }
1453
1454         off8 = off & 0xfffffff0;
1455         off0[0] = off & 0xf;
1456         sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
1457         shift_amount = 4;
1458         loop = ((off0[0] + size - 1) >> shift_amount) + 1;
1459         off0[1] = 0;
1460         sz[1] = size - sz[0];
1461
1462         for (i = 0; i < loop; i++) {
1463                 temp = off8 + (i << shift_amount);
1464                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
1465                 temp = 0;
1466                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
1467                 temp = MIU_TA_CTL_ENABLE;
1468                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1469                 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
1470                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1471
1472                 for (j = 0; j < MAX_CTL_CHECK; j++) {
1473                         temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1474                         if ((temp & MIU_TA_CTL_BUSY) == 0)
1475                                 break;
1476                 }
1477
1478                 if (j >= MAX_CTL_CHECK) {
1479                         if (printk_ratelimit())
1480                                 dev_err(&ha->pdev->dev,
1481                                     "failed to read through agent.\n");
1482                         break;
1483                 }
1484
1485                 start = off0[i] >> 2;
1486                 end   = (off0[i] + sz[i] - 1) >> 2;
1487                 for (k = start; k <= end; k++) {
1488                         temp = qla82xx_rd_32(ha,
1489                                         mem_crb + MIU_TEST_AGT_RDDATA(k));
1490                         word[i] |= ((uint64_t)temp << (32 * (k & 1)));
1491                 }
1492         }
1493
1494         if (j >= MAX_CTL_CHECK)
1495                 return -1;
1496
1497         if ((off0[0] & 7) == 0) {
1498                 val = word[0];
1499         } else {
1500                 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
1501                         ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
1502         }
1503
1504         switch (size) {
1505         case 1:
1506                 *(uint8_t  *)data = val;
1507                 break;
1508         case 2:
1509                 *(uint16_t *)data = val;
1510                 break;
1511         case 4:
1512                 *(uint32_t *)data = val;
1513                 break;
1514         case 8:
1515                 *(uint64_t *)data = val;
1516                 break;
1517         }
1518         return 0;
1519 }
1520
1521
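/*
 * Walk the unified ROM image (URI) directory and return the table
 * descriptor whose type matches @section, or NULL if none is found.
 */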
1522 static struct qla82xx_uri_table_desc *
1523 qla82xx_get_table_desc(const u8 *unirom, int section)
1524 {
1525         uint32_t i;
1526         struct qla82xx_uri_table_desc *directory =
1527                 (struct qla82xx_uri_table_desc *)&unirom[0];
1528         __le32 offset;
1529         __le32 tab_type;
1530         __le32 entries = cpu_to_le32(directory->num_entries);
1531
1532         for (i = 0; i < entries; i++) {
1533                 offset = cpu_to_le32(directory->findex) +
1534                     (i * cpu_to_le32(directory->entry_size));
1535                 tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
1536
1537                 if (tab_type == section)
1538                         return (struct qla82xx_uri_table_desc *)&unirom[offset];
1539         }
1540
1541         return NULL;
1542 }
1543
1544 static struct qla82xx_uri_data_desc *
1545 qla82xx_get_data_desc(struct qla_hw_data *ha,
1546         u32 section, u32 idx_offset)
1547 {
1548         const u8 *unirom = ha->hablob->fw->data;
1549         int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
1550         struct qla82xx_uri_table_desc *tab_desc = NULL;
1551         __le32 offset;
1552
1553         tab_desc = qla82xx_get_table_desc(unirom, section);
1554         if (!tab_desc)
1555                 return NULL;
1556
1557         offset = cpu_to_le32(tab_desc->findex) +
1558             (cpu_to_le32(tab_desc->entry_size) * idx);
1559
1560         return (struct qla82xx_uri_data_desc *)&unirom[offset];
1561 }
1562
1563 static u8 *
1564 qla82xx_get_bootld_offset(struct qla_hw_data *ha)
1565 {
1566         u32 offset = BOOTLD_START;
1567         struct qla82xx_uri_data_desc *uri_desc = NULL;
1568
1569         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1570                 uri_desc = qla82xx_get_data_desc(ha,
1571                     QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
1572                 if (uri_desc)
1573                         offset = cpu_to_le32(uri_desc->findex);
1574         }
1575
1576         return (u8 *)&ha->hablob->fw->data[offset];
1577 }
1578
1579 static __le32
1580 qla82xx_get_fw_size(struct qla_hw_data *ha)
1581 {
1582         struct qla82xx_uri_data_desc *uri_desc = NULL;
1583
1584         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1585                 uri_desc =  qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1586                     QLA82XX_URI_FIRMWARE_IDX_OFF);
1587                 if (uri_desc)
1588                         return cpu_to_le32(uri_desc->size);
1589         }
1590
1591         return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
1592 }
1593
1594 static u8 *
1595 qla82xx_get_fw_offs(struct qla_hw_data *ha)
1596 {
1597         u32 offset = IMAGE_START;
1598         struct qla82xx_uri_data_desc *uri_desc = NULL;
1599
1600         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1601                 uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1602                         QLA82XX_URI_FIRMWARE_IDX_OFF);
1603                 if (uri_desc)
1604                         offset = cpu_to_le32(uri_desc->findex);
1605         }
1606
1607         return (u8 *)&ha->hablob->fw->data[offset];
1608 }
1609
1610 /* PCI related functions */
1611 char *
1612 qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
1613 {
1614         int pcie_reg;
1615         struct qla_hw_data *ha = vha->hw;
1616         char lwstr[6];
1617         uint16_t lnk;
1618
1619         pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
1620         pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
1621         ha->link_width = (lnk >> 4) & 0x3f;
1622
1623         strcpy(str, "PCIe (");
1624         strcat(str, "2.5Gb/s ");
1625         snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
1626         strcat(str, lwstr);
1627         return str;
1628 }
1629
1630 int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
1631 {
1632         unsigned long val = 0;
1633         u32 control;
1634
1635         switch (region) {
1636         case 0:
1637                 val = 0;
1638                 break;
1639         case 1:
1640                 pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
1641                 val = control + QLA82XX_MSIX_TBL_SPACE;
1642                 break;
1643         }
1644         return val;
1645 }
1646
1647
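/*
 * Map the ISP82xx register windows: BAR0 provides the CRB/MMIO space and
 * the per-function I/O base at offset 0xbc000 + (devfn << 11); the
 * doorbell is either an ioremap of BAR4 (default) or, when ql2xdbwr is
 * set, one of the CAMRAM doorbell registers.
 */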
1648 int
1649 qla82xx_iospace_config(struct qla_hw_data *ha)
1650 {
1651         uint32_t len = 0;
1652
1653         if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
1654                 ql_log_pci(ql_log_fatal, ha->pdev, 0x000c,
1655                     "Failed to reserve selected regions.\n");
1656                 goto iospace_error_exit;
1657         }
1658
1659         /* Use MMIO operations for all accesses. */
1660         if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1661                 ql_log_pci(ql_log_fatal, ha->pdev, 0x000d,
1662                     "Region #0 not an MMIO resource, aborting.\n");
1663                 goto iospace_error_exit;
1664         }
1665
1666         len = pci_resource_len(ha->pdev, 0);
1667         ha->nx_pcibase =
1668             (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
1669         if (!ha->nx_pcibase) {
1670                 ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
1671                     "Cannot remap pcibase MMIO, aborting.\n");
1672                 pci_release_regions(ha->pdev);
1673                 goto iospace_error_exit;
1674         }
1675
1676         /* Mapping of IO base pointer */
1677         ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
1678             0xbc000 + (ha->pdev->devfn << 11));
1679
1680         if (!ql2xdbwr) {
1681                 ha->nxdb_wr_ptr =
1682                     (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
1683                     (ha->pdev->devfn << 12)), 4);
1684                 if (!ha->nxdb_wr_ptr) {
1685                         ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
1686                             "Cannot remap MMIO, aborting.\n");
1687                         pci_release_regions(ha->pdev);
1688                         goto iospace_error_exit;
1689                 }
1690
1691                 /* Mapping of IO base pointer,
1692                  * door bell read and write pointer
1693                  */
1694                 ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
1695                     (ha->pdev->devfn * 8);
1696         } else {
1697                 ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
1698                         QLA82XX_CAMRAM_DB1 :
1699                         QLA82XX_CAMRAM_DB2);
1700         }
1701
1702         ha->max_req_queues = ha->max_rsp_queues = 1;
1703         ha->msix_count = ha->max_rsp_queues + 1;
1704         ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
1705             "nx_pci_base=%p iobase=%p "
1706             "max_req_queues=%d msix_count=%d.\n",
1707             ha->nx_pcibase, ha->iobase,
1708             ha->max_req_queues, ha->msix_count);
1709         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
1710             "nx_pci_base=%p iobase=%p "
1711             "max_req_queues=%d msix_count=%d.\n",
1712             ha->nx_pcibase, ha->iobase,
1713             ha->max_req_queues, ha->msix_count);
1714         return 0;
1715
1716 iospace_error_exit:
1717         return -ENOMEM;
1718 }
1719
1720 /* GS related functions */
1721
1722 /* Initialization related functions */
1723
1724 /**
1725  * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
1726  * @vha: HA context
1727  *
1728  * Returns 0 on success.
1729  */
1730 int
1731 qla82xx_pci_config(scsi_qla_host_t *vha)
1732 {
1733         struct qla_hw_data *ha = vha->hw;
1734         int ret;
1735
1736         pci_set_master(ha->pdev);
1737         ret = pci_set_mwi(ha->pdev);
1738         ha->chip_revision = ha->pdev->revision;
1739         ql_dbg(ql_dbg_init, vha, 0x0043,
1740             "Chip revision:%d.\n",
1741             ha->chip_revision);
1742         return 0;
1743 }
1744
1745 /**
1746  * qla82xx_reset_chip() - Disable adapter interrupts on the ISP82xx.
1747  * @vha: HA context
1748  *
1749  * The ISP82xx is not soft-reset here; only interrupts are disabled.
1750  */
1751 void
1752 qla82xx_reset_chip(scsi_qla_host_t *vha)
1753 {
1754         struct qla_hw_data *ha = vha->hw;
1755         ha->isp_ops->disable_intrs(ha);
1756 }
1757
1758 void qla82xx_config_rings(struct scsi_qla_host *vha)
1759 {
1760         struct qla_hw_data *ha = vha->hw;
1761         struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1762         struct init_cb_81xx *icb;
1763         struct req_que *req = ha->req_q_map[0];
1764         struct rsp_que *rsp = ha->rsp_q_map[0];
1765
1766         /* Setup ring parameters in initialization control block. */
1767         icb = (struct init_cb_81xx *)ha->init_cb;
1768         icb->request_q_outpointer = __constant_cpu_to_le16(0);
1769         icb->response_q_inpointer = __constant_cpu_to_le16(0);
1770         icb->request_q_length = cpu_to_le16(req->length);
1771         icb->response_q_length = cpu_to_le16(rsp->length);
1772         icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1773         icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1774         icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1775         icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1776
1777         WRT_REG_DWORD((unsigned long  __iomem *)&reg->req_q_out[0], 0);
1778         WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_in[0], 0);
1779         WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_out[0], 0);
1780 }
1781
1782 void qla82xx_reset_adapter(struct scsi_qla_host *vha)
1783 {
1784         struct qla_hw_data *ha = vha->hw;
1785         vha->flags.online = 0;
1786         qla2x00_try_to_stop_firmware(vha);
1787         ha->isp_ops->disable_intrs(ha);
1788 }
1789
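/*
 * Copy the boot loader and firmware sections of the request_firmware()
 * blob into adapter memory, write the QLA82XX_BDINFO_MAGIC handshake
 * value into CAMRAM, and then kick off the firmware through the Peg
 * control and global SW reset registers.
 */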
1790 static int
1791 qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
1792 {
1793         u64 *ptr64;
1794         u32 i, flashaddr, size;
1795         __le64 data;
1796
1797         size = (IMAGE_START - BOOTLD_START) / 8;
1798
1799         ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
1800         flashaddr = BOOTLD_START;
1801
1802         for (i = 0; i < size; i++) {
1803                 data = cpu_to_le64(ptr64[i]);
1804                 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1805                         return -EIO;
1806                 flashaddr += 8;
1807         }
1808
1809         flashaddr = FLASH_ADDR_START;
1810         size = (__force u32)qla82xx_get_fw_size(ha) / 8;
1811         ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
1812
1813         for (i = 0; i < size; i++) {
1814                 data = cpu_to_le64(ptr64[i]);
1815
1816                 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1817                         return -EIO;
1818                 flashaddr += 8;
1819         }
1820         udelay(100);
1821
1822         /* Write a magic value to CAMRAM register
1823          * at a specified offset to indicate
1824          * that all data is written and
1825          * ready for firmware to initialize.
1826          */
1827         qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);
1828
1829         read_lock(&ha->hw_lock);
1830         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1831         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1832         read_unlock(&ha->hw_lock);
1833         return 0;
1834 }
1835
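/*
 * Scan the URI product table for the entry matching this chip revision
 * (and the hard-coded "MN not present" flag) and remember its offset in
 * ha->file_prd_off for later data-descriptor lookups.
 */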
1836 static int
1837 qla82xx_set_product_offset(struct qla_hw_data *ha)
1838 {
1839         struct qla82xx_uri_table_desc *ptab_desc = NULL;
1840         const uint8_t *unirom = ha->hablob->fw->data;
1841         uint32_t i;
1842         __le32 entries;
1843         __le32 flags, file_chiprev, offset;
1844         uint8_t chiprev = ha->chip_revision;
1845         /* Hardcoding mn_present flag for P3P */
1846         int mn_present = 0;
1847         uint32_t flagbit;
1848
1849         ptab_desc = qla82xx_get_table_desc(unirom,
1850                  QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
1851         if (!ptab_desc)
1852                 return -1;
1853
1854         entries = cpu_to_le32(ptab_desc->num_entries);
1855
1856         for (i = 0; i < entries; i++) {
1857                 offset = cpu_to_le32(ptab_desc->findex) +
1858                         (i * cpu_to_le32(ptab_desc->entry_size));
1859                 flags = cpu_to_le32(*((int *)&unirom[offset] +
1860                         QLA82XX_URI_FLAGS_OFF));
1861                 file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
1862                         QLA82XX_URI_CHIP_REV_OFF));
1863
1864                 flagbit = mn_present ? 1 : 2;
1865
1866                 if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
1867                         ha->file_prd_off = offset;
1868                         return 0;
1869                 }
1870         }
1871         return -1;
1872 }
1873
1874 int
1875 qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
1876 {
1877         __le32 val;
1878         uint32_t min_size;
1879         struct qla_hw_data *ha = vha->hw;
1880         const struct firmware *fw = ha->hablob->fw;
1881
1882         ha->fw_type = fw_type;
1883
1884         if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1885                 if (qla82xx_set_product_offset(ha))
1886                         return -EINVAL;
1887
1888                 min_size = QLA82XX_URI_FW_MIN_SIZE;
1889         } else {
1890                 val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
1891                 if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
1892                         return -EINVAL;
1893
1894                 min_size = QLA82XX_FW_MIN_SIZE;
1895         }
1896
1897         if (fw->size < min_size)
1898                 return -EINVAL;
1899         return 0;
1900 }
1901
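/*
 * Poll CRB_CMDPEG_STATE every 500ms, up to 60 times, waiting for the
 * command Peg to report PHAN_INITIALIZE_COMPLETE/ACK; mark the state as
 * failed if it never does.
 */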
1902 static int
1903 qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1904 {
1905         u32 val = 0;
1906         int retries = 60;
1907         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1908
1909         do {
1910                 read_lock(&ha->hw_lock);
1911                 val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
1912                 read_unlock(&ha->hw_lock);
1913
1914                 switch (val) {
1915                 case PHAN_INITIALIZE_COMPLETE:
1916                 case PHAN_INITIALIZE_ACK:
1917                         return QLA_SUCCESS;
1918                 case PHAN_INITIALIZE_FAILED:
1919                         break;
1920                 default:
1921                         break;
1922                 }
1923                 ql_log(ql_log_info, vha, 0x00a8,
1924                     "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n",
1925                     val, retries);
1926
1927                 msleep(500);
1928
1929         } while (--retries);
1930
1931         ql_log(ql_log_fatal, vha, 0x00a9,
1932             "Cmd Peg initialization failed: 0x%x.\n", val);
1933
1934         val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
1935         read_lock(&ha->hw_lock);
1936         qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1937         read_unlock(&ha->hw_lock);
1938         return QLA_FUNCTION_FAILED;
1939 }
1940
1941 static int
1942 qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1943 {
1944         u32 val = 0;
1945         int retries = 60;
1946         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1947
1948         do {
1949                 read_lock(&ha->hw_lock);
1950                 val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
1951                 read_unlock(&ha->hw_lock);
1952
1953                 switch (val) {
1954                 case PHAN_INITIALIZE_COMPLETE:
1955                 case PHAN_INITIALIZE_ACK:
1956                         return QLA_SUCCESS;
1957                 case PHAN_INITIALIZE_FAILED:
1958                         break;
1959                 default:
1960                         break;
1961                 }
1962                 ql_log(ql_log_info, vha, 0x00ab,
1963                     "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
1964                     val, retries);
1965
1966                 msleep(500);
1967
1968         } while (--retries);
1969
1970         ql_log(ql_log_fatal, vha, 0x00ac,
1971             "Rcv Peg initialization failed: 0x%x.\n", val);
1972         read_lock(&ha->hw_lock);
1973         qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
1974         read_unlock(&ha->hw_lock);
1975         return QLA_FUNCTION_FAILED;
1976 }
1977
1978 /* ISR related functions */
1979 uint32_t qla82xx_isr_int_target_mask_enable[8] = {
1980         ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
1981         ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
1982         ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
1983         ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
1984 };
1985
1986 uint32_t qla82xx_isr_int_target_status[8] = {
1987         ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
1988         ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
1989         ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
1990         ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
1991 };
1992
1993 static struct qla82xx_legacy_intr_set legacy_intr[] = \
1994         QLA82XX_LEGACY_INTR_CONFIG;
1995
1996 /*
1997  * qla82xx_mbx_completion() - Process mailbox command completions.
1998  * @ha: SCSI driver HA context
1999  * @mb0: Mailbox0 register
2000  */
2001 static void
2002 qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2003 {
2004         uint16_t        cnt;
2005         uint16_t __iomem *wptr;
2006         struct qla_hw_data *ha = vha->hw;
2007         struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2008         wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
2009
2010         /* Load return mailbox registers. */
2011         ha->flags.mbox_int = 1;
2012         ha->mailbox_out[0] = mb0;
2013
2014         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2015                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2016                 wptr++;
2017         }
2018
2019         if (ha->mcp) {
2020                 ql_dbg(ql_dbg_async, vha, 0x5052,
2021                     "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
2022         } else {
2023                 ql_dbg(ql_dbg_async, vha, 0x5053,
2024                     "MBX pointer ERROR.\n");
2025         }
2026 }
2027
2028 /*
2029  * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
2030  * @irq: interrupt number
2031  * @dev_id: SCSI driver HA context (response queue)
2033  *
2034  * Called by system whenever the host adapter generates an interrupt.
2035  *
2036  * Returns handled flag.
2037  */
2038 irqreturn_t
2039 qla82xx_intr_handler(int irq, void *dev_id)
2040 {
2041         scsi_qla_host_t *vha;
2042         struct qla_hw_data *ha;
2043         struct rsp_que *rsp;
2044         struct device_reg_82xx __iomem *reg;
2045         int status = 0, status1 = 0;
2046         unsigned long   flags;
2047         unsigned long   iter;
2048         uint32_t        stat = 0;
2049         uint16_t        mb[4];
2050
2051         rsp = (struct rsp_que *) dev_id;
2052         if (!rsp) {
2053                 printk(KERN_INFO
2054                         "%s(): NULL response queue pointer.\n", __func__);
2055                 return IRQ_NONE;
2056         }
2057         ha = rsp->hw;
2058
2059         if (!ha->flags.msi_enabled) {
2060                 status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
2061                 if (!(status & ha->nx_legacy_intr.int_vec_bit))
2062                         return IRQ_NONE;
2063
2064                 status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
2065                 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
2066                         return IRQ_NONE;
2067         }
2068
2069         /* clear the interrupt */
2070         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
2071
2072         /* read twice to ensure write is flushed */
2073         qla82xx_rd_32(ha, ISR_INT_VECTOR);
2074         qla82xx_rd_32(ha, ISR_INT_VECTOR);
2075
2076         reg = &ha->iobase->isp82;
2077
2078         spin_lock_irqsave(&ha->hardware_lock, flags);
2079         vha = pci_get_drvdata(ha->pdev);
2080         for (iter = 1; iter--; ) {
2081
2082                 if (RD_REG_DWORD(&reg->host_int)) {
2083                         stat = RD_REG_DWORD(&reg->host_status);
2084
2085                         switch (stat & 0xff) {
2086                         case 0x1:
2087                         case 0x2:
2088                         case 0x10:
2089                         case 0x11:
2090                                 qla82xx_mbx_completion(vha, MSW(stat));
2091                                 status |= MBX_INTERRUPT;
2092                                 break;
2093                         case 0x12:
2094                                 mb[0] = MSW(stat);
2095                                 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2096                                 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2097                                 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2098                                 qla2x00_async_event(vha, rsp, mb);
2099                                 break;
2100                         case 0x13:
2101                                 qla24xx_process_response_queue(vha, rsp);
2102                                 break;
2103                         default:
2104                                 ql_dbg(ql_dbg_async, vha, 0x5054,
2105                                     "Unrecognized interrupt type (%d).\n",
2106                                     stat & 0xff);
2107                                 break;
2108                         }
2109                 }
2110                 WRT_REG_DWORD(&reg->host_int, 0);
2111         }
2112         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2113         if (!ha->flags.msi_enabled)
2114                 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2115
2116 #ifdef QL_DEBUG_LEVEL_17
2117         if (!irq && ha->flags.eeh_busy)
2118                 ql_log(ql_log_warn, vha, 0x503d,
2119                     "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2120                     status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2121 #endif
2122
2123         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2124             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2125                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2126                 complete(&ha->mbx_intr_comp);
2127         }
2128         return IRQ_HANDLED;
2129 }
2130
2131 irqreturn_t
2132 qla82xx_msix_default(int irq, void *dev_id)
2133 {
2134         scsi_qla_host_t *vha;
2135         struct qla_hw_data *ha;
2136         struct rsp_que *rsp;
2137         struct device_reg_82xx __iomem *reg;
2138         int status = 0;
2139         unsigned long flags;
2140         uint32_t stat = 0;
2141         uint16_t mb[4];
2142
2143         rsp = (struct rsp_que *) dev_id;
2144         if (!rsp) {
2145                 printk(KERN_INFO
2146                         "%s(): NULL response queue pointer.\n", __func__);
2147                 return IRQ_NONE;
2148         }
2149         ha = rsp->hw;
2150
2151         reg = &ha->iobase->isp82;
2152
2153         spin_lock_irqsave(&ha->hardware_lock, flags);
2154         vha = pci_get_drvdata(ha->pdev);
2155         do {
2156                 if (RD_REG_DWORD(&reg->host_int)) {
2157                         stat = RD_REG_DWORD(&reg->host_status);
2158
2159                         switch (stat & 0xff) {
2160                         case 0x1:
2161                         case 0x2:
2162                         case 0x10:
2163                         case 0x11:
2164                                 qla82xx_mbx_completion(vha, MSW(stat));
2165                                 status |= MBX_INTERRUPT;
2166                                 break;
2167                         case 0x12:
2168                                 mb[0] = MSW(stat);
2169                                 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2170                                 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2171                                 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2172                                 qla2x00_async_event(vha, rsp, mb);
2173                                 break;
2174                         case 0x13:
2175                                 qla24xx_process_response_queue(vha, rsp);
2176                                 break;
2177                         default:
2178                                 ql_dbg(ql_dbg_async, vha, 0x5041,
2179                                     "Unrecognized interrupt type (%d).\n",
2180                                     stat & 0xff);
2181                                 break;
2182                         }
2183                 }
2184                 WRT_REG_DWORD(&reg->host_int, 0);
2185         } while (0);
2186
2187         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2188
2189 #ifdef QL_DEBUG_LEVEL_17
2190         if (!irq && ha->flags.eeh_busy)
2191                 ql_log(ql_log_warn, vha, 0x5044,
2192                     "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2193                     status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2194 #endif
2195
2196         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2197                 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2198                         set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2199                         complete(&ha->mbx_intr_comp);
2200         }
2201         return IRQ_HANDLED;
2202 }
2203
2204 irqreturn_t
2205 qla82xx_msix_rsp_q(int irq, void *dev_id)
2206 {
2207         scsi_qla_host_t *vha;
2208         struct qla_hw_data *ha;
2209         struct rsp_que *rsp;
2210         struct device_reg_82xx __iomem *reg;
2211
2212         rsp = (struct rsp_que *) dev_id;
2213         if (!rsp) {
2214                 printk(KERN_INFO
2215                         "%s(): NULL response queue pointer.\n", __func__);
2216                 return IRQ_NONE;
2217         }
2218
2219         ha = rsp->hw;
2220         reg = &ha->iobase->isp82;
2221         spin_lock_irq(&ha->hardware_lock);
2222         vha = pci_get_drvdata(ha->pdev);
2223         qla24xx_process_response_queue(vha, rsp);
2224         WRT_REG_DWORD(&reg->host_int, 0);
2225         spin_unlock_irq(&ha->hardware_lock);
2226         return IRQ_HANDLED;
2227 }
2228
2229 void
2230 qla82xx_poll(int irq, void *dev_id)
2231 {
2232         scsi_qla_host_t *vha;
2233         struct qla_hw_data *ha;
2234         struct rsp_que *rsp;
2235         struct device_reg_82xx __iomem *reg;
2236         int status = 0;
2237         uint32_t stat;
2238         uint16_t mb[4];
2239         unsigned long flags;
2240
2241         rsp = (struct rsp_que *) dev_id;
2242         if (!rsp) {
2243                 printk(KERN_INFO
2244                         "%s(): NULL response queue pointer.\n", __func__);
2245                 return;
2246         }
2247         ha = rsp->hw;
2248
2249         reg = &ha->iobase->isp82;
2250         spin_lock_irqsave(&ha->hardware_lock, flags);
2251         vha = pci_get_drvdata(ha->pdev);
2252
2253         if (RD_REG_DWORD(&reg->host_int)) {
2254                 stat = RD_REG_DWORD(&reg->host_status);
2255                 switch (stat & 0xff) {
2256                 case 0x1:
2257                 case 0x2:
2258                 case 0x10:
2259                 case 0x11:
2260                         qla82xx_mbx_completion(vha, MSW(stat));
2261                         status |= MBX_INTERRUPT;
2262                         break;
2263                 case 0x12:
2264                         mb[0] = MSW(stat);
2265                         mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2266                         mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2267                         mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2268                         qla2x00_async_event(vha, rsp, mb);
2269                         break;
2270                 case 0x13:
2271                         qla24xx_process_response_queue(vha, rsp);
2272                         break;
2273                 default:
2274                         ql_dbg(ql_dbg_p3p, vha, 0xb013,
2275                             "Unrecognized interrupt type (%d).\n",
2276                             stat & 0xff);
2277                         break;
2278                 }
2279         }
2280         WRT_REG_DWORD(&reg->host_int, 0);
2281         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2282 }
2283
2284 void
2285 qla82xx_enable_intrs(struct qla_hw_data *ha)
2286 {
2287         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2288         qla82xx_mbx_intr_enable(vha);
2289         spin_lock_irq(&ha->hardware_lock);
2290         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2291         spin_unlock_irq(&ha->hardware_lock);
2292         ha->interrupts_on = 1;
2293 }
2294
2295 void
2296 qla82xx_disable_intrs(struct qla_hw_data *ha)
2297 {
2298         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2299         qla82xx_mbx_intr_disable(vha);
2300         spin_lock_irq(&ha->hardware_lock);
2301         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
2302         spin_unlock_irq(&ha->hardware_lock);
2303         ha->interrupts_on = 0;
2304 }
2305
2306 void qla82xx_init_flags(struct qla_hw_data *ha)
2307 {
2308         struct qla82xx_legacy_intr_set *nx_legacy_intr;
2309
2310         /* ISP 8021 initializations */
2311         rwlock_init(&ha->hw_lock);
2312         ha->qdr_sn_window = -1;
2313         ha->ddr_mn_window = -1;
2314         ha->curr_window = 255;
2315         ha->portnum = PCI_FUNC(ha->pdev->devfn);
2316         nx_legacy_intr = &legacy_intr[ha->portnum];
2317         ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
2318         ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
2319         ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
2320         ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
2321 }
2322
2323 inline void
2324 qla82xx_set_drv_active(scsi_qla_host_t *vha)
2325 {
2326         uint32_t drv_active;
2327         struct qla_hw_data *ha = vha->hw;
2328
2329         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2330
2331         /* If reset value is all FF's, initialize DRV_ACTIVE */
2332         if (drv_active == 0xffffffff) {
2333                 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
2334                         QLA82XX_DRV_NOT_ACTIVE);
2335                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2336         }
2337         drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2338         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2339 }
2340
2341 inline void
2342 qla82xx_clear_drv_active(struct qla_hw_data *ha)
2343 {
2344         uint32_t drv_active;
2345
2346         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2347         drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2348         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2349 }
2350
2351 static inline int
2352 qla82xx_need_reset(struct qla_hw_data *ha)
2353 {
2354         uint32_t drv_state;
2355         int rval;
2356
2357         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2358         rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2359         return rval;
2360 }
2361
2362 static inline void
2363 qla82xx_set_rst_ready(struct qla_hw_data *ha)
2364 {
2365         uint32_t drv_state;
2366         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2367
2368         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2369
2370         /* If reset value is all FF's, initialize DRV_STATE */
2371         if (drv_state == 0xffffffff) {
2372                 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
2373                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2374         }
2375         drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2376         ql_log(ql_log_info, vha, 0x00bb,
2377             "drv_state = 0x%x.\n", drv_state);
2378         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2379 }
2380
2381 static inline void
2382 qla82xx_clear_rst_ready(struct qla_hw_data *ha)
2383 {
2384         uint32_t drv_state;
2385
2386         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2387         drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2388         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2389 }
2390
2391 static inline void
2392 qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2393 {
2394         uint32_t qsnt_state;
2395
2396         qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2397         qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2398         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2399 }
2400
2401 void
2402 qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
2403 {
2404         struct qla_hw_data *ha = vha->hw;
2405         uint32_t qsnt_state;
2406
2407         qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2408         qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2409         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2410 }
2411
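/*
 * Initialize the CRB from ROM, bring QM and CAMRAM out of reset and then
 * load firmware: flash first (unless ql2xfwloadbin == 2 forces the blob),
 * falling back to the request_firmware() image in either FLASH or
 * unified (URI) ROM-image format.
 */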
2412 static int
2413 qla82xx_load_fw(scsi_qla_host_t *vha)
2414 {
2415         int rst;
2416         struct fw_blob *blob;
2417         struct qla_hw_data *ha = vha->hw;
2418
2419         if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2420                 ql_log(ql_log_fatal, vha, 0x009f,
2421                     "Error during CRB initialization.\n");
2422                 return QLA_FUNCTION_FAILED;
2423         }
2424         udelay(500);
2425
2426         /* Bring QM and CAMRAM out of reset */
2427         rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
2428         rst &= ~((1 << 28) | (1 << 24));
2429         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
2430
2431         /*
2432          * FW Load priority:
2433          * 1) Operational firmware residing in flash.
2434          * 2) Firmware via request-firmware interface (.bin file).
2435          */
2436         if (ql2xfwloadbin == 2)
2437                 goto try_blob_fw;
2438
2439         ql_log(ql_log_info, vha, 0x00a0,
2440             "Attempting to load firmware from flash.\n");
2441
2442         if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2443                 ql_log(ql_log_info, vha, 0x00a1,
2444                     "Firmware loaded successfully from flash.\n");
2445                 return QLA_SUCCESS;
2446         } else {
2447                 ql_log(ql_log_warn, vha, 0x0108,
2448                     "Firmware load from flash failed.\n");
2449         }
2450
2451 try_blob_fw:
2452         ql_log(ql_log_info, vha, 0x00a2,
2453             "Attempting to load firmware from blob.\n");
2454
2455         /* Load firmware blob. */
2456         blob = ha->hablob = qla2x00_request_firmware(vha);
2457         if (!blob) {
2458                 ql_log(ql_log_fatal, vha, 0x00a3,
2459                     "Firmware image not present.\n");
2460                 goto fw_load_failed;
2461         }
2462
2463         /* Validating firmware blob */
2464         if (qla82xx_validate_firmware_blob(vha,
2465                 QLA82XX_FLASH_ROMIMAGE)) {
2466                 /* Fallback to URI format */
2467                 if (qla82xx_validate_firmware_blob(vha,
2468                         QLA82XX_UNIFIED_ROMIMAGE)) {
2469                         ql_log(ql_log_fatal, vha, 0x00a4,
2470                             "No valid firmware image found.\n");
2471                         return QLA_FUNCTION_FAILED;
2472                 }
2473         }
2474
2475         if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2476                 ql_log(ql_log_info, vha, 0x00a5,
2477                     "Firmware loaded successfully from binary blob.\n");
2478                 return QLA_SUCCESS;
2479         } else {
2480                 ql_log(ql_log_fatal, vha, 0x00a6,
2481                     "Firmware load failed for binary blob.\n");
2482                 blob->fw = NULL;
2483                 blob = NULL;
2484                 goto fw_load_failed;
2485         }
2486         return QLA_SUCCESS;
2487
2488 fw_load_failed:
2489         return QLA_FUNCTION_FAILED;
2490 }
2491
2492 int
2493 qla82xx_start_firmware(scsi_qla_host_t *vha)
2494 {
2495         int           pcie_cap;
2496         uint16_t      lnk;
2497         struct qla_hw_data *ha = vha->hw;
2498
2499         /* scrub dma mask expansion register */
2500         qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
2501
2502         /* Put both the CMD and RCV Peg state registers back to their
2503          * default state of 0 before resetting the hardware.
2504          */
2505         qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
2506         qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
2507
2508         /* Overwrite stale initialization register values */
2509         qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
2510         qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2511
2512         if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2513                 ql_log(ql_log_fatal, vha, 0x00a7,
2514                     "Error trying to start fw.\n");
2515                 return QLA_FUNCTION_FAILED;
2516         }
2517
2518         /* Handshake with the card before we register the devices. */
2519         if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2520                 ql_log(ql_log_fatal, vha, 0x00aa,
2521                     "Error during card handshake.\n");
2522                 return QLA_FUNCTION_FAILED;
2523         }
2524
2525         /* Negotiated Link width */
2526         pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
2527         pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
2528         ha->link_width = (lnk >> 4) & 0x3f;
2529
2530         /* Synchronize with Receive peg */
2531         return qla82xx_check_rcvpeg_state(ha);
2532 }
2533
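/*
 * Build the data segments for a Command Type 6 IOCB.  Scatter/gather
 * entries are packed into DSD lists taken from the driver's global pool;
 * the first list is referenced directly from the IOCB and each additional
 * list is chained from the tail of the previous one.
 */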
2534 static inline int
2535 qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2536         uint16_t tot_dsds)
2537 {
2538         uint32_t *cur_dsd = NULL;
2539         scsi_qla_host_t *vha;
2540         struct qla_hw_data *ha;
2541         struct scsi_cmnd *cmd;
2542         struct  scatterlist *cur_seg;
2543         uint32_t *dsd_seg;
2544         void *next_dsd;
2545         uint8_t avail_dsds;
2546         uint8_t first_iocb = 1;
2547         uint32_t dsd_list_len;
2548         struct dsd_dma *dsd_ptr;
2549         struct ct6_dsd *ctx;
2550
2551         cmd = sp->cmd;
2552
2553         /* Update entry type to indicate Command Type 3 IOCB */
2554         *((uint32_t *)(&cmd_pkt->entry_type)) =
2555                 __constant_cpu_to_le32(COMMAND_TYPE_6);
2556
2557         /* No data transfer */
2558         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
2559                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
2560                 return 0;
2561         }
2562
2563         vha = sp->fcport->vha;
2564         ha = vha->hw;
2565
2566         /* Set transfer direction */
2567         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2568                 cmd_pkt->control_flags =
2569                     __constant_cpu_to_le16(CF_WRITE_DATA);
2570                 ha->qla_stats.output_bytes += scsi_bufflen(cmd);
2571         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
2572                 cmd_pkt->control_flags =
2573                     __constant_cpu_to_le16(CF_READ_DATA);
2574                 ha->qla_stats.input_bytes += scsi_bufflen(cmd);
2575         }
2576
2577         cur_seg = scsi_sglist(cmd);
2578         ctx = sp->ctx;
2579
2580         while (tot_dsds) {
2581                 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
2582                     QLA_DSDS_PER_IOCB : tot_dsds;
2583                 tot_dsds -= avail_dsds;
2584                 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
2585
2586                 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
2587                     struct dsd_dma, list);
2588                 next_dsd = dsd_ptr->dsd_addr;
2589                 list_del(&dsd_ptr->list);
2590                 ha->gbl_dsd_avail--;
2591                 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
2592                 ctx->dsd_use_cnt++;
2593                 ha->gbl_dsd_inuse++;
2594
2595                 if (first_iocb) {
2596                         first_iocb = 0;
2597                         dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2598                         *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2599                         *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2600                         *dsd_seg++ = cpu_to_le32(dsd_list_len);
2601                 } else {
2602                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2603                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2604                         *cur_dsd++ = cpu_to_le32(dsd_list_len);
2605                 }
2606                 cur_dsd = (uint32_t *)next_dsd;
2607                 while (avail_dsds) {
2608                         dma_addr_t      sle_dma;
2609
2610                         sle_dma = sg_dma_address(cur_seg);
2611                         *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2612                         *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2613                         *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
2614                         cur_seg = sg_next(cur_seg);
2615                         avail_dsds--;
2616                 }
2617         }
2618
2619         /* Null termination */
2620         *cur_dsd++ =  0;
2621         *cur_dsd++ = 0;
2622         *cur_dsd++ = 0;
2623         cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
2624         return 0;
2625 }
2626
2627 /*
2628  * qla82xx_calc_dsd_lists() - Determine the number of DSD lists required
2629  * for Command Type 6.
2630  *
2631  * @dsds: number of data segment descriptors needed
2632  *
2633  * Returns the number of DSD lists needed to store @dsds.
2634  */
2635 inline uint16_t
2636 qla82xx_calc_dsd_lists(uint16_t dsds)
2637 {
2638         uint16_t dsd_lists = 0;
2639
2640         dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
2641         if (dsds % QLA_DSDS_PER_IOCB)
2642                 dsd_lists++;
2643         return dsd_lists;
2644 }
2645
2646 /*
2647  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2648  * @sp: command to send to the ISP
2649  *
2650  * Returns non-zero if a failure occurred, else zero.
2651  */
2652 int
2653 qla82xx_start_scsi(srb_t *sp)
2654 {
2655         int             ret, nseg;
2656         unsigned long   flags;
2657         struct scsi_cmnd *cmd;
2658         uint32_t        *clr_ptr;
2659         uint32_t        index;
2660         uint32_t        handle;
2661         uint16_t        cnt;
2662         uint16_t        req_cnt;
2663         uint16_t        tot_dsds;
2664         struct device_reg_82xx __iomem *reg;
2665         uint32_t dbval;
2666         uint32_t *fcp_dl;
2667         uint8_t additional_cdb_len;
2668         struct ct6_dsd *ctx;
2669         struct scsi_qla_host *vha = sp->fcport->vha;
2670         struct qla_hw_data *ha = vha->hw;
2671         struct req_que *req = NULL;
2672         struct rsp_que *rsp = NULL;
2673         char            tag[2];
2674
2675         /* Setup device pointers. */
2676         ret = 0;
2677         reg = &ha->iobase->isp82;
2678         cmd = sp->cmd;
2679         req = vha->req;
2680         rsp = ha->rsp_q_map[0];
2681
2682         /* So we know we haven't pci_map'ed anything yet */
2683         tot_dsds = 0;
2684
2685         dbval = 0x04 | (ha->portnum << 5);
2686
2687         /* Send marker if required */
2688         if (vha->marker_needed != 0) {
2689                 if (qla2x00_marker(vha, req,
2690                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2691                         ql_log(ql_log_warn, vha, 0x300c,
2692                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2693                         return QLA_FUNCTION_FAILED;
2694                 }
2695                 vha->marker_needed = 0;
2696         }
2697
2698         /* Acquire ring specific lock */
2699         spin_lock_irqsave(&ha->hardware_lock, flags);
2700
2701         /* Check for room in outstanding command list. */
2702         handle = req->current_outstanding_cmd;
2703         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2704                 handle++;
2705                 if (handle == MAX_OUTSTANDING_COMMANDS)
2706                         handle = 1;
2707                 if (!req->outstanding_cmds[handle])
2708                         break;
2709         }
2710         if (index == MAX_OUTSTANDING_COMMANDS)
2711                 goto queuing_error;
2712
2713         /* Map the sg table so we have an accurate count of sg entries needed */
2714         if (scsi_sg_count(cmd)) {
2715                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2716                     scsi_sg_count(cmd), cmd->sc_data_direction);
2717                 if (unlikely(!nseg))
2718                         goto queuing_error;
2719         } else
2720                 nseg = 0;
2721
2722         tot_dsds = nseg;
2723
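        /*
         * Commands needing more than ql2xshiftctondsd data segments are
         * sent as Command Type 6 IOCBs with chained DSD lists; smaller
         * commands use a standard Command Type 7 IOCB.
         */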
2724         if (tot_dsds > ql2xshiftctondsd) {
2725                 struct cmd_type_6 *cmd_pkt;
2726                 uint16_t more_dsd_lists = 0;
2727                 struct dsd_dma *dsd_ptr;
2728                 uint16_t i;
2729
2730                 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2731                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2732                         ql_dbg(ql_dbg_io, vha, 0x300d,
2733                             "Num of DSD lists %d is more than %d for cmd=%p.\n",
2734                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2735                             cmd);
2736                         goto queuing_error;
2737                 }
2738
2739                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2740                         goto sufficient_dsds;
2741                 else
2742                         more_dsd_lists -= ha->gbl_dsd_avail;
2743
2744                 for (i = 0; i < more_dsd_lists; i++) {
2745                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2746                         if (!dsd_ptr) {
2747                                 ql_log(ql_log_fatal, vha, 0x300e,
2748                                     "Failed to allocate memory for dsd_dma "
2749                                     "for cmd=%p.\n", cmd);
2750                                 goto queuing_error;
2751                         }
2752
2753                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2754                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2755                         if (!dsd_ptr->dsd_addr) {
2756                                 kfree(dsd_ptr);
2757                                 ql_log(ql_log_fatal, vha, 0x300f,
2758                                     "Failed to allocate memory for dsd_addr "
2759                                     "for cmd=%p.\n", cmd);
2760                                 goto queuing_error;
2761                         }
2762                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2763                         ha->gbl_dsd_avail++;
2764                 }
2765
2766 sufficient_dsds:
2767                 req_cnt = 1;
2768
2769                 if (req->cnt < (req_cnt + 2)) {
2770                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2771                                 &reg->req_q_out[0]);
2772                         if (req->ring_index < cnt)
2773                                 req->cnt = cnt - req->ring_index;
2774                         else
2775                                 req->cnt = req->length -
2776                                         (req->ring_index - cnt);
2777                 }
2778
2779                 if (req->cnt < (req_cnt + 2))
2780                         goto queuing_error;
2781
2782                 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2783                 if (!sp->ctx) {
2784                         ql_log(ql_log_fatal, vha, 0x3010,
2785                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2786                         goto queuing_error;
2787                 }
2788                 memset(ctx, 0, sizeof(struct ct6_dsd));
2789                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2790                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2791                 if (!ctx->fcp_cmnd) {
2792                         ql_log(ql_log_fatal, vha, 0x3011,
2793                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2794                         goto queuing_error_fcp_cmnd;
2795                 }
2796
2797                 /* Initialize the DSD list and dma handle */
2798                 INIT_LIST_HEAD(&ctx->dsd_list);
2799                 ctx->dsd_use_cnt = 0;
2800
2801                 if (cmd->cmd_len > 16) {
2802                         additional_cdb_len = cmd->cmd_len - 16;
2803                         if ((cmd->cmd_len % 4) != 0) {
2804                                 /* SCSI command bigger than 16 bytes must be
2805                                  * multiple of 4
2806                                  */
2807                                 ql_log(ql_log_warn, vha, 0x3012,
2808                                     "scsi cmd len %d not multiple of 4 "
2809                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2810                                 goto queuing_error_fcp_cmnd;
2811                         }
2812                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2813                 } else {
2814                         additional_cdb_len = 0;
2815                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2816                 }
2817
2818                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2819                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2820
2821                 /* Zero out remaining portion of packet. */
2822                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2823                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2824                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2825                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2826
2827                 /* Set NPORT-ID and LUN number*/
2828                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2829                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2830                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2831                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2832                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2833
2834                 /* Build IOCB segments */
2835                 if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2836                         goto queuing_error_fcp_cmnd;
2837
2838                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2839                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2840
2841                 /*
2842                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2843                  */
2844                 if (scsi_populate_tag_msg(cmd, tag)) {
2845                         switch (tag[0]) {
2846                         case HEAD_OF_QUEUE_TAG:
2847                                 ctx->fcp_cmnd->task_attribute =
2848                                     TSK_HEAD_OF_QUEUE;
2849                                 break;
2850                         case ORDERED_QUEUE_TAG:
2851                                 ctx->fcp_cmnd->task_attribute =
2852                                     TSK_ORDERED;
2853                                 break;
2854                         }
2855                 }
2856
2857                 /* build FCP_CMND IU */
2858                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2859                 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2860                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2861
2862                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2863                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2864                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2865                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2866
2867                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2868
2869                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2870                     additional_cdb_len);
2871                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2872
2873                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2874                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2875                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2876                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2877                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2878
2879                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2880                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2881                 /* Set total data segment count. */
2882                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2883                 /* Specify response queue number where
2884                  * completion should happen
2885                  */
2886                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2887         } else {
2888                 struct cmd_type_7 *cmd_pkt;
2889                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2890                 if (req->cnt < (req_cnt + 2)) {
2891                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2892                             &reg->req_q_out[0]);
2893                         if (req->ring_index < cnt)
2894                                 req->cnt = cnt - req->ring_index;
2895                         else
2896                                 req->cnt = req->length -
2897                                         (req->ring_index - cnt);
2898                 }
2899                 if (req->cnt < (req_cnt + 2))
2900                         goto queuing_error;
2901
2902                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2903                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2904
2905                 /* Zero out remaining portion of packet. */
2906                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2907                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2908                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2909                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2910
2911                 /* Set NPORT-ID and LUN number*/
2912                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2913                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2914                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2915                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2916                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2917
2918                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2919                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2920                         sizeof(cmd_pkt->lun));
2921
2922                 /*
2923                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2924                  */
2925                 if (scsi_populate_tag_msg(cmd, tag)) {
2926                         switch (tag[0]) {
2927                         case HEAD_OF_QUEUE_TAG:
2928                                 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2929                                 break;
2930                         case ORDERED_QUEUE_TAG:
2931                                 cmd_pkt->task = TSK_ORDERED;
2932                                 break;
2933                         }
2934                 }
2935
2936                 /* Load SCSI command packet. */
2937                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2938                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2939
2940                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2941
2942                 /* Build IOCB segments */
2943                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2944
2945                 /* Set total data segment count. */
2946                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2947                 /* Specify response queue number where
2948                  * completion should happen.
2949                  */
2950                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2951
2952         }
2953         /* Build command packet. */
2954         req->current_outstanding_cmd = handle;
2955         req->outstanding_cmds[handle] = sp;
2956         sp->handle = handle;
2957         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2958         req->cnt -= req_cnt;
2959         wmb();
2960
2961         /* Adjust ring index. */
2962         req->ring_index++;
2963         if (req->ring_index == req->length) {
2964                 req->ring_index = 0;
2965                 req->ring_ptr = req->ring;
2966         } else
2967                 req->ring_ptr++;
2968
2969         sp->flags |= SRB_DMA_VALID;
2970
2971         /* Set the new chip ring index. */
2972         /* Write the doorbell, then read it back and verify. */
2973         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2974         if (ql2xdbwr)
2975                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2976         else {
2977                 WRT_REG_DWORD(
2978                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2979                         dbval);
2980                 wmb();
2981                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2982                         WRT_REG_DWORD(
2983                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2984                                 dbval);
2985                         wmb();
2986                 }
2987         }
2988
2989         /* Manage unprocessed RIO/ZIO commands in response queue. */
2990         if (vha->flags.process_response_queue &&
2991             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2992                 qla24xx_process_response_queue(vha, rsp);
2993
2994         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2995         return QLA_SUCCESS;
2996
2997 queuing_error_fcp_cmnd:
2998         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2999 queuing_error:
3000         if (tot_dsds)
3001                 scsi_dma_unmap(cmd);
3002
3003         if (sp->ctx) {
3004                 mempool_free(sp->ctx, ha->ctx_mempool);
3005                 sp->ctx = NULL;
3006         }
3007         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3008
3009         return QLA_FUNCTION_FAILED;
3010 }
3011
3012 static uint32_t *
3013 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
3014         uint32_t length)
3015 {
3016         uint32_t i;
3017         uint32_t val;
3018         struct qla_hw_data *ha = vha->hw;
3019
3020         /* Dword reads from flash. */
3021         for (i = 0; i < length/4; i++, faddr += 4) {
3022                 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
3023                         ql_log(ql_log_warn, vha, 0x0106,
3024                             "Do ROM fast read failed.\n");
3025                         goto done_read;
3026                 }
3027                 dwptr[i] = cpu_to_le32(val);
3028         }
3029 done_read:
3030         return dwptr;
3031 }
3032
3033 static int
3034 qla82xx_unprotect_flash(struct qla_hw_data *ha)
3035 {
3036         int ret;
3037         uint32_t val;
3038         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3039
3040         ret = ql82xx_rom_lock_d(ha);
3041         if (ret < 0) {
3042                 ql_log(ql_log_warn, vha, 0xb014,
3043                     "ROM Lock failed.\n");
3044                 return ret;
3045         }
3046
3047         ret = qla82xx_read_status_reg(ha, &val);
3048         if (ret < 0)
3049                 goto done_unprotect;
3050
3051         val &= ~(BLOCK_PROTECT_BITS << 2);
3052         ret = qla82xx_write_status_reg(ha, val);
3053         if (ret < 0) {
3054                 val |= (BLOCK_PROTECT_BITS << 2);
3055                 qla82xx_write_status_reg(ha, val);
3056         }
3057
3058         if (qla82xx_write_disable_flash(ha) != 0)
3059                 ql_log(ql_log_warn, vha, 0xb015,
3060                     "Write disable failed.\n");
3061
3062 done_unprotect:
3063         qla82xx_rom_unlock(ha);
3064         return ret;
3065 }
3066
3067 static int
3068 qla82xx_protect_flash(struct qla_hw_data *ha)
3069 {
3070         int ret;
3071         uint32_t val;
3072         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3073
3074         ret = ql82xx_rom_lock_d(ha);
3075         if (ret < 0) {
3076                 ql_log(ql_log_warn, vha, 0xb016,
3077                     "ROM Lock failed.\n");
3078                 return ret;
3079         }
3080
3081         ret = qla82xx_read_status_reg(ha, &val);
3082         if (ret < 0)
3083                 goto done_protect;
3084
3085         val |= (BLOCK_PROTECT_BITS << 2);
3086         /* LOCK all sectors */
3087         ret = qla82xx_write_status_reg(ha, val);
3088         if (ret < 0)
3089                 ql_log(ql_log_warn, vha, 0xb017,
3090                     "Write status register failed.\n");
3091
3092         if (qla82xx_write_disable_flash(ha) != 0)
3093                 ql_log(ql_log_warn, vha, 0xb018,
3094                     "Write disable failed.\n");
3095 done_protect:
3096         qla82xx_rom_unlock(ha);
3097         return ret;
3098 }
3099
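/*
 * qla82xx_erase_sector
 *    Erase the flash sector containing 'addr': take the ROM lock, enable
 *    writes, load the 3-byte sector address, issue the M25P sector-erase
 *    opcode and poll until the part reports that the operation finished.
 */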
3100 static int
3101 qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3102 {
3103         int ret = 0;
3104         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3105
3106         ret = ql82xx_rom_lock_d(ha);
3107         if (ret < 0) {
3108                 ql_log(ql_log_warn, vha, 0xb019,
3109                     "ROM Lock failed.\n");
3110                 return ret;
3111         }
3112
3113         qla82xx_flash_set_write_enable(ha);
3114         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
3115         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
3116         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
3117
3118         if (qla82xx_wait_rom_done(ha)) {
3119                 ql_log(ql_log_warn, vha, 0xb01a,
3120                     "Error waiting for rom done.\n");
3121                 ret = -1;
3122                 goto done;
3123         }
3124         ret = qla82xx_flash_wait_write_finish(ha);
3125 done:
3126         qla82xx_rom_unlock(ha);
3127         return ret;
3128 }
3129
3130 /*
3131  * Address and length are in bytes.
3132  */
3133 uint8_t *
3134 qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3135         uint32_t offset, uint32_t length)
3136 {
3137         scsi_block_requests(vha->host);
3138         qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
3139         scsi_unblock_requests(vha->host);
3140         return buf;
3141 }
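/*
 * Example use of qla82xx_read_optrom_data (a hedged sketch; the buffer
 * size and offset below are illustrative only, not values used by this
 * driver):
 *
 *      uint8_t *buf = kmalloc(0x1000, GFP_KERNEL);
 *
 *      if (buf)
 *              qla82xx_read_optrom_data(vha, buf, 0, 0x1000);
 */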
3142
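/*
 * qla82xx_write_flash_data
 *    Program 'dwords' 32-bit words starting at flash byte address 'faddr':
 *    unprotect the flash, erase each sector as its first word is reached,
 *    burst-write through load-ram where a DMA buffer is available (falling
 *    back to per-dword programming), then re-protect the flash.
 */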
3143 static int
3144 qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3145         uint32_t faddr, uint32_t dwords)
3146 {
3147         int ret;
3148         uint32_t liter;
3149         uint32_t sec_mask, rest_addr;
3150         dma_addr_t optrom_dma;
3151         void *optrom = NULL;
3152         int page_mode = 0;
3153         struct qla_hw_data *ha = vha->hw;
3154
3155         ret = -1;
3156
3157         /* Prepare burst-capable write on supported ISPs. */
3158         if (page_mode && !(faddr & 0xfff) &&
3159             dwords > OPTROM_BURST_DWORDS) {
3160                 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
3161                     &optrom_dma, GFP_KERNEL);
3162                 if (!optrom) {
3163                         ql_log(ql_log_warn, vha, 0xb01b,
3164                             "Unable to allocate memory "
3165                             "for optron burst write (%x KB).\n",
3166                             OPTROM_BURST_SIZE / 1024);
3167                 }
3168         }
3169
3170         rest_addr = ha->fdt_block_size - 1;
3171         sec_mask = ~rest_addr;
3172
3173         ret = qla82xx_unprotect_flash(ha);
3174         if (ret) {
3175                 ql_log(ql_log_warn, vha, 0xb01c,
3176                     "Unable to unprotect flash for update.\n");
3177                 goto write_done;
3178         }
3179
3180         for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3181                 /* Are we at the beginning of a sector? */
3182                 if ((faddr & rest_addr) == 0) {
3183
3184                         ret = qla82xx_erase_sector(ha, faddr);
3185                         if (ret) {
3186                                 ql_log(ql_log_warn, vha, 0xb01d,
3187                                     "Unable to erase sector: address=%x.\n",
3188                                     faddr);
3189                                 break;
3190                         }
3191                 }
3192
3193                 /* Go with burst-write. */
3194                 if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
3195                         /* Copy data to DMA'ble buffer. */
3196                         memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
3197
3198                         ret = qla2x00_load_ram(vha, optrom_dma,
3199                             (ha->flash_data_off | faddr),
3200                             OPTROM_BURST_DWORDS);
3201                         if (ret != QLA_SUCCESS) {
3202                                 ql_log(ql_log_warn, vha, 0xb01e,
3203                                     "Unable to burst-write optrom segment "
3204                                     "(%x/%x/%llx).\n", ret,
3205                                     (ha->flash_data_off | faddr),
3206                                     (unsigned long long)optrom_dma);
3207                                 ql_log(ql_log_warn, vha, 0xb01f,
3208                                     "Reverting to slow-write.\n");
3209
3210                                 dma_free_coherent(&ha->pdev->dev,
3211                                     OPTROM_BURST_SIZE, optrom, optrom_dma);
3212                                 optrom = NULL;
3213                         } else {
3214                                 liter += OPTROM_BURST_DWORDS - 1;
3215                                 faddr += OPTROM_BURST_DWORDS - 1;
3216                                 dwptr += OPTROM_BURST_DWORDS - 1;
3217                                 continue;
3218                         }
3219                 }
3220
3221                 ret = qla82xx_write_flash_dword(ha, faddr,
3222                     cpu_to_le32(*dwptr));
3223                 if (ret) {
3224                         ql_dbg(ql_dbg_p3p, vha, 0xb020,
3225                             "Unable to program flash address=%x data=%x.\n",
3226                             faddr, *dwptr);
3227                         break;
3228                 }
3229         }
3230
3231         ret = qla82xx_protect_flash(ha);
3232         if (ret)
3233                 ql_log(ql_log_warn, vha, 0xb021,
3234                     "Unable to protect flash after update.\n");
3235 write_done:
3236         if (optrom)
3237                 dma_free_coherent(&ha->pdev->dev,
3238                     OPTROM_BURST_SIZE, optrom, optrom_dma);
3239         return ret;
3240 }
3241
3242 int
3243 qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3244         uint32_t offset, uint32_t length)
3245 {
3246         int rval;
3247
3248         /* Suspend HBA. */
3249         scsi_block_requests(vha->host);
3250         rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
3251                 length >> 2);
3252         scsi_unblock_requests(vha->host);
3253
3254         /* Convert ISP82xx return code to generic. */
3255         if (rval)
3256                 rval = QLA_FUNCTION_FAILED;
3257         else
3258                 rval = QLA_SUCCESS;
3259         return rval;
3260 }
3261
3262 void
3263 qla82xx_start_iocbs(srb_t *sp)
3264 {
3265         struct qla_hw_data *ha = sp->fcport->vha->hw;
3266         struct req_que *req = ha->req_q_map[0];
3267         struct device_reg_82xx __iomem *reg;
3268         uint32_t dbval;
3269
3270         /* Adjust ring index. */
3271         req->ring_index++;
3272         if (req->ring_index == req->length) {
3273                 req->ring_index = 0;
3274                 req->ring_ptr = req->ring;
3275         } else
3276                 req->ring_ptr++;
3277
3278         reg = &ha->iobase->isp82;
3279         dbval = 0x04 | (ha->portnum << 5);
3280
3281         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3282         if (ql2xdbwr)
3283                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
3284         else {
3285                 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
3286                 wmb();
3287                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3288                         WRT_REG_DWORD((unsigned long  __iomem *)ha->nxdb_wr_ptr,
3289                                 dbval);
3290                         wmb();
3291                 }
3292         }
3293 }
3294
3295 void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3296 {
3297         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3298
3299         if (qla82xx_rom_lock(ha))
3300                 /* Someone else is holding the lock. */
3301                 ql_log(ql_log_info, vha, 0xb022,
3302                     "Resetting rom_lock.\n");
3303
3304         /*
3305          * Either we got the lock, or someone
3306          * else died while holding it.
3307          * In either case, unlock.
3308          */
3309         qla82xx_rom_unlock(ha);
3310 }
3311
3312 /*
3313  * qla82xx_device_bootstrap
3314  *    Initialize device, set DEV_READY, start fw
3315  *
3316  * Note:
3317  *      IDC lock must be held upon entry
3318  *
3319  * Return:
3320  *    Success : 0
3321  *    Failed  : 1
3322  */
3323 static int
3324 qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3325 {
3326         int rval = QLA_SUCCESS;
3327         int i, timeout;
3328         uint32_t old_count, count;
3329         struct qla_hw_data *ha = vha->hw;
3330         int need_reset = 0, peg_stuck = 1;
3331
3332         need_reset = qla82xx_need_reset(ha);
3333
3334         old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3335
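        /*
         * Sample the PEG alive counter ten times, roughly 200ms apart.  If
         * it never advances over that window, assume the firmware
         * processors are stuck (peg_stuck stays set) and a ROM-lock
         * recovery may be needed before bootstrapping.
         */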
3336         for (i = 0; i < 10; i++) {
3337                 timeout = msleep_interruptible(200);
3338                 if (timeout) {
3339                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3340                                 QLA82XX_DEV_FAILED);
3341                         return QLA_FUNCTION_FAILED;
3342                 }
3343
3344                 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3345                 if (count != old_count)
3346                         peg_stuck = 0;
3347         }
3348
3349         if (need_reset) {
3350                 /* We are trying to perform a recovery here. */
3351                 if (peg_stuck)
3352                         qla82xx_rom_lock_recovery(ha);
3353                 goto dev_initialize;
3354         } else  {
3355                 /* Start of day for this ha context. */
3356                 if (peg_stuck) {
3357                         /* Either we are the first or recovery in progress. */
3358                         qla82xx_rom_lock_recovery(ha);
3359                         goto dev_initialize;
3360                 } else
3361                         /* Firmware already running. */
3362                         goto dev_ready;
3363         }
3364
3365         return rval;
3366
3367 dev_initialize:
3368         /* set to DEV_INITIALIZING */
3369         ql_log(ql_log_info, vha, 0x009e,
3370             "HW State: INITIALIZING.\n");
3371         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
3372
3373         /* Driver that sets device state to initializing sets IDC version */
3374         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
3375
3376         qla82xx_idc_unlock(ha);
3377         rval = qla82xx_start_firmware(vha);
3378         qla82xx_idc_lock(ha);
3379
3380         if (rval != QLA_SUCCESS) {
3381                 ql_log(ql_log_fatal, vha, 0x00ad,
3382                     "HW State: FAILED.\n");
3383                 qla82xx_clear_drv_active(ha);
3384                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
3385                 return rval;
3386         }
3387
3388 dev_ready:
3389         ql_log(ql_log_info, vha, 0x00ae,
3390             "HW State: READY.\n");
3391         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
3392
3393         return QLA_SUCCESS;
3394 }
3395
3396 /*
3397  * qla82xx_need_qsnt_handler
3398  *    Code to start quiescence sequence
3399  *
3400  * Note:
3401  *      IDC lock must be held upon entry
3402  *
3403  * Return: void
3404  */
3405
3406 static void
3407 qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3408 {
3409         struct qla_hw_data *ha = vha->hw;
3410         uint32_t dev_state, drv_state, drv_active;
3411         unsigned long reset_timeout;
3412
3413         if (vha->flags.online) {
3414                 /* Block any further I/O and wait for pending commands to complete. */
3415                 qla82xx_quiescent_state_cleanup(vha);
3416         }
3417
3418         /* Set the quiescence ready bit */
3419         qla82xx_set_qsnt_ready(ha);
3420
3421         /* Wait up to 30 seconds for the other functions to ack. */
3422         reset_timeout = jiffies + (30 * HZ);
3423
3424         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3425         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3426         /* An ack writes 2 (one bit left of the active bit), so shift drv_active by one to compare. */
3427         drv_active = drv_active << 0x01;
3428
3429         while (drv_state != drv_active) {
3430
3431                 if (time_after_eq(jiffies, reset_timeout)) {
3432                         /* Quiescence timed out: other functions did not
3433                          * ack, so restore the state to DEV_READY.
3434                          */
3435                         ql_log(ql_log_info, vha, 0xb023,
3436                             "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
3437                         ql_log(ql_log_info, vha, 0xb024,
3438                             "DRV_ACTIVE:%d DRV_STATE:%d.\n",
3439                             drv_active, drv_state);
3440                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3441                             QLA82XX_DEV_READY);
3442                         ql_log(ql_log_info, vha, 0xb025,
3443                             "HW State: DEV_READY.\n");
3444                         qla82xx_idc_unlock(ha);
3445                         qla2x00_perform_loop_resync(vha);
3446                         qla82xx_idc_lock(ha);
3447
3448                         qla82xx_clear_qsnt_ready(vha);
3449                         return;
3450                 }
3451
3452                 qla82xx_idc_unlock(ha);
3453                 msleep(1000);
3454                 qla82xx_idc_lock(ha);
3455
3456                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3457                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3458                 drv_active = drv_active << 0x01;
3459         }
3460         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3461         /* Everyone acked, so set the state to DEV_QUIESCENT. */
3462         if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
3463                 ql_log(ql_log_info, vha, 0xb026,
3464                     "HW State: DEV_QUIESCENT.\n");
3465                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
3466         }
3467 }
3468
3469 /*
3470  * qla82xx_wait_for_state_change
3471  *    Wait for device state to change from given current state
3472  *
3473  * Note:
3474  *     IDC lock must not be held upon entry
3475  *
3476  * Return:
3477  *    Changed device state.
3478  */
3479 uint32_t
3480 qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
3481 {
3482         struct qla_hw_data *ha = vha->hw;
3483         uint32_t dev_state;
3484
3485         do {
3486                 msleep(1000);
3487                 qla82xx_idc_lock(ha);
3488                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3489                 qla82xx_idc_unlock(ha);
3490         } while (dev_state == curr_state);
3491
3492         return dev_state;
3493 }
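/*
 * Example use (a hedged sketch only; the caller and the states chosen are
 * illustrative): a function that is not the reset owner can wait for the
 * owner to move the device out of NEED_RESET before carrying on.
 *
 *      uint32_t state;
 *
 *      state = qla82xx_wait_for_state_change(vha, QLA82XX_DEV_NEED_RESET);
 *      if (state == QLA82XX_DEV_READY)
 *              (resume normal operation)
 */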
3494
3495 static void
3496 qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3497 {
3498         struct qla_hw_data *ha = vha->hw;
3499
3500         /* Disable the board */
3501         ql_log(ql_log_fatal, vha, 0x00b8,
3502             "Disabling the board.\n");
3503
3504         qla82xx_idc_lock(ha);
3505         qla82xx_clear_drv_active(ha);
3506         qla82xx_idc_unlock(ha);
3507
3508         /* Set DEV_FAILED flag to disable timer */
3509         vha->device_flags |= DFLG_DEV_FAILED;
3510         qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3511         qla2x00_mark_all_devices_lost(vha, 0);
3512         vha->flags.online = 0;
3513         vha->flags.init_done = 0;
3514 }
3515
3516 /*
3517  * qla82xx_need_reset_handler
3518  *    Code to start reset sequence
3519  *
3520  * Note:
3521  *      IDC lock must be held upon entry
3522  *
3523  * Return:
3524  *    Success : 0
3525  *    Failed  : 1
3526  */
3527 static void
3528 qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3529 {
3530         uint32_t dev_state, drv_state, drv_active;
3531         unsigned long reset_timeout;
3532         struct qla_hw_data *ha = vha->hw;
3533         struct req_que *req = ha->req_q_map[0];
3534
3535         if (vha->flags.online) {
3536                 qla82xx_idc_unlock(ha);
3537                 qla2x00_abort_isp_cleanup(vha);
3538                 ha->isp_ops->get_flash_version(vha, req->ring);
3539                 ha->isp_ops->nvram_config(vha);
3540                 qla82xx_idc_lock(ha);
3541         }
3542
3543         qla82xx_set_rst_ready(ha);
3544
3545         /* Wait up to ha->nx_reset_timeout seconds for a reset ack from all functions. */
3546         reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
3547
3548         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3549         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3550
3551         while (drv_state != drv_active) {
3552                 if (time_after_eq(jiffies, reset_timeout)) {
3553                         ql_log(ql_log_warn, vha, 0x00b5,
3554                             "Reset timeout.\n");
3555                         break;
3556                 }
3557                 qla82xx_idc_unlock(ha);
3558                 msleep(1000);
3559                 qla82xx_idc_lock(ha);
3560                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3561                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3562         }
3563
3564         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3565         ql_log(ql_log_info, vha, 0x00b6,
3566             "Device state is 0x%x = %s.\n",
3567             dev_state,
3568             dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3569
3570         /* Force to DEV_COLD unless another function has already begun initializing. */
3571         if (dev_state != QLA82XX_DEV_INITIALIZING) {
3572                 ql_log(ql_log_info, vha, 0x00b7,
3573                     "HW State: COLD/RE-INIT.\n");
3574                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3575         }
3576 }
3577
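/*
 * qla82xx_check_fw_alive
 *    Sample the firmware heartbeat counter; the caller is expected to
 *    invoke this roughly once per second (e.g. from the watchdog timer).
 *
 * Return:
 *    0 - heartbeat advancing, or the counter reads 0xffffffff (AER/EEH)
 *    1 - heartbeat unchanged for two consecutive samples (firmware hung)
 */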
3578 int
3579 qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3580 {
3581         uint32_t fw_heartbeat_counter;
3582         int status = 0;
3583
3584         fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
3585                 QLA82XX_PEG_ALIVE_COUNTER);
3586         /* all 0xff, assume AER/EEH in progress, ignore */
3587         if (fw_heartbeat_counter == 0xffffffff) {
3588                 ql_dbg(ql_dbg_timer, vha, 0x6003,
3589                     "FW heartbeat counter is 0xffffffff, "
3590                     "returning status=%d.\n", status);
3591                 return status;
3592         }
3593         if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3594                 vha->seconds_since_last_heartbeat++;
3595                 /* FW not alive after 2 seconds */
3596                 if (vha->seconds_since_last_heartbeat == 2) {
3597                         vha->seconds_since_last_heartbeat = 0;
3598                         status = 1;
3599                 }
3600         } else
3601                 vha->seconds_since_last_heartbeat = 0;
3602         vha->fw_heartbeat_counter = fw_heartbeat_counter;
3603         if (status)
3604                 ql_dbg(ql_dbg_timer, vha, 0x6004,
3605                     "Returning status=%d.\n", status);
3606         return status;
3607 }
3608
3609 /*
3610  * qla82xx_device_state_handler
3611  *      Main state handler
3612  *
3613  * Note:
3614  *      IDC lock must be held upon entry
3615  *
3616  * Return:
3617  *    Success : 0
3618  *    Failed  : 1
3619  */
3620 int
3621 qla82xx_device_state_handler(scsi_qla_host_t *vha)
3622 {
3623         uint32_t dev_state;
3624         uint32_t old_dev_state;
3625         int rval = QLA_SUCCESS;
3626         unsigned long dev_init_timeout;
3627         struct qla_hw_data *ha = vha->hw;
3628         int loopcount = 0;
3629
3630         qla82xx_idc_lock(ha);
3631         if (!vha->flags.init_done)
3632                 qla82xx_set_drv_active(vha);
3633
3634         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3635         old_dev_state = dev_state;
3636         ql_log(ql_log_info, vha, 0x009b,
3637             "Device state is 0x%x = %s.\n",
3638             dev_state,
3639             dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3640
3641         /* Wait up to ha->nx_dev_init_timeout seconds for the device to go ready. */
3642         dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
3643
3644         while (1) {
3645
3646                 if (time_after_eq(jiffies, dev_init_timeout)) {
3647                         ql_log(ql_log_fatal, vha, 0x009c,
3648                             "Device init failed.\n");
3649                         rval = QLA_FUNCTION_FAILED;
3650                         break;
3651                 }
3652                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3653                 if (old_dev_state != dev_state) {
3654                         loopcount = 0;
3655                         old_dev_state = dev_state;
3656                 }
3657                 if (loopcount < 5) {
3658                         ql_log(ql_log_info, vha, 0x009d,
3659                             "Device state is 0x%x = %s.\n",
3660                             dev_state,
3661                             dev_state < MAX_STATES ? qdev_state[dev_state] :
3662                             "Unknown");
3663                 }
3664
3665                 switch (dev_state) {
3666                 case QLA82XX_DEV_READY:
3667                         goto exit;
3668                 case QLA82XX_DEV_COLD:
3669                         rval = qla82xx_device_bootstrap(vha);
3670                         goto exit;
3671                 case QLA82XX_DEV_INITIALIZING:
3672                         qla82xx_idc_unlock(ha);
3673                         msleep(1000);
3674                         qla82xx_idc_lock(ha);
3675                         break;
3676                 case QLA82XX_DEV_NEED_RESET:
3677                         if (!ql2xdontresethba)
3678                                 qla82xx_need_reset_handler(vha);
3679                         dev_init_timeout = jiffies +
3680                                 (ha->nx_dev_init_timeout * HZ);
3681                         break;
3682                 case QLA82XX_DEV_NEED_QUIESCENT:
3683                         qla82xx_need_qsnt_handler(vha);
3684                         /* Reset timeout value after quiescence handler */
3685                         dev_init_timeout = jiffies +
3686                             (ha->nx_dev_init_timeout * HZ);
3687                         break;
3688                 case QLA82XX_DEV_QUIESCENT:
3689                         /* The quiesce owner exits; the other functions
3690                          * wait for the state to change.
3691                          */
3692                         if (ha->flags.quiesce_owner)
3693                                 goto exit;
3694
3695                         qla82xx_idc_unlock(ha);
3696                         msleep(1000);
3697                         qla82xx_idc_lock(ha);
3698
3699                         /* Reset timeout value after quiescence handler */
3700                         dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\
3701                                                          * HZ);
3702                         break;
3703                 case QLA82XX_DEV_FAILED:
3704                         qla82xx_dev_failed_handler(vha);
3705                         rval = QLA_FUNCTION_FAILED;
3706                         goto exit;
3707                 default:
3708                         qla82xx_idc_unlock(ha);
3709                         msleep(1000);
3710                         qla82xx_idc_lock(ha);
3711                 }
3712                 loopcount++;
3713         }
3714 exit:
3715         qla82xx_idc_unlock(ha);
3716         return rval;
3717 }
3718
3719 void qla82xx_watchdog(scsi_qla_host_t *vha)
3720 {
3721         uint32_t dev_state, halt_status;
3722         struct qla_hw_data *ha = vha->hw;
3723
3724         /* don't poll if reset is going on */
3725         if (!ha->flags.isp82xx_reset_hdlr_active) {
3726                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3727                 if (dev_state == QLA82XX_DEV_NEED_RESET &&
3728                     !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3729                         ql_log(ql_log_warn, vha, 0x6001,
3730                             "Adapter reset needed.\n");
3731                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3732                         qla2xxx_wake_dpc(vha);
3733                 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
3734                         !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3735                         ql_log(ql_log_warn, vha, 0x6002,
3736                             "Quiescent needed.\n");
3737                         set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3738                         qla2xxx_wake_dpc(vha);
3739                 } else {
3740                         if (qla82xx_check_fw_alive(vha)) {
3741                                 halt_status = qla82xx_rd_32(ha,
3742                                     QLA82XX_PEG_HALT_STATUS1);
3743                                 ql_dbg(ql_dbg_timer, vha, 0x6005,
3744                                     "dumping hw/fw registers:.\n "
3745                                     " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
3746                                     " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
3747                                     " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n "
3748                                     " PEG_NET_4_PC: 0x%x.\n", halt_status,
3749                                     qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
3750                                     qla82xx_rd_32(ha,
3751                                             QLA82XX_CRB_PEG_NET_0 + 0x3c),
3752                                     qla82xx_rd_32(ha,
3753                                             QLA82XX_CRB_PEG_NET_1 + 0x3c),
3754                                     qla82xx_rd_32(ha,
3755                                             QLA82XX_CRB_PEG_NET_2 + 0x3c),
3756                                     qla82xx_rd_32(ha,
3757                                             QLA82XX_CRB_PEG_NET_3 + 0x3c),
3758                                     qla82xx_rd_32(ha,
3759                                             QLA82XX_CRB_PEG_NET_4 + 0x3c));
3760                                 if (halt_status & HALT_STATUS_UNRECOVERABLE) {
3761                                         set_bit(ISP_UNRECOVERABLE,
3762                                             &vha->dpc_flags);
3763                                 } else {
3764                                         ql_log(ql_log_info, vha, 0x6006,
3765                                             "Detect abort  needed.\n");
3766                                         set_bit(ISP_ABORT_NEEDED,
3767                                             &vha->dpc_flags);
3768                                 }
3769                                 qla2xxx_wake_dpc(vha);
3770                                 ha->flags.isp82xx_fw_hung = 1;
3771                                 if (ha->flags.mbox_busy) {
3772                                         ha->flags.mbox_int = 1;
3773                                         ql_log(ql_log_warn, vha, 0x6007,
3774                                             "Due to FW hung, doing "
3775                                             "premature completion of mbx "
3776                                             "command.\n");
3777                                         if (test_bit(MBX_INTR_WAIT,
3778                                             &ha->mbx_cmd_flags))
3779                                                 complete(&ha->mbx_intr_comp);
3780                                 }
3781                         }
3782                 }
3783         }
3784 }
3785
3786 int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3787 {
3788         int rval;
3789         rval = qla82xx_device_state_handler(vha);
3790         return rval;
3791 }
3792
3793 /*
3794  *  qla82xx_abort_isp
3795  *      Resets ISP and aborts all outstanding commands.
3796  *
3797  * Input:
3798  *      vha          = adapter block pointer.
3799  *
3800  * Returns:
3801  *      0 = success
3802  */
3803 int
3804 qla82xx_abort_isp(scsi_qla_host_t *vha)
3805 {
3806         int rval;
3807         struct qla_hw_data *ha = vha->hw;
3808         uint32_t dev_state;
3809
3810         if (vha->device_flags & DFLG_DEV_FAILED) {
3811                 ql_log(ql_log_warn, vha, 0x8024,
3812                     "Device in failed state, exiting.\n");
3813                 return QLA_SUCCESS;
3814         }
3815         ha->flags.isp82xx_reset_hdlr_active = 1;
3816
3817         qla82xx_idc_lock(ha);
3818         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3819         if (dev_state == QLA82XX_DEV_READY) {
3820                 ql_log(ql_log_info, vha, 0x8025,
3821                     "HW State: NEED RESET.\n");
3822                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3823                         QLA82XX_DEV_NEED_RESET);
3824         } else
3825                 ql_log(ql_log_info, vha, 0x8026,
3826                     "Hw State: %s.\n", dev_state < MAX_STATES ?
3827                     qdev_state[dev_state] : "Unknown");
3828         qla82xx_idc_unlock(ha);
3829
3830         rval = qla82xx_device_state_handler(vha);
3831
3832         qla82xx_idc_lock(ha);
3833         qla82xx_clear_rst_ready(ha);
3834         qla82xx_idc_unlock(ha);
3835
3836         if (rval == QLA_SUCCESS) {
3837                 ha->flags.isp82xx_fw_hung = 0;
3838                 ha->flags.isp82xx_reset_hdlr_active = 0;
3839                 qla82xx_restart_isp(vha);
3840         }
3841
3842         if (rval) {
3843                 vha->flags.online = 1;
3844                 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3845                         if (ha->isp_abort_cnt == 0) {
3846                                 ql_log(ql_log_warn, vha, 0x8027,
3847                                     "ISP error recover failed - board "
3848                                     "disabled.\n");
3849                                 /*
3850                                  * The next call disables the board
3851                                  * completely.
3852                                  */
3853                                 ha->isp_ops->reset_adapter(vha);
3854                                 vha->flags.online = 0;
3855                                 clear_bit(ISP_ABORT_RETRY,
3856                                     &vha->dpc_flags);
3857                                 rval = QLA_SUCCESS;
3858                         } else { /* schedule another ISP abort */
3859                                 ha->isp_abort_cnt--;
3860                                 ql_log(ql_log_warn, vha, 0x8036,
3861                                     "ISP abort - retry remaining %d.\n",
3862                                     ha->isp_abort_cnt);
3863                                 rval = QLA_FUNCTION_FAILED;
3864                         }
3865                 } else {
3866                         ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3867                         ql_dbg(ql_dbg_taskm, vha, 0x8029,
3868                             "ISP error recovery - retrying (%d) more times.\n",
3869                             ha->isp_abort_cnt);
3870                         set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3871                         rval = QLA_FUNCTION_FAILED;
3872                 }
3873         }
3874         return rval;
3875 }
3876
3877 /*
3878  *  qla82xx_fcoe_ctx_reset
3879  *      Perform a quick reset and aborts all outstanding commands.
3880  *      This will only perform an FCoE context reset and avoids a full blown
3881  *      chip reset.
3882  *
3883  * Input:
3884  *      vha = adapter block pointer.
3886  *
3887  * Returns:
3888  *      0 = success
3889  */
3890 int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
3891 {
3892         int rval = QLA_FUNCTION_FAILED;
3893
3894         if (vha->flags.online) {
3895                 /* Abort all outstanding commands so they can be requeued later. */
3896                 qla2x00_abort_isp_cleanup(vha);
3897         }
3898
3899         /* Stop currently executing firmware.
3900          * This will destroy existing FCoE context at the F/W end.
3901          */
3902         qla2x00_try_to_stop_firmware(vha);
3903
3904         /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
3905         rval = qla82xx_restart_isp(vha);
3906
3907         return rval;
3908 }
3909
3910 /*
3911  * qla2x00_wait_for_fcoe_ctx_reset
3912  *    Wait till the FCoE context is reset.
3913  *
3914  * Note:
3915  *    Does context switching here.
3916  *    Release SPIN_LOCK (if any) before calling this routine.
3917  *
3918  * Return:
3919  *    Success (fcoe_ctx reset is done) : 0
3920  *    Failed  (fcoe_ctx reset not completed within max loop timeout) : 1
3921  */
3922 int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
3923 {
3924         int status = QLA_FUNCTION_FAILED;
3925         unsigned long wait_reset;
3926
3927         wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
3928         while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3929             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
3930             && time_before(jiffies, wait_reset)) {
3931
3932                 set_current_state(TASK_UNINTERRUPTIBLE);
3933                 schedule_timeout(HZ);
3934
3935                 if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
3936                     !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3937                         status = QLA_SUCCESS;
3938                         break;
3939                 }
3940         }
3941         ql_dbg(ql_dbg_p3p, vha, 0xb027,
3942             "%s status=%d.\n", status);
3943
3944         return status;
3945 }
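/*
 * Example use (a hedged sketch; the surrounding calls are illustrative,
 * not lifted from this driver): a path that schedules an FCoE context
 * reset through the dpc flags can block until the reset is processed.
 *
 *      set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
 *      qla2xxx_wake_dpc(vha);
 *      if (qla2x00_wait_for_fcoe_ctx_reset(vha) != QLA_SUCCESS)
 *              (reset did not complete within MAX_LOOP_TIMEOUT seconds)
 */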
3946
3947 void
3948 qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3949 {
3950         int i;
3951         unsigned long flags;
3952         struct qla_hw_data *ha = vha->hw;
3953
3954         /* Check if 82XX firmware is alive or not
3955          * We may have arrived here from NEED_RESET
3956          * detection only
3957          */
3958         if (!ha->flags.isp82xx_fw_hung) {
3959                 for (i = 0; i < 2; i++) {
3960                         msleep(1000);
3961                         if (qla82xx_check_fw_alive(vha)) {
3962                                 ha->flags.isp82xx_fw_hung = 1;
3963                                 if (ha->flags.mbox_busy) {
3964                                         ha->flags.mbox_int = 1;
3965                                         complete(&ha->mbx_intr_comp);
3966                                 }
3967                                 break;
3968                         }
3969                 }
3970         }
3971         ql_dbg(ql_dbg_init, vha, 0x00b0,
3972             "Entered %s fw_hung=%d.\n",
3973             __func__, ha->flags.isp82xx_fw_hung);
3974
3975         /* Abort all commands gracefully if fw NOT hung */
3976         if (!ha->flags.isp82xx_fw_hung) {
3977                 int cnt, que;
3978                 srb_t *sp;
3979                 struct req_que *req;
3980
3981                 spin_lock_irqsave(&ha->hardware_lock, flags);
3982                 for (que = 0; que < ha->max_req_queues; que++) {
3983                         req = ha->req_q_map[que];
3984                         if (!req)
3985                                 continue;
3986                         for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3987                                 sp = req->outstanding_cmds[cnt];
3988                                 if (sp) {
3989                                         if (!sp->ctx ||
3990                                             (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
3991                                                 spin_unlock_irqrestore(
3992                                                     &ha->hardware_lock, flags);
3993                                                 if (ha->isp_ops->abort_command(sp)) {
3994                                                         ql_log(ql_log_info, vha,
3995                                                             0x00b1,
3996                                                             "mbx abort failed.\n");
3997                                                 } else {
3998                                                         ql_log(ql_log_info, vha,
3999                                                             0x00b2,
4000                                                             "mbx abort success.\n");
4001                                                 }
4002                                                 spin_lock_irqsave(&ha->hardware_lock, flags);
4003                                         }
4004                                 }
4005                         }
4006                 }
4007                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4008
4009                 /* Wait for pending cmds (physical and virtual) to complete */
4010                 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
4011                     WAIT_HOST) == QLA_SUCCESS) {
4012                         ql_dbg(ql_dbg_init, vha, 0x00b3,
4013                             "Done wait for "
4014                             "pending commands.\n");
4015                 }
4016         }
4017 }