drivers/scsi/qla2xxx/qla_nx.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include <linux/delay.h>
9 #include <linux/pci.h>
10 #include <scsi/scsi_tcq.h>
11
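/*
 * Memory-window helpers: MN_WIN maps a DDR (network) address, OCM_WIN an
 * on-chip-memory address and MS_WIN a QDR (network) address to the value
 * programmed into the corresponding window register by
 * qla82xx_pci_set_window() below.
 */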
12 #define MASK(n)                 ((1ULL<<(n))-1)
13 #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
14         ((addr >> 25) & 0x3ff))
15 #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
16         ((addr >> 25) & 0x3ff))
17 #define MS_WIN(addr) (addr & 0x0ffc0000)
18 #define QLA82XX_PCI_MN_2M   (0)
19 #define QLA82XX_PCI_MS_2M   (0x80000)
20 #define QLA82XX_PCI_OCM0_2M (0xc0000)
21 #define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
22 #define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
23 #define BLOCK_PROTECT_BITS 0x0F
24
25 /* CRB window related */
26 #define CRB_BLK(off)    ((off >> 20) & 0x3f)
27 #define CRB_SUBBLK(off) ((off >> 16) & 0xf)
28 #define CRB_WINDOW_2M   (0x130060)
29 #define QLA82XX_PCI_CAMQM_2M_END        (0x04800800UL)
30 #define CRB_HI(off)     ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
31                         ((off) & 0xf0000))
32 #define QLA82XX_PCI_CAMQM_2M_BASE       (0x000ff800UL)
33 #define CRB_INDIRECT_2M (0x1e0000UL)
34
35 #define MAX_CRB_XFORM 60
36 static unsigned long crb_addr_xform[MAX_CRB_XFORM];
37 int qla82xx_crb_table_initialized;
38
39 #define qla82xx_crb_addr_transform(name) \
40         (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
41         QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
42
43 static void qla82xx_crb_addr_transform_setup(void)
44 {
45         qla82xx_crb_addr_transform(XDMA);
46         qla82xx_crb_addr_transform(TIMR);
47         qla82xx_crb_addr_transform(SRE);
48         qla82xx_crb_addr_transform(SQN3);
49         qla82xx_crb_addr_transform(SQN2);
50         qla82xx_crb_addr_transform(SQN1);
51         qla82xx_crb_addr_transform(SQN0);
52         qla82xx_crb_addr_transform(SQS3);
53         qla82xx_crb_addr_transform(SQS2);
54         qla82xx_crb_addr_transform(SQS1);
55         qla82xx_crb_addr_transform(SQS0);
56         qla82xx_crb_addr_transform(RPMX7);
57         qla82xx_crb_addr_transform(RPMX6);
58         qla82xx_crb_addr_transform(RPMX5);
59         qla82xx_crb_addr_transform(RPMX4);
60         qla82xx_crb_addr_transform(RPMX3);
61         qla82xx_crb_addr_transform(RPMX2);
62         qla82xx_crb_addr_transform(RPMX1);
63         qla82xx_crb_addr_transform(RPMX0);
64         qla82xx_crb_addr_transform(ROMUSB);
65         qla82xx_crb_addr_transform(SN);
66         qla82xx_crb_addr_transform(QMN);
67         qla82xx_crb_addr_transform(QMS);
68         qla82xx_crb_addr_transform(PGNI);
69         qla82xx_crb_addr_transform(PGND);
70         qla82xx_crb_addr_transform(PGN3);
71         qla82xx_crb_addr_transform(PGN2);
72         qla82xx_crb_addr_transform(PGN1);
73         qla82xx_crb_addr_transform(PGN0);
74         qla82xx_crb_addr_transform(PGSI);
75         qla82xx_crb_addr_transform(PGSD);
76         qla82xx_crb_addr_transform(PGS3);
77         qla82xx_crb_addr_transform(PGS2);
78         qla82xx_crb_addr_transform(PGS1);
79         qla82xx_crb_addr_transform(PGS0);
80         qla82xx_crb_addr_transform(PS);
81         qla82xx_crb_addr_transform(PH);
82         qla82xx_crb_addr_transform(NIU);
83         qla82xx_crb_addr_transform(I2Q);
84         qla82xx_crb_addr_transform(EG);
85         qla82xx_crb_addr_transform(MN);
86         qla82xx_crb_addr_transform(MS);
87         qla82xx_crb_addr_transform(CAS2);
88         qla82xx_crb_addr_transform(CAS1);
89         qla82xx_crb_addr_transform(CAS0);
90         qla82xx_crb_addr_transform(CAM);
91         qla82xx_crb_addr_transform(C2C1);
92         qla82xx_crb_addr_transform(C2C0);
93         qla82xx_crb_addr_transform(SMB);
94         qla82xx_crb_addr_transform(OCM0);
95         /*
96          * Used only in P3; just define it for P2 also.
97          */
98         qla82xx_crb_addr_transform(I2C0);
99
100         qla82xx_crb_table_initialized = 1;
101 }
102
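/*
 * 128M -> 2M CRB address map: each of the 64 1M CRB blocks is split into
 * sixteen 64K sub-blocks described by {valid, start_128M, end_128M,
 * start_2M}.  qla82xx_pci_get_crb_addr_2M() uses this table to translate
 * a 128M-map CRB offset directly into the 2M BAR where possible.
 */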
103 struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
104         {{{0, 0,         0,         0} } },
105         {{{1, 0x0100000, 0x0102000, 0x120000},
106         {1, 0x0110000, 0x0120000, 0x130000},
107         {1, 0x0120000, 0x0122000, 0x124000},
108         {1, 0x0130000, 0x0132000, 0x126000},
109         {1, 0x0140000, 0x0142000, 0x128000},
110         {1, 0x0150000, 0x0152000, 0x12a000},
111         {1, 0x0160000, 0x0170000, 0x110000},
112         {1, 0x0170000, 0x0172000, 0x12e000},
113         {0, 0x0000000, 0x0000000, 0x000000},
114         {0, 0x0000000, 0x0000000, 0x000000},
115         {0, 0x0000000, 0x0000000, 0x000000},
116         {0, 0x0000000, 0x0000000, 0x000000},
117         {0, 0x0000000, 0x0000000, 0x000000},
118         {0, 0x0000000, 0x0000000, 0x000000},
119         {1, 0x01e0000, 0x01e0800, 0x122000},
120         {0, 0x0000000, 0x0000000, 0x000000} } } ,
121         {{{1, 0x0200000, 0x0210000, 0x180000} } },
122         {{{0, 0,         0,         0} } },
123         {{{1, 0x0400000, 0x0401000, 0x169000} } },
124         {{{1, 0x0500000, 0x0510000, 0x140000} } },
125         {{{1, 0x0600000, 0x0610000, 0x1c0000} } },
126         {{{1, 0x0700000, 0x0704000, 0x1b8000} } },
127         {{{1, 0x0800000, 0x0802000, 0x170000},
128         {0, 0x0000000, 0x0000000, 0x000000},
129         {0, 0x0000000, 0x0000000, 0x000000},
130         {0, 0x0000000, 0x0000000, 0x000000},
131         {0, 0x0000000, 0x0000000, 0x000000},
132         {0, 0x0000000, 0x0000000, 0x000000},
133         {0, 0x0000000, 0x0000000, 0x000000},
134         {0, 0x0000000, 0x0000000, 0x000000},
135         {0, 0x0000000, 0x0000000, 0x000000},
136         {0, 0x0000000, 0x0000000, 0x000000},
137         {0, 0x0000000, 0x0000000, 0x000000},
138         {0, 0x0000000, 0x0000000, 0x000000},
139         {0, 0x0000000, 0x0000000, 0x000000},
140         {0, 0x0000000, 0x0000000, 0x000000},
141         {0, 0x0000000, 0x0000000, 0x000000},
142         {1, 0x08f0000, 0x08f2000, 0x172000} } },
143         {{{1, 0x0900000, 0x0902000, 0x174000},
144         {0, 0x0000000, 0x0000000, 0x000000},
145         {0, 0x0000000, 0x0000000, 0x000000},
146         {0, 0x0000000, 0x0000000, 0x000000},
147         {0, 0x0000000, 0x0000000, 0x000000},
148         {0, 0x0000000, 0x0000000, 0x000000},
149         {0, 0x0000000, 0x0000000, 0x000000},
150         {0, 0x0000000, 0x0000000, 0x000000},
151         {0, 0x0000000, 0x0000000, 0x000000},
152         {0, 0x0000000, 0x0000000, 0x000000},
153         {0, 0x0000000, 0x0000000, 0x000000},
154         {0, 0x0000000, 0x0000000, 0x000000},
155         {0, 0x0000000, 0x0000000, 0x000000},
156         {0, 0x0000000, 0x0000000, 0x000000},
157         {0, 0x0000000, 0x0000000, 0x000000},
158         {1, 0x09f0000, 0x09f2000, 0x176000} } },
159         {{{0, 0x0a00000, 0x0a02000, 0x178000},
160         {0, 0x0000000, 0x0000000, 0x000000},
161         {0, 0x0000000, 0x0000000, 0x000000},
162         {0, 0x0000000, 0x0000000, 0x000000},
163         {0, 0x0000000, 0x0000000, 0x000000},
164         {0, 0x0000000, 0x0000000, 0x000000},
165         {0, 0x0000000, 0x0000000, 0x000000},
166         {0, 0x0000000, 0x0000000, 0x000000},
167         {0, 0x0000000, 0x0000000, 0x000000},
168         {0, 0x0000000, 0x0000000, 0x000000},
169         {0, 0x0000000, 0x0000000, 0x000000},
170         {0, 0x0000000, 0x0000000, 0x000000},
171         {0, 0x0000000, 0x0000000, 0x000000},
172         {0, 0x0000000, 0x0000000, 0x000000},
173         {0, 0x0000000, 0x0000000, 0x000000},
174         {1, 0x0af0000, 0x0af2000, 0x17a000} } },
175         {{{0, 0x0b00000, 0x0b02000, 0x17c000},
176         {0, 0x0000000, 0x0000000, 0x000000},
177         {0, 0x0000000, 0x0000000, 0x000000},
178         {0, 0x0000000, 0x0000000, 0x000000},
179         {0, 0x0000000, 0x0000000, 0x000000},
180         {0, 0x0000000, 0x0000000, 0x000000},
181         {0, 0x0000000, 0x0000000, 0x000000},
182         {0, 0x0000000, 0x0000000, 0x000000},
183         {0, 0x0000000, 0x0000000, 0x000000},
184         {0, 0x0000000, 0x0000000, 0x000000},
185         {0, 0x0000000, 0x0000000, 0x000000},
186         {0, 0x0000000, 0x0000000, 0x000000},
187         {0, 0x0000000, 0x0000000, 0x000000},
188         {0, 0x0000000, 0x0000000, 0x000000},
189         {0, 0x0000000, 0x0000000, 0x000000},
190         {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
191         {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
192         {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
193         {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
194         {{{1, 0x0f00000, 0x0f01000, 0x164000} } },
195         {{{0, 0x1000000, 0x1004000, 0x1a8000} } },
196         {{{1, 0x1100000, 0x1101000, 0x160000} } },
197         {{{1, 0x1200000, 0x1201000, 0x161000} } },
198         {{{1, 0x1300000, 0x1301000, 0x162000} } },
199         {{{1, 0x1400000, 0x1401000, 0x163000} } },
200         {{{1, 0x1500000, 0x1501000, 0x165000} } },
201         {{{1, 0x1600000, 0x1601000, 0x166000} } },
202         {{{0, 0,         0,         0} } },
203         {{{0, 0,         0,         0} } },
204         {{{0, 0,         0,         0} } },
205         {{{0, 0,         0,         0} } },
206         {{{0, 0,         0,         0} } },
207         {{{0, 0,         0,         0} } },
208         {{{1, 0x1d00000, 0x1d10000, 0x190000} } },
209         {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
210         {{{1, 0x1f00000, 0x1f10000, 0x150000} } },
211         {{{0} } },
212         {{{1, 0x2100000, 0x2102000, 0x120000},
213         {1, 0x2110000, 0x2120000, 0x130000},
214         {1, 0x2120000, 0x2122000, 0x124000},
215         {1, 0x2130000, 0x2132000, 0x126000},
216         {1, 0x2140000, 0x2142000, 0x128000},
217         {1, 0x2150000, 0x2152000, 0x12a000},
218         {1, 0x2160000, 0x2170000, 0x110000},
219         {1, 0x2170000, 0x2172000, 0x12e000},
220         {0, 0x0000000, 0x0000000, 0x000000},
221         {0, 0x0000000, 0x0000000, 0x000000},
222         {0, 0x0000000, 0x0000000, 0x000000},
223         {0, 0x0000000, 0x0000000, 0x000000},
224         {0, 0x0000000, 0x0000000, 0x000000},
225         {0, 0x0000000, 0x0000000, 0x000000},
226         {0, 0x0000000, 0x0000000, 0x000000},
227         {0, 0x0000000, 0x0000000, 0x000000} } },
228         {{{1, 0x2200000, 0x2204000, 0x1b0000} } },
229         {{{0} } },
230         {{{0} } },
231         {{{0} } },
232         {{{0} } },
233         {{{0} } },
234         {{{1, 0x2800000, 0x2804000, 0x1a4000} } },
235         {{{1, 0x2900000, 0x2901000, 0x16b000} } },
236         {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
237         {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
238         {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
239         {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
240         {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
241         {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
242         {{{1, 0x3000000, 0x3000400, 0x1adc00} } },
243         {{{0, 0x3100000, 0x3104000, 0x1a8000} } },
244         {{{1, 0x3200000, 0x3204000, 0x1d4000} } },
245         {{{1, 0x3300000, 0x3304000, 0x1a0000} } },
246         {{{0} } },
247         {{{1, 0x3500000, 0x3500400, 0x1ac000} } },
248         {{{1, 0x3600000, 0x3600400, 0x1ae000} } },
249         {{{1, 0x3700000, 0x3700400, 0x1ae400} } },
250         {{{1, 0x3800000, 0x3804000, 0x1d0000} } },
251         {{{1, 0x3900000, 0x3904000, 0x1b4000} } },
252         {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
253         {{{0} } },
254         {{{0} } },
255         {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
256         {{{1, 0x3e00000, 0x3e01000, 0x167000} } },
257         {{{1, 0x3f00000, 0x3f01000, 0x168000} } }
258 };
259
260 /*
261  * top 12 bits of crb internal address (hub, agent)
262  */
263 unsigned qla82xx_crb_hub_agt[64] = {
264         0,
265         QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
266         QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
267         QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
268         0,
269         QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
270         QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
271         QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
272         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
273         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
274         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
275         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
276         QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
277         QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
278         QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
279         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
280         QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
281         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
282         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
283         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
284         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
285         QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
286         QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
287         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
288         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
289         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
290         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
291         0,
292         QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
293         QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
294         0,
295         QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
296         0,
297         QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
298         QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
299         0,
300         0,
301         0,
302         0,
303         0,
304         QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
305         0,
306         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
307         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
308         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
309         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
310         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
311         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
312         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
313         QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
314         QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
315         QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
316         0,
317         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
318         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
319         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
320         QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
321         0,
322         QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
323         QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
324         QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
325         0,
326         QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
327         0,
328 };
329
330 /* Device states */
331 char *qdev_state[] = {
332         "Unknown",
333         "Cold",
334         "Initializing",
335         "Ready",
336         "Need Reset",
337         "Need Quiescent",
338         "Failed",
339         "Quiescent",
340 };
341
342 /*
343  * In: 'off' is offset from CRB space in 128M pci map
344  * Out: 'off' is 2M pci map addr
345  * side effect: lock crb window
346  */
347 static void
348 qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
349 {
350         u32 win_read;
351         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
352
353         ha->crb_win = CRB_HI(*off);
354         writel(ha->crb_win,
355                 (void *)(CRB_WINDOW_2M + ha->nx_pcibase));
356
357         /* Read back value to make sure write has gone through before trying
358          * to use it.
359          */
360         win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
361         if (win_read != ha->crb_win) {
362                 ql_dbg(ql_dbg_p3p, vha, 0xb000,
363                     "%s: Written crbwin (0x%x) "
364                     "!= Read crbwin (0x%x), off=0x%lx.\n",
365                     __func__, ha->crb_win, win_read, *off);
366         }
367         *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
368 }
369
370 static inline unsigned long
371 qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
372 {
373         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
374         /* See if we are currently pointing to the region we want to use next */
375         if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
376                 /* No need to change window. PCIX and PCIE regs are
377                  * in both windows.
378                  */
379                 return off;
380         }
381
382         if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
383                 /* We are in first CRB window */
384                 if (ha->curr_window != 0)
385                         WARN_ON(1);
386                 return off;
387         }
388
389         if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
390                 /* We are in second CRB window */
391                 off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
392
393                 if (ha->curr_window != 1)
394                         return off;
395
396                 /* We are in the QM or direct access
397                  * register region - do nothing
398                  */
399                 if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
400                         (off < QLA82XX_PCI_CAMQM_MAX))
401                         return off;
402         }
403         /* strange address given */
404         ql_dbg(ql_dbg_p3p, vha, 0xb001,
405             "%s: Warning: unm_nic_pci_set_crbwindow "
406             "called with an unknown address (0x%llx).\n",
407             QLA2XXX_DRIVER_NAME, off);
408         return off;
409 }
410
411 static int
412 qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
413 {
414         struct crb_128M_2M_sub_block_map *m;
415
416         if (*off >= QLA82XX_CRB_MAX)
417                 return -1;
418
419         if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
420                 *off = (*off - QLA82XX_PCI_CAMQM) +
421                     QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
422                 return 0;
423         }
424
425         if (*off < QLA82XX_PCI_CRBSPACE)
426                 return -1;
427
428         *off -= QLA82XX_PCI_CRBSPACE;
429
430         /* Try direct map */
431         m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
432
433         if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
434                 *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
435                 return 0;
436         }
437         /* Not in direct map, use crb window */
438         return 1;
439 }
440
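/*
 * Acquire the CRB window lock (PCIe hardware semaphore 7) before
 * reprogramming the window register; the owning port number is recorded
 * in QLA82XX_CRB_WIN_LOCK_ID.
 */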
441 #define CRB_WIN_LOCK_TIMEOUT 100000000
442 static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
443 {
444         int done = 0, timeout = 0;
445
446         while (!done) {
447                 /* acquire semaphore3 from PCI HW block */
448                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
449                 if (done == 1)
450                         break;
451                 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
452                         return -1;
453                 timeout++;
454         }
455         qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
456         return 0;
457 }
458
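/*
 * CRB register write: qla82xx_pci_get_crb_addr_2M() returns 0 when the
 * offset is directly mapped in the 2M BAR and 1 when the indirect CRB
 * window (guarded by hw_lock and semaphore 7) has to be used.
 */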
459 int
460 qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
461 {
462         unsigned long flags = 0;
463         int rv;
464
465         rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
466
467         BUG_ON(rv == -1);
468
469         if (rv == 1) {
470                 write_lock_irqsave(&ha->hw_lock, flags);
471                 qla82xx_crb_win_lock(ha);
472                 qla82xx_pci_set_crbwindow_2M(ha, &off);
473         }
474
475         writel(data, (void __iomem *)off);
476
477         if (rv == 1) {
478                 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
479                 write_unlock_irqrestore(&ha->hw_lock, flags);
480         }
481         return 0;
482 }
483
484 int
485 qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
486 {
487         unsigned long flags = 0;
488         int rv;
489         u32 data;
490
491         rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
492
493         BUG_ON(rv == -1);
494
495         if (rv == 1) {
496                 write_lock_irqsave(&ha->hw_lock, flags);
497                 qla82xx_crb_win_lock(ha);
498                 qla82xx_pci_set_crbwindow_2M(ha, &off);
499         }
500         data = RD_REG_DWORD((void __iomem *)off);
501
502         if (rv == 1) {
503                 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
504                 write_unlock_irqrestore(&ha->hw_lock, flags);
505         }
506         return data;
507 }
508
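/* IDC (Inter-Driver Communication) lock: PCIe hardware semaphore 5. */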
509 #define IDC_LOCK_TIMEOUT 100000000
510 int qla82xx_idc_lock(struct qla_hw_data *ha)
511 {
512         int i;
513         int done = 0, timeout = 0;
514
515         while (!done) {
516                 /* acquire semaphore5 from PCI HW block */
517                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
518                 if (done == 1)
519                         break;
520                 if (timeout >= IDC_LOCK_TIMEOUT)
521                         return -1;
522
523                 timeout++;
524
525                 /* Yield CPU */
526                 if (!in_interrupt())
527                         schedule();
528                 else {
529                         for (i = 0; i < 20; i++)
530                                 cpu_relax();
531                 }
532         }
533
534         return 0;
535 }
536
537 void qla82xx_idc_unlock(struct qla_hw_data *ha)
538 {
539         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
540 }
541
542 /*  PCI Windowing for DDR regions.  */
543 #define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
544         (((addr) <= (high)) && ((addr) >= (low)))
545 /*
546  * check memory access boundary.
547  * used by test agent. support ddr access only for now
548  */
549 static unsigned long
550 qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
551         unsigned long long addr, int size)
552 {
553         if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
554                 QLA82XX_ADDR_DDR_NET_MAX) ||
555                 !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
556                 QLA82XX_ADDR_DDR_NET_MAX) ||
557                 ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
558                         return 0;
559         else
560                 return 1;
561 }
562
563 int qla82xx_pci_set_window_warning_count;
564
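/*
 * Program the DDR (MN), OCM or QDR (MS) memory window covering 'addr'
 * and return the matching offset within the 2M PCI map, or -1UL if the
 * address range is unknown or unhandled.
 */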
565 static unsigned long
566 qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
567 {
568         int window;
569         u32 win_read;
570         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
571
572         if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
573                 QLA82XX_ADDR_DDR_NET_MAX)) {
574                 /* DDR network side */
575                 window = MN_WIN(addr);
576                 ha->ddr_mn_window = window;
577                 qla82xx_wr_32(ha,
578                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
579                 win_read = qla82xx_rd_32(ha,
580                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
581                 if ((win_read << 17) != window) {
582                         ql_dbg(ql_dbg_p3p, vha, 0xb003,
583                             "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n",
584                             __func__, window, win_read);
585                 }
586                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
587         } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
588                 QLA82XX_ADDR_OCM0_MAX)) {
589                 unsigned int temp1;
590                 if ((addr & 0x00ff800) == 0xff800) {
591                         ql_log(ql_log_warn, vha, 0xb004,
592                             "%s: QM access not handled.\n", __func__);
593                         addr = -1UL;
594                 }
595                 window = OCM_WIN(addr);
596                 ha->ddr_mn_window = window;
597                 qla82xx_wr_32(ha,
598                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
599                 win_read = qla82xx_rd_32(ha,
600                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
601                 temp1 = ((window & 0x1FF) << 7) |
602                     ((window & 0x0FFFE0000) >> 17);
603                 if (win_read != temp1) {
604                         ql_log(ql_log_warn, vha, 0xb005,
605                             "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n",
606                             __func__, temp1, win_read);
607                 }
608                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
609
610         } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
611                 QLA82XX_P3_ADDR_QDR_NET_MAX)) {
612                 /* QDR network side */
613                 window = MS_WIN(addr);
614                 ha->qdr_sn_window = window;
615                 qla82xx_wr_32(ha,
616                         ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
617                 win_read = qla82xx_rd_32(ha,
618                         ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
619                 if (win_read != window) {
620                         ql_log(ql_log_warn, vha, 0xb006,
621                             "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n",
622                             __func__, window, win_read);
623                 }
624                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
625         } else {
626                 /*
627                  * peg gdb frequently accesses memory that doesn't exist,
628                  * this limits the chit chat so debugging isn't slowed down.
629                  */
630                 if ((qla82xx_pci_set_window_warning_count++ < 8) ||
631                     (qla82xx_pci_set_window_warning_count%64 == 0)) {
632                         ql_log(ql_log_warn, vha, 0xb007,
633                             "%s: Warning: %s unknown address range!\n",
634                             __func__, QLA2XXX_DRIVER_NAME);
635                 }
636                 addr = -1UL;
637         }
638         return addr;
639 }
640
641 /* check if address is in the same windows as the previous access */
642 static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
643         unsigned long long addr)
644 {
645         int                     window;
646         unsigned long long      qdr_max;
647
648         qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
649
650         /* DDR network side */
651         if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
652                 QLA82XX_ADDR_DDR_NET_MAX))
653                 BUG();
654         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
655                 QLA82XX_ADDR_OCM0_MAX))
656                 return 1;
657         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
658                 QLA82XX_ADDR_OCM1_MAX))
659                 return 1;
660         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
661                 /* QDR network side */
662                 window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
663                 if (ha->qdr_sn_window == window)
664                         return 1;
665         }
666         return 0;
667 }
668
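/*
 * Direct BAR0 read of adapter memory: select the window for 'off',
 * ioremap the page(s) backing the translated address and issue a
 * 1/2/4/8-byte MMIO read.
 */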
669 static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
670         u64 off, void *data, int size)
671 {
672         unsigned long   flags;
673         void           *addr = NULL;
674         int             ret = 0;
675         u64             start;
676         uint8_t         *mem_ptr = NULL;
677         unsigned long   mem_base;
678         unsigned long   mem_page;
679         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
680
681         write_lock_irqsave(&ha->hw_lock, flags);
682
683         /*
684          * If attempting to access unknown address or straddle hw windows,
685          * do not access.
686          */
687         start = qla82xx_pci_set_window(ha, off);
688         if ((start == -1UL) ||
689                 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
690                 write_unlock_irqrestore(&ha->hw_lock, flags);
691                 ql_log(ql_log_fatal, vha, 0xb008,
692                     "%s out of bound pci memory "
693                     "access, offset is 0x%llx.\n",
694                     QLA2XXX_DRIVER_NAME, off);
695                 return -1;
696         }
697
698         write_unlock_irqrestore(&ha->hw_lock, flags);
699         mem_base = pci_resource_start(ha->pdev, 0);
700         mem_page = start & PAGE_MASK;
701         /* Map two pages whenever user tries to access addresses in two
702          * consecutive pages.
703          */
704         if (mem_page != ((start + size - 1) & PAGE_MASK))
705                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
706         else
707                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
708         if (!mem_ptr) {
709                 *(u8  *)data = 0;
710                 return -1;
711         }
712         addr = mem_ptr;
713         addr += start & (PAGE_SIZE - 1);
714         write_lock_irqsave(&ha->hw_lock, flags);
715
716         switch (size) {
717         case 1:
718                 *(u8  *)data = readb(addr);
719                 break;
720         case 2:
721                 *(u16 *)data = readw(addr);
722                 break;
723         case 4:
724                 *(u32 *)data = readl(addr);
725                 break;
726         case 8:
727                 *(u64 *)data = readq(addr);
728                 break;
729         default:
730                 ret = -1;
731                 break;
732         }
733         write_unlock_irqrestore(&ha->hw_lock, flags);
734
735         if (mem_ptr)
736                 iounmap(mem_ptr);
737         return ret;
738 }
739
740 static int
741 qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
742         u64 off, void *data, int size)
743 {
744         unsigned long   flags;
745         void           *addr = NULL;
746         int             ret = 0;
747         u64             start;
748         uint8_t         *mem_ptr = NULL;
749         unsigned long   mem_base;
750         unsigned long   mem_page;
751         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
752
753         write_lock_irqsave(&ha->hw_lock, flags);
754
755         /*
756          * If attempting to access unknown address or straddle hw windows,
757          * do not access.
758          */
759         start = qla82xx_pci_set_window(ha, off);
760         if ((start == -1UL) ||
761                 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
762                 write_unlock_irqrestore(&ha->hw_lock, flags);
763                 ql_log(ql_log_fatal, vha, 0xb009,
764                     "%s out of bound memory "
765                     "access, offset is 0x%llx.\n",
766                     QLA2XXX_DRIVER_NAME, off);
767                 return -1;
768         }
769
770         write_unlock_irqrestore(&ha->hw_lock, flags);
771         mem_base = pci_resource_start(ha->pdev, 0);
772         mem_page = start & PAGE_MASK;
773         /* Map two pages whenever user tries to access addresses in two
774          * consecutive pages.
775          */
776         if (mem_page != ((start + size - 1) & PAGE_MASK))
777                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
778         else
779                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
780         if (!mem_ptr)
781                 return -1;
782
783         addr = mem_ptr;
784         addr += start & (PAGE_SIZE - 1);
785         write_lock_irqsave(&ha->hw_lock, flags);
786
787         switch (size) {
788         case 1:
789                 writeb(*(u8  *)data, addr);
790                 break;
791         case 2:
792                 writew(*(u16 *)data, addr);
793                 break;
794         case 4:
795                 writel(*(u32 *)data, addr);
796                 break;
797         case 8:
798                 writeq(*(u64 *)data, addr);
799                 break;
800         default:
801                 ret = -1;
802                 break;
803         }
804         write_unlock_irqrestore(&ha->hw_lock, flags);
805         if (mem_ptr)
806                 iounmap(mem_ptr);
807         return ret;
808 }
809
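/*
 * Translate a CRB address from the ROM crb_init list (hub/agent encoded)
 * into a PCI CRB offset via crb_addr_xform[]; returns ADDR_ERROR when
 * the hub is unknown.
 */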
810 #define MTU_FUDGE_FACTOR 100
811 static unsigned long
812 qla82xx_decode_crb_addr(unsigned long addr)
813 {
814         int i;
815         unsigned long base_addr, offset, pci_base;
816
817         if (!qla82xx_crb_table_initialized)
818                 qla82xx_crb_addr_transform_setup();
819
820         pci_base = ADDR_ERROR;
821         base_addr = addr & 0xfff00000;
822         offset = addr & 0x000fffff;
823
824         for (i = 0; i < MAX_CRB_XFORM; i++) {
825                 if (crb_addr_xform[i] == base_addr) {
826                         pci_base = i << 20;
827                         break;
828                 }
829         }
830         if (pci_base == ADDR_ERROR)
831                 return pci_base;
832         return pci_base + offset;
833 }
834
835 static long rom_max_timeout = 100;
836 static long qla82xx_rom_lock_timeout = 100;
837
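/* Flash ROM access is serialized with PCIe hardware semaphore 2. */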
838 static int
839 qla82xx_rom_lock(struct qla_hw_data *ha)
840 {
841         int done = 0, timeout = 0;
842
843         while (!done) {
844                 /* acquire semaphore2 from PCI HW block */
845                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
846                 if (done == 1)
847                         break;
848                 if (timeout >= qla82xx_rom_lock_timeout)
849                         return -1;
850                 timeout++;
851         }
852         qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
853         return 0;
854 }
855
856 static void
857 qla82xx_rom_unlock(struct qla_hw_data *ha)
858 {
859         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
860 }
861
862 static int
863 qla82xx_wait_rom_busy(struct qla_hw_data *ha)
864 {
865         long timeout = 0;
866         long done = 0;
867         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
868
869         while (done == 0) {
870                 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
871                 done &= 4;
872                 timeout++;
873                 if (timeout >= rom_max_timeout) {
874                         ql_dbg(ql_dbg_p3p, vha, 0xb00a,
875                             "%s: Timeout reached waiting for rom busy.\n",
876                             QLA2XXX_DRIVER_NAME);
877                         return -1;
878                 }
879         }
880         return 0;
881 }
882
883 static int
884 qla82xx_wait_rom_done(struct qla_hw_data *ha)
885 {
886         long timeout = 0;
887         long done = 0;
888         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
889
890         while (done == 0) {
891                 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
892                 done &= 2;
893                 timeout++;
894                 if (timeout >= rom_max_timeout) {
895                         ql_dbg(ql_dbg_p3p, vha, 0xb00b,
896                             "%s: Timeout reached waiting for rom done.\n",
897                             QLA2XXX_DRIVER_NAME);
898                         return -1;
899                 }
900         }
901         return 0;
902 }
903
904 static int
905 qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
906 {
907         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
908
909         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
910         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
911         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
912         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
913         qla82xx_wait_rom_busy(ha);
914         if (qla82xx_wait_rom_done(ha)) {
915                 ql_log(ql_log_fatal, vha, 0x00ba,
916                     "Error waiting for rom done.\n");
917                 return -1;
918         }
919         /* Reset abyte_cnt and dummy_byte_cnt */
920         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
921         udelay(10);
922         cond_resched();
923         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
924         *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
925         return 0;
926 }
927
928 static int
929 qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
930 {
931         int ret, loops = 0;
932         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
933
934         while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
935                 udelay(100);
936                 schedule();
937                 loops++;
938         }
939         if (loops >= 50000) {
940                 ql_log(ql_log_fatal, vha, 0x00b9,
941                     "Failed to acquire SEM2 lock.\n");
942                 return -1;
943         }
944         ret = qla82xx_do_rom_fast_read(ha, addr, valp);
945         qla82xx_rom_unlock(ha);
946         return ret;
947 }
948
949 static int
950 qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
951 {
952         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
953         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
954         qla82xx_wait_rom_busy(ha);
955         if (qla82xx_wait_rom_done(ha)) {
956                 ql_log(ql_log_warn, vha, 0xb00c,
957                     "Error waiting for rom done.\n");
958                 return -1;
959         }
960         *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
961         return 0;
962 }
963
964 static int
965 qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
966 {
967         long timeout = 0;
968         uint32_t done = 1;
969         uint32_t val;
970         int ret = 0;
971         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
972
973         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
974         while ((done != 0) && (ret == 0)) {
975                 ret = qla82xx_read_status_reg(ha, &val);
976                 done = val & 1;
977                 timeout++;
978                 udelay(10);
979                 cond_resched();
980                 if (timeout >= 50000) {
981                         ql_log(ql_log_warn, vha, 0xb00d,
982                             "Timeout reached waiting for write finish.\n");
983                         return -1;
984                 }
985         }
986         return ret;
987 }
988
989 static int
990 qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
991 {
992         uint32_t val;
993         qla82xx_wait_rom_busy(ha);
994         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
995         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
996         qla82xx_wait_rom_busy(ha);
997         if (qla82xx_wait_rom_done(ha))
998                 return -1;
999         if (qla82xx_read_status_reg(ha, &val) != 0)
1000                 return -1;
1001         if ((val & 2) != 2)
1002                 return -1;
1003         return 0;
1004 }
1005
1006 static int
1007 qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
1008 {
1009         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1010         if (qla82xx_flash_set_write_enable(ha))
1011                 return -1;
1012         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
1013         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
1014         if (qla82xx_wait_rom_done(ha)) {
1015                 ql_log(ql_log_warn, vha, 0xb00e,
1016                     "Error waiting for rom done.\n");
1017                 return -1;
1018         }
1019         return qla82xx_flash_wait_write_finish(ha);
1020 }
1021
1022 static int
1023 qla82xx_write_disable_flash(struct qla_hw_data *ha)
1024 {
1025         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1026         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
1027         if (qla82xx_wait_rom_done(ha)) {
1028                 ql_log(ql_log_warn, vha, 0xb00f,
1029                     "Error waiting for rom done.\n");
1030                 return -1;
1031         }
1032         return 0;
1033 }
1034
1035 static int
1036 ql82xx_rom_lock_d(struct qla_hw_data *ha)
1037 {
1038         int loops = 0;
1039         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1040
1041         while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1042                 udelay(100);
1043                 cond_resched();
1044                 loops++;
1045         }
1046         if (loops >= 50000) {
1047                 ql_log(ql_log_warn, vha, 0xb010,
1048                     "ROM lock failed.\n");
1049                 return -1;
1050         }
1051         return 0;
1052 }
1053
1054 static int
1055 qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1056         uint32_t data)
1057 {
1058         int ret = 0;
1059         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1060
1061         ret = ql82xx_rom_lock_d(ha);
1062         if (ret < 0) {
1063                 ql_log(ql_log_warn, vha, 0xb011,
1064                     "ROM lock failed.\n");
1065                 return ret;
1066         }
1067
1068         if (qla82xx_flash_set_write_enable(ha))
1069                 goto done_write;
1070
1071         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
1072         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
1073         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
1074         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
1075         qla82xx_wait_rom_busy(ha);
1076         if (qla82xx_wait_rom_done(ha)) {
1077                 ql_log(ql_log_warn, vha, 0xb012,
1078                     "Error waiting for rom done.\n");
1079                 ret = -1;
1080                 goto done_write;
1081         }
1082
1083         ret = qla82xx_flash_wait_write_finish(ha);
1084
1085 done_write:
1086         qla82xx_rom_unlock(ha);
1087         return ret;
1088 }
1089
1090 /* This routine does the CRB initialization sequence
1091  * to put the ISP into an operational state
1092  */
1093 static int
1094 qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1095 {
1096         int addr, val;
1097         int i;
1098         struct crb_addr_pair *buf;
1099         unsigned long off;
1100         unsigned offset, n;
1101         struct qla_hw_data *ha = vha->hw;
1102
1103         struct crb_addr_pair {
1104                 long addr;
1105                 long data;
1106         };
1107
1108         /* Halt all the individual PEGs and other blocks of the ISP */
1109         qla82xx_rom_lock(ha);
1110
1111         /* disable all I2Q */
1112         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
1113         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
1114         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
1115         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
1116         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
1117         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
1118
1119         /* disable all niu interrupts */
1120         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
1121         /* disable xge rx/tx */
1122         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
1123         /* disable xg1 rx/tx */
1124         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
1125         /* disable sideband mac */
1126         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
1127         /* disable ap0 mac */
1128         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
1129         /* disable ap1 mac */
1130         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
1131
1132         /* halt sre */
1133         val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
1134         qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
1135
1136         /* halt epg */
1137         qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
1138
1139         /* halt timers */
1140         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
1141         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
1142         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
1143         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
1144         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
1145         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
1146
1147         /* halt pegs */
1148         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
1149         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
1150         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
1151         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
1152         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
1153         msleep(20);
1154
1155         /* big hammer */
1156         if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
1157                 /* don't reset CAM block on reset */
1158                 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
1159         else
1160                 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
1161
1162         /* reset ms */
1163         val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
1164         val |= (1 << 1);
1165         qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1166         msleep(20);
1167
1168         /* unreset ms */
1169         val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
1170         val &= ~(1 << 1);
1171         qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1172         msleep(20);
1173
1174         qla82xx_rom_unlock(ha);
1175
1176         /* Read the signature value from the flash.
1177          * Offset 0: Contains the signature (0xcafecafe)
1178          * Offset 4: Offset and number of addr/value pairs
1179          * present in the CRB initialize sequence
1180          */
1181         if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1182             qla82xx_rom_fast_read(ha, 4, &n) != 0) {
1183                 ql_log(ql_log_fatal, vha, 0x006e,
1184                     "Error Reading crb_init area: n: %08x.\n", n);
1185                 return -1;
1186         }
1187
1188         /* Offset in flash = lower 16 bits
1189          * Number of entries = upper 16 bits
1190          */
1191         offset = n & 0xffffU;
1192         n = (n >> 16) & 0xffffU;
1193
1194         /* number of addr/value pairs should not exceed 1024 entries */
1195         if (n  >= 1024) {
1196                 ql_log(ql_log_fatal, vha, 0x0071,
1197                     "Card flash not initialized:n=0x%x.\n", n);
1198                 return -1;
1199         }
1200
1201         ql_log(ql_log_info, vha, 0x0072,
1202             "%d CRB init values found in ROM.\n", n);
1203
1204         buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
1205         if (buf == NULL) {
1206                 ql_log(ql_log_fatal, vha, 0x010c,
1207                     "Unable to allocate memory.\n");
1208                 return -1;
1209         }
1210
1211         for (i = 0; i < n; i++) {
1212                 if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
1213                     qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
1214                         kfree(buf);
1215                         return -1;
1216                 }
1217
1218                 buf[i].addr = addr;
1219                 buf[i].data = val;
1220         }
1221
1222         for (i = 0; i < n; i++) {
1223                 /* Translate internal CRB initialization
1224                  * address to PCI bus address
1225                  */
1226                 off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
1227                     QLA82XX_PCI_CRBSPACE;
1228                 /* Not all CRB addr/value pairs are to be written;
1229                  * some of them are skipped
1230                  */
1231
1232                 /* skipping cold reboot MAGIC */
1233                 if (off == QLA82XX_CAM_RAM(0x1fc))
1234                         continue;
1235
1236                 /* do not reset PCI */
1237                 if (off == (ROMUSB_GLB + 0xbc))
1238                         continue;
1239
1240                 /* skip core clock, so that firmware can increase the clock */
1241                 if (off == (ROMUSB_GLB + 0xc8))
1242                         continue;
1243
1244                 /* skip the function enable register */
1245                 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
1246                         continue;
1247
1248                 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
1249                         continue;
1250
1251                 if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
1252                         continue;
1253
1254                 if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
1255                         continue;
1256
1257                 if (off == ADDR_ERROR) {
1258                         ql_log(ql_log_fatal, vha, 0x0116,
1259                             "Unknown addr: 0x%08lx.\n", buf[i].addr);
1260                         continue;
1261                 }
1262
1263                 qla82xx_wr_32(ha, off, buf[i].data);
1264
1265                 /* ISP requires a much bigger delay to settle down,
1266                  * else crb_window returns 0xffffffff
1267                  */
1268                 if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
1269                         msleep(1000);
1270
1271                 /* ISP requires a millisecond delay between
1272                  * successive CRB register updates
1273                  */
1274                 msleep(1);
1275         }
1276
1277         kfree(buf);
1278
1279         /* Resetting the data and instruction cache */
1280         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
1281         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
1282         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
1283
1284         /* Clear all protocol processing engines */
1285         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
1286         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
1287         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
1288         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
1289         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
1290         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
1291         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
1292         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
1293         return 0;
1294 }
1295
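/*
 * Indirect write of adapter memory through the MIU test agent: the
 * affected 16-byte chunks are read, merged with the new data and written
 * back via the TEST_AGT registers.  Addresses that are not in the QDR
 * range and fail the DDR bound check are handed to
 * qla82xx_pci_mem_write_direct().
 */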
1296 static int
1297 qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1298                 u64 off, void *data, int size)
1299 {
1300         int i, j, ret = 0, loop, sz[2], off0;
1301         int scale, shift_amount, startword;
1302         uint32_t temp;
1303         uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
1304
1305         /*
1306          * If not MN, go check for MS or invalid.
1307          */
1308         if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1309                 mem_crb = QLA82XX_CRB_QDR_NET;
1310         else {
1311                 mem_crb = QLA82XX_CRB_DDR_NET;
1312                 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1313                         return qla82xx_pci_mem_write_direct(ha,
1314                             off, data, size);
1315         }
1316
1317         off0 = off & 0x7;
1318         sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1319         sz[1] = size - sz[0];
1320
1321         off8 = off & 0xfffffff0;
1322         loop = (((off & 0xf) + size - 1) >> 4) + 1;
1323         shift_amount = 4;
1324         scale = 2;
1325         startword = (off & 0xf)/8;
1326
1327         for (i = 0; i < loop; i++) {
1328                 if (qla82xx_pci_mem_read_2M(ha, off8 +
1329                     (i << shift_amount), &word[i * scale], 8))
1330                         return -1;
1331         }
1332
1333         switch (size) {
1334         case 1:
1335                 tmpw = *((uint8_t *)data);
1336                 break;
1337         case 2:
1338                 tmpw = *((uint16_t *)data);
1339                 break;
1340         case 4:
1341                 tmpw = *((uint32_t *)data);
1342                 break;
1343         case 8:
1344         default:
1345                 tmpw = *((uint64_t *)data);
1346                 break;
1347         }
1348
1349         if (sz[0] == 8) {
1350                 word[startword] = tmpw;
1351         } else {
1352                 word[startword] &=
1353                         ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1354                 word[startword] |= tmpw << (off0 * 8);
1355         }
1356         if (sz[1] != 0) {
1357                 word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1358                 word[startword+1] |= tmpw >> (sz[0] * 8);
1359         }
1360
1361         for (i = 0; i < loop; i++) {
1362                 temp = off8 + (i << shift_amount);
1363                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1364                 temp = 0;
1365                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1366                 temp = word[i * scale] & 0xffffffff;
1367                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1368                 temp = (word[i * scale] >> 32) & 0xffffffff;
1369                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1370                 temp = word[i*scale + 1] & 0xffffffff;
1371                 qla82xx_wr_32(ha, mem_crb +
1372                     MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
1373                 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1374                 qla82xx_wr_32(ha, mem_crb +
1375                     MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
1376
1377                 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1378                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1379                 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1380                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1381
1382                 for (j = 0; j < MAX_CTL_CHECK; j++) {
1383                         temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1384                         if ((temp & MIU_TA_CTL_BUSY) == 0)
1385                                 break;
1386                 }
1387
1388                 if (j >= MAX_CTL_CHECK) {
1389                         if (printk_ratelimit())
1390                                 dev_err(&ha->pdev->dev,
1391                                     "failed to write through agent.\n");
1392                         ret = -1;
1393                         break;
1394                 }
1395         }
1396
1397         return ret;
1398 }
1399
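/*
 * Copy the bootloader image from flash (flt_region_bootload) into adapter
 * memory at BOOTLD_START, eight bytes at a time, then program PEG_NET_0
 * and the global SW-reset register to kick off the loaded image.
 */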
1400 static int
1401 qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
1402 {
1403         int  i;
1404         long size = 0;
1405         long flashaddr = ha->flt_region_bootload << 2;
1406         long memaddr = BOOTLD_START;
1407         u64 data;
1408         u32 high, low;
1409         size = (IMAGE_START - BOOTLD_START) / 8;
1410
1411         for (i = 0; i < size; i++) {
1412                 if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
1413                     (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
1414                         return -1;
1415                 }
1416                 data = ((u64)high << 32) | low ;
1417                 qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
1418                 flashaddr += 8;
1419                 memaddr += 8;
1420
1421                 if (i % 0x1000 == 0)
1422                         msleep(1);
1423         }
1424         udelay(100);
1425         read_lock(&ha->hw_lock);
1426         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1427         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1428         read_unlock(&ha->hw_lock);
1429         return 0;
1430 }
1431
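/*
 * Indirect read of adapter memory through the MIU test agent; the read
 * counterpart of qla82xx_pci_mem_write_2M() above.
 */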
1432 int
1433 qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1434                 u64 off, void *data, int size)
1435 {
1436         int i, j = 0, k, start, end, loop, sz[2], off0[2];
1437         int           shift_amount;
1438         uint32_t      temp;
1439         uint64_t      off8, val, mem_crb, word[2] = {0, 0};
1440
1441         /*
1442          * If not MN, go check for MS or invalid.
1443          */
1444
1445         if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1446                 mem_crb = QLA82XX_CRB_QDR_NET;
1447         else {
1448                 mem_crb = QLA82XX_CRB_DDR_NET;
1449                 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1450                         return qla82xx_pci_mem_read_direct(ha,
1451                             off, data, size);
1452         }
1453
1454         off8 = off & 0xfffffff0;
1455         off0[0] = off & 0xf;
1456         sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
1457         shift_amount = 4;
1458         loop = ((off0[0] + size - 1) >> shift_amount) + 1;
1459         off0[1] = 0;
1460         sz[1] = size - sz[0];
1461
1462         for (i = 0; i < loop; i++) {
1463                 temp = off8 + (i << shift_amount);
1464                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
1465                 temp = 0;
1466                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
1467                 temp = MIU_TA_CTL_ENABLE;
1468                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1469                 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
1470                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1471
1472                 for (j = 0; j < MAX_CTL_CHECK; j++) {
1473                         temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1474                         if ((temp & MIU_TA_CTL_BUSY) == 0)
1475                                 break;
1476                 }
1477
1478                 if (j >= MAX_CTL_CHECK) {
1479                         if (printk_ratelimit())
1480                                 dev_err(&ha->pdev->dev,
1481                                     "failed to read through agent.\n");
1482                         break;
1483                 }
1484
1485                 start = off0[i] >> 2;
1486                 end   = (off0[i] + sz[i] - 1) >> 2;
1487                 for (k = start; k <= end; k++) {
1488                         temp = qla82xx_rd_32(ha,
1489                                         mem_crb + MIU_TEST_AGT_RDDATA(k));
1490                         word[i] |= ((uint64_t)temp << (32 * (k & 1)));
1491                 }
1492         }
1493
1494         if (j >= MAX_CTL_CHECK)
1495                 return -1;
1496
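        /*
         * Reassemble the caller's bytes from the 64-bit words read back: if
         * the offset was 8-byte aligned word[0] already holds them, otherwise
         * combine the tail of word[0] with the head of word[1].
         */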
1497         if ((off0[0] & 7) == 0) {
1498                 val = word[0];
1499         } else {
1500                 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
1501                         ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
1502         }
1503
1504         switch (size) {
1505         case 1:
1506                 *(uint8_t  *)data = val;
1507                 break;
1508         case 2:
1509                 *(uint16_t *)data = val;
1510                 break;
1511         case 4:
1512                 *(uint32_t *)data = val;
1513                 break;
1514         case 8:
1515                 *(uint64_t *)data = val;
1516                 break;
1517         }
1518         return 0;
1519 }
1520
1521
1522 static struct qla82xx_uri_table_desc *
1523 qla82xx_get_table_desc(const u8 *unirom, int section)
1524 {
1525         uint32_t i;
1526         struct qla82xx_uri_table_desc *directory =
1527                 (struct qla82xx_uri_table_desc *)&unirom[0];
1528         __le32 offset;
1529         __le32 tab_type;
1530         __le32 entries = cpu_to_le32(directory->num_entries);
1531
1532         for (i = 0; i < entries; i++) {
1533                 offset = cpu_to_le32(directory->findex) +
1534                     (i * cpu_to_le32(directory->entry_size));
1535                 tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
1536
1537                 if (tab_type == section)
1538                         return (struct qla82xx_uri_table_desc *)&unirom[offset];
1539         }
1540
1541         return NULL;
1542 }
1543
1544 static struct qla82xx_uri_data_desc *
1545 qla82xx_get_data_desc(struct qla_hw_data *ha,
1546         u32 section, u32 idx_offset)
1547 {
1548         const u8 *unirom = ha->hablob->fw->data;
1549         int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
1550         struct qla82xx_uri_table_desc *tab_desc = NULL;
1551         __le32 offset;
1552
1553         tab_desc = qla82xx_get_table_desc(unirom, section);
1554         if (!tab_desc)
1555                 return NULL;
1556
1557         offset = cpu_to_le32(tab_desc->findex) +
1558             (cpu_to_le32(tab_desc->entry_size) * idx);
1559
1560         return (struct qla82xx_uri_data_desc *)&unirom[offset];
1561 }
1562
1563 static u8 *
1564 qla82xx_get_bootld_offset(struct qla_hw_data *ha)
1565 {
1566         u32 offset = BOOTLD_START;
1567         struct qla82xx_uri_data_desc *uri_desc = NULL;
1568
1569         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1570                 uri_desc = qla82xx_get_data_desc(ha,
1571                     QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
1572                 if (uri_desc)
1573                         offset = cpu_to_le32(uri_desc->findex);
1574         }
1575
1576         return (u8 *)&ha->hablob->fw->data[offset];
1577 }
1578
1579 static __le32
1580 qla82xx_get_fw_size(struct qla_hw_data *ha)
1581 {
1582         struct qla82xx_uri_data_desc *uri_desc = NULL;
1583
1584         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1585                 uri_desc =  qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1586                     QLA82XX_URI_FIRMWARE_IDX_OFF);
1587                 if (uri_desc)
1588                         return cpu_to_le32(uri_desc->size);
1589         }
1590
1591         return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
1592 }
1593
1594 static u8 *
1595 qla82xx_get_fw_offs(struct qla_hw_data *ha)
1596 {
1597         u32 offset = IMAGE_START;
1598         struct qla82xx_uri_data_desc *uri_desc = NULL;
1599
1600         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1601                 uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1602                         QLA82XX_URI_FIRMWARE_IDX_OFF);
1603                 if (uri_desc)
1604                         offset = cpu_to_le32(uri_desc->findex);
1605         }
1606
1607         return (u8 *)&ha->hablob->fw->data[offset];
1608 }
1609
1610 /* PCI related functions */
1611 char *
1612 qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
1613 {
1614         int pcie_reg;
1615         struct qla_hw_data *ha = vha->hw;
1616         char lwstr[6];
1617         uint16_t lnk;
1618
1619         pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
1620         pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
1621         ha->link_width = (lnk >> 4) & 0x3f;
1622
1623         strcpy(str, "PCIe (");
1624         strcat(str, "2.5Gb/s ");
1625         snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
1626         strcat(str, lwstr);
1627         return str;
1628 }
1629
1630 int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
1631 {
1632         unsigned long val = 0;
1633         u32 control;
1634
1635         switch (region) {
1636         case 0:
1637                 val = 0;
1638                 break;
1639         case 1:
1640                 pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
1641                 val = control + QLA82XX_MSIX_TBL_SPACE;
1642                 break;
1643         }
1644         return val;
1645 }
1646
1647
1648 int
1649 qla82xx_iospace_config(struct qla_hw_data *ha)
1650 {
1651         uint32_t len = 0;
1652
1653         if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
1654                 ql_log_pci(ql_log_fatal, ha->pdev, 0x000c,
1655                     "Failed to reserve selected regions.\n");
1656                 goto iospace_error_exit;
1657         }
1658
1659         /* Use MMIO operations for all accesses. */
1660         if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1661                 ql_log_pci(ql_log_fatal, ha->pdev, 0x000d,
1662                     "Region #0 not an MMIO resource, aborting.\n");
1663                 goto iospace_error_exit;
1664         }
1665
1666         len = pci_resource_len(ha->pdev, 0);
1667         ha->nx_pcibase =
1668             (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
1669         if (!ha->nx_pcibase) {
1670                 ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
1671                     "Cannot remap pcibase MMIO, aborting.\n");
1672                 pci_release_regions(ha->pdev);
1673                 goto iospace_error_exit;
1674         }
1675
1676         /* Mapping of IO base pointer */
1677         ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
1678             0xbc000 + (ha->pdev->devfn << 11));
1679
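        /* The ql2xdbwr module parameter selects the doorbell scheme: 0 maps
         * the per-function doorbell register from BAR4, non-zero posts via
         * the CAMRAM doorbell registers instead. */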
1680         if (!ql2xdbwr) {
1681                 ha->nxdb_wr_ptr =
1682                     (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
1683                     (ha->pdev->devfn << 12)), 4);
1684                 if (!ha->nxdb_wr_ptr) {
1685                         ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
1686                             "Cannot remap MMIO, aborting.\n");
1687                         pci_release_regions(ha->pdev);
1688                         goto iospace_error_exit;
1689                 }
1690
1691                 /* Mapping of IO base pointer,
1692                  * door bell read and write pointer
1693                  */
1694                 ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
1695                     (ha->pdev->devfn * 8);
1696         } else {
1697                 ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
1698                         QLA82XX_CAMRAM_DB1 :
1699                         QLA82XX_CAMRAM_DB2);
1700         }
1701
1702         ha->max_req_queues = ha->max_rsp_queues = 1;
1703         ha->msix_count = ha->max_rsp_queues + 1;
1704         ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
1705             "nx_pci_base=%p iobase=%p "
1706             "max_req_queues=%d msix_count=%d.\n",
1707             ha->nx_pcibase, ha->iobase,
1708             ha->max_req_queues, ha->msix_count);
1709         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
1710             "nx_pci_base=%p iobase=%p "
1711             "max_req_queues=%d msix_count=%d.\n",
1712             ha->nx_pcibase, ha->iobase,
1713             ha->max_req_queues, ha->msix_count);
1714         return 0;
1715
1716 iospace_error_exit:
1717         return -ENOMEM;
1718 }
1719
1720 /* GS related functions */
1721
1722 /* Initialization related functions */
1723
1724 /**
1725  * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
1726  * @vha: HA context
1727  *
1728  * Returns 0 on success.
1729  */
1730 int
1731 qla82xx_pci_config(scsi_qla_host_t *vha)
1732 {
1733         struct qla_hw_data *ha = vha->hw;
1734         int ret;
1735
1736         pci_set_master(ha->pdev);
1737         ret = pci_set_mwi(ha->pdev);
1738         ha->chip_revision = ha->pdev->revision;
1739         ql_dbg(ql_dbg_init, vha, 0x0043,
1740             "Chip revision:%d.\n",
1741             ha->chip_revision);
1742         return 0;
1743 }
1744
1745 /**
1746  * qla82xx_reset_chip() - Disable adapter interrupts before an ISP82xx chip reset.
1747  * @vha: HA context
1750  */
1751 void
1752 qla82xx_reset_chip(scsi_qla_host_t *vha)
1753 {
1754         struct qla_hw_data *ha = vha->hw;
1755         ha->isp_ops->disable_intrs(ha);
1756 }
1757
1758 void qla82xx_config_rings(struct scsi_qla_host *vha)
1759 {
1760         struct qla_hw_data *ha = vha->hw;
1761         struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1762         struct init_cb_81xx *icb;
1763         struct req_que *req = ha->req_q_map[0];
1764         struct rsp_que *rsp = ha->rsp_q_map[0];
1765
1766         /* Setup ring parameters in initialization control block. */
1767         icb = (struct init_cb_81xx *)ha->init_cb;
1768         icb->request_q_outpointer = __constant_cpu_to_le16(0);
1769         icb->response_q_inpointer = __constant_cpu_to_le16(0);
1770         icb->request_q_length = cpu_to_le16(req->length);
1771         icb->response_q_length = cpu_to_le16(rsp->length);
1772         icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1773         icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1774         icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1775         icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1776
1777         WRT_REG_DWORD((unsigned long  __iomem *)&reg->req_q_out[0], 0);
1778         WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_in[0], 0);
1779         WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_out[0], 0);
1780 }
1781
1782 void qla82xx_reset_adapter(struct scsi_qla_host *vha)
1783 {
1784         struct qla_hw_data *ha = vha->hw;
1785         vha->flags.online = 0;
1786         qla2x00_try_to_stop_firmware(vha);
1787         ha->isp_ops->disable_intrs(ha);
1788 }
1789
1790 static int
1791 qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
1792 {
1793         u64 *ptr64;
1794         u32 i, flashaddr, size;
1795         __le64 data;
1796
1797         size = (IMAGE_START - BOOTLD_START) / 8;
1798
1799         ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
1800         flashaddr = BOOTLD_START;
1801
1802         for (i = 0; i < size; i++) {
1803                 data = cpu_to_le64(ptr64[i]);
1804                 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1805                         return -EIO;
1806                 flashaddr += 8;
1807         }
1808
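        /* Now copy the firmware image proper into adapter memory, starting
         * at FLASH_ADDR_START. */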
1809         flashaddr = FLASH_ADDR_START;
1810         size = (__force u32)qla82xx_get_fw_size(ha) / 8;
1811         ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
1812
1813         for (i = 0; i < size; i++) {
1814                 data = cpu_to_le64(ptr64[i]);
1815
1816                 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1817                         return -EIO;
1818                 flashaddr += 8;
1819         }
1820         udelay(100);
1821
1822         /* Write a magic value to CAMRAM register
1823          * at a specified offset to indicate
1824          * that all data is written and
1825          * ready for firmware to initialize.
1826          */
1827         qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);
1828
1829         read_lock(&ha->hw_lock);
1830         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1831         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1832         read_unlock(&ha->hw_lock);
1833         return 0;
1834 }
1835
1836 static int
1837 qla82xx_set_product_offset(struct qla_hw_data *ha)
1838 {
1839         struct qla82xx_uri_table_desc *ptab_desc = NULL;
1840         const uint8_t *unirom = ha->hablob->fw->data;
1841         uint32_t i;
1842         __le32 entries;
1843         __le32 flags, file_chiprev, offset;
1844         uint8_t chiprev = ha->chip_revision;
1845         /* Hardcoding mn_present flag for P3P */
1846         int mn_present = 0;
1847         uint32_t flagbit;
1848
1849         ptab_desc = qla82xx_get_table_desc(unirom,
1850                  QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
1851         if (!ptab_desc)
1852                 return -1;
1853
1854         entries = cpu_to_le32(ptab_desc->num_entries);
1855
1856         for (i = 0; i < entries; i++) {
1857                 offset = cpu_to_le32(ptab_desc->findex) +
1858                         (i * cpu_to_le32(ptab_desc->entry_size));
1859                 flags = cpu_to_le32(*((int *)&unirom[offset] +
1860                         QLA82XX_URI_FLAGS_OFF));
1861                 file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
1862                         QLA82XX_URI_CHIP_REV_OFF));
1863
1864                 flagbit = mn_present ? 1 : 2;
1865
1866                 if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
1867                         ha->file_prd_off = offset;
1868                         return 0;
1869                 }
1870         }
1871         return -1;
1872 }
1873
1874 int
1875 qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
1876 {
1877         __le32 val;
1878         uint32_t min_size;
1879         struct qla_hw_data *ha = vha->hw;
1880         const struct firmware *fw = ha->hablob->fw;
1881
1882         ha->fw_type = fw_type;
1883
1884         if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1885                 if (qla82xx_set_product_offset(ha))
1886                         return -EINVAL;
1887
1888                 min_size = QLA82XX_URI_FW_MIN_SIZE;
1889         } else {
1890                 val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
1891                 if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
1892                         return -EINVAL;
1893
1894                 min_size = QLA82XX_FW_MIN_SIZE;
1895         }
1896
1897         if (fw->size < min_size)
1898                 return -EINVAL;
1899         return 0;
1900 }
1901
1902 static int
1903 qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1904 {
1905         u32 val = 0;
1906         int retries = 60;
1907         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1908
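        /* Poll the command peg state for up to 30 seconds (60 x 500 ms). */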
1909         do {
1910                 read_lock(&ha->hw_lock);
1911                 val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
1912                 read_unlock(&ha->hw_lock);
1913
1914                 switch (val) {
1915                 case PHAN_INITIALIZE_COMPLETE:
1916                 case PHAN_INITIALIZE_ACK:
1917                         return QLA_SUCCESS;
1918                 case PHAN_INITIALIZE_FAILED:
1919                         break;
1920                 default:
1921                         break;
1922                 }
1923                 ql_log(ql_log_info, vha, 0x00a8,
1924                     "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n",
1925                     val, retries);
1926
1927                 msleep(500);
1928
1929         } while (--retries);
1930
1931         ql_log(ql_log_fatal, vha, 0x00a9,
1932             "Cmd Peg initialization failed: 0x%x.\n", val);
1933
1934         val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
1935         read_lock(&ha->hw_lock);
1936         qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1937         read_unlock(&ha->hw_lock);
1938         return QLA_FUNCTION_FAILED;
1939 }
1940
1941 static int
1942 qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1943 {
1944         u32 val = 0;
1945         int retries = 60;
1946         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1947
1948         do {
1949                 read_lock(&ha->hw_lock);
1950                 val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
1951                 read_unlock(&ha->hw_lock);
1952
1953                 switch (val) {
1954                 case PHAN_INITIALIZE_COMPLETE:
1955                 case PHAN_INITIALIZE_ACK:
1956                         return QLA_SUCCESS;
1957                 case PHAN_INITIALIZE_FAILED:
1958                         break;
1959                 default:
1960                         break;
1961                 }
1962                 ql_log(ql_log_info, vha, 0x00ab,
1963                     "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
1964                     val, retries);
1965
1966                 msleep(500);
1967
1968         } while (--retries);
1969
1970         ql_log(ql_log_fatal, vha, 0x00ac,
1971             "Rcv Peg initialization failed: 0x%x.\n", val);
1972         read_lock(&ha->hw_lock);
1973         qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
1974         read_unlock(&ha->hw_lock);
1975         return QLA_FUNCTION_FAILED;
1976 }
1977
1978 /* ISR related functions */
1979 uint32_t qla82xx_isr_int_target_mask_enable[8] = {
1980         ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
1981         ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
1982         ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
1983         ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
1984 };
1985
1986 uint32_t qla82xx_isr_int_target_status[8] = {
1987         ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
1988         ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
1989         ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
1990         ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
1991 };
1992
1993 static struct qla82xx_legacy_intr_set legacy_intr[] = \
1994         QLA82XX_LEGACY_INTR_CONFIG;
1995
1996 /*
1997  * qla82xx_mbx_completion() - Process mailbox command completions.
1998  * @vha: SCSI driver HA context
1999  * @mb0: Mailbox0 register
2000  */
2001 static void
2002 qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2003 {
2004         uint16_t        cnt;
2005         uint16_t __iomem *wptr;
2006         struct qla_hw_data *ha = vha->hw;
2007         struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2008         wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
2009
2010         /* Load return mailbox registers. */
2011         ha->flags.mbox_int = 1;
2012         ha->mailbox_out[0] = mb0;
2013
2014         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2015                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2016                 wptr++;
2017         }
2018
2019         if (ha->mcp) {
2020                 ql_dbg(ql_dbg_async, vha, 0x5052,
2021                     "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
2022         } else {
2023                 ql_dbg(ql_dbg_async, vha, 0x5053,
2024                     "MBX pointer ERROR.\n");
2025         }
2026 }
2027
2028 /*
2029  * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
2030  * @irq: interrupt number
2031  * @dev_id: SCSI driver HA context
2033  *
2034  * Called by system whenever the host adapter generates an interrupt.
2035  *
2036  * Returns handled flag.
2037  */
2038 irqreturn_t
2039 qla82xx_intr_handler(int irq, void *dev_id)
2040 {
2041         scsi_qla_host_t *vha;
2042         struct qla_hw_data *ha;
2043         struct rsp_que *rsp;
2044         struct device_reg_82xx __iomem *reg;
2045         int status = 0, status1 = 0;
2046         unsigned long   flags;
2047         unsigned long   iter;
2048         uint32_t        stat = 0;
2049         uint16_t        mb[4];
2050
2051         rsp = (struct rsp_que *) dev_id;
2052         if (!rsp) {
2053                 printk(KERN_INFO
2054                         "%s(): NULL response queue pointer.\n", __func__);
2055                 return IRQ_NONE;
2056         }
2057         ha = rsp->hw;
2058
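        /* In legacy (INTx) mode, verify that this function's vector bit is
         * set and the interrupt state register reports a triggered legacy
         * interrupt before claiming it. */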
2059         if (!ha->flags.msi_enabled) {
2060                 status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
2061                 if (!(status & ha->nx_legacy_intr.int_vec_bit))
2062                         return IRQ_NONE;
2063
2064                 status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
2065                 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
2066                         return IRQ_NONE;
2067         }
2068
2069         /* clear the interrupt */
2070         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
2071
2072         /* read twice to ensure write is flushed */
2073         qla82xx_rd_32(ha, ISR_INT_VECTOR);
2074         qla82xx_rd_32(ha, ISR_INT_VECTOR);
2075
2076         reg = &ha->iobase->isp82;
2077
2078         spin_lock_irqsave(&ha->hardware_lock, flags);
2079         vha = pci_get_drvdata(ha->pdev);
2080         for (iter = 1; iter--; ) {
2081
2082                 if (RD_REG_DWORD(&reg->host_int)) {
2083                         stat = RD_REG_DWORD(&reg->host_status);
2084
2085                         switch (stat & 0xff) {
2086                         case 0x1:
2087                         case 0x2:
2088                         case 0x10:
2089                         case 0x11:
2090                                 qla82xx_mbx_completion(vha, MSW(stat));
2091                                 status |= MBX_INTERRUPT;
2092                                 break;
2093                         case 0x12:
2094                                 mb[0] = MSW(stat);
2095                                 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2096                                 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2097                                 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2098                                 qla2x00_async_event(vha, rsp, mb);
2099                                 break;
2100                         case 0x13:
2101                                 qla24xx_process_response_queue(vha, rsp);
2102                                 break;
2103                         default:
2104                                 ql_dbg(ql_dbg_async, vha, 0x5054,
2105                                     "Unrecognized interrupt type (%d).\n",
2106                                     stat & 0xff);
2107                                 break;
2108                         }
2109                 }
2110                 WRT_REG_DWORD(&reg->host_int, 0);
2111         }
2112         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2113         if (!ha->flags.msi_enabled)
2114                 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2115
2116 #ifdef QL_DEBUG_LEVEL_17
2117         if (!irq && ha->flags.eeh_busy)
2118                 ql_log(ql_log_warn, vha, 0x503d,
2119                     "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2120                     status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2121 #endif
2122
2123         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2124             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2125                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2126                 complete(&ha->mbx_intr_comp);
2127         }
2128         return IRQ_HANDLED;
2129 }
2130
2131 irqreturn_t
2132 qla82xx_msix_default(int irq, void *dev_id)
2133 {
2134         scsi_qla_host_t *vha;
2135         struct qla_hw_data *ha;
2136         struct rsp_que *rsp;
2137         struct device_reg_82xx __iomem *reg;
2138         int status = 0;
2139         unsigned long flags;
2140         uint32_t stat = 0;
2141         uint16_t mb[4];
2142
2143         rsp = (struct rsp_que *) dev_id;
2144         if (!rsp) {
2145                 printk(KERN_INFO
2146                         "%s(): NULL response queue pointer.\n", __func__);
2147                 return IRQ_NONE;
2148         }
2149         ha = rsp->hw;
2150
2151         reg = &ha->iobase->isp82;
2152
2153         spin_lock_irqsave(&ha->hardware_lock, flags);
2154         vha = pci_get_drvdata(ha->pdev);
2155         do {
2156                 if (RD_REG_DWORD(&reg->host_int)) {
2157                         stat = RD_REG_DWORD(&reg->host_status);
2158
2159                         switch (stat & 0xff) {
2160                         case 0x1:
2161                         case 0x2:
2162                         case 0x10:
2163                         case 0x11:
2164                                 qla82xx_mbx_completion(vha, MSW(stat));
2165                                 status |= MBX_INTERRUPT;
2166                                 break;
2167                         case 0x12:
2168                                 mb[0] = MSW(stat);
2169                                 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2170                                 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2171                                 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2172                                 qla2x00_async_event(vha, rsp, mb);
2173                                 break;
2174                         case 0x13:
2175                                 qla24xx_process_response_queue(vha, rsp);
2176                                 break;
2177                         default:
2178                                 ql_dbg(ql_dbg_async, vha, 0x5041,
2179                                     "Unrecognized interrupt type (%d).\n",
2180                                     stat & 0xff);
2181                                 break;
2182                         }
2183                 }
2184                 WRT_REG_DWORD(&reg->host_int, 0);
2185         } while (0);
2186
2187         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2188
2189 #ifdef QL_DEBUG_LEVEL_17
2190         if (!irq && ha->flags.eeh_busy)
2191                 ql_log(ql_log_warn, vha, 0x5044,
2192                     "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2193                     status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2194 #endif
2195
2196         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2197                 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2198                         set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2199                         complete(&ha->mbx_intr_comp);
2200         }
2201         return IRQ_HANDLED;
2202 }
2203
2204 irqreturn_t
2205 qla82xx_msix_rsp_q(int irq, void *dev_id)
2206 {
2207         scsi_qla_host_t *vha;
2208         struct qla_hw_data *ha;
2209         struct rsp_que *rsp;
2210         struct device_reg_82xx __iomem *reg;
2211         unsigned long flags;
2212
2213         rsp = (struct rsp_que *) dev_id;
2214         if (!rsp) {
2215                 printk(KERN_INFO
2216                         "%s(): NULL response queue pointer.\n", __func__);
2217                 return IRQ_NONE;
2218         }
2219
2220         ha = rsp->hw;
2221         reg = &ha->iobase->isp82;
2222         spin_lock_irqsave(&ha->hardware_lock, flags);
2223         vha = pci_get_drvdata(ha->pdev);
2224         qla24xx_process_response_queue(vha, rsp);
2225         WRT_REG_DWORD(&reg->host_int, 0);
2226         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2227         return IRQ_HANDLED;
2228 }
2229
2230 void
2231 qla82xx_poll(int irq, void *dev_id)
2232 {
2233         scsi_qla_host_t *vha;
2234         struct qla_hw_data *ha;
2235         struct rsp_que *rsp;
2236         struct device_reg_82xx __iomem *reg;
2237         int status = 0;
2238         uint32_t stat;
2239         uint16_t mb[4];
2240         unsigned long flags;
2241
2242         rsp = (struct rsp_que *) dev_id;
2243         if (!rsp) {
2244                 printk(KERN_INFO
2245                         "%s(): NULL response queue pointer.\n", __func__);
2246                 return;
2247         }
2248         ha = rsp->hw;
2249
2250         reg = &ha->iobase->isp82;
2251         spin_lock_irqsave(&ha->hardware_lock, flags);
2252         vha = pci_get_drvdata(ha->pdev);
2253
2254         if (RD_REG_DWORD(&reg->host_int)) {
2255                 stat = RD_REG_DWORD(&reg->host_status);
2256                 switch (stat & 0xff) {
2257                 case 0x1:
2258                 case 0x2:
2259                 case 0x10:
2260                 case 0x11:
2261                         qla82xx_mbx_completion(vha, MSW(stat));
2262                         status |= MBX_INTERRUPT;
2263                         break;
2264                 case 0x12:
2265                         mb[0] = MSW(stat);
2266                         mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2267                         mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2268                         mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2269                         qla2x00_async_event(vha, rsp, mb);
2270                         break;
2271                 case 0x13:
2272                         qla24xx_process_response_queue(vha, rsp);
2273                         break;
2274                 default:
2275                         ql_dbg(ql_dbg_p3p, vha, 0xb013,
2276                             "Unrecognized interrupt type (%d).\n",
2277                             stat & 0xff);
2278                         break;
2279                 }
2280         }
2281         WRT_REG_DWORD(&reg->host_int, 0);
2282         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2283 }
2284
2285 void
2286 qla82xx_enable_intrs(struct qla_hw_data *ha)
2287 {
2288         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2289         qla82xx_mbx_intr_enable(vha);
2290         spin_lock_irq(&ha->hardware_lock);
2291         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2292         spin_unlock_irq(&ha->hardware_lock);
2293         ha->interrupts_on = 1;
2294 }
2295
2296 void
2297 qla82xx_disable_intrs(struct qla_hw_data *ha)
2298 {
2299         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2300         qla82xx_mbx_intr_disable(vha);
2301         spin_lock_irq(&ha->hardware_lock);
2302         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
2303         spin_unlock_irq(&ha->hardware_lock);
2304         ha->interrupts_on = 0;
2305 }
2306
2307 void qla82xx_init_flags(struct qla_hw_data *ha)
2308 {
2309         struct qla82xx_legacy_intr_set *nx_legacy_intr;
2310
2311         /* ISP 8021 initializations */
2312         rwlock_init(&ha->hw_lock);
2313         ha->qdr_sn_window = -1;
2314         ha->ddr_mn_window = -1;
2315         ha->curr_window = 255;
2316         ha->portnum = PCI_FUNC(ha->pdev->devfn);
2317         nx_legacy_intr = &legacy_intr[ha->portnum];
2318         ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
2319         ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
2320         ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
2321         ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
2322 }
2323
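/*
 * The DRV_ACTIVE and DRV_STATE CRB registers carry a 4-bit field per PCI
 * function; each of the helpers below sets or clears only its own nibble,
 * selected by (ha->portnum * 4).
 */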
2324 inline void
2325 qla82xx_set_drv_active(scsi_qla_host_t *vha)
2326 {
2327         uint32_t drv_active;
2328         struct qla_hw_data *ha = vha->hw;
2329
2330         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2331
2332         /* If reset value is all FF's, initialize DRV_ACTIVE */
2333         if (drv_active == 0xffffffff) {
2334                 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
2335                         QLA82XX_DRV_NOT_ACTIVE);
2336                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2337         }
2338         drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2339         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2340 }
2341
2342 inline void
2343 qla82xx_clear_drv_active(struct qla_hw_data *ha)
2344 {
2345         uint32_t drv_active;
2346
2347         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2348         drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2349         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2350 }
2351
2352 static inline int
2353 qla82xx_need_reset(struct qla_hw_data *ha)
2354 {
2355         uint32_t drv_state;
2356         int rval;
2357
2358         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2359         rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2360         return rval;
2361 }
2362
2363 static inline void
2364 qla82xx_set_rst_ready(struct qla_hw_data *ha)
2365 {
2366         uint32_t drv_state;
2367         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2368
2369         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2370
2371         /* If reset value is all FF's, initialize DRV_STATE */
2372         if (drv_state == 0xffffffff) {
2373                 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
2374                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2375         }
2376         drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2377         ql_log(ql_log_info, vha, 0x00bb,
2378             "drv_state = 0x%x.\n", drv_state);
2379         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2380 }
2381
2382 static inline void
2383 qla82xx_clear_rst_ready(struct qla_hw_data *ha)
2384 {
2385         uint32_t drv_state;
2386
2387         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2388         drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2389         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2390 }
2391
2392 static inline void
2393 qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2394 {
2395         uint32_t qsnt_state;
2396
2397         qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2398         qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2399         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2400 }
2401
2402 void
2403 qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
2404 {
2405         struct qla_hw_data *ha = vha->hw;
2406         uint32_t qsnt_state;
2407
2408         qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2409         qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2410         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2411 }
2412
2413 static int
2414 qla82xx_load_fw(scsi_qla_host_t *vha)
2415 {
2416         int rst;
2417         struct fw_blob *blob;
2418         struct qla_hw_data *ha = vha->hw;
2419
2420         if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2421                 ql_log(ql_log_fatal, vha, 0x009f,
2422                     "Error during CRB initialization.\n");
2423                 return QLA_FUNCTION_FAILED;
2424         }
2425         udelay(500);
2426
2427         /* Bring QM and CAMRAM out of reset */
2428         rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
2429         rst &= ~((1 << 28) | (1 << 24));
2430         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
2431
2432         /*
2433          * FW Load priority:
2434          * 1) Operational firmware residing in flash.
2435          * 2) Firmware via request-firmware interface (.bin file).
2436          */
2437         if (ql2xfwloadbin == 2)
2438                 goto try_blob_fw;
2439
2440         ql_log(ql_log_info, vha, 0x00a0,
2441             "Attempting to load firmware from flash.\n");
2442
2443         if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2444                 ql_log(ql_log_info, vha, 0x00a1,
2445                     "Firmware loaded successfully from flash.\n");
2446                 return QLA_SUCCESS;
2447         } else {
2448                 ql_log(ql_log_warn, vha, 0x0108,
2449                     "Firmware load from flash failed.\n");
2450         }
2451
2452 try_blob_fw:
2453         ql_log(ql_log_info, vha, 0x00a2,
2454             "Attempting to load firmware from blob.\n");
2455
2456         /* Load firmware blob. */
2457         blob = ha->hablob = qla2x00_request_firmware(vha);
2458         if (!blob) {
2459                 ql_log(ql_log_fatal, vha, 0x00a3,
2460                     "Firmware image not present.\n");
2461                 goto fw_load_failed;
2462         }
2463
2464         /* Validating firmware blob */
2465         if (qla82xx_validate_firmware_blob(vha,
2466                 QLA82XX_FLASH_ROMIMAGE)) {
2467                 /* Fallback to URI format */
2468                 if (qla82xx_validate_firmware_blob(vha,
2469                         QLA82XX_UNIFIED_ROMIMAGE)) {
2470                         ql_log(ql_log_fatal, vha, 0x00a4,
2471                             "No valid firmware image found.\n");
2472                         return QLA_FUNCTION_FAILED;
2473                 }
2474         }
2475
2476         if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2477                 ql_log(ql_log_info, vha, 0x00a5,
2478                     "Firmware loaded successfully from binary blob.\n");
2479                 return QLA_SUCCESS;
2480         } else {
2481                 ql_log(ql_log_fatal, vha, 0x00a6,
2482                     "Firmware load failed for binary blob.\n");
2483                 blob->fw = NULL;
2484                 blob = NULL;
2485                 goto fw_load_failed;
2486         }
2487         return QLA_SUCCESS;
2488
2489 fw_load_failed:
2490         return QLA_FUNCTION_FAILED;
2491 }
2492
2493 int
2494 qla82xx_start_firmware(scsi_qla_host_t *vha)
2495 {
2496         int           pcie_cap;
2497         uint16_t      lnk;
2498         struct qla_hw_data *ha = vha->hw;
2499
2500         /* scrub dma mask expansion register */
2501         qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
2502
2503         /* Put both the PEG CMD and RCV PEG to default state
2504          * of 0 before resetting the hardware
2505          */
2506         qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
2507         qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
2508
2509         /* Overwrite stale initialization register values */
2510         qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
2511         qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2512
2513         if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2514                 ql_log(ql_log_fatal, vha, 0x00a7,
2515                     "Error trying to start fw.\n");
2516                 return QLA_FUNCTION_FAILED;
2517         }
2518
2519         /* Handshake with the card before we register the devices. */
2520         if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2521                 ql_log(ql_log_fatal, vha, 0x00aa,
2522                     "Error during card handshake.\n");
2523                 return QLA_FUNCTION_FAILED;
2524         }
2525
2526         /* Negotiated Link width */
2527         pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
2528         pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
2529         ha->link_width = (lnk >> 4) & 0x3f;
2530
2531         /* Synchronize with Receive peg */
2532         return qla82xx_check_rcvpeg_state(ha);
2533 }
2534
2535 static inline int
2536 qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2537         uint16_t tot_dsds)
2538 {
2539         uint32_t *cur_dsd = NULL;
2540         scsi_qla_host_t *vha;
2541         struct qla_hw_data *ha;
2542         struct scsi_cmnd *cmd;
2543         struct  scatterlist *cur_seg;
2544         uint32_t *dsd_seg;
2545         void *next_dsd;
2546         uint8_t avail_dsds;
2547         uint8_t first_iocb = 1;
2548         uint32_t dsd_list_len;
2549         struct dsd_dma *dsd_ptr;
2550         struct ct6_dsd *ctx;
2551
2552         cmd = sp->cmd;
2553
2554         /* Update entry type to indicate Command Type 6 IOCB */
2555         *((uint32_t *)(&cmd_pkt->entry_type)) =
2556                 __constant_cpu_to_le32(COMMAND_TYPE_6);
2557
2558         /* No data transfer */
2559         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
2560                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
2561                 return 0;
2562         }
2563
2564         vha = sp->fcport->vha;
2565         ha = vha->hw;
2566
2567         /* Set transfer direction */
2568         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2569                 cmd_pkt->control_flags =
2570                     __constant_cpu_to_le16(CF_WRITE_DATA);
2571                 ha->qla_stats.output_bytes += scsi_bufflen(cmd);
2572         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
2573                 cmd_pkt->control_flags =
2574                     __constant_cpu_to_le16(CF_READ_DATA);
2575                 ha->qla_stats.input_bytes += scsi_bufflen(cmd);
2576         }
2577
2578         cur_seg = scsi_sglist(cmd);
2579         ctx = sp->ctx;
2580
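        /*
         * Chain the scatter/gather elements through DSD list buffers taken
         * from the global pool, QLA_DSDS_PER_IOCB entries per list.  The
         * first list is referenced from the IOCB itself; each further list
         * is linked from the spare slot reserved at the end of the previous
         * one (hence the "+ 1" in dsd_list_len).
         */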
2581         while (tot_dsds) {
2582                 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
2583                     QLA_DSDS_PER_IOCB : tot_dsds;
2584                 tot_dsds -= avail_dsds;
2585                 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
2586
2587                 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
2588                     struct dsd_dma, list);
2589                 next_dsd = dsd_ptr->dsd_addr;
2590                 list_del(&dsd_ptr->list);
2591                 ha->gbl_dsd_avail--;
2592                 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
2593                 ctx->dsd_use_cnt++;
2594                 ha->gbl_dsd_inuse++;
2595
2596                 if (first_iocb) {
2597                         first_iocb = 0;
2598                         dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2599                         *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2600                         *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2601                         *dsd_seg++ = cpu_to_le32(dsd_list_len);
2602                 } else {
2603                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2604                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2605                         *cur_dsd++ = cpu_to_le32(dsd_list_len);
2606                 }
2607                 cur_dsd = (uint32_t *)next_dsd;
2608                 while (avail_dsds) {
2609                         dma_addr_t      sle_dma;
2610
2611                         sle_dma = sg_dma_address(cur_seg);
2612                         *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2613                         *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2614                         *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
2615                         cur_seg = sg_next(cur_seg);
2616                         avail_dsds--;
2617                 }
2618         }
2619
2620         /* Null termination */
2621         *cur_dsd++ =  0;
2622         *cur_dsd++ = 0;
2623         *cur_dsd++ = 0;
2624         cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
2625         return 0;
2626 }
2627
2628 /*
2629  * qla82xx_calc_dsd_lists() - Determine the number of DSD lists required
2630  * for Command Type 6.
2631  *
2632  * @dsds: number of data segment descriptors needed
2633  *
2634  * Returns the number of DSD lists needed to store @dsds.
2635  */
2636 inline uint16_t
2637 qla82xx_calc_dsd_lists(uint16_t dsds)
2638 {
2639         uint16_t dsd_lists = 0;
2640
2641         dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
2642         if (dsds % QLA_DSDS_PER_IOCB)
2643                 dsd_lists++;
2644         return dsd_lists;
2645 }
2646
2647 /*
2648  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2649  * @sp: command to send to the ISP
2650  *
2651  * Returns non-zero if a failure occurred, else zero.
2652  */
2653 int
2654 qla82xx_start_scsi(srb_t *sp)
2655 {
2656         int             ret, nseg;
2657         unsigned long   flags;
2658         struct scsi_cmnd *cmd;
2659         uint32_t        *clr_ptr;
2660         uint32_t        index;
2661         uint32_t        handle;
2662         uint16_t        cnt;
2663         uint16_t        req_cnt;
2664         uint16_t        tot_dsds;
2665         struct device_reg_82xx __iomem *reg;
2666         uint32_t dbval;
2667         uint32_t *fcp_dl;
2668         uint8_t additional_cdb_len;
2669         struct ct6_dsd *ctx;
2670         struct scsi_qla_host *vha = sp->fcport->vha;
2671         struct qla_hw_data *ha = vha->hw;
2672         struct req_que *req = NULL;
2673         struct rsp_que *rsp = NULL;
2674         char            tag[2];
2675
2676         /* Setup device pointers. */
2677         ret = 0;
2678         reg = &ha->iobase->isp82;
2679         cmd = sp->cmd;
2680         req = vha->req;
2681         rsp = ha->rsp_q_map[0];
2682
2683         /* So we know we haven't pci_map'ed anything yet */
2684         tot_dsds = 0;
2685
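        /* Build the doorbell value incrementally: start with 0x04 plus the
         * port number; the request queue id and ring index are OR'ed in just
         * before the write-and-verify doorbell update at the end of this
         * routine. */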
2686         dbval = 0x04 | (ha->portnum << 5);
2687
2688         /* Send marker if required */
2689         if (vha->marker_needed != 0) {
2690                 if (qla2x00_marker(vha, req,
2691                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2692                         ql_log(ql_log_warn, vha, 0x300c,
2693                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2694                         return QLA_FUNCTION_FAILED;
2695                 }
2696                 vha->marker_needed = 0;
2697         }
2698
2699         /* Acquire ring specific lock */
2700         spin_lock_irqsave(&ha->hardware_lock, flags);
2701
2702         /* Check for room in outstanding command list. */
2703         handle = req->current_outstanding_cmd;
2704         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2705                 handle++;
2706                 if (handle == MAX_OUTSTANDING_COMMANDS)
2707                         handle = 1;
2708                 if (!req->outstanding_cmds[handle])
2709                         break;
2710         }
2711         if (index == MAX_OUTSTANDING_COMMANDS)
2712                 goto queuing_error;
2713
2714         /* Map the sg table so we have an accurate count of sg entries needed */
2715         if (scsi_sg_count(cmd)) {
2716                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2717                     scsi_sg_count(cmd), cmd->sc_data_direction);
2718                 if (unlikely(!nseg))
2719                         goto queuing_error;
2720         } else
2721                 nseg = 0;
2722
2723         tot_dsds = nseg;
2724
2725         if (tot_dsds > ql2xshiftctondsd) {
2726                 struct cmd_type_6 *cmd_pkt;
2727                 uint16_t more_dsd_lists = 0;
2728                 struct dsd_dma *dsd_ptr;
2729                 uint16_t i;
2730
2731                 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2732                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2733                         ql_dbg(ql_dbg_io, vha, 0x300d,
2734                             "Num of DSD lists %d is more than %d for cmd=%p.\n",
2735                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2736                             cmd);
2737                         goto queuing_error;
2738                 }
2739
2740                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2741                         goto sufficient_dsds;
2742                 else
2743                         more_dsd_lists -= ha->gbl_dsd_avail;
2744
2745                 for (i = 0; i < more_dsd_lists; i++) {
2746                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2747                         if (!dsd_ptr) {
2748                                 ql_log(ql_log_fatal, vha, 0x300e,
2749                                     "Failed to allocate memory for dsd_dma "
2750                                     "for cmd=%p.\n", cmd);
2751                                 goto queuing_error;
2752                         }
2753
2754                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2755                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2756                         if (!dsd_ptr->dsd_addr) {
2757                                 kfree(dsd_ptr);
2758                                 ql_log(ql_log_fatal, vha, 0x300f,
2759                                     "Failed to allocate memory for dsd_addr "
2760                                     "for cmd=%p.\n", cmd);
2761                                 goto queuing_error;
2762                         }
2763                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2764                         ha->gbl_dsd_avail++;
2765                 }
2766
2767 sufficient_dsds:
2768                 req_cnt = 1;
2769
2770                 if (req->cnt < (req_cnt + 2)) {
2771                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2772                                 &reg->req_q_out[0]);
2773                         if (req->ring_index < cnt)
2774                                 req->cnt = cnt - req->ring_index;
2775                         else
2776                                 req->cnt = req->length -
2777                                         (req->ring_index - cnt);
2778                 }
2779
2780                 if (req->cnt < (req_cnt + 2))
2781                         goto queuing_error;
2782
2783                 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2784                 if (!sp->ctx) {
2785                         ql_log(ql_log_fatal, vha, 0x3010,
2786                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2787                         goto queuing_error;
2788                 }
2789                 memset(ctx, 0, sizeof(struct ct6_dsd));
2790                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2791                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2792                 if (!ctx->fcp_cmnd) {
2793                         ql_log(ql_log_fatal, vha, 0x3011,
2794                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2795                         goto queuing_error_fcp_cmnd;
2796                 }
2797
2798                 /* Initialize the DSD list and dma handle */
2799                 INIT_LIST_HEAD(&ctx->dsd_list);
2800                 ctx->dsd_use_cnt = 0;
2801
2802                 if (cmd->cmd_len > 16) {
2803                         additional_cdb_len = cmd->cmd_len - 16;
2804                         if ((cmd->cmd_len % 4) != 0) {
2805                                 /* SCSI command bigger than 16 bytes must be
2806                                  * multiple of 4
2807                                  */
2808                                 ql_log(ql_log_warn, vha, 0x3012,
2809                                     "scsi cmd len %d not multiple of 4 "
2810                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2811                                 goto queuing_error_fcp_cmnd;
2812                         }
2813                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2814                 } else {
2815                         additional_cdb_len = 0;
2816                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2817                 }
2818
2819                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2820                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2821
2822                 /* Zero out remaining portion of packet. */
2823                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2824                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2825                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2826                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2827
2828                 /* Set NPORT-ID and LUN number*/
2829                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2830                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2831                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2832                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2833                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2834
2835                 /* Build IOCB segments */
2836                 if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2837                         goto queuing_error_fcp_cmnd;
2838
2839                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2840                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2841
2842                 /* build FCP_CMND IU */
2843                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2844                 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2845                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2846
2847                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2848                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2849                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2850                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2851
2852                 /*
2853                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2854                  */
2855                 if (scsi_populate_tag_msg(cmd, tag)) {
2856                         switch (tag[0]) {
2857                         case HEAD_OF_QUEUE_TAG:
2858                                 ctx->fcp_cmnd->task_attribute =
2859                                     TSK_HEAD_OF_QUEUE;
2860                                 break;
2861                         case ORDERED_QUEUE_TAG:
2862                                 ctx->fcp_cmnd->task_attribute =
2863                                     TSK_ORDERED;
2864                                 break;
2865                         }
2866                 }
2867
2868                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2869
2870                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2871                     additional_cdb_len);
2872                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2873
2874                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2875                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2876                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2877                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2878                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2879
2880                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2881                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2882                 /* Set total data segment count. */
2883                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2884                 /* Specify response queue number where
2885                  * completion should happen
2886                  */
2887                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2888         } else {
2889                 struct cmd_type_7 *cmd_pkt;
2890                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2891                 if (req->cnt < (req_cnt + 2)) {
2892                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2893                             &reg->req_q_out[0]);
2894                         if (req->ring_index < cnt)
2895                                 req->cnt = cnt - req->ring_index;
2896                         else
2897                                 req->cnt = req->length -
2898                                         (req->ring_index - cnt);
2899                 }
2900                 if (req->cnt < (req_cnt + 2))
2901                         goto queuing_error;
2902
2903                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2904                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2905
2906                 /* Zero out remaining portion of packet. */
2907                 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2908                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2909                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2910                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2911
2912                 /* Set NPORT-ID and LUN number*/
2913                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2914                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2915                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2916                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2917                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2918
2919                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2920                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2921                         sizeof(cmd_pkt->lun));
2922
2923                 /*
2924                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2925                  */
2926                 if (scsi_populate_tag_msg(cmd, tag)) {
2927                         switch (tag[0]) {
2928                         case HEAD_OF_QUEUE_TAG:
2929                                 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2930                                 break;
2931                         case ORDERED_QUEUE_TAG:
2932                                 cmd_pkt->task = TSK_ORDERED;
2933                                 break;
2934                         }
2935                 }
2936
2937                 /* Load SCSI command packet. */
2938                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2939                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2940
2941                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2942
2943                 /* Build IOCB segments */
2944                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2945
2946                 /* Set total data segment count. */
2947                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2948                 /* Specify response queue number where
2949                  * completion should happen.
2950                  */
2951                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2952
2953         }
2954         /* Build command packet. */
2955         req->current_outstanding_cmd = handle;
2956         req->outstanding_cmds[handle] = sp;
2957         sp->handle = handle;
2958         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2959         req->cnt -= req_cnt;
2960         wmb();
2961
2962         /* Adjust ring index. */
2963         req->ring_index++;
2964         if (req->ring_index == req->length) {
2965                 req->ring_index = 0;
2966                 req->ring_ptr = req->ring;
2967         } else
2968                 req->ring_ptr++;
2969
2970         sp->flags |= SRB_DMA_VALID;
2971
2972         /* Set chip new ring index. */
2973         /* write, read and verify logic */
2974         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2975         if (ql2xdbwr)
2976                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2977         else {
2978                 WRT_REG_DWORD(
2979                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2980                         dbval);
2981                 wmb();
2982                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2983                         WRT_REG_DWORD(
2984                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2985                                 dbval);
2986                         wmb();
2987                 }
2988         }
2989
2990         /* Manage unprocessed RIO/ZIO commands in response queue. */
2991         if (vha->flags.process_response_queue &&
2992             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2993                 qla24xx_process_response_queue(vha, rsp);
2994
2995         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2996         return QLA_SUCCESS;
2997
2998 queuing_error_fcp_cmnd:
2999         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3000 queuing_error:
3001         if (tot_dsds)
3002                 scsi_dma_unmap(cmd);
3003
3004         if (sp->ctx) {
3005                 mempool_free(sp->ctx, ha->ctx_mempool);
3006                 sp->ctx = NULL;
3007         }
3008         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3009
3010         return QLA_FUNCTION_FAILED;
3011 }
3012
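/*
 * Read 'length' bytes of flash, one dword at a time, via ROM fast reads
 * into the caller-supplied buffer. Values are stored little-endian.
 * Reading stops at the first failed dword; whatever was read so far is
 * returned.
 */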
3013 static uint32_t *
3014 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
3015         uint32_t length)
3016 {
3017         uint32_t i;
3018         uint32_t val;
3019         struct qla_hw_data *ha = vha->hw;
3020
3021         /* Dword reads to flash. */
3022         for (i = 0; i < length/4; i++, faddr += 4) {
3023                 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
3024                         ql_log(ql_log_warn, vha, 0x0106,
3025                             "ROM fast read failed.\n");
3026                         goto done_read;
3027                 }
3028                 dwptr[i] = cpu_to_le32(val);
3029         }
3030 done_read:
3031         return dwptr;
3032 }
3033
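/*
 * Clear the block-protect bits in the flash status register so the part
 * can be erased/programmed: grab the ROM lock, read-modify-write the
 * status register (restoring the bits if the write fails), then drop
 * write enable and release the lock.
 */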
3034 static int
3035 qla82xx_unprotect_flash(struct qla_hw_data *ha)
3036 {
3037         int ret;
3038         uint32_t val;
3039         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3040
3041         ret = ql82xx_rom_lock_d(ha);
3042         if (ret < 0) {
3043                 ql_log(ql_log_warn, vha, 0xb014,
3044                     "ROM Lock failed.\n");
3045                 return ret;
3046         }
3047
3048         ret = qla82xx_read_status_reg(ha, &val);
3049         if (ret < 0)
3050                 goto done_unprotect;
3051
3052         val &= ~(BLOCK_PROTECT_BITS << 2);
3053         ret = qla82xx_write_status_reg(ha, val);
3054         if (ret < 0) {
3055                 val |= (BLOCK_PROTECT_BITS << 2);
3056                 qla82xx_write_status_reg(ha, val);
3057         }
3058
3059         if (qla82xx_write_disable_flash(ha) != 0)
3060                 ql_log(ql_log_warn, vha, 0xb015,
3061                     "Write disable failed.\n");
3062
3063 done_unprotect:
3064         qla82xx_rom_unlock(ha);
3065         return ret;
3066 }
3067
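/*
 * Re-assert the block-protect bits (lock all sectors) after a flash
 * update, mirroring qla82xx_unprotect_flash(): take the ROM lock, set
 * BLOCK_PROTECT_BITS in the status register, disable writes and unlock.
 */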
3068 static int
3069 qla82xx_protect_flash(struct qla_hw_data *ha)
3070 {
3071         int ret;
3072         uint32_t val;
3073         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3074
3075         ret = ql82xx_rom_lock_d(ha);
3076         if (ret < 0) {
3077                 ql_log(ql_log_warn, vha, 0xb016,
3078                     "ROM Lock failed.\n");
3079                 return ret;
3080         }
3081
3082         ret = qla82xx_read_status_reg(ha, &val);
3083         if (ret < 0)
3084                 goto done_protect;
3085
3086         val |= (BLOCK_PROTECT_BITS << 2);
3087         /* LOCK all sectors */
3088         ret = qla82xx_write_status_reg(ha, val);
3089         if (ret < 0)
3090                 ql_log(ql_log_warn, vha, 0xb017,
3091                     "Write status register failed.\n");
3092
3093         if (qla82xx_write_disable_flash(ha) != 0)
3094                 ql_log(ql_log_warn, vha, 0xb018,
3095                     "Write disable failed.\n");
3096 done_protect:
3097         qla82xx_rom_unlock(ha);
3098         return ret;
3099 }
3100
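/*
 * Erase the flash sector containing 'addr': take the ROM lock, enable
 * writes, issue the M25P sector-erase instruction and wait for both the
 * ROM interface and the flash part to finish before unlocking.
 */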
3101 static int
3102 qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3103 {
3104         int ret = 0;
3105         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3106
3107         ret = ql82xx_rom_lock_d(ha);
3108         if (ret < 0) {
3109                 ql_log(ql_log_warn, vha, 0xb019,
3110                     "ROM Lock failed.\n");
3111                 return ret;
3112         }
3113
3114         qla82xx_flash_set_write_enable(ha);
3115         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
3116         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
3117         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
3118
3119         if (qla82xx_wait_rom_done(ha)) {
3120                 ql_log(ql_log_warn, vha, 0xb01a,
3121                     "Error waiting for rom done.\n");
3122                 ret = -1;
3123                 goto done;
3124         }
3125         ret = qla82xx_flash_wait_write_finish(ha);
3126 done:
3127         qla82xx_rom_unlock(ha);
3128         return ret;
3129 }
3130
3131 /*
3132  * Address and length are in bytes.
3133  */
3134 uint8_t *
3135 qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3136         uint32_t offset, uint32_t length)
3137 {
3138         scsi_block_requests(vha->host);
3139         qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
3140         scsi_unblock_requests(vha->host);
3141         return buf;
3142 }
3143
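/*
 * Program 'dwords' dwords at flash address 'faddr': unprotect the part,
 * erase each sector as its boundary is crossed, burst-write through a
 * DMA buffer via qla2x00_load_ram() when one is available, otherwise
 * program one dword at a time, and finally re-protect the flash.
 */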
3144 static int
3145 qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3146         uint32_t faddr, uint32_t dwords)
3147 {
3148         int ret;
3149         uint32_t liter;
3150         uint32_t sec_mask, rest_addr;
3151         dma_addr_t optrom_dma;
3152         void *optrom = NULL;
3153         int page_mode = 0;
3154         struct qla_hw_data *ha = vha->hw;
3155
3156         ret = -1;
3157
3158         /* Prepare burst-capable write on supported ISPs. */
3159         if (page_mode && !(faddr & 0xfff) &&
3160             dwords > OPTROM_BURST_DWORDS) {
3161                 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
3162                     &optrom_dma, GFP_KERNEL);
3163                 if (!optrom) {
3164                         ql_log(ql_log_warn, vha, 0xb01b,
3165                             "Unable to allocate memory "
3166                             "for optrom burst write (%x KB).\n",
3167                             OPTROM_BURST_SIZE / 1024);
3168                 }
3169         }
3170
3171         rest_addr = ha->fdt_block_size - 1;
3172         sec_mask = ~rest_addr;
3173
3174         ret = qla82xx_unprotect_flash(ha);
3175         if (ret) {
3176                 ql_log(ql_log_warn, vha, 0xb01c,
3177                     "Unable to unprotect flash for update.\n");
3178                 goto write_done;
3179         }
3180
3181         for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3182                 /* Are we at the beginning of a sector? */
3183                 if ((faddr & rest_addr) == 0) {
3184
3185                         ret = qla82xx_erase_sector(ha, faddr);
3186                         if (ret) {
3187                                 ql_log(ql_log_warn, vha, 0xb01d,
3188                                     "Unable to erase sector: address=%x.\n",
3189                                     faddr);
3190                                 break;
3191                         }
3192                 }
3193
3194                 /* Go with burst-write. */
3195                 if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
3196                         /* Copy data to DMA'ble buffer. */
3197                         memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
3198
3199                         ret = qla2x00_load_ram(vha, optrom_dma,
3200                             (ha->flash_data_off | faddr),
3201                             OPTROM_BURST_DWORDS);
3202                         if (ret != QLA_SUCCESS) {
3203                                 ql_log(ql_log_warn, vha, 0xb01e,
3204                                     "Unable to burst-write optrom segment "
3205                                     "(%x/%x/%llx).\n", ret,
3206                                     (ha->flash_data_off | faddr),
3207                                     (unsigned long long)optrom_dma);
3208                                 ql_log(ql_log_warn, vha, 0xb01f,
3209                                     "Reverting to slow-write.\n");
3210
3211                                 dma_free_coherent(&ha->pdev->dev,
3212                                     OPTROM_BURST_SIZE, optrom, optrom_dma);
3213                                 optrom = NULL;
3214                         } else {
3215                                 liter += OPTROM_BURST_DWORDS - 1;
3216                                 faddr += OPTROM_BURST_DWORDS - 1;
3217                                 dwptr += OPTROM_BURST_DWORDS - 1;
3218                                 continue;
3219                         }
3220                 }
3221
3222                 ret = qla82xx_write_flash_dword(ha, faddr,
3223                     cpu_to_le32(*dwptr));
3224                 if (ret) {
3225                         ql_dbg(ql_dbg_p3p, vha, 0xb020,
3226                             "Unable to program flash address=%x data=%x.\n",
3227                             faddr, *dwptr);
3228                         break;
3229                 }
3230         }
3231
3232         ret = qla82xx_protect_flash(ha);
3233         if (ret)
3234                 ql_log(ql_log_warn, vha, 0xb021,
3235                     "Unable to protect flash after update.\n");
3236 write_done:
3237         if (optrom)
3238                 dma_free_coherent(&ha->pdev->dev,
3239                     OPTROM_BURST_SIZE, optrom, optrom_dma);
3240         return ret;
3241 }
3242
3243 int
3244 qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3245         uint32_t offset, uint32_t length)
3246 {
3247         int rval;
3248
3249         /* Suspend HBA. */
3250         scsi_block_requests(vha->host);
3251         rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
3252                 length >> 2);
3253         scsi_unblock_requests(vha->host);
3254
3255         /* Convert ISP82xx return code to generic. */
3256         if (rval)
3257                 rval = QLA_FUNCTION_FAILED;
3258         else
3259                 rval = QLA_SUCCESS;
3260         return rval;
3261 }
3262
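/*
 * Kick the request queue for the ISP82xx: advance the request ring index
 * (wrapping at the end of the ring) and post it to the doorbell, either
 * through the CRB window or with the write/read-back/verify loop on the
 * memory-mapped doorbell registers. The doorbell value combines the 0x04
 * command with the port number (<<5), request queue id (<<8) and the new
 * ring index (<<16).
 */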
3263 void
3264 qla82xx_start_iocbs(srb_t *sp)
3265 {
3266         struct qla_hw_data *ha = sp->fcport->vha->hw;
3267         struct req_que *req = ha->req_q_map[0];
3268         struct device_reg_82xx __iomem *reg;
3269         uint32_t dbval;
3270
3271         /* Adjust ring index. */
3272         req->ring_index++;
3273         if (req->ring_index == req->length) {
3274                 req->ring_index = 0;
3275                 req->ring_ptr = req->ring;
3276         } else
3277                 req->ring_ptr++;
3278
3279         reg = &ha->iobase->isp82;
3280         dbval = 0x04 | (ha->portnum << 5);
3281
3282         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3283         if (ql2xdbwr)
3284                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
3285         else {
3286                 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
3287                 wmb();
3288                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3289                         WRT_REG_DWORD((unsigned long  __iomem *)ha->nxdb_wr_ptr,
3290                                 dbval);
3291                         wmb();
3292                 }
3293         }
3294 }
3295
3296 void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3297 {
3298         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3299
3300         if (qla82xx_rom_lock(ha))
3301                 /* Someone else is holding the lock. */
3302                 ql_log(ql_log_info, vha, 0xb022,
3303                     "Resetting rom_lock.\n");
3304
3305         /*
3306          * Either we got the lock, or someone
3307          * else died while holding it.
3308          * In either case, unlock.
3309          */
3310         qla82xx_rom_unlock(ha);
3311 }
3312
3313 /*
3314  * qla82xx_device_bootstrap
3315  *    Initialize device, set DEV_READY, start fw
3316  *
3317  * Note:
3318  *      IDC lock must be held upon entry
3319  *
3320  * Return:
3321  *    Success : 0
3322  *    Failed  : 1
3323  */
3324 static int
3325 qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3326 {
3327         int rval = QLA_SUCCESS;
3328         int i, timeout;
3329         uint32_t old_count, count;
3330         struct qla_hw_data *ha = vha->hw;
3331         int need_reset = 0, peg_stuck = 1;
3332
3333         need_reset = qla82xx_need_reset(ha);
3334
3335         old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3336
3337         for (i = 0; i < 10; i++) {
3338                 timeout = msleep_interruptible(200);
3339                 if (timeout) {
3340                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3341                                 QLA82XX_DEV_FAILED);
3342                         return QLA_FUNCTION_FAILED;
3343                 }
3344
3345                 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3346                 if (count != old_count)
3347                         peg_stuck = 0;
3348         }
3349
3350         if (need_reset) {
3351                 /* We are trying to perform a recovery here. */
3352                 if (peg_stuck)
3353                         qla82xx_rom_lock_recovery(ha);
3354                 goto dev_initialize;
3355         } else {
3356                 /* Start of day for this ha context. */
3357                 if (peg_stuck) {
3358                         /* Either we are the first or recovery in progress. */
3359                         qla82xx_rom_lock_recovery(ha);
3360                         goto dev_initialize;
3361                 } else
3362                         /* Firmware already running. */
3363                         goto dev_ready;
3364         }
3365
3366         return rval;
3367
3368 dev_initialize:
3369         /* set to DEV_INITIALIZING */
3370         ql_log(ql_log_info, vha, 0x009e,
3371             "HW State: INITIALIZING.\n");
3372         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
3373
3374         /* Driver that sets device state to initializing sets IDC version */
3375         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
3376
3377         qla82xx_idc_unlock(ha);
3378         rval = qla82xx_start_firmware(vha);
3379         qla82xx_idc_lock(ha);
3380
3381         if (rval != QLA_SUCCESS) {
3382                 ql_log(ql_log_fatal, vha, 0x00ad,
3383                     "HW State: FAILED.\n");
3384                 qla82xx_clear_drv_active(ha);
3385                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
3386                 return rval;
3387         }
3388
3389 dev_ready:
3390         ql_log(ql_log_info, vha, 0x00ae,
3391             "HW State: READY.\n");
3392         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
3393
3394         return QLA_SUCCESS;
3395 }
3396
3397 /*
3398  * qla82xx_need_qsnt_handler
3399  *    Code to start quiescence sequence
3400  *
3401  * Note:
3402  *      IDC lock must be held upon entry
3403  *
3404  * Return: void
3405  */
3406
3407 static void
3408 qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3409 {
3410         struct qla_hw_data *ha = vha->hw;
3411         uint32_t dev_state, drv_state, drv_active;
3412         unsigned long reset_timeout;
3413
3414         if (vha->flags.online) {
3415                 /* Block any further I/O and wait for pending commands to complete. */
3416                 qla82xx_quiescent_state_cleanup(vha);
3417         }
3418
3419         /* Set the quiescence ready bit */
3420         qla82xx_set_qsnt_ready(ha);
3421
3422         /* Wait up to 30 seconds for other functions to ack. */
3423         reset_timeout = jiffies + (30 * HZ);
3424
3425         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3426         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3427         /* The ack is recorded as 2, so shift drv_active one bit to line up with drv_state. */
3428         drv_active = drv_active << 0x01;
3429
3430         while (drv_state != drv_active) {
3431
3432                 if (time_after_eq(jiffies, reset_timeout)) {
3433                         /* Quiescence timeout: other functions didn't ack,
3434                          * so change the state back to DEV_READY.
3435                          */
3436                         ql_log(ql_log_info, vha, 0xb023,
3437                             "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
3438                         ql_log(ql_log_info, vha, 0xb024,
3439                             "DRV_ACTIVE:%d DRV_STATE:%d.\n",
3440                             drv_active, drv_state);
3441                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3442                             QLA82XX_DEV_READY);
3443                         ql_log(ql_log_info, vha, 0xb025,
3444                             "HW State: DEV_READY.\n");
3445                         qla82xx_idc_unlock(ha);
3446                         qla2x00_perform_loop_resync(vha);
3447                         qla82xx_idc_lock(ha);
3448
3449                         qla82xx_clear_qsnt_ready(vha);
3450                         return;
3451                 }
3452
3453                 qla82xx_idc_unlock(ha);
3454                 msleep(1000);
3455                 qla82xx_idc_lock(ha);
3456
3457                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3458                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3459                 drv_active = drv_active << 0x01;
3460         }
3461         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3462         /* Everyone acked, so set the state to DEV_QUIESCENT. */
3463         if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
3464                 ql_log(ql_log_info, vha, 0xb026,
3465                     "HW State: DEV_QUIESCENT.\n");
3466                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
3467         }
3468 }
3469
3470 /*
3471  * qla82xx_wait_for_state_change
3472  *    Wait for device state to change from given current state
3473  *
3474  * Note:
3475  *     IDC lock must not be held upon entry
3476  *
3477  * Return:
3478  *    Changed device state.
3479  */
3480 uint32_t
3481 qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
3482 {
3483         struct qla_hw_data *ha = vha->hw;
3484         uint32_t dev_state;
3485
3486         do {
3487                 msleep(1000);
3488                 qla82xx_idc_lock(ha);
3489                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3490                 qla82xx_idc_unlock(ha);
3491         } while (dev_state == curr_state);
3492
3493         return dev_state;
3494 }
3495
3496 static void
3497 qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3498 {
3499         struct qla_hw_data *ha = vha->hw;
3500
3501         /* Disable the board */
3502         ql_log(ql_log_fatal, vha, 0x00b8,
3503             "Disabling the board.\n");
3504
3505         qla82xx_idc_lock(ha);
3506         qla82xx_clear_drv_active(ha);
3507         qla82xx_idc_unlock(ha);
3508
3509         /* Set DEV_FAILED flag to disable timer */
3510         vha->device_flags |= DFLG_DEV_FAILED;
3511         qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3512         qla2x00_mark_all_devices_lost(vha, 0);
3513         vha->flags.online = 0;
3514         vha->flags.init_done = 0;
3515 }
3516
3517 /*
3518  * qla82xx_need_reset_handler
3519  *    Code to start reset sequence
3520  *
3521  * Note:
3522  *      IDC lock must be held upon entry
3523  *
3524  * Return: void
3527  */
3528 static void
3529 qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3530 {
3531         uint32_t dev_state, drv_state, drv_active;
3532         unsigned long reset_timeout;
3533         struct qla_hw_data *ha = vha->hw;
3534         struct req_que *req = ha->req_q_map[0];
3535
3536         if (vha->flags.online) {
3537                 qla82xx_idc_unlock(ha);
3538                 qla2x00_abort_isp_cleanup(vha);
3539                 ha->isp_ops->get_flash_version(vha, req->ring);
3540                 ha->isp_ops->nvram_config(vha);
3541                 qla82xx_idc_lock(ha);
3542         }
3543
3544         qla82xx_set_rst_ready(ha);
3545
3546         /* Wait nx_reset_timeout seconds for reset ack from all functions. */
3547         reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
3548
3549         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3550         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3551
3552         while (drv_state != drv_active) {
3553                 if (time_after_eq(jiffies, reset_timeout)) {
3554                         ql_log(ql_log_warn, vha, 0x00b5,
3555                             "Reset timeout.\n");
3556                         break;
3557                 }
3558                 qla82xx_idc_unlock(ha);
3559                 msleep(1000);
3560                 qla82xx_idc_lock(ha);
3561                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3562                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3563         }
3564
3565         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3566         ql_log(ql_log_info, vha, 0x00b6,
3567             "Device state is 0x%x = %s.\n",
3568             dev_state,
3569             dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3570
3571         /* Force to DEV_COLD unless someone else is starting a reset */
3572         if (dev_state != QLA82XX_DEV_INITIALIZING) {
3573                 ql_log(ql_log_info, vha, 0x00b7,
3574                     "HW State: COLD/RE-INIT.\n");
3575                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3576         }
3577 }
3578
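/*
 * Poll the firmware heartbeat counter. A reading of all 0xff is treated
 * as AER/EEH in progress and ignored. If the counter has not moved for
 * two consecutive calls the firmware is considered hung and 1 is
 * returned; otherwise 0.
 */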
3579 int
3580 qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3581 {
3582         uint32_t fw_heartbeat_counter;
3583         int status = 0;
3584
3585         fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
3586                 QLA82XX_PEG_ALIVE_COUNTER);
3587         /* all 0xff, assume AER/EEH in progress, ignore */
3588         if (fw_heartbeat_counter == 0xffffffff) {
3589                 ql_dbg(ql_dbg_timer, vha, 0x6003,
3590                     "FW heartbeat counter is 0xffffffff, "
3591                     "returning status=%d.\n", status);
3592                 return status;
3593         }
3594         if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3595                 vha->seconds_since_last_heartbeat++;
3596                 /* FW not alive after 2 seconds */
3597                 if (vha->seconds_since_last_heartbeat == 2) {
3598                         vha->seconds_since_last_heartbeat = 0;
3599                         status = 1;
3600                 }
3601         } else
3602                 vha->seconds_since_last_heartbeat = 0;
3603         vha->fw_heartbeat_counter = fw_heartbeat_counter;
3604         if (status)
3605                 ql_dbg(ql_dbg_timer, vha, 0x6004,
3606                     "Returning status=%d.\n", status);
3607         return status;
3608 }
3609
3610 /*
3611  * qla82xx_device_state_handler
3612  *      Main state handler
3613  *
3614  * Note:
3615  *      IDC lock must be held upon entry
3616  *
3617  * Return:
3618  *    Success : 0
3619  *    Failed  : 1
3620  */
3621 int
3622 qla82xx_device_state_handler(scsi_qla_host_t *vha)
3623 {
3624         uint32_t dev_state;
3625         uint32_t old_dev_state;
3626         int rval = QLA_SUCCESS;
3627         unsigned long dev_init_timeout;
3628         struct qla_hw_data *ha = vha->hw;
3629         int loopcount = 0;
3630
3631         qla82xx_idc_lock(ha);
3632         if (!vha->flags.init_done)
3633                 qla82xx_set_drv_active(vha);
3634
3635         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3636         old_dev_state = dev_state;
3637         ql_log(ql_log_info, vha, 0x009b,
3638             "Device state is 0x%x = %s.\n",
3639             dev_state,
3640             dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3641
3642         /* Wait nx_dev_init_timeout seconds for the device to go ready. */
3643         dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
3644
3645         while (1) {
3646
3647                 if (time_after_eq(jiffies, dev_init_timeout)) {
3648                         ql_log(ql_log_fatal, vha, 0x009c,
3649                             "Device init failed.\n");
3650                         rval = QLA_FUNCTION_FAILED;
3651                         break;
3652                 }
3653                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3654                 if (old_dev_state != dev_state) {
3655                         loopcount = 0;
3656                         old_dev_state = dev_state;
3657                 }
3658                 if (loopcount < 5) {
3659                         ql_log(ql_log_info, vha, 0x009d,
3660                             "Device state is 0x%x = %s.\n",
3661                             dev_state,
3662                             dev_state < MAX_STATES ? qdev_state[dev_state] :
3663                             "Unknown");
3664                 }
3665
3666                 switch (dev_state) {
3667                 case QLA82XX_DEV_READY:
3668                         goto exit;
3669                 case QLA82XX_DEV_COLD:
3670                         rval = qla82xx_device_bootstrap(vha);
3671                         goto exit;
3672                 case QLA82XX_DEV_INITIALIZING:
3673                         qla82xx_idc_unlock(ha);
3674                         msleep(1000);
3675                         qla82xx_idc_lock(ha);
3676                         break;
3677                 case QLA82XX_DEV_NEED_RESET:
3678                         if (!ql2xdontresethba)
3679                                 qla82xx_need_reset_handler(vha);
3680                         dev_init_timeout = jiffies +
3681                                 (ha->nx_dev_init_timeout * HZ);
3682                         break;
3683                 case QLA82XX_DEV_NEED_QUIESCENT:
3684                         qla82xx_need_qsnt_handler(vha);
3685                         /* Reset timeout value after quiescence handler */
3686                         dev_init_timeout = jiffies +
3687                                 (ha->nx_dev_init_timeout * HZ);
3688                         break;
3689                 case QLA82XX_DEV_QUIESCENT:
3690                         /* The owner will exit and others will wait for the state
3691                          * to change.
3692                          */
3693                         if (ha->flags.quiesce_owner)
3694                                 goto exit;
3695
3696                         qla82xx_idc_unlock(ha);
3697                         msleep(1000);
3698                         qla82xx_idc_lock(ha);
3699
3700                         /* Reset timeout value after quiescence handler */
3701                         dev_init_timeout = jiffies +
3702                                 (ha->nx_dev_init_timeout * HZ);
3703                         break;
3704                 case QLA82XX_DEV_FAILED:
3705                         qla82xx_dev_failed_handler(vha);
3706                         rval = QLA_FUNCTION_FAILED;
3707                         goto exit;
3708                 default:
3709                         qla82xx_idc_unlock(ha);
3710                         msleep(1000);
3711                         qla82xx_idc_lock(ha);
3712                 }
3713                 loopcount++;
3714         }
3715 exit:
3716         qla82xx_idc_unlock(ha);
3717         return rval;
3718 }
3719
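/*
 * Timer-driven watchdog for the ISP82xx. While no reset handler is
 * active it inspects the IDC device state, scheduling an ISP abort or
 * quiescence through the DPC thread as requested, and otherwise checks
 * the firmware heartbeat, dumping the PEG halt status/PC registers and
 * flagging an unrecoverable or abort condition when the firmware hangs.
 */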
3720 void qla82xx_watchdog(scsi_qla_host_t *vha)
3721 {
3722         uint32_t dev_state, halt_status;
3723         struct qla_hw_data *ha = vha->hw;
3724
3725         /* don't poll if reset is going on */
3726         if (!ha->flags.isp82xx_reset_hdlr_active) {
3727                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3728                 if (dev_state == QLA82XX_DEV_NEED_RESET &&
3729                     !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3730                         ql_log(ql_log_warn, vha, 0x6001,
3731                             "Adapter reset needed.\n");
3732                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3733                         qla2xxx_wake_dpc(vha);
3734                 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
3735                         !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3736                         ql_log(ql_log_warn, vha, 0x6002,
3737                             "Quiescent needed.\n");
3738                         set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3739                         qla2xxx_wake_dpc(vha);
3740                 } else {
3741                         if (qla82xx_check_fw_alive(vha)) {
3742                                 halt_status = qla82xx_rd_32(ha,
3743                                     QLA82XX_PEG_HALT_STATUS1);
3744                                 ql_dbg(ql_dbg_timer, vha, 0x6005,
3745                                     "dumping hw/fw registers:\n"
3746                                     " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
3747                                     " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
3748                                     " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
3749                                     " PEG_NET_4_PC: 0x%x.\n", halt_status,
3750                                     qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
3751                                     qla82xx_rd_32(ha,
3752                                             QLA82XX_CRB_PEG_NET_0 + 0x3c),
3753                                     qla82xx_rd_32(ha,
3754                                             QLA82XX_CRB_PEG_NET_1 + 0x3c),
3755                                     qla82xx_rd_32(ha,
3756                                             QLA82XX_CRB_PEG_NET_2 + 0x3c),
3757                                     qla82xx_rd_32(ha,
3758                                             QLA82XX_CRB_PEG_NET_3 + 0x3c),
3759                                     qla82xx_rd_32(ha,
3760                                             QLA82XX_CRB_PEG_NET_4 + 0x3c));
3761                                 if (halt_status & HALT_STATUS_UNRECOVERABLE) {
3762                                         set_bit(ISP_UNRECOVERABLE,
3763                                             &vha->dpc_flags);
3764                                 } else {
3765                                         ql_log(ql_log_info, vha, 0x6006,
3766                                             "Detected abort needed.\n");
3767                                         set_bit(ISP_ABORT_NEEDED,
3768                                             &vha->dpc_flags);
3769                                 }
3770                                 qla2xxx_wake_dpc(vha);
3771                                 ha->flags.isp82xx_fw_hung = 1;
3772                                 if (ha->flags.mbox_busy) {
3773                                         ha->flags.mbox_int = 1;
3774                                         ql_log(ql_log_warn, vha, 0x6007,
3775                                             "Firmware hung, doing "
3776                                             "premature completion of mbx "
3777                                             "command.\n");
3778                                         if (test_bit(MBX_INTR_WAIT,
3779                                             &ha->mbx_cmd_flags))
3780                                                 complete(&ha->mbx_intr_comp);
3781                                 }
3782                         }
3783                 }
3784         }
3785 }
3786
3787 int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3788 {
3789         int rval;
3790         rval = qla82xx_device_state_handler(vha);
3791         return rval;
3792 }
3793
3794 /*
3795  *  qla82xx_abort_isp
3796  *      Resets ISP and aborts all outstanding commands.
3797  *
3798  * Input:
3799  *      ha           = adapter block pointer.
3800  *
3801  * Returns:
3802  *      0 = success
3803  */
3804 int
3805 qla82xx_abort_isp(scsi_qla_host_t *vha)
3806 {
3807         int rval;
3808         struct qla_hw_data *ha = vha->hw;
3809         uint32_t dev_state;
3810
3811         if (vha->device_flags & DFLG_DEV_FAILED) {
3812                 ql_log(ql_log_warn, vha, 0x8024,
3813                     "Device in failed state, exiting.\n");
3814                 return QLA_SUCCESS;
3815         }
3816         ha->flags.isp82xx_reset_hdlr_active = 1;
3817
3818         qla82xx_idc_lock(ha);
3819         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3820         if (dev_state == QLA82XX_DEV_READY) {
3821                 ql_log(ql_log_info, vha, 0x8025,
3822                     "HW State: NEED RESET.\n");
3823                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3824                         QLA82XX_DEV_NEED_RESET);
3825         } else
3826                 ql_log(ql_log_info, vha, 0x8026,
3827                     "Hw State: %s.\n", dev_state < MAX_STATES ?
3828                     qdev_state[dev_state] : "Unknown");
3829         qla82xx_idc_unlock(ha);
3830
3831         rval = qla82xx_device_state_handler(vha);
3832
3833         qla82xx_idc_lock(ha);
3834         qla82xx_clear_rst_ready(ha);
3835         qla82xx_idc_unlock(ha);
3836
3837         if (rval == QLA_SUCCESS) {
3838                 ha->flags.isp82xx_fw_hung = 0;
3839                 ha->flags.isp82xx_reset_hdlr_active = 0;
3840                 qla82xx_restart_isp(vha);
3841         }
3842
3843         if (rval) {
3844                 vha->flags.online = 1;
3845                 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3846                         if (ha->isp_abort_cnt == 0) {
3847                                 ql_log(ql_log_warn, vha, 0x8027,
3848                                     "ISP error recover failed - board "
3849                                     "disabled.\n");
3850                                 /*
3851                                  * The next call disables the board
3852                                  * completely.
3853                                  */
3854                                 ha->isp_ops->reset_adapter(vha);
3855                                 vha->flags.online = 0;
3856                                 clear_bit(ISP_ABORT_RETRY,
3857                                     &vha->dpc_flags);
3858                                 rval = QLA_SUCCESS;
3859                         } else { /* schedule another ISP abort */
3860                                 ha->isp_abort_cnt--;
3861                                 ql_log(ql_log_warn, vha, 0x8036,
3862                                     "ISP abort - retry remaining %d.\n",
3863                                     ha->isp_abort_cnt);
3864                                 rval = QLA_FUNCTION_FAILED;
3865                         }
3866                 } else {
3867                         ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3868                         ql_dbg(ql_dbg_taskm, vha, 0x8029,
3869                             "ISP error recovery - retrying (%d) more times.\n",
3870                             ha->isp_abort_cnt);
3871                         set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3872                         rval = QLA_FUNCTION_FAILED;
3873                 }
3874         }
3875         return rval;
3876 }
3877
3878 /*
3879  *  qla82xx_fcoe_ctx_reset
3880  *      Perform a quick reset and aborts all outstanding commands.
3881  *      This will only perform an FCoE context reset and avoids a full blown
3882  *      chip reset.
3883  *
3884  * Input:
3885  *      vha = adapter block pointer.
3887  *
3888  * Returns:
3889  *      0 = success
3890  */
3891 int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
3892 {
3893         int rval = QLA_FUNCTION_FAILED;
3894
3895         if (vha->flags.online) {
3896                 /* Abort all outstanding commands so they can be requeued later */
3897                 qla2x00_abort_isp_cleanup(vha);
3898         }
3899
3900         /* Stop currently executing firmware.
3901          * This will destroy existing FCoE context at the F/W end.
3902          */
3903         qla2x00_try_to_stop_firmware(vha);
3904
3905         /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
3906         rval = qla82xx_restart_isp(vha);
3907
3908         return rval;
3909 }
3910
3911 /*
3912  * qla2x00_wait_for_fcoe_ctx_reset
3913  *    Wait till the FCoE context is reset.
3914  *
3915  * Note:
3916  *    Does context switching here.
3917  *    Release SPIN_LOCK (if any) before calling this routine.
3918  *
3919  * Return:
3920  *    Success (fcoe_ctx reset is done) : 0
3921  *    Failed  (fcoe_ctx reset not completed within max loop timeout) : 1
3922  */
3923 int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
3924 {
3925         int status = QLA_FUNCTION_FAILED;
3926         unsigned long wait_reset;
3927
3928         wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
3929         while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3930             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
3931             && time_before(jiffies, wait_reset)) {
3932
3933                 set_current_state(TASK_UNINTERRUPTIBLE);
3934                 schedule_timeout(HZ);
3935
3936                 if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
3937                     !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3938                         status = QLA_SUCCESS;
3939                         break;
3940                 }
3941         }
3942         ql_dbg(ql_dbg_p3p, vha, 0xb027,
3943             "%s status=%d.\n", __func__, status);
3944
3945         return status;
3946 }
3947
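/*
 * Pre-reset cleanup for the ISP82xx. Confirm whether the firmware is
 * still alive (completing any mailbox command stuck on a hung firmware)
 * and, if it is alive, abort each outstanding command via the mailbox
 * interface and wait for pending commands to drain before the chip is
 * reset.
 */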
3948 void
3949 qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3950 {
3951         int i;
3952         unsigned long flags;
3953         struct qla_hw_data *ha = vha->hw;
3954
3955         /* Check if 82XX firmware is alive or not
3956          * We may have arrived here from NEED_RESET
3957          * detection only
3958          */
3959         if (!ha->flags.isp82xx_fw_hung) {
3960                 for (i = 0; i < 2; i++) {
3961                         msleep(1000);
3962                         if (qla82xx_check_fw_alive(vha)) {
3963                                 ha->flags.isp82xx_fw_hung = 1;
3964                                 if (ha->flags.mbox_busy) {
3965                                         ha->flags.mbox_int = 1;
3966                                         complete(&ha->mbx_intr_comp);
3967                                 }
3968                                 break;
3969                         }
3970                 }
3971         }
3972         ql_dbg(ql_dbg_init, vha, 0x00b0,
3973             "Entered %s fw_hung=%d.\n",
3974             __func__, ha->flags.isp82xx_fw_hung);
3975
3976         /* Abort all commands gracefully if fw NOT hung */
3977         if (!ha->flags.isp82xx_fw_hung) {
3978                 int cnt, que;
3979                 srb_t *sp;
3980                 struct req_que *req;
3981
3982                 spin_lock_irqsave(&ha->hardware_lock, flags);
3983                 for (que = 0; que < ha->max_req_queues; que++) {
3984                         req = ha->req_q_map[que];
3985                         if (!req)
3986                                 continue;
3987                         for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3988                                 sp = req->outstanding_cmds[cnt];
3989                                 if (sp) {
3990                                         if (!sp->ctx ||
3991                                             (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
3992                                                 spin_unlock_irqrestore(
3993                                                     &ha->hardware_lock, flags);
3994                                                 if (ha->isp_ops->abort_command(sp)) {
3995                                                         ql_log(ql_log_info, vha,
3996                                                             0x00b1,
3997                                                             "mbx abort failed.\n");
3998                                                 } else {
3999                                                         ql_log(ql_log_info, vha,
4000                                                             0x00b2,
4001                                                             "mbx abort success.\n");
4002                                                 }
4003                                                 spin_lock_irqsave(&ha->hardware_lock, flags);
4004                                         }
4005                                 }
4006                         }
4007                 }
4008                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4009
4010                 /* Wait for pending cmds (physical and virtual) to complete */
4011                 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
4012                     WAIT_HOST) == QLA_SUCCESS) {
4013                         ql_dbg(ql_dbg_init, vha, 0x00b3,
4014                             "Done wait for "
4015                             "pending commands.\n");
4016                 }
4017         }
4018 }