[SCSI] qla2xxx: Add host number in reset and quiescent message logs.
[pandora-kernel.git] / drivers / scsi / qla2xxx / qla_nx.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include <linux/delay.h>
9 #include <linux/pci.h>
10 #include <scsi/scsi_tcq.h>
11
12 #define MASK(n)                 ((1ULL<<(n))-1)
13 #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
14         ((addr >> 25) & 0x3ff))
15 #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
16         ((addr >> 25) & 0x3ff))
17 #define MS_WIN(addr) (addr & 0x0ffc0000)
18 #define QLA82XX_PCI_MN_2M   (0)
19 #define QLA82XX_PCI_MS_2M   (0x80000)
20 #define QLA82XX_PCI_OCM0_2M (0xc0000)
21 #define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
22 #define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
23 #define BLOCK_PROTECT_BITS 0x0F
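/*
 * Informational note: MASK(18) evaluates to 0x3ffff, so GET_MEM_OFFS_2M()
 * keeps the low 18 bits of an address (e.g. GET_MEM_OFFS_2M(0x12345678)
 * is 0x05678); the remaining upper bits are folded by MN_WIN()/OCM_WIN()/
 * MS_WIN() into the window value that qla82xx_pci_set_window() programs
 * into the corresponding window register.
 */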
24
25 /* CRB window related */
26 #define CRB_BLK(off)    ((off >> 20) & 0x3f)
27 #define CRB_SUBBLK(off) ((off >> 16) & 0xf)
28 #define CRB_WINDOW_2M   (0x130060)
29 #define QLA82XX_PCI_CAMQM_2M_END        (0x04800800UL)
30 #define CRB_HI(off)     ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
31                         ((off) & 0xf0000))
32 #define QLA82XX_PCI_CAMQM_2M_BASE       (0x000ff800UL)
33 #define CRB_INDIRECT_2M (0x1e0000UL)
34
35 #define MAX_CRB_XFORM 60
36 static unsigned long crb_addr_xform[MAX_CRB_XFORM];
37 int qla82xx_crb_table_initialized;
38
39 #define qla82xx_crb_addr_transform(name) \
40         (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
41         QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
42
43 static void qla82xx_crb_addr_transform_setup(void)
44 {
45         qla82xx_crb_addr_transform(XDMA);
46         qla82xx_crb_addr_transform(TIMR);
47         qla82xx_crb_addr_transform(SRE);
48         qla82xx_crb_addr_transform(SQN3);
49         qla82xx_crb_addr_transform(SQN2);
50         qla82xx_crb_addr_transform(SQN1);
51         qla82xx_crb_addr_transform(SQN0);
52         qla82xx_crb_addr_transform(SQS3);
53         qla82xx_crb_addr_transform(SQS2);
54         qla82xx_crb_addr_transform(SQS1);
55         qla82xx_crb_addr_transform(SQS0);
56         qla82xx_crb_addr_transform(RPMX7);
57         qla82xx_crb_addr_transform(RPMX6);
58         qla82xx_crb_addr_transform(RPMX5);
59         qla82xx_crb_addr_transform(RPMX4);
60         qla82xx_crb_addr_transform(RPMX3);
61         qla82xx_crb_addr_transform(RPMX2);
62         qla82xx_crb_addr_transform(RPMX1);
63         qla82xx_crb_addr_transform(RPMX0);
64         qla82xx_crb_addr_transform(ROMUSB);
65         qla82xx_crb_addr_transform(SN);
66         qla82xx_crb_addr_transform(QMN);
67         qla82xx_crb_addr_transform(QMS);
68         qla82xx_crb_addr_transform(PGNI);
69         qla82xx_crb_addr_transform(PGND);
70         qla82xx_crb_addr_transform(PGN3);
71         qla82xx_crb_addr_transform(PGN2);
72         qla82xx_crb_addr_transform(PGN1);
73         qla82xx_crb_addr_transform(PGN0);
74         qla82xx_crb_addr_transform(PGSI);
75         qla82xx_crb_addr_transform(PGSD);
76         qla82xx_crb_addr_transform(PGS3);
77         qla82xx_crb_addr_transform(PGS2);
78         qla82xx_crb_addr_transform(PGS1);
79         qla82xx_crb_addr_transform(PGS0);
80         qla82xx_crb_addr_transform(PS);
81         qla82xx_crb_addr_transform(PH);
82         qla82xx_crb_addr_transform(NIU);
83         qla82xx_crb_addr_transform(I2Q);
84         qla82xx_crb_addr_transform(EG);
85         qla82xx_crb_addr_transform(MN);
86         qla82xx_crb_addr_transform(MS);
87         qla82xx_crb_addr_transform(CAS2);
88         qla82xx_crb_addr_transform(CAS1);
89         qla82xx_crb_addr_transform(CAS0);
90         qla82xx_crb_addr_transform(CAM);
91         qla82xx_crb_addr_transform(C2C1);
92         qla82xx_crb_addr_transform(C2C0);
93         qla82xx_crb_addr_transform(SMB);
94         qla82xx_crb_addr_transform(OCM0);
95         /*
96          * Used only in P3; define it for P2 as well.
97          */
98         qla82xx_crb_addr_transform(I2C0);
99
100         qla82xx_crb_table_initialized = 1;
101 }
102
103 struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
104         {{{0, 0,         0,         0} } },
105         {{{1, 0x0100000, 0x0102000, 0x120000},
106         {1, 0x0110000, 0x0120000, 0x130000},
107         {1, 0x0120000, 0x0122000, 0x124000},
108         {1, 0x0130000, 0x0132000, 0x126000},
109         {1, 0x0140000, 0x0142000, 0x128000},
110         {1, 0x0150000, 0x0152000, 0x12a000},
111         {1, 0x0160000, 0x0170000, 0x110000},
112         {1, 0x0170000, 0x0172000, 0x12e000},
113         {0, 0x0000000, 0x0000000, 0x000000},
114         {0, 0x0000000, 0x0000000, 0x000000},
115         {0, 0x0000000, 0x0000000, 0x000000},
116         {0, 0x0000000, 0x0000000, 0x000000},
117         {0, 0x0000000, 0x0000000, 0x000000},
118         {0, 0x0000000, 0x0000000, 0x000000},
119         {1, 0x01e0000, 0x01e0800, 0x122000},
120         {0, 0x0000000, 0x0000000, 0x000000} } },
121         {{{1, 0x0200000, 0x0210000, 0x180000} } },
122         {{{0, 0,         0,         0} } },
123         {{{1, 0x0400000, 0x0401000, 0x169000} } },
124         {{{1, 0x0500000, 0x0510000, 0x140000} } },
125         {{{1, 0x0600000, 0x0610000, 0x1c0000} } },
126         {{{1, 0x0700000, 0x0704000, 0x1b8000} } },
127         {{{1, 0x0800000, 0x0802000, 0x170000},
128         {0, 0x0000000, 0x0000000, 0x000000},
129         {0, 0x0000000, 0x0000000, 0x000000},
130         {0, 0x0000000, 0x0000000, 0x000000},
131         {0, 0x0000000, 0x0000000, 0x000000},
132         {0, 0x0000000, 0x0000000, 0x000000},
133         {0, 0x0000000, 0x0000000, 0x000000},
134         {0, 0x0000000, 0x0000000, 0x000000},
135         {0, 0x0000000, 0x0000000, 0x000000},
136         {0, 0x0000000, 0x0000000, 0x000000},
137         {0, 0x0000000, 0x0000000, 0x000000},
138         {0, 0x0000000, 0x0000000, 0x000000},
139         {0, 0x0000000, 0x0000000, 0x000000},
140         {0, 0x0000000, 0x0000000, 0x000000},
141         {0, 0x0000000, 0x0000000, 0x000000},
142         {1, 0x08f0000, 0x08f2000, 0x172000} } },
143         {{{1, 0x0900000, 0x0902000, 0x174000},
144         {0, 0x0000000, 0x0000000, 0x000000},
145         {0, 0x0000000, 0x0000000, 0x000000},
146         {0, 0x0000000, 0x0000000, 0x000000},
147         {0, 0x0000000, 0x0000000, 0x000000},
148         {0, 0x0000000, 0x0000000, 0x000000},
149         {0, 0x0000000, 0x0000000, 0x000000},
150         {0, 0x0000000, 0x0000000, 0x000000},
151         {0, 0x0000000, 0x0000000, 0x000000},
152         {0, 0x0000000, 0x0000000, 0x000000},
153         {0, 0x0000000, 0x0000000, 0x000000},
154         {0, 0x0000000, 0x0000000, 0x000000},
155         {0, 0x0000000, 0x0000000, 0x000000},
156         {0, 0x0000000, 0x0000000, 0x000000},
157         {0, 0x0000000, 0x0000000, 0x000000},
158         {1, 0x09f0000, 0x09f2000, 0x176000} } },
159         {{{0, 0x0a00000, 0x0a02000, 0x178000},
160         {0, 0x0000000, 0x0000000, 0x000000},
161         {0, 0x0000000, 0x0000000, 0x000000},
162         {0, 0x0000000, 0x0000000, 0x000000},
163         {0, 0x0000000, 0x0000000, 0x000000},
164         {0, 0x0000000, 0x0000000, 0x000000},
165         {0, 0x0000000, 0x0000000, 0x000000},
166         {0, 0x0000000, 0x0000000, 0x000000},
167         {0, 0x0000000, 0x0000000, 0x000000},
168         {0, 0x0000000, 0x0000000, 0x000000},
169         {0, 0x0000000, 0x0000000, 0x000000},
170         {0, 0x0000000, 0x0000000, 0x000000},
171         {0, 0x0000000, 0x0000000, 0x000000},
172         {0, 0x0000000, 0x0000000, 0x000000},
173         {0, 0x0000000, 0x0000000, 0x000000},
174         {1, 0x0af0000, 0x0af2000, 0x17a000} } },
175         {{{0, 0x0b00000, 0x0b02000, 0x17c000},
176         {0, 0x0000000, 0x0000000, 0x000000},
177         {0, 0x0000000, 0x0000000, 0x000000},
178         {0, 0x0000000, 0x0000000, 0x000000},
179         {0, 0x0000000, 0x0000000, 0x000000},
180         {0, 0x0000000, 0x0000000, 0x000000},
181         {0, 0x0000000, 0x0000000, 0x000000},
182         {0, 0x0000000, 0x0000000, 0x000000},
183         {0, 0x0000000, 0x0000000, 0x000000},
184         {0, 0x0000000, 0x0000000, 0x000000},
185         {0, 0x0000000, 0x0000000, 0x000000},
186         {0, 0x0000000, 0x0000000, 0x000000},
187         {0, 0x0000000, 0x0000000, 0x000000},
188         {0, 0x0000000, 0x0000000, 0x000000},
189         {0, 0x0000000, 0x0000000, 0x000000},
190         {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
191         {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
192         {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
193         {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
194         {{{1, 0x0f00000, 0x0f01000, 0x164000} } },
195         {{{0, 0x1000000, 0x1004000, 0x1a8000} } },
196         {{{1, 0x1100000, 0x1101000, 0x160000} } },
197         {{{1, 0x1200000, 0x1201000, 0x161000} } },
198         {{{1, 0x1300000, 0x1301000, 0x162000} } },
199         {{{1, 0x1400000, 0x1401000, 0x163000} } },
200         {{{1, 0x1500000, 0x1501000, 0x165000} } },
201         {{{1, 0x1600000, 0x1601000, 0x166000} } },
202         {{{0, 0,         0,         0} } },
203         {{{0, 0,         0,         0} } },
204         {{{0, 0,         0,         0} } },
205         {{{0, 0,         0,         0} } },
206         {{{0, 0,         0,         0} } },
207         {{{0, 0,         0,         0} } },
208         {{{1, 0x1d00000, 0x1d10000, 0x190000} } },
209         {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
210         {{{1, 0x1f00000, 0x1f10000, 0x150000} } },
211         {{{0} } },
212         {{{1, 0x2100000, 0x2102000, 0x120000},
213         {1, 0x2110000, 0x2120000, 0x130000},
214         {1, 0x2120000, 0x2122000, 0x124000},
215         {1, 0x2130000, 0x2132000, 0x126000},
216         {1, 0x2140000, 0x2142000, 0x128000},
217         {1, 0x2150000, 0x2152000, 0x12a000},
218         {1, 0x2160000, 0x2170000, 0x110000},
219         {1, 0x2170000, 0x2172000, 0x12e000},
220         {0, 0x0000000, 0x0000000, 0x000000},
221         {0, 0x0000000, 0x0000000, 0x000000},
222         {0, 0x0000000, 0x0000000, 0x000000},
223         {0, 0x0000000, 0x0000000, 0x000000},
224         {0, 0x0000000, 0x0000000, 0x000000},
225         {0, 0x0000000, 0x0000000, 0x000000},
226         {0, 0x0000000, 0x0000000, 0x000000},
227         {0, 0x0000000, 0x0000000, 0x000000} } },
228         {{{1, 0x2200000, 0x2204000, 0x1b0000} } },
229         {{{0} } },
230         {{{0} } },
231         {{{0} } },
232         {{{0} } },
233         {{{0} } },
234         {{{1, 0x2800000, 0x2804000, 0x1a4000} } },
235         {{{1, 0x2900000, 0x2901000, 0x16b000} } },
236         {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
237         {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
238         {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
239         {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
240         {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
241         {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
242         {{{1, 0x3000000, 0x3000400, 0x1adc00} } },
243         {{{0, 0x3100000, 0x3104000, 0x1a8000} } },
244         {{{1, 0x3200000, 0x3204000, 0x1d4000} } },
245         {{{1, 0x3300000, 0x3304000, 0x1a0000} } },
246         {{{0} } },
247         {{{1, 0x3500000, 0x3500400, 0x1ac000} } },
248         {{{1, 0x3600000, 0x3600400, 0x1ae000} } },
249         {{{1, 0x3700000, 0x3700400, 0x1ae400} } },
250         {{{1, 0x3800000, 0x3804000, 0x1d0000} } },
251         {{{1, 0x3900000, 0x3904000, 0x1b4000} } },
252         {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
253         {{{0} } },
254         {{{0} } },
255         {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
256         {{{1, 0x3e00000, 0x3e01000, 0x167000} } },
257         {{{1, 0x3f00000, 0x3f01000, 0x168000} } }
258 };
259
260 /*
261  * top 12 bits of crb internal address (hub, agent)
262  */
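/*
 * Informational note: CRB_HI() composes the value written to the 2M CRB
 * window register from this table: CRB_BLK(off) (bits 25:20 of the 128M
 * CRB offset) selects the hub/agent id, which is shifted into the high
 * bits, while bits 19:16 of the offset are carried along so that each
 * 64KB sub-block lands in the right place inside the window.
 */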
263 unsigned qla82xx_crb_hub_agt[64] = {
264         0,
265         QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
266         QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
267         QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
268         0,
269         QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
270         QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
271         QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
272         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
273         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
274         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
275         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
276         QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
277         QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
278         QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
279         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
280         QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
281         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
282         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
283         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
284         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
285         QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
286         QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
287         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
288         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
289         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
290         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
291         0,
292         QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
293         QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
294         0,
295         QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
296         0,
297         QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
298         QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
299         0,
300         0,
301         0,
302         0,
303         0,
304         QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
305         0,
306         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
307         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
308         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
309         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
310         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
311         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
312         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
313         QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
314         QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
315         QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
316         0,
317         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
318         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
319         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
320         QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
321         0,
322         QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
323         QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
324         QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
325         0,
326         QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
327         0,
328 };
329
330 /* Device states */
331 char *qdev_state[] = {
332         "Unknown",
333         "Cold",
334         "Initializing",
335         "Ready",
336         "Need Reset",
337         "Need Quiescent",
338         "Failed",
339         "Quiescent",
340 };
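/*
 * Informational note: these strings are indexed by the IDC device-state
 * value; index 0 is a catch-all for unknown states.  They are used
 * elsewhere in the driver when logging state transitions (e.g. the
 * need-reset and need-quiescent messages).
 */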
341
342 /*
343  * In: 'off' is offset from CRB space in 128M pci map
344  * Out: 'off' is 2M pci map addr
345  * side effect: lock crb window
346  */
347 static void
348 qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
349 {
350         u32 win_read;
351
352         ha->crb_win = CRB_HI(*off);
353         writel(ha->crb_win,
354                 (void *)(CRB_WINDOW_2M + ha->nx_pcibase));
355
356         /* Read back value to make sure write has gone through before trying
357          * to use it.
358          */
359         win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
360         if (win_read != ha->crb_win) {
361                 DEBUG2(qla_printk(KERN_INFO, ha,
362                     "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
363                     "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
364         }
365         *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
366 }
367
368 static inline unsigned long
369 qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
370 {
371         /* See if we are currently pointing to the region we want to use next */
372         if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
373                 /* No need to change window. PCIX and PCIE regs are in
374                  * both windows.
375                  */
376                 return off;
377         }
378
379         if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
380                 /* We are in first CRB window */
381                 if (ha->curr_window != 0)
382                         WARN_ON(1);
383                 return off;
384         }
385
386         if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
387                 /* We are in second CRB window */
388                 off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
389
390                 if (ha->curr_window != 1)
391                         return off;
392
393                 /* We are in the QM or direct access
394                  * register region - do nothing
395                  */
396                 if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
397                         (off < QLA82XX_PCI_CAMQM_MAX))
398                         return off;
399         }
400         /* strange address given */
401         qla_printk(KERN_WARNING, ha,
402                 "%s: Warning: qla82xx_pci_set_crbwindow called with"
403                 " an unknown address (%llx)\n", QLA2XXX_DRIVER_NAME, off);
404         return off;
405 }
406
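/*
 * Informational note: translate a 128M-map CRB offset for use with the
 * 2M map.  Returns 0 when *off has been rewritten to a directly usable
 * address (based on ha->nx_pcibase), 1 when the caller must go through
 * the sliding CRB window, and -1 for offsets that cannot be handled.
 */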
407 static int
408 qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
409 {
410         struct crb_128M_2M_sub_block_map *m;
411
412         if (*off >= QLA82XX_CRB_MAX)
413                 return -1;
414
415         if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
416                 *off = (*off - QLA82XX_PCI_CAMQM) +
417                     QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
418                 return 0;
419         }
420
421         if (*off < QLA82XX_PCI_CRBSPACE)
422                 return -1;
423
424         *off -= QLA82XX_PCI_CRBSPACE;
425
426         /* Try direct map */
427         m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
428
429         if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
430                 *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
431                 return 0;
432         }
433         /* Not in direct map, use crb window */
434         return 1;
435 }
436
437 #define CRB_WIN_LOCK_TIMEOUT 100000000
438 static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
439 {
440         int done = 0, timeout = 0;
441
442         while (!done) {
443                 /* acquire semaphore3 from PCI HW block */
444                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
445                 if (done == 1)
446                         break;
447                 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
448                         return -1;
449                 timeout++;
450         }
451         qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
452         return 0;
453 }
454
455 int
456 qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
457 {
458         unsigned long flags = 0;
459         int rv;
460
461         rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
462
463         BUG_ON(rv == -1);
464
465         if (rv == 1) {
466                 write_lock_irqsave(&ha->hw_lock, flags);
467                 qla82xx_crb_win_lock(ha);
468                 qla82xx_pci_set_crbwindow_2M(ha, &off);
469         }
470
471         writel(data, (void __iomem *)off);
472
473         if (rv == 1) {
474                 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
475                 write_unlock_irqrestore(&ha->hw_lock, flags);
476         }
477         return 0;
478 }
479
480 int
481 qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
482 {
483         unsigned long flags = 0;
484         int rv;
485         u32 data;
486
487         rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
488
489         BUG_ON(rv == -1);
490
491         if (rv == 1) {
492                 write_lock_irqsave(&ha->hw_lock, flags);
493                 qla82xx_crb_win_lock(ha);
494                 qla82xx_pci_set_crbwindow_2M(ha, &off);
495         }
496         data = RD_REG_DWORD((void __iomem *)off);
497
498         if (rv == 1) {
499                 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
500                 write_unlock_irqrestore(&ha->hw_lock, flags);
501         }
502         return data;
503 }
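/*
 * Informational sketch of the indirect CRB access protocol implemented
 * by qla82xx_rd_32()/qla82xx_wr_32() above:
 *
 *   rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
 *   if (rv == 0)          // offset falls in the direct 2M map
 *           readl/writel((void __iomem *)off);
 *   else if (rv == 1) {   // offset needs the sliding CRB window
 *           write_lock_irqsave(&ha->hw_lock, flags);
 *           qla82xx_crb_win_lock(ha);               // PCIe semaphore 7
 *           qla82xx_pci_set_crbwindow_2M(ha, &off);
 *           readl/writel((void __iomem *)off);
 *           qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
 *           write_unlock_irqrestore(&ha->hw_lock, flags);
 *   }
 */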
504
505 #define IDC_LOCK_TIMEOUT 100000000
506 int qla82xx_idc_lock(struct qla_hw_data *ha)
507 {
508         int i;
509         int done = 0, timeout = 0;
510
511         while (!done) {
512                 /* acquire semaphore5 from PCI HW block */
513                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
514                 if (done == 1)
515                         break;
516                 if (timeout >= IDC_LOCK_TIMEOUT)
517                         return -1;
518
519                 timeout++;
520
521                 /* Yield CPU */
522                 if (!in_interrupt())
523                         schedule();
524                 else {
525                         for (i = 0; i < 20; i++)
526                                 cpu_relax();
527                 }
528         }
529
530         return 0;
531 }
532
533 void qla82xx_idc_unlock(struct qla_hw_data *ha)
534 {
535         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
536 }
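/*
 * Typical (informational) usage pattern for the IDC hardware lock,
 * which serializes inter-driver state changes across PCI functions:
 *
 *   qla82xx_idc_lock(ha);
 *   ... read/modify the shared IDC registers (e.g. device state) ...
 *   qla82xx_idc_unlock(ha);
 */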
537
538 /*  PCI Windowing for DDR regions.  */
539 #define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
540         (((addr) <= (high)) && ((addr) >= (low)))
541 /*
542  * Check memory access boundary.
543  * Used by the test agent; supports DDR access only for now.
544  */
545 static unsigned long
546 qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
547         unsigned long long addr, int size)
548 {
549         if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
550                 QLA82XX_ADDR_DDR_NET_MAX) ||
551                 !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
552                 QLA82XX_ADDR_DDR_NET_MAX) ||
553                 ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
554                         return 0;
555         else
556                 return 1;
557 }
558
559 int qla82xx_pci_set_window_warning_count;
560
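/*
 * Informational note: map a 64-bit agent address (DDR, OCM0 or QDR)
 * into the 2M PCI BAR by programming the matching window register, and
 * return the BAR-relative offset to access it through; returns -1UL for
 * address ranges that are not handled.
 */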
561 static unsigned long
562 qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
563 {
564         int window;
565         u32 win_read;
566
567         if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
568                 QLA82XX_ADDR_DDR_NET_MAX)) {
569                 /* DDR network side */
570                 window = MN_WIN(addr);
571                 ha->ddr_mn_window = window;
572                 qla82xx_wr_32(ha,
573                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
574                 win_read = qla82xx_rd_32(ha,
575                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
576                 if ((win_read << 17) != window) {
577                         qla_printk(KERN_WARNING, ha,
578                             "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
579                             __func__, window, win_read);
580                 }
581                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
582         } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
583                 QLA82XX_ADDR_OCM0_MAX)) {
584                 unsigned int temp1;
585                 if ((addr & 0x00ff800) == 0xff800) {
586                         qla_printk(KERN_WARNING, ha,
587                             "%s: QM access not handled.\n", __func__);
588                         addr = -1UL;
589                 }
590                 window = OCM_WIN(addr);
591                 ha->ddr_mn_window = window;
592                 qla82xx_wr_32(ha,
593                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
594                 win_read = qla82xx_rd_32(ha,
595                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
596                 temp1 = ((window & 0x1FF) << 7) |
597                     ((window & 0x0FFFE0000) >> 17);
598                 if (win_read != temp1) {
599                         qla_printk(KERN_WARNING, ha,
600                             "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
601                             __func__, temp1, win_read);
602                 }
603                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
604
605         } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
606                 QLA82XX_P3_ADDR_QDR_NET_MAX)) {
607                 /* QDR network side */
608                 window = MS_WIN(addr);
609                 ha->qdr_sn_window = window;
610                 qla82xx_wr_32(ha,
611                         ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
612                 win_read = qla82xx_rd_32(ha,
613                         ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
614                 if (win_read != window) {
615                         qla_printk(KERN_WARNING, ha,
616                             "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
617                             __func__, window, win_read);
618                 }
619                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
620         } else {
621                 /*
622                  * peg gdb frequently accesses memory that doesn't exist;
623                  * this limits the chit chat so debugging isn't slowed down.
624                  */
625                 if ((qla82xx_pci_set_window_warning_count++ < 8) ||
626                     (qla82xx_pci_set_window_warning_count%64 == 0)) {
627                         qla_printk(KERN_WARNING, ha,
628                             "%s: Warning:%s Unknown address range!\n", __func__,
629                             QLA2XXX_DRIVER_NAME);
630                 }
631                 addr = -1UL;
632         }
633         return addr;
634 }
635
636 /* check if address is in the same windows as the previous access */
637 static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
638         unsigned long long addr)
639 {
640         int                     window;
641         unsigned long long      qdr_max;
642
643         qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
644
645         /* DDR network side */
646         if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
647                 QLA82XX_ADDR_DDR_NET_MAX))
648                 BUG();
649         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
650                 QLA82XX_ADDR_OCM0_MAX))
651                 return 1;
652         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
653                 QLA82XX_ADDR_OCM1_MAX))
654                 return 1;
655         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
656                 /* QDR network side */
657                 window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
658                 if (ha->qdr_sn_window == window)
659                         return 1;
660         }
661         return 0;
662 }
663
664 static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
665         u64 off, void *data, int size)
666 {
667         unsigned long   flags;
668         void           *addr = NULL;
669         int             ret = 0;
670         u64             start;
671         uint8_t         *mem_ptr = NULL;
672         unsigned long   mem_base;
673         unsigned long   mem_page;
674
675         write_lock_irqsave(&ha->hw_lock, flags);
676
677         /*
678          * If attempting to access unknown address or straddle hw windows,
679          * do not access.
680          */
681         start = qla82xx_pci_set_window(ha, off);
682         if ((start == -1UL) ||
683                 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
684                 write_unlock_irqrestore(&ha->hw_lock, flags);
685                 qla_printk(KERN_ERR, ha,
686                         "%s out of bound pci memory access. "
687                         "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
688                 return -1;
689         }
690
691         write_unlock_irqrestore(&ha->hw_lock, flags);
692         mem_base = pci_resource_start(ha->pdev, 0);
693         mem_page = start & PAGE_MASK;
694         /* Map two pages whenever user tries to access addresses in two
695          * consecutive pages.
696          */
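        /* Illustrative example: with 4K pages, an 8-byte access at
         * start = ...ffc spans two pages, so PAGE_SIZE * 2 is mapped;
         * otherwise a single page suffices.
         */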
697         if (mem_page != ((start + size - 1) & PAGE_MASK))
698                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
699         else
700                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
701         if (mem_ptr == NULL) {
702                 *(u8  *)data = 0;
703                 return -1;
704         }
705         addr = mem_ptr;
706         addr += start & (PAGE_SIZE - 1);
707         write_lock_irqsave(&ha->hw_lock, flags);
708
709         switch (size) {
710         case 1:
711                 *(u8  *)data = readb(addr);
712                 break;
713         case 2:
714                 *(u16 *)data = readw(addr);
715                 break;
716         case 4:
717                 *(u32 *)data = readl(addr);
718                 break;
719         case 8:
720                 *(u64 *)data = readq(addr);
721                 break;
722         default:
723                 ret = -1;
724                 break;
725         }
726         write_unlock_irqrestore(&ha->hw_lock, flags);
727
728         if (mem_ptr)
729                 iounmap(mem_ptr);
730         return ret;
731 }
732
733 static int
734 qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
735         u64 off, void *data, int size)
736 {
737         unsigned long   flags;
738         void           *addr = NULL;
739         int             ret = 0;
740         u64             start;
741         uint8_t         *mem_ptr = NULL;
742         unsigned long   mem_base;
743         unsigned long   mem_page;
744
745         write_lock_irqsave(&ha->hw_lock, flags);
746
747         /*
748          * If attempting to access unknown address or straddle hw windows,
749          * do not access.
750          */
751         start = qla82xx_pci_set_window(ha, off);
752         if ((start == -1UL) ||
753                 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
754                 write_unlock_irqrestore(&ha->hw_lock, flags);
755                 qla_printk(KERN_ERR, ha,
756                         "%s out of bound pci memory access. "
757                         "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
758                 return -1;
759         }
760
761         write_unlock_irqrestore(&ha->hw_lock, flags);
762         mem_base = pci_resource_start(ha->pdev, 0);
763         mem_page = start & PAGE_MASK;
764         /* Map two pages whenever user tries to access addresses in two
765          * consecutive pages.
766          */
767         if (mem_page != ((start + size - 1) & PAGE_MASK))
768                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
769         else
770                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
771         if (mem_ptr == NULL)
772                 return -1;
773
774         addr = mem_ptr;
775         addr += start & (PAGE_SIZE - 1);
776         write_lock_irqsave(&ha->hw_lock, flags);
777
778         switch (size) {
779         case 1:
780                 writeb(*(u8  *)data, addr);
781                 break;
782         case 2:
783                 writew(*(u16 *)data, addr);
784                 break;
785         case 4:
786                 writel(*(u32 *)data, addr);
787                 break;
788         case 8:
789                 writeq(*(u64 *)data, addr);
790                 break;
791         default:
792                 ret = -1;
793                 break;
794         }
795         write_unlock_irqrestore(&ha->hw_lock, flags);
796         if (mem_ptr)
797                 iounmap(mem_ptr);
798         return ret;
799 }
800
801 #define MTU_FUDGE_FACTOR 100
802 static unsigned long
803 qla82xx_decode_crb_addr(unsigned long addr)
804 {
805         int i;
806         unsigned long base_addr, offset, pci_base;
807
808         if (!qla82xx_crb_table_initialized)
809                 qla82xx_crb_addr_transform_setup();
810
811         pci_base = ADDR_ERROR;
812         base_addr = addr & 0xfff00000;
813         offset = addr & 0x000fffff;
814
815         for (i = 0; i < MAX_CRB_XFORM; i++) {
816                 if (crb_addr_xform[i] == base_addr) {
817                         pci_base = i << 20;
818                         break;
819                 }
820         }
821         if (pci_base == ADDR_ERROR)
822                 return pci_base;
823         return pci_base + offset;
824 }
825
826 static long rom_max_timeout = 100;
827 static long qla82xx_rom_lock_timeout = 100;
828
829 static int
830 qla82xx_rom_lock(struct qla_hw_data *ha)
831 {
832         int done = 0, timeout = 0;
833
834         while (!done) {
835                 /* acquire semaphore2 from PCI HW block */
836                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
837                 if (done == 1)
838                         break;
839                 if (timeout >= qla82xx_rom_lock_timeout)
840                         return -1;
841                 timeout++;
842         }
843         qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
844         return 0;
845 }
846
847 static void
848 qla82xx_rom_unlock(struct qla_hw_data *ha)
849 {
850         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
851 }
852
853 static int
854 qla82xx_wait_rom_busy(struct qla_hw_data *ha)
855 {
856         long timeout = 0;
857         long done = 0;
858
859         while (done == 0) {
860                 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
861                 done &= 4;
862                 timeout++;
863                 if (timeout >= rom_max_timeout) {
864                         DEBUG(qla_printk(KERN_INFO, ha,
865                                 "%s: Timeout reached waiting for rom busy\n",
866                                 QLA2XXX_DRIVER_NAME));
867                         return -1;
868                 }
869         }
870         return 0;
871 }
872
873 static int
874 qla82xx_wait_rom_done(struct qla_hw_data *ha)
875 {
876         long timeout = 0;
877         long done = 0;
878
879         while (done == 0) {
880                 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
881                 done &= 2;
882                 timeout++;
883                 if (timeout >= rom_max_timeout) {
884                         DEBUG(qla_printk(KERN_INFO, ha,
885                                 "%s: Timeout reached waiting for rom done\n",
886                                 QLA2XXX_DRIVER_NAME));
887                         return -1;
888                 }
889         }
890         return 0;
891 }
892
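/*
 * Informational note: the fast-read sequence below drives the serial
 * flash through the ROMUSB glue registers: program the address and byte
 * counts, issue the fast-read opcode (0xb), wait for the busy and done
 * status bits, then pick the data out of ROM_RDATA.  Callers are
 * expected to already hold the ROM lock (see qla82xx_rom_fast_read()).
 */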
893 static int
894 qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
895 {
896         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
897         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
898         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
899         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
900         qla82xx_wait_rom_busy(ha);
901         if (qla82xx_wait_rom_done(ha)) {
902                 qla_printk(KERN_WARNING, ha,
903                         "%s: Error waiting for rom done\n",
904                         QLA2XXX_DRIVER_NAME);
905                 return -1;
906         }
907         /* Reset abyte_cnt and dummy_byte_cnt */
908         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
909         udelay(10);
910         cond_resched();
911         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
912         *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
913         return 0;
914 }
915
916 static int
917 qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
918 {
919         int ret, loops = 0;
920
921         while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
922                 udelay(100);
923                 schedule();
924                 loops++;
925         }
926         if (loops >= 50000) {
927                 qla_printk(KERN_INFO, ha,
928                         "%s: qla82xx_rom_lock failed\n",
929                         QLA2XXX_DRIVER_NAME);
930                 return -1;
931         }
932         ret = qla82xx_do_rom_fast_read(ha, addr, valp);
933         qla82xx_rom_unlock(ha);
934         return ret;
935 }
936
937 static int
938 qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
939 {
940         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
941         qla82xx_wait_rom_busy(ha);
942         if (qla82xx_wait_rom_done(ha)) {
943                 qla_printk(KERN_WARNING, ha,
944                     "Error waiting for rom done\n");
945                 return -1;
946         }
947         *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
948         return 0;
949 }
950
951 static int
952 qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
953 {
954         long timeout = 0;
955         uint32_t done = 1;
956         uint32_t val;
957         int ret = 0;
958
959         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
960         while ((done != 0) && (ret == 0)) {
961                 ret = qla82xx_read_status_reg(ha, &val);
962                 done = val & 1;
963                 timeout++;
964                 udelay(10);
965                 cond_resched();
966                 if (timeout >= 50000) {
967                         qla_printk(KERN_WARNING, ha,
968                             "Timeout reached waiting for write finish\n");
969                         return -1;
970                 }
971         }
972         return ret;
973 }
974
975 static int
976 qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
977 {
978         uint32_t val;
979         qla82xx_wait_rom_busy(ha);
980         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
981         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
982         qla82xx_wait_rom_busy(ha);
983         if (qla82xx_wait_rom_done(ha))
984                 return -1;
985         if (qla82xx_read_status_reg(ha, &val) != 0)
986                 return -1;
987         if ((val & 2) != 2)
988                 return -1;
989         return 0;
990 }
991
992 static int
993 qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
994 {
995         if (qla82xx_flash_set_write_enable(ha))
996                 return -1;
997         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
998         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
999         if (qla82xx_wait_rom_done(ha)) {
1000                 qla_printk(KERN_WARNING, ha,
1001                     "Error waiting for rom done\n");
1002                 return -1;
1003         }
1004         return qla82xx_flash_wait_write_finish(ha);
1005 }
1006
1007 static int
1008 qla82xx_write_disable_flash(struct qla_hw_data *ha)
1009 {
1010         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
1011         if (qla82xx_wait_rom_done(ha)) {
1012                 qla_printk(KERN_WARNING, ha,
1013                     "Error waiting for rom done\n");
1014                 return -1;
1015         }
1016         return 0;
1017 }
1018
1019 static int
1020 ql82xx_rom_lock_d(struct qla_hw_data *ha)
1021 {
1022         int loops = 0;
1023         while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1024                 udelay(100);
1025                 cond_resched();
1026                 loops++;
1027         }
1028         if (loops >= 50000) {
1029                 qla_printk(KERN_WARNING, ha, "ROM lock failed\n");
1030                 return -1;
1031         }
1032         return 0;
1033 }
1034
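/*
 * Informational note: programming one dword of flash follows the usual
 * serial flash sequence: take the ROM lock, issue write-enable (WREN),
 * load the data and address, issue the page-program opcode (PP), wait
 * for the controller to report done and for the flash write-in-progress
 * bit to clear, then drop the ROM lock.
 */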
1035 static int
1036 qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1037         uint32_t data)
1038 {
1039         int ret = 0;
1040
1041         ret = ql82xx_rom_lock_d(ha);
1042         if (ret < 0) {
1043                 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
1044                 return ret;
1045         }
1046
1047         if (qla82xx_flash_set_write_enable(ha))
1048                 goto done_write;
1049
1050         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
1051         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
1052         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
1053         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
1054         qla82xx_wait_rom_busy(ha);
1055         if (qla82xx_wait_rom_done(ha)) {
1056                 qla_printk(KERN_WARNING, ha,
1057                         "Error waiting for rom done\n");
1058                 ret = -1;
1059                 goto done_write;
1060         }
1061
1062         ret = qla82xx_flash_wait_write_finish(ha);
1063
1064 done_write:
1065         qla82xx_rom_unlock(ha);
1066         return ret;
1067 }
1068
1069 /* This routine performs the CRB initialization sequence
1070  * to put the ISP into an operational state.
1071  */
1072 static int
1073 qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1074 {
1075         int addr, val;
1076         int i;
1077         struct crb_addr_pair *buf;
1078         unsigned long off;
1079         unsigned offset, n;
1080         struct qla_hw_data *ha = vha->hw;
1081
1082         struct crb_addr_pair {
1083                 long addr;
1084                 long data;
1085         };
1086
1087         /* Halt all the individual PEGs and other blocks of the ISP */
1088         qla82xx_rom_lock(ha);
1089
1090         /* disable all I2Q */
1091         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
1092         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
1093         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
1094         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
1095         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
1096         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
1097
1098         /* disable all niu interrupts */
1099         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
1100         /* disable xge rx/tx */
1101         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
1102         /* disable xg1 rx/tx */
1103         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
1104         /* disable sideband mac */
1105         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
1106         /* disable ap0 mac */
1107         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
1108         /* disable ap1 mac */
1109         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
1110
1111         /* halt sre */
1112         val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
1113         qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
1114
1115         /* halt epg */
1116         qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
1117
1118         /* halt timers */
1119         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
1120         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
1121         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
1122         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
1123         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
1124         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
1125
1126         /* halt pegs */
1127         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
1128         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
1129         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
1130         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
1131         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
1132         msleep(20);
1133
1134         /* big hammer */
1135         if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
1136                 /* don't reset CAM block on reset */
1137                 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
1138         else
1139                 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
1140
1141         /* reset ms */
1142         val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
1143         val |= (1 << 1);
1144         qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1145         msleep(20);
1146
1147         /* unreset ms */
1148         val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
1149         val &= ~(1 << 1);
1150         qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1151         msleep(20);
1152
1153         qla82xx_rom_unlock(ha);
1154
1155         /* Read the signature value from the flash.
1156          * Offset 0: Contains the signature (0xcafecafe)
1157          * Offset 4: Offset and number of addr/value pairs
1158          * present in the CRB initialization sequence
1159          */
1160         if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1161             qla82xx_rom_fast_read(ha, 4, &n) != 0) {
1162                 qla_printk(KERN_WARNING, ha,
1163                     "[ERROR] Reading crb_init area: n: %08x\n", n);
1164                 return -1;
1165         }
1166
1167         /* Offset in flash = lower 16 bits
1168          * Number of entries = upper 16 bits
1169          */
1170         offset = n & 0xffffU;
1171         n = (n >> 16) & 0xffffU;
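        /* Illustrative example: if the dword at flash offset 4 reads
         * 0x00300010, then offset = 0x10 and n = 0x30, and pair i below
         * is fetched from flash bytes 4*offset + 8*i (data) and
         * 4*offset + 8*i + 4 (address).
         */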
1172
1173         /* number of addr/value pairs should not exceed 1024 entries */
1174         if (n >= 1024) {
1175                 qla_printk(KERN_WARNING, ha,
1176                     "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
1177                     QLA2XXX_DRIVER_NAME, __func__, n);
1178                 return -1;
1179         }
1180
1181         qla_printk(KERN_INFO, ha,
1182             "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n);
1183
1184         buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
1185         if (buf == NULL) {
1186                 qla_printk(KERN_WARNING, ha,
1187                     "%s: [ERROR] Unable to malloc memory.\n",
1188                     QLA2XXX_DRIVER_NAME);
1189                 return -1;
1190         }
1191
1192         for (i = 0; i < n; i++) {
1193                 if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
1194                     qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
1195                         kfree(buf);
1196                         return -1;
1197                 }
1198
1199                 buf[i].addr = addr;
1200                 buf[i].data = val;
1201         }
1202
1203         for (i = 0; i < n; i++) {
1204                 /* Translate internal CRB initialization
1205                  * address to PCI bus address
1206                  */
1207                 off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
1208                     QLA82XX_PCI_CRBSPACE;
1209                 /* Not all CRB addr/value pairs are written;
1210                  * some of them are skipped
1211                  */
1212
1213                 /* skipping cold reboot MAGIC */
1214                 if (off == QLA82XX_CAM_RAM(0x1fc))
1215                         continue;
1216
1217                 /* do not reset PCI */
1218                 if (off == (ROMUSB_GLB + 0xbc))
1219                         continue;
1220
1221                 /* skip core clock, so that firmware can increase the clock */
1222                 if (off == (ROMUSB_GLB + 0xc8))
1223                         continue;
1224
1225                 /* skip the function enable register */
1226                 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
1227                         continue;
1228
1229                 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
1230                         continue;
1231
1232                 if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
1233                         continue;
1234
1235                 if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
1236                         continue;
1237
1238                 if (off == ADDR_ERROR) {
1239                         qla_printk(KERN_WARNING, ha,
1240                             "%s: [ERROR] Unknown addr: 0x%08lx\n",
1241                             QLA2XXX_DRIVER_NAME, buf[i].addr);
1242                         continue;
1243                 }
1244
1245                 qla82xx_wr_32(ha, off, buf[i].data);
1246
1247                 /* ISP requires a much bigger delay to settle down,
1248                  * else crb_window returns 0xffffffff
1249                  */
1250                 if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
1251                         msleep(1000);
1252
1253                 /* ISP requires a millisecond delay between
1254                  * successive CRB register updates
1255                  */
1256                 msleep(1);
1257         }
1258
1259         kfree(buf);
1260
1261         /* Resetting the data and instruction cache */
1262         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
1263         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
1264         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
1265
1266         /* Clear all protocol processing engines */
1267         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
1268         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
1269         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
1270         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
1271         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
1272         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
1273         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
1274         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
1275         return 0;
1276 }
1277
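/*
 * Informational note: the MIU test agent transfers 16 bytes at a time,
 * so a write is done as read-modify-write: the containing 16-byte
 * chunk(s) are first read back with qla82xx_pci_mem_read_2M(), the new
 * bytes are merged in, and the result is written out through the
 * MIU_TEST_AGT_* registers.
 */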
1278 static int
1279 qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1280                 u64 off, void *data, int size)
1281 {
1282         int i, j, ret = 0, loop, sz[2], off0;
1283         int scale, shift_amount, startword;
1284         uint32_t temp;
1285         uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
1286
1287         /*
1288          * If not MN, go check for MS or invalid.
1289          */
1290         if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1291                 mem_crb = QLA82XX_CRB_QDR_NET;
1292         else {
1293                 mem_crb = QLA82XX_CRB_DDR_NET;
1294                 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1295                         return qla82xx_pci_mem_write_direct(ha,
1296                             off, data, size);
1297         }
1298
1299         off0 = off & 0x7;
1300         sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1301         sz[1] = size - sz[0];
1302
1303         off8 = off & 0xfffffff0;
1304         loop = (((off & 0xf) + size - 1) >> 4) + 1;
1305         shift_amount = 4;
1306         scale = 2;
1307         startword = (off & 0xf)/8;
1308
1309         for (i = 0; i < loop; i++) {
1310                 if (qla82xx_pci_mem_read_2M(ha, off8 +
1311                     (i << shift_amount), &word[i * scale], 8))
1312                         return -1;
1313         }
1314
1315         switch (size) {
1316         case 1:
1317                 tmpw = *((uint8_t *)data);
1318                 break;
1319         case 2:
1320                 tmpw = *((uint16_t *)data);
1321                 break;
1322         case 4:
1323                 tmpw = *((uint32_t *)data);
1324                 break;
1325         case 8:
1326         default:
1327                 tmpw = *((uint64_t *)data);
1328                 break;
1329         }
1330
1331         if (sz[0] == 8) {
1332                 word[startword] = tmpw;
1333         } else {
1334                 word[startword] &=
1335                         ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1336                 word[startword] |= tmpw << (off0 * 8);
1337         }
1338         if (sz[1] != 0) {
1339                 word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1340                 word[startword+1] |= tmpw >> (sz[0] * 8);
1341         }
1342
1343         for (i = 0; i < loop; i++) {
1344                 temp = off8 + (i << shift_amount);
1345                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1346                 temp = 0;
1347                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1348                 temp = word[i * scale] & 0xffffffff;
1349                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1350                 temp = (word[i * scale] >> 32) & 0xffffffff;
1351                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1352                 temp = word[i*scale + 1] & 0xffffffff;
1353                 qla82xx_wr_32(ha, mem_crb +
1354                     MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
1355                 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1356                 qla82xx_wr_32(ha, mem_crb +
1357                     MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
1358
1359                 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1360                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1361                 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1362                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1363
1364                 for (j = 0; j < MAX_CTL_CHECK; j++) {
1365                         temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1366                         if ((temp & MIU_TA_CTL_BUSY) == 0)
1367                                 break;
1368                 }
1369
1370                 if (j >= MAX_CTL_CHECK) {
1371                         if (printk_ratelimit())
1372                                 dev_err(&ha->pdev->dev,
1373                                     "failed to write through agent\n");
1374                         ret = -1;
1375                         break;
1376                 }
1377         }
1378
1379         return ret;
1380 }
1381
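/*
 * Informational note: copies the bootloader from its flash region
 * (ha->flt_region_bootload) into adapter memory at BOOTLD_START,
 * 8 bytes per iteration via qla82xx_pci_mem_write_2M(), and then pokes
 * peg 0 and the ROMUSB global SW reset register so that the loaded code
 * can start running.
 */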
1382 static int
1383 qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
1384 {
1385         int  i;
1386         long size = 0;
1387         long flashaddr = ha->flt_region_bootload << 2;
1388         long memaddr = BOOTLD_START;
1389         u64 data;
1390         u32 high, low;
1391         size = (IMAGE_START - BOOTLD_START) / 8;
1392
1393         for (i = 0; i < size; i++) {
1394                 if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
1395                     (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
1396                         return -1;
1397                 }
1398                 data = ((u64)high << 32) | low;
1399                 qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
1400                 flashaddr += 8;
1401                 memaddr += 8;
1402
1403                 if (i % 0x1000 == 0)
1404                         msleep(1);
1405         }
1406         udelay(100);
1407         read_lock(&ha->hw_lock);
1408         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1409         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1410         read_unlock(&ha->hw_lock);
1411         return 0;
1412 }
1413
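/*
 * Informational note: QDR accesses and in-range DDR accesses are
 * carried out through the MIU test agent: the 16-byte aligned address
 * is programmed into MIU_TEST_AGT_ADDR_LO/HI, the agent is started and
 * polled until its BUSY bit clears, and the data is assembled from the
 * MIU_TEST_AGT_RDDATA registers; everything else is redirected to
 * qla82xx_pci_mem_read_direct().
 */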
1414 int
1415 qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1416                 u64 off, void *data, int size)
1417 {
1418         int i, j = 0, k, start, end, loop, sz[2], off0[2];
1419         int           shift_amount;
1420         uint32_t      temp;
1421         uint64_t      off8, val, mem_crb, word[2] = {0, 0};
1422
1423         /*
1424          * If not MN, go check for MS or invalid.
1425          */
1426
1427         if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1428                 mem_crb = QLA82XX_CRB_QDR_NET;
1429         else {
1430                 mem_crb = QLA82XX_CRB_DDR_NET;
1431                 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1432                         return qla82xx_pci_mem_read_direct(ha,
1433                             off, data, size);
1434         }
1435
1436         off8 = off & 0xfffffff0;
1437         off0[0] = off & 0xf;
1438         sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
1439         shift_amount = 4;
1440         loop = ((off0[0] + size - 1) >> shift_amount) + 1;
1441         off0[1] = 0;
1442         sz[1] = size - sz[0];
1443
1444         for (i = 0; i < loop; i++) {
1445                 temp = off8 + (i << shift_amount);
1446                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
1447                 temp = 0;
1448                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
1449                 temp = MIU_TA_CTL_ENABLE;
1450                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1451                 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
1452                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1453
1454                 for (j = 0; j < MAX_CTL_CHECK; j++) {
1455                         temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1456                         if ((temp & MIU_TA_CTL_BUSY) == 0)
1457                                 break;
1458                 }
1459
1460                 if (j >= MAX_CTL_CHECK) {
1461                         if (printk_ratelimit())
1462                                 dev_err(&ha->pdev->dev,
1463                                     "failed to read through agent\n");
1464                         break;
1465                 }
1466
1467                 start = off0[i] >> 2;
1468                 end   = (off0[i] + sz[i] - 1) >> 2;
1469                 for (k = start; k <= end; k++) {
1470                         temp = qla82xx_rd_32(ha,
1471                                         mem_crb + MIU_TEST_AGT_RDDATA(k));
1472                         word[i] |= ((uint64_t)temp << (32 * (k & 1)));
1473                 }
1474         }
1475
1476         if (j >= MAX_CTL_CHECK)
1477                 return -1;
1478
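        /*
         * Reassemble the requested bytes from the (up to) two 8-byte words
         * read above: unaligned requests take the low sz[0] bytes from
         * word[0] and the remaining sz[1] bytes from word[1].
         */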
1479         if ((off0[0] & 7) == 0) {
1480                 val = word[0];
1481         } else {
1482                 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
1483                         ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
1484         }
1485
1486         switch (size) {
1487         case 1:
1488                 *(uint8_t  *)data = val;
1489                 break;
1490         case 2:
1491                 *(uint16_t *)data = val;
1492                 break;
1493         case 4:
1494                 *(uint32_t *)data = val;
1495                 break;
1496         case 8:
1497                 *(uint64_t *)data = val;
1498                 break;
1499         }
1500         return 0;
1501 }
1502
1503
1504 static struct qla82xx_uri_table_desc *
1505 qla82xx_get_table_desc(const u8 *unirom, int section)
1506 {
1507         uint32_t i;
1508         struct qla82xx_uri_table_desc *directory =
1509                 (struct qla82xx_uri_table_desc *)&unirom[0];
1510         __le32 offset;
1511         __le32 tab_type;
1512         __le32 entries = cpu_to_le32(directory->num_entries);
1513
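        /*
         * Walk the unified ROM image directory: each entry is entry_size
         * bytes starting at findex, with the table type stored at dword
         * offset 8 within the entry.
         */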
1514         for (i = 0; i < entries; i++) {
1515                 offset = cpu_to_le32(directory->findex) +
1516                     (i * cpu_to_le32(directory->entry_size));
1517                 tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
1518
1519                 if (tab_type == section)
1520                         return (struct qla82xx_uri_table_desc *)&unirom[offset];
1521         }
1522
1523         return NULL;
1524 }
1525
1526 static struct qla82xx_uri_data_desc *
1527 qla82xx_get_data_desc(struct qla_hw_data *ha,
1528         u32 section, u32 idx_offset)
1529 {
1530         const u8 *unirom = ha->hablob->fw->data;
1531         int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
1532         struct qla82xx_uri_table_desc *tab_desc = NULL;
1533         __le32 offset;
1534
1535         tab_desc = qla82xx_get_table_desc(unirom, section);
1536         if (!tab_desc)
1537                 return NULL;
1538
1539         offset = cpu_to_le32(tab_desc->findex) +
1540             (cpu_to_le32(tab_desc->entry_size) * idx);
1541
1542         return (struct qla82xx_uri_data_desc *)&unirom[offset];
1543 }
1544
1545 static u8 *
1546 qla82xx_get_bootld_offset(struct qla_hw_data *ha)
1547 {
1548         u32 offset = BOOTLD_START;
1549         struct qla82xx_uri_data_desc *uri_desc = NULL;
1550
1551         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1552                 uri_desc = qla82xx_get_data_desc(ha,
1553                     QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
1554                 if (uri_desc)
1555                         offset = cpu_to_le32(uri_desc->findex);
1556         }
1557
1558         return (u8 *)&ha->hablob->fw->data[offset];
1559 }
1560
1561 static __le32
1562 qla82xx_get_fw_size(struct qla_hw_data *ha)
1563 {
1564         struct qla82xx_uri_data_desc *uri_desc = NULL;
1565
1566         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1567                 uri_desc =  qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1568                     QLA82XX_URI_FIRMWARE_IDX_OFF);
1569                 if (uri_desc)
1570                         return cpu_to_le32(uri_desc->size);
1571         }
1572
1573         return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
1574 }
1575
1576 static u8 *
1577 qla82xx_get_fw_offs(struct qla_hw_data *ha)
1578 {
1579         u32 offset = IMAGE_START;
1580         struct qla82xx_uri_data_desc *uri_desc = NULL;
1581
1582         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1583                 uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1584                         QLA82XX_URI_FIRMWARE_IDX_OFF);
1585                 if (uri_desc)
1586                         offset = cpu_to_le32(uri_desc->findex);
1587         }
1588
1589         return (u8 *)&ha->hablob->fw->data[offset];
1590 }
1591
1592 /* PCI related functions */
1593 char *
1594 qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
1595 {
1596         int pcie_reg;
1597         struct qla_hw_data *ha = vha->hw;
1598         char lwstr[6];
1599         uint16_t lnk;
1600
1601         pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
1602         pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
1603         ha->link_width = (lnk >> 4) & 0x3f;
1604
1605         strcpy(str, "PCIe (");
1606         strcat(str, "2.5Gb/s ");
1607         snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
1608         strcat(str, lwstr);
1609         return str;
1610 }
1611
1612 int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
1613 {
1614         unsigned long val = 0;
1615         u32 control;
1616
1617         switch (region) {
1618         case 0:
1619                 val = 0;
1620                 break;
1621         case 1:
1622                 pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
1623                 val = control + QLA82XX_MSIX_TBL_SPACE;
1624                 break;
1625         }
1626         return val;
1627 }
1628
1629
1630 int
1631 qla82xx_iospace_config(struct qla_hw_data *ha)
1632 {
1633         uint32_t len = 0;
1634
1635         if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
1636                 qla_printk(KERN_WARNING, ha,
1637                         "Failed to reserve selected regions (%s)\n",
1638                         pci_name(ha->pdev));
1639                 goto iospace_error_exit;
1640         }
1641
1642         /* Use MMIO operations for all accesses. */
1643         if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1644                 qla_printk(KERN_ERR, ha,
1645                         "region #0 not an MMIO resource (%s), aborting\n",
1646                         pci_name(ha->pdev));
1647                 goto iospace_error_exit;
1648         }
1649
1650         len = pci_resource_len(ha->pdev, 0);
1651         ha->nx_pcibase =
1652             (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
1653         if (!ha->nx_pcibase) {
1654                 qla_printk(KERN_ERR, ha,
1655                     "cannot remap pcibase MMIO (%s), aborting\n",
1656                     pci_name(ha->pdev));
1657                 pci_release_regions(ha->pdev);
1658                 goto iospace_error_exit;
1659         }
1660
1661         /* Mapping of IO base pointer */
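        /* Each function's register window lives at offset 0xbc000 into BAR0,
         * spaced 2K apart (devfn << 11).
         */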
1662         ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
1663             0xbc000 + (ha->pdev->devfn << 11));
1664
1665         if (!ql2xdbwr) {
1666                 ha->nxdb_wr_ptr =
1667                     (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
1668                     (ha->pdev->devfn << 12)), 4);
1669                 if (!ha->nxdb_wr_ptr) {
1670                         qla_printk(KERN_ERR, ha,
1671                             "cannot remap MMIO (%s), aborting\n",
1672                             pci_name(ha->pdev));
1673                         pci_release_regions(ha->pdev);
1674                         goto iospace_error_exit;
1675                 }
1676
1677                 /* Mapping of IO base pointer,
1678                  * door bell read and write pointer
1679                  */
1680                 ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
1681                     (ha->pdev->devfn * 8);
1682         } else {
1683                 ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
1684                         QLA82XX_CAMRAM_DB1 :
1685                         QLA82XX_CAMRAM_DB2);
1686         }
1687
1688         ha->max_req_queues = ha->max_rsp_queues = 1;
1689         ha->msix_count = ha->max_rsp_queues + 1;
1690         return 0;
1691
1692 iospace_error_exit:
1693         return -ENOMEM;
1694 }
1695
1696 /* GS related functions */
1697
1698 /* Initialization related functions */
1699
1700 /**
1701  * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
1702  * @vha: HA context
1703  *
1704  * Returns 0 on success.
1705  */
1706 int
1707 qla82xx_pci_config(scsi_qla_host_t *vha)
1708 {
1709         struct qla_hw_data *ha = vha->hw;
1710         int ret;
1711
1712         pci_set_master(ha->pdev);
1713         ret = pci_set_mwi(ha->pdev);
1714         ha->chip_revision = ha->pdev->revision;
1715         return 0;
1716 }
1717
1718 /**
1719  * qla82xx_reset_chip() - Disable host interrupts on the ISP82xx.
1720  * @vha: HA context
1721  *
1722  * No register-level reset is performed here; interrupts are simply disabled.
1723  */
1724 void
1725 qla82xx_reset_chip(scsi_qla_host_t *vha)
1726 {
1727         struct qla_hw_data *ha = vha->hw;
1728         ha->isp_ops->disable_intrs(ha);
1729 }
1730
1731 void qla82xx_config_rings(struct scsi_qla_host *vha)
1732 {
1733         struct qla_hw_data *ha = vha->hw;
1734         struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1735         struct init_cb_81xx *icb;
1736         struct req_que *req = ha->req_q_map[0];
1737         struct rsp_que *rsp = ha->rsp_q_map[0];
1738
1739         /* Setup ring parameters in initialization control block. */
1740         icb = (struct init_cb_81xx *)ha->init_cb;
1741         icb->request_q_outpointer = __constant_cpu_to_le16(0);
1742         icb->response_q_inpointer = __constant_cpu_to_le16(0);
1743         icb->request_q_length = cpu_to_le16(req->length);
1744         icb->response_q_length = cpu_to_le16(rsp->length);
1745         icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1746         icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1747         icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1748         icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1749
1750         WRT_REG_DWORD((unsigned long  __iomem *)&reg->req_q_out[0], 0);
1751         WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_in[0], 0);
1752         WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_out[0], 0);
1753 }
1754
1755 void qla82xx_reset_adapter(struct scsi_qla_host *vha)
1756 {
1757         struct qla_hw_data *ha = vha->hw;
1758         vha->flags.online = 0;
1759         qla2x00_try_to_stop_firmware(vha);
1760         ha->isp_ops->disable_intrs(ha);
1761 }
1762
1763 static int
1764 qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
1765 {
1766         u64 *ptr64;
1767         u32 i, flashaddr, size;
1768         __le64 data;
1769
1770         size = (IMAGE_START - BOOTLD_START) / 8;
1771
1772         ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
1773         flashaddr = BOOTLD_START;
1774
1775         for (i = 0; i < size; i++) {
1776                 data = cpu_to_le64(ptr64[i]);
1777                 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1778                         return -EIO;
1779                 flashaddr += 8;
1780         }
1781
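        /* Now copy the firmware image itself into adapter memory at
         * FLASH_ADDR_START, again 8 bytes per write through the 2M window.
         */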
1782         flashaddr = FLASH_ADDR_START;
1783         size = (__force u32)qla82xx_get_fw_size(ha) / 8;
1784         ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
1785
1786         for (i = 0; i < size; i++) {
1787                 data = cpu_to_le64(ptr64[i]);
1788
1789                 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1790                         return -EIO;
1791                 flashaddr += 8;
1792         }
1793         udelay(100);
1794
1795         /* Write a magic value to CAMRAM register
1796          * at a specified offset to indicate
1797          * that all data is written and
1798          * ready for firmware to initialize.
1799          */
1800         qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);
1801
1802         read_lock(&ha->hw_lock);
1803         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1804         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1805         read_unlock(&ha->hw_lock);
1806         return 0;
1807 }
1808
1809 static int
1810 qla82xx_set_product_offset(struct qla_hw_data *ha)
1811 {
1812         struct qla82xx_uri_table_desc *ptab_desc = NULL;
1813         const uint8_t *unirom = ha->hablob->fw->data;
1814         uint32_t i;
1815         __le32 entries;
1816         __le32 flags, file_chiprev, offset;
1817         uint8_t chiprev = ha->chip_revision;
1818         /* Hardcoding mn_present flag for P3P */
1819         int mn_present = 0;
1820         uint32_t flagbit;
1821
1822         ptab_desc = qla82xx_get_table_desc(unirom,
1823                  QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
1824         if (!ptab_desc)
1825                 return -1;
1826
1827         entries = cpu_to_le32(ptab_desc->num_entries);
1828
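        /*
         * Scan the product table for an entry matching this chip revision
         * with the proper flag bit set (bit 1 when MN is present, bit 2
         * otherwise).
         */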
1829         for (i = 0; i < entries; i++) {
1830                 offset = cpu_to_le32(ptab_desc->findex) +
1831                         (i * cpu_to_le32(ptab_desc->entry_size));
1832                 flags = cpu_to_le32(*((int *)&unirom[offset] +
1833                         QLA82XX_URI_FLAGS_OFF));
1834                 file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
1835                         QLA82XX_URI_CHIP_REV_OFF));
1836
1837                 flagbit = mn_present ? 1 : 2;
1838
1839                 if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
1840                         ha->file_prd_off = offset;
1841                         return 0;
1842                 }
1843         }
1844         return -1;
1845 }
1846
1847 int
1848 qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
1849 {
1850         __le32 val;
1851         uint32_t min_size;
1852         struct qla_hw_data *ha = vha->hw;
1853         const struct firmware *fw = ha->hablob->fw;
1854
1855         ha->fw_type = fw_type;
1856
1857         if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1858                 if (qla82xx_set_product_offset(ha))
1859                         return -EINVAL;
1860
1861                 min_size = QLA82XX_URI_FW_MIN_SIZE;
1862         } else {
1863                 val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
1864                 if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
1865                         return -EINVAL;
1866
1867                 min_size = QLA82XX_FW_MIN_SIZE;
1868         }
1869
1870         if (fw->size < min_size)
1871                 return -EINVAL;
1872         return 0;
1873 }
1874
1875 static int
1876 qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1877 {
1878         u32 val = 0;
1879         int retries = 60;
1880
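        /* Poll the Cmd Peg state every 500ms, up to 60 times (~30 seconds). */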
1881         do {
1882                 read_lock(&ha->hw_lock);
1883                 val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
1884                 read_unlock(&ha->hw_lock);
1885
1886                 switch (val) {
1887                 case PHAN_INITIALIZE_COMPLETE:
1888                 case PHAN_INITIALIZE_ACK:
1889                         return QLA_SUCCESS;
1890                 case PHAN_INITIALIZE_FAILED:
1891                         break;
1892                 default:
1893                         break;
1894                 }
1895                 qla_printk(KERN_WARNING, ha,
1896                         "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n",
1897                         val, retries);
1898
1899                 msleep(500);
1900
1901         } while (--retries);
1902
1903         qla_printk(KERN_INFO, ha,
1904             "Cmd Peg initialization failed: 0x%x.\n", val);
1905
1906         val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
1907         read_lock(&ha->hw_lock);
1908         qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1909         read_unlock(&ha->hw_lock);
1910         return QLA_FUNCTION_FAILED;
1911 }
1912
1913 static int
1914 qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1915 {
1916         u32 val = 0;
1917         int retries = 60;
1918
1919         do {
1920                 read_lock(&ha->hw_lock);
1921                 val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
1922                 read_unlock(&ha->hw_lock);
1923
1924                 switch (val) {
1925                 case PHAN_INITIALIZE_COMPLETE:
1926                 case PHAN_INITIALIZE_ACK:
1927                         return QLA_SUCCESS;
1928                 case PHAN_INITIALIZE_FAILED:
1929                         break;
1930                 default:
1931                         break;
1932                 }
1933
1934                 qla_printk(KERN_WARNING, ha,
1935                         "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n",
1936                         val, retries);
1937
1938                 msleep(500);
1939
1940         } while (--retries);
1941
1942         qla_printk(KERN_INFO, ha,
1943                 "Rcv Peg initialization failed: 0x%x.\n", val);
1944         read_lock(&ha->hw_lock);
1945         qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
1946         read_unlock(&ha->hw_lock);
1947         return QLA_FUNCTION_FAILED;
1948 }
1949
1950 /* ISR related functions */
1951 uint32_t qla82xx_isr_int_target_mask_enable[8] = {
1952         ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
1953         ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
1954         ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
1955         ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
1956 };
1957
1958 uint32_t qla82xx_isr_int_target_status[8] = {
1959         ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
1960         ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
1961         ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
1962         ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
1963 };
1964
1965 static struct qla82xx_legacy_intr_set legacy_intr[] = \
1966         QLA82XX_LEGACY_INTR_CONFIG;
1967
1968 /*
1969  * qla82xx_mbx_completion() - Process mailbox command completions.
1970  * @vha: SCSI driver HA context
1971  * @mb0: Mailbox0 register
1972  */
1973 static void
1974 qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1975 {
1976         uint16_t        cnt;
1977         uint16_t __iomem *wptr;
1978         struct qla_hw_data *ha = vha->hw;
1979         struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1980         wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
1981
1982         /* Load return mailbox registers. */
1983         ha->flags.mbox_int = 1;
1984         ha->mailbox_out[0] = mb0;
1985
1986         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1987                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1988                 wptr++;
1989         }
1990
1991         if (ha->mcp) {
1992                 DEBUG3_11(printk(KERN_INFO "%s(%ld): "
1993                         "Got mailbox completion. cmd=%x.\n",
1994                         __func__, vha->host_no, ha->mcp->mb[0]));
1995         } else {
1996                 qla_printk(KERN_INFO, ha,
1997                         "%s(%ld): MBX pointer ERROR!\n",
1998                         __func__, vha->host_no);
1999         }
2000 }
2001
2002 /*
2003  * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
2004  * @irq: interrupt number
2005  * @dev_id: SCSI driver HA context
2007  *
2008  * Called by system whenever the host adapter generates an interrupt.
2009  *
2010  * Returns handled flag.
2011  */
2012 irqreturn_t
2013 qla82xx_intr_handler(int irq, void *dev_id)
2014 {
2015         scsi_qla_host_t *vha;
2016         struct qla_hw_data *ha;
2017         struct rsp_que *rsp;
2018         struct device_reg_82xx __iomem *reg;
2019         int status = 0, status1 = 0;
2020         unsigned long   flags;
2021         unsigned long   iter;
2022         uint32_t        stat;
2023         uint16_t        mb[4];
2024
2025         rsp = (struct rsp_que *) dev_id;
2026         if (!rsp) {
2027                 printk(KERN_INFO
2028                         "%s(): NULL response queue pointer\n", __func__);
2029                 return IRQ_NONE;
2030         }
2031         ha = rsp->hw;
2032
2033         if (!ha->flags.msi_enabled) {
2034                 status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
2035                 if (!(status & ha->nx_legacy_intr.int_vec_bit))
2036                         return IRQ_NONE;
2037
2038                 status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
2039                 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
2040                         return IRQ_NONE;
2041         }
2042
2043         /* clear the interrupt */
2044         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
2045
2046         /* read twice to ensure write is flushed */
2047         qla82xx_rd_32(ha, ISR_INT_VECTOR);
2048         qla82xx_rd_32(ha, ISR_INT_VECTOR);
2049
2050         reg = &ha->iobase->isp82;
2051
2052         spin_lock_irqsave(&ha->hardware_lock, flags);
2053         vha = pci_get_drvdata(ha->pdev);
2054         for (iter = 1; iter--; ) {
2055
2056                 if (RD_REG_DWORD(&reg->host_int)) {
2057                         stat = RD_REG_DWORD(&reg->host_status);
2058
2059                         switch (stat & 0xff) {
2060                         case 0x1:
2061                         case 0x2:
2062                         case 0x10:
2063                         case 0x11:
2064                                 qla82xx_mbx_completion(vha, MSW(stat));
2065                                 status |= MBX_INTERRUPT;
2066                                 break;
2067                         case 0x12:
2068                                 mb[0] = MSW(stat);
2069                                 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2070                                 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2071                                 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2072                                 qla2x00_async_event(vha, rsp, mb);
2073                                 break;
2074                         case 0x13:
2075                                 qla24xx_process_response_queue(vha, rsp);
2076                                 break;
2077                         default:
2078                                 DEBUG2(printk("scsi(%ld): "
2079                                         " Unrecognized interrupt type (%d).\n",
2080                                         vha->host_no, stat & 0xff));
2081                                 break;
2082                         }
2083                 }
2084                 WRT_REG_DWORD(&reg->host_int, 0);
2085         }
2086         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2087         if (!ha->flags.msi_enabled)
2088                 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2089
2090 #ifdef QL_DEBUG_LEVEL_17
2091         if (!irq && ha->flags.eeh_busy)
2092                 qla_printk(KERN_WARNING, ha,
2093                     "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
2094                     status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2095 #endif
2096
2097         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2098             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2099                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2100                 complete(&ha->mbx_intr_comp);
2101         }
2102         return IRQ_HANDLED;
2103 }
2104
2105 irqreturn_t
2106 qla82xx_msix_default(int irq, void *dev_id)
2107 {
2108         scsi_qla_host_t *vha;
2109         struct qla_hw_data *ha;
2110         struct rsp_que *rsp;
2111         struct device_reg_82xx __iomem *reg;
2112         int status = 0;
2113         unsigned long flags;
2114         uint32_t stat;
2115         uint16_t mb[4];
2116
2117         rsp = (struct rsp_que *) dev_id;
2118         if (!rsp) {
2119                 printk(KERN_INFO
2120                         "%s(): NULL response queue pointer\n", __func__);
2121                 return IRQ_NONE;
2122         }
2123         ha = rsp->hw;
2124
2125         reg = &ha->iobase->isp82;
2126
2127         spin_lock_irqsave(&ha->hardware_lock, flags);
2128         vha = pci_get_drvdata(ha->pdev);
2129         do {
2130                 if (RD_REG_DWORD(&reg->host_int)) {
2131                         stat = RD_REG_DWORD(&reg->host_status);
2132
2133                         switch (stat & 0xff) {
2134                         case 0x1:
2135                         case 0x2:
2136                         case 0x10:
2137                         case 0x11:
2138                                 qla82xx_mbx_completion(vha, MSW(stat));
2139                                 status |= MBX_INTERRUPT;
2140                                 break;
2141                         case 0x12:
2142                                 mb[0] = MSW(stat);
2143                                 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2144                                 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2145                                 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2146                                 qla2x00_async_event(vha, rsp, mb);
2147                                 break;
2148                         case 0x13:
2149                                 qla24xx_process_response_queue(vha, rsp);
2150                                 break;
2151                         default:
2152                                 DEBUG2(printk("scsi(%ld): "
2153                                         " Unrecognized interrupt type (%d).\n",
2154                                         vha->host_no, stat & 0xff));
2155                                 break;
2156                         }
2157                 }
2158                 WRT_REG_DWORD(&reg->host_int, 0);
2159         } while (0);
2160
2161         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2162
2163 #ifdef QL_DEBUG_LEVEL_17
2164         if (!irq && ha->flags.eeh_busy)
2165                 qla_printk(KERN_WARNING, ha,
2166                         "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
2167                         status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2168 #endif
2169
2170         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2171                 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2172                         set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2173                         complete(&ha->mbx_intr_comp);
2174         }
2175         return IRQ_HANDLED;
2176 }
2177
2178 irqreturn_t
2179 qla82xx_msix_rsp_q(int irq, void *dev_id)
2180 {
2181         scsi_qla_host_t *vha;
2182         struct qla_hw_data *ha;
2183         struct rsp_que *rsp;
2184         struct device_reg_82xx __iomem *reg;
2185
2186         rsp = (struct rsp_que *) dev_id;
2187         if (!rsp) {
2188                 printk(KERN_INFO
2189                         "%s(): NULL response queue pointer\n", __func__);
2190                 return IRQ_NONE;
2191         }
2192
2193         ha = rsp->hw;
2194         reg = &ha->iobase->isp82;
2195         spin_lock_irq(&ha->hardware_lock);
2196         vha = pci_get_drvdata(ha->pdev);
2197         qla24xx_process_response_queue(vha, rsp);
2198         WRT_REG_DWORD(&reg->host_int, 0);
2199         spin_unlock_irq(&ha->hardware_lock);
2200         return IRQ_HANDLED;
2201 }
2202
2203 void
2204 qla82xx_poll(int irq, void *dev_id)
2205 {
2206         scsi_qla_host_t *vha;
2207         struct qla_hw_data *ha;
2208         struct rsp_que *rsp;
2209         struct device_reg_82xx __iomem *reg;
2210         int status = 0;
2211         uint32_t stat;
2212         uint16_t mb[4];
2213         unsigned long flags;
2214
2215         rsp = (struct rsp_que *) dev_id;
2216         if (!rsp) {
2217                 printk(KERN_INFO
2218                         "%s(): NULL response queue pointer\n", __func__);
2219                 return;
2220         }
2221         ha = rsp->hw;
2222
2223         reg = &ha->iobase->isp82;
2224         spin_lock_irqsave(&ha->hardware_lock, flags);
2225         vha = pci_get_drvdata(ha->pdev);
2226
2227         if (RD_REG_DWORD(&reg->host_int)) {
2228                 stat = RD_REG_DWORD(&reg->host_status);
2229                 switch (stat & 0xff) {
2230                 case 0x1:
2231                 case 0x2:
2232                 case 0x10:
2233                 case 0x11:
2234                         qla82xx_mbx_completion(vha, MSW(stat));
2235                         status |= MBX_INTERRUPT;
2236                         break;
2237                 case 0x12:
2238                         mb[0] = MSW(stat);
2239                         mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2240                         mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2241                         mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2242                         qla2x00_async_event(vha, rsp, mb);
2243                         break;
2244                 case 0x13:
2245                         qla24xx_process_response_queue(vha, rsp);
2246                         break;
2247                 default:
2248                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2249                                 "(%d).\n",
2250                                 vha->host_no, stat & 0xff));
2251                         break;
2252                 }
2253         }
2254         WRT_REG_DWORD(&reg->host_int, 0);
2255         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2256 }
2257
2258 void
2259 qla82xx_enable_intrs(struct qla_hw_data *ha)
2260 {
2261         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2262         qla82xx_mbx_intr_enable(vha);
2263         spin_lock_irq(&ha->hardware_lock);
2264         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2265         spin_unlock_irq(&ha->hardware_lock);
2266         ha->interrupts_on = 1;
2267 }
2268
2269 void
2270 qla82xx_disable_intrs(struct qla_hw_data *ha)
2271 {
2272         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2273         qla82xx_mbx_intr_disable(vha);
2274         spin_lock_irq(&ha->hardware_lock);
2275         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
2276         spin_unlock_irq(&ha->hardware_lock);
2277         ha->interrupts_on = 0;
2278 }
2279
2280 void qla82xx_init_flags(struct qla_hw_data *ha)
2281 {
2282         struct qla82xx_legacy_intr_set *nx_legacy_intr;
2283
2284         /* ISP 8021 initializations */
2285         rwlock_init(&ha->hw_lock);
2286         ha->qdr_sn_window = -1;
2287         ha->ddr_mn_window = -1;
2288         ha->curr_window = 255;
2289         ha->portnum = PCI_FUNC(ha->pdev->devfn);
2290         nx_legacy_intr = &legacy_intr[ha->portnum];
2291         ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
2292         ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
2293         ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
2294         ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
2295 }
2296
2297 inline void
2298 qla82xx_set_drv_active(scsi_qla_host_t *vha)
2299 {
2300         uint32_t drv_active;
2301         struct qla_hw_data *ha = vha->hw;
2302
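        /* Each PCI function owns a 4-bit slot in DRV_ACTIVE (portnum * 4). */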
2303         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2304
2305         /* If reset value is all FF's, initialize DRV_ACTIVE */
2306         if (drv_active == 0xffffffff) {
2307                 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
2308                         QLA82XX_DRV_NOT_ACTIVE);
2309                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2310         }
2311         drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2312         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2313 }
2314
2315 inline void
2316 qla82xx_clear_drv_active(struct qla_hw_data *ha)
2317 {
2318         uint32_t drv_active;
2319
2320         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2321         drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2322         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2323 }
2324
2325 static inline int
2326 qla82xx_need_reset(struct qla_hw_data *ha)
2327 {
2328         uint32_t drv_state;
2329         int rval;
2330
2331         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2332         rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2333         return rval;
2334 }
2335
2336 static inline void
2337 qla82xx_set_rst_ready(struct qla_hw_data *ha)
2338 {
2339         uint32_t drv_state;
2340         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2341
2342         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2343
2344         /* If reset value is all FF's, initialize DRV_STATE */
2345         if (drv_state == 0xffffffff) {
2346                 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
2347                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2348         }
2349         drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2350         qla_printk(KERN_INFO, ha,
2351                 "%s(%ld):drv_state = 0x%x\n",
2352                 __func__, vha->host_no, drv_state);
2353         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2354 }
2355
2356 static inline void
2357 qla82xx_clear_rst_ready(struct qla_hw_data *ha)
2358 {
2359         uint32_t drv_state;
2360
2361         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2362         drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2363         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2364 }
2365
2366 static inline void
2367 qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2368 {
2369         uint32_t qsnt_state;
2370
2371         qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2372         qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2373         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2374 }
2375
2376 void
2377 qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
2378 {
2379         struct qla_hw_data *ha = vha->hw;
2380         uint32_t qsnt_state;
2381
2382         qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2383         qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2384         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2385 }
2386
2387 static int
2388 qla82xx_load_fw(scsi_qla_host_t *vha)
2389 {
2390         int rst;
2391         struct fw_blob *blob;
2392         struct qla_hw_data *ha = vha->hw;
2393
2394         if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2395                 qla_printk(KERN_ERR, ha,
2396                         "%s: Error during CRB Initialization\n", __func__);
2397                 return QLA_FUNCTION_FAILED;
2398         }
2399         udelay(500);
2400
2401         /* Bring QM and CAMRAM out of reset */
2402         rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
2403         rst &= ~((1 << 28) | (1 << 24));
2404         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
2405
2406         /*
2407          * FW Load priority:
2408          * 1) Operational firmware residing in flash.
2409          * 2) Firmware via request-firmware interface (.bin file).
2410          */
2411         if (ql2xfwloadbin == 2)
2412                 goto try_blob_fw;
2413
2414         qla_printk(KERN_INFO, ha,
2415                 "Attempting to load firmware from flash\n");
2416
2417         if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2418                 qla_printk(KERN_ERR, ha,
2419                     "Firmware loaded successfully from flash\n");
2420                 return QLA_SUCCESS;
2421         } else {
2422                 qla_printk(KERN_ERR, ha,
2423                     "Firmware load from flash failed\n");
2424         }
2425
2426 try_blob_fw:
2427         qla_printk(KERN_INFO, ha,
2428             "Attempting to load firmware from blob\n");
2429
2430         /* Load firmware blob. */
2431         blob = ha->hablob = qla2x00_request_firmware(vha);
2432         if (!blob) {
2433                 qla_printk(KERN_ERR, ha,
2434                         "Firmware image not present.\n");
2435                 goto fw_load_failed;
2436         }
2437
2438         /* Validating firmware blob */
2439         if (qla82xx_validate_firmware_blob(vha,
2440                 QLA82XX_FLASH_ROMIMAGE)) {
2441                 /* Fallback to URI format */
2442                 if (qla82xx_validate_firmware_blob(vha,
2443                         QLA82XX_UNIFIED_ROMIMAGE)) {
2444                         qla_printk(KERN_ERR, ha,
2445                                 "No valid firmware image found!!!");
2446                         return QLA_FUNCTION_FAILED;
2447                 }
2448         }
2449
2450         if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2451                 qla_printk(KERN_ERR, ha,
2452                         "%s: Firmware loaded successfully "
2453                         " from binary blob\n", __func__);
2454                 return QLA_SUCCESS;
2455         } else {
2456                 qla_printk(KERN_ERR, ha,
2457                     "Firmware load failed from binary blob\n");
2458                 blob->fw = NULL;
2459                 blob = NULL;
2460                 goto fw_load_failed;
2461         }
2462         return QLA_SUCCESS;
2463
2464 fw_load_failed:
2465         return QLA_FUNCTION_FAILED;
2466 }
2467
2468 int
2469 qla82xx_start_firmware(scsi_qla_host_t *vha)
2470 {
2471         int           pcie_cap;
2472         uint16_t      lnk;
2473         struct qla_hw_data *ha = vha->hw;
2474
2475         /* scrub dma mask expansion register */
2476         qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
2477
2478         /* Put both the PEG CMD and RCV PEG to default state
2479          * of 0 before resetting the hardware
2480          */
2481         qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
2482         qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
2483
2484         /* Overwrite stale initialization register values */
2485         qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
2486         qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2487
2488         if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2489                 qla_printk(KERN_INFO, ha,
2490                         "%s: Error trying to start fw!\n", __func__);
2491                 return QLA_FUNCTION_FAILED;
2492         }
2493
2494         /* Handshake with the card before we register the devices. */
2495         if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2496                 qla_printk(KERN_INFO, ha,
2497                         "%s: Error during card handshake!\n", __func__);
2498                 return QLA_FUNCTION_FAILED;
2499         }
2500
2501         /* Negotiated Link width */
2502         pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
2503         pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
2504         ha->link_width = (lnk >> 4) & 0x3f;
2505
2506         /* Synchronize with Receive peg */
2507         return qla82xx_check_rcvpeg_state(ha);
2508 }
2509
2510 static inline int
2511 qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2512         uint16_t tot_dsds)
2513 {
2514         uint32_t *cur_dsd = NULL;
2515         scsi_qla_host_t *vha;
2516         struct qla_hw_data *ha;
2517         struct scsi_cmnd *cmd;
2518         struct  scatterlist *cur_seg;
2519         uint32_t *dsd_seg;
2520         void *next_dsd;
2521         uint8_t avail_dsds;
2522         uint8_t first_iocb = 1;
2523         uint32_t dsd_list_len;
2524         struct dsd_dma *dsd_ptr;
2525         struct ct6_dsd *ctx;
2526
2527         cmd = sp->cmd;
2528
2529         /* Update entry type to indicate Command Type 3 IOCB */
2530         *((uint32_t *)(&cmd_pkt->entry_type)) =
2531                 __constant_cpu_to_le32(COMMAND_TYPE_6);
2532
2533         /* No data transfer */
2534         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
2535                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
2536                 return 0;
2537         }
2538
2539         vha = sp->fcport->vha;
2540         ha = vha->hw;
2541
2542         /* Set transfer direction */
2543         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2544                 cmd_pkt->control_flags =
2545                     __constant_cpu_to_le16(CF_WRITE_DATA);
2546                 ha->qla_stats.output_bytes += scsi_bufflen(cmd);
2547         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
2548                 cmd_pkt->control_flags =
2549                     __constant_cpu_to_le16(CF_READ_DATA);
2550                 ha->qla_stats.input_bytes += scsi_bufflen(cmd);
2551         }
2552
2553         cur_seg = scsi_sglist(cmd);
2554         ctx = sp->ctx;
2555
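        /*
         * Chain the data segments through externally allocated DSD lists:
         * each list holds up to QLA_DSDS_PER_IOCB segments plus a pointer to
         * the next list; the first list is referenced from the IOCB itself.
         */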
2556         while (tot_dsds) {
2557                 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
2558                     QLA_DSDS_PER_IOCB : tot_dsds;
2559                 tot_dsds -= avail_dsds;
2560                 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
2561
2562                 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
2563                     struct dsd_dma, list);
2564                 next_dsd = dsd_ptr->dsd_addr;
2565                 list_del(&dsd_ptr->list);
2566                 ha->gbl_dsd_avail--;
2567                 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
2568                 ctx->dsd_use_cnt++;
2569                 ha->gbl_dsd_inuse++;
2570
2571                 if (first_iocb) {
2572                         first_iocb = 0;
2573                         dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2574                         *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2575                         *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2576                         *dsd_seg++ = cpu_to_le32(dsd_list_len);
2577                 } else {
2578                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2579                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2580                         *cur_dsd++ = cpu_to_le32(dsd_list_len);
2581                 }
2582                 cur_dsd = (uint32_t *)next_dsd;
2583                 while (avail_dsds) {
2584                         dma_addr_t      sle_dma;
2585
2586                         sle_dma = sg_dma_address(cur_seg);
2587                         *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2588                         *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2589                         *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
2590                         cur_seg = sg_next(cur_seg);
2591                         avail_dsds--;
2592                 }
2593         }
2594
2595         /* Null termination */
2596         *cur_dsd++ =  0;
2597         *cur_dsd++ = 0;
2598         *cur_dsd++ = 0;
2599         cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
2600         return 0;
2601 }
2602
2603 /*
2604  * qla82xx_calc_dsd_lists() - Determine the number of DSD lists required
2605  * for Command Type 6.
2606  *
2607  * @dsds: number of data segment descriptors needed
2608  *
2609  * Returns the number of DSD lists needed to store @dsds.
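 *
 * For example, 2 * QLA_DSDS_PER_IOCB + 1 descriptors require three lists.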
2610  */
2611 inline uint16_t
2612 qla82xx_calc_dsd_lists(uint16_t dsds)
2613 {
2614         uint16_t dsd_lists = 0;
2615
2616         dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
2617         if (dsds % QLA_DSDS_PER_IOCB)
2618                 dsd_lists++;
2619         return dsd_lists;
2620 }
2621
2622 /*
2623  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2624  * @sp: command to send to the ISP
2625  *
2626  * Returns non-zero if a failure occurred, else zero.
2627  */
2628 int
2629 qla82xx_start_scsi(srb_t *sp)
2630 {
2631         int             ret, nseg;
2632         unsigned long   flags;
2633         struct scsi_cmnd *cmd;
2634         uint32_t        *clr_ptr;
2635         uint32_t        index;
2636         uint32_t        handle;
2637         uint16_t        cnt;
2638         uint16_t        req_cnt;
2639         uint16_t        tot_dsds;
2640         struct device_reg_82xx __iomem *reg;
2641         uint32_t dbval;
2642         uint32_t *fcp_dl;
2643         uint8_t additional_cdb_len;
2644         struct ct6_dsd *ctx;
2645         struct scsi_qla_host *vha = sp->fcport->vha;
2646         struct qla_hw_data *ha = vha->hw;
2647         struct req_que *req = NULL;
2648         struct rsp_que *rsp = NULL;
2649         char            tag[2];
2650
2651         /* Setup device pointers. */
2652         ret = 0;
2653         reg = &ha->iobase->isp82;
2654         cmd = sp->cmd;
2655         req = vha->req;
2656         rsp = ha->rsp_q_map[0];
2657
2658         /* So we know we haven't pci_map'ed anything yet */
2659         tot_dsds = 0;
2660
2661         dbval = 0x04 | (ha->portnum << 5);
2662
2663         /* Send marker if required */
2664         if (vha->marker_needed != 0) {
2665                 if (qla2x00_marker(vha, req,
2666                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2667                         return QLA_FUNCTION_FAILED;
2668                 vha->marker_needed = 0;
2669         }
2670
2671         /* Acquire ring specific lock */
2672         spin_lock_irqsave(&ha->hardware_lock, flags);
2673
2674         /* Check for room in outstanding command list. */
2675         handle = req->current_outstanding_cmd;
2676         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2677                 handle++;
2678                 if (handle == MAX_OUTSTANDING_COMMANDS)
2679                         handle = 1;
2680                 if (!req->outstanding_cmds[handle])
2681                         break;
2682         }
2683         if (index == MAX_OUTSTANDING_COMMANDS)
2684                 goto queuing_error;
2685
2686         /* Map the sg table so we have an accurate count of sg entries needed */
2687         if (scsi_sg_count(cmd)) {
2688                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2689                     scsi_sg_count(cmd), cmd->sc_data_direction);
2690                 if (unlikely(!nseg))
2691                         goto queuing_error;
2692         } else
2693                 nseg = 0;
2694
2695         tot_dsds = nseg;
2696
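        /*
         * Transfers needing more than ql2xshiftctondsd data segments are sent
         * as Command Type 6 IOCBs with externally chained DSD lists; smaller
         * transfers use a standard Command Type 7 IOCB with inline segments.
         */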
2697         if (tot_dsds > ql2xshiftctondsd) {
2698                 struct cmd_type_6 *cmd_pkt;
2699                 uint16_t more_dsd_lists = 0;
2700                 struct dsd_dma *dsd_ptr;
2701                 uint16_t i;
2702
2703                 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2704                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN)
2705                         goto queuing_error;
2706
2707                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2708                         goto sufficient_dsds;
2709                 else
2710                         more_dsd_lists -= ha->gbl_dsd_avail;
2711
2712                 for (i = 0; i < more_dsd_lists; i++) {
2713                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2714                         if (!dsd_ptr)
2715                                 goto queuing_error;
2716
2717                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2718                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2719                         if (!dsd_ptr->dsd_addr) {
2720                                 kfree(dsd_ptr);
2721                                 goto queuing_error;
2722                         }
2723                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2724                         ha->gbl_dsd_avail++;
2725                 }
2726
2727 sufficient_dsds:
2728                 req_cnt = 1;
2729
2730                 if (req->cnt < (req_cnt + 2)) {
2731                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2732                                 &reg->req_q_out[0]);
2733                         if (req->ring_index < cnt)
2734                                 req->cnt = cnt - req->ring_index;
2735                         else
2736                                 req->cnt = req->length -
2737                                         (req->ring_index - cnt);
2738                 }
2739
2740                 if (req->cnt < (req_cnt + 2))
2741                         goto queuing_error;
2742
2743                 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2744                 if (!sp->ctx) {
2745                         DEBUG(printk(KERN_INFO
2746                                 "%s(%ld): failed to allocate"
2747                                 " ctx.\n", __func__, vha->host_no));
2748                         goto queuing_error;
2749                 }
2750                 memset(ctx, 0, sizeof(struct ct6_dsd));
2751                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2752                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2753                 if (!ctx->fcp_cmnd) {
2754                         DEBUG2_3(printk("%s(%ld): failed to allocate"
2755                                 " fcp_cmnd.\n", __func__, vha->host_no));
2756                         goto queuing_error_fcp_cmnd;
2757                 }
2758
2759                 /* Initialize the DSD list and dma handle */
2760                 INIT_LIST_HEAD(&ctx->dsd_list);
2761                 ctx->dsd_use_cnt = 0;
2762
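                /*
                 * FCP_CMND IU length: 8-byte LUN plus 4 bytes of task
                 * attribute/management flags (12 total), the CDB (16 bytes
                 * plus any additional CDB bytes, which must pad to a 4-byte
                 * multiple), and the 4-byte FCP_DL field at the end.
                 */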
2763                 if (cmd->cmd_len > 16) {
2764                         additional_cdb_len = cmd->cmd_len - 16;
2765                         if ((cmd->cmd_len % 4) != 0) {
2766                                 /* SCSI command bigger than 16 bytes must be
2767                                  * multiple of 4
2768                                  */
2769                                 goto queuing_error_fcp_cmnd;
2770                         }
2771                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2772                 } else {
2773                         additional_cdb_len = 0;
2774                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2775                 }
2776
2777                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2778                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2779
2780                 /* Zero out remaining portion of packet. */
2781                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2782                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2783                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2784                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2785
2786                 /* Set NPORT-ID and LUN number*/
2787                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2788                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2789                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2790                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2791                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2792
2793                 /* Build IOCB segments */
2794                 if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2795                         goto queuing_error_fcp_cmnd;
2796
2797                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2798                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2799
2800                 /*
2801                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2802                  */
2803                 if (scsi_populate_tag_msg(cmd, tag)) {
2804                         switch (tag[0]) {
2805                         case HEAD_OF_QUEUE_TAG:
2806                                 ctx->fcp_cmnd->task_attribute =
2807                                     TSK_HEAD_OF_QUEUE;
2808                                 break;
2809                         case ORDERED_QUEUE_TAG:
2810                                 ctx->fcp_cmnd->task_attribute =
2811                                     TSK_ORDERED;
2812                                 break;
2813                         }
2814                 }
2815
2816                 /* build FCP_CMND IU */
2817                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2818                 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2819                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2820
2821                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2822                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2823                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2824                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2825
2826                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2827
2828                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2829                     additional_cdb_len);
2830                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2831
2832                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2833                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2834                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2835                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2836                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2837
2838                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2839                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2840                 /* Set total data segment count. */
2841                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2842                 /* Specify response queue number where
2843                  * completion should happen
2844                  */
2845                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2846         } else {
2847                 struct cmd_type_7 *cmd_pkt;
2848                 req_cnt = qla24xx_calc_iocbs(tot_dsds);
2849                 if (req->cnt < (req_cnt + 2)) {
2850                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2851                             &reg->req_q_out[0]);
2852                         if (req->ring_index < cnt)
2853                                 req->cnt = cnt - req->ring_index;
2854                         else
2855                                 req->cnt = req->length -
2856                                         (req->ring_index - cnt);
2857                 }
2858                 if (req->cnt < (req_cnt + 2))
2859                         goto queuing_error;
2860
2861                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2862                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2863
2864                 /* Zero out remaining portion of packet. */
2865                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2866                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2867                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2868                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2869
2870                 /* Set NPORT-ID and LUN number*/
2871                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2872                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2873                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2874                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2875                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2876
2877                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2878                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2879                         sizeof(cmd_pkt->lun));
2880
2881                 /*
2882                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2883                  */
2884                 if (scsi_populate_tag_msg(cmd, tag)) {
2885                         switch (tag[0]) {
2886                         case HEAD_OF_QUEUE_TAG:
2887                                 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2888                                 break;
2889                         case ORDERED_QUEUE_TAG:
2890                                 cmd_pkt->task = TSK_ORDERED;
2891                                 break;
2892                         }
2893                 }
2894
2895                 /* Load SCSI command packet. */
2896                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2897                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2898
2899                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2900
2901                 /* Build IOCB segments */
2902                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2903
2904                 /* Set total IOCB entry count. */
2905                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2906                 /* Specify response queue number where
2907                  * completion should happen.
2908                  */
2909                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2910
2911         }
2912         /* Build command packet. */
2913         req->current_outstanding_cmd = handle;
2914         req->outstanding_cmds[handle] = sp;
2915         sp->handle = handle;
2916         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2917         req->cnt -= req_cnt;
2918         wmb();
2919
2920         /* Adjust ring index. */
2921         req->ring_index++;
2922         if (req->ring_index == req->length) {
2923                 req->ring_index = 0;
2924                 req->ring_ptr = req->ring;
2925         } else
2926                 req->ring_ptr++;
2927
2928         sp->flags |= SRB_DMA_VALID;
2929
2930         /* Set chip new ring index. */
2931         /* write, read and verify logic */
2932         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
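        /*
         * The low bits of dbval are set up earlier in this function; here
         * the request-queue id is folded into bits 15:8 and the new ring
         * index into bits 31:16.  With ql2xdbwr clear, the write is
         * repeated until a read of nxdb_rd_ptr returns the same value.
         */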
2933         if (ql2xdbwr)
2934                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2935         else {
2936                 WRT_REG_DWORD(
2937                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2938                         dbval);
2939                 wmb();
2940                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2941                         WRT_REG_DWORD(
2942                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2943                                 dbval);
2944                         wmb();
2945                 }
2946         }
2947
2948         /* Manage unprocessed RIO/ZIO commands in response queue. */
2949         if (vha->flags.process_response_queue &&
2950             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2951                 qla24xx_process_response_queue(vha, rsp);
2952
2953         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2954         return QLA_SUCCESS;
2955
2956 queuing_error_fcp_cmnd:
2957         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2958 queuing_error:
2959         if (tot_dsds)
2960                 scsi_dma_unmap(cmd);
2961
2962         if (sp->ctx) {
2963                 mempool_free(sp->ctx, ha->ctx_mempool);
2964                 sp->ctx = NULL;
2965         }
2966         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2967
2968         return QLA_FUNCTION_FAILED;
2969 }
2970
2971 static uint32_t *
2972 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2973         uint32_t length)
2974 {
2975         uint32_t i;
2976         uint32_t val;
2977         struct qla_hw_data *ha = vha->hw;
2978
2979         /* Dword reads to flash. */
2980         for (i = 0; i < length/4; i++, faddr += 4) {
2981                 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
2982                         qla_printk(KERN_WARNING, ha,
2983                             "ROM fast read failed\n");
2984                         goto done_read;
2985                 }
2986                 dwptr[i] = cpu_to_le32(val);
2987         }
2988 done_read:
2989         return dwptr;
2990 }
2991
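/*
 * Flash write protection lives in the serial-flash status register.
 * qla82xx_unprotect_flash() clears the block-protect bits (the "<< 2"
 * shift places them at the BP bit positions of an M25P-style part) under
 * the ROM lock; qla82xx_protect_flash() sets them again once the update
 * is complete.
 */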
2992 static int
2993 qla82xx_unprotect_flash(struct qla_hw_data *ha)
2994 {
2995         int ret;
2996         uint32_t val;
2997
2998         ret = ql82xx_rom_lock_d(ha);
2999         if (ret < 0) {
3000                 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
3001                 return ret;
3002         }
3003
3004         ret = qla82xx_read_status_reg(ha, &val);
3005         if (ret < 0)
3006                 goto done_unprotect;
3007
3008         val &= ~(BLOCK_PROTECT_BITS << 2);
3009         ret = qla82xx_write_status_reg(ha, val);
3010         if (ret < 0) {
3011                 val |= (BLOCK_PROTECT_BITS << 2);
3012                 qla82xx_write_status_reg(ha, val);
3013         }
3014
3015         if (qla82xx_write_disable_flash(ha) != 0)
3016                 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
3017
3018 done_unprotect:
3019         qla82xx_rom_unlock(ha);
3020         return ret;
3021 }
3022
3023 static int
3024 qla82xx_protect_flash(struct qla_hw_data *ha)
3025 {
3026         int ret;
3027         uint32_t val;
3028
3029         ret = ql82xx_rom_lock_d(ha);
3030         if (ret < 0) {
3031                 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
3032                 return ret;
3033         }
3034
3035         ret = qla82xx_read_status_reg(ha, &val);
3036         if (ret < 0)
3037                 goto done_protect;
3038
3039         val |= (BLOCK_PROTECT_BITS << 2);
3040         /* LOCK all sectors */
3041         ret = qla82xx_write_status_reg(ha, val);
3042         if (ret < 0)
3043                 qla_printk(KERN_WARNING, ha, "Write status register failed\n");
3044
3045         if (qla82xx_write_disable_flash(ha) != 0)
3046                 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
3047 done_protect:
3048         qla82xx_rom_unlock(ha);
3049         return ret;
3050 }
3051
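/*
 * Erase a single flash sector: under the ROM lock, enable writes, load the
 * sector address with a 3-byte address count, issue the M25P sector-erase
 * instruction, then wait for both the ROM sequencer and the flash write
 * cycle to finish.
 */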
3052 static int
3053 qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3054 {
3055         int ret = 0;
3056
3057         ret = ql82xx_rom_lock_d(ha);
3058         if (ret < 0) {
3059                 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
3060                 return ret;
3061         }
3062
3063         qla82xx_flash_set_write_enable(ha);
3064         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
3065         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
3066         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
3067
3068         if (qla82xx_wait_rom_done(ha)) {
3069                 qla_printk(KERN_WARNING, ha,
3070                     "Error waiting for rom done\n");
3071                 ret = -1;
3072                 goto done;
3073         }
3074         ret = qla82xx_flash_wait_write_finish(ha);
3075 done:
3076         qla82xx_rom_unlock(ha);
3077         return ret;
3078 }
3079
3080 /*
3081  * Address and length are in bytes.
3082  */
3083 uint8_t *
3084 qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3085         uint32_t offset, uint32_t length)
3086 {
3087         scsi_block_requests(vha->host);
3088         qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
3089         scsi_unblock_requests(vha->host);
3090         return buf;
3091 }
3092
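/*
 * Program 'dwords' 32-bit words starting at flash address 'faddr':
 * unprotect the flash, erase each sector as its boundary is reached, then
 * either burst-write through a DMA buffer via qla2x00_load_ram() or fall
 * back to dword-by-dword programming, and finally re-protect the flash.
 * As written, page_mode stays 0, so the burst path is never armed and the
 * slow dword path is always taken.
 */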
3093 static int
3094 qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3095         uint32_t faddr, uint32_t dwords)
3096 {
3097         int ret;
3098         uint32_t liter;
3099         uint32_t sec_mask, rest_addr;
3100         dma_addr_t optrom_dma;
3101         void *optrom = NULL;
3102         int page_mode = 0;
3103         struct qla_hw_data *ha = vha->hw;
3104
3105         ret = -1;
3106
3107         /* Prepare burst-capable write on supported ISPs. */
3108         if (page_mode && !(faddr & 0xfff) &&
3109             dwords > OPTROM_BURST_DWORDS) {
3110                 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
3111                     &optrom_dma, GFP_KERNEL);
3112                 if (!optrom) {
3113                         qla_printk(KERN_DEBUG, ha,
3114                                 "Unable to allocate memory for optrom "
3115                                 "burst write (%x KB).\n",
3116                                 OPTROM_BURST_SIZE / 1024);
3117                 }
3118         }
3119
3120         rest_addr = ha->fdt_block_size - 1;
3121         sec_mask = ~rest_addr;
3122
3123         ret = qla82xx_unprotect_flash(ha);
3124         if (ret) {
3125                 qla_printk(KERN_WARNING, ha,
3126                         "Unable to unprotect flash for update.\n");
3127                 goto write_done;
3128         }
3129
3130         for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3131                 /* Are we at the beginning of a sector? */
3132                 if ((faddr & rest_addr) == 0) {
3133
3134                         ret = qla82xx_erase_sector(ha, faddr);
3135                         if (ret) {
3136                                 DEBUG9(qla_printk(KERN_ERR, ha,
3137                                     "Unable to erase sector: "
3138                                     "address=%x.\n", faddr));
3139                                 break;
3140                         }
3141                 }
3142
3143                 /* Go with burst-write. */
3144                 if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
3145                         /* Copy data to DMA'ble buffer. */
3146                         memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
3147
3148                         ret = qla2x00_load_ram(vha, optrom_dma,
3149                             (ha->flash_data_off | faddr),
3150                             OPTROM_BURST_DWORDS);
3151                         if (ret != QLA_SUCCESS) {
3152                                 qla_printk(KERN_WARNING, ha,
3153                                     "Unable to burst-write optrom segment "
3154                                     "(%x/%x/%llx).\n", ret,
3155                                     (ha->flash_data_off | faddr),
3156                                     (unsigned long long)optrom_dma);
3157                                 qla_printk(KERN_WARNING, ha,
3158                                     "Reverting to slow-write.\n");
3159
3160                                 dma_free_coherent(&ha->pdev->dev,
3161                                     OPTROM_BURST_SIZE, optrom, optrom_dma);
3162                                 optrom = NULL;
3163                         } else {
3164                                 liter += OPTROM_BURST_DWORDS - 1;
3165                                 faddr += OPTROM_BURST_DWORDS - 1;
3166                                 dwptr += OPTROM_BURST_DWORDS - 1;
3167                                 continue;
3168                         }
3169                 }
3170
3171                 ret = qla82xx_write_flash_dword(ha, faddr,
3172                     cpu_to_le32(*dwptr));
3173                 if (ret) {
3174                         DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program "
3175                             "flash address=%x data=%x.\n", __func__,
3176                             ha->host_no, faddr, *dwptr));
3177                         break;
3178                 }
3179         }
3180
3181         ret = qla82xx_protect_flash(ha);
3182         if (ret)
3183                 qla_printk(KERN_WARNING, ha,
3184                     "Unable to protect flash after update.\n");
3185 write_done:
3186         if (optrom)
3187                 dma_free_coherent(&ha->pdev->dev,
3188                     OPTROM_BURST_SIZE, optrom, optrom_dma);
3189         return ret;
3190 }
3191
3192 int
3193 qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3194         uint32_t offset, uint32_t length)
3195 {
3196         int rval;
3197
3198         /* Suspend HBA. */
3199         scsi_block_requests(vha->host);
3200         rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
3201                 length >> 2);
3202         scsi_unblock_requests(vha->host);
3203
3204         /* Convert the ISP82xx return code to a generic one. */
3205         if (rval)
3206                 rval = QLA_FUNCTION_FAILED;
3207         else
3208                 rval = QLA_SUCCESS;
3209         return rval;
3210 }
3211
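/*
 * qla82xx_start_iocbs
 *      Kick off an already-built IOCB on the ISP82xx: advance the request
 *      ring and ring the doorbell with the same write/read-back
 *      verification used in the I/O path above.
 */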
3212 void
3213 qla82xx_start_iocbs(srb_t *sp)
3214 {
3215         struct qla_hw_data *ha = sp->fcport->vha->hw;
3216         struct req_que *req = ha->req_q_map[0];
3217         struct device_reg_82xx __iomem *reg;
3218         uint32_t dbval;
3219
3220         /* Adjust ring index. */
3221         req->ring_index++;
3222         if (req->ring_index == req->length) {
3223                 req->ring_index = 0;
3224                 req->ring_ptr = req->ring;
3225         } else
3226                 req->ring_ptr++;
3227
3228         reg = &ha->iobase->isp82;
3229         dbval = 0x04 | (ha->portnum << 5);
3230
3231         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3232         if (ql2xdbwr)
3233                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
3234         else {
3235                 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
3236                 wmb();
3237                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3238                         WRT_REG_DWORD((unsigned long  __iomem *)ha->nxdb_wr_ptr,
3239                                 dbval);
3240                         wmb();
3241                 }
3242         }
3243 }
3244
3245 void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3246 {
3247         if (qla82xx_rom_lock(ha))
3248                 /* Someone else is holding the lock. */
3249                 qla_printk(KERN_INFO, ha, "Resetting rom_lock\n");
3250
3251         /*
3252          * Either we got the lock, or someone
3253          * else died while holding it.
3254          * In either case, unlock.
3255          */
3256         qla82xx_rom_unlock(ha);
3257 }
3258
3259 /*
3260  * qla82xx_device_bootstrap
3261  *    Initialize device, set DEV_READY, start fw
3262  *
3263  * Note:
3264  *      IDC lock must be held upon entry
3265  *
3266  * Return:
3267  *    Success : 0
3268  *    Failed  : 1
3269  */
3270 static int
3271 qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3272 {
3273         int rval = QLA_SUCCESS;
3274         int i, timeout;
3275         uint32_t old_count, count;
3276         struct qla_hw_data *ha = vha->hw;
3277         int need_reset = 0, peg_stuck = 1;
3278
3279         need_reset = qla82xx_need_reset(ha);
3280
3281         old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3282
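        /*
         * Sample the PEG alive counter ten times over roughly two seconds;
         * if it never advances, the protocol engine is assumed stuck and
         * the ROM lock is forcibly recovered before (re)initialization.
         */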
3283         for (i = 0; i < 10; i++) {
3284                 timeout = msleep_interruptible(200);
3285                 if (timeout) {
3286                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3287                                 QLA82XX_DEV_FAILED);
3288                         return QLA_FUNCTION_FAILED;
3289                 }
3290
3291                 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3292                 if (count != old_count)
3293                         peg_stuck = 0;
3294         }
3295
3296         if (need_reset) {
3297                 /* We are trying to perform a recovery here. */
3298                 if (peg_stuck)
3299                         qla82xx_rom_lock_recovery(ha);
3300                 goto dev_initialize;
3301         } else  {
3302                 /* Start of day for this ha context. */
3303                 if (peg_stuck) {
3304                         /* Either we are the first or recovery in progress. */
3305                         qla82xx_rom_lock_recovery(ha);
3306                         goto dev_initialize;
3307                 } else
3308                         /* Firmware already running. */
3309                         goto dev_ready;
3310         }
3311
3312         return rval;
3313
3314 dev_initialize:
3315         /* set to DEV_INITIALIZING */
3316         qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
3317         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
3318
3319         /* Driver that sets device state to initializing sets IDC version */
3320         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
3321
3322         qla82xx_idc_unlock(ha);
3323         rval = qla82xx_start_firmware(vha);
3324         qla82xx_idc_lock(ha);
3325
3326         if (rval != QLA_SUCCESS) {
3327                 qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
3328                 qla82xx_clear_drv_active(ha);
3329                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
3330                 return rval;
3331         }
3332
3333 dev_ready:
3334         qla_printk(KERN_INFO, ha, "HW State: READY\n");
3335         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
3336
3337         return QLA_SUCCESS;
3338 }
3339
3340 /*
3341 * qla82xx_need_qsnt_handler
3342 *    Code to start quiescence sequence
3343 *
3344 * Note:
3345 *      IDC lock must be held upon entry
3346 *
3347 * Return: void
3348 */
3349
3350 static void
3351 qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3352 {
3353         struct qla_hw_data *ha = vha->hw;
3354         uint32_t dev_state, drv_state, drv_active;
3355         unsigned long reset_timeout;
3356
3357         if (vha->flags.online) {
3358                 /* Block any further I/O and wait for pending commands to complete */
3359                 qla82xx_quiescent_state_cleanup(vha);
3360         }
3361
3362         /* Set the quiescence ready bit */
3363         qla82xx_set_qsnt_ready(ha);
3364
3365         /* Wait up to 30 seconds for other functions to ack */
3366         reset_timeout = jiffies + (30 * HZ);
3367
3368         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3369         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3370         /* 2 is written when qsnt is acked, so shift drv_active one bit */
3371         drv_active = drv_active << 0x01;
3372
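        /*
         * Quiescence handshake: every active function owns a bit in
         * DRV_ACTIVE and acks quiescence through DRV_STATE one bit position
         * higher (hence the shift above).  Poll once a second, dropping the
         * IDC lock while sleeping, until everyone has acked or the 30
         * second window expires.
         */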
3373         while (drv_state != drv_active) {
3374
3375                 if (time_after_eq(jiffies, reset_timeout)) {
3376                         /* Quiescence timed out: other functions didn't
3377                          * ack, so change the state back to DEV_READY
3378                          */
3379                         qla_printk(KERN_INFO, ha,
3380                             "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME);
3381                         qla_printk(KERN_INFO, ha,
3382                             "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active,
3383                             drv_state);
3384                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3385                                                 QLA82XX_DEV_READY);
3386                         qla_printk(KERN_INFO, ha,
3387                             "HW State: DEV_READY\n");
3388                         qla82xx_idc_unlock(ha);
3389                         qla2x00_perform_loop_resync(vha);
3390                         qla82xx_idc_lock(ha);
3391
3392                         qla82xx_clear_qsnt_ready(vha);
3393                         return;
3394                 }
3395
3396                 qla82xx_idc_unlock(ha);
3397                 msleep(1000);
3398                 qla82xx_idc_lock(ha);
3399
3400                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3401                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3402                 drv_active = drv_active << 0x01;
3403         }
3404         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3405         /* Everyone acked, so set the state to DEV_QUIESCENT */
3406         if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
3407                 qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n");
3408                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
3409         }
3410 }
3411
3412 /*
3413 * qla82xx_wait_for_state_change
3414 *    Wait for device state to change from given current state
3415 *
3416 * Note:
3417 *     IDC lock must not be held upon entry
3418 *
3419 * Return:
3420 *    Changed device state.
3421 */
3422 uint32_t
3423 qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
3424 {
3425         struct qla_hw_data *ha = vha->hw;
3426         uint32_t dev_state;
3427
3428         do {
3429                 msleep(1000);
3430                 qla82xx_idc_lock(ha);
3431                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3432                 qla82xx_idc_unlock(ha);
3433         } while (dev_state == curr_state);
3434
3435         return dev_state;
3436 }
3437
3438 static void
3439 qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3440 {
3441         struct qla_hw_data *ha = vha->hw;
3442
3443         /* Disable the board */
3444         qla_printk(KERN_INFO, ha, "Disabling the board\n");
3445
3446         qla82xx_idc_lock(ha);
3447         qla82xx_clear_drv_active(ha);
3448         qla82xx_idc_unlock(ha);
3449
3450         /* Set DEV_FAILED flag to disable timer */
3451         vha->device_flags |= DFLG_DEV_FAILED;
3452         qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3453         qla2x00_mark_all_devices_lost(vha, 0);
3454         vha->flags.online = 0;
3455         vha->flags.init_done = 0;
3456 }
3457
3458 /*
3459  * qla82xx_need_reset_handler
3460  *    Code to start reset sequence
3461  *
3462  * Note:
3463  *      IDC lock must be held upon entry
3464  *
3465  * Return:
3466  *    Success : 0
3467  *    Failed  : 1
3468  */
3469 static void
3470 qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3471 {
3472         uint32_t dev_state, drv_state, drv_active;
3473         unsigned long reset_timeout;
3474         struct qla_hw_data *ha = vha->hw;
3475         struct req_que *req = ha->req_q_map[0];
3476
3477         if (vha->flags.online) {
3478                 qla82xx_idc_unlock(ha);
3479                 qla2x00_abort_isp_cleanup(vha);
3480                 ha->isp_ops->get_flash_version(vha, req->ring);
3481                 ha->isp_ops->nvram_config(vha);
3482                 qla82xx_idc_lock(ha);
3483         }
3484
3485         qla82xx_set_rst_ready(ha);
3486
3487         /* Wait up to nx_reset_timeout seconds for reset ack from all functions */
3488         reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
3489
3490         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3491         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3492
3493         while (drv_state != drv_active) {
3494                 if (time_after_eq(jiffies, reset_timeout)) {
3495                         qla_printk(KERN_INFO, ha,
3496                                 "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME);
3497                         break;
3498                 }
3499                 qla82xx_idc_unlock(ha);
3500                 msleep(1000);
3501                 qla82xx_idc_lock(ha);
3502                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3503                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3504         }
3505
3506         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3507         qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
3508                 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3509
3510         /* Force to DEV_COLD unless someone else is starting a reset */
3511         if (dev_state != QLA82XX_DEV_INITIALIZING) {
3512                 qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
3513                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3514         }
3515 }
3516
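/*
 * qla82xx_check_fw_alive
 *      Heartbeat check: returns 1 when the PEG alive counter has not moved
 *      for two consecutive polls (the caller is expected to poll roughly
 *      once a second), 0 otherwise.  An all-ones read is treated as AER/EEH
 *      in progress and ignored.
 */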
3517 int
3518 qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3519 {
3520         uint32_t fw_heartbeat_counter;
3521         int status = 0;
3522
3523         fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
3524                 QLA82XX_PEG_ALIVE_COUNTER);
3525         /* all 0xff, assume AER/EEH in progress, ignore */
3526         if (fw_heartbeat_counter == 0xffffffff)
3527                 return status;
3528         if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3529                 vha->seconds_since_last_heartbeat++;
3530                 /* FW not alive after 2 seconds */
3531                 if (vha->seconds_since_last_heartbeat == 2) {
3532                         vha->seconds_since_last_heartbeat = 0;
3533                         status = 1;
3534                 }
3535         } else
3536                 vha->seconds_since_last_heartbeat = 0;
3537         vha->fw_heartbeat_counter = fw_heartbeat_counter;
3538         return status;
3539 }
3540
3541 /*
3542  * qla82xx_device_state_handler
3543  *      Main state handler
3544  *
3545  * Note:
3546  *      IDC lock must be held upon entry
3547  *
3548  * Return:
3549  *    Success : 0
3550  *    Failed  : 1
3551  */
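/*
 * The state machine below is driven by QLA82XX_CRB_DEV_STATE, polled under
 * the IDC lock: READY exits successfully, COLD bootstraps the device,
 * INITIALIZING (and unknown states) simply wait, NEED_RESET (unless
 * ql2xdontresethba is set) and NEED_QUIESCENT run their handlers and
 * restart the init timeout, QUIESCENT waits unless this function owns the
 * quiescence, and FAILED disables the board.
 */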
3552 int
3553 qla82xx_device_state_handler(scsi_qla_host_t *vha)
3554 {
3555         uint32_t dev_state;
3556         uint32_t old_dev_state;
3557         int rval = QLA_SUCCESS;
3558         unsigned long dev_init_timeout;
3559         struct qla_hw_data *ha = vha->hw;
3560         int loopcount = 0;
3561
3562         qla82xx_idc_lock(ha);
3563         if (!vha->flags.init_done)
3564                 qla82xx_set_drv_active(vha);
3565
3566         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3567         old_dev_state = dev_state;
3568         qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
3569                 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3570
3571         /* Wait up to nx_dev_init_timeout seconds for the device to go ready */
3572         dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
3573
3574         while (1) {
3575
3576                 if (time_after_eq(jiffies, dev_init_timeout)) {
3577                         DEBUG(qla_printk(KERN_INFO, ha,
3578                                 "%s: device init failed!\n",
3579                                 QLA2XXX_DRIVER_NAME));
3580                         rval = QLA_FUNCTION_FAILED;
3581                         break;
3582                 }
3583                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3584                 if (old_dev_state != dev_state) {
3585                         loopcount = 0;
3586                         old_dev_state = dev_state;
3587                 }
3588                 if (loopcount < 5) {
3589                         qla_printk(KERN_INFO, ha,
3590                             "2:Device state is 0x%x = %s\n", dev_state,
3591                             dev_state < MAX_STATES ?
3592                             qdev_state[dev_state] : "Unknown");
3593                 }
3594
3595                 switch (dev_state) {
3596                 case QLA82XX_DEV_READY:
3597                         goto exit;
3598                 case QLA82XX_DEV_COLD:
3599                         rval = qla82xx_device_bootstrap(vha);
3600                         goto exit;
3601                 case QLA82XX_DEV_INITIALIZING:
3602                         qla82xx_idc_unlock(ha);
3603                         msleep(1000);
3604                         qla82xx_idc_lock(ha);
3605                         break;
3606                 case QLA82XX_DEV_NEED_RESET:
3607                         if (!ql2xdontresethba)
3608                                 qla82xx_need_reset_handler(vha);
3609                         dev_init_timeout = jiffies +
3610                                 (ha->nx_dev_init_timeout * HZ);
3611                         break;
3612                 case QLA82XX_DEV_NEED_QUIESCENT:
3613                         qla82xx_need_qsnt_handler(vha);
3614                         /* Reset timeout value after quiescence handler */
3615                         dev_init_timeout = jiffies +
3616                                 (ha->nx_dev_init_timeout * HZ);
3617                         break;
3618                 case QLA82XX_DEV_QUIESCENT:
3619                         /* The quiescence owner exits here; the others wait
3620                          * for the state to change
3621                          */
3622                         if (ha->flags.quiesce_owner)
3623                                 goto exit;
3624
3625                         qla82xx_idc_unlock(ha);
3626                         msleep(1000);
3627                         qla82xx_idc_lock(ha);
3628
3629                         /* Reset timeout value after quiescence handler */
3630                         dev_init_timeout = jiffies +
3631                                 (ha->nx_dev_init_timeout * HZ);
3632                         break;
3633                 case QLA82XX_DEV_FAILED:
3634                         qla82xx_dev_failed_handler(vha);
3635                         rval = QLA_FUNCTION_FAILED;
3636                         goto exit;
3637                 default:
3638                         qla82xx_idc_unlock(ha);
3639                         msleep(1000);
3640                         qla82xx_idc_lock(ha);
3641                 }
3642                 loopcount++;
3643         }
3644 exit:
3645         qla82xx_idc_unlock(ha);
3646         return rval;
3647 }
3648
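/*
 * qla82xx_watchdog
 *      Periodic health check, skipped while a reset handler is active:
 *      schedule an ISP abort or quiescence when the firmware requests it;
 *      otherwise use the heartbeat to detect a hung firmware, dump the PEG
 *      halt status and program counters, flag the DPC thread and, if a
 *      mailbox command is in flight, complete it prematurely so its waiter
 *      does not hang.
 */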
3649 void qla82xx_watchdog(scsi_qla_host_t *vha)
3650 {
3651         uint32_t dev_state, halt_status;
3652         struct qla_hw_data *ha = vha->hw;
3653
3654         /* don't poll if reset is going on */
3655         if (!ha->flags.isp82xx_reset_hdlr_active) {
3656                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3657                 if (dev_state == QLA82XX_DEV_NEED_RESET &&
3658                     !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3659                         qla_printk(KERN_WARNING, ha,
3660                             "scsi(%ld) %s: Adapter reset needed!\n",
3661                                 vha->host_no, __func__);
3662                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3663                         qla2xxx_wake_dpc(vha);
3664                 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
3665                         !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3666                         DEBUG(qla_printk(KERN_INFO, ha,
3667                                 "scsi(%ld) %s - detected quiescence needed\n",
3668                                 vha->host_no, __func__));
3669                         set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3670                         qla2xxx_wake_dpc(vha);
3671                 } else {
3672                         if (qla82xx_check_fw_alive(vha)) {
3673                                 halt_status = qla82xx_rd_32(ha,
3674                                     QLA82XX_PEG_HALT_STATUS1);
3675                                 qla_printk(KERN_INFO, ha,
3676                                     "scsi(%ld): %s, Dumping hw/fw registers:\n "
3677                                     " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n "
3678                                     " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n "
3679                                     " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n "
3680                                     " PEG_NET_4_PC: 0x%x\n",
3681                                     vha->host_no, __func__, halt_status,
3682                                     qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
3683                                     qla82xx_rd_32(ha,
3684                                             QLA82XX_CRB_PEG_NET_0 + 0x3c),
3685                                     qla82xx_rd_32(ha,
3686                                             QLA82XX_CRB_PEG_NET_1 + 0x3c),
3687                                     qla82xx_rd_32(ha,
3688                                             QLA82XX_CRB_PEG_NET_2 + 0x3c),
3689                                     qla82xx_rd_32(ha,
3690                                             QLA82XX_CRB_PEG_NET_3 + 0x3c),
3691                                     qla82xx_rd_32(ha,
3692                                             QLA82XX_CRB_PEG_NET_4 + 0x3c));
3693                                 if (halt_status & HALT_STATUS_UNRECOVERABLE) {
3694                                         set_bit(ISP_UNRECOVERABLE,
3695                                             &vha->dpc_flags);
3696                                 } else {
3697                                         qla_printk(KERN_INFO, ha,
3698                                             "scsi(%ld): %s - detect abort needed\n",
3699                                             vha->host_no, __func__);
3700                                         set_bit(ISP_ABORT_NEEDED,
3701                                             &vha->dpc_flags);
3702                                 }
3703                                 qla2xxx_wake_dpc(vha);
3704                                 ha->flags.isp82xx_fw_hung = 1;
3705                                 if (ha->flags.mbox_busy) {
3706                                         ha->flags.mbox_int = 1;
3707                                         DEBUG2(qla_printk(KERN_ERR, ha,
3708                                             "scsi(%ld) Due to fw hung, doing "
3709                                             "premature completion of mbx "
3710                                             "command\n", vha->host_no));
3711                                         if (test_bit(MBX_INTR_WAIT,
3712                                             &ha->mbx_cmd_flags))
3713                                                 complete(&ha->mbx_intr_comp);
3714                                 }
3715                         }
3716                 }
3717         }
3718 }
3719
3720 int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3721 {
3722         int rval;
3723         rval = qla82xx_device_state_handler(vha);
3724         return rval;
3725 }
3726
3727 /*
3728  *  qla82xx_abort_isp
3729  *      Resets ISP and aborts all outstanding commands.
3730  *
3731  * Input:
3732  *      ha           = adapter block pointer.
3733  *
3734  * Returns:
3735  *      0 = success
3736  */
3737 int
3738 qla82xx_abort_isp(scsi_qla_host_t *vha)
3739 {
3740         int rval;
3741         struct qla_hw_data *ha = vha->hw;
3742         uint32_t dev_state;
3743
3744         if (vha->device_flags & DFLG_DEV_FAILED) {
3745                 qla_printk(KERN_WARNING, ha,
3746                         "%s(%ld): Device in failed state, "
3747                         "Exiting.\n", __func__, vha->host_no);
3748                 return QLA_SUCCESS;
3749         }
3750         ha->flags.isp82xx_reset_hdlr_active = 1;
3751
3752         qla82xx_idc_lock(ha);
3753         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3754         if (dev_state == QLA82XX_DEV_READY) {
3755                 qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
3756                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3757                         QLA82XX_DEV_NEED_RESET);
3758         } else
3759                 qla_printk(KERN_INFO, ha, "HW State: %s\n",
3760                         dev_state < MAX_STATES ?
3761                         qdev_state[dev_state] : "Unknown");
3762         qla82xx_idc_unlock(ha);
3763
3764         rval = qla82xx_device_state_handler(vha);
3765
3766         qla82xx_idc_lock(ha);
3767         qla82xx_clear_rst_ready(ha);
3768         qla82xx_idc_unlock(ha);
3769
3770         if (rval == QLA_SUCCESS) {
3771                 ha->flags.isp82xx_fw_hung = 0;
3772                 ha->flags.isp82xx_reset_hdlr_active = 0;
3773                 qla82xx_restart_isp(vha);
3774         }
3775
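        /*
         * On failure, leave the port marked online so the retry logic can
         * run: isp_abort_cnt counts the remaining ISP_ABORT_RETRY attempts;
         * once it hits zero the adapter is reset and the board disabled.
         */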
3776         if (rval) {
3777                 vha->flags.online = 1;
3778                 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3779                         if (ha->isp_abort_cnt == 0) {
3780                                 qla_printk(KERN_WARNING, ha,
3781                                     "ISP error recovery failed - "
3782                                     "board disabled\n");
3783                                 /*
3784                                  * The next call disables the board
3785                                  * completely.
3786                                  */
3787                                 ha->isp_ops->reset_adapter(vha);
3788                                 vha->flags.online = 0;
3789                                 clear_bit(ISP_ABORT_RETRY,
3790                                     &vha->dpc_flags);
3791                                 rval = QLA_SUCCESS;
3792                         } else { /* schedule another ISP abort */
3793                                 ha->isp_abort_cnt--;
3794                                 DEBUG(qla_printk(KERN_INFO, ha,
3795                                     "qla%ld: ISP abort - retry remaining %d\n",
3796                                     vha->host_no, ha->isp_abort_cnt));
3797                                 rval = QLA_FUNCTION_FAILED;
3798                         }
3799                 } else {
3800                         ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3801                         DEBUG(qla_printk(KERN_INFO, ha,
3802                             "(%ld): ISP error recovery - retrying (%d) "
3803                             "more times\n", vha->host_no, ha->isp_abort_cnt));
3804                         set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3805                         rval = QLA_FUNCTION_FAILED;
3806                 }
3807         }
3808         return rval;
3809 }
3810
3811 /*
3812  *  qla82xx_fcoe_ctx_reset
3813  *      Perform a quick reset and aborts all outstanding commands.
3814  *      This will only perform an FCoE context reset and avoids a full blown
3815  *      chip reset.
3816  *
3817  * Input:
3818  *      ha = adapter block pointer.
3819  *      is_reset_path = flag for identifying the reset path.
3820  *
3821  * Returns:
3822  *      0 = success
3823  */
3824 int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
3825 {
3826         int rval = QLA_FUNCTION_FAILED;
3827
3828         if (vha->flags.online) {
3829                 /* Abort all outstanding commands, so as to be requeued later */
3830                 qla2x00_abort_isp_cleanup(vha);
3831         }
3832
3833         /* Stop currently executing firmware.
3834          * This will destroy existing FCoE context at the F/W end.
3835          */
3836         qla2x00_try_to_stop_firmware(vha);
3837
3838         /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
3839         rval = qla82xx_restart_isp(vha);
3840
3841         return rval;
3842 }
3843
3844 /*
3845  * qla2x00_wait_for_fcoe_ctx_reset
3846  *    Wait till the FCoE context is reset.
3847  *
3848  * Note:
3849  *    Does context switching here.
3850  *    Release SPIN_LOCK (if any) before calling this routine.
3851  *
3852  * Return:
3853  *    Success (fcoe_ctx reset is done) : 0
3854  *    Failed  (fcoe_ctx reset not completed within max loop timeout) : 1
3855  */
3856 int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
3857 {
3858         int status = QLA_FUNCTION_FAILED;
3859         unsigned long wait_reset;
3860
3861         wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
3862         while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3863             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
3864             && time_before(jiffies, wait_reset)) {
3865
3866                 set_current_state(TASK_UNINTERRUPTIBLE);
3867                 schedule_timeout(HZ);
3868
3869                 if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
3870                     !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3871                         status = QLA_SUCCESS;
3872                         break;
3873                 }
3874         }
3875         DEBUG2(printk(KERN_INFO
3876             "%s status=%d\n", __func__, status));
3877
3878         return status;
3879 }
3880
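/*
 * qla82xx_chip_reset_cleanup
 *      Pre-reset cleanup: sample the heartbeat twice to confirm whether the
 *      firmware is still alive; if it is hung, complete any in-flight
 *      mailbox command.  If it is healthy, walk every request queue and
 *      abort outstanding commands via mailbox (dropping the hardware lock
 *      around each abort), then wait for pending commands to drain.
 */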
3881 void
3882 qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3883 {
3884         int i;
3885         unsigned long flags;
3886         struct qla_hw_data *ha = vha->hw;
3887
3888         /* Check whether the 82XX firmware is still alive.
3889          * We may have arrived here from NEED_RESET
3890          * detection only.
3891          */
3892         if (!ha->flags.isp82xx_fw_hung) {
3893                 for (i = 0; i < 2; i++) {
3894                         msleep(1000);
3895                         if (qla82xx_check_fw_alive(vha)) {
3896                                 ha->flags.isp82xx_fw_hung = 1;
3897                                 if (ha->flags.mbox_busy) {
3898                                         ha->flags.mbox_int = 1;
3899                                         complete(&ha->mbx_intr_comp);
3900                                 }
3901                                 break;
3902                         }
3903                 }
3904         }
3905
3906         /* Abort all commands gracefully if fw NOT hung */
3907         if (!ha->flags.isp82xx_fw_hung) {
3908                 int cnt, que;
3909                 srb_t *sp;
3910                 struct req_que *req;
3911
3912                 spin_lock_irqsave(&ha->hardware_lock, flags);
3913                 for (que = 0; que < ha->max_req_queues; que++) {
3914                         req = ha->req_q_map[que];
3915                         if (!req)
3916                                 continue;
3917                         for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3918                                 sp = req->outstanding_cmds[cnt];
3919                                 if (sp) {
3920                                         if (!sp->ctx ||
3921                                             (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
3922                                                 spin_unlock_irqrestore(
3923                                                     &ha->hardware_lock, flags);
3924                                                 if (ha->isp_ops->abort_command(sp)) {
3925                                                         qla_printk(KERN_INFO, ha,
3926                                                             "scsi(%ld): mbx abort command failed in %s\n",
3927                                                             vha->host_no, __func__);
3928                                                 } else {
3929                                                         qla_printk(KERN_INFO, ha,
3930                                                             "scsi(%ld): mbx abort command success in %s\n",
3931                                                             vha->host_no, __func__);
3932                                                 }
3933                                                 spin_lock_irqsave(&ha->hardware_lock, flags);
3934                                         }
3935                                 }
3936                         }
3937                 }
3938                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3939
3940                 /* Wait for pending cmds (physical and virtual) to complete */
3941                 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3942                     WAIT_HOST) == QLA_SUCCESS) {
3943                         DEBUG2(qla_printk(KERN_INFO, ha,
3944                             "Done wait for pending commands\n"));
3945                 }
3946         }
3947 }