drivers/scsi/qla2xxx/qla_nx.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2010 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include <linux/delay.h>
9 #include <linux/pci.h>
10
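/*
 * Memory-window helpers for the 2M PCI map (descriptive note): MN_WIN()
 * packs DDR address bits [24:18] (shifted right by one) together with bits
 * [34:25] into the value written to the MN window-select CRB register;
 * OCM_WIN() is analogous with a slightly wider low-bit mask, and MS_WIN()
 * keeps the QDR window bits in place.  For example, MN_WIN(0x02000000)
 * evaluates to 0x1 and MN_WIN(0x00040000) to 0x20000.
 * GET_MEM_OFFS_2M() returns an address's offset within its 256K window.
 */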
11 #define MASK(n)                 ((1ULL<<(n))-1)
12 #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
13         ((addr >> 25) & 0x3ff))
14 #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
15         ((addr >> 25) & 0x3ff))
16 #define MS_WIN(addr) (addr & 0x0ffc0000)
17 #define QLA82XX_PCI_MN_2M   (0)
18 #define QLA82XX_PCI_MS_2M   (0x80000)
19 #define QLA82XX_PCI_OCM0_2M (0xc0000)
20 #define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
21 #define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
22 #define BLOCK_PROTECT_BITS 0x0F
23
24 /* CRB window related */
25 #define CRB_BLK(off)    ((off >> 20) & 0x3f)
26 #define CRB_SUBBLK(off) ((off >> 16) & 0xf)
27 #define CRB_WINDOW_2M   (0x130060)
28 #define QLA82XX_PCI_CAMQM_2M_END        (0x04800800UL)
29 #define CRB_HI(off)     ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
30                         ((off) & 0xf0000))
31 #define QLA82XX_PCI_CAMQM_2M_BASE       (0x000ff800UL)
32 #define CRB_INDIRECT_2M (0x1e0000UL)
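
/*
 * 2M CRB addressing (descriptive note): a 128M CRB offset decomposes into a
 * 1M block (CRB_BLK, bits 25:20) and a 64K sub-block (CRB_SUBBLK, bits 19:16),
 * which index crb_128M_2M_map[] for direct-mapped registers.  Registers that
 * are not direct-mapped are reached by writing CRB_HI(off) - the block's
 * hub/agent id plus the 64K-aligned offset - to CRB_WINDOW_2M and then
 * accessing the low 64K through the indirect window at CRB_INDIRECT_2M.
 */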
33
34 #define MAX_CRB_XFORM 60
35 static unsigned long crb_addr_xform[MAX_CRB_XFORM];
36 int qla82xx_crb_table_initialized;
37
38 #define qla82xx_crb_addr_transform(name) \
39         (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
40         QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
41
42 static void qla82xx_crb_addr_transform_setup(void)
43 {
44         qla82xx_crb_addr_transform(XDMA);
45         qla82xx_crb_addr_transform(TIMR);
46         qla82xx_crb_addr_transform(SRE);
47         qla82xx_crb_addr_transform(SQN3);
48         qla82xx_crb_addr_transform(SQN2);
49         qla82xx_crb_addr_transform(SQN1);
50         qla82xx_crb_addr_transform(SQN0);
51         qla82xx_crb_addr_transform(SQS3);
52         qla82xx_crb_addr_transform(SQS2);
53         qla82xx_crb_addr_transform(SQS1);
54         qla82xx_crb_addr_transform(SQS0);
55         qla82xx_crb_addr_transform(RPMX7);
56         qla82xx_crb_addr_transform(RPMX6);
57         qla82xx_crb_addr_transform(RPMX5);
58         qla82xx_crb_addr_transform(RPMX4);
59         qla82xx_crb_addr_transform(RPMX3);
60         qla82xx_crb_addr_transform(RPMX2);
61         qla82xx_crb_addr_transform(RPMX1);
62         qla82xx_crb_addr_transform(RPMX0);
63         qla82xx_crb_addr_transform(ROMUSB);
64         qla82xx_crb_addr_transform(SN);
65         qla82xx_crb_addr_transform(QMN);
66         qla82xx_crb_addr_transform(QMS);
67         qla82xx_crb_addr_transform(PGNI);
68         qla82xx_crb_addr_transform(PGND);
69         qla82xx_crb_addr_transform(PGN3);
70         qla82xx_crb_addr_transform(PGN2);
71         qla82xx_crb_addr_transform(PGN1);
72         qla82xx_crb_addr_transform(PGN0);
73         qla82xx_crb_addr_transform(PGSI);
74         qla82xx_crb_addr_transform(PGSD);
75         qla82xx_crb_addr_transform(PGS3);
76         qla82xx_crb_addr_transform(PGS2);
77         qla82xx_crb_addr_transform(PGS1);
78         qla82xx_crb_addr_transform(PGS0);
79         qla82xx_crb_addr_transform(PS);
80         qla82xx_crb_addr_transform(PH);
81         qla82xx_crb_addr_transform(NIU);
82         qla82xx_crb_addr_transform(I2Q);
83         qla82xx_crb_addr_transform(EG);
84         qla82xx_crb_addr_transform(MN);
85         qla82xx_crb_addr_transform(MS);
86         qla82xx_crb_addr_transform(CAS2);
87         qla82xx_crb_addr_transform(CAS1);
88         qla82xx_crb_addr_transform(CAS0);
89         qla82xx_crb_addr_transform(CAM);
90         qla82xx_crb_addr_transform(C2C1);
91         qla82xx_crb_addr_transform(C2C0);
92         qla82xx_crb_addr_transform(SMB);
93         qla82xx_crb_addr_transform(OCM0);
94         /*
95          * Used only in P3; define it for P2 as well.
96          */
97         qla82xx_crb_addr_transform(I2C0);
98
99         qla82xx_crb_table_initialized = 1;
100 }
101
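/*
 * crb_128M_2M_map[] layout (descriptive note): one entry per 1M CRB block,
 * each holding up to 16 sub-block descriptors of the form
 * {valid, start_128M, end_128M, start_2M}.  qla82xx_pci_get_crb_addr_2M()
 * uses a matching valid entry to translate a legacy 128M CRB offset directly
 * into an offset inside the 2M PCI BAR without touching the CRB window.
 */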
102 struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
103         {{{0, 0,         0,         0} } },
104         {{{1, 0x0100000, 0x0102000, 0x120000},
105         {1, 0x0110000, 0x0120000, 0x130000},
106         {1, 0x0120000, 0x0122000, 0x124000},
107         {1, 0x0130000, 0x0132000, 0x126000},
108         {1, 0x0140000, 0x0142000, 0x128000},
109         {1, 0x0150000, 0x0152000, 0x12a000},
110         {1, 0x0160000, 0x0170000, 0x110000},
111         {1, 0x0170000, 0x0172000, 0x12e000},
112         {0, 0x0000000, 0x0000000, 0x000000},
113         {0, 0x0000000, 0x0000000, 0x000000},
114         {0, 0x0000000, 0x0000000, 0x000000},
115         {0, 0x0000000, 0x0000000, 0x000000},
116         {0, 0x0000000, 0x0000000, 0x000000},
117         {0, 0x0000000, 0x0000000, 0x000000},
118         {1, 0x01e0000, 0x01e0800, 0x122000},
119         {0, 0x0000000, 0x0000000, 0x000000} } } ,
120         {{{1, 0x0200000, 0x0210000, 0x180000} } },
121         {{{0, 0,         0,         0} } },
122         {{{1, 0x0400000, 0x0401000, 0x169000} } },
123         {{{1, 0x0500000, 0x0510000, 0x140000} } },
124         {{{1, 0x0600000, 0x0610000, 0x1c0000} } },
125         {{{1, 0x0700000, 0x0704000, 0x1b8000} } },
126         {{{1, 0x0800000, 0x0802000, 0x170000},
127         {0, 0x0000000, 0x0000000, 0x000000},
128         {0, 0x0000000, 0x0000000, 0x000000},
129         {0, 0x0000000, 0x0000000, 0x000000},
130         {0, 0x0000000, 0x0000000, 0x000000},
131         {0, 0x0000000, 0x0000000, 0x000000},
132         {0, 0x0000000, 0x0000000, 0x000000},
133         {0, 0x0000000, 0x0000000, 0x000000},
134         {0, 0x0000000, 0x0000000, 0x000000},
135         {0, 0x0000000, 0x0000000, 0x000000},
136         {0, 0x0000000, 0x0000000, 0x000000},
137         {0, 0x0000000, 0x0000000, 0x000000},
138         {0, 0x0000000, 0x0000000, 0x000000},
139         {0, 0x0000000, 0x0000000, 0x000000},
140         {0, 0x0000000, 0x0000000, 0x000000},
141         {1, 0x08f0000, 0x08f2000, 0x172000} } },
142         {{{1, 0x0900000, 0x0902000, 0x174000},
143         {0, 0x0000000, 0x0000000, 0x000000},
144         {0, 0x0000000, 0x0000000, 0x000000},
145         {0, 0x0000000, 0x0000000, 0x000000},
146         {0, 0x0000000, 0x0000000, 0x000000},
147         {0, 0x0000000, 0x0000000, 0x000000},
148         {0, 0x0000000, 0x0000000, 0x000000},
149         {0, 0x0000000, 0x0000000, 0x000000},
150         {0, 0x0000000, 0x0000000, 0x000000},
151         {0, 0x0000000, 0x0000000, 0x000000},
152         {0, 0x0000000, 0x0000000, 0x000000},
153         {0, 0x0000000, 0x0000000, 0x000000},
154         {0, 0x0000000, 0x0000000, 0x000000},
155         {0, 0x0000000, 0x0000000, 0x000000},
156         {0, 0x0000000, 0x0000000, 0x000000},
157         {1, 0x09f0000, 0x09f2000, 0x176000} } },
158         {{{0, 0x0a00000, 0x0a02000, 0x178000},
159         {0, 0x0000000, 0x0000000, 0x000000},
160         {0, 0x0000000, 0x0000000, 0x000000},
161         {0, 0x0000000, 0x0000000, 0x000000},
162         {0, 0x0000000, 0x0000000, 0x000000},
163         {0, 0x0000000, 0x0000000, 0x000000},
164         {0, 0x0000000, 0x0000000, 0x000000},
165         {0, 0x0000000, 0x0000000, 0x000000},
166         {0, 0x0000000, 0x0000000, 0x000000},
167         {0, 0x0000000, 0x0000000, 0x000000},
168         {0, 0x0000000, 0x0000000, 0x000000},
169         {0, 0x0000000, 0x0000000, 0x000000},
170         {0, 0x0000000, 0x0000000, 0x000000},
171         {0, 0x0000000, 0x0000000, 0x000000},
172         {0, 0x0000000, 0x0000000, 0x000000},
173         {1, 0x0af0000, 0x0af2000, 0x17a000} } },
174         {{{0, 0x0b00000, 0x0b02000, 0x17c000},
175         {0, 0x0000000, 0x0000000, 0x000000},
176         {0, 0x0000000, 0x0000000, 0x000000},
177         {0, 0x0000000, 0x0000000, 0x000000},
178         {0, 0x0000000, 0x0000000, 0x000000},
179         {0, 0x0000000, 0x0000000, 0x000000},
180         {0, 0x0000000, 0x0000000, 0x000000},
181         {0, 0x0000000, 0x0000000, 0x000000},
182         {0, 0x0000000, 0x0000000, 0x000000},
183         {0, 0x0000000, 0x0000000, 0x000000},
184         {0, 0x0000000, 0x0000000, 0x000000},
185         {0, 0x0000000, 0x0000000, 0x000000},
186         {0, 0x0000000, 0x0000000, 0x000000},
187         {0, 0x0000000, 0x0000000, 0x000000},
188         {0, 0x0000000, 0x0000000, 0x000000},
189         {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
190         {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
191         {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
192         {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
193         {{{1, 0x0f00000, 0x0f01000, 0x164000} } },
194         {{{0, 0x1000000, 0x1004000, 0x1a8000} } },
195         {{{1, 0x1100000, 0x1101000, 0x160000} } },
196         {{{1, 0x1200000, 0x1201000, 0x161000} } },
197         {{{1, 0x1300000, 0x1301000, 0x162000} } },
198         {{{1, 0x1400000, 0x1401000, 0x163000} } },
199         {{{1, 0x1500000, 0x1501000, 0x165000} } },
200         {{{1, 0x1600000, 0x1601000, 0x166000} } },
201         {{{0, 0,         0,         0} } },
202         {{{0, 0,         0,         0} } },
203         {{{0, 0,         0,         0} } },
204         {{{0, 0,         0,         0} } },
205         {{{0, 0,         0,         0} } },
206         {{{0, 0,         0,         0} } },
207         {{{1, 0x1d00000, 0x1d10000, 0x190000} } },
208         {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
209         {{{1, 0x1f00000, 0x1f10000, 0x150000} } },
210         {{{0} } },
211         {{{1, 0x2100000, 0x2102000, 0x120000},
212         {1, 0x2110000, 0x2120000, 0x130000},
213         {1, 0x2120000, 0x2122000, 0x124000},
214         {1, 0x2130000, 0x2132000, 0x126000},
215         {1, 0x2140000, 0x2142000, 0x128000},
216         {1, 0x2150000, 0x2152000, 0x12a000},
217         {1, 0x2160000, 0x2170000, 0x110000},
218         {1, 0x2170000, 0x2172000, 0x12e000},
219         {0, 0x0000000, 0x0000000, 0x000000},
220         {0, 0x0000000, 0x0000000, 0x000000},
221         {0, 0x0000000, 0x0000000, 0x000000},
222         {0, 0x0000000, 0x0000000, 0x000000},
223         {0, 0x0000000, 0x0000000, 0x000000},
224         {0, 0x0000000, 0x0000000, 0x000000},
225         {0, 0x0000000, 0x0000000, 0x000000},
226         {0, 0x0000000, 0x0000000, 0x000000} } },
227         {{{1, 0x2200000, 0x2204000, 0x1b0000} } },
228         {{{0} } },
229         {{{0} } },
230         {{{0} } },
231         {{{0} } },
232         {{{0} } },
233         {{{1, 0x2800000, 0x2804000, 0x1a4000} } },
234         {{{1, 0x2900000, 0x2901000, 0x16b000} } },
235         {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
236         {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
237         {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
238         {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
239         {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
240         {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
241         {{{1, 0x3000000, 0x3000400, 0x1adc00} } },
242         {{{0, 0x3100000, 0x3104000, 0x1a8000} } },
243         {{{1, 0x3200000, 0x3204000, 0x1d4000} } },
244         {{{1, 0x3300000, 0x3304000, 0x1a0000} } },
245         {{{0} } },
246         {{{1, 0x3500000, 0x3500400, 0x1ac000} } },
247         {{{1, 0x3600000, 0x3600400, 0x1ae000} } },
248         {{{1, 0x3700000, 0x3700400, 0x1ae400} } },
249         {{{1, 0x3800000, 0x3804000, 0x1d0000} } },
250         {{{1, 0x3900000, 0x3904000, 0x1b4000} } },
251         {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
252         {{{0} } },
253         {{{0} } },
254         {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
255         {{{1, 0x3e00000, 0x3e01000, 0x167000} } },
256         {{{1, 0x3f00000, 0x3f01000, 0x168000} } }
257 };
258
259 /*
260  * top 12 bits of crb internal address (hub, agent)
261  */
262 unsigned qla82xx_crb_hub_agt[64] = {
263         0,
264         QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
265         QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
266         QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
267         0,
268         QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
269         QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
270         QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
271         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
272         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
273         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
274         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
275         QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
276         QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
277         QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
278         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
279         QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
280         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
281         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
282         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
283         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
284         QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
285         QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
286         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
287         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
288         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
289         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
290         0,
291         QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
292         QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
293         0,
294         QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
295         0,
296         QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
297         QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
298         0,
299         0,
300         0,
301         0,
302         0,
303         QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
304         0,
305         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
306         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
307         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
308         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
309         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
310         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
311         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
312         QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
313         QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
314         QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
315         0,
316         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
317         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
318         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
319         QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
320         0,
321         QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
322         QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
323         QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
324         0,
325         QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
326         0,
327 };
328
329 /* Device states */
330 char *qdev_state[] = {
331          "Unknown",
332         "Cold",
333         "Initializing",
334         "Ready",
335         "Need Reset",
336         "Need Quiescent",
337         "Failed",
338         "Quiescent",
339 };
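
/*
 * qdev_state[] is indexed by the IDC device-state value read from the
 * firmware.  A minimal usage sketch (assumes the QLA82XX_CRB_DEV_STATE
 * register and the state encoding used elsewhere in this driver):
 *
 *      dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
 *      qla_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
 *          dev_state, qdev_state[dev_state]);
 */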
340
341 /*
342  * In: 'off' is offset from CRB space in 128M pci map
343  * Out: 'off' is 2M pci map addr
344  * side effect: lock crb window
345  */
346 static void
347 qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
348 {
349         u32 win_read;
350
351         ha->crb_win = CRB_HI(*off);
352         writel(ha->crb_win,
353                 (void *)(CRB_WINDOW_2M + ha->nx_pcibase));
354
355         /* Read back value to make sure write has gone through before trying
356          * to use it.
357          */
358         win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
359         if (win_read != ha->crb_win) {
360                 DEBUG2(qla_printk(KERN_INFO, ha,
361                     "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
362                     "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
363         }
364         *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
365 }
366
367 static inline unsigned long
368 qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
369 {
370         /* See if we are currently pointing to the region we want to use next */
371         if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
372                 /* No need to change window. PCIX and PCIE regs
373                  * are in both windows.
374                  */
375                 return off;
376         }
377
378         if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
379                 /* We are in first CRB window */
380                 if (ha->curr_window != 0)
381                         WARN_ON(1);
382                 return off;
383         }
384
385         if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
386                 /* We are in second CRB window */
387                 off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
388
389                 if (ha->curr_window != 1)
390                         return off;
391
392                 /* We are in the QM or direct access
393                  * register region - do nothing
394                  */
395                 if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
396                         (off < QLA82XX_PCI_CAMQM_MAX))
397                         return off;
398         }
399         /* strange address given */
400         qla_printk(KERN_WARNING, ha,
401                 "%s: Warning: unm_nic_pci_set_crbwindow called with"
402                 " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off);
403         return off;
404 }
405
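/*
 * Resolve a CRB offset for the 2M map (descriptive note).  Return values:
 *   -1  offset is outside the CRB/CAMQM ranges (callers BUG_ON this),
 *    0  *off has been rewritten to a directly mapped address in the 2M BAR,
 *    1  the register is not direct-mapped and must go through the CRB window.
 */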
406 static int
407 qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
408 {
409         struct crb_128M_2M_sub_block_map *m;
410
411         if (*off >= QLA82XX_CRB_MAX)
412                 return -1;
413
414         if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
415                 *off = (*off - QLA82XX_PCI_CAMQM) +
416                     QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
417                 return 0;
418         }
419
420         if (*off < QLA82XX_PCI_CRBSPACE)
421                 return -1;
422
423         *off -= QLA82XX_PCI_CRBSPACE;
424
425         /* Try direct map */
426         m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
427
428         if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
429                 *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
430                 return 0;
431         }
432         /* Not in direct map, use crb window */
433         return 1;
434 }
435
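/*
 * The CRB window is protected by hardware semaphore 7 in the PCIe block:
 * reading PCIE_SEM7_LOCK returns 1 once the lock is granted, reading
 * PCIE_SEM7_UNLOCK releases it, and CRB_WIN_LOCK_ID records the owning
 * port number (descriptive note based on the accessors below).
 */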
436 #define CRB_WIN_LOCK_TIMEOUT 100000000
437 static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
438 {
439         int done = 0, timeout = 0;
440
441         while (!done) {
442                 /* acquire semaphore3 from PCI HW block */
443                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
444                 if (done == 1)
445                         break;
446                 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
447                         return -1;
448                 timeout++;
449         }
450         qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
451         return 0;
452 }
453
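/*
 * qla82xx_wr_32()/qla82xx_rd_32() are the generic CRB accessors: they resolve
 * the offset via qla82xx_pci_get_crb_addr_2M() and, when the register is not
 * direct-mapped, take hw_lock plus the semaphore-7 CRB window lock around the
 * windowed access (descriptive note).
 */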
454 int
455 qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
456 {
457         unsigned long flags = 0;
458         int rv;
459
460         rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
461
462         BUG_ON(rv == -1);
463
464         if (rv == 1) {
465                 write_lock_irqsave(&ha->hw_lock, flags);
466                 qla82xx_crb_win_lock(ha);
467                 qla82xx_pci_set_crbwindow_2M(ha, &off);
468         }
469
470         writel(data, (void __iomem *)off);
471
472         if (rv == 1) {
473                 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
474                 write_unlock_irqrestore(&ha->hw_lock, flags);
475         }
476         return 0;
477 }
478
479 int
480 qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
481 {
482         unsigned long flags = 0;
483         int rv;
484         u32 data;
485
486         rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
487
488         BUG_ON(rv == -1);
489
490         if (rv == 1) {
491                 write_lock_irqsave(&ha->hw_lock, flags);
492                 qla82xx_crb_win_lock(ha);
493                 qla82xx_pci_set_crbwindow_2M(ha, &off);
494         }
495         data = RD_REG_DWORD((void __iomem *)off);
496
497         if (rv == 1) {
498                 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
499                 write_unlock_irqrestore(&ha->hw_lock, flags);
500         }
501         return data;
502 }
503
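/*
 * The IDC (inter-driver communication) registers are serialized with
 * hardware semaphore 5, acquired and released the same way as the CRB
 * window semaphore above (descriptive note; "IDC" expansion assumed from
 * the qla82xx IDC state machine).
 */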
504 #define IDC_LOCK_TIMEOUT 100000000
505 int qla82xx_idc_lock(struct qla_hw_data *ha)
506 {
507         int i;
508         int done = 0, timeout = 0;
509
510         while (!done) {
511                 /* acquire semaphore5 from PCI HW block */
512                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
513                 if (done == 1)
514                         break;
515                 if (timeout >= IDC_LOCK_TIMEOUT)
516                         return -1;
517
518                 timeout++;
519
520                 /* Yield CPU */
521                 if (!in_interrupt())
522                         schedule();
523                 else {
524                         for (i = 0; i < 20; i++)
525                                 cpu_relax();
526                 }
527         }
528
529         return 0;
530 }
531
532 void qla82xx_idc_unlock(struct qla_hw_data *ha)
533 {
534         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
535 }
536
537 /*  PCI Windowing for DDR regions.  */
538 #define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
539         (((addr) <= (high)) && ((addr) >= (low)))
540 /*
541  * Check memory access boundary.
542  * Used by the test agent; supports DDR access only for now.
543  */
544 static unsigned long
545 qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
546         unsigned long long addr, int size)
547 {
548         if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
549                 QLA82XX_ADDR_DDR_NET_MAX) ||
550                 !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
551                 QLA82XX_ADDR_DDR_NET_MAX) ||
552                 ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
553                         return 0;
554         else
555                 return 1;
556 }
557
558 int qla82xx_pci_set_window_warning_count;
559
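/*
 * qla82xx_pci_set_window() programs the appropriate memory window - MN for
 * DDR, the OCM window for on-chip memory, MS for QDR - for a 64-bit agent
 * address, verifies the window register by reading it back, and returns the
 * corresponding offset in the 2M PCI map (or -1UL for an unknown range).
 * Descriptive note derived from the cases below.
 */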
560 static unsigned long
561 qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
562 {
563         int window;
564         u32 win_read;
565
566         if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
567                 QLA82XX_ADDR_DDR_NET_MAX)) {
568                 /* DDR network side */
569                 window = MN_WIN(addr);
570                 ha->ddr_mn_window = window;
571                 qla82xx_wr_32(ha,
572                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
573                 win_read = qla82xx_rd_32(ha,
574                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
575                 if ((win_read << 17) != window) {
576                         qla_printk(KERN_WARNING, ha,
577                             "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
578                             __func__, window, win_read);
579                 }
580                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
581         } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
582                 QLA82XX_ADDR_OCM0_MAX)) {
583                 unsigned int temp1;
584                 if ((addr & 0x00ff800) == 0xff800) {
585                         qla_printk(KERN_WARNING, ha,
586                             "%s: QM access not handled.\n", __func__);
587                         addr = -1UL;
588                 }
589                 window = OCM_WIN(addr);
590                 ha->ddr_mn_window = window;
591                 qla82xx_wr_32(ha,
592                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
593                 win_read = qla82xx_rd_32(ha,
594                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
595                 temp1 = ((window & 0x1FF) << 7) |
596                     ((window & 0x0FFFE0000) >> 17);
597                 if (win_read != temp1) {
598                         qla_printk(KERN_WARNING, ha,
599                             "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
600                             __func__, temp1, win_read);
601                 }
602                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
603
604         } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
605                 QLA82XX_P3_ADDR_QDR_NET_MAX)) {
606                 /* QDR network side */
607                 window = MS_WIN(addr);
608                 ha->qdr_sn_window = window;
609                 qla82xx_wr_32(ha,
610                         ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
611                 win_read = qla82xx_rd_32(ha,
612                         ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
613                 if (win_read != window) {
614                         qla_printk(KERN_WARNING, ha,
615                             "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
616                             __func__, window, win_read);
617                 }
618                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
619         } else {
620                 /*
621                  * The peg gdb frequently accesses memory that doesn't exist;
622                  * this limits the chatter so debugging isn't slowed down.
623                  */
624                 if ((qla82xx_pci_set_window_warning_count++ < 8) ||
625                     (qla82xx_pci_set_window_warning_count%64 == 0)) {
626                         qla_printk(KERN_WARNING, ha,
627                             "%s: Warning:%s Unknown address range!\n", __func__,
628                             QLA2XXX_DRIVER_NAME);
629                 }
630                 addr = -1UL;
631         }
632         return addr;
633 }
634
635 /* check if address is in the same window as the previous access */
636 static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
637         unsigned long long addr)
638 {
639         int                     window;
640         unsigned long long      qdr_max;
641
642         qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
643
644         /* DDR network side */
645         if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
646                 QLA82XX_ADDR_DDR_NET_MAX))
647                 BUG();
648         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
649                 QLA82XX_ADDR_OCM0_MAX))
650                 return 1;
651         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
652                 QLA82XX_ADDR_OCM1_MAX))
653                 return 1;
654         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
655                 /* QDR network side */
656                 window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
657                 if (ha->qdr_sn_window == window)
658                         return 1;
659         }
660         return 0;
661 }
662
663 static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
664         u64 off, void *data, int size)
665 {
666         unsigned long   flags;
667         void           *addr = NULL;
668         int             ret = 0;
669         u64             start;
670         uint8_t         *mem_ptr = NULL;
671         unsigned long   mem_base;
672         unsigned long   mem_page;
673
674         write_lock_irqsave(&ha->hw_lock, flags);
675
676         /*
677          * If attempting to access unknown address or straddle hw windows,
678          * do not access.
679          */
680         start = qla82xx_pci_set_window(ha, off);
681         if ((start == -1UL) ||
682                 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
683                 write_unlock_irqrestore(&ha->hw_lock, flags);
684                 qla_printk(KERN_ERR, ha,
685                         "%s out of bound pci memory access. "
686                         "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
687                 return -1;
688         }
689
690         write_unlock_irqrestore(&ha->hw_lock, flags);
691         mem_base = pci_resource_start(ha->pdev, 0);
692         mem_page = start & PAGE_MASK;
693         /* Map two pages whenever user tries to access addresses in two
694          * consecutive pages.
695          */
696         if (mem_page != ((start + size - 1) & PAGE_MASK))
697                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
698         else
699                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
700         if (mem_ptr == NULL) {
701                 *(u8  *)data = 0;
702                 return -1;
703         }
704         addr = mem_ptr;
705         addr += start & (PAGE_SIZE - 1);
706         write_lock_irqsave(&ha->hw_lock, flags);
707
708         switch (size) {
709         case 1:
710                 *(u8  *)data = readb(addr);
711                 break;
712         case 2:
713                 *(u16 *)data = readw(addr);
714                 break;
715         case 4:
716                 *(u32 *)data = readl(addr);
717                 break;
718         case 8:
719                 *(u64 *)data = readq(addr);
720                 break;
721         default:
722                 ret = -1;
723                 break;
724         }
725         write_unlock_irqrestore(&ha->hw_lock, flags);
726
727         if (mem_ptr)
728                 iounmap(mem_ptr);
729         return ret;
730 }
731
732 static int
733 qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
734         u64 off, void *data, int size)
735 {
736         unsigned long   flags;
737         void           *addr = NULL;
738         int             ret = 0;
739         u64             start;
740         uint8_t         *mem_ptr = NULL;
741         unsigned long   mem_base;
742         unsigned long   mem_page;
743
744         write_lock_irqsave(&ha->hw_lock, flags);
745
746         /*
747          * If attempting to access unknown address or straddle hw windows,
748          * do not access.
749          */
750         start = qla82xx_pci_set_window(ha, off);
751         if ((start == -1UL) ||
752                 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
753                 write_unlock_irqrestore(&ha->hw_lock, flags);
754                 qla_printk(KERN_ERR, ha,
755                         "%s out of bound pci memory access. "
756                         "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
757                 return -1;
758         }
759
760         write_unlock_irqrestore(&ha->hw_lock, flags);
761         mem_base = pci_resource_start(ha->pdev, 0);
762         mem_page = start & PAGE_MASK;
763         /* Map two pages whenever user tries to access addresses in two
764          * consecutive pages.
765          */
766         if (mem_page != ((start + size - 1) & PAGE_MASK))
767                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
768         else
769                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
770         if (mem_ptr == NULL)
771                 return -1;
772
773         addr = mem_ptr;
774         addr += start & (PAGE_SIZE - 1);
775         write_lock_irqsave(&ha->hw_lock, flags);
776
777         switch (size) {
778         case 1:
779                 writeb(*(u8  *)data, addr);
780                 break;
781         case 2:
782                 writew(*(u16 *)data, addr);
783                 break;
784         case 4:
785                 writel(*(u32 *)data, addr);
786                 break;
787         case 8:
788                 writeq(*(u64 *)data, addr);
789                 break;
790         default:
791                 ret = -1;
792                 break;
793         }
794         write_unlock_irqrestore(&ha->hw_lock, flags);
795         if (mem_ptr)
796                 iounmap(mem_ptr);
797         return ret;
798 }
799
800 #define MTU_FUDGE_FACTOR 100
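
/*
 * qla82xx_decode_crb_addr() maps a CRB address as stored in the flash
 * crb_init table (hub/agent encoding in the top bits) back to a 128M PCI
 * CRB offset by looking the 1M base up in crb_addr_xform[]; ADDR_ERROR is
 * returned for unknown bases (descriptive note).
 */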
801 static unsigned long
802 qla82xx_decode_crb_addr(unsigned long addr)
803 {
804         int i;
805         unsigned long base_addr, offset, pci_base;
806
807         if (!qla82xx_crb_table_initialized)
808                 qla82xx_crb_addr_transform_setup();
809
810         pci_base = ADDR_ERROR;
811         base_addr = addr & 0xfff00000;
812         offset = addr & 0x000fffff;
813
814         for (i = 0; i < MAX_CRB_XFORM; i++) {
815                 if (crb_addr_xform[i] == base_addr) {
816                         pci_base = i << 20;
817                         break;
818                 }
819         }
820         if (pci_base == ADDR_ERROR)
821                 return pci_base;
822         return pci_base + offset;
823 }
824
825 static long rom_max_timeout = 100;
826 static long qla82xx_rom_lock_timeout = 100;
827
828 static int
829 qla82xx_rom_lock(struct qla_hw_data *ha)
830 {
831         int done = 0, timeout = 0;
832
833         while (!done) {
834                 /* acquire semaphore2 from PCI HW block */
835                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
836                 if (done == 1)
837                         break;
838                 if (timeout >= qla82xx_rom_lock_timeout)
839                         return -1;
840                 timeout++;
841         }
842         qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
843         return 0;
844 }
845
846 static int
847 qla82xx_wait_rom_busy(struct qla_hw_data *ha)
848 {
849         long timeout = 0;
850         long done = 0;
851
852         while (done == 0) {
853                 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
854                 done &= 4;
855                 timeout++;
856                 if (timeout >= rom_max_timeout) {
857                         DEBUG(qla_printk(KERN_INFO, ha,
858                                 "%s: Timeout reached waiting for rom busy",
859                                 QLA2XXX_DRIVER_NAME));
860                         return -1;
861                 }
862         }
863         return 0;
864 }
865
866 static int
867 qla82xx_wait_rom_done(struct qla_hw_data *ha)
868 {
869         long timeout = 0;
870         long done = 0;
871
872         while (done == 0) {
873                 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
874                 done &= 2;
875                 timeout++;
876                 if (timeout >= rom_max_timeout) {
877                         DEBUG(qla_printk(KERN_INFO, ha,
878                                 "%s: Timeout reached waiting for rom done",
879                                 QLA2XXX_DRIVER_NAME));
880                         return -1;
881                 }
882         }
883         return 0;
884 }
885
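/*
 * ROM fast read (descriptive note): the serial flash is reached through the
 * ROMUSB glue - program the address, a dummy-byte count of 0 and an
 * address-byte count of 3, issue instruction opcode 0xb (SPI "fast read"),
 * wait for the busy/done status bits in ROMUSB_GLB_STATUS, then fetch the
 * 32-bit result from ROM_RDATA.
 */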
886 static int
887 qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
888 {
889         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
890         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
891         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
892         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
893         qla82xx_wait_rom_busy(ha);
894         if (qla82xx_wait_rom_done(ha)) {
895                 qla_printk(KERN_WARNING, ha,
896                         "%s: Error waiting for rom done\n",
897                         QLA2XXX_DRIVER_NAME);
898                 return -1;
899         }
900         /* Reset abyte_cnt and dummy_byte_cnt */
901         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
902         udelay(10);
903         cond_resched();
904         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
905         *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
906         return 0;
907 }
908
909 static int
910 qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
911 {
912         int ret, loops = 0;
913
914         while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
915                 udelay(100);
916                 schedule();
917                 loops++;
918         }
919         if (loops >= 50000) {
920                 qla_printk(KERN_INFO, ha,
921                         "%s: qla82xx_rom_lock failed\n",
922                         QLA2XXX_DRIVER_NAME);
923                 return -1;
924         }
925         ret = qla82xx_do_rom_fast_read(ha, addr, valp);
926         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
927         return ret;
928 }
929
930 static int
931 qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
932 {
933         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
934         qla82xx_wait_rom_busy(ha);
935         if (qla82xx_wait_rom_done(ha)) {
936                 qla_printk(KERN_WARNING, ha,
937                     "Error waiting for rom done\n");
938                 return -1;
939         }
940         *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
941         return 0;
942 }
943
944 static int
945 qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
946 {
947         long timeout = 0;
948         uint32_t done = 1;
949         uint32_t val;
950         int ret = 0;
951
952         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
953         while ((done != 0) && (ret == 0)) {
954                 ret = qla82xx_read_status_reg(ha, &val);
955                 done = val & 1;
956                 timeout++;
957                 udelay(10);
958                 cond_resched();
959                 if (timeout >= 50000) {
960                         qla_printk(KERN_WARNING, ha,
961                             "Timeout reached waiting for write finish\n");
962                         return -1;
963                 }
964         }
965         return ret;
966 }
967
968 static int
969 qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
970 {
971         uint32_t val;
972         qla82xx_wait_rom_busy(ha);
973         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
974         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
975         qla82xx_wait_rom_busy(ha);
976         if (qla82xx_wait_rom_done(ha))
977                 return -1;
978         if (qla82xx_read_status_reg(ha, &val) != 0)
979                 return -1;
980         if ((val & 2) != 2)
981                 return -1;
982         return 0;
983 }
984
985 static int
986 qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
987 {
988         if (qla82xx_flash_set_write_enable(ha))
989                 return -1;
990         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
991         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
992         if (qla82xx_wait_rom_done(ha)) {
993                 qla_printk(KERN_WARNING, ha,
994                     "Error waiting for rom done\n");
995                 return -1;
996         }
997         return qla82xx_flash_wait_write_finish(ha);
998 }
999
1000 static int
1001 qla82xx_write_disable_flash(struct qla_hw_data *ha)
1002 {
1003         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
1004         if (qla82xx_wait_rom_done(ha)) {
1005                 qla_printk(KERN_WARNING, ha,
1006                     "Error waiting for rom done\n");
1007                 return -1;
1008         }
1009         return 0;
1010 }
1011
1012 static int
1013 ql82xx_rom_lock_d(struct qla_hw_data *ha)
1014 {
1015         int loops = 0;
1016         while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1017                 udelay(100);
1018                 cond_resched();
1019                 loops++;
1020         }
1021         if (loops >= 50000) {
1022                 qla_printk(KERN_WARNING, ha, "ROM lock failed\n");
1023                 return -1;
1024         }
1025         return 0;
1026 }
1027
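/*
 * Flash dword write (descriptive note): standard serial-flash sequence under
 * the ROM hardware lock - write enable (M25P_INSTR_WREN), load data and
 * address, issue page program (M25P_INSTR_PP), then poll the status register
 * until the write-in-progress bit clears.
 */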
1028 static int
1029 qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1030         uint32_t data)
1031 {
1032         int ret = 0;
1033
1034         ret = ql82xx_rom_lock_d(ha);
1035         if (ret < 0) {
1036                 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
1037                 return ret;
1038         }
1039
1040         if (qla82xx_flash_set_write_enable(ha))
1041                 goto done_write;
1042
1043         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
1044         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
1045         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
1046         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
1047         qla82xx_wait_rom_busy(ha);
1048         if (qla82xx_wait_rom_done(ha)) {
1049                 qla_printk(KERN_WARNING, ha,
1050                         "Error waiting for rom done\n");
1051                 ret = -1;
1052                 goto done_write;
1053         }
1054
1055         ret = qla82xx_flash_wait_write_finish(ha);
1056
1057 done_write:
1058         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
1059         return ret;
1060 }
1061
1062 /* This routine performs the CRB initialization sequence
1063  * to put the ISP into an operational state.
1064  */
1065 static int
1066 qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1067 {
1068         int addr, val;
1069         int i;
1070         struct crb_addr_pair *buf;
1071         unsigned long off;
1072         unsigned offset, n;
1073         struct qla_hw_data *ha = vha->hw;
1074
1075         struct crb_addr_pair {
1076                 long addr;
1077                 long data;
1078         };
1079
1080         /* Halt all the individual PEGs and other blocks of the ISP */
1081         qla82xx_rom_lock(ha);
1082         if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
1083                 /* don't reset CAM block on reset */
1084                 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
1085         else
1086                 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
1087         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
1088
1089         /* Read the signature value from the flash.
1090          * Offset 0: Contains the signature (0xcafecafe)
1091          * Offset 4: Offset and number of addr/value pairs
1092          * present in the CRB initialization sequence
1093          */
1094         if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1095             qla82xx_rom_fast_read(ha, 4, &n) != 0) {
1096                 qla_printk(KERN_WARNING, ha,
1097                     "[ERROR] Reading crb_init area: n: %08x\n", n);
1098                 return -1;
1099         }
1100
1101         /* Offset in flash = lower 16 bits
1102          * Number of entries = upper 16 bits
1103          */
1104         offset = n & 0xffffU;
1105         n = (n >> 16) & 0xffffU;
1106
1107         /* number of addr/value pairs should not exceed 1024 entries */
1108         if (n  >= 1024) {
1109                 qla_printk(KERN_WARNING, ha,
1110                     "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
1111                     QLA2XXX_DRIVER_NAME, __func__, n);
1112                 return -1;
1113         }
1114
1115         qla_printk(KERN_INFO, ha,
1116             "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n);
1117
1118         buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
1119         if (buf == NULL) {
1120                 qla_printk(KERN_WARNING, ha,
1121                     "%s: [ERROR] Unable to allocate memory.\n",
1122                     QLA2XXX_DRIVER_NAME);
1123                 return -1;
1124         }
1125
1126         for (i = 0; i < n; i++) {
1127                 if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
1128                     qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
1129                         kfree(buf);
1130                         return -1;
1131                 }
1132
1133                 buf[i].addr = addr;
1134                 buf[i].data = val;
1135         }
1136
1137         for (i = 0; i < n; i++) {
1138                 /* Translate internal CRB initialization
1139                  * address to PCI bus address
1140                  */
1141                 off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
1142                     QLA82XX_PCI_CRBSPACE;
1143                 /* Not all CRB addr/value pairs are written;
1144                  * some of them are skipped
1145                  */
1146
1147                 /* skipping cold reboot MAGIC */
1148                 if (off == QLA82XX_CAM_RAM(0x1fc))
1149                         continue;
1150
1151                 /* do not reset PCI */
1152                 if (off == (ROMUSB_GLB + 0xbc))
1153                         continue;
1154
1155                 /* skip core clock, so that firmware can increase the clock */
1156                 if (off == (ROMUSB_GLB + 0xc8))
1157                         continue;
1158
1159                 /* skip the function enable register */
1160                 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
1161                         continue;
1162
1163                 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
1164                         continue;
1165
1166                 if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
1167                         continue;
1168
1169                 if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
1170                         continue;
1171
1172                 if (off == ADDR_ERROR) {
1173                         qla_printk(KERN_WARNING, ha,
1174                             "%s: [ERROR] Unknown addr: 0x%08lx\n",
1175                             QLA2XXX_DRIVER_NAME, buf[i].addr);
1176                         continue;
1177                 }
1178
1179                 qla82xx_wr_32(ha, off, buf[i].data);
1180
1181                 /* The ISP requires a much longer delay to settle down,
1182                  * else crb_window reads return 0xffffffff
1183                  */
1184                 if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
1185                         msleep(1000);
1186
1187                 /* The ISP requires a millisecond delay between
1188                  * successive CRB register updates
1189                  */
1190                 msleep(1);
1191         }
1192
1193         kfree(buf);
1194
1195         /* Resetting the data and instruction cache */
1196         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
1197         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
1198         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
1199
1200         /* Clear all protocol processing engines */
1201         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
1202         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
1203         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
1204         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
1205         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
1206         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
1207         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
1208         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
1209         return 0;
1210 }
1211
1212 static int
1213 qla82xx_check_for_bad_spd(struct qla_hw_data *ha)
1214 {
1215         u32 val = 0;
1216         val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS);
1217         val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
1218         if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
1219                 qla_printk(KERN_INFO, ha,
1220                         "Memory DIMM SPD not programmed. "
1221                         "Assumed valid.\n");
1222                 return 1;
1223         } else if (val) {
1224                 qla_printk(KERN_INFO, ha,
1225                         "Memory DIMM type incorrect. Info: %08X.\n", val);
1226                 return 2;
1227         }
1228         return 0;
1229 }
1230
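/*
 * DDR/QDR adapter memory is written through the MIU test agent (descriptive
 * note): accesses are performed in 16-byte naturally aligned units, so a
 * smaller write first reads back the containing unit(s), merges the new
 * bytes in (read-modify-write), then pushes each unit out via the
 * MIU_TEST_AGT_* registers and polls MIU_TA_CTL_BUSY for completion.
 * Addresses that fail the DDR bound check (e.g. OCM) fall back to the
 * direct ioremap path in qla82xx_pci_mem_write_direct().
 */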
1231 static int
1232 qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1233                 u64 off, void *data, int size)
1234 {
1235         int i, j, ret = 0, loop, sz[2], off0;
1236         int scale, shift_amount, startword;
1237         uint32_t temp;
1238         uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
1239
1240         /*
1241          * If not MN, go check for MS or invalid.
1242          */
1243         if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1244                 mem_crb = QLA82XX_CRB_QDR_NET;
1245         else {
1246                 mem_crb = QLA82XX_CRB_DDR_NET;
1247                 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1248                         return qla82xx_pci_mem_write_direct(ha,
1249                             off, data, size);
1250         }
1251
1252         off0 = off & 0x7;
1253         sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1254         sz[1] = size - sz[0];
1255
1256         off8 = off & 0xfffffff0;
1257         loop = (((off & 0xf) + size - 1) >> 4) + 1;
1258         shift_amount = 4;
1259         scale = 2;
1260         startword = (off & 0xf)/8;
1261
1262         for (i = 0; i < loop; i++) {
1263                 if (qla82xx_pci_mem_read_2M(ha, off8 +
1264                     (i << shift_amount), &word[i * scale], 8))
1265                         return -1;
1266         }
1267
1268         switch (size) {
1269         case 1:
1270                 tmpw = *((uint8_t *)data);
1271                 break;
1272         case 2:
1273                 tmpw = *((uint16_t *)data);
1274                 break;
1275         case 4:
1276                 tmpw = *((uint32_t *)data);
1277                 break;
1278         case 8:
1279         default:
1280                 tmpw = *((uint64_t *)data);
1281                 break;
1282         }
1283
1284         if (sz[0] == 8) {
1285                 word[startword] = tmpw;
1286         } else {
1287                 word[startword] &=
1288                         ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1289                 word[startword] |= tmpw << (off0 * 8);
1290         }
1291         if (sz[1] != 0) {
1292                 word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1293                 word[startword+1] |= tmpw >> (sz[0] * 8);
1294         }
1295
1296         /*
1297          * Don't lock here - the CRB write routine takes the lock itself; the legacy netxen sequence was:
1298          * write_lock_irqsave(&adapter->adapter_lock, flags);
1299          * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1300          */
1301         for (i = 0; i < loop; i++) {
1302                 temp = off8 + (i << shift_amount);
1303                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1304                 temp = 0;
1305                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1306                 temp = word[i * scale] & 0xffffffff;
1307                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1308                 temp = (word[i * scale] >> 32) & 0xffffffff;
1309                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1310                 temp = word[i*scale + 1] & 0xffffffff;
1311                 qla82xx_wr_32(ha, mem_crb +
1312                     MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
1313                 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1314                 qla82xx_wr_32(ha, mem_crb +
1315                     MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
1316
1317                 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1318                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1319                 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1320                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1321
1322                 for (j = 0; j < MAX_CTL_CHECK; j++) {
1323                         temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1324                         if ((temp & MIU_TA_CTL_BUSY) == 0)
1325                                 break;
1326                 }
1327
1328                 if (j >= MAX_CTL_CHECK) {
1329                         if (printk_ratelimit())
1330                                 dev_err(&ha->pdev->dev,
1331                                     "failed to write through agent\n");
1332                         ret = -1;
1333                         break;
1334                 }
1335         }
1336
1337         return ret;
1338 }
1339
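/*
 * qla82xx_fw_load_from_flash() copies the bootloader image from flash
 * (flt_region_bootload, a dword offset, hence the << 2) into adapter memory
 * at BOOTLD_START..IMAGE_START in 8-byte chunks, then writes PEG_NET_0+0x18
 * and ROMUSB_GLB_SW_RESET to start it (descriptive note; the exact meaning
 * of the 0x1020/0x80001e values is firmware-defined).
 */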
1340 static int
1341 qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
1342 {
1343         int  i;
1344         long size = 0;
1345         long flashaddr = ha->flt_region_bootload << 2;
1346         long memaddr = BOOTLD_START;
1347         u64 data;
1348         u32 high, low;
1349         size = (IMAGE_START - BOOTLD_START) / 8;
1350
1351         for (i = 0; i < size; i++) {
1352                 if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
1353                     (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
1354                         return -1;
1355                 }
1356                 data = ((u64)high << 32) | low;
1357                 qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
1358                 flashaddr += 8;
1359                 memaddr += 8;
1360
1361                 if (i % 0x1000 == 0)
1362                         msleep(1);
1363         }
1364         udelay(100);
1365         read_lock(&ha->hw_lock);
1366         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1367         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1368         read_unlock(&ha->hw_lock);
1369         return 0;
1370 }
1371
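/*
 * Read-side counterpart of qla82xx_pci_mem_write_2M() (descriptive note):
 * DDR/QDR reads go through the MIU test agent in 16-byte units and the
 * requested bytes are then extracted from the returned 64-bit words;
 * addresses that fail the DDR bound check use qla82xx_pci_mem_read_direct().
 */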
1372 int
1373 qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1374                 u64 off, void *data, int size)
1375 {
1376         int i, j = 0, k, start, end, loop, sz[2], off0[2];
1377         int           shift_amount;
1378         uint32_t      temp;
1379         uint64_t      off8, val, mem_crb, word[2] = {0, 0};
1380
1381         /*
1382          * If not MN, go check for MS or invalid.
1383          */
1384
1385         if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1386                 mem_crb = QLA82XX_CRB_QDR_NET;
1387         else {
1388                 mem_crb = QLA82XX_CRB_DDR_NET;
1389                 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1390                         return qla82xx_pci_mem_read_direct(ha,
1391                             off, data, size);
1392         }
1393
1394         off8 = off & 0xfffffff0;
1395         off0[0] = off & 0xf;
1396         sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
1397         shift_amount = 4;
1398         loop = ((off0[0] + size - 1) >> shift_amount) + 1;
1399         off0[1] = 0;
1400         sz[1] = size - sz[0];
1401
1402         /*
1403          * Don't lock here - the CRB write routine takes the lock itself; the legacy netxen sequence was:
1404          * write_lock_irqsave(&adapter->adapter_lock, flags);
1405          * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1406          */
1407
1408         for (i = 0; i < loop; i++) {
1409                 temp = off8 + (i << shift_amount);
1410                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
1411                 temp = 0;
1412                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
1413                 temp = MIU_TA_CTL_ENABLE;
1414                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1415                 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
1416                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1417
1418                 for (j = 0; j < MAX_CTL_CHECK; j++) {
1419                         temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1420                         if ((temp & MIU_TA_CTL_BUSY) == 0)
1421                                 break;
1422                 }
1423
1424                 if (j >= MAX_CTL_CHECK) {
1425                         if (printk_ratelimit())
1426                                 dev_err(&ha->pdev->dev,
1427                                     "failed to read through agent\n");
1428                         break;
1429                 }
1430
1431                 start = off0[i] >> 2;
1432                 end   = (off0[i] + sz[i] - 1) >> 2;
1433                 for (k = start; k <= end; k++) {
1434                         temp = qla82xx_rd_32(ha,
1435                                         mem_crb + MIU_TEST_AGT_RDDATA(k));
1436                         word[i] |= ((uint64_t)temp << (32 * (k & 1)));
1437                 }
1438         }
1439
1440         /*
1441          * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
1442          * write_unlock_irqrestore(&adapter->adapter_lock, flags);
1443          */
1444
1445         if (j >= MAX_CTL_CHECK)
1446                 return -1;
1447
1448         if ((off0[0] & 7) == 0) {
1449                 val = word[0];
1450         } else {
1451                 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
1452                         ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
1453         }
1454
1455         switch (size) {
1456         case 1:
1457                 *(uint8_t  *)data = val;
1458                 break;
1459         case 2:
1460                 *(uint16_t *)data = val;
1461                 break;
1462         case 4:
1463                 *(uint32_t *)data = val;
1464                 break;
1465         case 8:
1466                 *(uint64_t *)data = val;
1467                 break;
1468         }
1469         return 0;
1470 }
1471
1472
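     /*
      * qla82xx_get_table_desc() - Locate a table descriptor in a unified
      * ROM image (URI).
      * @unirom: start of the firmware image
      * @section: table type to look for (QLA82XX_URI_DIR_SECT_*)
      *
      * Walks the directory table at the start of the image and returns the
      * descriptor whose type field matches @section, or NULL if none is found.
      */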
1473 static struct qla82xx_uri_table_desc *
1474 qla82xx_get_table_desc(const u8 *unirom, int section)
1475 {
1476         uint32_t i;
1477         struct qla82xx_uri_table_desc *directory =
1478                 (struct qla82xx_uri_table_desc *)&unirom[0];
1479         __le32 offset;
1480         __le32 tab_type;
1481         __le32 entries = cpu_to_le32(directory->num_entries);
1482
1483         for (i = 0; i < entries; i++) {
1484                 offset = cpu_to_le32(directory->findex) +
1485                     (i * cpu_to_le32(directory->entry_size));
1486                 tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
1487
1488                 if (tab_type == section)
1489                         return (struct qla82xx_uri_table_desc *)&unirom[offset];
1490         }
1491
1492         return NULL;
1493 }
1494
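     /*
      * qla82xx_get_data_desc() - Return the data descriptor for @section.
      *
      * The descriptor index is read at @idx_offset from the product table
      * entry selected earlier (ha->file_prd_off) and is then used to index
      * into the section's table.  Returns NULL if the table is not present.
      */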
1495 static struct qla82xx_uri_data_desc *
1496 qla82xx_get_data_desc(struct qla_hw_data *ha,
1497         u32 section, u32 idx_offset)
1498 {
1499         const u8 *unirom = ha->hablob->fw->data;
1500         int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
1501         struct qla82xx_uri_table_desc *tab_desc = NULL;
1502         __le32 offset;
1503
1504         tab_desc = qla82xx_get_table_desc(unirom, section);
1505         if (!tab_desc)
1506                 return NULL;
1507
1508         offset = cpu_to_le32(tab_desc->findex) +
1509             (cpu_to_le32(tab_desc->entry_size) * idx);
1510
1511         return (struct qla82xx_uri_data_desc *)&unirom[offset];
1512 }
1513
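     /*
      * qla82xx_get_bootld_offset() - Return a pointer to the boot loader
      * image inside the firmware blob.  For unified ROM images the offset
      * comes from the boot-loader data descriptor; otherwise the fixed
      * BOOTLD_START offset is used.
      */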
1514 static u8 *
1515 qla82xx_get_bootld_offset(struct qla_hw_data *ha)
1516 {
1517         u32 offset = BOOTLD_START;
1518         struct qla82xx_uri_data_desc *uri_desc = NULL;
1519
1520         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1521                 uri_desc = qla82xx_get_data_desc(ha,
1522                     QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
1523                 if (uri_desc)
1524                         offset = cpu_to_le32(uri_desc->findex);
1525         }
1526
1527         return (u8 *)&ha->hablob->fw->data[offset];
1528 }
1529
1530 static __le32
1531 qla82xx_get_fw_size(struct qla_hw_data *ha)
1532 {
1533         struct qla82xx_uri_data_desc *uri_desc = NULL;
1534
1535         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1536                 uri_desc =  qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1537                     QLA82XX_URI_FIRMWARE_IDX_OFF);
1538                 if (uri_desc)
1539                         return cpu_to_le32(uri_desc->size);
1540         }
1541
1542         return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
1543 }
1544
1545 static u8 *
1546 qla82xx_get_fw_offs(struct qla_hw_data *ha)
1547 {
1548         u32 offset = IMAGE_START;
1549         struct qla82xx_uri_data_desc *uri_desc = NULL;
1550
1551         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1552                 uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1553                         QLA82XX_URI_FIRMWARE_IDX_OFF);
1554                 if (uri_desc)
1555                         offset = cpu_to_le32(uri_desc->findex);
1556         }
1557
1558         return (u8 *)&ha->hablob->fw->data[offset];
1559 }
1560
1561 /* PCI related functions */
1562 char *
1563 qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
1564 {
1565         int pcie_reg;
1566         struct qla_hw_data *ha = vha->hw;
1567         char lwstr[6];
1568         uint16_t lnk;
1569
1570         pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
1571         pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
1572         ha->link_width = (lnk >> 4) & 0x3f;
1573
1574         strcpy(str, "PCIe (");
1575         strcat(str, "2.5Gb/s ");
1576         snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
1577         strcat(str, lwstr);
1578         return str;
1579 }
1580
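     /*
      * qla82xx_pci_region_offset() - Return the byte offset of @region
      * within BAR 0.  Region 0 starts at offset 0; region 1 begins just past
      * the MSI-X table (table offset from PCI config space plus
      * QLA82XX_MSIX_TBL_SPACE).
      */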
1581 int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
1582 {
1583         unsigned long val = 0;
1584         u32 control;
1585
1586         switch (region) {
1587         case 0:
1588                 val = 0;
1589                 break;
1590         case 1:
1591                 pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
1592                 val = control + QLA82XX_MSIX_TBL_SPACE;
1593                 break;
1594         }
1595         return val;
1596 }
1597
1598
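     /*
      * qla82xx_iospace_config() - Reserve PCI regions and map the ISP82xx
      * register window (BAR 0) and, unless ql2xdbwr is set, the normal
      * doorbell region (BAR 4).  Returns 0 on success, -ENOMEM on failure.
      */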
1599 int
1600 qla82xx_iospace_config(struct qla_hw_data *ha)
1601 {
1602         uint32_t len = 0;
1603
1604         if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
1605                 qla_printk(KERN_WARNING, ha,
1606                         "Failed to reserve selected regions (%s)\n",
1607                         pci_name(ha->pdev));
1608                 goto iospace_error_exit;
1609         }
1610
1611         /* Use MMIO operations for all accesses. */
1612         if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1613                 qla_printk(KERN_ERR, ha,
1614                         "region #0 not an MMIO resource (%s), aborting\n",
1615                         pci_name(ha->pdev));
1616                 goto iospace_error_exit;
1617         }
1618
1619         len = pci_resource_len(ha->pdev, 0);
1620         ha->nx_pcibase =
1621             (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
1622         if (!ha->nx_pcibase) {
1623                 qla_printk(KERN_ERR, ha,
1624                     "cannot remap pcibase MMIO (%s), aborting\n",
1625                     pci_name(ha->pdev));
1626                 pci_release_regions(ha->pdev);
1627                 goto iospace_error_exit;
1628         }
1629
1630         /* Mapping of IO base pointer */
1631         ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
1632             0xbc000 + (ha->pdev->devfn << 11));
1633
1634         if (!ql2xdbwr) {
1635                 ha->nxdb_wr_ptr =
1636                     (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
1637                     (ha->pdev->devfn << 12)), 4);
1638                 if (!ha->nxdb_wr_ptr) {
1639                         qla_printk(KERN_ERR, ha,
1640                             "cannot remap MMIO (%s), aborting\n",
1641                             pci_name(ha->pdev));
1642                         pci_release_regions(ha->pdev);
1643                         goto iospace_error_exit;
1644                 }
1645
1646                 /* Mapping of IO base pointer,
1647                  * doorbell read and write pointer
1648                  */
1649                 ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
1650                     (ha->pdev->devfn * 8);
1651         } else {
1652                 ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
1653                         QLA82XX_CAMRAM_DB1 :
1654                         QLA82XX_CAMRAM_DB2);
1655         }
1656
1657         ha->max_req_queues = ha->max_rsp_queues = 1;
1658         ha->msix_count = ha->max_rsp_queues + 1;
1659         return 0;
1660
1661 iospace_error_exit:
1662         return -ENOMEM;
1663 }
1664
1665 /* GS related functions */
1666
1667 /* Initialization related functions */
1668
1669 /**
1670  * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
1671  * @vha: HA context
1672  *
1673  * Returns 0 on success.
1674  */
1675 int
1676 qla82xx_pci_config(scsi_qla_host_t *vha)
1677 {
1678         struct qla_hw_data *ha = vha->hw;
1679         int ret;
1680
1681         pci_set_master(ha->pdev);
1682         ret = pci_set_mwi(ha->pdev);
1683         ha->chip_revision = ha->pdev->revision;
1684         return 0;
1685 }
1686
1687 /**
1688  * qla82xx_reset_chip() - Disable adapter interrupts ahead of an ISP82xx reset.
1689  * @vha: HA context
1692  */
1693 void
1694 qla82xx_reset_chip(scsi_qla_host_t *vha)
1695 {
1696         struct qla_hw_data *ha = vha->hw;
1697         ha->isp_ops->disable_intrs(ha);
1698 }
1699
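     /*
      * qla82xx_config_rings() - Program the request/response ring parameters
      * into the initialization control block and zero the ring index
      * registers.
      */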
1700 void qla82xx_config_rings(struct scsi_qla_host *vha)
1701 {
1702         struct qla_hw_data *ha = vha->hw;
1703         struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1704         struct init_cb_81xx *icb;
1705         struct req_que *req = ha->req_q_map[0];
1706         struct rsp_que *rsp = ha->rsp_q_map[0];
1707
1708         /* Setup ring parameters in initialization control block. */
1709         icb = (struct init_cb_81xx *)ha->init_cb;
1710         icb->request_q_outpointer = __constant_cpu_to_le16(0);
1711         icb->response_q_inpointer = __constant_cpu_to_le16(0);
1712         icb->request_q_length = cpu_to_le16(req->length);
1713         icb->response_q_length = cpu_to_le16(rsp->length);
1714         icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1715         icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1716         icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1717         icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1718
1719         WRT_REG_DWORD((unsigned long  __iomem *)&reg->req_q_out[0], 0);
1720         WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_in[0], 0);
1721         WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_out[0], 0);
1722 }
1723
1724 void qla82xx_reset_adapter(struct scsi_qla_host *vha)
1725 {
1726         struct qla_hw_data *ha = vha->hw;
1727         vha->flags.online = 0;
1728         qla2x00_try_to_stop_firmware(vha);
1729         ha->isp_ops->disable_intrs(ha);
1730 }
1731
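     /*
      * qla82xx_fw_load_from_blob() - Copy the boot loader and firmware
      * images from the request_firmware() blob into adapter memory, eight
      * bytes at a time, then write the BDINFO magic and trigger a software
      * reset so the firmware can start initializing.
      */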
1732 static int
1733 qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
1734 {
1735         u64 *ptr64;
1736         u32 i, flashaddr, size;
1737         __le64 data;
1738
1739         size = (IMAGE_START - BOOTLD_START) / 8;
1740
1741         ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
1742         flashaddr = BOOTLD_START;
1743
1744         for (i = 0; i < size; i++) {
1745                 data = cpu_to_le64(ptr64[i]);
1746                 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1747                         return -EIO;
1748                 flashaddr += 8;
1749         }
1750
1751         flashaddr = FLASH_ADDR_START;
1752         size = (__force u32)qla82xx_get_fw_size(ha) / 8;
1753         ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
1754
1755         for (i = 0; i < size; i++) {
1756                 data = cpu_to_le64(ptr64[i]);
1757
1758                 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1759                         return -EIO;
1760                 flashaddr += 8;
1761         }
1762         udelay(100);
1763
1764         /* Write a magic value to CAMRAM register
1765          * at a specified offset to indicate
1766          * that all data is written and
1767          * ready for firmware to initialize.
1768          */
1769         qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);
1770
1771         read_lock(&ha->hw_lock);
1772         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1773         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1774         read_unlock(&ha->hw_lock);
1775         return 0;
1776 }
1777
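     /*
      * qla82xx_set_product_offset() - Scan the product table of a unified
      * ROM image for an entry matching the chip revision and the expected
      * flag bit (derived from mn_present), and remember its offset in
      * ha->file_prd_off.  Returns 0 on success, -1 if no entry matches.
      */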
1778 static int
1779 qla82xx_set_product_offset(struct qla_hw_data *ha)
1780 {
1781         struct qla82xx_uri_table_desc *ptab_desc = NULL;
1782         const uint8_t *unirom = ha->hablob->fw->data;
1783         uint32_t i;
1784         __le32 entries;
1785         __le32 flags, file_chiprev, offset;
1786         uint8_t chiprev = ha->chip_revision;
1787         /* Hardcoding mn_present flag for P3P */
1788         int mn_present = 0;
1789         uint32_t flagbit;
1790
1791         ptab_desc = qla82xx_get_table_desc(unirom,
1792                  QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
1793         if (!ptab_desc)
1794                 return -1;
1795
1796         entries = cpu_to_le32(ptab_desc->num_entries);
1797
1798         for (i = 0; i < entries; i++) {
1799                 offset = cpu_to_le32(ptab_desc->findex) +
1800                         (i * cpu_to_le32(ptab_desc->entry_size));
1801                 flags = cpu_to_le32(*((int *)&unirom[offset] +
1802                         QLA82XX_URI_FLAGS_OFF));
1803                 file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
1804                         QLA82XX_URI_CHIP_REV_OFF));
1805
1806                 flagbit = mn_present ? 1 : 2;
1807
1808                 if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
1809                         ha->file_prd_off = offset;
1810                         return 0;
1811                 }
1812         }
1813         return -1;
1814 }
1815
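     /*
      * qla82xx_validate_firmware_blob() - Sanity check a firmware blob of the
      * given @fw_type: a unified ROM image must contain a matching product
      * table entry, a flash image must carry the BDINFO magic, and in both
      * cases the blob must meet the minimum size.
      */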
1816 int
1817 qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
1818 {
1819         __le32 val;
1820         uint32_t min_size;
1821         struct qla_hw_data *ha = vha->hw;
1822         const struct firmware *fw = ha->hablob->fw;
1823
1824         ha->fw_type = fw_type;
1825
1826         if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1827                 if (qla82xx_set_product_offset(ha))
1828                         return -EINVAL;
1829
1830                 min_size = QLA82XX_URI_FW_MIN_SIZE;
1831         } else {
1832                 val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
1833                 if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
1834                         return -EINVAL;
1835
1836                 min_size = QLA82XX_FW_MIN_SIZE;
1837         }
1838
1839         if (fw->size < min_size)
1840                 return -EINVAL;
1841         return 0;
1842 }
1843
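     /*
      * qla82xx_check_cmdpeg_state() - Poll CRB_CMDPEG_STATE (up to 60 times,
      * 500 ms apart) until the command PEG reports initialization complete.
      * On timeout the state is forced to PHAN_INITIALIZE_FAILED and
      * QLA_FUNCTION_FAILED is returned.
      */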
1844 static int
1845 qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1846 {
1847         u32 val = 0;
1848         int retries = 60;
1849
1850         do {
1851                 read_lock(&ha->hw_lock);
1852                 val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
1853                 read_unlock(&ha->hw_lock);
1854
1855                 switch (val) {
1856                 case PHAN_INITIALIZE_COMPLETE:
1857                 case PHAN_INITIALIZE_ACK:
1858                         return QLA_SUCCESS;
1859                 case PHAN_INITIALIZE_FAILED:
1860                         break;
1861                 default:
1862                         break;
1863                 }
1864                 qla_printk(KERN_WARNING, ha,
1865                         "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n",
1866                         val, retries);
1867
1868                 msleep(500);
1869
1870         } while (--retries);
1871
1872         qla_printk(KERN_INFO, ha,
1873             "Cmd Peg initialization failed: 0x%x.\n", val);
1874
1875         qla82xx_check_for_bad_spd(ha);
1876         val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
1877         read_lock(&ha->hw_lock);
1878         qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1879         read_unlock(&ha->hw_lock);
1880         return QLA_FUNCTION_FAILED;
1881 }
1882
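     /*
      * qla82xx_check_rcvpeg_state() - Same polling scheme as above, but for
      * the receive PEG via CRB_RCVPEG_STATE.
      */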
1883 static int
1884 qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1885 {
1886         u32 val = 0;
1887         int retries = 60;
1888
1889         do {
1890                 read_lock(&ha->hw_lock);
1891                 val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
1892                 read_unlock(&ha->hw_lock);
1893
1894                 switch (val) {
1895                 case PHAN_INITIALIZE_COMPLETE:
1896                 case PHAN_INITIALIZE_ACK:
1897                         return QLA_SUCCESS;
1898                 case PHAN_INITIALIZE_FAILED:
1899                         break;
1900                 default:
1901                         break;
1902                 }
1903
1904                 qla_printk(KERN_WARNING, ha,
1905                         "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n",
1906                         val, retries);
1907
1908                 msleep(500);
1909
1910         } while (--retries);
1911
1912         qla_printk(KERN_INFO, ha,
1913                 "Rcv Peg initialization failed: 0x%x.\n", val);
1914         read_lock(&ha->hw_lock);
1915         qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
1916         read_unlock(&ha->hw_lock);
1917         return QLA_FUNCTION_FAILED;
1918 }
1919
1920 /* ISR related functions */
1921 uint32_t qla82xx_isr_int_target_mask_enable[8] = {
1922         ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
1923         ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
1924         ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
1925         ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
1926 };
1927
1928 uint32_t qla82xx_isr_int_target_status[8] = {
1929         ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
1930         ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
1931         ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
1932         ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
1933 };
1934
1935 static struct qla82xx_legacy_intr_set legacy_intr[] = \
1936         QLA82XX_LEGACY_INTR_CONFIG;
1937
1938 /*
1939  * qla82xx_mbx_completion() - Process mailbox command completions.
1940  * @vha: SCSI driver HA context
1941  * @mb0: Mailbox0 register
1942  */
1943 static void
1944 qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1945 {
1946         uint16_t        cnt;
1947         uint16_t __iomem *wptr;
1948         struct qla_hw_data *ha = vha->hw;
1949         struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1950         wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
1951
1952         /* Load return mailbox registers. */
1953         ha->flags.mbox_int = 1;
1954         ha->mailbox_out[0] = mb0;
1955
1956         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1957                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1958                 wptr++;
1959         }
1960
1961         if (ha->mcp) {
1962                 DEBUG3_11(printk(KERN_INFO "%s(%ld): "
1963                         "Got mailbox completion. cmd=%x.\n",
1964                         __func__, vha->host_no, ha->mcp->mb[0]));
1965         } else {
1966                 qla_printk(KERN_INFO, ha,
1967                         "%s(%ld): MBX pointer ERROR!\n",
1968                         __func__, vha->host_no);
1969         }
1970 }
1971
1972 /*
1973  * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
1974  * @irq: interrupt number
1975  * @dev_id: response queue pointer
1977  *
1978  * Called by system whenever the host adapter generates an interrupt.
1979  *
1980  * Returns handled flag.
1981  */
1982 irqreturn_t
1983 qla82xx_intr_handler(int irq, void *dev_id)
1984 {
1985         scsi_qla_host_t *vha;
1986         struct qla_hw_data *ha;
1987         struct rsp_que *rsp;
1988         struct device_reg_82xx __iomem *reg;
1989         int status = 0, status1 = 0;
1990         unsigned long   flags;
1991         unsigned long   iter;
1992         uint32_t        stat;
1993         uint16_t        mb[4];
1994
1995         rsp = (struct rsp_que *) dev_id;
1996         if (!rsp) {
1997                 printk(KERN_INFO
1998                         "%s(): NULL response queue pointer\n", __func__);
1999                 return IRQ_NONE;
2000         }
2001         ha = rsp->hw;
2002
2003         if (!ha->flags.msi_enabled) {
2004                 status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
2005                 if (!(status & ha->nx_legacy_intr.int_vec_bit))
2006                         return IRQ_NONE;
2007
2008                 status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
2009                 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
2010                         return IRQ_NONE;
2011         }
2012
2013         /* clear the interrupt */
2014         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
2015
2016         /* read twice to ensure write is flushed */
2017         qla82xx_rd_32(ha, ISR_INT_VECTOR);
2018         qla82xx_rd_32(ha, ISR_INT_VECTOR);
2019
2020         reg = &ha->iobase->isp82;
2021
2022         spin_lock_irqsave(&ha->hardware_lock, flags);
2023         vha = pci_get_drvdata(ha->pdev);
2024         for (iter = 1; iter--; ) {
2025
2026                 if (RD_REG_DWORD(&reg->host_int)) {
2027                         stat = RD_REG_DWORD(&reg->host_status);
2028
2029                         switch (stat & 0xff) {
2030                         case 0x1:
2031                         case 0x2:
2032                         case 0x10:
2033                         case 0x11:
2034                                 qla82xx_mbx_completion(vha, MSW(stat));
2035                                 status |= MBX_INTERRUPT;
2036                                 break;
2037                         case 0x12:
2038                                 mb[0] = MSW(stat);
2039                                 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2040                                 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2041                                 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2042                                 qla2x00_async_event(vha, rsp, mb);
2043                                 break;
2044                         case 0x13:
2045                                 qla24xx_process_response_queue(vha, rsp);
2046                                 break;
2047                         default:
2048                                 DEBUG2(printk("scsi(%ld): "
2049                                         "Unrecognized interrupt type (%d).\n",
2050                                         vha->host_no, stat & 0xff));
2051                                 break;
2052                         }
2053                 }
2054                 WRT_REG_DWORD(&reg->host_int, 0);
2055         }
2056         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2057         if (!ha->flags.msi_enabled)
2058                 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2059
2060 #ifdef QL_DEBUG_LEVEL_17
2061         if (!irq && ha->flags.eeh_busy)
2062                 qla_printk(KERN_WARNING, ha,
2063                     "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
2064                     status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2065 #endif
2066
2067         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2068             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2069                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2070                 complete(&ha->mbx_intr_comp);
2071         }
2072         return IRQ_HANDLED;
2073 }
2074
2075 irqreturn_t
2076 qla82xx_msix_default(int irq, void *dev_id)
2077 {
2078         scsi_qla_host_t *vha;
2079         struct qla_hw_data *ha;
2080         struct rsp_que *rsp;
2081         struct device_reg_82xx __iomem *reg;
2082         int status = 0;
2083         unsigned long flags;
2084         uint32_t stat;
2085         uint16_t mb[4];
2086
2087         rsp = (struct rsp_que *) dev_id;
2088         if (!rsp) {
2089                 printk(KERN_INFO
2090                         "%s(): NULL response queue pointer\n", __func__);
2091                 return IRQ_NONE;
2092         }
2093         ha = rsp->hw;
2094
2095         reg = &ha->iobase->isp82;
2096
2097         spin_lock_irqsave(&ha->hardware_lock, flags);
2098         vha = pci_get_drvdata(ha->pdev);
2099         do {
2100                 if (RD_REG_DWORD(&reg->host_int)) {
2101                         stat = RD_REG_DWORD(&reg->host_status);
2102
2103                         switch (stat & 0xff) {
2104                         case 0x1:
2105                         case 0x2:
2106                         case 0x10:
2107                         case 0x11:
2108                                 qla82xx_mbx_completion(vha, MSW(stat));
2109                                 status |= MBX_INTERRUPT;
2110                                 break;
2111                         case 0x12:
2112                                 mb[0] = MSW(stat);
2113                                 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2114                                 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2115                                 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2116                                 qla2x00_async_event(vha, rsp, mb);
2117                                 break;
2118                         case 0x13:
2119                                 qla24xx_process_response_queue(vha, rsp);
2120                                 break;
2121                         default:
2122                                 DEBUG2(printk("scsi(%ld): "
2123                                         "Unrecognized interrupt type (%d).\n",
2124                                         vha->host_no, stat & 0xff));
2125                                 break;
2126                         }
2127                 }
2128                 WRT_REG_DWORD(&reg->host_int, 0);
2129         } while (0);
2130
2131         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2132
2133 #ifdef QL_DEBUG_LEVEL_17
2134         if (!irq && ha->flags.eeh_busy)
2135                 qla_printk(KERN_WARNING, ha,
2136                         "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
2137                         status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2138 #endif
2139
2140         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2141                 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2142                         set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2143                         complete(&ha->mbx_intr_comp);
2144         }
2145         return IRQ_HANDLED;
2146 }
2147
2148 irqreturn_t
2149 qla82xx_msix_rsp_q(int irq, void *dev_id)
2150 {
2151         scsi_qla_host_t *vha;
2152         struct qla_hw_data *ha;
2153         struct rsp_que *rsp;
2154         struct device_reg_82xx __iomem *reg;
2155
2156         rsp = (struct rsp_que *) dev_id;
2157         if (!rsp) {
2158                 printk(KERN_INFO
2159                         "%s(): NULL response queue pointer\n", __func__);
2160                 return IRQ_NONE;
2161         }
2162
2163         ha = rsp->hw;
2164         reg = &ha->iobase->isp82;
2165         spin_lock_irq(&ha->hardware_lock);
2166         vha = pci_get_drvdata(ha->pdev);
2167         qla24xx_process_response_queue(vha, rsp);
2168         WRT_REG_DWORD(&reg->host_int, 0);
2169         spin_unlock_irq(&ha->hardware_lock);
2170         return IRQ_HANDLED;
2171 }
2172
2173 void
2174 qla82xx_poll(int irq, void *dev_id)
2175 {
2176         scsi_qla_host_t *vha;
2177         struct qla_hw_data *ha;
2178         struct rsp_que *rsp;
2179         struct device_reg_82xx __iomem *reg;
2180         int status = 0;
2181         uint32_t stat;
2182         uint16_t mb[4];
2183         unsigned long flags;
2184
2185         rsp = (struct rsp_que *) dev_id;
2186         if (!rsp) {
2187                 printk(KERN_INFO
2188                         "%s(): NULL response queue pointer\n", __func__);
2189                 return;
2190         }
2191         ha = rsp->hw;
2192
2193         reg = &ha->iobase->isp82;
2194         spin_lock_irqsave(&ha->hardware_lock, flags);
2195         vha = pci_get_drvdata(ha->pdev);
2196
2197         if (RD_REG_DWORD(&reg->host_int)) {
2198                 stat = RD_REG_DWORD(&reg->host_status);
2199                 switch (stat & 0xff) {
2200                 case 0x1:
2201                 case 0x2:
2202                 case 0x10:
2203                 case 0x11:
2204                         qla82xx_mbx_completion(vha, MSW(stat));
2205                         status |= MBX_INTERRUPT;
2206                         break;
2207                 case 0x12:
2208                         mb[0] = MSW(stat);
2209                         mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2210                         mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2211                         mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2212                         qla2x00_async_event(vha, rsp, mb);
2213                         break;
2214                 case 0x13:
2215                         qla24xx_process_response_queue(vha, rsp);
2216                         break;
2217                 default:
2218                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2219                                 "(%d).\n",
2220                                 vha->host_no, stat & 0xff));
2221                         break;
2222                 }
2223         }
2224         WRT_REG_DWORD(&reg->host_int, 0);
2225         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2226 }
2227
2228 void
2229 qla82xx_enable_intrs(struct qla_hw_data *ha)
2230 {
2231         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2232         qla82xx_mbx_intr_enable(vha);
2233         spin_lock_irq(&ha->hardware_lock);
2234         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2235         spin_unlock_irq(&ha->hardware_lock);
2236         ha->interrupts_on = 1;
2237 }
2238
2239 void
2240 qla82xx_disable_intrs(struct qla_hw_data *ha)
2241 {
2242         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2243         qla82xx_mbx_intr_disable(vha);
2244         spin_lock_irq(&ha->hardware_lock);
2245         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
2246         spin_unlock_irq(&ha->hardware_lock);
2247         ha->interrupts_on = 0;
2248 }
2249
2250 void qla82xx_init_flags(struct qla_hw_data *ha)
2251 {
2252         struct qla82xx_legacy_intr_set *nx_legacy_intr;
2253
2254         /* ISP 8021 initializations */
2255         rwlock_init(&ha->hw_lock);
2256         ha->qdr_sn_window = -1;
2257         ha->ddr_mn_window = -1;
2258         ha->curr_window = 255;
2259         ha->portnum = PCI_FUNC(ha->pdev->devfn);
2260         nx_legacy_intr = &legacy_intr[ha->portnum];
2261         ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
2262         ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
2263         ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
2264         ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
2265 }
2266
2267 inline void
2268 qla82xx_set_drv_active(scsi_qla_host_t *vha)
2269 {
2270         uint32_t drv_active;
2271         struct qla_hw_data *ha = vha->hw;
2272
2273         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2274
2275         /* If reset value is all FF's, initialize DRV_ACTIVE */
2276         if (drv_active == 0xffffffff) {
2277                 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
2278                         QLA82XX_DRV_NOT_ACTIVE);
2279                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2280         }
2281         drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2282         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2283 }
2284
2285 inline void
2286 qla82xx_clear_drv_active(struct qla_hw_data *ha)
2287 {
2288         uint32_t drv_active;
2289
2290         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2291         drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2292         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2293 }
2294
2295 static inline int
2296 qla82xx_need_reset(struct qla_hw_data *ha)
2297 {
2298         uint32_t drv_state;
2299         int rval;
2300
2301         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2302         rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2303         return rval;
2304 }
2305
2306 static inline void
2307 qla82xx_set_rst_ready(struct qla_hw_data *ha)
2308 {
2309         uint32_t drv_state;
2310         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2311
2312         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2313
2314         /* If reset value is all FF's, initialize DRV_STATE */
2315         if (drv_state == 0xffffffff) {
2316                 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
2317                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2318         }
2319         drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2320         qla_printk(KERN_INFO, ha,
2321                 "%s(%ld):drv_state = 0x%x\n",
2322                 __func__, vha->host_no, drv_state);
2323         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2324 }
2325
2326 static inline void
2327 qla82xx_clear_rst_ready(struct qla_hw_data *ha)
2328 {
2329         uint32_t drv_state;
2330
2331         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2332         drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2333         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2334 }
2335
2336 static inline void
2337 qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2338 {
2339         uint32_t qsnt_state;
2340
2341         qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2342         qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2343         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2344 }
2345
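     /*
      * qla82xx_load_fw() - Initialize the CRB from ROM, bring QM and CAMRAM
      * out of reset, then load firmware either from flash or from the
      * request_firmware() blob depending on ql2xfwloadbin.
      */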
2346 static int
2347 qla82xx_load_fw(scsi_qla_host_t *vha)
2348 {
2349         int rst;
2350         struct fw_blob *blob;
2351         struct qla_hw_data *ha = vha->hw;
2352
2353         if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2354                 qla_printk(KERN_ERR, ha,
2355                         "%s: Error during CRB Initialization\n", __func__);
2356                 return QLA_FUNCTION_FAILED;
2357         }
2358         udelay(500);
2359
2360         /* Bring QM and CAMRAM out of reset */
2361         rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
2362         rst &= ~((1 << 28) | (1 << 24));
2363         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
2364
2365         /*
2366          * FW Load priority:
2367          * 1) Operational firmware residing in flash.
2368          * 2) Firmware via request-firmware interface (.bin file).
2369          */
2370         if (ql2xfwloadbin == 2)
2371                 goto try_blob_fw;
2372
2373         qla_printk(KERN_INFO, ha,
2374                 "Attempting to load firmware from flash\n");
2375
2376         if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2377                 qla_printk(KERN_INFO, ha,
2378                         "Firmware loaded successfully from flash\n");
2379                 return QLA_SUCCESS;
2380         }
2381 try_blob_fw:
2382         qla_printk(KERN_INFO, ha,
2383             "Attempting to load firmware from blob\n");
2384
2385         /* Load firmware blob. */
2386         blob = ha->hablob = qla2x00_request_firmware(vha);
2387         if (!blob) {
2388                 qla_printk(KERN_ERR, ha,
2389                         "Firmware image not present.\n");
2390                 goto fw_load_failed;
2391         }
2392
2393         /* Validating firmware blob */
2394         if (qla82xx_validate_firmware_blob(vha,
2395                 QLA82XX_FLASH_ROMIMAGE)) {
2396                 /* Fallback to URI format */
2397                 if (qla82xx_validate_firmware_blob(vha,
2398                         QLA82XX_UNIFIED_ROMIMAGE)) {
2399                         qla_printk(KERN_ERR, ha,
2400                                 "No valid firmware image found\n");
2401                         return QLA_FUNCTION_FAILED;
2402                 }
2403         }
2404
2405         if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2406                 qla_printk(KERN_INFO, ha,
2407                         "%s: Firmware loaded successfully "
2408                         "from binary blob\n", __func__);
2409                 return QLA_SUCCESS;
2410         } else {
2411                 qla_printk(KERN_ERR, ha,
2412                     "Firmware load failed from binary blob\n");
2413                 blob->fw = NULL;
2414                 blob = NULL;
2415                 goto fw_load_failed;
2416         }
2417         return QLA_SUCCESS;
2418
2419 fw_load_failed:
2420         return QLA_FUNCTION_FAILED;
2421 }
2422
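     /*
      * qla82xx_start_firmware() - Clear stale PEG state and halt-status
      * registers, load the firmware and handshake with the command and
      * receive PEGs; also records the negotiated PCIe link width.
      */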
2423 int
2424 qla82xx_start_firmware(scsi_qla_host_t *vha)
2425 {
2426         int           pcie_cap;
2427         uint16_t      lnk;
2428         struct qla_hw_data *ha = vha->hw;
2429
2430         /* scrub dma mask expansion register */
2431         qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
2432
2433         /* Put both the PEG CMD and RCV PEG to default state
2434          * of 0 before resetting the hardware
2435          */
2436         qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
2437         qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
2438
2439         /* Overwrite stale initialization register values */
2440         qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
2441         qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2442
2443         if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2444                 qla_printk(KERN_INFO, ha,
2445                         "%s: Error trying to start fw!\n", __func__);
2446                 return QLA_FUNCTION_FAILED;
2447         }
2448
2449         /* Handshake with the card before we register the devices. */
2450         if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2451                 qla_printk(KERN_INFO, ha,
2452                         "%s: Error during card handshake!\n", __func__);
2453                 return QLA_FUNCTION_FAILED;
2454         }
2455
2456         /* Negotiated Link width */
2457         pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
2458         pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
2459         ha->link_width = (lnk >> 4) & 0x3f;
2460
2461         /* Synchronize with Receive peg */
2462         return qla82xx_check_rcvpeg_state(ha);
2463 }
2464
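     /*
      * qla2xx_build_scsi_type_6_iocbs() - Build the data segment descriptors
      * for a Command Type 6 IOCB.  DSD list buffers are taken from
      * ha->gbl_dsd_list and chained together, QLA_DSDS_PER_IOCB entries per
      * list, with the first list referenced directly from the command packet.
      */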
2465 static inline int
2466 qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2467         uint16_t tot_dsds)
2468 {
2469         uint32_t *cur_dsd = NULL;
2470         scsi_qla_host_t *vha;
2471         struct qla_hw_data *ha;
2472         struct scsi_cmnd *cmd;
2473         struct  scatterlist *cur_seg;
2474         uint32_t *dsd_seg;
2475         void *next_dsd;
2476         uint8_t avail_dsds;
2477         uint8_t first_iocb = 1;
2478         uint32_t dsd_list_len;
2479         struct dsd_dma *dsd_ptr;
2480         struct ct6_dsd *ctx;
2481
2482         cmd = sp->cmd;
2483
2484         /* Update entry type to indicate Command Type 6 IOCB */
2485         *((uint32_t *)(&cmd_pkt->entry_type)) =
2486                 __constant_cpu_to_le32(COMMAND_TYPE_6);
2487
2488         /* No data transfer */
2489         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
2490                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
2491                 return 0;
2492         }
2493
2494         vha = sp->fcport->vha;
2495         ha = vha->hw;
2496
2497         /* Set transfer direction */
2498         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2499                 cmd_pkt->control_flags =
2500                     __constant_cpu_to_le16(CF_WRITE_DATA);
2501                 ha->qla_stats.output_bytes += scsi_bufflen(cmd);
2502         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
2503                 cmd_pkt->control_flags =
2504                     __constant_cpu_to_le16(CF_READ_DATA);
2505                 ha->qla_stats.input_bytes += scsi_bufflen(cmd);
2506         }
2507
2508         cur_seg = scsi_sglist(cmd);
2509         ctx = sp->ctx;
2510
2511         while (tot_dsds) {
2512                 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
2513                     QLA_DSDS_PER_IOCB : tot_dsds;
2514                 tot_dsds -= avail_dsds;
2515                 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
2516
2517                 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
2518                     struct dsd_dma, list);
2519                 next_dsd = dsd_ptr->dsd_addr;
2520                 list_del(&dsd_ptr->list);
2521                 ha->gbl_dsd_avail--;
2522                 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
2523                 ctx->dsd_use_cnt++;
2524                 ha->gbl_dsd_inuse++;
2525
2526                 if (first_iocb) {
2527                         first_iocb = 0;
2528                         dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2529                         *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2530                         *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2531                         *dsd_seg++ = dsd_list_len;
2532                 } else {
2533                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2534                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2535                         *cur_dsd++ = dsd_list_len;
2536                 }
2537                 cur_dsd = (uint32_t *)next_dsd;
2538                 while (avail_dsds) {
2539                         dma_addr_t      sle_dma;
2540
2541                         sle_dma = sg_dma_address(cur_seg);
2542                         *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2543                         *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2544                         *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
2545                         cur_seg++;
2546                         avail_dsds--;
2547                 }
2548         }
2549
2550         /* Null termination */
2551         *cur_dsd++ =  0;
2552         *cur_dsd++ = 0;
2553         *cur_dsd++ = 0;
2554         cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
2555         return 0;
2556 }
2557
2558 /*
2559  * qla82xx_calc_dsd_lists() - Determine number of DSD lists required
2560  * for Command Type 6.
2561  *
2562  * @dsds: number of data segment descriptors needed
2563  *
2564  * Returns the number of DSD lists needed to store @dsds.
2565  */
2566 inline uint16_t
2567 qla82xx_calc_dsd_lists(uint16_t dsds)
2568 {
2569         uint16_t dsd_lists = 0;
2570
2571         dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
2572         if (dsds % QLA_DSDS_PER_IOCB)
2573                 dsd_lists++;
2574         return dsd_lists;
2575 }
2576
2577 /*
2578  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2579  * @sp: command to send to the ISP
2580  *
2581  * Returns non-zero if a failure occurred, else zero.
2582  */
2583 int
2584 qla82xx_start_scsi(srb_t *sp)
2585 {
2586         int             ret, nseg;
2587         unsigned long   flags;
2588         struct scsi_cmnd *cmd;
2589         uint32_t        *clr_ptr;
2590         uint32_t        index;
2591         uint32_t        handle;
2592         uint16_t        cnt;
2593         uint16_t        req_cnt;
2594         uint16_t        tot_dsds;
2595         struct device_reg_82xx __iomem *reg;
2596         uint32_t dbval;
2597         uint32_t *fcp_dl;
2598         uint8_t additional_cdb_len;
2599         struct ct6_dsd *ctx;
2600         struct scsi_qla_host *vha = sp->fcport->vha;
2601         struct qla_hw_data *ha = vha->hw;
2602         struct req_que *req = NULL;
2603         struct rsp_que *rsp = NULL;
2604
2605         /* Setup device pointers. */
2606         ret = 0;
2607         reg = &ha->iobase->isp82;
2608         cmd = sp->cmd;
2609         req = vha->req;
2610         rsp = ha->rsp_q_map[0];
2611
2612         /* So we know we haven't pci_map'ed anything yet */
2613         tot_dsds = 0;
2614
2615         dbval = 0x04 | (ha->portnum << 5);
2616
2617         /* Send marker if required */
2618         if (vha->marker_needed != 0) {
2619                 if (qla2x00_marker(vha, req,
2620                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2621                         return QLA_FUNCTION_FAILED;
2622                 vha->marker_needed = 0;
2623         }
2624
2625         /* Acquire ring specific lock */
2626         spin_lock_irqsave(&ha->hardware_lock, flags);
2627
2628         /* Check for room in outstanding command list. */
2629         handle = req->current_outstanding_cmd;
2630         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2631                 handle++;
2632                 if (handle == MAX_OUTSTANDING_COMMANDS)
2633                         handle = 1;
2634                 if (!req->outstanding_cmds[handle])
2635                         break;
2636         }
2637         if (index == MAX_OUTSTANDING_COMMANDS)
2638                 goto queuing_error;
2639
2640         /* Map the sg table so we have an accurate count of sg entries needed */
2641         if (scsi_sg_count(cmd)) {
2642                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2643                     scsi_sg_count(cmd), cmd->sc_data_direction);
2644                 if (unlikely(!nseg))
2645                         goto queuing_error;
2646         } else
2647                 nseg = 0;
2648
2649         tot_dsds = nseg;
2650
2651         if (tot_dsds > ql2xshiftctondsd) {
2652                 struct cmd_type_6 *cmd_pkt;
2653                 uint16_t more_dsd_lists = 0;
2654                 struct dsd_dma *dsd_ptr;
2655                 uint16_t i;
2656
2657                 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2658                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN)
2659                         goto queuing_error;
2660
2661                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2662                         goto sufficient_dsds;
2663                 else
2664                         more_dsd_lists -= ha->gbl_dsd_avail;
2665
2666                 for (i = 0; i < more_dsd_lists; i++) {
2667                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2668                         if (!dsd_ptr)
2669                                 goto queuing_error;
2670
2671                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2672                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2673                         if (!dsd_ptr->dsd_addr) {
2674                                 kfree(dsd_ptr);
2675                                 goto queuing_error;
2676                         }
2677                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2678                         ha->gbl_dsd_avail++;
2679                 }
2680
2681 sufficient_dsds:
2682                 req_cnt = 1;
2683
2684                 if (req->cnt < (req_cnt + 2)) {
2685                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2686                                 &reg->req_q_out[0]);
2687                         if (req->ring_index < cnt)
2688                                 req->cnt = cnt - req->ring_index;
2689                         else
2690                                 req->cnt = req->length -
2691                                         (req->ring_index - cnt);
2692                 }
2693
2694                 if (req->cnt < (req_cnt + 2))
2695                         goto queuing_error;
2696
2697                 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2698                 if (!sp->ctx) {
2699                         DEBUG(printk(KERN_INFO
2700                                 "%s(%ld): failed to allocate"
2701                                 " ctx.\n", __func__, vha->host_no));
2702                         goto queuing_error;
2703                 }
2704                 memset(ctx, 0, sizeof(struct ct6_dsd));
2705                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2706                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2707                 if (!ctx->fcp_cmnd) {
2708                         DEBUG2_3(printk("%s(%ld): failed to allocate"
2709                                 " fcp_cmnd.\n", __func__, vha->host_no));
2710                         goto queuing_error_fcp_cmnd;
2711                 }
2712
2713                 /* Initialize the DSD list and dma handle */
2714                 INIT_LIST_HEAD(&ctx->dsd_list);
2715                 ctx->dsd_use_cnt = 0;
2716
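                     /*
                      * FCP_CMND IU sizing: an 8-byte LUN plus 4 bytes of
                      * control fields precede the CDB, and a 4-byte FCP_DL
                      * follows it, hence the 12 + CDB length + 4 totals below.
                      */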
2717                 if (cmd->cmd_len > 16) {
2718                         additional_cdb_len = cmd->cmd_len - 16;
2719                         if ((cmd->cmd_len % 4) != 0) {
2720                                 /* SCSI command bigger than 16 bytes must be
2721                                  * multiple of 4
2722                                  */
2723                                 goto queuing_error_fcp_cmnd;
2724                         }
2725                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2726                 } else {
2727                         additional_cdb_len = 0;
2728                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2729                 }
2730
2731                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2732                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2733
2734                 /* Zero out remaining portion of packet. */
2735                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2736                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2737                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2738                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2739
2740                 /* Set NPORT-ID and LUN number*/
2741                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2742                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2743                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2744                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2745                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2746
2747                 /* Build IOCB segments */
2748                 if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2749                         goto queuing_error_fcp_cmnd;
2750
2751                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2752
2753                 /* build FCP_CMND IU */
2754                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2755                 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2756                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2757
2758                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2759                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2760                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2761                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2762
2763                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2764
2765                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2766                     additional_cdb_len);
2767                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2768
2769                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2770                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2771                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2772                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2773                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2774
2775                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2776                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2777                 /* Set total data segment count. */
2778                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2779                 /* Specify response queue number where
2780                  * completion should happen
2781                  */
2782                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2783         } else {
2784                 struct cmd_type_7 *cmd_pkt;
2785                 req_cnt = qla24xx_calc_iocbs(tot_dsds);
2786                 if (req->cnt < (req_cnt + 2)) {
2787                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2788                             &reg->req_q_out[0]);
2789                         if (req->ring_index < cnt)
2790                                 req->cnt = cnt - req->ring_index;
2791                         else
2792                                 req->cnt = req->length -
2793                                         (req->ring_index - cnt);
2794                 }
2795                 if (req->cnt < (req_cnt + 2))
2796                         goto queuing_error;
2797
2798                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2799                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2800
2801                 /* Zero out remaining portion of packet. */
2802                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2803                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2804                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2805                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2806
2807                 /* Set NPORT-ID and LUN number*/
2808                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2809                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2810                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2811                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2812                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2813
2814                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2815                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2816                         sizeof(cmd_pkt->lun));
2817
2818                 /* Load SCSI command packet. */
2819                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2820                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2821
2822                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2823
2824                 /* Build IOCB segments */
2825                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2826
2827                 /* Set total data segment count. */
2828                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2829                 /* Specify response queue number where
2830                  * completion should happen.
2831                  */
2832                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2833
2834         }
2835         /* Build command packet. */
2836         req->current_outstanding_cmd = handle;
2837         req->outstanding_cmds[handle] = sp;
2838         sp->handle = handle;
2839         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2840         req->cnt -= req_cnt;
2841         wmb();
2842
2843         /* Adjust ring index. */
2844         req->ring_index++;
2845         if (req->ring_index == req->length) {
2846                 req->ring_index = 0;
2847                 req->ring_ptr = req->ring;
2848         } else
2849                 req->ring_ptr++;
2850
2851         sp->flags |= SRB_DMA_VALID;
2852
2853         /* Set chip new ring index. */
2854         /* write, read and verify logic */
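             /*
              * Doorbell value: dbval was seeded earlier with 0x04 and the port
              * number (bits 7:5); the request queue id (bits 15:8) and the new
              * ring index (bits 31:16) are folded in here before the value is
              * written to the doorbell.
              */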
2855         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2856         if (ql2xdbwr)
2857                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2858         else {
2859                 WRT_REG_DWORD(
2860                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2861                         dbval);
2862                 wmb();
2863                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2864                         WRT_REG_DWORD(
2865                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2866                                 dbval);
2867                         wmb();
2868                 }
2869         }
2870
2871         /* Manage unprocessed RIO/ZIO commands in response queue. */
2872         if (vha->flags.process_response_queue &&
2873             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2874                 qla24xx_process_response_queue(vha, rsp);
2875
2876         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2877         return QLA_SUCCESS;
2878
2879 queuing_error_fcp_cmnd:
2880         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2881 queuing_error:
2882         if (tot_dsds)
2883                 scsi_dma_unmap(cmd);
2884
2885         if (sp->ctx) {
2886                 mempool_free(sp->ctx, ha->ctx_mempool);
2887                 sp->ctx = NULL;
2888         }
2889         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2890
2891         return QLA_FUNCTION_FAILED;
2892 }
2893
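     /*
      * qla82xx_read_flash_data() - Read @length bytes of flash starting at
      * @faddr into @dwptr as little-endian dwords, using ROM fast reads.
      */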
2894 static uint32_t *
2895 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2896         uint32_t length)
2897 {
2898         uint32_t i;
2899         uint32_t val;
2900         struct qla_hw_data *ha = vha->hw;
2901
2902         /* Dword reads to flash. */
2903         for (i = 0; i < length/4; i++, faddr += 4) {
2904                 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
2905                         qla_printk(KERN_WARNING, ha,
2906                             "ROM fast read failed\n");
2907                         goto done_read;
2908                 }
2909                 dwptr[i] = __constant_cpu_to_le32(val);
2910         }
2911 done_read:
2912         return dwptr;
2913 }
2914
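/*
 * qla82xx_unprotect_flash
 *      Grab the ROM lock and clear the block-protect bits in the flash
 *      status register so that subsequent erase/program cycles succeed.
 *
 * Return:
 *      0 on success, negative value on failure; the hardware semaphore
 *      is released before returning once it has been acquired.
 */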
2915 static int
2916 qla82xx_unprotect_flash(struct qla_hw_data *ha)
2917 {
2918         int ret;
2919         uint32_t val;
2920
2921         ret = ql82xx_rom_lock_d(ha);
2922         if (ret < 0) {
2923                 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
2924                 return ret;
2925         }
2926
2927         ret = qla82xx_read_status_reg(ha, &val);
2928         if (ret < 0)
2929                 goto done_unprotect;
2930
2931         val &= ~(BLOCK_PROTECT_BITS << 2);
2932         ret = qla82xx_write_status_reg(ha, val);
2933         if (ret < 0) {
2934                 val |= (BLOCK_PROTECT_BITS << 2);
2935                 qla82xx_write_status_reg(ha, val);
2936         }
2937
2938         if (qla82xx_write_disable_flash(ha) != 0)
2939                 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
2940
2941 done_unprotect:
2942         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
2943         return ret;
2944 }
2945
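/*
 * qla82xx_protect_flash
 *      Grab the ROM lock and set the block-protect bits in the flash
 *      status register again, write-protecting all sectors after an
 *      update.
 *
 * Return:
 *      0 on success, negative value on failure.
 */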
2946 static int
2947 qla82xx_protect_flash(struct qla_hw_data *ha)
2948 {
2949         int ret;
2950         uint32_t val;
2951
2952         ret = ql82xx_rom_lock_d(ha);
2953         if (ret < 0) {
2954                 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
2955                 return ret;
2956         }
2957
2958         ret = qla82xx_read_status_reg(ha, &val);
2959         if (ret < 0)
2960                 goto done_protect;
2961
2962         val |= (BLOCK_PROTECT_BITS << 2);
2963         /* LOCK all sectors */
2964         ret = qla82xx_write_status_reg(ha, val);
2965         if (ret < 0)
2966                 qla_printk(KERN_WARNING, ha, "Write status register failed\n");
2967
2968         if (qla82xx_write_disable_flash(ha) != 0)
2969                 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
2970 done_protect:
2971         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
2972         return ret;
2973 }
2974
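/*
 * qla82xx_erase_sector
 *      Issue an M25P sector-erase (M25P_INSTR_SE) for the sector
 *      containing 'addr' and wait for both the ROM interface and the
 *      flash part to finish.
 *
 * Return:
 *      0 on success, non-zero on failure.
 */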
2975 static int
2976 qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
2977 {
2978         int ret = 0;
2979
2980         ret = ql82xx_rom_lock_d(ha);
2981         if (ret < 0) {
2982                 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
2983                 return ret;
2984         }
2985
2986         qla82xx_flash_set_write_enable(ha);
2987         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
2988         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
2989         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
2990
2991         if (qla82xx_wait_rom_done(ha)) {
2992                 qla_printk(KERN_WARNING, ha,
2993                     "Error waiting for rom done\n");
2994                 ret = -1;
2995                 goto done;
2996         }
2997         ret = qla82xx_flash_wait_write_finish(ha);
2998 done:
2999         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
3000         return ret;
3001 }
3002
3003 /*
3004  * Address and length are given in bytes.
3005  */
3006 uint8_t *
3007 qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3008         uint32_t offset, uint32_t length)
3009 {
3010         scsi_block_requests(vha->host);
3011         qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
3012         scsi_unblock_requests(vha->host);
3013         return buf;
3014 }
3015
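/*
 * qla82xx_write_flash_data
 *      Program 'dwords' dwords from 'dwptr' into flash at byte address
 *      'faddr': unprotect the part, erase each sector as its boundary is
 *      crossed, program by burst (when a DMA buffer is available) or one
 *      dword at a time, then re-protect the part.
 *
 * Return:
 *      0 on success, non-zero on failure.
 */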
3016 static int
3017 qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3018         uint32_t faddr, uint32_t dwords)
3019 {
3020         int ret;
3021         uint32_t liter;
3022         uint32_t sec_mask, rest_addr;
3023         dma_addr_t optrom_dma;
3024         void *optrom = NULL;
3025         int page_mode = 0;
3026         struct qla_hw_data *ha = vha->hw;
3027
3028         ret = -1;
3029
3030         /* Prepare burst-capable write on supported ISPs. */
3031         if (page_mode && !(faddr & 0xfff) &&
3032             dwords > OPTROM_BURST_DWORDS) {
3033                 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
3034                     &optrom_dma, GFP_KERNEL);
3035                 if (!optrom) {
3036                         qla_printk(KERN_DEBUG, ha,
3037                                 "Unable to allocate memory for optrom "
3038                                 "burst write (%x KB).\n",
3039                                 OPTROM_BURST_SIZE / 1024);
3040                 }
3041         }
3042
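        /*
         * rest_addr masks the byte offset within one flash sector
         * (fdt_block_size); (faddr & rest_addr) == 0 below marks a sector
         * boundary that must be erased before programming.
         */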
3043         rest_addr = ha->fdt_block_size - 1;
3044         sec_mask = ~rest_addr;
3045
3046         ret = qla82xx_unprotect_flash(ha);
3047         if (ret) {
3048                 qla_printk(KERN_WARNING, ha,
3049                         "Unable to unprotect flash for update.\n");
3050                 goto write_done;
3051         }
3052
3053         for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3054                 /* Are we at the beginning of a sector? */
3055                 if ((faddr & rest_addr) == 0) {
3056
3057                         ret = qla82xx_erase_sector(ha, faddr);
3058                         if (ret) {
3059                                 DEBUG9(qla_printk(KERN_ERR, ha,
3060                                     "Unable to erase sector: "
3061                                     "address=%x.\n", faddr));
3062                                 break;
3063                         }
3064                 }
3065
3066                 /* Go with burst-write. */
3067                 if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
3068                         /* Copy data to DMA'ble buffer. */
3069                         memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
3070
3071                         ret = qla2x00_load_ram(vha, optrom_dma,
3072                             (ha->flash_data_off | faddr),
3073                             OPTROM_BURST_DWORDS);
3074                         if (ret != QLA_SUCCESS) {
3075                                 qla_printk(KERN_WARNING, ha,
3076                                     "Unable to burst-write optrom segment "
3077                                     "(%x/%x/%llx).\n", ret,
3078                                     (ha->flash_data_off | faddr),
3079                                     (unsigned long long)optrom_dma);
3080                                 qla_printk(KERN_WARNING, ha,
3081                                     "Reverting to slow-write.\n");
3082
3083                                 dma_free_coherent(&ha->pdev->dev,
3084                                     OPTROM_BURST_SIZE, optrom, optrom_dma);
3085                                 optrom = NULL;
3086                         } else {
3087                                 liter += OPTROM_BURST_DWORDS - 1;
3088                                 faddr += OPTROM_BURST_DWORDS - 1;
3089                                 dwptr += OPTROM_BURST_DWORDS - 1;
3090                                 continue;
3091                         }
3092                 }
3093
3094                 ret = qla82xx_write_flash_dword(ha, faddr,
3095                     cpu_to_le32(*dwptr));
3096                 if (ret) {
3097                         DEBUG9(printk(KERN_DEBUG "%s(%ld): Unable to program "
3098                             "flash address=%x data=%x.\n", __func__,
3099                             vha->host_no, faddr, *dwptr));
3100                         break;
3101                 }
3102         }
3103
3104         ret = qla82xx_protect_flash(ha);
3105         if (ret)
3106                 qla_printk(KERN_WARNING, ha,
3107                     "Unable to protect flash after update.\n");
3108 write_done:
3109         if (optrom)
3110                 dma_free_coherent(&ha->pdev->dev,
3111                     OPTROM_BURST_SIZE, optrom, optrom_dma);
3112         return ret;
3113 }
3114
3115 int
3116 qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3117         uint32_t offset, uint32_t length)
3118 {
3119         int rval;
3120
3121         /* Suspend HBA. */
3122         scsi_block_requests(vha->host);
3123         rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
3124                 length >> 2);
3125         scsi_unblock_requests(vha->host);
3126
3127         /* Convert ISP82xx-specific return value to a generic one. */
3128         if (rval)
3129                 rval = QLA_FUNCTION_FAILED;
3130         else
3131                 rval = QLA_SUCCESS;
3132         return rval;
3133 }
3134
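/*
 * qla82xx_start_iocbs
 *      Advance the request-queue ring index and ring the ISP82xx request
 *      doorbell, either through the CRB write path (ql2xdbwr) or through
 *      a direct MMIO write that is read back until the value sticks.
 */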
3135 void
3136 qla82xx_start_iocbs(srb_t *sp)
3137 {
3138         struct qla_hw_data *ha = sp->fcport->vha->hw;
3139         struct req_que *req = ha->req_q_map[0];
3140         struct device_reg_82xx __iomem *reg;
3141         uint32_t dbval;
3142
3143         /* Adjust ring index. */
3144         req->ring_index++;
3145         if (req->ring_index == req->length) {
3146                 req->ring_index = 0;
3147                 req->ring_ptr = req->ring;
3148         } else
3149                 req->ring_ptr++;
3150
3151         reg = &ha->iobase->isp82;
3152         dbval = 0x04 | (ha->portnum << 5);
3153
3154         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3155         if (ql2xdbwr)
3156                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
3157         else {
3158                 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
3159                 wmb();
3160                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3161                         WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
3162                                 dbval);
3163                         wmb();
3164                 }
3165         }
3166 }
3167
3168 void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3169 {
3170         if (qla82xx_rom_lock(ha))
3171                 /* Someone else is holding the lock. */
3172                 qla_printk(KERN_INFO, ha, "Resetting rom_lock\n");
3173
3174         /*
3175          * Either we got the lock, or someone
3176          * else died while holding it.
3177          * In either case, unlock.
3178          */
3179         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
3180 }
3181
3182 /*
3183  * qla82xx_device_bootstrap
3184  *    Initialize device, set DEV_READY, start fw
3185  *
3186  * Note:
3187  *      IDC lock must be held upon entry
3188  *
3189  * Return:
3190  *    Success : 0
3191  *    Failed  : 1
3192  */
3193 static int
3194 qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3195 {
3196         int rval = QLA_SUCCESS;
3197         int i, timeout;
3198         uint32_t old_count, count;
3199         struct qla_hw_data *ha = vha->hw;
3200         int need_reset = 0, peg_stuck = 1;
3201
3202         need_reset = qla82xx_need_reset(ha);
3203
3204         old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3205
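        /*
         * Sample the PEG alive counter for roughly two seconds; if it
         * never changes, the firmware PEGs are considered stuck.
         */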
3206         for (i = 0; i < 10; i++) {
3207                 timeout = msleep_interruptible(200);
3208                 if (timeout) {
3209                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3210                                 QLA82XX_DEV_FAILED);
3211                         return QLA_FUNCTION_FAILED;
3212                 }
3213
3214                 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3215                 if (count != old_count)
3216                         peg_stuck = 0;
3217         }
3218
3219         if (need_reset) {
3220                 /* We are trying to perform a recovery here. */
3221                 if (peg_stuck)
3222                         qla82xx_rom_lock_recovery(ha);
3223                 goto dev_initialize;
3224         } else {
3225                 /* Start of day for this ha context. */
3226                 if (peg_stuck) {
3227                         /* Either we are the first, or recovery is in progress. */
3228                         qla82xx_rom_lock_recovery(ha);
3229                         goto dev_initialize;
3230                 } else
3231                         /* Firmware already running. */
3232                         goto dev_ready;
3233         }
3234
3235         return rval;
3236
3237 dev_initialize:
3238         /* set to DEV_INITIALIZING */
3239         qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
3240         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
3241
3242         /* Driver that sets device state to INITIALIZING sets IDC version */
3243         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
3244
3245         qla82xx_idc_unlock(ha);
3246         rval = qla82xx_start_firmware(vha);
3247         qla82xx_idc_lock(ha);
3248
3249         if (rval != QLA_SUCCESS) {
3250                 qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
3251                 qla82xx_clear_drv_active(ha);
3252                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
3253                 return rval;
3254         }
3255
3256 dev_ready:
3257         qla_printk(KERN_INFO, ha, "HW State: READY\n");
3258         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
3259
3260         return QLA_SUCCESS;
3261 }
3262
3263 static void
3264 qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3265 {
3266         struct qla_hw_data *ha = vha->hw;
3267
3268         /* Disable the board */
3269         qla_printk(KERN_INFO, ha, "Disabling the board\n");
3270
3271         qla82xx_idc_lock(ha);
3272         qla82xx_clear_drv_active(ha);
3273         qla82xx_idc_unlock(ha);
3274
3275         /* Set DEV_FAILED flag to disable timer */
3276         vha->device_flags |= DFLG_DEV_FAILED;
3277         qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3278         qla2x00_mark_all_devices_lost(vha, 0);
3279         vha->flags.online = 0;
3280         vha->flags.init_done = 0;
3281 }
3282
3283 /*
3284  * qla82xx_need_reset_handler
3285  *    Code to start reset sequence
3286  *
3287  * Note:
3288  *      IDC lock must be held upon entry
3289  *
3290  * Return:
3291  *    None (the device state is forced to QLA82XX_DEV_COLD unless
3292  *    another function has already moved it to INITIALIZING)
3293  */
3294 static void
3295 qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3296 {
3297         uint32_t dev_state, drv_state, drv_active;
3298         unsigned long reset_timeout;
3299         struct qla_hw_data *ha = vha->hw;
3300         struct req_que *req = ha->req_q_map[0];
3301
3302         if (vha->flags.online) {
3303                 qla82xx_idc_unlock(ha);
3304                 qla2x00_abort_isp_cleanup(vha);
3305                 ha->isp_ops->get_flash_version(vha, req->ring);
3306                 ha->isp_ops->nvram_config(vha);
3307                 qla82xx_idc_lock(ha);
3308         }
3309
3310         qla82xx_set_rst_ready(ha);
3311
3312         /* Wait up to nx_reset_timeout seconds for reset ack from all functions */
3313         reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
3314
3315         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3316         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3317
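        /*
         * Poll until every active function has acknowledged the reset
         * (drv_state matches drv_active) or the reset timeout expires.
         */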
3318         while (drv_state != drv_active) {
3319                 if (time_after_eq(jiffies, reset_timeout)) {
3320                         qla_printk(KERN_INFO, ha,
3321                                 "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME);
3322                         break;
3323                 }
3324                 qla82xx_idc_unlock(ha);
3325                 msleep(1000);
3326                 qla82xx_idc_lock(ha);
3327                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3328                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3329         }
3330
3331         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3332         qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
3333                 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3334
3335         /* Force to DEV_COLD unless someone else is starting a reset */
3336         if (dev_state != QLA82XX_DEV_INITIALIZING) {
3337                 qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
3338                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3339         }
3340 }
3341
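/*
 * qla82xx_check_fw_alive
 *      Compare the firmware heartbeat (PEG alive counter) with the value
 *      seen on the previous pass.  If it has not moved for two consecutive
 *      passes, mark the firmware as hung, schedule an ISP abort (or flag
 *      the device unrecoverable, based on the halt status) and prematurely
 *      complete any mailbox command waiting on an interrupt.
 */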
3342 static void
3343 qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3344 {
3345         uint32_t fw_heartbeat_counter, halt_status;
3346         struct qla_hw_data *ha = vha->hw;
3347
3348         fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3349         /* all 0xff, assume AER/EEH in progress, ignore */
3350         if (fw_heartbeat_counter == 0xffffffff)
3351                 return;
3352         if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3353                 vha->seconds_since_last_heartbeat++;
3354                 /* FW not alive after 2 seconds */
3355                 if (vha->seconds_since_last_heartbeat == 2) {
3356                         vha->seconds_since_last_heartbeat = 0;
3357                         halt_status = qla82xx_rd_32(ha,
3358                                 QLA82XX_PEG_HALT_STATUS1);
3359                         if (halt_status & HALT_STATUS_UNRECOVERABLE) {
3360                                 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
3361                         } else {
3362                                 qla_printk(KERN_INFO, ha,
3363                                         "scsi(%ld): %s - detect abort needed\n",
3364                                         vha->host_no, __func__);
3365                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3366                         }
3367                         qla2xxx_wake_dpc(vha);
3368                         ha->flags.fw_hung = 1;
3369                         if (ha->flags.mbox_busy) {
3370                                 ha->flags.mbox_int = 1;
3371                                 DEBUG2(qla_printk(KERN_ERR, ha,
3372                                         "Firmware hung, doing premature "
3373                                         "completion of mbx command\n"));
3374                                 if (test_bit(MBX_INTR_WAIT,
3375                                         &ha->mbx_cmd_flags))
3376                                         complete(&ha->mbx_intr_comp);
3377                         }
3378                 }
3379         } else
3380                 vha->seconds_since_last_heartbeat = 0;
3381         vha->fw_heartbeat_counter = fw_heartbeat_counter;
3382 }
3383
3384 /*
3385  * qla82xx_device_state_handler
3386  *      Main state handler
3387  *
3388  * Note:
3389  *      IDC lock must be held upon entry
3390  *
3391  * Return:
3392  *    Success : 0
3393  *    Failed  : 1
3394  */
3395 int
3396 qla82xx_device_state_handler(scsi_qla_host_t *vha)
3397 {
3398         uint32_t dev_state;
3399         int rval = QLA_SUCCESS;
3400         unsigned long dev_init_timeout;
3401         struct qla_hw_data *ha = vha->hw;
3402
3403         qla82xx_idc_lock(ha);
3404         if (!vha->flags.init_done)
3405                 qla82xx_set_drv_active(vha);
3406
3407         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3408         qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
3409                 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3410
3411         /* Wait up to nx_dev_init_timeout seconds for the device to go ready */
3412         dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
3413
3414         while (1) {
3415
3416                 if (time_after_eq(jiffies, dev_init_timeout)) {
3417                         DEBUG(qla_printk(KERN_INFO, ha,
3418                                 "%s: device init failed!\n",
3419                                 QLA2XXX_DRIVER_NAME));
3420                         rval = QLA_FUNCTION_FAILED;
3421                         break;
3422                 }
3423                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3424                 qla_printk(KERN_INFO, ha,
3425                         "2:Device state is 0x%x = %s\n", dev_state,
3426                         dev_state < MAX_STATES ?
3427                         qdev_state[dev_state] : "Unknown");
3428
3429                 switch (dev_state) {
3430                 case QLA82XX_DEV_READY:
3431                         goto exit;
3432                 case QLA82XX_DEV_COLD:
3433                         rval = qla82xx_device_bootstrap(vha);
3434                         goto exit;
3435                 case QLA82XX_DEV_INITIALIZING:
3436                         qla82xx_idc_unlock(ha);
3437                         msleep(1000);
3438                         qla82xx_idc_lock(ha);
3439                         break;
3440                 case QLA82XX_DEV_NEED_RESET:
3441                         if (!ql2xdontresethba)
3442                                 qla82xx_need_reset_handler(vha);
3443                         break;
3444                 case QLA82XX_DEV_NEED_QUIESCENT:
3445                         qla82xx_set_qsnt_ready(ha);
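                        /* Fall through - wait while the device quiesces. */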
3446                 case QLA82XX_DEV_QUIESCENT:
3447                         qla82xx_idc_unlock(ha);
3448                         msleep(1000);
3449                         qla82xx_idc_lock(ha);
3450                         break;
3451                 case QLA82XX_DEV_FAILED:
3452                         qla82xx_dev_failed_handler(vha);
3453                         rval = QLA_FUNCTION_FAILED;
3454                         goto exit;
3455                 default:
3456                         qla82xx_idc_unlock(ha);
3457                         msleep(1000);
3458                         qla82xx_idc_lock(ha);
3459                 }
3460         }
3461 exit:
3462         qla82xx_idc_unlock(ha);
3463         return rval;
3464 }
3465
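/*
 * qla82xx_watchdog
 *      Unless an ISP abort is already in progress, look at the CRB device
 *      state: schedule an abort when the firmware requests NEED_RESET,
 *      otherwise verify that the firmware heartbeat is still advancing.
 */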
3466 void qla82xx_watchdog(scsi_qla_host_t *vha)
3467 {
3468         uint32_t dev_state;
3469         struct qla_hw_data *ha = vha->hw;
3470
3471         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3472
3473         /* don't poll if reset is going on */
3474         if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
3475                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
3476                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) {
3477                 if (dev_state == QLA82XX_DEV_NEED_RESET) {
3478                         qla_printk(KERN_WARNING, ha,
3479                                 "%s(): Adapter reset needed!\n", __func__);
3480                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3481                         qla2xxx_wake_dpc(vha);
3482                         ha->flags.fw_hung = 1;
3483                         if (ha->flags.mbox_busy) {
3484                                 ha->flags.mbox_int = 1;
3485                                 DEBUG2(qla_printk(KERN_ERR, ha,
3486                                         "Need reset, doing premature "
3487                                         "completion of mbx command\n"));
3488                                 if (test_bit(MBX_INTR_WAIT,
3489                                         &ha->mbx_cmd_flags))
3490                                         complete(&ha->mbx_intr_comp);
3491                         }
3492                 } else {
3493                         qla82xx_check_fw_alive(vha);
3494                 }
3495         }
3496 }
3497
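/*
 * qla82xx_load_risc
 *      For ISP82xx the firmware is loaded and started by the device state
 *      handler; 'srisc_addr' is not used here.
 */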
3498 int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3499 {
3500         int rval;
3501         rval = qla82xx_device_state_handler(vha);
3502         return rval;
3503 }
3504
3505 /*
3506  *  qla82xx_abort_isp
3507  *      Resets ISP and aborts all outstanding commands.
3508  *
3509  * Input:
3510  *      ha           = adapter block pointer.
3511  *
3512  * Returns:
3513  *      0 = success
3514  */
3515 int
3516 qla82xx_abort_isp(scsi_qla_host_t *vha)
3517 {
3518         int rval;
3519         struct qla_hw_data *ha = vha->hw;
3520         uint32_t dev_state;
3521
3522         if (vha->device_flags & DFLG_DEV_FAILED) {
3523                 qla_printk(KERN_WARNING, ha,
3524                         "%s(%ld): Device in failed state, "
3525                         "Exiting.\n", __func__, vha->host_no);
3526                 return QLA_SUCCESS;
3527         }
3528
3529         qla82xx_idc_lock(ha);
3530         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3531         if (dev_state == QLA82XX_DEV_READY) {
3532                 qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
3533                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3534                         QLA82XX_DEV_NEED_RESET);
3535         } else
3536                 qla_printk(KERN_INFO, ha, "HW State: %s\n",
3537                         dev_state < MAX_STATES ?
3538                         qdev_state[dev_state] : "Unknown");
3539         qla82xx_idc_unlock(ha);
3540
3541         rval = qla82xx_device_state_handler(vha);
3542
3543         qla82xx_idc_lock(ha);
3544         qla82xx_clear_rst_ready(ha);
3545         qla82xx_idc_unlock(ha);
3546
3547         if (rval == QLA_SUCCESS) {
3548                 ha->flags.fw_hung = 0;
3549                 qla82xx_restart_isp(vha);
3550         }
3551
3552         if (rval) {
3553                 vha->flags.online = 1;
3554                 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3555                         if (ha->isp_abort_cnt == 0) {
3556                                 qla_printk(KERN_WARNING, ha,
3557                                     "ISP error recovery failed - "
3558                                     "board disabled\n");
3559                                 /*
3560                                  * The next call disables the board
3561                                  * completely.
3562                                  */
3563                                 ha->isp_ops->reset_adapter(vha);
3564                                 vha->flags.online = 0;
3565                                 clear_bit(ISP_ABORT_RETRY,
3566                                     &vha->dpc_flags);
3567                                 rval = QLA_SUCCESS;
3568                         } else { /* schedule another ISP abort */
3569                                 ha->isp_abort_cnt--;
3570                                 DEBUG(qla_printk(KERN_INFO, ha,
3571                                     "qla%ld: ISP abort - retries remaining %d\n",
3572                                     vha->host_no, ha->isp_abort_cnt));
3573                                 rval = QLA_FUNCTION_FAILED;
3574                         }
3575                 } else {
3576                         ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3577                         DEBUG(qla_printk(KERN_INFO, ha,
3578                             "(%ld): ISP error recovery - retrying (%d) "
3579                             "more times\n", vha->host_no, ha->isp_abort_cnt));
3580                         set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3581                         rval = QLA_FUNCTION_FAILED;
3582                 }
3583         }
3584         return rval;
3585 }
3586
3587 /*
3588  *  qla82xx_fcoe_ctx_reset
3589  *      Performs a quick reset and aborts all outstanding commands.
3590  *      This performs only an FCoE context reset and avoids a full-blown
3591  *      chip reset.
3592  *
3593  * Input:
3594  *      vha = adapter block pointer.
3596  *
3597  * Returns:
3598  *      0 = success
3599  */
3600 int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
3601 {
3602         int rval = QLA_FUNCTION_FAILED;
3603
3604         if (vha->flags.online) {
3605                 /* Abort all outstanding commands so they can be requeued later. */
3606                 qla2x00_abort_isp_cleanup(vha);
3607         }
3608
3609         /* Stop currently executing firmware.
3610          * This will destroy existing FCoE context at the F/W end.
3611          */
3612         qla2x00_try_to_stop_firmware(vha);
3613
3614         /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
3615         rval = qla82xx_restart_isp(vha);
3616
3617         return rval;
3618 }
3619
3620 /*
3621  * qla2x00_wait_for_fcoe_ctx_reset
3622  *    Wait until the FCoE context is reset.
3623  *
3624  * Note:
3625  *    Does context switching here.
3626  *    Release SPIN_LOCK (if any) before calling this routine.
3627  *
3628  * Return:
3629  *    Success (fcoe_ctx reset is done) : 0
3630  *    Failed  (fcoe_ctx reset not completed within max loop timeout) : 1
3631  */
3632 int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
3633 {
3634         int status = QLA_FUNCTION_FAILED;
3635         unsigned long wait_reset;
3636
3637         wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
3638         while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3639             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
3640             && time_before(jiffies, wait_reset)) {
3641
3642                 set_current_state(TASK_UNINTERRUPTIBLE);
3643                 schedule_timeout(HZ);
3644
3645                 if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
3646                     !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3647                         status = QLA_SUCCESS;
3648                         break;
3649                 }
3650         }
3651         DEBUG2(printk(KERN_INFO
3652             "%s status=%d\n", __func__, status));
3653
3654         return status;
3655 }