[SCSI] qla2xxx: Implemented beacon on/off for ISP82XX.
[pandora-kernel.git] drivers/scsi/qla2xxx/qla_nx.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include <linux/delay.h>
9 #include <linux/pci.h>
10 #include <linux/ratelimit.h>
11 #include <linux/vmalloc.h>
12 #include <scsi/scsi_tcq.h>
13
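/*
 * Address-window helpers for the 2M PCI BAR mapping: MN_WIN() computes the
 * window value for the DDR (MN) side, OCM_WIN() for on-chip memory and
 * MS_WIN() for the QDR (MS) side.  qla82xx_pci_set_window() below uses them
 * to program the window CRB registers.
 */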
14 #define MASK(n)                 ((1ULL<<(n))-1)
15 #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
16         ((addr >> 25) & 0x3ff))
17 #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
18         ((addr >> 25) & 0x3ff))
19 #define MS_WIN(addr) (addr & 0x0ffc0000)
20 #define QLA82XX_PCI_MN_2M   (0)
21 #define QLA82XX_PCI_MS_2M   (0x80000)
22 #define QLA82XX_PCI_OCM0_2M (0xc0000)
23 #define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
24 #define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
25 #define BLOCK_PROTECT_BITS 0x0F
26
27 /* CRB window related */
28 #define CRB_BLK(off)    ((off >> 20) & 0x3f)
29 #define CRB_SUBBLK(off) ((off >> 16) & 0xf)
30 #define CRB_WINDOW_2M   (0x130060)
31 #define QLA82XX_PCI_CAMQM_2M_END        (0x04800800UL)
32 #define CRB_HI(off)     ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
33                         ((off) & 0xf0000))
34 #define QLA82XX_PCI_CAMQM_2M_BASE       (0x000ff800UL)
35 #define CRB_INDIRECT_2M (0x1e0000UL)
36
37 #define MAX_CRB_XFORM 60
38 static unsigned long crb_addr_xform[MAX_CRB_XFORM];
39 int qla82xx_crb_table_initialized;
40
41 #define qla82xx_crb_addr_transform(name) \
42         (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
43         QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
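/*
 * qla82xx_crb_addr_transform() stores, per CRB block name, the hub/agent
 * address shifted into bits 20 and above of crb_addr_xform[].  The table
 * filled in by qla82xx_crb_addr_transform_setup() is later searched by
 * qla82xx_decode_crb_addr() to map an internal CRB address back to a PCI
 * CRB offset.
 */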
44
45 static void qla82xx_crb_addr_transform_setup(void)
46 {
47         qla82xx_crb_addr_transform(XDMA);
48         qla82xx_crb_addr_transform(TIMR);
49         qla82xx_crb_addr_transform(SRE);
50         qla82xx_crb_addr_transform(SQN3);
51         qla82xx_crb_addr_transform(SQN2);
52         qla82xx_crb_addr_transform(SQN1);
53         qla82xx_crb_addr_transform(SQN0);
54         qla82xx_crb_addr_transform(SQS3);
55         qla82xx_crb_addr_transform(SQS2);
56         qla82xx_crb_addr_transform(SQS1);
57         qla82xx_crb_addr_transform(SQS0);
58         qla82xx_crb_addr_transform(RPMX7);
59         qla82xx_crb_addr_transform(RPMX6);
60         qla82xx_crb_addr_transform(RPMX5);
61         qla82xx_crb_addr_transform(RPMX4);
62         qla82xx_crb_addr_transform(RPMX3);
63         qla82xx_crb_addr_transform(RPMX2);
64         qla82xx_crb_addr_transform(RPMX1);
65         qla82xx_crb_addr_transform(RPMX0);
66         qla82xx_crb_addr_transform(ROMUSB);
67         qla82xx_crb_addr_transform(SN);
68         qla82xx_crb_addr_transform(QMN);
69         qla82xx_crb_addr_transform(QMS);
70         qla82xx_crb_addr_transform(PGNI);
71         qla82xx_crb_addr_transform(PGND);
72         qla82xx_crb_addr_transform(PGN3);
73         qla82xx_crb_addr_transform(PGN2);
74         qla82xx_crb_addr_transform(PGN1);
75         qla82xx_crb_addr_transform(PGN0);
76         qla82xx_crb_addr_transform(PGSI);
77         qla82xx_crb_addr_transform(PGSD);
78         qla82xx_crb_addr_transform(PGS3);
79         qla82xx_crb_addr_transform(PGS2);
80         qla82xx_crb_addr_transform(PGS1);
81         qla82xx_crb_addr_transform(PGS0);
82         qla82xx_crb_addr_transform(PS);
83         qla82xx_crb_addr_transform(PH);
84         qla82xx_crb_addr_transform(NIU);
85         qla82xx_crb_addr_transform(I2Q);
86         qla82xx_crb_addr_transform(EG);
87         qla82xx_crb_addr_transform(MN);
88         qla82xx_crb_addr_transform(MS);
89         qla82xx_crb_addr_transform(CAS2);
90         qla82xx_crb_addr_transform(CAS1);
91         qla82xx_crb_addr_transform(CAS0);
92         qla82xx_crb_addr_transform(CAM);
93         qla82xx_crb_addr_transform(C2C1);
94         qla82xx_crb_addr_transform(C2C0);
95         qla82xx_crb_addr_transform(SMB);
96         qla82xx_crb_addr_transform(OCM0);
97         /*
98          * Used only in P3; just define it for P2 also.
99          */
100         qla82xx_crb_addr_transform(I2C0);
101
102         qla82xx_crb_table_initialized = 1;
103 }
104
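/*
 * Direct-map table for 128M -> 2M CRB translation.  There is one entry per
 * 1M CRB block; each entry holds up to 16 64K sub-blocks of
 * {valid, start_128M, end_128M, start_2M}.  qla82xx_pci_get_crb_addr_2M()
 * uses it to translate a CRB offset straight into the 2M BAR without
 * touching the CRB window.
 */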
105 struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
106         {{{0, 0,         0,         0} } },
107         {{{1, 0x0100000, 0x0102000, 0x120000},
108         {1, 0x0110000, 0x0120000, 0x130000},
109         {1, 0x0120000, 0x0122000, 0x124000},
110         {1, 0x0130000, 0x0132000, 0x126000},
111         {1, 0x0140000, 0x0142000, 0x128000},
112         {1, 0x0150000, 0x0152000, 0x12a000},
113         {1, 0x0160000, 0x0170000, 0x110000},
114         {1, 0x0170000, 0x0172000, 0x12e000},
115         {0, 0x0000000, 0x0000000, 0x000000},
116         {0, 0x0000000, 0x0000000, 0x000000},
117         {0, 0x0000000, 0x0000000, 0x000000},
118         {0, 0x0000000, 0x0000000, 0x000000},
119         {0, 0x0000000, 0x0000000, 0x000000},
120         {0, 0x0000000, 0x0000000, 0x000000},
121         {1, 0x01e0000, 0x01e0800, 0x122000},
122         {0, 0x0000000, 0x0000000, 0x000000} } } ,
123         {{{1, 0x0200000, 0x0210000, 0x180000} } },
124         {{{0, 0,         0,         0} } },
125         {{{1, 0x0400000, 0x0401000, 0x169000} } },
126         {{{1, 0x0500000, 0x0510000, 0x140000} } },
127         {{{1, 0x0600000, 0x0610000, 0x1c0000} } },
128         {{{1, 0x0700000, 0x0704000, 0x1b8000} } },
129         {{{1, 0x0800000, 0x0802000, 0x170000},
130         {0, 0x0000000, 0x0000000, 0x000000},
131         {0, 0x0000000, 0x0000000, 0x000000},
132         {0, 0x0000000, 0x0000000, 0x000000},
133         {0, 0x0000000, 0x0000000, 0x000000},
134         {0, 0x0000000, 0x0000000, 0x000000},
135         {0, 0x0000000, 0x0000000, 0x000000},
136         {0, 0x0000000, 0x0000000, 0x000000},
137         {0, 0x0000000, 0x0000000, 0x000000},
138         {0, 0x0000000, 0x0000000, 0x000000},
139         {0, 0x0000000, 0x0000000, 0x000000},
140         {0, 0x0000000, 0x0000000, 0x000000},
141         {0, 0x0000000, 0x0000000, 0x000000},
142         {0, 0x0000000, 0x0000000, 0x000000},
143         {0, 0x0000000, 0x0000000, 0x000000},
144         {1, 0x08f0000, 0x08f2000, 0x172000} } },
145         {{{1, 0x0900000, 0x0902000, 0x174000},
146         {0, 0x0000000, 0x0000000, 0x000000},
147         {0, 0x0000000, 0x0000000, 0x000000},
148         {0, 0x0000000, 0x0000000, 0x000000},
149         {0, 0x0000000, 0x0000000, 0x000000},
150         {0, 0x0000000, 0x0000000, 0x000000},
151         {0, 0x0000000, 0x0000000, 0x000000},
152         {0, 0x0000000, 0x0000000, 0x000000},
153         {0, 0x0000000, 0x0000000, 0x000000},
154         {0, 0x0000000, 0x0000000, 0x000000},
155         {0, 0x0000000, 0x0000000, 0x000000},
156         {0, 0x0000000, 0x0000000, 0x000000},
157         {0, 0x0000000, 0x0000000, 0x000000},
158         {0, 0x0000000, 0x0000000, 0x000000},
159         {0, 0x0000000, 0x0000000, 0x000000},
160         {1, 0x09f0000, 0x09f2000, 0x176000} } },
161         {{{0, 0x0a00000, 0x0a02000, 0x178000},
162         {0, 0x0000000, 0x0000000, 0x000000},
163         {0, 0x0000000, 0x0000000, 0x000000},
164         {0, 0x0000000, 0x0000000, 0x000000},
165         {0, 0x0000000, 0x0000000, 0x000000},
166         {0, 0x0000000, 0x0000000, 0x000000},
167         {0, 0x0000000, 0x0000000, 0x000000},
168         {0, 0x0000000, 0x0000000, 0x000000},
169         {0, 0x0000000, 0x0000000, 0x000000},
170         {0, 0x0000000, 0x0000000, 0x000000},
171         {0, 0x0000000, 0x0000000, 0x000000},
172         {0, 0x0000000, 0x0000000, 0x000000},
173         {0, 0x0000000, 0x0000000, 0x000000},
174         {0, 0x0000000, 0x0000000, 0x000000},
175         {0, 0x0000000, 0x0000000, 0x000000},
176         {1, 0x0af0000, 0x0af2000, 0x17a000} } },
177         {{{0, 0x0b00000, 0x0b02000, 0x17c000},
178         {0, 0x0000000, 0x0000000, 0x000000},
179         {0, 0x0000000, 0x0000000, 0x000000},
180         {0, 0x0000000, 0x0000000, 0x000000},
181         {0, 0x0000000, 0x0000000, 0x000000},
182         {0, 0x0000000, 0x0000000, 0x000000},
183         {0, 0x0000000, 0x0000000, 0x000000},
184         {0, 0x0000000, 0x0000000, 0x000000},
185         {0, 0x0000000, 0x0000000, 0x000000},
186         {0, 0x0000000, 0x0000000, 0x000000},
187         {0, 0x0000000, 0x0000000, 0x000000},
188         {0, 0x0000000, 0x0000000, 0x000000},
189         {0, 0x0000000, 0x0000000, 0x000000},
190         {0, 0x0000000, 0x0000000, 0x000000},
191         {0, 0x0000000, 0x0000000, 0x000000},
192         {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
193         {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
194         {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
195         {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
196         {{{1, 0x0f00000, 0x0f01000, 0x164000} } },
197         {{{0, 0x1000000, 0x1004000, 0x1a8000} } },
198         {{{1, 0x1100000, 0x1101000, 0x160000} } },
199         {{{1, 0x1200000, 0x1201000, 0x161000} } },
200         {{{1, 0x1300000, 0x1301000, 0x162000} } },
201         {{{1, 0x1400000, 0x1401000, 0x163000} } },
202         {{{1, 0x1500000, 0x1501000, 0x165000} } },
203         {{{1, 0x1600000, 0x1601000, 0x166000} } },
204         {{{0, 0,         0,         0} } },
205         {{{0, 0,         0,         0} } },
206         {{{0, 0,         0,         0} } },
207         {{{0, 0,         0,         0} } },
208         {{{0, 0,         0,         0} } },
209         {{{0, 0,         0,         0} } },
210         {{{1, 0x1d00000, 0x1d10000, 0x190000} } },
211         {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
212         {{{1, 0x1f00000, 0x1f10000, 0x150000} } },
213         {{{0} } },
214         {{{1, 0x2100000, 0x2102000, 0x120000},
215         {1, 0x2110000, 0x2120000, 0x130000},
216         {1, 0x2120000, 0x2122000, 0x124000},
217         {1, 0x2130000, 0x2132000, 0x126000},
218         {1, 0x2140000, 0x2142000, 0x128000},
219         {1, 0x2150000, 0x2152000, 0x12a000},
220         {1, 0x2160000, 0x2170000, 0x110000},
221         {1, 0x2170000, 0x2172000, 0x12e000},
222         {0, 0x0000000, 0x0000000, 0x000000},
223         {0, 0x0000000, 0x0000000, 0x000000},
224         {0, 0x0000000, 0x0000000, 0x000000},
225         {0, 0x0000000, 0x0000000, 0x000000},
226         {0, 0x0000000, 0x0000000, 0x000000},
227         {0, 0x0000000, 0x0000000, 0x000000},
228         {0, 0x0000000, 0x0000000, 0x000000},
229         {0, 0x0000000, 0x0000000, 0x000000} } },
230         {{{1, 0x2200000, 0x2204000, 0x1b0000} } },
231         {{{0} } },
232         {{{0} } },
233         {{{0} } },
234         {{{0} } },
235         {{{0} } },
236         {{{1, 0x2800000, 0x2804000, 0x1a4000} } },
237         {{{1, 0x2900000, 0x2901000, 0x16b000} } },
238         {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
239         {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
240         {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
241         {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
242         {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
243         {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
244         {{{1, 0x3000000, 0x3000400, 0x1adc00} } },
245         {{{0, 0x3100000, 0x3104000, 0x1a8000} } },
246         {{{1, 0x3200000, 0x3204000, 0x1d4000} } },
247         {{{1, 0x3300000, 0x3304000, 0x1a0000} } },
248         {{{0} } },
249         {{{1, 0x3500000, 0x3500400, 0x1ac000} } },
250         {{{1, 0x3600000, 0x3600400, 0x1ae000} } },
251         {{{1, 0x3700000, 0x3700400, 0x1ae400} } },
252         {{{1, 0x3800000, 0x3804000, 0x1d0000} } },
253         {{{1, 0x3900000, 0x3904000, 0x1b4000} } },
254         {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
255         {{{0} } },
256         {{{0} } },
257         {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
258         {{{1, 0x3e00000, 0x3e01000, 0x167000} } },
259         {{{1, 0x3f00000, 0x3f01000, 0x168000} } }
260 };
261
262 /*
263  * top 12 bits of crb internal address (hub, agent)
264  */
265 unsigned qla82xx_crb_hub_agt[64] = {
266         0,
267         QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
268         QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
269         QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
270         0,
271         QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
272         QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
273         QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
274         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
275         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
276         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
277         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
278         QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
279         QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
280         QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
281         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
282         QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
283         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
284         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
285         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
286         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
287         QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
288         QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
289         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
290         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
291         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
292         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
293         0,
294         QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
295         QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
296         0,
297         QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
298         0,
299         QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
300         QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
301         0,
302         0,
303         0,
304         0,
305         0,
306         QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
307         0,
308         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
309         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
310         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
311         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
312         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
313         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
314         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
315         QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
316         QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
317         QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
318         0,
319         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
320         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
321         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
322         QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
323         0,
324         QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
325         QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
326         QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
327         0,
328         QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
329         0,
330 };
331
332 /* Device states */
333 char *q_dev_state[] = {
334         "Unknown",
335         "Cold",
336         "Initializing",
337         "Ready",
338         "Need Reset",
339         "Need Quiescent",
340         "Failed",
341         "Quiescent",
342 };
343
344 char *qdev_state(uint32_t dev_state)
345 {
346         return q_dev_state[dev_state];
347 }
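/*
 * Note: qdev_state() indexes q_dev_state[] directly, so callers are
 * expected to pass a device state in the 0-7 range covered by the table.
 */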
348
349 /*
350  * In: 'off' is offset from CRB space in 128M pci map
351  * Out: 'off' is 2M pci map addr
352  * side effect: lock crb window
353  */
354 static void
355 qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
356 {
357         u32 win_read;
358         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
359
360         ha->crb_win = CRB_HI(*off);
361         writel(ha->crb_win,
362                 (void *)(CRB_WINDOW_2M + ha->nx_pcibase));
363
364         /* Read back value to make sure write has gone through before trying
365          * to use it.
366          */
367         win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
368         if (win_read != ha->crb_win) {
369                 ql_dbg(ql_dbg_p3p, vha, 0xb000,
370                     "%s: Written crbwin (0x%x) "
371                     "!= Read crbwin (0x%x), off=0x%lx.\n",
372                     __func__, ha->crb_win, win_read, *off);
373         }
374         *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
375 }
376
377 static inline unsigned long
378 qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
379 {
380         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
381         /* See if we are currently pointing to the region we want to use next */
382         if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
383                 /* No need to change window. PCIX and PCIE regs
384                  * are in both windows.
385                  */
386                 return off;
387         }
388
389         if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
390                 /* We are in first CRB window */
391                 if (ha->curr_window != 0)
392                         WARN_ON(1);
393                 return off;
394         }
395
396         if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
397                 /* We are in second CRB window */
398                 off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
399
400                 if (ha->curr_window != 1)
401                         return off;
402
403                 /* We are in the QM or direct access
404                  * register region - do nothing
405                  */
406                 if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
407                         (off < QLA82XX_PCI_CAMQM_MAX))
408                         return off;
409         }
410         /* strange address given */
411         ql_dbg(ql_dbg_p3p, vha, 0xb001,
412             "%s: Warning: unm_nic_pci_set_crbwindow "
413             "called with an unknown address(%llx).\n",
414             QLA2XXX_DRIVER_NAME, off);
415         return off;
416 }
417
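/*
 * Translate a 128M-map CRB offset for use with the 2M BAR.  Returns -1 for
 * an invalid offset, 0 when *off has been converted to a directly mapped
 * address, and 1 when the caller must program the CRB window first (see
 * qla82xx_rd_32()/qla82xx_wr_32()).
 */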
418 static int
419 qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
420 {
421         struct crb_128M_2M_sub_block_map *m;
422
423         if (*off >= QLA82XX_CRB_MAX)
424                 return -1;
425
426         if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
427                 *off = (*off - QLA82XX_PCI_CAMQM) +
428                     QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
429                 return 0;
430         }
431
432         if (*off < QLA82XX_PCI_CRBSPACE)
433                 return -1;
434
435         *off -= QLA82XX_PCI_CRBSPACE;
436
437         /* Try direct map */
438         m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
439
440         if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
441                 *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
442                 return 0;
443         }
444         /* Not in direct map, use crb window */
445         return 1;
446 }
447
448 #define CRB_WIN_LOCK_TIMEOUT 100000000
449 static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
450 {
451         int done = 0, timeout = 0;
452
453         while (!done) {
454                 /* acquire semaphore7 from PCI HW block */
455                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
456                 if (done == 1)
457                         break;
458                 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
459                         return -1;
460                 timeout++;
461         }
462         qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
463         return 0;
464 }
465
466 int
467 qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
468 {
469         unsigned long flags = 0;
470         int rv;
471
472         rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
473
474         BUG_ON(rv == -1);
475
476         if (rv == 1) {
477                 write_lock_irqsave(&ha->hw_lock, flags);
478                 qla82xx_crb_win_lock(ha);
479                 qla82xx_pci_set_crbwindow_2M(ha, &off);
480         }
481
482         writel(data, (void __iomem *)off);
483
484         if (rv == 1) {
485                 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
486                 write_unlock_irqrestore(&ha->hw_lock, flags);
487         }
488         return 0;
489 }
490
491 int
492 qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
493 {
494         unsigned long flags = 0;
495         int rv;
496         u32 data;
497
498         rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
499
500         BUG_ON(rv == -1);
501
502         if (rv == 1) {
503                 write_lock_irqsave(&ha->hw_lock, flags);
504                 qla82xx_crb_win_lock(ha);
505                 qla82xx_pci_set_crbwindow_2M(ha, &off);
506         }
507         data = RD_REG_DWORD((void __iomem *)off);
508
509         if (rv == 1) {
510                 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
511                 write_unlock_irqrestore(&ha->hw_lock, flags);
512         }
513         return data;
514 }
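/*
 * qla82xx_rd_32()/qla82xx_wr_32() are the generic CRB register accessors
 * used throughout this driver, e.g. (illustrative only):
 *
 *      dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
 *      qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
 *
 * For offsets that are not directly mapped they take hw_lock and the CRB
 * window semaphore (SEM7) internally.
 */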
515
516 #define IDC_LOCK_TIMEOUT 100000000
517 int qla82xx_idc_lock(struct qla_hw_data *ha)
518 {
519         int i;
520         int done = 0, timeout = 0;
521
522         while (!done) {
523                 /* acquire semaphore5 from PCI HW block */
524                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
525                 if (done == 1)
526                         break;
527                 if (timeout >= IDC_LOCK_TIMEOUT)
528                         return -1;
529
530                 timeout++;
531
532                 /* Yield CPU */
533                 if (!in_interrupt())
534                         schedule();
535                 else {
536                         for (i = 0; i < 20; i++)
537                                 cpu_relax();
538                 }
539         }
540
541         return 0;
542 }
543
544 void qla82xx_idc_unlock(struct qla_hw_data *ha)
545 {
546         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
547 }
548
549 /*  PCI Windowing for DDR regions.  */
550 #define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
551         (((addr) <= (high)) && ((addr) >= (low)))
552 /*
553  * Check memory access boundary.
554  * Used by the test agent; supports DDR access only for now.
555  */
556 static unsigned long
557 qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
558         unsigned long long addr, int size)
559 {
560         if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
561                 QLA82XX_ADDR_DDR_NET_MAX) ||
562                 !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
563                 QLA82XX_ADDR_DDR_NET_MAX) ||
564                 ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
565                         return 0;
566         else
567                 return 1;
568 }
569
570 int qla82xx_pci_set_window_warning_count;
571
572 static unsigned long
573 qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
574 {
575         int window;
576         u32 win_read;
577         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
578
579         if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
580                 QLA82XX_ADDR_DDR_NET_MAX)) {
581                 /* DDR network side */
582                 window = MN_WIN(addr);
583                 ha->ddr_mn_window = window;
584                 qla82xx_wr_32(ha,
585                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
586                 win_read = qla82xx_rd_32(ha,
587                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
588                 if ((win_read << 17) != window) {
589                         ql_dbg(ql_dbg_p3p, vha, 0xb003,
590                             "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n",
591                             __func__, window, win_read);
592                 }
593                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
594         } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
595                 QLA82XX_ADDR_OCM0_MAX)) {
596                 unsigned int temp1;
597                 if ((addr & 0x00ff800) == 0xff800) {
598                         ql_log(ql_log_warn, vha, 0xb004,
599                             "%s: QM access not handled.\n", __func__);
600                         addr = -1UL;
601                 }
602                 window = OCM_WIN(addr);
603                 ha->ddr_mn_window = window;
604                 qla82xx_wr_32(ha,
605                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
606                 win_read = qla82xx_rd_32(ha,
607                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
608                 temp1 = ((window & 0x1FF) << 7) |
609                     ((window & 0x0FFFE0000) >> 17);
610                 if (win_read != temp1) {
611                         ql_log(ql_log_warn, vha, 0xb005,
612                             "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n",
613                             __func__, temp1, win_read);
614                 }
615                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
616
617         } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
618                 QLA82XX_P3_ADDR_QDR_NET_MAX)) {
619                 /* QDR network side */
620                 window = MS_WIN(addr);
621                 ha->qdr_sn_window = window;
622                 qla82xx_wr_32(ha,
623                         ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
624                 win_read = qla82xx_rd_32(ha,
625                         ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
626                 if (win_read != window) {
627                         ql_log(ql_log_warn, vha, 0xb006,
628                             "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n",
629                             __func__, window, win_read);
630                 }
631                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
632         } else {
633                 /*
634                  * peg gdb frequently accesses memory that doesn't exist,
635                  * this limits the chit chat so debugging isn't slowed down.
636                  */
637                 if ((qla82xx_pci_set_window_warning_count++ < 8) ||
638                     (qla82xx_pci_set_window_warning_count%64 == 0)) {
639                         ql_log(ql_log_warn, vha, 0xb007,
640                             "%s: Warning: %s Unknown address range.\n",
641                             __func__, QLA2XXX_DRIVER_NAME);
642                 }
643                 addr = -1UL;
644         }
645         return addr;
646 }
647
648 /* check if address is in the same window as the previous access */
649 static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
650         unsigned long long addr)
651 {
652         int                     window;
653         unsigned long long      qdr_max;
654
655         qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
656
657         /* DDR network side */
658         if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
659                 QLA82XX_ADDR_DDR_NET_MAX))
660                 BUG();
661         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
662                 QLA82XX_ADDR_OCM0_MAX))
663                 return 1;
664         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
665                 QLA82XX_ADDR_OCM1_MAX))
666                 return 1;
667         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
668                 /* QDR network side */
669                 window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
670                 if (ha->qdr_sn_window == window)
671                         return 1;
672         }
673         return 0;
674 }
675
676 static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
677         u64 off, void *data, int size)
678 {
679         unsigned long   flags;
680         void           *addr = NULL;
681         int             ret = 0;
682         u64             start;
683         uint8_t         *mem_ptr = NULL;
684         unsigned long   mem_base;
685         unsigned long   mem_page;
686         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
687
688         write_lock_irqsave(&ha->hw_lock, flags);
689
690         /*
691          * If attempting to access unknown address or straddle hw windows,
692          * do not access.
693          */
694         start = qla82xx_pci_set_window(ha, off);
695         if ((start == -1UL) ||
696                 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
697                 write_unlock_irqrestore(&ha->hw_lock, flags);
698                 ql_log(ql_log_fatal, vha, 0xb008,
699                     "%s out of bound pci memory "
700                     "access, offset is 0x%llx.\n",
701                     QLA2XXX_DRIVER_NAME, off);
702                 return -1;
703         }
704
705         write_unlock_irqrestore(&ha->hw_lock, flags);
706         mem_base = pci_resource_start(ha->pdev, 0);
707         mem_page = start & PAGE_MASK;
708         /* Map two pages whenever user tries to access addresses in two
709          * consecutive pages.
710          */
711         if (mem_page != ((start + size - 1) & PAGE_MASK))
712                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
713         else
714                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
715         if (mem_ptr == 0UL) {
716                 *(u8  *)data = 0;
717                 return -1;
718         }
719         addr = mem_ptr;
720         addr += start & (PAGE_SIZE - 1);
721         write_lock_irqsave(&ha->hw_lock, flags);
722
723         switch (size) {
724         case 1:
725                 *(u8  *)data = readb(addr);
726                 break;
727         case 2:
728                 *(u16 *)data = readw(addr);
729                 break;
730         case 4:
731                 *(u32 *)data = readl(addr);
732                 break;
733         case 8:
734                 *(u64 *)data = readq(addr);
735                 break;
736         default:
737                 ret = -1;
738                 break;
739         }
740         write_unlock_irqrestore(&ha->hw_lock, flags);
741
742         if (mem_ptr)
743                 iounmap(mem_ptr);
744         return ret;
745 }
746
747 static int
748 qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
749         u64 off, void *data, int size)
750 {
751         unsigned long   flags;
752         void           *addr = NULL;
753         int             ret = 0;
754         u64             start;
755         uint8_t         *mem_ptr = NULL;
756         unsigned long   mem_base;
757         unsigned long   mem_page;
758         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
759
760         write_lock_irqsave(&ha->hw_lock, flags);
761
762         /*
763          * If attempting to access unknown address or straddle hw windows,
764          * do not access.
765          */
766         start = qla82xx_pci_set_window(ha, off);
767         if ((start == -1UL) ||
768                 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
769                 write_unlock_irqrestore(&ha->hw_lock, flags);
770                 ql_log(ql_log_fatal, vha, 0xb009,
771                     "%s out of bound memory "
772                     "access, offset is 0x%llx.\n",
773                     QLA2XXX_DRIVER_NAME, off);
774                 return -1;
775         }
776
777         write_unlock_irqrestore(&ha->hw_lock, flags);
778         mem_base = pci_resource_start(ha->pdev, 0);
779         mem_page = start & PAGE_MASK;
780         /* Map two pages whenever user tries to access addresses in two
781          * consecutive pages.
782          */
783         if (mem_page != ((start + size - 1) & PAGE_MASK))
784                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
785         else
786                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
787         if (mem_ptr == 0UL)
788                 return -1;
789
790         addr = mem_ptr;
791         addr += start & (PAGE_SIZE - 1);
792         write_lock_irqsave(&ha->hw_lock, flags);
793
794         switch (size) {
795         case 1:
796                 writeb(*(u8  *)data, addr);
797                 break;
798         case 2:
799                 writew(*(u16 *)data, addr);
800                 break;
801         case 4:
802                 writel(*(u32 *)data, addr);
803                 break;
804         case 8:
805                 writeq(*(u64 *)data, addr);
806                 break;
807         default:
808                 ret = -1;
809                 break;
810         }
811         write_unlock_irqrestore(&ha->hw_lock, flags);
812         if (mem_ptr)
813                 iounmap(mem_ptr);
814         return ret;
815 }
816
817 #define MTU_FUDGE_FACTOR 100
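/*
 * Reverse of the crb_addr_xform[] transform: given an internal CRB address
 * (hub/agent in the upper bits, offset in the lower 20 bits), return the
 * matching 128M PCI CRB offset, or ADDR_ERROR if the agent is unknown.
 */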
818 static unsigned long
819 qla82xx_decode_crb_addr(unsigned long addr)
820 {
821         int i;
822         unsigned long base_addr, offset, pci_base;
823
824         if (!qla82xx_crb_table_initialized)
825                 qla82xx_crb_addr_transform_setup();
826
827         pci_base = ADDR_ERROR;
828         base_addr = addr & 0xfff00000;
829         offset = addr & 0x000fffff;
830
831         for (i = 0; i < MAX_CRB_XFORM; i++) {
832                 if (crb_addr_xform[i] == base_addr) {
833                         pci_base = i << 20;
834                         break;
835                 }
836         }
837         if (pci_base == ADDR_ERROR)
838                 return pci_base;
839         return pci_base + offset;
840 }
841
842 static long rom_max_timeout = 100;
843 static long qla82xx_rom_lock_timeout = 100;
844
845 static int
846 qla82xx_rom_lock(struct qla_hw_data *ha)
847 {
848         int done = 0, timeout = 0;
849
850         while (!done) {
851                 /* acquire semaphore2 from PCI HW block */
852                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
853                 if (done == 1)
854                         break;
855                 if (timeout >= qla82xx_rom_lock_timeout)
856                         return -1;
857                 timeout++;
858         }
859         qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
860         return 0;
861 }
862
863 static void
864 qla82xx_rom_unlock(struct qla_hw_data *ha)
865 {
866         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
867 }
868
869 static int
870 qla82xx_wait_rom_busy(struct qla_hw_data *ha)
871 {
872         long timeout = 0;
873         long done = 0;
874         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
875
876         while (done == 0) {
877                 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
878                 done &= 4;
879                 timeout++;
880                 if (timeout >= rom_max_timeout) {
881                         ql_dbg(ql_dbg_p3p, vha, 0xb00a,
882                             "%s: Timeout reached waiting for rom busy.\n",
883                             QLA2XXX_DRIVER_NAME);
884                         return -1;
885                 }
886         }
887         return 0;
888 }
889
890 static int
891 qla82xx_wait_rom_done(struct qla_hw_data *ha)
892 {
893         long timeout = 0;
894         long done = 0;
895         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
896
897         while (done == 0) {
898                 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
899                 done &= 2;
900                 timeout++;
901                 if (timeout >= rom_max_timeout) {
902                         ql_dbg(ql_dbg_p3p, vha, 0xb00b,
903                             "%s: Timeout reached waiting for rom done.\n",
904                             QLA2XXX_DRIVER_NAME);
905                         return -1;
906                 }
907         }
908         return 0;
909 }
910
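/*
 * Issue a serial-flash read through the ROMUSB block: program the flash
 * address, a 3-byte address count and instruction opcode 0xb (fast read),
 * wait for the ROM interface to go idle, then fetch the word from
 * ROM_RDATA.  Callers hold the SEM2 ROM lock (see qla82xx_rom_fast_read()).
 */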
911 static int
912 qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
913 {
914         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
915
916         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
917         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
918         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
919         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
920         qla82xx_wait_rom_busy(ha);
921         if (qla82xx_wait_rom_done(ha)) {
922                 ql_log(ql_log_fatal, vha, 0x00ba,
923                     "Error waiting for rom done.\n");
924                 return -1;
925         }
926         /* Reset abyte_cnt and dummy_byte_cnt */
927         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
928         udelay(10);
929         cond_resched();
930         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
931         *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
932         return 0;
933 }
934
935 static int
936 qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
937 {
938         int ret, loops = 0;
939         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
940
941         while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
942                 udelay(100);
943                 schedule();
944                 loops++;
945         }
946         if (loops >= 50000) {
947                 ql_log(ql_log_fatal, vha, 0x00b9,
948                     "Failed to acquire SEM2 lock.\n");
949                 return -1;
950         }
951         ret = qla82xx_do_rom_fast_read(ha, addr, valp);
952         qla82xx_rom_unlock(ha);
953         return ret;
954 }
955
956 static int
957 qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
958 {
959         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
960         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
961         qla82xx_wait_rom_busy(ha);
962         if (qla82xx_wait_rom_done(ha)) {
963                 ql_log(ql_log_warn, vha, 0xb00c,
964                     "Error waiting for rom done.\n");
965                 return -1;
966         }
967         *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
968         return 0;
969 }
970
971 static int
972 qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
973 {
974         long timeout = 0;
975         uint32_t done = 1;
976         uint32_t val;
977         int ret = 0;
978         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
979
980         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
981         while ((done != 0) && (ret == 0)) {
982                 ret = qla82xx_read_status_reg(ha, &val);
983                 done = val & 1;
984                 timeout++;
985                 udelay(10);
986                 cond_resched();
987                 if (timeout >= 50000) {
988                         ql_log(ql_log_warn, vha, 0xb00d,
989                             "Timeout reached waiting for write finish.\n");
990                         return -1;
991                 }
992         }
993         return ret;
994 }
995
996 static int
997 qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
998 {
999         uint32_t val;
1000         qla82xx_wait_rom_busy(ha);
1001         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
1002         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
1003         qla82xx_wait_rom_busy(ha);
1004         if (qla82xx_wait_rom_done(ha))
1005                 return -1;
1006         if (qla82xx_read_status_reg(ha, &val) != 0)
1007                 return -1;
1008         if ((val & 2) != 2)
1009                 return -1;
1010         return 0;
1011 }
1012
1013 static int
1014 qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
1015 {
1016         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1017         if (qla82xx_flash_set_write_enable(ha))
1018                 return -1;
1019         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
1020         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
1021         if (qla82xx_wait_rom_done(ha)) {
1022                 ql_log(ql_log_warn, vha, 0xb00e,
1023                     "Error waiting for rom done.\n");
1024                 return -1;
1025         }
1026         return qla82xx_flash_wait_write_finish(ha);
1027 }
1028
1029 static int
1030 qla82xx_write_disable_flash(struct qla_hw_data *ha)
1031 {
1032         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1033         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
1034         if (qla82xx_wait_rom_done(ha)) {
1035                 ql_log(ql_log_warn, vha, 0xb00f,
1036                     "Error waiting for rom done.\n");
1037                 return -1;
1038         }
1039         return 0;
1040 }
1041
1042 static int
1043 ql82xx_rom_lock_d(struct qla_hw_data *ha)
1044 {
1045         int loops = 0;
1046         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1047
1048         while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1049                 udelay(100);
1050                 cond_resched();
1051                 loops++;
1052         }
1053         if (loops >= 50000) {
1054                 ql_log(ql_log_warn, vha, 0xb010,
1055                     "ROM lock failed.\n");
1056                 return -1;
1057         }
1058         return 0;
1059 }
1060
1061 static int
1062 qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1063         uint32_t data)
1064 {
1065         int ret = 0;
1066         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1067
1068         ret = ql82xx_rom_lock_d(ha);
1069         if (ret < 0) {
1070                 ql_log(ql_log_warn, vha, 0xb011,
1071                     "ROM lock failed.\n");
1072                 return ret;
1073         }
1074
1075         if (qla82xx_flash_set_write_enable(ha))
1076                 goto done_write;
1077
1078         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
1079         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
1080         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
1081         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
1082         qla82xx_wait_rom_busy(ha);
1083         if (qla82xx_wait_rom_done(ha)) {
1084                 ql_log(ql_log_warn, vha, 0xb012,
1085                     "Error waiting for rom done.\n");
1086                 ret = -1;
1087                 goto done_write;
1088         }
1089
1090         ret = qla82xx_flash_wait_write_finish(ha);
1091
1092 done_write:
1093         qla82xx_rom_unlock(ha);
1094         return ret;
1095 }
1096
1097 /* This routine performs the CRB initialization sequence
1098  * to put the ISP into an operational state.
1099  */
1100 static int
1101 qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1102 {
1103         int addr, val;
1104         int i ;
1105         struct crb_addr_pair *buf;
1106         unsigned long off;
1107         unsigned offset, n;
1108         struct qla_hw_data *ha = vha->hw;
1109
1110         struct crb_addr_pair {
1111                 long addr;
1112                 long data;
1113         };
1114
1115         /* Halt all the individual PEGs and other blocks of the ISP */
1116         qla82xx_rom_lock(ha);
1117
1118         /* disable all I2Q */
1119         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
1120         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
1121         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
1122         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
1123         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
1124         qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
1125
1126         /* disable all niu interrupts */
1127         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
1128         /* disable xge rx/tx */
1129         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
1130         /* disable xg1 rx/tx */
1131         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
1132         /* disable sideband mac */
1133         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
1134         /* disable ap0 mac */
1135         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
1136         /* disable ap1 mac */
1137         qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
1138
1139         /* halt sre */
1140         val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
1141         qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
1142
1143         /* halt epg */
1144         qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
1145
1146         /* halt timers */
1147         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
1148         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
1149         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
1150         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
1151         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
1152         qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
1153
1154         /* halt pegs */
1155         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
1156         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
1157         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
1158         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
1159         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
1160         msleep(20);
1161
1162         /* big hammer */
1163         if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
1164                 /* don't reset CAM block on reset */
1165                 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
1166         else
1167                 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
1168
1169         /* reset ms */
1170         val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
1171         val |= (1 << 1);
1172         qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1173         msleep(20);
1174
1175         /* unreset ms */
1176         val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
1177         val &= ~(1 << 1);
1178         qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1179         msleep(20);
1180
1181         qla82xx_rom_unlock(ha);
1182
1183         /* Read the signature value from the flash.
1184          * Offset 0: Contains signature (0xcafecafe)
1185          * Offset 4: Offset and number of addr/value pairs
1186          * that are present in the CRB initialization sequence
1187          */
1188         if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1189             qla82xx_rom_fast_read(ha, 4, &n) != 0) {
1190                 ql_log(ql_log_fatal, vha, 0x006e,
1191                     "Error Reading crb_init area: n: %08x.\n", n);
1192                 return -1;
1193         }
1194
1195         /* Offset in flash = lower 16 bits
1196          * Number of entries = upper 16 bits
1197          */
1198         offset = n & 0xffffU;
1199         n = (n >> 16) & 0xffffU;
1200
1201         /* number of addr/value pairs should not exceed 1024 entries */
1202         if (n >= 1024) {
1203                 ql_log(ql_log_fatal, vha, 0x0071,
1204                     "Card flash not initialized: n=0x%x.\n", n);
1205                 return -1;
1206         }
1207
1208         ql_log(ql_log_info, vha, 0x0072,
1209             "%d CRB init values found in ROM.\n", n);
1210
1211         buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
1212         if (buf == NULL) {
1213                 ql_log(ql_log_fatal, vha, 0x010c,
1214                     "Unable to allocate memory.\n");
1215                 return -1;
1216         }
1217
1218         for (i = 0; i < n; i++) {
1219                 if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
1220                     qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
1221                         kfree(buf);
1222                         return -1;
1223                 }
1224
1225                 buf[i].addr = addr;
1226                 buf[i].data = val;
1227         }
1228
1229         for (i = 0; i < n; i++) {
1230                 /* Translate internal CRB initialization
1231                  * address to PCI bus address
1232                  */
1233                 off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
1234                     QLA82XX_PCI_CRBSPACE;
1235                 /* Not all CRB addr/value pairs are to be written;
1236                  * some of them are skipped
1237                  */
1238
1239                 /* skipping cold reboot MAGIC */
1240                 if (off == QLA82XX_CAM_RAM(0x1fc))
1241                         continue;
1242
1243                 /* do not reset PCI */
1244                 if (off == (ROMUSB_GLB + 0xbc))
1245                         continue;
1246
1247                 /* skip core clock, so that firmware can increase the clock */
1248                 if (off == (ROMUSB_GLB + 0xc8))
1249                         continue;
1250
1251                 /* skip the function enable register */
1252                 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
1253                         continue;
1254
1255                 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
1256                         continue;
1257
1258                 if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
1259                         continue;
1260
1261                 if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
1262                         continue;
1263
1264                 if (off == ADDR_ERROR) {
1265                         ql_log(ql_log_fatal, vha, 0x0116,
1266                             "Unknown addr: 0x%08lx.\n", buf[i].addr);
1267                         continue;
1268                 }
1269
1270                 qla82xx_wr_32(ha, off, buf[i].data);
1271
1272                 /* ISP requires a much bigger delay to settle down,
1273                  * else crb_window returns 0xffffffff
1274                  */
1275                 if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
1276                         msleep(1000);
1277
1278                 /* ISP requires a millisecond delay between
1279                  * successive CRB register updates
1280                  */
1281                 msleep(1);
1282         }
1283
1284         kfree(buf);
1285
1286         /* Resetting the data and instruction cache */
1287         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
1288         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
1289         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
1290
1291         /* Clear all protocol processing engines */
1292         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
1293         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
1294         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
1295         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
1296         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
1297         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
1298         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
1299         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
1300         return 0;
1301 }
1302
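/*
 * Write 'size' bytes (1/2/4/8) to ISP memory through the MIU test agent:
 * the enclosing 16-byte-aligned data is read back first, the new bytes are
 * merged in at the right offset, and the result is written out through the
 * MIU_TEST_AGT_* registers.  QDR (MS) addresses go through the QDR CRB
 * block; other addresses use the DDR (MN) block, except that accesses
 * outside the DDR range (or with an unsupported size) fall back to
 * qla82xx_pci_mem_write_direct().
 */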
1303 static int
1304 qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1305                 u64 off, void *data, int size)
1306 {
1307         int i, j, ret = 0, loop, sz[2], off0;
1308         int scale, shift_amount, startword;
1309         uint32_t temp;
1310         uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
1311
1312         /*
1313          * If not MN, go check for MS or invalid.
1314          */
1315         if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1316                 mem_crb = QLA82XX_CRB_QDR_NET;
1317         else {
1318                 mem_crb = QLA82XX_CRB_DDR_NET;
1319                 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1320                         return qla82xx_pci_mem_write_direct(ha,
1321                             off, data, size);
1322         }
1323
1324         off0 = off & 0x7;
1325         sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1326         sz[1] = size - sz[0];
1327
1328         off8 = off & 0xfffffff0;
1329         loop = (((off & 0xf) + size - 1) >> 4) + 1;
1330         shift_amount = 4;
1331         scale = 2;
1332         startword = (off & 0xf)/8;
1333
1334         for (i = 0; i < loop; i++) {
1335                 if (qla82xx_pci_mem_read_2M(ha, off8 +
1336                     (i << shift_amount), &word[i * scale], 8))
1337                         return -1;
1338         }
1339
1340         switch (size) {
1341         case 1:
1342                 tmpw = *((uint8_t *)data);
1343                 break;
1344         case 2:
1345                 tmpw = *((uint16_t *)data);
1346                 break;
1347         case 4:
1348                 tmpw = *((uint32_t *)data);
1349                 break;
1350         case 8:
1351         default:
1352                 tmpw = *((uint64_t *)data);
1353                 break;
1354         }
1355
1356         if (sz[0] == 8) {
1357                 word[startword] = tmpw;
1358         } else {
1359                 word[startword] &=
1360                         ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1361                 word[startword] |= tmpw << (off0 * 8);
1362         }
1363         if (sz[1] != 0) {
1364                 word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1365                 word[startword+1] |= tmpw >> (sz[0] * 8);
1366         }
1367
1368         for (i = 0; i < loop; i++) {
1369                 temp = off8 + (i << shift_amount);
1370                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1371                 temp = 0;
1372                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1373                 temp = word[i * scale] & 0xffffffff;
1374                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1375                 temp = (word[i * scale] >> 32) & 0xffffffff;
1376                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1377                 temp = word[i*scale + 1] & 0xffffffff;
1378                 qla82xx_wr_32(ha, mem_crb +
1379                     MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
1380                 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1381                 qla82xx_wr_32(ha, mem_crb +
1382                     MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
1383
1384                 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1385                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1386                 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1387                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1388
1389                 for (j = 0; j < MAX_CTL_CHECK; j++) {
1390                         temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1391                         if ((temp & MIU_TA_CTL_BUSY) == 0)
1392                                 break;
1393                 }
1394
1395                 if (j >= MAX_CTL_CHECK) {
1396                         if (printk_ratelimit())
1397                                 dev_err(&ha->pdev->dev,
1398                                     "failed to write through agent.\n");
1399                         ret = -1;
1400                         break;
1401                 }
1402         }
1403
1404         return ret;
1405 }
1406
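/*
 * Copy the bootloader from flash (ha->flt_region_bootload) into ISP memory
 * at [BOOTLD_START, IMAGE_START), 8 bytes at a time, then poke the PEG 0
 * control and ROMUSB global SW reset registers to start it executing.
 */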
1407 static int
1408 qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
1409 {
1410         int  i;
1411         long size = 0;
1412         long flashaddr = ha->flt_region_bootload << 2;
1413         long memaddr = BOOTLD_START;
1414         u64 data;
1415         u32 high, low;
1416         size = (IMAGE_START - BOOTLD_START) / 8;
1417
1418         for (i = 0; i < size; i++) {
1419                 if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
1420                     (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
1421                         return -1;
1422                 }
1423                 data = ((u64)high << 32) | low;
1424                 qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
1425                 flashaddr += 8;
1426                 memaddr += 8;
1427
1428                 if (i % 0x1000 == 0)
1429                         msleep(1);
1430         }
1431         udelay(100);
1432         read_lock(&ha->hw_lock);
1433         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1434         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1435         read_unlock(&ha->hw_lock);
1436         return 0;
1437 }
1438
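/*
 * Read counterpart of qla82xx_pci_mem_write_2M(): fetch the enclosing
 * 16-byte-aligned words through the MIU test agent and extract the
 * requested 1/2/4/8 bytes.
 */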
1439 int
1440 qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1441                 u64 off, void *data, int size)
1442 {
1443         int i, j = 0, k, start, end, loop, sz[2], off0[2];
1444         int           shift_amount;
1445         uint32_t      temp;
1446         uint64_t      off8, val, mem_crb, word[2] = {0, 0};
1447
1448         /*
1449          * If not MN, go check for MS or invalid.
1450          */
1451
1452         if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1453                 mem_crb = QLA82XX_CRB_QDR_NET;
1454         else {
1455                 mem_crb = QLA82XX_CRB_DDR_NET;
1456                 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1457                         return qla82xx_pci_mem_read_direct(ha,
1458                             off, data, size);
1459         }
1460
1461         off8 = off & 0xfffffff0;
1462         off0[0] = off & 0xf;
1463         sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
1464         shift_amount = 4;
1465         loop = ((off0[0] + size - 1) >> shift_amount) + 1;
1466         off0[1] = 0;
1467         sz[1] = size - sz[0];
1468
1469         for (i = 0; i < loop; i++) {
1470                 temp = off8 + (i << shift_amount);
1471                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
1472                 temp = 0;
1473                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
1474                 temp = MIU_TA_CTL_ENABLE;
1475                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1476                 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
1477                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1478
1479                 for (j = 0; j < MAX_CTL_CHECK; j++) {
1480                         temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1481                         if ((temp & MIU_TA_CTL_BUSY) == 0)
1482                                 break;
1483                 }
1484
1485                 if (j >= MAX_CTL_CHECK) {
1486                         if (printk_ratelimit())
1487                                 dev_err(&ha->pdev->dev,
1488                                     "failed to read through agent.\n");
1489                         break;
1490                 }
1491
1492                 start = off0[i] >> 2;
1493                 end   = (off0[i] + sz[i] - 1) >> 2;
1494                 for (k = start; k <= end; k++) {
1495                         temp = qla82xx_rd_32(ha,
1496                                         mem_crb + MIU_TEST_AGT_RDDATA(k));
1497                         word[i] |= ((uint64_t)temp << (32 * (k & 1)));
1498                 }
1499         }
1500
1501         if (j >= MAX_CTL_CHECK)
1502                 return -1;
1503
1504         if ((off0[0] & 7) == 0) {
1505                 val = word[0];
1506         } else {
1507                 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
1508                         ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
1509         }
1510
1511         switch (size) {
1512         case 1:
1513                 *(uint8_t  *)data = val;
1514                 break;
1515         case 2:
1516                 *(uint16_t *)data = val;
1517                 break;
1518         case 4:
1519                 *(uint32_t *)data = val;
1520                 break;
1521         case 8:
1522                 *(uint64_t *)data = val;
1523                 break;
1524         }
1525         return 0;
1526 }
1527
1528
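/*
 * qla82xx_get_table_desc() - Walk the unified ROM image directory and
 * return the table descriptor whose type matches @section, or NULL if
 * no such entry exists.
 */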
1529 static struct qla82xx_uri_table_desc *
1530 qla82xx_get_table_desc(const u8 *unirom, int section)
1531 {
1532         uint32_t i;
1533         struct qla82xx_uri_table_desc *directory =
1534                 (struct qla82xx_uri_table_desc *)&unirom[0];
1535         __le32 offset;
1536         __le32 tab_type;
1537         __le32 entries = cpu_to_le32(directory->num_entries);
1538
1539         for (i = 0; i < entries; i++) {
1540                 offset = cpu_to_le32(directory->findex) +
1541                     (i * cpu_to_le32(directory->entry_size));
1542                 tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
1543
1544                 if (tab_type == section)
1545                         return (struct qla82xx_uri_table_desc *)&unirom[offset];
1546         }
1547
1548         return NULL;
1549 }
1550
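/*
 * qla82xx_get_data_desc() - Return the data descriptor for @section,
 * selected by the index stored at ha->file_prd_off + @idx_offset in the
 * unified ROM image, or NULL if the section table is missing.
 */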
1551 static struct qla82xx_uri_data_desc *
1552 qla82xx_get_data_desc(struct qla_hw_data *ha,
1553         u32 section, u32 idx_offset)
1554 {
1555         const u8 *unirom = ha->hablob->fw->data;
1556         int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
1557         struct qla82xx_uri_table_desc *tab_desc = NULL;
1558         __le32 offset;
1559
1560         tab_desc = qla82xx_get_table_desc(unirom, section);
1561         if (!tab_desc)
1562                 return NULL;
1563
1564         offset = cpu_to_le32(tab_desc->findex) +
1565             (cpu_to_le32(tab_desc->entry_size) * idx);
1566
1567         return (struct qla82xx_uri_data_desc *)&unirom[offset];
1568 }
1569
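/*
 * qla82xx_get_bootld_offset() - Return a pointer to the bootloader image
 * inside the firmware blob: the URI bootld descriptor offset for unified
 * ROM images, or the fixed BOOTLD_START offset otherwise.
 */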
1570 static u8 *
1571 qla82xx_get_bootld_offset(struct qla_hw_data *ha)
1572 {
1573         u32 offset = BOOTLD_START;
1574         struct qla82xx_uri_data_desc *uri_desc = NULL;
1575
1576         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1577                 uri_desc = qla82xx_get_data_desc(ha,
1578                     QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
1579                 if (uri_desc)
1580                         offset = cpu_to_le32(uri_desc->findex);
1581         }
1582
1583         return (u8 *)&ha->hablob->fw->data[offset];
1584 }
1585
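/*
 * qla82xx_get_fw_size() - Return the firmware image size (little-endian),
 * taken from the URI firmware descriptor for unified ROM images or from
 * FW_SIZE_OFFSET in a flash-layout blob.
 */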
1586 static __le32
1587 qla82xx_get_fw_size(struct qla_hw_data *ha)
1588 {
1589         struct qla82xx_uri_data_desc *uri_desc = NULL;
1590
1591         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1592                 uri_desc =  qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1593                     QLA82XX_URI_FIRMWARE_IDX_OFF);
1594                 if (uri_desc)
1595                         return cpu_to_le32(uri_desc->size);
1596         }
1597
1598         return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
1599 }
1600
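/*
 * qla82xx_get_fw_offs() - Return a pointer to the firmware image inside
 * the blob, using the URI firmware descriptor when available and the
 * fixed IMAGE_START offset otherwise.
 */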
1601 static u8 *
1602 qla82xx_get_fw_offs(struct qla_hw_data *ha)
1603 {
1604         u32 offset = IMAGE_START;
1605         struct qla82xx_uri_data_desc *uri_desc = NULL;
1606
1607         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1608                 uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1609                         QLA82XX_URI_FIRMWARE_IDX_OFF);
1610                 if (uri_desc)
1611                         offset = cpu_to_le32(uri_desc->findex);
1612         }
1613
1614         return (u8 *)&ha->hablob->fw->data[offset];
1615 }
1616
1617 /* PCI related functions */
1618 char *
1619 qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
1620 {
1621         int pcie_reg;
1622         struct qla_hw_data *ha = vha->hw;
1623         char lwstr[6];
1624         uint16_t lnk;
1625
1626         pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
1627         pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
1628         ha->link_width = (lnk >> 4) & 0x3f;
1629
1630         strcpy(str, "PCIe (");
1631         strcat(str, "2.5Gb/s ");
1632         snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
1633         strcat(str, lwstr);
1634         return str;
1635 }
1636
1637 int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
1638 {
1639         unsigned long val = 0;
1640         u32 control;
1641
1642         switch (region) {
1643         case 0:
1644                 val = 0;
1645                 break;
1646         case 1:
1647                 pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
1648                 val = control + QLA82XX_MSIX_TBL_SPACE;
1649                 break;
1650         }
1651         return val;
1652 }
1653
1654
1655 int
1656 qla82xx_iospace_config(struct qla_hw_data *ha)
1657 {
1658         uint32_t len = 0;
1659
1660         if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
1661                 ql_log_pci(ql_log_fatal, ha->pdev, 0x000c,
1662                     "Failed to reserve selected regions.\n");
1663                 goto iospace_error_exit;
1664         }
1665
1666         /* Use MMIO operations for all accesses. */
1667         if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1668                 ql_log_pci(ql_log_fatal, ha->pdev, 0x000d,
1669                     "Region #0 not an MMIO resource, aborting.\n");
1670                 goto iospace_error_exit;
1671         }
1672
1673         len = pci_resource_len(ha->pdev, 0);
1674         ha->nx_pcibase =
1675             (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
1676         if (!ha->nx_pcibase) {
1677                 ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
1678                     "Cannot remap pcibase MMIO, aborting.\n");
1679                 pci_release_regions(ha->pdev);
1680                 goto iospace_error_exit;
1681         }
1682
1683         /* Mapping of IO base pointer */
1684         ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
1685             0xbc000 + (ha->pdev->devfn << 11));
1686
1687         if (!ql2xdbwr) {
1688                 ha->nxdb_wr_ptr =
1689                     (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
1690                     (ha->pdev->devfn << 12)), 4);
1691                 if (!ha->nxdb_wr_ptr) {
1692                         ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
1693                             "Cannot remap MMIO, aborting.\n");
1694                         pci_release_regions(ha->pdev);
1695                         goto iospace_error_exit;
1696                 }
1697
1698                 /* Mapping of IO base pointer,
1699                  * door bell read and write pointer
1700                  */
1701                 ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
1702                     (ha->pdev->devfn * 8);
1703         } else {
1704                 ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
1705                         QLA82XX_CAMRAM_DB1 :
1706                         QLA82XX_CAMRAM_DB2);
1707         }
1708
1709         ha->max_req_queues = ha->max_rsp_queues = 1;
1710         ha->msix_count = ha->max_rsp_queues + 1;
1711         ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
1712             "nx_pci_base=%p iobase=%p "
1713             "max_req_queues=%d msix_count=%d.\n",
1714             ha->nx_pcibase, ha->iobase,
1715             ha->max_req_queues, ha->msix_count);
1716         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
1717             "nx_pci_base=%p iobase=%p "
1718             "max_req_queues=%d msix_count=%d.\n",
1719             ha->nx_pcibase, ha->iobase,
1720             ha->max_req_queues, ha->msix_count);
1721         return 0;
1722
1723 iospace_error_exit:
1724         return -ENOMEM;
1725 }
1726
1727 /* GS related functions */
1728
1729 /* Initialization related functions */
1730
1731 /**
1732  * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
1733  * @vha: HA context
1734  *
1735  * Returns 0 on success.
1736  */
1737 int
1738 qla82xx_pci_config(scsi_qla_host_t *vha)
1739 {
1740         struct qla_hw_data *ha = vha->hw;
1741         int ret;
1742
1743         pci_set_master(ha->pdev);
1744         ret = pci_set_mwi(ha->pdev);
1745         ha->chip_revision = ha->pdev->revision;
1746         ql_dbg(ql_dbg_init, vha, 0x0043,
1747             "Chip revision:%d.\n",
1748             ha->chip_revision);
1749         return 0;
1750 }
1751
1752 /**
1753  * qla82xx_reset_chip() - Disable adapter interrupts prior to an ISP82xx reset.
1754  * @vha: HA context
1757  */
1758 void
1759 qla82xx_reset_chip(scsi_qla_host_t *vha)
1760 {
1761         struct qla_hw_data *ha = vha->hw;
1762         ha->isp_ops->disable_intrs(ha);
1763 }
1764
1765 void qla82xx_config_rings(struct scsi_qla_host *vha)
1766 {
1767         struct qla_hw_data *ha = vha->hw;
1768         struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1769         struct init_cb_81xx *icb;
1770         struct req_que *req = ha->req_q_map[0];
1771         struct rsp_que *rsp = ha->rsp_q_map[0];
1772
1773         /* Setup ring parameters in initialization control block. */
1774         icb = (struct init_cb_81xx *)ha->init_cb;
1775         icb->request_q_outpointer = __constant_cpu_to_le16(0);
1776         icb->response_q_inpointer = __constant_cpu_to_le16(0);
1777         icb->request_q_length = cpu_to_le16(req->length);
1778         icb->response_q_length = cpu_to_le16(rsp->length);
1779         icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1780         icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1781         icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1782         icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1783
1784         WRT_REG_DWORD((unsigned long  __iomem *)&reg->req_q_out[0], 0);
1785         WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_in[0], 0);
1786         WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_out[0], 0);
1787 }
1788
1789 void qla82xx_reset_adapter(struct scsi_qla_host *vha)
1790 {
1791         struct qla_hw_data *ha = vha->hw;
1792         vha->flags.online = 0;
1793         qla2x00_try_to_stop_firmware(vha);
1794         ha->isp_ops->disable_intrs(ha);
1795 }
1796
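/*
 * qla82xx_fw_load_from_blob() - Download the bootloader and firmware
 * images from the request_firmware() blob into adapter memory, post the
 * BDINFO magic to CAMRAM and release the software reset.
 */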
1797 static int
1798 qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
1799 {
1800         u64 *ptr64;
1801         u32 i, flashaddr, size;
1802         __le64 data;
1803
1804         size = (IMAGE_START - BOOTLD_START) / 8;
1805
1806         ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
1807         flashaddr = BOOTLD_START;
1808
1809         for (i = 0; i < size; i++) {
1810                 data = cpu_to_le64(ptr64[i]);
1811                 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1812                         return -EIO;
1813                 flashaddr += 8;
1814         }
1815
1816         flashaddr = FLASH_ADDR_START;
1817         size = (__force u32)qla82xx_get_fw_size(ha) / 8;
1818         ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
1819
1820         for (i = 0; i < size; i++) {
1821                 data = cpu_to_le64(ptr64[i]);
1822
1823                 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1824                         return -EIO;
1825                 flashaddr += 8;
1826         }
1827         udelay(100);
1828
1829         /* Write a magic value to CAMRAM register
1830          * at a specified offset to indicate
1831          * that all data is written and
1832          * ready for firmware to initialize.
1833          */
1834         qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);
1835
1836         read_lock(&ha->hw_lock);
1837         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1838         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1839         read_unlock(&ha->hw_lock);
1840         return 0;
1841 }
1842
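/*
 * qla82xx_set_product_offset() - Scan the unified ROM product table for
 * an entry matching the chip revision and flag bit, and cache its offset
 * in ha->file_prd_off. Returns 0 on a match, -1 otherwise.
 */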
1843 static int
1844 qla82xx_set_product_offset(struct qla_hw_data *ha)
1845 {
1846         struct qla82xx_uri_table_desc *ptab_desc = NULL;
1847         const uint8_t *unirom = ha->hablob->fw->data;
1848         uint32_t i;
1849         __le32 entries;
1850         __le32 flags, file_chiprev, offset;
1851         uint8_t chiprev = ha->chip_revision;
1852         /* Hardcoding mn_present flag for P3P */
1853         int mn_present = 0;
1854         uint32_t flagbit;
1855
1856         ptab_desc = qla82xx_get_table_desc(unirom,
1857                  QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
1858         if (!ptab_desc)
1859                 return -1;
1860
1861         entries = cpu_to_le32(ptab_desc->num_entries);
1862
1863         for (i = 0; i < entries; i++) {
1864                 offset = cpu_to_le32(ptab_desc->findex) +
1865                         (i * cpu_to_le32(ptab_desc->entry_size));
1866                 flags = cpu_to_le32(*((int *)&unirom[offset] +
1867                         QLA82XX_URI_FLAGS_OFF));
1868                 file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
1869                         QLA82XX_URI_CHIP_REV_OFF));
1870
1871                 flagbit = mn_present ? 1 : 2;
1872
1873                 if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
1874                         ha->file_prd_off = offset;
1875                         return 0;
1876                 }
1877         }
1878         return -1;
1879 }
1880
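/*
 * qla82xx_validate_firmware_blob() - Sanity-check the firmware blob for
 * the given @fw_type: locate the product entry (unified ROM) or verify
 * the BDINFO magic (flash layout) and enforce the minimum image size.
 */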
1881 int
1882 qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
1883 {
1884         __le32 val;
1885         uint32_t min_size;
1886         struct qla_hw_data *ha = vha->hw;
1887         const struct firmware *fw = ha->hablob->fw;
1888
1889         ha->fw_type = fw_type;
1890
1891         if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1892                 if (qla82xx_set_product_offset(ha))
1893                         return -EINVAL;
1894
1895                 min_size = QLA82XX_URI_FW_MIN_SIZE;
1896         } else {
1897                 val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
1898                 if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
1899                         return -EINVAL;
1900
1901                 min_size = QLA82XX_FW_MIN_SIZE;
1902         }
1903
1904         if (fw->size < min_size)
1905                 return -EINVAL;
1906         return 0;
1907 }
1908
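/*
 * qla82xx_check_cmdpeg_state() - Poll CRB_CMDPEG_STATE (up to 60 tries,
 * 500ms apart) until the command peg reports initialization complete;
 * on timeout mark the state failed and return QLA_FUNCTION_FAILED.
 */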
1909 static int
1910 qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1911 {
1912         u32 val = 0;
1913         int retries = 60;
1914         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1915
1916         do {
1917                 read_lock(&ha->hw_lock);
1918                 val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
1919                 read_unlock(&ha->hw_lock);
1920
1921                 switch (val) {
1922                 case PHAN_INITIALIZE_COMPLETE:
1923                 case PHAN_INITIALIZE_ACK:
1924                         return QLA_SUCCESS;
1925                 case PHAN_INITIALIZE_FAILED:
1926                         break;
1927                 default:
1928                         break;
1929                 }
1930                 ql_log(ql_log_info, vha, 0x00a8,
1931                     "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x.\n",
1932                     val, retries);
1933
1934                 msleep(500);
1935
1936         } while (--retries);
1937
1938         ql_log(ql_log_fatal, vha, 0x00a9,
1939             "Cmd Peg initialization failed: 0x%x.\n", val);
1940
1941         val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
1942         read_lock(&ha->hw_lock);
1943         qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1944         read_unlock(&ha->hw_lock);
1945         return QLA_FUNCTION_FAILED;
1946 }
1947
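/*
 * qla82xx_check_rcvpeg_state() - Same handshake as the command peg, but
 * polling CRB_RCVPEG_STATE for the receive peg.
 */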
1948 static int
1949 qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1950 {
1951         u32 val = 0;
1952         int retries = 60;
1953         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1954
1955         do {
1956                 read_lock(&ha->hw_lock);
1957                 val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
1958                 read_unlock(&ha->hw_lock);
1959
1960                 switch (val) {
1961                 case PHAN_INITIALIZE_COMPLETE:
1962                 case PHAN_INITIALIZE_ACK:
1963                         return QLA_SUCCESS;
1964                 case PHAN_INITIALIZE_FAILED:
1965                         break;
1966                 default:
1967                         break;
1968                 }
1969                 ql_log(ql_log_info, vha, 0x00ab,
1970                     "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
1971                     val, retries);
1972
1973                 msleep(500);
1974
1975         } while (--retries);
1976
1977         ql_log(ql_log_fatal, vha, 0x00ac,
1978             "Rcv Peg initialization failed: 0x%x.\n", val);
1979         read_lock(&ha->hw_lock);
1980         qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
1981         read_unlock(&ha->hw_lock);
1982         return QLA_FUNCTION_FAILED;
1983 }
1984
1985 /* ISR related functions */
1986 uint32_t qla82xx_isr_int_target_mask_enable[8] = {
1987         ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
1988         ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
1989         ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
1990         ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
1991 };
1992
1993 uint32_t qla82xx_isr_int_target_status[8] = {
1994         ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
1995         ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
1996         ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
1997         ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
1998 };
1999
2000 static struct qla82xx_legacy_intr_set legacy_intr[] = \
2001         QLA82XX_LEGACY_INTR_CONFIG;
2002
2003 /*
2004  * qla82xx_mbx_completion() - Process mailbox command completions.
2005  * @vha: SCSI driver HA context
2006  * @mb0: Mailbox0 register
2007  */
2008 static void
2009 qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2010 {
2011         uint16_t        cnt;
2012         uint16_t __iomem *wptr;
2013         struct qla_hw_data *ha = vha->hw;
2014         struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2015         wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
2016
2017         /* Load return mailbox registers. */
2018         ha->flags.mbox_int = 1;
2019         ha->mailbox_out[0] = mb0;
2020
2021         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2022                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2023                 wptr++;
2024         }
2025
2026         if (ha->mcp) {
2027                 ql_dbg(ql_dbg_async, vha, 0x5052,
2028                     "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
2029         } else {
2030                 ql_dbg(ql_dbg_async, vha, 0x5053,
2031                     "MBX pointer ERROR.\n");
2032         }
2033 }
2034
2035 /*
2036  * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
2037  * @irq: interrupt number
2038  * @dev_id: SCSI driver HA context
2040  *
2041  * Called by system whenever the host adapter generates an interrupt.
2042  *
2043  * Returns handled flag.
2044  */
2045 irqreturn_t
2046 qla82xx_intr_handler(int irq, void *dev_id)
2047 {
2048         scsi_qla_host_t *vha;
2049         struct qla_hw_data *ha;
2050         struct rsp_que *rsp;
2051         struct device_reg_82xx __iomem *reg;
2052         int status = 0, status1 = 0;
2053         unsigned long   flags;
2054         unsigned long   iter;
2055         uint32_t        stat = 0;
2056         uint16_t        mb[4];
2057
2058         rsp = (struct rsp_que *) dev_id;
2059         if (!rsp) {
2060                 printk(KERN_INFO
2061                         "%s(): NULL response queue pointer.\n", __func__);
2062                 return IRQ_NONE;
2063         }
2064         ha = rsp->hw;
2065
2066         if (!ha->flags.msi_enabled) {
2067                 status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
2068                 if (!(status & ha->nx_legacy_intr.int_vec_bit))
2069                         return IRQ_NONE;
2070
2071                 status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
2072                 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
2073                         return IRQ_NONE;
2074         }
2075
2076         /* clear the interrupt */
2077         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
2078
2079         /* read twice to ensure write is flushed */
2080         qla82xx_rd_32(ha, ISR_INT_VECTOR);
2081         qla82xx_rd_32(ha, ISR_INT_VECTOR);
2082
2083         reg = &ha->iobase->isp82;
2084
2085         spin_lock_irqsave(&ha->hardware_lock, flags);
2086         vha = pci_get_drvdata(ha->pdev);
2087         for (iter = 1; iter--; ) {
2088
2089                 if (RD_REG_DWORD(&reg->host_int)) {
2090                         stat = RD_REG_DWORD(&reg->host_status);
2091
2092                         switch (stat & 0xff) {
2093                         case 0x1:
2094                         case 0x2:
2095                         case 0x10:
2096                         case 0x11:
2097                                 qla82xx_mbx_completion(vha, MSW(stat));
2098                                 status |= MBX_INTERRUPT;
2099                                 break;
2100                         case 0x12:
2101                                 mb[0] = MSW(stat);
2102                                 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2103                                 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2104                                 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2105                                 qla2x00_async_event(vha, rsp, mb);
2106                                 break;
2107                         case 0x13:
2108                                 qla24xx_process_response_queue(vha, rsp);
2109                                 break;
2110                         default:
2111                                 ql_dbg(ql_dbg_async, vha, 0x5054,
2112                                     "Unrecognized interrupt type (%d).\n",
2113                                     stat & 0xff);
2114                                 break;
2115                         }
2116                 }
2117                 WRT_REG_DWORD(&reg->host_int, 0);
2118         }
2119         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2120         if (!ha->flags.msi_enabled)
2121                 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2122
2123 #ifdef QL_DEBUG_LEVEL_17
2124         if (!irq && ha->flags.eeh_busy)
2125                 ql_log(ql_log_warn, vha, 0x503d,
2126                     "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2127                     status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2128 #endif
2129
2130         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2131             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2132                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2133                 complete(&ha->mbx_intr_comp);
2134         }
2135         return IRQ_HANDLED;
2136 }
2137
2138 irqreturn_t
2139 qla82xx_msix_default(int irq, void *dev_id)
2140 {
2141         scsi_qla_host_t *vha;
2142         struct qla_hw_data *ha;
2143         struct rsp_que *rsp;
2144         struct device_reg_82xx __iomem *reg;
2145         int status = 0;
2146         unsigned long flags;
2147         uint32_t stat = 0;
2148         uint16_t mb[4];
2149
2150         rsp = (struct rsp_que *) dev_id;
2151         if (!rsp) {
2152                 printk(KERN_INFO
2153                         "%s(): NULL response queue pointer.\n", __func__);
2154                 return IRQ_NONE;
2155         }
2156         ha = rsp->hw;
2157
2158         reg = &ha->iobase->isp82;
2159
2160         spin_lock_irqsave(&ha->hardware_lock, flags);
2161         vha = pci_get_drvdata(ha->pdev);
2162         do {
2163                 if (RD_REG_DWORD(&reg->host_int)) {
2164                         stat = RD_REG_DWORD(&reg->host_status);
2165
2166                         switch (stat & 0xff) {
2167                         case 0x1:
2168                         case 0x2:
2169                         case 0x10:
2170                         case 0x11:
2171                                 qla82xx_mbx_completion(vha, MSW(stat));
2172                                 status |= MBX_INTERRUPT;
2173                                 break;
2174                         case 0x12:
2175                                 mb[0] = MSW(stat);
2176                                 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2177                                 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2178                                 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2179                                 qla2x00_async_event(vha, rsp, mb);
2180                                 break;
2181                         case 0x13:
2182                                 qla24xx_process_response_queue(vha, rsp);
2183                                 break;
2184                         default:
2185                                 ql_dbg(ql_dbg_async, vha, 0x5041,
2186                                     "Unrecognized interrupt type (%d).\n",
2187                                     stat & 0xff);
2188                                 break;
2189                         }
2190                 }
2191                 WRT_REG_DWORD(&reg->host_int, 0);
2192         } while (0);
2193
2194         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2195
2196 #ifdef QL_DEBUG_LEVEL_17
2197         if (!irq && ha->flags.eeh_busy)
2198                 ql_log(ql_log_warn, vha, 0x5044,
2199                     "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2200                     status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2201 #endif
2202
2203         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2204                 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2205                         set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2206                         complete(&ha->mbx_intr_comp);
2207         }
2208         return IRQ_HANDLED;
2209 }
2210
2211 irqreturn_t
2212 qla82xx_msix_rsp_q(int irq, void *dev_id)
2213 {
2214         scsi_qla_host_t *vha;
2215         struct qla_hw_data *ha;
2216         struct rsp_que *rsp;
2217         struct device_reg_82xx __iomem *reg;
2218         unsigned long flags;
2219
2220         rsp = (struct rsp_que *) dev_id;
2221         if (!rsp) {
2222                 printk(KERN_INFO
2223                         "%s(): NULL response queue pointer.\n", __func__);
2224                 return IRQ_NONE;
2225         }
2226
2227         ha = rsp->hw;
2228         reg = &ha->iobase->isp82;
2229         spin_lock_irqsave(&ha->hardware_lock, flags);
2230         vha = pci_get_drvdata(ha->pdev);
2231         qla24xx_process_response_queue(vha, rsp);
2232         WRT_REG_DWORD(&reg->host_int, 0);
2233         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2234         return IRQ_HANDLED;
2235 }
2236
2237 void
2238 qla82xx_poll(int irq, void *dev_id)
2239 {
2240         scsi_qla_host_t *vha;
2241         struct qla_hw_data *ha;
2242         struct rsp_que *rsp;
2243         struct device_reg_82xx __iomem *reg;
2244         int status = 0;
2245         uint32_t stat;
2246         uint16_t mb[4];
2247         unsigned long flags;
2248
2249         rsp = (struct rsp_que *) dev_id;
2250         if (!rsp) {
2251                 printk(KERN_INFO
2252                         "%s(): NULL response queue pointer.\n", __func__);
2253                 return;
2254         }
2255         ha = rsp->hw;
2256
2257         reg = &ha->iobase->isp82;
2258         spin_lock_irqsave(&ha->hardware_lock, flags);
2259         vha = pci_get_drvdata(ha->pdev);
2260
2261         if (RD_REG_DWORD(&reg->host_int)) {
2262                 stat = RD_REG_DWORD(&reg->host_status);
2263                 switch (stat & 0xff) {
2264                 case 0x1:
2265                 case 0x2:
2266                 case 0x10:
2267                 case 0x11:
2268                         qla82xx_mbx_completion(vha, MSW(stat));
2269                         status |= MBX_INTERRUPT;
2270                         break;
2271                 case 0x12:
2272                         mb[0] = MSW(stat);
2273                         mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2274                         mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2275                         mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2276                         qla2x00_async_event(vha, rsp, mb);
2277                         break;
2278                 case 0x13:
2279                         qla24xx_process_response_queue(vha, rsp);
2280                         break;
2281                 default:
2282                         ql_dbg(ql_dbg_p3p, vha, 0xb013,
2283                             "Unrecognized interrupt type (%d).\n",
2284                             stat & 0xff);
2285                         break;
2286                 }
2287         }
2288         WRT_REG_DWORD(&reg->host_int, 0);
2289         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2290 }
2291
2292 void
2293 qla82xx_enable_intrs(struct qla_hw_data *ha)
2294 {
2295         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2296         qla82xx_mbx_intr_enable(vha);
2297         spin_lock_irq(&ha->hardware_lock);
2298         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2299         spin_unlock_irq(&ha->hardware_lock);
2300         ha->interrupts_on = 1;
2301 }
2302
2303 void
2304 qla82xx_disable_intrs(struct qla_hw_data *ha)
2305 {
2306         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2307         qla82xx_mbx_intr_disable(vha);
2308         spin_lock_irq(&ha->hardware_lock);
2309         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
2310         spin_unlock_irq(&ha->hardware_lock);
2311         ha->interrupts_on = 0;
2312 }
2313
2314 void qla82xx_init_flags(struct qla_hw_data *ha)
2315 {
2316         struct qla82xx_legacy_intr_set *nx_legacy_intr;
2317
2318         /* ISP 8021 initializations */
2319         rwlock_init(&ha->hw_lock);
2320         ha->qdr_sn_window = -1;
2321         ha->ddr_mn_window = -1;
2322         ha->curr_window = 255;
2323         ha->portnum = PCI_FUNC(ha->pdev->devfn);
2324         nx_legacy_intr = &legacy_intr[ha->portnum];
2325         ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
2326         ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
2327         ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
2328         ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
2329 }
2330
2331 inline void
2332 qla82xx_set_drv_active(scsi_qla_host_t *vha)
2333 {
2334         uint32_t drv_active;
2335         struct qla_hw_data *ha = vha->hw;
2336
2337         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2338
2339         /* If reset value is all FF's, initialize DRV_ACTIVE */
2340         if (drv_active == 0xffffffff) {
2341                 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
2342                         QLA82XX_DRV_NOT_ACTIVE);
2343                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2344         }
2345         drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2346         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2347 }
2348
2349 inline void
2350 qla82xx_clear_drv_active(struct qla_hw_data *ha)
2351 {
2352         uint32_t drv_active;
2353
2354         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2355         drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2356         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2357 }
2358
2359 static inline int
2360 qla82xx_need_reset(struct qla_hw_data *ha)
2361 {
2362         uint32_t drv_state;
2363         int rval;
2364
2365         if (ha->flags.isp82xx_reset_owner)
2366                 return 1;
2367         else {
2368                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2369                 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2370                 return rval;
2371         }
2372 }
2373
2374 static inline void
2375 qla82xx_set_rst_ready(struct qla_hw_data *ha)
2376 {
2377         uint32_t drv_state;
2378         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2379
2380         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2381
2382         /* If reset value is all FF's, initialize DRV_STATE */
2383         if (drv_state == 0xffffffff) {
2384                 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
2385                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2386         }
2387         drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2388         ql_dbg(ql_dbg_init, vha, 0x00bb,
2389             "drv_state = 0x%08x.\n", drv_state);
2390         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2391 }
2392
2393 static inline void
2394 qla82xx_clear_rst_ready(struct qla_hw_data *ha)
2395 {
2396         uint32_t drv_state;
2397
2398         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2399         drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2400         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2401 }
2402
2403 static inline void
2404 qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2405 {
2406         uint32_t qsnt_state;
2407
2408         qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2409         qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2410         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2411 }
2412
2413 void
2414 qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
2415 {
2416         struct qla_hw_data *ha = vha->hw;
2417         uint32_t qsnt_state;
2418
2419         qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2420         qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2421         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2422 }
2423
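/*
 * qla82xx_load_fw() - Initialize the CRB from ROM, bring QM and CAMRAM
 * out of reset, and download firmware: flash first, falling back to the
 * request_firmware() blob (or blob only when ql2xfwloadbin == 2).
 */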
2424 static int
2425 qla82xx_load_fw(scsi_qla_host_t *vha)
2426 {
2427         int rst;
2428         struct fw_blob *blob;
2429         struct qla_hw_data *ha = vha->hw;
2430
2431         if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2432                 ql_log(ql_log_fatal, vha, 0x009f,
2433                     "Error during CRB initialization.\n");
2434                 return QLA_FUNCTION_FAILED;
2435         }
2436         udelay(500);
2437
2438         /* Bring QM and CAMRAM out of reset */
2439         rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
2440         rst &= ~((1 << 28) | (1 << 24));
2441         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
2442
2443         /*
2444          * FW Load priority:
2445          * 1) Operational firmware residing in flash.
2446          * 2) Firmware via request-firmware interface (.bin file).
2447          */
2448         if (ql2xfwloadbin == 2)
2449                 goto try_blob_fw;
2450
2451         ql_log(ql_log_info, vha, 0x00a0,
2452             "Attempting to load firmware from flash.\n");
2453
2454         if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2455                 ql_log(ql_log_info, vha, 0x00a1,
2456                     "Firmware loaded successfully from flash.\n");
2457                 return QLA_SUCCESS;
2458         } else {
2459                 ql_log(ql_log_warn, vha, 0x0108,
2460                     "Firmware load from flash failed.\n");
2461         }
2462
2463 try_blob_fw:
2464         ql_log(ql_log_info, vha, 0x00a2,
2465             "Attempting to load firmware from blob.\n");
2466
2467         /* Load firmware blob. */
2468         blob = ha->hablob = qla2x00_request_firmware(vha);
2469         if (!blob) {
2470                 ql_log(ql_log_fatal, vha, 0x00a3,
2471                     "Firmware image not present.\n");
2472                 goto fw_load_failed;
2473         }
2474
2475         /* Validating firmware blob */
2476         if (qla82xx_validate_firmware_blob(vha,
2477                 QLA82XX_FLASH_ROMIMAGE)) {
2478                 /* Fallback to URI format */
2479                 if (qla82xx_validate_firmware_blob(vha,
2480                         QLA82XX_UNIFIED_ROMIMAGE)) {
2481                         ql_log(ql_log_fatal, vha, 0x00a4,
2482                             "No valid firmware image found.\n");
2483                         return QLA_FUNCTION_FAILED;
2484                 }
2485         }
2486
2487         if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2488                 ql_log(ql_log_info, vha, 0x00a5,
2489                     "Firmware loaded successfully from binary blob.\n");
2490                 return QLA_SUCCESS;
2491         } else {
2492                 ql_log(ql_log_fatal, vha, 0x00a6,
2493                     "Firmware load failed for binary blob.\n");
2494                 blob->fw = NULL;
2495                 blob = NULL;
2496                 goto fw_load_failed;
2497         }
2498         return QLA_SUCCESS;
2499
2500 fw_load_failed:
2501         return QLA_FUNCTION_FAILED;
2502 }
2503
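/*
 * qla82xx_start_firmware() - Clear the peg state and halt-status
 * registers, load the firmware and complete the command- and receive-peg
 * handshakes; also records the negotiated PCIe link width.
 */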
2504 int
2505 qla82xx_start_firmware(scsi_qla_host_t *vha)
2506 {
2507         int           pcie_cap;
2508         uint16_t      lnk;
2509         struct qla_hw_data *ha = vha->hw;
2510
2511         /* scrub dma mask expansion register */
2512         qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
2513
2514         /* Put both the PEG CMD and RCV PEG to default state
2515          * of 0 before resetting the hardware
2516          */
2517         qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
2518         qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
2519
2520         /* Overwrite stale initialization register values */
2521         qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
2522         qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2523
2524         if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2525                 ql_log(ql_log_fatal, vha, 0x00a7,
2526                     "Error trying to start fw.\n");
2527                 return QLA_FUNCTION_FAILED;
2528         }
2529
2530         /* Handshake with the card before we register the devices. */
2531         if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2532                 ql_log(ql_log_fatal, vha, 0x00aa,
2533                     "Error during card handshake.\n");
2534                 return QLA_FUNCTION_FAILED;
2535         }
2536
2537         /* Negotiated Link width */
2538         pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
2539         pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
2540         ha->link_width = (lnk >> 4) & 0x3f;
2541
2542         /* Synchronize with Receive peg */
2543         return qla82xx_check_rcvpeg_state(ha);
2544 }
2545
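/*
 * qla2xx_build_scsi_type_6_iocbs() - Build the chained DSD lists for a
 * Command Type 6 IOCB, pulling pre-allocated dsd_dma entries from
 * ha->gbl_dsd_list and terminating the final list with a null descriptor.
 */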
2546 static inline int
2547 qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2548         uint16_t tot_dsds)
2549 {
2550         uint32_t *cur_dsd = NULL;
2551         scsi_qla_host_t *vha;
2552         struct qla_hw_data *ha;
2553         struct scsi_cmnd *cmd;
2554         struct  scatterlist *cur_seg;
2555         uint32_t *dsd_seg;
2556         void *next_dsd;
2557         uint8_t avail_dsds;
2558         uint8_t first_iocb = 1;
2559         uint32_t dsd_list_len;
2560         struct dsd_dma *dsd_ptr;
2561         struct ct6_dsd *ctx;
2562
2563         cmd = sp->cmd;
2564
2565         /* Update entry type to indicate Command Type 3 IOCB */
2566         *((uint32_t *)(&cmd_pkt->entry_type)) =
2567                 __constant_cpu_to_le32(COMMAND_TYPE_6);
2568
2569         /* No data transfer */
2570         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
2571                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
2572                 return 0;
2573         }
2574
2575         vha = sp->fcport->vha;
2576         ha = vha->hw;
2577
2578         /* Set transfer direction */
2579         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2580                 cmd_pkt->control_flags =
2581                     __constant_cpu_to_le16(CF_WRITE_DATA);
2582                 ha->qla_stats.output_bytes += scsi_bufflen(cmd);
2583         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
2584                 cmd_pkt->control_flags =
2585                     __constant_cpu_to_le16(CF_READ_DATA);
2586                 ha->qla_stats.input_bytes += scsi_bufflen(cmd);
2587         }
2588
2589         cur_seg = scsi_sglist(cmd);
2590         ctx = sp->ctx;
2591
2592         while (tot_dsds) {
2593                 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
2594                     QLA_DSDS_PER_IOCB : tot_dsds;
2595                 tot_dsds -= avail_dsds;
2596                 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
2597
2598                 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
2599                     struct dsd_dma, list);
2600                 next_dsd = dsd_ptr->dsd_addr;
2601                 list_del(&dsd_ptr->list);
2602                 ha->gbl_dsd_avail--;
2603                 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
2604                 ctx->dsd_use_cnt++;
2605                 ha->gbl_dsd_inuse++;
2606
2607                 if (first_iocb) {
2608                         first_iocb = 0;
2609                         dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2610                         *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2611                         *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2612                         *dsd_seg++ = cpu_to_le32(dsd_list_len);
2613                 } else {
2614                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2615                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2616                         *cur_dsd++ = cpu_to_le32(dsd_list_len);
2617                 }
2618                 cur_dsd = (uint32_t *)next_dsd;
2619                 while (avail_dsds) {
2620                         dma_addr_t      sle_dma;
2621
2622                         sle_dma = sg_dma_address(cur_seg);
2623                         *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2624                         *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2625                         *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
2626                         cur_seg = sg_next(cur_seg);
2627                         avail_dsds--;
2628                 }
2629         }
2630
2631         /* Null termination */
2632         *cur_dsd++ =  0;
2633         *cur_dsd++ = 0;
2634         *cur_dsd++ = 0;
2635         cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
2636         return 0;
2637 }
2638
2639 /*
2640  * qla82xx_calc_dsd_lists() - Determine number of DSD lists required
2641  * for Command Type 6.
2642  *
2643  * @dsds: number of data segment descriptors needed
2644  *
2645  * Returns the number of DSD lists needed to store @dsds.
2646  */
2647 inline uint16_t
2648 qla82xx_calc_dsd_lists(uint16_t dsds)
2649 {
2650         uint16_t dsd_lists = 0;
2651
2652         dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
2653         if (dsds % QLA_DSDS_PER_IOCB)
2654                 dsd_lists++;
2655         return dsd_lists;
2656 }
2657
2658 /*
2659  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2660  * @sp: command to send to the ISP
2661  *
2662  * Returns non-zero if a failure occurred, else zero.
2663  */
2664 int
2665 qla82xx_start_scsi(srb_t *sp)
2666 {
2667         int             ret, nseg;
2668         unsigned long   flags;
2669         struct scsi_cmnd *cmd;
2670         uint32_t        *clr_ptr;
2671         uint32_t        index;
2672         uint32_t        handle;
2673         uint16_t        cnt;
2674         uint16_t        req_cnt;
2675         uint16_t        tot_dsds;
2676         struct device_reg_82xx __iomem *reg;
2677         uint32_t dbval;
2678         uint32_t *fcp_dl;
2679         uint8_t additional_cdb_len;
2680         struct ct6_dsd *ctx;
2681         struct scsi_qla_host *vha = sp->fcport->vha;
2682         struct qla_hw_data *ha = vha->hw;
2683         struct req_que *req = NULL;
2684         struct rsp_que *rsp = NULL;
2685         char            tag[2];
2686
2687         /* Setup device pointers. */
2688         ret = 0;
2689         reg = &ha->iobase->isp82;
2690         cmd = sp->cmd;
2691         req = vha->req;
2692         rsp = ha->rsp_q_map[0];
2693
2694         /* So we know we haven't pci_map'ed anything yet */
2695         tot_dsds = 0;
2696
2697         dbval = 0x04 | (ha->portnum << 5);
2698
2699         /* Send marker if required */
2700         if (vha->marker_needed != 0) {
2701                 if (qla2x00_marker(vha, req,
2702                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2703                         ql_log(ql_log_warn, vha, 0x300c,
2704                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2705                         return QLA_FUNCTION_FAILED;
2706                 }
2707                 vha->marker_needed = 0;
2708         }
2709
2710         /* Acquire ring specific lock */
2711         spin_lock_irqsave(&ha->hardware_lock, flags);
2712
2713         /* Check for room in outstanding command list. */
2714         handle = req->current_outstanding_cmd;
2715         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2716                 handle++;
2717                 if (handle == MAX_OUTSTANDING_COMMANDS)
2718                         handle = 1;
2719                 if (!req->outstanding_cmds[handle])
2720                         break;
2721         }
2722         if (index == MAX_OUTSTANDING_COMMANDS)
2723                 goto queuing_error;
2724
2725         /* Map the sg table so we have an accurate count of sg entries needed */
2726         if (scsi_sg_count(cmd)) {
2727                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2728                     scsi_sg_count(cmd), cmd->sc_data_direction);
2729                 if (unlikely(!nseg))
2730                         goto queuing_error;
2731         } else
2732                 nseg = 0;
2733
2734         tot_dsds = nseg;
2735
2736         if (tot_dsds > ql2xshiftctondsd) {
2737                 struct cmd_type_6 *cmd_pkt;
2738                 uint16_t more_dsd_lists = 0;
2739                 struct dsd_dma *dsd_ptr;
2740                 uint16_t i;
2741
2742                 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2743                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2744                         ql_dbg(ql_dbg_io, vha, 0x300d,
2745                             "Num of DSD lists %d is more than %d for cmd=%p.\n",
2746                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2747                             cmd);
2748                         goto queuing_error;
2749                 }
2750
2751                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2752                         goto sufficient_dsds;
2753                 else
2754                         more_dsd_lists -= ha->gbl_dsd_avail;
2755
2756                 for (i = 0; i < more_dsd_lists; i++) {
2757                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2758                         if (!dsd_ptr) {
2759                                 ql_log(ql_log_fatal, vha, 0x300e,
2760                                     "Failed to allocate memory for dsd_dma "
2761                                     "for cmd=%p.\n", cmd);
2762                                 goto queuing_error;
2763                         }
2764
2765                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2766                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2767                         if (!dsd_ptr->dsd_addr) {
2768                                 kfree(dsd_ptr);
2769                                 ql_log(ql_log_fatal, vha, 0x300f,
2770                                     "Failed to allocate memory for dsd_addr "
2771                                     "for cmd=%p.\n", cmd);
2772                                 goto queuing_error;
2773                         }
2774                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2775                         ha->gbl_dsd_avail++;
2776                 }
2777
2778 sufficient_dsds:
2779                 req_cnt = 1;
2780
2781                 if (req->cnt < (req_cnt + 2)) {
2782                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2783                                 &reg->req_q_out[0]);
2784                         if (req->ring_index < cnt)
2785                                 req->cnt = cnt - req->ring_index;
2786                         else
2787                                 req->cnt = req->length -
2788                                         (req->ring_index - cnt);
2789                 }
2790
2791                 if (req->cnt < (req_cnt + 2))
2792                         goto queuing_error;
2793
2794                 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2795                 if (!sp->ctx) {
2796                         ql_log(ql_log_fatal, vha, 0x3010,
2797                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2798                         goto queuing_error;
2799                 }
2800                 memset(ctx, 0, sizeof(struct ct6_dsd));
2801                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2802                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2803                 if (!ctx->fcp_cmnd) {
2804                         ql_log(ql_log_fatal, vha, 0x3011,
2805                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2806                         goto queuing_error_fcp_cmnd;
2807                 }
2808
2809                 /* Initialize the DSD list and dma handle */
2810                 INIT_LIST_HEAD(&ctx->dsd_list);
2811                 ctx->dsd_use_cnt = 0;
2812
2813                 if (cmd->cmd_len > 16) {
2814                         additional_cdb_len = cmd->cmd_len - 16;
2815                         if ((cmd->cmd_len % 4) != 0) {
2816                                 /* SCSI command bigger than 16 bytes must be
2817                                  * multiple of 4
2818                                  */
2819                                 ql_log(ql_log_warn, vha, 0x3012,
2820                                     "scsi cmd len %d not multiple of 4 "
2821                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2822                                 goto queuing_error_fcp_cmnd;
2823                         }
2824                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2825                 } else {
2826                         additional_cdb_len = 0;
2827                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2828                 }
2829
2830                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2831                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2832
2833                 /* Zero out remaining portion of packet. */
2834                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2835                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2836                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2837                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2838
2839                 /* Set NPORT-ID and LUN number*/
2840                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2841                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2842                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2843                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2844                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2845
2846                 /* Build IOCB segments */
2847                 if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2848                         goto queuing_error_fcp_cmnd;
2849
2850                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2851                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2852
2853                 /* build FCP_CMND IU */
2854                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2855                 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2856                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2857
2858                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2859                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2860                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2861                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2862
2863                 /*
2864                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2865                  */
2866                 if (scsi_populate_tag_msg(cmd, tag)) {
2867                         switch (tag[0]) {
2868                         case HEAD_OF_QUEUE_TAG:
2869                                 ctx->fcp_cmnd->task_attribute =
2870                                     TSK_HEAD_OF_QUEUE;
2871                                 break;
2872                         case ORDERED_QUEUE_TAG:
2873                                 ctx->fcp_cmnd->task_attribute =
2874                                     TSK_ORDERED;
2875                                 break;
2876                         }
2877                 }
2878
2879                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2880
2881                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2882                     additional_cdb_len);
2883                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2884
2885                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2886                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2887                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2888                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2889                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2890
2891                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2892                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2893                 /* Set total data segment count. */
2894                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2895                 /* Specify response queue number where
2896                  * completion should happen
2897                  */
2898                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2899         } else {
2900                 struct cmd_type_7 *cmd_pkt;
2901                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2902                 if (req->cnt < (req_cnt + 2)) {
2903                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2904                             &reg->req_q_out[0]);
2905                         if (req->ring_index < cnt)
2906                                 req->cnt = cnt - req->ring_index;
2907                         else
2908                                 req->cnt = req->length -
2909                                         (req->ring_index - cnt);
2910                 }
2911                 if (req->cnt < (req_cnt + 2))
2912                         goto queuing_error;
2913
2914                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2915                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2916
2917                 /* Zero out remaining portion of packet. */
2918                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2919                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2920                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2921                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2922
2923                 /* Set NPORT-ID and LUN number. */
2924                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2925                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2926                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2927                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2928                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2929
2930                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2931                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2932                         sizeof(cmd_pkt->lun));
2933
2934                 /*
2935                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2936                  */
2937                 if (scsi_populate_tag_msg(cmd, tag)) {
2938                         switch (tag[0]) {
2939                         case HEAD_OF_QUEUE_TAG:
2940                                 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2941                                 break;
2942                         case ORDERED_QUEUE_TAG:
2943                                 cmd_pkt->task = TSK_ORDERED;
2944                                 break;
2945                         }
2946                 }
2947
2948                 /* Load SCSI command packet. */
2949                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2950                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2951
2952                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2953
2954                 /* Build IOCB segments */
2955                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2956
2957                 /* Set total data segment count. */
2958                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2959                 /* Specify response queue number where
2960                  * completion should happen.
2961                  */
2962                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2963
2964         }
2965         /* Record the command as outstanding and commit it to the request ring. */
2966         req->current_outstanding_cmd = handle;
2967         req->outstanding_cmds[handle] = sp;
2968         sp->handle = handle;
2969         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2970         req->cnt -= req_cnt;
2971         wmb();
2972
2973         /* Adjust ring index. */
2974         req->ring_index++;
2975         if (req->ring_index == req->length) {
2976                 req->ring_index = 0;
2977                 req->ring_ptr = req->ring;
2978         } else
2979                 req->ring_ptr++;
2980
2981         sp->flags |= SRB_DMA_VALID;
2982
2983         /* Set chip new ring index. */
2984         /* write, read and verify logic */
2985         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2986         if (ql2xdbwr)
2987                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2988         else {
2989                 WRT_REG_DWORD(
2990                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2991                         dbval);
2992                 wmb();
2993                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2994                         WRT_REG_DWORD(
2995                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2996                                 dbval);
2997                         wmb();
2998                 }
2999         }
3000
3001         /* Manage unprocessed RIO/ZIO commands in response queue. */
3002         if (vha->flags.process_response_queue &&
3003             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3004                 qla24xx_process_response_queue(vha, rsp);
3005
3006         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3007         return QLA_SUCCESS;
3008
3009 queuing_error_fcp_cmnd:
3010         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3011 queuing_error:
3012         if (tot_dsds)
3013                 scsi_dma_unmap(cmd);
3014
3015         if (sp->ctx) {
3016                 mempool_free(sp->ctx, ha->ctx_mempool);
3017                 sp->ctx = NULL;
3018         }
3019         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3020
3021         return QLA_FUNCTION_FAILED;
3022 }
3023
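/*
 * qla82xx_read_flash_data
 *    Read 'length' bytes of flash starting at byte offset 'faddr' into
 *    'dwptr' using dword-sized ROM fast reads; data is stored
 *    little-endian.  On a read failure the buffer is returned
 *    partially filled.
 */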
3024 static uint32_t *
3025 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
3026         uint32_t length)
3027 {
3028         uint32_t i;
3029         uint32_t val;
3030         struct qla_hw_data *ha = vha->hw;
3031
3032         /* Dword reads to flash. */
3033         for (i = 0; i < length/4; i++, faddr += 4) {
3034                 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
3035                         ql_log(ql_log_warn, vha, 0x0106,
3036                             "ROM fast read failed.\n");
3037                         goto done_read;
3038                 }
3039                 dwptr[i] = cpu_to_le32(val);
3040         }
3041 done_read:
3042         return dwptr;
3043 }
3044
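/*
 * qla82xx_unprotect_flash
 *    Clear the block-protect bits in the flash status register so the
 *    part can be erased and programmed.  The ROM hardware lock is held
 *    for the duration; if the status-register write fails, the protect
 *    bits are restored before returning.
 */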
3045 static int
3046 qla82xx_unprotect_flash(struct qla_hw_data *ha)
3047 {
3048         int ret;
3049         uint32_t val;
3050         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3051
3052         ret = ql82xx_rom_lock_d(ha);
3053         if (ret < 0) {
3054                 ql_log(ql_log_warn, vha, 0xb014,
3055                     "ROM Lock failed.\n");
3056                 return ret;
3057         }
3058
3059         ret = qla82xx_read_status_reg(ha, &val);
3060         if (ret < 0)
3061                 goto done_unprotect;
3062
3063         val &= ~(BLOCK_PROTECT_BITS << 2);
3064         ret = qla82xx_write_status_reg(ha, val);
3065         if (ret < 0) {
3066                 val |= (BLOCK_PROTECT_BITS << 2);
3067                 qla82xx_write_status_reg(ha, val);
3068         }
3069
3070         if (qla82xx_write_disable_flash(ha) != 0)
3071                 ql_log(ql_log_warn, vha, 0xb015,
3072                     "Write disable failed.\n");
3073
3074 done_unprotect:
3075         qla82xx_rom_unlock(ha);
3076         return ret;
3077 }
3078
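/*
 * qla82xx_protect_flash
 *    Re-assert the block-protect bits (lock all sectors) in the flash
 *    status register after an update, again under the ROM hardware lock.
 */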
3079 static int
3080 qla82xx_protect_flash(struct qla_hw_data *ha)
3081 {
3082         int ret;
3083         uint32_t val;
3084         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3085
3086         ret = ql82xx_rom_lock_d(ha);
3087         if (ret < 0) {
3088                 ql_log(ql_log_warn, vha, 0xb016,
3089                     "ROM Lock failed.\n");
3090                 return ret;
3091         }
3092
3093         ret = qla82xx_read_status_reg(ha, &val);
3094         if (ret < 0)
3095                 goto done_protect;
3096
3097         val |= (BLOCK_PROTECT_BITS << 2);
3098         /* LOCK all sectors */
3099         ret = qla82xx_write_status_reg(ha, val);
3100         if (ret < 0)
3101                 ql_log(ql_log_warn, vha, 0xb017,
3102                     "Write status register failed.\n");
3103
3104         if (qla82xx_write_disable_flash(ha) != 0)
3105                 ql_log(ql_log_warn, vha, 0xb018,
3106                     "Write disable failed.\n");
3107 done_protect:
3108         qla82xx_rom_unlock(ha);
3109         return ret;
3110 }
3111
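/*
 * qla82xx_erase_sector
 *    Erase the flash sector containing 'addr' by issuing the serial
 *    flash sector-erase instruction (M25P_INSTR_SE) through the ROMUSB
 *    interface and waiting for the part to finish.
 */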
3112 static int
3113 qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3114 {
3115         int ret = 0;
3116         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3117
3118         ret = ql82xx_rom_lock_d(ha);
3119         if (ret < 0) {
3120                 ql_log(ql_log_warn, vha, 0xb019,
3121                     "ROM Lock failed.\n");
3122                 return ret;
3123         }
3124
3125         qla82xx_flash_set_write_enable(ha);
3126         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
3127         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
3128         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
3129
3130         if (qla82xx_wait_rom_done(ha)) {
3131                 ql_log(ql_log_warn, vha, 0xb01a,
3132                     "Error waiting for rom done.\n");
3133                 ret = -1;
3134                 goto done;
3135         }
3136         ret = qla82xx_flash_wait_write_finish(ha);
3137 done:
3138         qla82xx_rom_unlock(ha);
3139         return ret;
3140 }
3141
3142 /*
3143  * Address and length are specified in bytes.
3144  */
3145 uint8_t *
3146 qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3147         uint32_t offset, uint32_t length)
3148 {
3149         scsi_block_requests(vha->host);
3150         qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
3151         scsi_unblock_requests(vha->host);
3152         return buf;
3153 }
3154
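/*
 * qla82xx_write_flash_data
 *    Program 'dwords' 32-bit words to flash starting at byte offset
 *    'faddr'.  Each sector is erased when its first word is reached.
 *    A DMA burst-write path exists (gated by page_mode) and falls back
 *    to single-dword programming if the burst fails; the flash is
 *    unprotected before and re-protected after the update.
 */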
3155 static int
3156 qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3157         uint32_t faddr, uint32_t dwords)
3158 {
3159         int ret;
3160         uint32_t liter;
3161         uint32_t sec_mask, rest_addr;
3162         dma_addr_t optrom_dma;
3163         void *optrom = NULL;
3164         int page_mode = 0;
3165         struct qla_hw_data *ha = vha->hw;
3166
3167         ret = -1;
3168
3169         /* Prepare burst-capable write on supported ISPs. */
3170         if (page_mode && !(faddr & 0xfff) &&
3171             dwords > OPTROM_BURST_DWORDS) {
3172                 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
3173                     &optrom_dma, GFP_KERNEL);
3174                 if (!optrom) {
3175                         ql_log(ql_log_warn, vha, 0xb01b,
3176                             "Unable to allocate memory "
3177                             "for optrom burst write (%x KB).\n",
3178                             OPTROM_BURST_SIZE / 1024);
3179                 }
3180         }
3181
3182         rest_addr = ha->fdt_block_size - 1;
3183         sec_mask = ~rest_addr;
3184
3185         ret = qla82xx_unprotect_flash(ha);
3186         if (ret) {
3187                 ql_log(ql_log_warn, vha, 0xb01c,
3188                     "Unable to unprotect flash for update.\n");
3189                 goto write_done;
3190         }
3191
3192         for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3193                 /* Are we at the beginning of a sector? */
3194                 if ((faddr & rest_addr) == 0) {
3195
3196                         ret = qla82xx_erase_sector(ha, faddr);
3197                         if (ret) {
3198                                 ql_log(ql_log_warn, vha, 0xb01d,
3199                                     "Unable to erase sector: address=%x.\n",
3200                                     faddr);
3201                                 break;
3202                         }
3203                 }
3204
3205                 /* Go with burst-write. */
3206                 if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
3207                         /* Copy data to DMA'ble buffer. */
3208                         memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
3209
3210                         ret = qla2x00_load_ram(vha, optrom_dma,
3211                             (ha->flash_data_off | faddr),
3212                             OPTROM_BURST_DWORDS);
3213                         if (ret != QLA_SUCCESS) {
3214                                 ql_log(ql_log_warn, vha, 0xb01e,
3215                                     "Unable to burst-write optrom segment "
3216                                     "(%x/%x/%llx).\n", ret,
3217                                     (ha->flash_data_off | faddr),
3218                                     (unsigned long long)optrom_dma);
3219                                 ql_log(ql_log_warn, vha, 0xb01f,
3220                                     "Reverting to slow-write.\n");
3221
3222                                 dma_free_coherent(&ha->pdev->dev,
3223                                     OPTROM_BURST_SIZE, optrom, optrom_dma);
3224                                 optrom = NULL;
3225                         } else {
3226                                 liter += OPTROM_BURST_DWORDS - 1;
3227                                 faddr += OPTROM_BURST_DWORDS - 1;
3228                                 dwptr += OPTROM_BURST_DWORDS - 1;
3229                                 continue;
3230                         }
3231                 }
3232
3233                 ret = qla82xx_write_flash_dword(ha, faddr,
3234                     cpu_to_le32(*dwptr));
3235                 if (ret) {
3236                         ql_dbg(ql_dbg_p3p, vha, 0xb020,
3237                             "Unable to program flash address=%x data=%x.\n",
3238                             faddr, *dwptr);
3239                         break;
3240                 }
3241         }
3242
3243         ret = qla82xx_protect_flash(ha);
3244         if (ret)
3245                 ql_log(ql_log_warn, vha, 0xb021,
3246                     "Unable to protect flash after update.\n");
3247 write_done:
3248         if (optrom)
3249                 dma_free_coherent(&ha->pdev->dev,
3250                     OPTROM_BURST_SIZE, optrom, optrom_dma);
3251         return ret;
3252 }
3253
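/*
 * qla82xx_write_optrom_data
 *    Counterpart of qla82xx_read_optrom_data: block SCSI requests,
 *    write 'length' bytes from 'buf' to flash at byte offset 'offset',
 *    and map the ISP82xx result onto QLA_SUCCESS / QLA_FUNCTION_FAILED.
 */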
3254 int
3255 qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3256         uint32_t offset, uint32_t length)
3257 {
3258         int rval;
3259
3260         /* Suspend HBA. */
3261         scsi_block_requests(vha->host);
3262         rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
3263                 length >> 2);
3264         scsi_unblock_requests(vha->host);
3265
3266         /* Convert the ISP82xx return value to a generic driver status. */
3267         if (rval)
3268                 rval = QLA_FUNCTION_FAILED;
3269         else
3270                 rval = QLA_SUCCESS;
3271         return rval;
3272 }
3273
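/*
 * qla82xx_start_iocbs
 *    Advance the request-queue ring index and ring the ISP82xx request
 *    doorbell.  The doorbell dword is built as
 *        0x04 | (ha->portnum << 5) | (req->id << 8) | (ring_index << 16)
 *    and, unless ql2xdbwr forces a CRB write, it is written to
 *    nxdb_wr_ptr and re-written until it reads back from nxdb_rd_ptr
 *    (write/read/verify).
 */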
3274 void
3275 qla82xx_start_iocbs(srb_t *sp)
3276 {
3277         struct qla_hw_data *ha = sp->fcport->vha->hw;
3278         struct req_que *req = ha->req_q_map[0];
3279         struct device_reg_82xx __iomem *reg;
3280         uint32_t dbval;
3281
3282         /* Adjust ring index. */
3283         req->ring_index++;
3284         if (req->ring_index == req->length) {
3285                 req->ring_index = 0;
3286                 req->ring_ptr = req->ring;
3287         } else
3288                 req->ring_ptr++;
3289
3290         reg = &ha->iobase->isp82;
3291         dbval = 0x04 | (ha->portnum << 5);
3292
3293         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3294         if (ql2xdbwr)
3295                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
3296         else {
3297                 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
3298                 wmb();
3299                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3300                         WRT_REG_DWORD((unsigned long  __iomem *)ha->nxdb_wr_ptr,
3301                                 dbval);
3302                         wmb();
3303                 }
3304         }
3305 }
3306
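/*
 * qla82xx_rom_lock_recovery
 *    Force-release a stale ROM hardware lock, e.g. when the function
 *    that held it died while holding it.
 */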
3307 void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3308 {
3309         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3310
3311         if (qla82xx_rom_lock(ha))
3312                 /* Someone else is holding the lock. */
3313                 ql_log(ql_log_info, vha, 0xb022,
3314                     "Resetting rom_lock.\n");
3315
3316         /*
3317          * Either we got the lock, or someone
3318          * else died while holding it.
3319          * In either case, unlock.
3320          */
3321         qla82xx_rom_unlock(ha);
3322 }
3323
3324 /*
3325  * qla82xx_device_bootstrap
3326  *    Initialize device, set DEV_READY, start fw
3327  *
3328  * Note:
3329  *      IDC lock must be held upon entry
3330  *
3331  * Return:
3332  *    Success : 0
3333  *    Failed  : 1
3334  */
3335 static int
3336 qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3337 {
3338         int rval = QLA_SUCCESS;
3339         int i, timeout;
3340         uint32_t old_count, count;
3341         struct qla_hw_data *ha = vha->hw;
3342         int need_reset = 0, peg_stuck = 1;
3343
3344         need_reset = qla82xx_need_reset(ha);
3345
3346         old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3347
3348         for (i = 0; i < 10; i++) {
3349                 timeout = msleep_interruptible(200);
3350                 if (timeout) {
3351                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3352                                 QLA82XX_DEV_FAILED);
3353                         return QLA_FUNCTION_FAILED;
3354                 }
3355
3356                 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3357                 if (count != old_count)
3358                         peg_stuck = 0;
3359         }
3360
3361         if (need_reset) {
3362                 /* We are trying to perform a recovery here. */
3363                 if (peg_stuck)
3364                         qla82xx_rom_lock_recovery(ha);
3365                 goto dev_initialize;
3366         } else  {
3367                 /* Start of day for this ha context. */
3368                 if (peg_stuck) {
3369                         /* Either we are the first or recovery in progress. */
3370                         qla82xx_rom_lock_recovery(ha);
3371                         goto dev_initialize;
3372                 } else
3373                         /* Firmware already running. */
3374                         goto dev_ready;
3375         }
3376
3377         return rval;
3378
3379 dev_initialize:
3380         /* set to DEV_INITIALIZING */
3381         ql_log(ql_log_info, vha, 0x009e,
3382             "HW State: INITIALIZING.\n");
3383         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
3384
3385         /* The driver that sets the state to INITIALIZING also sets the IDC version. */
3386         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
3387
3388         qla82xx_idc_unlock(ha);
3389         rval = qla82xx_start_firmware(vha);
3390         qla82xx_idc_lock(ha);
3391
3392         if (rval != QLA_SUCCESS) {
3393                 ql_log(ql_log_fatal, vha, 0x00ad,
3394                     "HW State: FAILED.\n");
3395                 qla82xx_clear_drv_active(ha);
3396                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
3397                 return rval;
3398         }
3399
3400 dev_ready:
3401         ql_log(ql_log_info, vha, 0x00ae,
3402             "HW State: READY.\n");
3403         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
3404
3405         return QLA_SUCCESS;
3406 }
3407
3408 /*
3409 * qla82xx_need_qsnt_handler
3410 *    Code to start quiescence sequence
3411 *
3412 * Note:
3413 *      IDC lock must be held upon entry
3414 *
3415 * Return: void
3416 */
3417
3418 static void
3419 qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3420 {
3421         struct qla_hw_data *ha = vha->hw;
3422         uint32_t dev_state, drv_state, drv_active;
3423         unsigned long reset_timeout;
3424
3425         if (vha->flags.online) {
3426                 /* Block any further I/O and wait for pending commands to complete. */
3427                 qla82xx_quiescent_state_cleanup(vha);
3428         }
3429
3430         /* Set the quiescence ready bit */
3431         qla82xx_set_qsnt_ready(ha);
3432
3433         /* Wait up to 30 seconds for the other functions to ack. */
3434         reset_timeout = jiffies + (30 * HZ);
3435
3436         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3437         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3438         /* A 2 is written when qsnt is acked, so shift drv_active left one bit to compare. */
3439         drv_active = drv_active << 0x01;
3440
3441         while (drv_state != drv_active) {
3442
3443                 if (time_after_eq(jiffies, reset_timeout)) {
3444                         /* Quiescence timeout: other functions did not ack,
3445                          * so change the state back to DEV_READY.
3446                          */
3447                         ql_log(ql_log_info, vha, 0xb023,
3448                             "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
3449                         ql_log(ql_log_info, vha, 0xb024,
3450                             "DRV_ACTIVE:0x%08x DRV_STATE:0x%08x.\n",
3451                             drv_active, drv_state);
3452                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3453                             QLA82XX_DEV_READY);
3454                         ql_log(ql_log_info, vha, 0xb025,
3455                             "HW State: DEV_READY.\n");
3456                         qla82xx_idc_unlock(ha);
3457                         qla2x00_perform_loop_resync(vha);
3458                         qla82xx_idc_lock(ha);
3459
3460                         qla82xx_clear_qsnt_ready(vha);
3461                         return;
3462                 }
3463
3464                 qla82xx_idc_unlock(ha);
3465                 msleep(1000);
3466                 qla82xx_idc_lock(ha);
3467
3468                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3469                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3470                 drv_active = drv_active << 0x01;
3471         }
3472         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3473         /* everyone acked so set the state to DEV_QUIESCENCE */
3474         if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
3475                 ql_log(ql_log_info, vha, 0xb026,
3476                     "HW State: DEV_QUIESCENT.\n");
3477                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
3478         }
3479 }
3480
3481 /*
3482 * qla82xx_wait_for_state_change
3483 *    Wait for device state to change from given current state
3484 *
3485 * Note:
3486 *     IDC lock must not be held upon entry
3487 *
3488 * Return:
3489 *    Changed device state.
3490 */
3491 uint32_t
3492 qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
3493 {
3494         struct qla_hw_data *ha = vha->hw;
3495         uint32_t dev_state;
3496
3497         do {
3498                 msleep(1000);
3499                 qla82xx_idc_lock(ha);
3500                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3501                 qla82xx_idc_unlock(ha);
3502         } while (dev_state == curr_state);
3503
3504         return dev_state;
3505 }
3506
3507 static void
3508 qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3509 {
3510         struct qla_hw_data *ha = vha->hw;
3511
3512         /* Disable the board */
3513         ql_log(ql_log_fatal, vha, 0x00b8,
3514             "Disabling the board.\n");
3515
3516         qla82xx_idc_lock(ha);
3517         qla82xx_clear_drv_active(ha);
3518         qla82xx_idc_unlock(ha);
3519
3520         /* Set DEV_FAILED flag to disable timer */
3521         vha->device_flags |= DFLG_DEV_FAILED;
3522         qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3523         qla2x00_mark_all_devices_lost(vha, 0);
3524         vha->flags.online = 0;
3525         vha->flags.init_done = 0;
3526 }
3527
3528 /*
3529  * qla82xx_need_reset_handler
3530  *    Code to start reset sequence
3531  *
3532  * Note:
3533  *      IDC lock must be held upon entry
3534  *
3535  * Return:
3536  *    void (the resulting device state is written to the
3537  *    IDC DEV_STATE register)
3538  */
3539 static void
3540 qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3541 {
3542         uint32_t dev_state, drv_state, drv_active, active_mask;
3543         unsigned long reset_timeout;
3544         struct qla_hw_data *ha = vha->hw;
3545         struct req_que *req = ha->req_q_map[0];
3546
3547         if (vha->flags.online) {
3548                 qla82xx_idc_unlock(ha);
3549                 qla2x00_abort_isp_cleanup(vha);
3550                 ha->isp_ops->get_flash_version(vha, req->ring);
3551                 ha->isp_ops->nvram_config(vha);
3552                 qla82xx_idc_lock(ha);
3553         }
3554
3555         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3556         if (!ha->flags.isp82xx_reset_owner) {
3557                 ql_dbg(ql_dbg_p3p, vha, 0xb028,
3558                     "reset_acknowledged by 0x%x\n", ha->portnum);
3559                 qla82xx_set_rst_ready(ha);
3560         } else {
3561                 active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
3562                 drv_active &= active_mask;
3563                 ql_dbg(ql_dbg_p3p, vha, 0xb029,
3564                     "active_mask: 0x%08x\n", active_mask);
3565         }
3566
3567         /* wait for 10 seconds for reset ack from all functions */
3568         reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
3569
3570         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3571         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3572         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3573
3574         ql_dbg(ql_dbg_p3p, vha, 0xb02a,
3575             "drv_state: 0x%08x, drv_active: 0x%08x, "
3576             "dev_state: 0x%08x, active_mask: 0x%08x\n",
3577             drv_state, drv_active, dev_state, active_mask);
3578
3579         while (drv_state != drv_active &&
3580             dev_state != QLA82XX_DEV_INITIALIZING) {
3581                 if (time_after_eq(jiffies, reset_timeout)) {
3582                         ql_log(ql_log_warn, vha, 0x00b5,
3583                             "Reset timeout.\n");
3584                         break;
3585                 }
3586                 qla82xx_idc_unlock(ha);
3587                 msleep(1000);
3588                 qla82xx_idc_lock(ha);
3589                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3590                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3591                 if (ha->flags.isp82xx_reset_owner)
3592                         drv_active &= active_mask;
3593                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3594         }
3595
3596         ql_dbg(ql_dbg_p3p, vha, 0xb02b,
3597             "drv_state: 0x%08x, drv_active: 0x%08x, "
3598             "dev_state: 0x%08x, active_mask: 0x%08x\n",
3599             drv_state, drv_active, dev_state, active_mask);
3600
3601         ql_log(ql_log_info, vha, 0x00b6,
3602             "Device state is 0x%x = %s.\n",
3603             dev_state,
3604             dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3605
3606         /* Force to DEV_COLD unless someone else is starting a reset */
3607         if (dev_state != QLA82XX_DEV_INITIALIZING &&
3608             dev_state != QLA82XX_DEV_COLD) {
3609                 ql_log(ql_log_info, vha, 0x00b7,
3610                     "HW State: COLD/RE-INIT.\n");
3611                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3612                 if (ql2xmdenable) {
3613                         if (qla82xx_md_collect(vha))
3614                                 ql_log(ql_log_warn, vha, 0xb02c,
3615                                     "Not able to collect minidump.\n");
3616                 } else
3617                         ql_log(ql_log_warn, vha, 0xb04f,
3618                             "Minidump disabled.\n");
3619         }
3620 }
3621
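/*
 * qla82xx_check_md_needed
 *    If no firmware dump is pending, compare the running firmware
 *    version with the cached one and, on a mismatch, free and
 *    re-allocate the minidump resources for the new firmware.
 */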
3622 static void
3623 qla82xx_check_md_needed(scsi_qla_host_t *vha)
3624 {
3625         struct qla_hw_data *ha = vha->hw;
3626         uint16_t fw_major_version, fw_minor_version, fw_subminor_version;
3627         uint16_t fw_attributes;
3628         uint32_t fw_memory_size, mpi_capabilities;
3629         uint8_t mpi_version[3], phy_version[3];
3630
3631         if (!ha->fw_dumped) {
3632                 qla2x00_get_fw_version(vha,
3633                     &fw_major_version,
3634                     &fw_minor_version,
3635                     &fw_subminor_version,
3636                     &fw_attributes, &fw_memory_size,
3637                     mpi_version, &mpi_capabilities,
3638                     phy_version);
3639
3640                 if (fw_major_version != ha->fw_major_version ||
3641                     fw_minor_version != ha->fw_minor_version ||
3642                     fw_subminor_version != ha->fw_subminor_version) {
3643                         ql_log(ql_log_info, vha, 0xb02d,
3644                             "Firmware version differs. "
3645                             "Previous version: %d:%d:%d - "
3646                             "New version: %d:%d:%d\n",
3647                             ha->fw_major_version,
3648                             ha->fw_minor_version, ha->fw_subminor_version,
3649                             fw_major_version, fw_minor_version,
3650                             fw_subminor_version);
3651                         /* Release MiniDump resources */
3652                         qla82xx_md_free(vha);
3653                         /* Allocate MiniDump resources */
3654                         qla82xx_md_prep(vha);
3655                 }
3656         } else
3657                 ql_log(ql_log_info, vha, 0xb02e,
3658                     "Firmware dump available "
3659                     "to retrieve.\n");
3660 }
3661
3662
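/*
 * qla82xx_check_fw_alive
 *    Sample the firmware heartbeat counter (PEG_ALIVE_COUNTER).  Returns
 *    1 when the counter has not advanced for two consecutive calls,
 *    0 otherwise or while AER/EEH is in progress (counter reads as all
 *    0xff).
 */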
3663 int
3664 qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3665 {
3666         uint32_t fw_heartbeat_counter;
3667         int status = 0;
3668
3669         fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
3670                 QLA82XX_PEG_ALIVE_COUNTER);
3671         /* all 0xff, assume AER/EEH in progress, ignore */
3672         if (fw_heartbeat_counter == 0xffffffff) {
3673                 ql_dbg(ql_dbg_timer, vha, 0x6003,
3674                     "FW heartbeat counter is 0xffffffff, "
3675                     "returning status=%d.\n", status);
3676                 return status;
3677         }
3678         if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3679                 vha->seconds_since_last_heartbeat++;
3680                 /* FW not alive after 2 seconds */
3681                 if (vha->seconds_since_last_heartbeat == 2) {
3682                         vha->seconds_since_last_heartbeat = 0;
3683                         status = 1;
3684                 }
3685         } else
3686                 vha->seconds_since_last_heartbeat = 0;
3687         vha->fw_heartbeat_counter = fw_heartbeat_counter;
3688         if (status)
3689                 ql_dbg(ql_dbg_timer, vha, 0x6004,
3690                     "Returning status=%d.\n", status);
3691         return status;
3692 }
3693
3694 /*
3695  * qla82xx_device_state_handler
3696  *      Main state handler
3697  *
3698  * Note:
3699  *      IDC lock must be held upon entry
3700  *
3701  * Return:
3702  *    Success : 0
3703  *    Failed  : 1
3704  */
3705 int
3706 qla82xx_device_state_handler(scsi_qla_host_t *vha)
3707 {
3708         uint32_t dev_state;
3709         uint32_t old_dev_state;
3710         int rval = QLA_SUCCESS;
3711         unsigned long dev_init_timeout;
3712         struct qla_hw_data *ha = vha->hw;
3713         int loopcount = 0;
3714
3715         qla82xx_idc_lock(ha);
3716         if (!vha->flags.init_done)
3717                 qla82xx_set_drv_active(vha);
3718
3719         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3720         old_dev_state = dev_state;
3721         ql_log(ql_log_info, vha, 0x009b,
3722             "Device state is 0x%x = %s.\n",
3723             dev_state,
3724             dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3725
3726         /* wait for 30 seconds for device to go ready */
3727         dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
3728
3729         while (1) {
3730
3731                 if (time_after_eq(jiffies, dev_init_timeout)) {
3732                         ql_log(ql_log_fatal, vha, 0x009c,
3733                             "Device init failed.\n");
3734                         rval = QLA_FUNCTION_FAILED;
3735                         break;
3736                 }
3737                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3738                 if (old_dev_state != dev_state) {
3739                         loopcount = 0;
3740                         old_dev_state = dev_state;
3741                 }
3742                 if (loopcount < 5) {
3743                         ql_log(ql_log_info, vha, 0x009d,
3744                             "Device state is 0x%x = %s.\n",
3745                             dev_state,
3746                             dev_state < MAX_STATES ? qdev_state(dev_state) :
3747                             "Unknown");
3748                 }
3749
3750                 switch (dev_state) {
3751                 case QLA82XX_DEV_READY:
3752                         qla82xx_check_md_needed(vha);
3753                         ha->flags.isp82xx_reset_owner = 0;
3754                         goto exit;
3755                 case QLA82XX_DEV_COLD:
3756                         rval = qla82xx_device_bootstrap(vha);
3757                         break;
3758                 case QLA82XX_DEV_INITIALIZING:
3759                         qla82xx_idc_unlock(ha);
3760                         msleep(1000);
3761                         qla82xx_idc_lock(ha);
3762                         break;
3763                 case QLA82XX_DEV_NEED_RESET:
3764                         if (!ql2xdontresethba)
3765                                 qla82xx_need_reset_handler(vha);
3766                         dev_init_timeout = jiffies +
3767                                 (ha->nx_dev_init_timeout * HZ);
3768                         break;
3769                 case QLA82XX_DEV_NEED_QUIESCENT:
3770                         qla82xx_need_qsnt_handler(vha);
3771                         /* Reset timeout value after quiescence handler */
3772                         dev_init_timeout = jiffies +
3773                                 (ha->nx_dev_init_timeout * HZ);
3774                         break;
3775                 case QLA82XX_DEV_QUIESCENT:
3776                         /* Owner will exit and other will wait for the state
3777                          * to get changed
3778                          */
3779                         if (ha->flags.quiesce_owner)
3780                                 goto exit;
3781
3782                         qla82xx_idc_unlock(ha);
3783                         msleep(1000);
3784                         qla82xx_idc_lock(ha);
3785
3786                         /* Reset timeout value after quiescence handler */
3787                         dev_init_timeout = jiffies +
3788                                 (ha->nx_dev_init_timeout * HZ);
3789                         break;
3790                 case QLA82XX_DEV_FAILED:
3791                         qla82xx_dev_failed_handler(vha);
3792                         rval = QLA_FUNCTION_FAILED;
3793                         goto exit;
3794                 default:
3795                         qla82xx_idc_unlock(ha);
3796                         msleep(1000);
3797                         qla82xx_idc_lock(ha);
3798                 }
3799                 loopcount++;
3800         }
3801 exit:
3802         qla82xx_idc_unlock(ha);
3803         return rval;
3804 }
3805
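/*
 * qla82xx_watchdog
 *    Periodic health check: reacts to NEED_RESET / NEED_QUIESCENT
 *    device states and, if the firmware heartbeat has stopped, dumps
 *    the PEG halt status registers, schedules an ISP abort (or marks
 *    the adapter unrecoverable) and prematurely completes any mailbox
 *    command stuck waiting on the hung firmware.
 */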
3806 void qla82xx_watchdog(scsi_qla_host_t *vha)
3807 {
3808         uint32_t dev_state, halt_status;
3809         struct qla_hw_data *ha = vha->hw;
3810
3811         /* don't poll if reset is going on */
3812         if (!ha->flags.isp82xx_reset_hdlr_active) {
3813                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3814                 if (dev_state == QLA82XX_DEV_NEED_RESET &&
3815                     !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3816                         ql_log(ql_log_warn, vha, 0x6001,
3817                             "Adapter reset needed.\n");
3818                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3819                         qla2xxx_wake_dpc(vha);
3820                 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
3821                         !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3822                         ql_log(ql_log_warn, vha, 0x6002,
3823                             "Quiescent needed.\n");
3824                         set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3825                         qla2xxx_wake_dpc(vha);
3826                 } else {
3827                         if (qla82xx_check_fw_alive(vha)) {
3828                                 halt_status = qla82xx_rd_32(ha,
3829                                     QLA82XX_PEG_HALT_STATUS1);
3830                                 ql_dbg(ql_dbg_timer, vha, 0x6005,
3831                                     "dumping hw/fw registers:\n"
3832                                     " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
3833                                     " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
3834                                     " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
3835                                     " PEG_NET_4_PC: 0x%x.\n", halt_status,
3836                                     qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
3837                                     qla82xx_rd_32(ha,
3838                                             QLA82XX_CRB_PEG_NET_0 + 0x3c),
3839                                     qla82xx_rd_32(ha,
3840                                             QLA82XX_CRB_PEG_NET_1 + 0x3c),
3841                                     qla82xx_rd_32(ha,
3842                                             QLA82XX_CRB_PEG_NET_2 + 0x3c),
3843                                     qla82xx_rd_32(ha,
3844                                             QLA82XX_CRB_PEG_NET_3 + 0x3c),
3845                                     qla82xx_rd_32(ha,
3846                                             QLA82XX_CRB_PEG_NET_4 + 0x3c));
3847                                 if (halt_status & HALT_STATUS_UNRECOVERABLE) {
3848                                         set_bit(ISP_UNRECOVERABLE,
3849                                             &vha->dpc_flags);
3850                                 } else {
3851                                         ql_log(ql_log_info, vha, 0x6006,
3852                                             "Detected abort needed.\n");
3853                                         set_bit(ISP_ABORT_NEEDED,
3854                                             &vha->dpc_flags);
3855                                 }
3856                                 qla2xxx_wake_dpc(vha);
3857                                 ha->flags.isp82xx_fw_hung = 1;
3858                                 if (ha->flags.mbox_busy) {
3859                                         ha->flags.mbox_int = 1;
3860                                         ql_log(ql_log_warn, vha, 0x6007,
3861                                             "Firmware hung, doing "
3862                                             "premature completion of mbx "
3863                                             "command.\n");
3864                                         if (test_bit(MBX_INTR_WAIT,
3865                                             &ha->mbx_cmd_flags))
3866                                                 complete(&ha->mbx_intr_comp);
3867                                 }
3868                         }
3869                 }
3870         }
3871 }
3872
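/*
 * qla82xx_load_risc
 *    On ISP82xx the firmware bring-up is driven by the IDC device state
 *    handler, so this simply runs qla82xx_device_state_handler();
 *    srisc_addr is unused on this hardware.
 */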
3873 int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3874 {
3875         int rval;
3876         rval = qla82xx_device_state_handler(vha);
3877         return rval;
3878 }
3879
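/*
 * qla82xx_set_reset_owner
 *    If the device is currently READY, move it to NEED_RESET and mark
 *    this function as the reset owner; otherwise just log the current
 *    device state.
 */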
3880 void
3881 qla82xx_set_reset_owner(scsi_qla_host_t *vha)
3882 {
3883         struct qla_hw_data *ha = vha->hw;
3884         uint32_t dev_state;
3885
3886         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3887         if (dev_state == QLA82XX_DEV_READY) {
3888                 ql_log(ql_log_info, vha, 0xb02f,
3889                     "HW State: NEED RESET\n");
3890                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3891                         QLA82XX_DEV_NEED_RESET);
3892                 ha->flags.isp82xx_reset_owner = 1;
3893                 ql_dbg(ql_dbg_p3p, vha, 0xb030,
3894                     "reset_owner is 0x%x\n", ha->portnum);
3895         } else
3896                 ql_log(ql_log_info, vha, 0xb031,
3897                     "Device state is 0x%x = %s.\n",
3898                     dev_state,
3899                     dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3900 }
3901
3902 /*
3903  *  qla82xx_abort_isp
3904  *      Resets ISP and aborts all outstanding commands.
3905  *
3906  * Input:
3907  *      ha           = adapter block pointer.
3908  *
3909  * Returns:
3910  *      0 = success
3911  */
3912 int
3913 qla82xx_abort_isp(scsi_qla_host_t *vha)
3914 {
3915         int rval;
3916         struct qla_hw_data *ha = vha->hw;
3917
3918         if (vha->device_flags & DFLG_DEV_FAILED) {
3919                 ql_log(ql_log_warn, vha, 0x8024,
3920                     "Device in failed state, exiting.\n");
3921                 return QLA_SUCCESS;
3922         }
3923         ha->flags.isp82xx_reset_hdlr_active = 1;
3924
3925         qla82xx_idc_lock(ha);
3926         qla82xx_set_reset_owner(vha);
3927         qla82xx_idc_unlock(ha);
3928
3929         rval = qla82xx_device_state_handler(vha);
3930
3931         qla82xx_idc_lock(ha);
3932         qla82xx_clear_rst_ready(ha);
3933         qla82xx_idc_unlock(ha);
3934
3935         if (rval == QLA_SUCCESS) {
3936                 ha->flags.isp82xx_fw_hung = 0;
3937                 ha->flags.isp82xx_reset_hdlr_active = 0;
3938                 qla82xx_restart_isp(vha);
3939         }
3940
3941         if (rval) {
3942                 vha->flags.online = 1;
3943                 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3944                         if (ha->isp_abort_cnt == 0) {
3945                                 ql_log(ql_log_warn, vha, 0x8027,
3946                                     "ISP error recovery failed - board "
3947                                     "disabled.\n");
3948                                 /*
3949                                  * The next call disables the board
3950                                  * completely.
3951                                  */
3952                                 ha->isp_ops->reset_adapter(vha);
3953                                 vha->flags.online = 0;
3954                                 clear_bit(ISP_ABORT_RETRY,
3955                                     &vha->dpc_flags);
3956                                 rval = QLA_SUCCESS;
3957                         } else { /* schedule another ISP abort */
3958                                 ha->isp_abort_cnt--;
3959                                 ql_log(ql_log_warn, vha, 0x8036,
3960                                     "ISP abort - retry remaining %d.\n",
3961                                     ha->isp_abort_cnt);
3962                                 rval = QLA_FUNCTION_FAILED;
3963                         }
3964                 } else {
3965                         ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3966                         ql_dbg(ql_dbg_taskm, vha, 0x8029,
3967                             "ISP error recovery - retrying (%d) more times.\n",
3968                             ha->isp_abort_cnt);
3969                         set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3970                         rval = QLA_FUNCTION_FAILED;
3971                 }
3972         }
3973         return rval;
3974 }
3975
3976 /*
3977  *  qla82xx_fcoe_ctx_reset
3978  *      Perform a quick reset and aborts all outstanding commands.
3979  *      This will only perform an FCoE context reset and avoids a full blown
3980  *      chip reset.
3981  *
3982  * Input:
3983  *      ha = adapter block pointer.
3984  *      is_reset_path = flag for identifying the reset path.
3985  *
3986  * Returns:
3987  *      0 = success
3988  */
3989 int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
3990 {
3991         int rval = QLA_FUNCTION_FAILED;
3992
3993         if (vha->flags.online) {
3994                 /* Abort all outstanding commands so they can be requeued later. */
3995                 qla2x00_abort_isp_cleanup(vha);
3996         }
3997
3998         /* Stop currently executing firmware.
3999          * This will destroy existing FCoE context at the F/W end.
4000          */
4001         qla2x00_try_to_stop_firmware(vha);
4002
4003         /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
4004         rval = qla82xx_restart_isp(vha);
4005
4006         return rval;
4007 }
4008
4009 /*
4010  * qla2x00_wait_for_fcoe_ctx_reset
4011  *    Wait till the FCoE context is reset.
4012  *
4013  * Note:
4014  *    Does context switching here.
4015  *    Release SPIN_LOCK (if any) before calling this routine.
4016  *
4017  * Return:
4018  *    Success (fcoe_ctx reset is done) : 0
4019  *    Failed  (fcoe_ctx reset not completed within max loop timeout) : 1
4020  */
4021 int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
4022 {
4023         int status = QLA_FUNCTION_FAILED;
4024         unsigned long wait_reset;
4025
4026         wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
4027         while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
4028             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
4029             && time_before(jiffies, wait_reset)) {
4030
4031                 set_current_state(TASK_UNINTERRUPTIBLE);
4032                 schedule_timeout(HZ);
4033
4034                 if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
4035                     !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
4036                         status = QLA_SUCCESS;
4037                         break;
4038                 }
4039         }
4040         ql_dbg(ql_dbg_p3p, vha, 0xb027,
4041             "%s status=%d.\n", __func__, status);
4042
4043         return status;
4044 }
4045
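/*
 * qla82xx_chip_reset_cleanup
 *    Pre-reset cleanup: if the firmware still responds, abort each
 *    outstanding command via mailbox and wait for pending commands to
 *    drain; if the firmware is hung, skip the mailbox aborts and only
 *    complete a stuck mailbox interrupt waiter.
 */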
4046 void
4047 qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
4048 {
4049         int i;
4050         unsigned long flags;
4051         struct qla_hw_data *ha = vha->hw;
4052
4053         /* Check if 82XX firmware is alive or not
4054          * We may have arrived here from NEED_RESET
4055          * detection only
4056          */
4057         if (!ha->flags.isp82xx_fw_hung) {
4058                 for (i = 0; i < 2; i++) {
4059                         msleep(1000);
4060                         if (qla82xx_check_fw_alive(vha)) {
4061                                 ha->flags.isp82xx_fw_hung = 1;
4062                                 if (ha->flags.mbox_busy) {
4063                                         ha->flags.mbox_int = 1;
4064                                         complete(&ha->mbx_intr_comp);
4065                                 }
4066                                 break;
4067                         }
4068                 }
4069         }
4070         ql_dbg(ql_dbg_init, vha, 0x00b0,
4071             "Entered %s fw_hung=%d.\n",
4072             __func__, ha->flags.isp82xx_fw_hung);
4073
4074         /* Abort all commands gracefully if fw NOT hung */
4075         if (!ha->flags.isp82xx_fw_hung) {
4076                 int cnt, que;
4077                 srb_t *sp;
4078                 struct req_que *req;
4079
4080                 spin_lock_irqsave(&ha->hardware_lock, flags);
4081                 for (que = 0; que < ha->max_req_queues; que++) {
4082                         req = ha->req_q_map[que];
4083                         if (!req)
4084                                 continue;
4085                         for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
4086                                 sp = req->outstanding_cmds[cnt];
4087                                 if (sp) {
4088                                         if (!sp->ctx ||
4089                                             (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
4090                                                 spin_unlock_irqrestore(
4091                                                     &ha->hardware_lock, flags);
4092                                                 if (ha->isp_ops->abort_command(sp)) {
4093                                                         ql_log(ql_log_info, vha,
4094                                                             0x00b1,
4095                                                             "mbx abort failed.\n");
4096                                                 } else {
4097                                                         ql_log(ql_log_info, vha,
4098                                                             0x00b2,
4099                                                             "mbx abort success.\n");
4100                                                 }
4101                                                 spin_lock_irqsave(&ha->hardware_lock, flags);
4102                                         }
4103                                 }
4104                         }
4105                 }
4106                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4107
4108                 /* Wait for pending cmds (physical and virtual) to complete */
4109                 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
4110                     WAIT_HOST) == QLA_SUCCESS) {
4111                         ql_dbg(ql_dbg_init, vha, 0x00b3,
4112                             "Done wait for "
4113                             "pending commands.\n");
4114                 }
4115         }
4116 }
4117
4118 /* Minidump related functions */
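/*
 * qla82xx_md_rw_32
 *    Indirect CRB access used by the minidump code: the upper 16 bits
 *    of 'off' select the 2M CRB window (written to CRB_WINDOW_2M and
 *    read back to flush), the lower 16 bits index into the
 *    CRB_INDIRECT_2M aperture.  A non-zero 'flag' writes 'data',
 *    otherwise the register value is returned.
 */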
4119 int
4120 qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
4121 {
4122         uint32_t  off_value, rval = 0;
4123
4124         WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
4125             (off & 0xFFFF0000));
4126
4127         /* Read back value to make sure write has gone through */
4128         RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
4129         off_value  = (off & 0x0000FFFF);
4130
4131         if (flag)
4132                 WRT_REG_DWORD((void *)
4133                     (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
4134                     data);
4135         else
4136                 rval = RD_REG_DWORD((void *)
4137                     (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
4138
4139         return rval;
4140 }
4141
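/*
 * qla82xx_minidump_process_control
 *    Execute a CRB "control" entry from the minidump template: a small
 *    opcode-driven sequence (write, read-modify-write, AND/OR, poll,
 *    read/write saved state) applied to crb_addr, which advances by
 *    addr_stride after each iteration.
 */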
4142 static int
4143 qla82xx_minidump_process_control(scsi_qla_host_t *vha,
4144         qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4145 {
4146         struct qla_hw_data *ha = vha->hw;
4147         struct qla82xx_md_entry_crb *crb_entry;
4148         uint32_t read_value, opcode, poll_time;
4149         uint32_t addr, index, crb_addr;
4150         unsigned long wtime;
4151         struct qla82xx_md_template_hdr *tmplt_hdr;
4152         uint32_t rval = QLA_SUCCESS;
4153         int i;
4154
4155         tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4156         crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr;
4157         crb_addr = crb_entry->addr;
4158
4159         for (i = 0; i < crb_entry->op_count; i++) {
4160                 opcode = crb_entry->crb_ctrl.opcode;
4161                 if (opcode & QLA82XX_DBG_OPCODE_WR) {
4162                         qla82xx_md_rw_32(ha, crb_addr,
4163                             crb_entry->value_1, 1);
4164                         opcode &= ~QLA82XX_DBG_OPCODE_WR;
4165                 }
4166
4167                 if (opcode & QLA82XX_DBG_OPCODE_RW) {
4168                         read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4169                         qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
4170                         opcode &= ~QLA82XX_DBG_OPCODE_RW;
4171                 }
4172
4173                 if (opcode & QLA82XX_DBG_OPCODE_AND) {
4174                         read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4175                         read_value &= crb_entry->value_2;
4176                         opcode &= ~QLA82XX_DBG_OPCODE_AND;
4177                         if (opcode & QLA82XX_DBG_OPCODE_OR) {
4178                                 read_value |= crb_entry->value_3;
4179                                 opcode &= ~QLA82XX_DBG_OPCODE_OR;
4180                         }
4181                         qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
4182                 }
4183
4184                 if (opcode & QLA82XX_DBG_OPCODE_OR) {
4185                         read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4186                         read_value |= crb_entry->value_3;
4187                         qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
4188                         opcode &= ~QLA82XX_DBG_OPCODE_OR;
4189                 }
4190
4191                 if (opcode & QLA82XX_DBG_OPCODE_POLL) {
4192                         poll_time = crb_entry->crb_strd.poll_timeout;
4193                         wtime = jiffies + poll_time;
4194                         read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4195
4196                         do {
4197                                 if ((read_value & crb_entry->value_2)
4198                                     == crb_entry->value_1)
4199                                         break;
4200                                 else if (time_after_eq(jiffies, wtime)) {
4201                                         /* capturing dump failed */
4202                                         rval = QLA_FUNCTION_FAILED;
4203                                         break;
4204                                 } else
4205                                         read_value = qla82xx_md_rw_32(ha,
4206                                             crb_addr, 0, 0);
4207                         } while (1);
4208                         opcode &= ~QLA82XX_DBG_OPCODE_POLL;
4209                 }
4210
4211                 if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
4212                         if (crb_entry->crb_strd.state_index_a) {
4213                                 index = crb_entry->crb_strd.state_index_a;
4214                                 addr = tmplt_hdr->saved_state_array[index];
4215                         } else
4216                                 addr = crb_addr;
4217
4218                         read_value = qla82xx_md_rw_32(ha, addr, 0, 0);
4219                         index = crb_entry->crb_ctrl.state_index_v;
4220                         tmplt_hdr->saved_state_array[index] = read_value;
4221                         opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
4222                 }
4223
4224                 if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
4225                         if (crb_entry->crb_strd.state_index_a) {
4226                                 index = crb_entry->crb_strd.state_index_a;
4227                                 addr = tmplt_hdr->saved_state_array[index];
4228                         } else
4229                                 addr = crb_addr;
4230
4231                         if (crb_entry->crb_ctrl.state_index_v) {
4232                                 index = crb_entry->crb_ctrl.state_index_v;
4233                                 read_value =
4234                                     tmplt_hdr->saved_state_array[index];
4235                         } else
4236                                 read_value = crb_entry->value_1;
4237
4238                         qla82xx_md_rw_32(ha, addr, read_value, 1);
4239                         opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
4240                 }
4241
4242                 if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
4243                         index = crb_entry->crb_ctrl.state_index_v;
4244                         read_value = tmplt_hdr->saved_state_array[index];
4245                         read_value <<= crb_entry->crb_ctrl.shl;
4246                         read_value >>= crb_entry->crb_ctrl.shr;
4247                         if (crb_entry->value_2)
4248                                 read_value &= crb_entry->value_2;
4249                         read_value |= crb_entry->value_3;
4250                         read_value += crb_entry->value_1;
4251                         tmplt_hdr->saved_state_array[index] = read_value;
4252                         opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
4253                 }
4254                 crb_addr += crb_entry->crb_strd.addr_stride;
4255         }
4256         return rval;
4257 }
4258
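/*
 * Capture an OCM (on-chip memory) region described by an RDOCM template
 * entry: read 'op_count' dwords directly through the memory-mapped PCI BAR,
 * stepping by 'read_addr_stride', and store them little-endian in the
 * minidump buffer.
 */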
4259 static void
4260 qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
4261         qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4262 {
4263         struct qla_hw_data *ha = vha->hw;
4264         uint32_t r_addr, r_stride, loop_cnt, i, r_value;
4265         struct qla82xx_md_entry_rdocm *ocm_hdr;
4266         uint32_t *data_ptr = *d_ptr;
4267
4268         ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
4269         r_addr = ocm_hdr->read_addr;
4270         r_stride = ocm_hdr->read_addr_stride;
4271         loop_cnt = ocm_hdr->op_count;
4272
4273         for (i = 0; i < loop_cnt; i++) {
4274                 r_value = RD_REG_DWORD((void *)(r_addr + ha->nx_pcibase));
4275                 *data_ptr++ = cpu_to_le32(r_value);
4276                 r_addr += r_stride;
4277         }
4278         *d_ptr = data_ptr;
4279 }
4280
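/*
 * Capture a multiplexed register set: for each select value, program the
 * mux select register, read back the data register, and record the
 * (select value, data) pair in the minidump buffer.
 */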
4281 static void
4282 qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
4283         qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4284 {
4285         struct qla_hw_data *ha = vha->hw;
4286         uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
4287         struct qla82xx_md_entry_mux *mux_hdr;
4288         uint32_t *data_ptr = *d_ptr;
4289
4290         mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
4291         r_addr = mux_hdr->read_addr;
4292         s_addr = mux_hdr->select_addr;
4293         s_stride = mux_hdr->select_value_stride;
4294         s_value = mux_hdr->select_value;
4295         loop_cnt = mux_hdr->op_count;
4296
4297         for (i = 0; i < loop_cnt; i++) {
4298                 qla82xx_md_rw_32(ha, s_addr, s_value, 1);
4299                 r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
4300                 *data_ptr++ = cpu_to_le32(s_value);
4301                 *data_ptr++ = cpu_to_le32(r_value);
4302                 s_value += s_stride;
4303         }
4304         *d_ptr = data_ptr;
4305 }
4306
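/*
 * Capture a range of CRB registers: read 'op_count' registers starting at
 * the template-supplied address, stepping by 'addr_stride', and store each
 * (address, value) pair in the minidump buffer.
 */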
4307 static void
4308 qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
4309         qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4310 {
4311         struct qla_hw_data *ha = vha->hw;
4312         uint32_t r_addr, r_stride, loop_cnt, i, r_value;
4313         struct qla82xx_md_entry_crb *crb_hdr;
4314         uint32_t *data_ptr = *d_ptr;
4315
4316         crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr;
4317         r_addr = crb_hdr->addr;
4318         r_stride = crb_hdr->crb_strd.addr_stride;
4319         loop_cnt = crb_hdr->op_count;
4320
4321         for (i = 0; i < loop_cnt; i++) {
4322                 r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
4323                 *data_ptr++ = cpu_to_le32(r_addr);
4324                 *data_ptr++ = cpu_to_le32(r_value);
4325                 r_addr += r_stride;
4326         }
4327         *d_ptr = data_ptr;
4328 }
4329
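/*
 * Capture L2 cache tag/data entries. For each tag value the control
 * register is optionally written and then polled (bounded by 'poll_wait'
 * jiffies) until the poll mask clears; the read registers are then sampled
 * 'read_addr_cnt' times into the minidump buffer. Returns
 * QLA_FUNCTION_FAILED if a poll times out.
 */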
4330 static int
4331 qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
4332         qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4333 {
4334         struct qla_hw_data *ha = vha->hw;
4335         uint32_t addr, r_addr, c_addr, t_r_addr;
4336         uint32_t i, k, loop_count, t_value, r_cnt, r_value;
4337         unsigned long p_wait, w_time, p_mask;
4338         uint32_t c_value_w, c_value_r;
4339         struct qla82xx_md_entry_cache *cache_hdr;
4340         int rval = QLA_FUNCTION_FAILED;
4341         uint32_t *data_ptr = *d_ptr;
4342
4343         cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
4344         loop_count = cache_hdr->op_count;
4345         r_addr = cache_hdr->read_addr;
4346         c_addr = cache_hdr->control_addr;
4347         c_value_w = cache_hdr->cache_ctrl.write_value;
4348
4349         t_r_addr = cache_hdr->tag_reg_addr;
4350         t_value = cache_hdr->addr_ctrl.init_tag_value;
4351         r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
4352         p_wait = cache_hdr->cache_ctrl.poll_wait;
4353         p_mask = cache_hdr->cache_ctrl.poll_mask;
4354
4355         for (i = 0; i < loop_count; i++) {
4356                 qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
4357                 if (c_value_w)
4358                         qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
4359
4360                 if (p_mask) {
4361                         w_time = jiffies + p_wait;
4362                         do {
4363                                 c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0);
4364                                 if ((c_value_r & p_mask) == 0)
4365                                         break;
4366                                 else if (time_after_eq(jiffies, w_time)) {
4367                                         /* capturing dump failed */
4368                                         ql_dbg(ql_dbg_p3p, vha, 0xb032,
4369                                             "c_value_r: 0x%x, poll_mask: 0x%lx, "
4370                                             "w_time: 0x%lx\n",
4371                                             c_value_r, p_mask, w_time);
4372                                         return rval;
4373                                 }
4374                         } while (1);
4375                 }
4376
4377                 addr = r_addr;
4378                 for (k = 0; k < r_cnt; k++) {
4379                         r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
4380                         *data_ptr++ = cpu_to_le32(r_value);
4381                         addr += cache_hdr->read_ctrl.read_addr_stride;
4382                 }
4383                 t_value += cache_hdr->addr_ctrl.tag_value_stride;
4384         }
4385         *d_ptr = data_ptr;
4386         return QLA_SUCCESS;
4387 }
4388
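/*
 * Capture L1 cache entries. Same walk as the L2 tag capture above, but the
 * control register write is unconditional and no completion polling is done.
 */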
4389 static void
4390 qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
4391         qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4392 {
4393         struct qla_hw_data *ha = vha->hw;
4394         uint32_t addr, r_addr, c_addr, t_r_addr;
4395         uint32_t i, k, loop_count, t_value, r_cnt, r_value;
4396         uint32_t c_value_w;
4397         struct qla82xx_md_entry_cache *cache_hdr;
4398         uint32_t *data_ptr = *d_ptr;
4399
4400         cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
4401         loop_count = cache_hdr->op_count;
4402         r_addr = cache_hdr->read_addr;
4403         c_addr = cache_hdr->control_addr;
4404         c_value_w = cache_hdr->cache_ctrl.write_value;
4405
4406         t_r_addr = cache_hdr->tag_reg_addr;
4407         t_value = cache_hdr->addr_ctrl.init_tag_value;
4408         r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
4409
4410         for (i = 0; i < loop_count; i++) {
4411                 qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
4412                 qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
4413                 addr = r_addr;
4414                 for (k = 0; k < r_cnt; k++) {
4415                         r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
4416                         *data_ptr++ = cpu_to_le32(r_value);
4417                         addr += cache_hdr->read_ctrl.read_addr_stride;
4418                 }
4419                 t_value += cache_hdr->addr_ctrl.tag_value_stride;
4420         }
4421         *d_ptr = data_ptr;
4422 }
4423
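/*
 * Capture hardware queue state: select each queue id in turn, read
 * 'read_addr_cnt' registers (stepping by 'read_addr_stride') into the
 * minidump buffer, then advance the id by 'queue_id_stride'.
 */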
4424 static void
4425 qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
4426         qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4427 {
4428         struct qla_hw_data *ha = vha->hw;
4429         uint32_t s_addr, r_addr;
4430         uint32_t r_stride, r_value, r_cnt, qid = 0;
4431         uint32_t i, k, loop_cnt;
4432         struct qla82xx_md_entry_queue *q_hdr;
4433         uint32_t *data_ptr = *d_ptr;
4434
4435         q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr;
4436         s_addr = q_hdr->select_addr;
4437         r_cnt = q_hdr->rd_strd.read_addr_cnt;
4438         r_stride = q_hdr->rd_strd.read_addr_stride;
4439         loop_cnt = q_hdr->op_count;
4440
4441         for (i = 0; i < loop_cnt; i++) {
4442                 qla82xx_md_rw_32(ha, s_addr, qid, 1);
4443                 r_addr = q_hdr->read_addr;
4444                 for (k = 0; k < r_cnt; k++) {
4445                         r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
4446                         *data_ptr++ = cpu_to_le32(r_value);
4447                         r_addr += r_stride;
4448                 }
4449                 qid += q_hdr->q_strd.queue_id_stride;
4450         }
4451         *d_ptr = data_ptr;
4452 }
4453
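/*
 * Capture a flash/ROM region through the direct ROM read window: the upper
 * address bits select the window, the lower 16 bits index into the read
 * base, and 'read_data_size' bytes are copied as dwords into the minidump
 * buffer.
 */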
4454 static void
4455 qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
4456         qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4457 {
4458         struct qla_hw_data *ha = vha->hw;
4459         uint32_t r_addr, r_value;
4460         uint32_t i, loop_cnt;
4461         struct qla82xx_md_entry_rdrom *rom_hdr;
4462         uint32_t *data_ptr = *d_ptr;
4463
4464         rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr;
4465         r_addr = rom_hdr->read_addr;
4466         loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
4467
4468         for (i = 0; i < loop_cnt; i++) {
4469                 qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
4470                     (r_addr & 0xFFFF0000), 1);
4471                 r_value = qla82xx_md_rw_32(ha,
4472                     MD_DIRECT_ROM_READ_BASE +
4473                     (r_addr & 0x0000FFFF), 0, 0);
4474                 *data_ptr++ = cpu_to_le32(r_value);
4475                 r_addr += sizeof(uint32_t);
4476         }
4477         *d_ptr = data_ptr;
4478 }
4479
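/*
 * Capture a block of adapter memory through the MIU test agent. The
 * template address must be 16-byte aligned and the size a multiple of
 * 16 bytes; each transaction is started via the agent control register,
 * polled for completion (up to MAX_CTL_CHECK reads), and yields four
 * dwords of data. Runs under hw_lock with interrupts disabled.
 */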
4480 static int
4481 qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
4482         qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4483 {
4484         struct qla_hw_data *ha = vha->hw;
4485         uint32_t r_addr, r_value, r_data;
4486         uint32_t i, j, loop_cnt;
4487         struct qla82xx_md_entry_rdmem *m_hdr;
4488         unsigned long flags;
4489         int rval = QLA_FUNCTION_FAILED;
4490         uint32_t *data_ptr = *d_ptr;
4491
4492         m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr;
4493         r_addr = m_hdr->read_addr;
4494         loop_cnt = m_hdr->read_data_size/16;
4495
4496         if (r_addr & 0xf) {
4497                 ql_log(ql_log_warn, vha, 0xb033,
4498                     "Read addr 0x%x not 16 bytes aligned\n", r_addr);
4499                 return rval;
4500         }
4501
4502         if (m_hdr->read_data_size % 16) {
4503                 ql_log(ql_log_warn, vha, 0xb034,
4504                     "Read data size [0x%x] not a multiple of 16 bytes\n",
4505                     m_hdr->read_data_size);
4506                 return rval;
4507         }
4508
4509         ql_dbg(ql_dbg_p3p, vha, 0xb035,
4510             "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
4511             __func__, r_addr, m_hdr->read_data_size, loop_cnt);
4512
4513         write_lock_irqsave(&ha->hw_lock, flags);
4514         for (i = 0; i < loop_cnt; i++) {
4515                 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
4516                 r_value = 0;
4517                 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
4518                 r_value = MIU_TA_CTL_ENABLE;
4519                 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
4520                 r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
4521                 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
4522
4523                 for (j = 0; j < MAX_CTL_CHECK; j++) {
4524                         r_value = qla82xx_md_rw_32(ha,
4525                             MD_MIU_TEST_AGT_CTRL, 0, 0);
4526                         if ((r_value & MIU_TA_CTL_BUSY) == 0)
4527                                 break;
4528                 }
4529
4530                 if (j >= MAX_CTL_CHECK) {
4531                         printk_ratelimited(KERN_ERR
4532                             "failed to read through agent\n");
4533                         write_unlock_irqrestore(&ha->hw_lock, flags);
4534                         return rval;
4535                 }
4536
4537                 for (j = 0; j < 4; j++) {
4538                         r_data = qla82xx_md_rw_32(ha,
4539                             MD_MIU_TEST_AGT_RDDATA[j], 0, 0);
4540                         *data_ptr++ = cpu_to_le32(r_data);
4541                 }
4542                 r_addr += 16;
4543         }
4544         write_unlock_irqrestore(&ha->hw_lock, flags);
4545         *d_ptr = data_ptr;
4546         return QLA_SUCCESS;
4547 }
4548
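/*
 * Validate the minidump template checksum: sum the template as dwords,
 * fold the carries, and return the one's complement. A nonzero return
 * indicates a corrupt template.
 */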
4549 static int
4550 qla82xx_validate_template_chksum(scsi_qla_host_t *vha)
4551 {
4552         struct qla_hw_data *ha = vha->hw;
4553         uint64_t chksum = 0;
4554         uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr;
4555         int count = ha->md_template_size/sizeof(uint32_t);
4556
4557         while (count-- > 0)
4558                 chksum += *d_ptr++;
4559         while (chksum >> 32)
4560                 chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32);
4561         return ~chksum;
4562 }
4563
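/*
 * Flag a template entry as skipped in its driver flags and note it in the
 * debug log; used for unsupported entry types and failed captures.
 */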
4564 static void
4565 qla82xx_mark_entry_skipped(scsi_qla_host_t *vha,
4566         qla82xx_md_entry_hdr_t *entry_hdr, int index)
4567 {
4568         entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
4569         ql_dbg(ql_dbg_p3p, vha, 0xb036,
4570             "Skipping entry[%d]: "
4571             "ETYPE[0x%x]-ELEVEL[0x%x]\n",
4572             index, entry_hdr->entry_type,
4573             entry_hdr->d_ctrl.entry_capture_mask);
4574 }
4575
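/*
 * Walk the firmware-provided minidump template and capture each entry into
 * the preallocated dump buffer. Entries whose capture mask does not
 * intersect ql2xmdcapmask are marked skipped; a failed control, memory or
 * L2 capture aborts the dump. On success the amount collected must match
 * the expected dump size, fw_dumped is set and a firmware-dump uevent is
 * posted.
 */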
4576 int
4577 qla82xx_md_collect(scsi_qla_host_t *vha)
4578 {
4579         struct qla_hw_data *ha = vha->hw;
4580         int no_entry_hdr = 0;
4581         qla82xx_md_entry_hdr_t *entry_hdr;
4582         struct qla82xx_md_template_hdr *tmplt_hdr;
4583         uint32_t *data_ptr;
4584         uint32_t total_data_size = 0, f_capture_mask, data_collected = 0;
4585         int i = 0, rval = QLA_FUNCTION_FAILED;
4586
4587         tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4588         data_ptr = (uint32_t *)ha->md_dump;
4589
4590         if (ha->fw_dumped) {
4591                 ql_log(ql_log_info, vha, 0xb037,
4592                     "Firmware dump already available to retrieve\n");
4593                 goto md_failed;
4594         }
4595
4596         ha->fw_dumped = 0;
4597
4598         if (!ha->md_tmplt_hdr || !ha->md_dump) {
4599                 ql_log(ql_log_warn, vha, 0xb038,
4600                     "Memory not allocated for minidump capture\n");
4601                 goto md_failed;
4602         }
4603
4604         if (qla82xx_validate_template_chksum(vha)) {
4605                 ql_log(ql_log_info, vha, 0xb039,
4606                     "Template checksum validation error\n");
4607                 goto md_failed;
4608         }
4609
4610         no_entry_hdr = tmplt_hdr->num_of_entries;
4611         ql_dbg(ql_dbg_p3p, vha, 0xb03a,
4612             "Number of entry headers in template: 0x%x\n", no_entry_hdr);
4613
4614         ql_dbg(ql_dbg_p3p, vha, 0xb03b,
4615             "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
4616
4617         f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
4618
4619         /* Validate whether required debug level is set */
4620         if ((f_capture_mask & 0x3) != 0x3) {
4621                 ql_log(ql_log_warn, vha, 0xb03c,
4622                     "Minimum required capture level not set in mask [0x%x]\n",
4623                     f_capture_mask);
4624                 goto md_failed;
4625         }
4626         tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
4627
4628         tmplt_hdr->driver_info[0] = vha->host_no;
4629         tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) |
4630             (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) |
4631             QLA_DRIVER_BETA_VER;
4632
4633         total_data_size = ha->md_dump_size;
4634
4635         ql_log(ql_log_info, vha, 0xb03d,
4636             "Total minidump data_size 0x%x to be captured\n", total_data_size);
4637
4638         /* Check whether template obtained is valid */
4639         if (tmplt_hdr->entry_type != QLA82XX_TLHDR) {
4640                 ql_log(ql_log_warn, vha, 0xb04e,
4641                     "Bad template header entry type: 0x%x obtained\n",
4642                     tmplt_hdr->entry_type);
4643                 goto md_failed;
4644         }
4645
4646         entry_hdr = (qla82xx_md_entry_hdr_t *)
4647             (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
4648
4649         /* Walk through the entry headers */
4650         for (i = 0; i < no_entry_hdr; i++) {
4651
4652                 if (data_collected > total_data_size) {
4653                         ql_log(ql_log_warn, vha, 0xb03e,
4654                             "Data collected [0x%x] exceeds minidump buffer size\n",
4655                             data_collected);
4656                         goto md_failed;
4657                 }
4658
4659                 if (!(entry_hdr->d_ctrl.entry_capture_mask &
4660                     ql2xmdcapmask)) {
4661                         entry_hdr->d_ctrl.driver_flags |=
4662                             QLA82XX_DBG_SKIPPED_FLAG;
4663                         ql_dbg(ql_dbg_p3p, vha, 0xb03f,
4664                             "Skipping entry[%d]: "
4665                             "ETYPE[0x%x]-ELEVEL[0x%x]\n",
4666                             i, entry_hdr->entry_type,
4667                             entry_hdr->d_ctrl.entry_capture_mask);
4668                         goto skip_nxt_entry;
4669                 }
4670
4671                 ql_dbg(ql_dbg_p3p, vha, 0xb040,
4672                     "[%s]: data ptr[%d]: %p, entry_hdr: %p\n"
4673                     "entry_type: 0x%x, capture_mask: 0x%x\n",
4674                     __func__, i, data_ptr, entry_hdr,
4675                     entry_hdr->entry_type,
4676                     entry_hdr->d_ctrl.entry_capture_mask);
4677
4678                 ql_dbg(ql_dbg_p3p, vha, 0xb041,
4679                     "Data collected: [0x%x], Dump size left:[0x%x]\n",
4680                     data_collected, (ha->md_dump_size - data_collected));
4681
4682                 /* Decode the entry type and take
4683                  * required action to capture debug data */
4684                 switch (entry_hdr->entry_type) {
4685                 case QLA82XX_RDEND:
4686                         qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4687                         break;
4688                 case QLA82XX_CNTRL:
4689                         rval = qla82xx_minidump_process_control(vha,
4690                             entry_hdr, &data_ptr);
4691                         if (rval != QLA_SUCCESS) {
4692                                 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4693                                 goto md_failed;
4694                         }
4695                         break;
4696                 case QLA82XX_RDCRB:
4697                         qla82xx_minidump_process_rdcrb(vha,
4698                             entry_hdr, &data_ptr);
4699                         break;
4700                 case QLA82XX_RDMEM:
4701                         rval = qla82xx_minidump_process_rdmem(vha,
4702                             entry_hdr, &data_ptr);
4703                         if (rval != QLA_SUCCESS) {
4704                                 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4705                                 goto md_failed;
4706                         }
4707                         break;
4708                 case QLA82XX_BOARD:
4709                 case QLA82XX_RDROM:
4710                         qla82xx_minidump_process_rdrom(vha,
4711                             entry_hdr, &data_ptr);
4712                         break;
4713                 case QLA82XX_L2DTG:
4714                 case QLA82XX_L2ITG:
4715                 case QLA82XX_L2DAT:
4716                 case QLA82XX_L2INS:
4717                         rval = qla82xx_minidump_process_l2tag(vha,
4718                             entry_hdr, &data_ptr);
4719                         if (rval != QLA_SUCCESS) {
4720                                 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4721                                 goto md_failed;
4722                         }
4723                         break;
4724                 case QLA82XX_L1DAT:
4725                 case QLA82XX_L1INS:
4726                         qla82xx_minidump_process_l1cache(vha,
4727                             entry_hdr, &data_ptr);
4728                         break;
4729                 case QLA82XX_RDOCM:
4730                         qla82xx_minidump_process_rdocm(vha,
4731                             entry_hdr, &data_ptr);
4732                         break;
4733                 case QLA82XX_RDMUX:
4734                         qla82xx_minidump_process_rdmux(vha,
4735                             entry_hdr, &data_ptr);
4736                         break;
4737                 case QLA82XX_QUEUE:
4738                         qla82xx_minidump_process_queue(vha,
4739                             entry_hdr, &data_ptr);
4740                         break;
4741                 case QLA82XX_RDNOP:
4742                 default:
4743                         qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4744                         break;
4745                 }
4746
4747                 ql_dbg(ql_dbg_p3p, vha, 0xb042,
4748                     "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr);
4749
4750                 data_collected = (uint8_t *)data_ptr -
4751                     (uint8_t *)ha->md_dump;
4752 skip_nxt_entry:
4753                 entry_hdr = (qla82xx_md_entry_hdr_t *)
4754                     (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
4755         }
4756
4757         if (data_collected != total_data_size) {
4758                 ql_log(ql_log_warn, vha, 0xb043,
4759                     "MiniDump data mismatch: Data collected: [0x%x], "
4760                     "total_data_size: [0x%x]\n",
4761                     data_collected, total_data_size);
4762                 goto md_failed;
4763         }
4764
4765         ql_log(ql_log_info, vha, 0xb044,
4766             "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
4767             vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
4768         ha->fw_dumped = 1;
4769         qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
4770
4771 md_failed:
4772         return rval;
4773 }
4774
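/*
 * Size and allocate the minidump capture buffer: clamp the driver capture
 * mask to the firmware default when it is out of range, sum the per-level
 * capture sizes from the template for the enabled bits, and vmalloc the
 * buffer. Returns 0 on success, 1 if the buffer already exists or the
 * allocation fails.
 */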
4775 int
4776 qla82xx_md_alloc(scsi_qla_host_t *vha)
4777 {
4778         struct qla_hw_data *ha = vha->hw;
4779         int i, k;
4780         struct qla82xx_md_template_hdr *tmplt_hdr;
4781
4782         tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4783
4784         if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) {
4785                 ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF;
4786                 ql_log(ql_log_info, vha, 0xb045,
4787                     "Forcing driver capture mask to firmware default capture mask: 0x%x.\n",
4788                     ql2xmdcapmask);
4789         }
4790
4791         for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) {
4792                 if (i & ql2xmdcapmask)
4793                         ha->md_dump_size += tmplt_hdr->capture_size_array[k];
4794         }
4795
4796         if (ha->md_dump) {
4797                 ql_log(ql_log_warn, vha, 0xb046,
4798                     "Firmware dump previously allocated.\n");
4799                 return 1;
4800         }
4801
4802         ha->md_dump = vmalloc(ha->md_dump_size);
4803         if (ha->md_dump == NULL) {
4804                 ql_log(ql_log_warn, vha, 0xb047,
4805                     "Unable to allocate memory for Minidump size "
4806                     "(0x%x).\n", ha->md_dump_size);
4807                 return 1;
4808         }
4809         return 0;
4810 }
4811
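/*
 * Release the minidump resources: the DMA-coherent template header and the
 * vmalloc'd capture buffer, if either was allocated.
 */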
4812 void
4813 qla82xx_md_free(scsi_qla_host_t *vha)
4814 {
4815         struct qla_hw_data *ha = vha->hw;
4816
4817         /* Release the template header allocated */
4818         if (ha->md_tmplt_hdr) {
4819                 ql_log(ql_log_info, vha, 0xb048,
4820                     "Free MiniDump template: %p, size (%d KB)\n",
4821                     ha->md_tmplt_hdr, ha->md_template_size / 1024);
4822                 dma_free_coherent(&ha->pdev->dev, ha->md_template_size,
4823                     ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
4824                 ha->md_tmplt_hdr = NULL;
4825         }
4826
4827         /* Release the template data buffer allocated */
4828         if (ha->md_dump) {
4829                 ql_log(ql_log_info, vha, 0xb049,
4830                     "Free MiniDump memory: %p, size (%d KB)\n",
4831                     ha->md_dump, ha->md_dump_size / 1024);
4832                 vfree(ha->md_dump);
4833                 ha->md_dump_size = 0;
4834                 ha->md_dump = NULL;
4835         }
4836 }
4837
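/*
 * Prepare minidump support: query the template size from firmware, fetch
 * the template, and allocate the capture buffer. If the buffer allocation
 * fails, the template memory is released again.
 */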
4838 void
4839 qla82xx_md_prep(scsi_qla_host_t *vha)
4840 {
4841         struct qla_hw_data *ha = vha->hw;
4842         int rval;
4843
4844         /* Get Minidump template size */
4845         rval = qla82xx_md_get_template_size(vha);
4846         if (rval == QLA_SUCCESS) {
4847                 ql_log(ql_log_info, vha, 0xb04a,
4848                     "MiniDump Template size obtained (%d KB)\n",
4849                     ha->md_template_size / 1024);
4850
4851                 /* Get Minidump template */
4852                 rval = qla82xx_md_get_template(vha);
4853                 if (rval == QLA_SUCCESS) {
4854                         ql_dbg(ql_dbg_p3p, vha, 0xb04b,
4855                             "MiniDump Template obtained\n");
4856
4857                         /* Allocate memory for minidump */
4858                         rval = qla82xx_md_alloc(vha);
4859                         if (rval == QLA_SUCCESS)
4860                                 ql_log(ql_log_info, vha, 0xb04c,
4861                                     "MiniDump memory allocated (%d KB)\n",
4862                                     ha->md_dump_size / 1024);
4863                         else {
4864                                 ql_log(ql_log_info, vha, 0xb04d,
4865                                     "Free MiniDump template: %p, size: (%d KB)\n",
4866                                     ha->md_tmplt_hdr,
4867                                     ha->md_template_size / 1024);
4868                                 dma_free_coherent(&ha->pdev->dev,
4869                                     ha->md_template_size,
4870                                     ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
4871                                 ha->md_tmplt_hdr = NULL;
4872                         }
4873
4874                 }
4875         }
4876 }
4877
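/*
 * Turn the beacon LED on via the firmware set-LED-config mailbox command,
 * serialized under the IDC lock.
 */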
4878 int
4879 qla82xx_beacon_on(struct scsi_qla_host *vha)
4880 {
4881         int rval;
4882         struct qla_hw_data *ha = vha->hw;
4883
4884         qla82xx_idc_lock(ha);
4885         rval = qla82xx_mbx_beacon_ctl(vha, 1);
4886
4887         if (rval) {
4888                 ql_log(ql_log_warn, vha, 0xb050,
4889                     "mbx set led config failed in %s\n", __func__);
4890                 goto exit;
4891         }
4892         ha->beacon_blink_led = 1;
4893 exit:
4894         qla82xx_idc_unlock(ha);
4895         return rval;
4896 }
4897
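/*
 * Turn the beacon LED off via the same set-LED-config mailbox command,
 * again under the IDC lock.
 */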
4898 int
4899 qla82xx_beacon_off(struct scsi_qla_host *vha)
4900 {
4901         int rval;
4902         struct qla_hw_data *ha = vha->hw;
4903
4904         qla82xx_idc_lock(ha);
4905         rval = qla82xx_mbx_beacon_ctl(vha, 0);
4906
4907         if (rval) {
4908                 ql_log(ql_log_warn, vha, 0xb051,
4909                     "mbx set led config failed in %s\n", __func__);
4910                 goto exit;
4911         }
4912         ha->beacon_blink_led = 0;
4913 exit:
4914         qla82xx_idc_unlock(ha);
4915         return rval;
4916 }