[pandora-kernel.git] drivers/scsi/qla2xxx/qla_nx.c (commit 9175e847b93ade89277b43480a8d42ac51301dd1)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2010 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include <linux/delay.h>
9 #include <linux/pci.h>
10
11 #define MASK(n)                 ((1ULL<<(n))-1)
12 #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
13         ((addr >> 25) & 0x3ff))
14 #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
15         ((addr >> 25) & 0x3ff))
16 #define MS_WIN(addr) (addr & 0x0ffc0000)
17 #define QLA82XX_PCI_MN_2M   (0)
18 #define QLA82XX_PCI_MS_2M   (0x80000)
19 #define QLA82XX_PCI_OCM0_2M (0xc0000)
20 #define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
21 #define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
22 #define BLOCK_PROTECT_BITS 0x0F
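/*
 * Helper macro summary (derived from the definitions above):
 *   MASK(n)            - low n-bit mask.
 *   MN_WIN()/OCM_WIN() - fold a DDR-net/OCM side address into the value
 *                        programmed into the memory window CRB register.
 *   MS_WIN()           - window value for QDR (MS) side addresses.
 *   GET_MEM_OFFS_2M()  - low 18 bits of an address, i.e. the offset used
 *                        within the window once it has been programmed.
 */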
23
24 /* CRB window related */
25 #define CRB_BLK(off)    ((off >> 20) & 0x3f)
26 #define CRB_SUBBLK(off) ((off >> 16) & 0xf)
27 #define CRB_WINDOW_2M   (0x130060)
28 #define QLA82XX_PCI_CAMQM_2M_END        (0x04800800UL)
29 #define CRB_HI(off)     ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
30                         ((off) & 0xf0000))
31 #define QLA82XX_PCI_CAMQM_2M_BASE       (0x000ff800UL)
32 #define CRB_INDIRECT_2M (0x1e0000UL)
33
34 #define MAX_CRB_XFORM 60
35 static unsigned long crb_addr_xform[MAX_CRB_XFORM];
36 int qla82xx_crb_table_initialized;
37
38 #define qla82xx_crb_addr_transform(name) \
39         (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
40         QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
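/*
 * Each invocation below records, for CRB block <name>, the block's hub/agent
 * id in the top 12 bits (31:20) of its crb_addr_xform[] slot.
 * qla82xx_decode_crb_addr() later matches an internal CRB address against
 * these values to recover the block index in the 128M PCI map.
 */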
41
42 static void qla82xx_crb_addr_transform_setup(void)
43 {
44         qla82xx_crb_addr_transform(XDMA);
45         qla82xx_crb_addr_transform(TIMR);
46         qla82xx_crb_addr_transform(SRE);
47         qla82xx_crb_addr_transform(SQN3);
48         qla82xx_crb_addr_transform(SQN2);
49         qla82xx_crb_addr_transform(SQN1);
50         qla82xx_crb_addr_transform(SQN0);
51         qla82xx_crb_addr_transform(SQS3);
52         qla82xx_crb_addr_transform(SQS2);
53         qla82xx_crb_addr_transform(SQS1);
54         qla82xx_crb_addr_transform(SQS0);
55         qla82xx_crb_addr_transform(RPMX7);
56         qla82xx_crb_addr_transform(RPMX6);
57         qla82xx_crb_addr_transform(RPMX5);
58         qla82xx_crb_addr_transform(RPMX4);
59         qla82xx_crb_addr_transform(RPMX3);
60         qla82xx_crb_addr_transform(RPMX2);
61         qla82xx_crb_addr_transform(RPMX1);
62         qla82xx_crb_addr_transform(RPMX0);
63         qla82xx_crb_addr_transform(ROMUSB);
64         qla82xx_crb_addr_transform(SN);
65         qla82xx_crb_addr_transform(QMN);
66         qla82xx_crb_addr_transform(QMS);
67         qla82xx_crb_addr_transform(PGNI);
68         qla82xx_crb_addr_transform(PGND);
69         qla82xx_crb_addr_transform(PGN3);
70         qla82xx_crb_addr_transform(PGN2);
71         qla82xx_crb_addr_transform(PGN1);
72         qla82xx_crb_addr_transform(PGN0);
73         qla82xx_crb_addr_transform(PGSI);
74         qla82xx_crb_addr_transform(PGSD);
75         qla82xx_crb_addr_transform(PGS3);
76         qla82xx_crb_addr_transform(PGS2);
77         qla82xx_crb_addr_transform(PGS1);
78         qla82xx_crb_addr_transform(PGS0);
79         qla82xx_crb_addr_transform(PS);
80         qla82xx_crb_addr_transform(PH);
81         qla82xx_crb_addr_transform(NIU);
82         qla82xx_crb_addr_transform(I2Q);
83         qla82xx_crb_addr_transform(EG);
84         qla82xx_crb_addr_transform(MN);
85         qla82xx_crb_addr_transform(MS);
86         qla82xx_crb_addr_transform(CAS2);
87         qla82xx_crb_addr_transform(CAS1);
88         qla82xx_crb_addr_transform(CAS0);
89         qla82xx_crb_addr_transform(CAM);
90         qla82xx_crb_addr_transform(C2C1);
91         qla82xx_crb_addr_transform(C2C0);
92         qla82xx_crb_addr_transform(SMB);
93         qla82xx_crb_addr_transform(OCM0);
94         /*
95          * Used only in P3; just define it for P2 also.
96          */
97         qla82xx_crb_addr_transform(I2C0);
98
99         qla82xx_crb_table_initialized = 1;
100 }
101
102 struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
103         {{{0, 0,         0,         0} } },
104         {{{1, 0x0100000, 0x0102000, 0x120000},
105         {1, 0x0110000, 0x0120000, 0x130000},
106         {1, 0x0120000, 0x0122000, 0x124000},
107         {1, 0x0130000, 0x0132000, 0x126000},
108         {1, 0x0140000, 0x0142000, 0x128000},
109         {1, 0x0150000, 0x0152000, 0x12a000},
110         {1, 0x0160000, 0x0170000, 0x110000},
111         {1, 0x0170000, 0x0172000, 0x12e000},
112         {0, 0x0000000, 0x0000000, 0x000000},
113         {0, 0x0000000, 0x0000000, 0x000000},
114         {0, 0x0000000, 0x0000000, 0x000000},
115         {0, 0x0000000, 0x0000000, 0x000000},
116         {0, 0x0000000, 0x0000000, 0x000000},
117         {0, 0x0000000, 0x0000000, 0x000000},
118         {1, 0x01e0000, 0x01e0800, 0x122000},
119         {0, 0x0000000, 0x0000000, 0x000000} } } },
120         {{{1, 0x0200000, 0x0210000, 0x180000} } },
121         {{{0, 0,         0,         0} } },
122         {{{1, 0x0400000, 0x0401000, 0x169000} } },
123         {{{1, 0x0500000, 0x0510000, 0x140000} } },
124         {{{1, 0x0600000, 0x0610000, 0x1c0000} } },
125         {{{1, 0x0700000, 0x0704000, 0x1b8000} } },
126         {{{1, 0x0800000, 0x0802000, 0x170000},
127         {0, 0x0000000, 0x0000000, 0x000000},
128         {0, 0x0000000, 0x0000000, 0x000000},
129         {0, 0x0000000, 0x0000000, 0x000000},
130         {0, 0x0000000, 0x0000000, 0x000000},
131         {0, 0x0000000, 0x0000000, 0x000000},
132         {0, 0x0000000, 0x0000000, 0x000000},
133         {0, 0x0000000, 0x0000000, 0x000000},
134         {0, 0x0000000, 0x0000000, 0x000000},
135         {0, 0x0000000, 0x0000000, 0x000000},
136         {0, 0x0000000, 0x0000000, 0x000000},
137         {0, 0x0000000, 0x0000000, 0x000000},
138         {0, 0x0000000, 0x0000000, 0x000000},
139         {0, 0x0000000, 0x0000000, 0x000000},
140         {0, 0x0000000, 0x0000000, 0x000000},
141         {1, 0x08f0000, 0x08f2000, 0x172000} } },
142         {{{1, 0x0900000, 0x0902000, 0x174000},
143         {0, 0x0000000, 0x0000000, 0x000000},
144         {0, 0x0000000, 0x0000000, 0x000000},
145         {0, 0x0000000, 0x0000000, 0x000000},
146         {0, 0x0000000, 0x0000000, 0x000000},
147         {0, 0x0000000, 0x0000000, 0x000000},
148         {0, 0x0000000, 0x0000000, 0x000000},
149         {0, 0x0000000, 0x0000000, 0x000000},
150         {0, 0x0000000, 0x0000000, 0x000000},
151         {0, 0x0000000, 0x0000000, 0x000000},
152         {0, 0x0000000, 0x0000000, 0x000000},
153         {0, 0x0000000, 0x0000000, 0x000000},
154         {0, 0x0000000, 0x0000000, 0x000000},
155         {0, 0x0000000, 0x0000000, 0x000000},
156         {0, 0x0000000, 0x0000000, 0x000000},
157         {1, 0x09f0000, 0x09f2000, 0x176000} } },
158         {{{0, 0x0a00000, 0x0a02000, 0x178000},
159         {0, 0x0000000, 0x0000000, 0x000000},
160         {0, 0x0000000, 0x0000000, 0x000000},
161         {0, 0x0000000, 0x0000000, 0x000000},
162         {0, 0x0000000, 0x0000000, 0x000000},
163         {0, 0x0000000, 0x0000000, 0x000000},
164         {0, 0x0000000, 0x0000000, 0x000000},
165         {0, 0x0000000, 0x0000000, 0x000000},
166         {0, 0x0000000, 0x0000000, 0x000000},
167         {0, 0x0000000, 0x0000000, 0x000000},
168         {0, 0x0000000, 0x0000000, 0x000000},
169         {0, 0x0000000, 0x0000000, 0x000000},
170         {0, 0x0000000, 0x0000000, 0x000000},
171         {0, 0x0000000, 0x0000000, 0x000000},
172         {0, 0x0000000, 0x0000000, 0x000000},
173         {1, 0x0af0000, 0x0af2000, 0x17a000} } },
174         {{{0, 0x0b00000, 0x0b02000, 0x17c000},
175         {0, 0x0000000, 0x0000000, 0x000000},
176         {0, 0x0000000, 0x0000000, 0x000000},
177         {0, 0x0000000, 0x0000000, 0x000000},
178         {0, 0x0000000, 0x0000000, 0x000000},
179         {0, 0x0000000, 0x0000000, 0x000000},
180         {0, 0x0000000, 0x0000000, 0x000000},
181         {0, 0x0000000, 0x0000000, 0x000000},
182         {0, 0x0000000, 0x0000000, 0x000000},
183         {0, 0x0000000, 0x0000000, 0x000000},
184         {0, 0x0000000, 0x0000000, 0x000000},
185         {0, 0x0000000, 0x0000000, 0x000000},
186         {0, 0x0000000, 0x0000000, 0x000000},
187         {0, 0x0000000, 0x0000000, 0x000000},
188         {0, 0x0000000, 0x0000000, 0x000000},
189         {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
190         {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
191         {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
192         {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
193         {{{1, 0x0f00000, 0x0f01000, 0x164000} } },
194         {{{0, 0x1000000, 0x1004000, 0x1a8000} } },
195         {{{1, 0x1100000, 0x1101000, 0x160000} } },
196         {{{1, 0x1200000, 0x1201000, 0x161000} } },
197         {{{1, 0x1300000, 0x1301000, 0x162000} } },
198         {{{1, 0x1400000, 0x1401000, 0x163000} } },
199         {{{1, 0x1500000, 0x1501000, 0x165000} } },
200         {{{1, 0x1600000, 0x1601000, 0x166000} } },
201         {{{0, 0,         0,         0} } },
202         {{{0, 0,         0,         0} } },
203         {{{0, 0,         0,         0} } },
204         {{{0, 0,         0,         0} } },
205         {{{0, 0,         0,         0} } },
206         {{{0, 0,         0,         0} } },
207         {{{1, 0x1d00000, 0x1d10000, 0x190000} } },
208         {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
209         {{{1, 0x1f00000, 0x1f10000, 0x150000} } },
210         {{{0} } },
211         {{{1, 0x2100000, 0x2102000, 0x120000},
212         {1, 0x2110000, 0x2120000, 0x130000},
213         {1, 0x2120000, 0x2122000, 0x124000},
214         {1, 0x2130000, 0x2132000, 0x126000},
215         {1, 0x2140000, 0x2142000, 0x128000},
216         {1, 0x2150000, 0x2152000, 0x12a000},
217         {1, 0x2160000, 0x2170000, 0x110000},
218         {1, 0x2170000, 0x2172000, 0x12e000},
219         {0, 0x0000000, 0x0000000, 0x000000},
220         {0, 0x0000000, 0x0000000, 0x000000},
221         {0, 0x0000000, 0x0000000, 0x000000},
222         {0, 0x0000000, 0x0000000, 0x000000},
223         {0, 0x0000000, 0x0000000, 0x000000},
224         {0, 0x0000000, 0x0000000, 0x000000},
225         {0, 0x0000000, 0x0000000, 0x000000},
226         {0, 0x0000000, 0x0000000, 0x000000} } },
227         {{{1, 0x2200000, 0x2204000, 0x1b0000} } },
228         {{{0} } },
229         {{{0} } },
230         {{{0} } },
231         {{{0} } },
232         {{{0} } },
233         {{{1, 0x2800000, 0x2804000, 0x1a4000} } },
234         {{{1, 0x2900000, 0x2901000, 0x16b000} } },
235         {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
236         {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
237         {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
238         {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
239         {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
240         {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
241         {{{1, 0x3000000, 0x3000400, 0x1adc00} } },
242         {{{0, 0x3100000, 0x3104000, 0x1a8000} } },
243         {{{1, 0x3200000, 0x3204000, 0x1d4000} } },
244         {{{1, 0x3300000, 0x3304000, 0x1a0000} } },
245         {{{0} } },
246         {{{1, 0x3500000, 0x3500400, 0x1ac000} } },
247         {{{1, 0x3600000, 0x3600400, 0x1ae000} } },
248         {{{1, 0x3700000, 0x3700400, 0x1ae400} } },
249         {{{1, 0x3800000, 0x3804000, 0x1d0000} } },
250         {{{1, 0x3900000, 0x3904000, 0x1b4000} } },
251         {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
252         {{{0} } },
253         {{{0} } },
254         {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
255         {{{1, 0x3e00000, 0x3e01000, 0x167000} } },
256         {{{1, 0x3f00000, 0x3f01000, 0x168000} } }
257 };
258
259 /*
260  * top 12 bits of crb internal address (hub, agent)
261  */
262 unsigned qla82xx_crb_hub_agt[64] = {
263         0,
264         QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
265         QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
266         QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
267         0,
268         QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
269         QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
270         QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
271         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
272         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
273         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
274         QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
275         QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
276         QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
277         QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
278         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
279         QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
280         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
281         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
282         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
283         QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
284         QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
285         QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
286         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
287         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
288         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
289         QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
290         0,
291         QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
292         QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
293         0,
294         QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
295         0,
296         QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
297         QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
298         0,
299         0,
300         0,
301         0,
302         0,
303         QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
304         0,
305         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
306         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
307         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
308         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
309         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
310         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
311         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
312         QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
313         QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
314         QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
315         0,
316         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
317         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
318         QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
319         QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
320         0,
321         QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
322         QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
323         QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
324         0,
325         QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
326         0,
327 };
328
329 /* Device states */
330 char *qdev_state[] = {
331         "Unknown",
332         "Cold",
333         "Initializing",
334         "Ready",
335         "Need Reset",
336         "Need Quiescent",
337         "Failed",
338         "Quiescent",
339 };
340
341 /*
342  * In: 'off' is offset from CRB space in 128M pci map
343  * Out: 'off' is 2M pci map addr
344  * side effect: lock crb window
345  */
346 static void
347 qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
348 {
349         u32 win_read;
350
351         ha->crb_win = CRB_HI(*off);
352         writel(ha->crb_win,
353                 (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
354
355         /* Read back value to make sure write has gone through before trying
356          * to use it.
357          */
358         win_read = RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
359         if (win_read != ha->crb_win) {
360                 DEBUG2(qla_printk(KERN_INFO, ha,
361                     "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
362                     "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
363         }
364         *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
365 }
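/*
 * Note on the window value written above: CRB_HI(off) places the hub/agent
 * id (qla82xx_crb_hub_agt[CRB_BLK(off)]) in bits 31:20 and keeps bits 19:16
 * of the 128M-map offset.  Once the window register has been programmed,
 * only the low 16 bits of *off are retained and the access is routed
 * through the CRB_INDIRECT_2M region of the 2M BAR.
 */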
366
367 static inline unsigned long
368 qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
369 {
370         /* See if we are currently pointing to the region we want to use next */
371         if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
372                 /* No need to change window. PCIX and PCIE regs are
373                  * present in both windows.
374                  */
375                 return off;
376         }
377
378         if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
379                 /* We are in first CRB window */
380                 if (ha->curr_window != 0)
381                         WARN_ON(1);
382                 return off;
383         }
384
385         if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
386                 /* We are in second CRB window */
387                 off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
388
389                 if (ha->curr_window != 1)
390                         return off;
391
392                 /* We are in the QM or direct access
393                  * register region - do nothing
394                  */
395                 if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
396                         (off < QLA82XX_PCI_CAMQM_MAX))
397                         return off;
398         }
399         /* strange address given */
400         qla_printk(KERN_WARNING, ha,
401                 "%s: Warning: qla82xx_pci_set_crbwindow called with"
402                 " an unknown address (%llx)\n", QLA2XXX_DRIVER_NAME, off);
403         return off;
404 }
405
406 static int
407 qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
408 {
409         struct crb_128M_2M_sub_block_map *m;
410
411         if (*off >= QLA82XX_CRB_MAX)
412                 return -1;
413
414         if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
415                 *off = (*off - QLA82XX_PCI_CAMQM) +
416                     QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
417                 return 0;
418         }
419
420         if (*off < QLA82XX_PCI_CRBSPACE)
421                 return -1;
422
423         *off -= QLA82XX_PCI_CRBSPACE;
424
425         /* Try direct map */
426         m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
427
428         if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
429                 *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
430                 return 0;
431         }
432         /* Not in direct map, use crb window */
433         return 1;
434 }
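/*
 * Return values for qla82xx_pci_get_crb_addr_2M():
 *   -1 - offset is outside the CRB space handled here.
 *    0 - *off has been translated in place to a directly mapped BAR address.
 *    1 - offset is valid but not directly mapped; the caller must program
 *        the CRB window via qla82xx_pci_set_crbwindow_2M().
 */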
435
436 #define CRB_WIN_LOCK_TIMEOUT 100000000
437 static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
438 {
439         int done = 0, timeout = 0;
440
441         while (!done) {
442                 /* acquire semaphore7 from PCI HW block */
443                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
444                 if (done == 1)
445                         break;
446                 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
447                         return -1;
448                 timeout++;
449         }
450         qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
451         return 0;
452 }
453
454 int
455 qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
456 {
457         unsigned long flags = 0;
458         int rv;
459
460         rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
461
462         BUG_ON(rv == -1);
463
464         if (rv == 1) {
465                 write_lock_irqsave(&ha->hw_lock, flags);
466                 qla82xx_crb_win_lock(ha);
467                 qla82xx_pci_set_crbwindow_2M(ha, &off);
468         }
469
470         writel(data, (void __iomem *)off);
471
472         if (rv == 1) {
473                 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
474                 write_unlock_irqrestore(&ha->hw_lock, flags);
475         }
476         return 0;
477 }
478
479 int
480 qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
481 {
482         unsigned long flags = 0;
483         int rv;
484         u32 data;
485
486         rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
487
488         BUG_ON(rv == -1);
489
490         if (rv == 1) {
491                 write_lock_irqsave(&ha->hw_lock, flags);
492                 qla82xx_crb_win_lock(ha);
493                 qla82xx_pci_set_crbwindow_2M(ha, &off);
494         }
495         data = RD_REG_DWORD((void __iomem *)off);
496
497         if (rv == 1) {
498                 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
499                 write_unlock_irqrestore(&ha->hw_lock, flags);
500         }
501         return data;
502 }
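/*
 * Typical usage of the two CRB accessors above (illustrative only; the
 * register name is just an example from the IDC state handling):
 *
 *   u32 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
 *   qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
 *
 * Both helpers translate the CRB offset themselves and, when the address is
 * not directly mapped, take ha->hw_lock and the CRB window semaphore.
 */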
503
504 #define IDC_LOCK_TIMEOUT 100000000
505 int qla82xx_idc_lock(struct qla_hw_data *ha)
506 {
507         int i;
508         int done = 0, timeout = 0;
509
510         while (!done) {
511                 /* acquire semaphore5 from PCI HW block */
512                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
513                 if (done == 1)
514                         break;
515                 if (timeout >= IDC_LOCK_TIMEOUT)
516                         return -1;
517
518                 timeout++;
519
520                 /* Yield CPU */
521                 if (!in_interrupt())
522                         schedule();
523                 else {
524                         for (i = 0; i < 20; i++)
525                                 cpu_relax();
526                 }
527         }
528
529         return 0;
530 }
531
532 void qla82xx_idc_unlock(struct qla_hw_data *ha)
533 {
534         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
535 }
536
537 /*  PCI Windowing for DDR regions.  */
538 #define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
539         (((addr) <= (high)) && ((addr) >= (low)))
540 /*
541  * Check memory access boundary.
542  * Used by the test agent; supports DDR access only for now.
543  */
544 static unsigned long
545 qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
546         unsigned long long addr, int size)
547 {
548         if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
549                 QLA82XX_ADDR_DDR_NET_MAX) ||
550                 !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
551                 QLA82XX_ADDR_DDR_NET_MAX) ||
552                 ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
553                         return 0;
554         else
555                 return 1;
556 }
557
558 int qla82xx_pci_set_window_warning_count;
559
560 static unsigned long
561 qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
562 {
563         int window;
564         u32 win_read;
565
566         if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
567                 QLA82XX_ADDR_DDR_NET_MAX)) {
568                 /* DDR network side */
569                 window = MN_WIN(addr);
570                 ha->ddr_mn_window = window;
571                 qla82xx_wr_32(ha,
572                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
573                 win_read = qla82xx_rd_32(ha,
574                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
575                 if ((win_read << 17) != window) {
576                         qla_printk(KERN_WARNING, ha,
577                             "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
578                             __func__, window, win_read);
579                 }
580                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
581         } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
582                 QLA82XX_ADDR_OCM0_MAX)) {
583                 unsigned int temp1;
584                 if ((addr & 0x00ff800) == 0xff800) {
585                         qla_printk(KERN_WARNING, ha,
586                             "%s: QM access not handled.\n", __func__);
587                         addr = -1UL;
588                 }
589                 window = OCM_WIN(addr);
590                 ha->ddr_mn_window = window;
591                 qla82xx_wr_32(ha,
592                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
593                 win_read = qla82xx_rd_32(ha,
594                         ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
595                 temp1 = ((window & 0x1FF) << 7) |
596                     ((window & 0x0FFFE0000) >> 17);
597                 if (win_read != temp1) {
598                         qla_printk(KERN_WARNING, ha,
599                             "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
600                             __func__, temp1, win_read);
601                 }
602                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
603
604         } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
605                 QLA82XX_P3_ADDR_QDR_NET_MAX)) {
606                 /* QDR network side */
607                 window = MS_WIN(addr);
608                 ha->qdr_sn_window = window;
609                 qla82xx_wr_32(ha,
610                         ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
611                 win_read = qla82xx_rd_32(ha,
612                         ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
613                 if (win_read != window) {
614                         qla_printk(KERN_WARNING, ha,
615                             "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
616                             __func__, window, win_read);
617                 }
618                 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
619         } else {
620                 /*
621                  * Peg GDB frequently accesses memory that doesn't exist;
622                  * this limits the chatter so debugging isn't slowed down.
623                  */
624                 if ((qla82xx_pci_set_window_warning_count++ < 8) ||
625                     (qla82xx_pci_set_window_warning_count%64 == 0)) {
626                         qla_printk(KERN_WARNING, ha,
627                             "%s: Warning:%s Unknown address range!\n", __func__,
628                             QLA2XXX_DRIVER_NAME);
629                 }
630                 addr = -1UL;
631         }
632         return addr;
633 }
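/*
 * qla82xx_pci_set_window() returns the 2M-BAR-relative address to use for
 * the given DDR-net, OCM0 or QDR side address after programming the
 * matching window register, or -1UL when the address falls in no known
 * range.  The selected window is remembered in ha->ddr_mn_window /
 * ha->qdr_sn_window.
 */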
634
635 /* check if address is in the same window as the previous access */
636 static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
637         unsigned long long addr)
638 {
639         int                     window;
640         unsigned long long      qdr_max;
641
642         qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
643
644         /* DDR network side */
645         if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
646                 QLA82XX_ADDR_DDR_NET_MAX))
647                 BUG();
648         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
649                 QLA82XX_ADDR_OCM0_MAX))
650                 return 1;
651         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
652                 QLA82XX_ADDR_OCM1_MAX))
653                 return 1;
654         else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
655                 /* QDR network side */
656                 window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
657                 if (ha->qdr_sn_window == window)
658                         return 1;
659         }
660         return 0;
661 }
662
663 static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
664         u64 off, void *data, int size)
665 {
666         unsigned long   flags;
667         void           *addr = NULL;
668         int             ret = 0;
669         u64             start;
670         uint8_t         *mem_ptr = NULL;
671         unsigned long   mem_base;
672         unsigned long   mem_page;
673
674         write_lock_irqsave(&ha->hw_lock, flags);
675
676         /*
677          * If attempting to access unknown address or straddle hw windows,
678          * do not access.
679          */
680         start = qla82xx_pci_set_window(ha, off);
681         if ((start == -1UL) ||
682                 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
683                 write_unlock_irqrestore(&ha->hw_lock, flags);
684                 qla_printk(KERN_ERR, ha,
685                         "%s out of bound pci memory access. "
686                         "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
687                 return -1;
688         }
689
690         write_unlock_irqrestore(&ha->hw_lock, flags);
691         mem_base = pci_resource_start(ha->pdev, 0);
692         mem_page = start & PAGE_MASK;
693         /* Map two pages whenever user tries to access addresses in two
694          * consecutive pages.
695          */
696         if (mem_page != ((start + size - 1) & PAGE_MASK))
697                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
698         else
699                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
700         if (!mem_ptr) {
701                 *(u8  *)data = 0;
702                 return -1;
703         }
704         addr = mem_ptr;
705         addr += start & (PAGE_SIZE - 1);
706         write_lock_irqsave(&ha->hw_lock, flags);
707
708         switch (size) {
709         case 1:
710                 *(u8  *)data = readb(addr);
711                 break;
712         case 2:
713                 *(u16 *)data = readw(addr);
714                 break;
715         case 4:
716                 *(u32 *)data = readl(addr);
717                 break;
718         case 8:
719                 *(u64 *)data = readq(addr);
720                 break;
721         default:
722                 ret = -1;
723                 break;
724         }
725         write_unlock_irqrestore(&ha->hw_lock, flags);
726
727         if (mem_ptr)
728                 iounmap(mem_ptr);
729         return ret;
730 }
731
732 static int
733 qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
734         u64 off, void *data, int size)
735 {
736         unsigned long   flags;
737         void           *addr = NULL;
738         int             ret = 0;
739         u64             start;
740         uint8_t         *mem_ptr = NULL;
741         unsigned long   mem_base;
742         unsigned long   mem_page;
743
744         write_lock_irqsave(&ha->hw_lock, flags);
745
746         /*
747          * If attempting to access unknown address or straddle hw windows,
748          * do not access.
749          */
750         start = qla82xx_pci_set_window(ha, off);
751         if ((start == -1UL) ||
752                 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
753                 write_unlock_irqrestore(&ha->hw_lock, flags);
754                 qla_printk(KERN_ERR, ha,
755                         "%s out of bound pci memory access. "
756                         "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
757                 return -1;
758         }
759
760         write_unlock_irqrestore(&ha->hw_lock, flags);
761         mem_base = pci_resource_start(ha->pdev, 0);
762         mem_page = start & PAGE_MASK;
763         /* Map two pages whenever user tries to access addresses in two
764          * consecutive pages.
765          */
766         if (mem_page != ((start + size - 1) & PAGE_MASK))
767                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
768         else
769                 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
770         if (!mem_ptr)
771                 return -1;
772
773         addr = mem_ptr;
774         addr += start & (PAGE_SIZE - 1);
775         write_lock_irqsave(&ha->hw_lock, flags);
776
777         switch (size) {
778         case 1:
779                 writeb(*(u8  *)data, addr);
780                 break;
781         case 2:
782                 writew(*(u16 *)data, addr);
783                 break;
784         case 4:
785                 writel(*(u32 *)data, addr);
786                 break;
787         case 8:
788                 writeq(*(u64 *)data, addr);
789                 break;
790         default:
791                 ret = -1;
792                 break;
793         }
794         write_unlock_irqrestore(&ha->hw_lock, flags);
795         if (mem_ptr)
796                 iounmap(mem_ptr);
797         return ret;
798 }
799
800 #define MTU_FUDGE_FACTOR 100
801 static unsigned long
802 qla82xx_decode_crb_addr(unsigned long addr)
803 {
804         int i;
805         unsigned long base_addr, offset, pci_base;
806
807         if (!qla82xx_crb_table_initialized)
808                 qla82xx_crb_addr_transform_setup();
809
810         pci_base = ADDR_ERROR;
811         base_addr = addr & 0xfff00000;
812         offset = addr & 0x000fffff;
813
814         for (i = 0; i < MAX_CRB_XFORM; i++) {
815                 if (crb_addr_xform[i] == base_addr) {
816                         pci_base = i << 20;
817                         break;
818                 }
819         }
820         if (pci_base == ADDR_ERROR)
821                 return pci_base;
822         return pci_base + offset;
823 }
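/*
 * qla82xx_decode_crb_addr() is the inverse of the transform set up in
 * qla82xx_crb_addr_transform_setup(): the top 12 bits of the internal CRB
 * address are looked up in crb_addr_xform[] and replaced by the matching
 * block index (shifted into bits 31:20), while the low 20 bits are kept.
 * ADDR_ERROR is returned when no block matches.
 */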
824
825 static long rom_max_timeout = 100;
826 static long qla82xx_rom_lock_timeout = 100;
827
828 static int
829 qla82xx_rom_lock(struct qla_hw_data *ha)
830 {
831         int done = 0, timeout = 0;
832
833         while (!done) {
834                 /* acquire semaphore2 from PCI HW block */
835                 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
836                 if (done == 1)
837                         break;
838                 if (timeout >= qla82xx_rom_lock_timeout)
839                         return -1;
840                 timeout++;
841         }
842         qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
843         return 0;
844 }
845
846 static int
847 qla82xx_wait_rom_busy(struct qla_hw_data *ha)
848 {
849         long timeout = 0;
850         long done = 0;
851
852         while (done == 0) {
853                 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
854                 done &= 4;
855                 timeout++;
856                 if (timeout >= rom_max_timeout) {
857                         DEBUG(qla_printk(KERN_INFO, ha,
858                                 "%s: Timeout reached waiting for rom busy\n",
859                                 QLA2XXX_DRIVER_NAME));
860                         return -1;
861                 }
862         }
863         return 0;
864 }
865
866 static int
867 qla82xx_wait_rom_done(struct qla_hw_data *ha)
868 {
869         long timeout = 0;
870         long done = 0;
871
872         while (done == 0) {
873                 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
874                 done &= 2;
875                 timeout++;
876                 if (timeout >= rom_max_timeout) {
877                         DEBUG(qla_printk(KERN_INFO, ha,
878                                 "%s: Timeout reached waiting for rom done\n",
879                                 QLA2XXX_DRIVER_NAME));
880                         return -1;
881                 }
882         }
883         return 0;
884 }
885
886 static int
887 qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
888 {
889         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
890         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
891         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
892         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
893         qla82xx_wait_rom_busy(ha);
894         if (qla82xx_wait_rom_done(ha)) {
895                 qla_printk(KERN_WARNING, ha,
896                         "%s: Error waiting for rom done\n",
897                         QLA2XXX_DRIVER_NAME);
898                 return -1;
899         }
900         /* Reset abyte_cnt and dummy_byte_cnt */
901         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
902         udelay(10);
903         cond_resched();
904         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
905         *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
906         return 0;
907 }
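/*
 * qla82xx_do_rom_fast_read() issues a serial-flash fast read (opcode 0xb)
 * through the ROMUSB block: program the address, address/dummy byte counts
 * and the opcode, then wait for the busy and done bits in
 * QLA82XX_ROMUSB_GLB_STATUS before collecting the word from ROM_RDATA.
 * Callers must already hold the flash semaphore; qla82xx_rom_fast_read()
 * below is the locking wrapper.
 */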
908
909 static int
910 qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
911 {
912         int ret, loops = 0;
913
914         while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
915                 udelay(100);
916                 schedule();
917                 loops++;
918         }
919         if (loops >= 50000) {
920                 qla_printk(KERN_INFO, ha,
921                         "%s: qla82xx_rom_lock failed\n",
922                         QLA2XXX_DRIVER_NAME);
923                 return -1;
924         }
925         ret = qla82xx_do_rom_fast_read(ha, addr, valp);
926         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
927         return ret;
928 }
929
930 static int
931 qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
932 {
933         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
934         qla82xx_wait_rom_busy(ha);
935         if (qla82xx_wait_rom_done(ha)) {
936                 qla_printk(KERN_WARNING, ha,
937                     "Error waiting for rom done\n");
938                 return -1;
939         }
940         *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
941         return 0;
942 }
943
944 static int
945 qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
946 {
947         long timeout = 0;
948         uint32_t done = 1;
949         uint32_t val;
950         int ret = 0;
951
952         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
953         while ((done != 0) && (ret == 0)) {
954                 ret = qla82xx_read_status_reg(ha, &val);
955                 done = val & 1;
956                 timeout++;
957                 udelay(10);
958                 cond_resched();
959                 if (timeout >= 50000) {
960                         qla_printk(KERN_WARNING, ha,
961                             "Timeout reached waiting for write finish\n");
962                         return -1;
963                 }
964         }
965         return ret;
966 }
967
968 static int
969 qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
970 {
971         uint32_t val;
972         qla82xx_wait_rom_busy(ha);
973         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
974         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
975         qla82xx_wait_rom_busy(ha);
976         if (qla82xx_wait_rom_done(ha))
977                 return -1;
978         if (qla82xx_read_status_reg(ha, &val) != 0)
979                 return -1;
980         if ((val & 2) != 2)
981                 return -1;
982         return 0;
983 }
984
985 static int
986 qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
987 {
988         if (qla82xx_flash_set_write_enable(ha))
989                 return -1;
990         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
991         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
992         if (qla82xx_wait_rom_done(ha)) {
993                 qla_printk(KERN_WARNING, ha,
994                     "Error waiting for rom done\n");
995                 return -1;
996         }
997         return qla82xx_flash_wait_write_finish(ha);
998 }
999
1000 static int
1001 qla82xx_write_disable_flash(struct qla_hw_data *ha)
1002 {
1003         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
1004         if (qla82xx_wait_rom_done(ha)) {
1005                 qla_printk(KERN_WARNING, ha,
1006                     "Error waiting for rom done\n");
1007                 return -1;
1008         }
1009         return 0;
1010 }
1011
1012 static int
1013 ql82xx_rom_lock_d(struct qla_hw_data *ha)
1014 {
1015         int loops = 0;
1016         while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1017                 udelay(100);
1018                 cond_resched();
1019                 loops++;
1020         }
1021         if (loops >= 50000) {
1022                 qla_printk(KERN_WARNING, ha, "ROM lock failed\n");
1023                 return -1;
1024         }
1025         return 0;
1026 }
1027
1028 static int
1029 qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1030         uint32_t data)
1031 {
1032         int ret = 0;
1033
1034         ret = ql82xx_rom_lock_d(ha);
1035         if (ret < 0) {
1036                 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
1037                 return ret;
1038         }
1039
1040         if (qla82xx_flash_set_write_enable(ha))
1041                 goto done_write;
1042
1043         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
1044         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
1045         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
1046         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
1047         qla82xx_wait_rom_busy(ha);
1048         if (qla82xx_wait_rom_done(ha)) {
1049                 qla_printk(KERN_WARNING, ha,
1050                         "Error waiting for rom done\n");
1051                 ret = -1;
1052                 goto done_write;
1053         }
1054
1055         ret = qla82xx_flash_wait_write_finish(ha);
1056
1057 done_write:
1058         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
1059         return ret;
1060 }
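/*
 * qla82xx_write_flash_dword() programs a single 32-bit word: take the flash
 * semaphore, set write enable, load WDATA/ADDRESS, issue the page-program
 * opcode (M25P_INSTR_PP) and poll the flash status register until the write
 * completes.  The semaphore is released on every exit path via done_write.
 */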
1061
1062 /* This routine does the CRB initialization sequence
1063  * to put the ISP into an operational state
1064  */
1065 static int
1066 qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1067 {
1068         int addr, val;
1069         int i;
1070         struct crb_addr_pair *buf;
1071         unsigned long off;
1072         unsigned offset, n;
1073         struct qla_hw_data *ha = vha->hw;
1074
1075         struct crb_addr_pair {
1076                 long addr;
1077                 long data;
1078         };
1079
1080         /* Halt all the individual PEGs and other blocks of the ISP */
1081         qla82xx_rom_lock(ha);
1082         if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
1083                 /* don't reset CAM block on reset */
1084                 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
1085         else
1086                 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
1087         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
1088
1089         /* Read the signature value from the flash.
1090          * Offset 0: Contains signature (0xcafecafe)
1091          * Offset 4: Offset and number of addr/value pairs
1092          * present in the CRB initialization sequence
1093          */
1094         if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1095             qla82xx_rom_fast_read(ha, 4, &n) != 0) {
1096                 qla_printk(KERN_WARNING, ha,
1097                     "[ERROR] Reading crb_init area: n: %08x\n", n);
1098                 return -1;
1099         }
1100
1101         /* Offset in flash = lower 16 bits
1102          * Number of entries = upper 16 bits
1103          */
1104         offset = n & 0xffffU;
1105         n = (n >> 16) & 0xffffU;
1106
1107         /* number of addr/value pairs should not exceed 1024 entries */
1108         if (n >= 1024) {
1109                 qla_printk(KERN_WARNING, ha,
1110                     "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
1111                     QLA2XXX_DRIVER_NAME, __func__, n);
1112                 return -1;
1113         }
1114
1115         qla_printk(KERN_INFO, ha,
1116             "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n);
1117
1118         buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
1119         if (buf == NULL) {
1120                 qla_printk(KERN_WARNING, ha,
1121                     "%s: [ERROR] Unable to allocate memory.\n",
1122                     QLA2XXX_DRIVER_NAME);
1123                 return -1;
1124         }
1125
1126         for (i = 0; i < n; i++) {
1127                 if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
1128                     qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
1129                         kfree(buf);
1130                         return -1;
1131                 }
1132
1133                 buf[i].addr = addr;
1134                 buf[i].data = val;
1135         }
1136
1137         for (i = 0; i < n; i++) {
1138                 /* Translate internal CRB initialization
1139                  * address to PCI bus address
1140                  */
1141                 off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
1142                     QLA82XX_PCI_CRBSPACE;
1143                 /* Not all CRB addr/value pairs are written;
1144                  * some of them are skipped
1145                  */
1146
1147                 /* skipping cold reboot MAGIC */
1148                 if (off == QLA82XX_CAM_RAM(0x1fc))
1149                         continue;
1150
1151                 /* do not reset PCI */
1152                 if (off == (ROMUSB_GLB + 0xbc))
1153                         continue;
1154
1155                 /* skip core clock, so that firmware can increase the clock */
1156                 if (off == (ROMUSB_GLB + 0xc8))
1157                         continue;
1158
1159                 /* skip the function enable register */
1160                 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
1161                         continue;
1162
1163                 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
1164                         continue;
1165
1166                 if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
1167                         continue;
1168
1169                 if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
1170                         continue;
1171
1172                 if (off == ADDR_ERROR) {
1173                         qla_printk(KERN_WARNING, ha,
1174                             "%s: [ERROR] Unknown addr: 0x%08lx\n",
1175                             QLA2XXX_DRIVER_NAME, buf[i].addr);
1176                         continue;
1177                 }
1178
1179                 qla82xx_wr_32(ha, off, buf[i].data);
1180
1181                 /* The ISP requires a much bigger delay to settle down,
1182                  * else reads of crb_window return 0xffffffff
1183                  */
1184                 if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
1185                         msleep(1000);
1186
1187                 /* The ISP requires a millisecond delay between
1188                  * successive CRB register updates
1189                  */
1190                 msleep(1);
1191         }
1192
1193         kfree(buf);
1194
1195         /* Resetting the data and instruction cache */
1196         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
1197         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
1198         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
1199
1200         /* Clear all protocol processing engines */
1201         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
1202         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
1203         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
1204         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
1205         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
1206         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
1207         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
1208         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
1209         return 0;
1210 }
1211
1212 static int
1213 qla82xx_check_for_bad_spd(struct qla_hw_data *ha)
1214 {
1215         u32 val = 0;
1216         val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS);
1217         val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
1218         if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
1219                 qla_printk(KERN_INFO, ha,
1220                         "Memory DIMM SPD not programmed. "
1221                         "Assumed valid.\n");
1222                 return 1;
1223         } else if (val) {
1224                 qla_printk(KERN_INFO, ha,
1225                         "Memory DIMM type incorrect. Info: %08X.\n", val);
1226                 return 2;
1227         }
1228         return 0;
1229 }
1230
1231 static int
1232 qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1233                 u64 off, void *data, int size)
1234 {
1235         int i, j, ret = 0, loop, sz[2], off0;
1236         int scale, shift_amount, startword;
1237         uint32_t temp;
1238         uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
1239
1240         /*
1241          * If not MN, go check for MS or invalid.
1242          */
1243         if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1244                 mem_crb = QLA82XX_CRB_QDR_NET;
1245         else {
1246                 mem_crb = QLA82XX_CRB_DDR_NET;
1247                 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1248                         return qla82xx_pci_mem_write_direct(ha,
1249                             off, data, size);
1250         }
1251
1252         off0 = off & 0x7;
1253         sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1254         sz[1] = size - sz[0];
1255
1256         off8 = off & 0xfffffff0;
1257         loop = (((off & 0xf) + size - 1) >> 4) + 1;
1258         shift_amount = 4;
1259         scale = 2;
1260         startword = (off & 0xf)/8;
1261
1262         for (i = 0; i < loop; i++) {
1263                 if (qla82xx_pci_mem_read_2M(ha, off8 +
1264                     (i << shift_amount), &word[i * scale], 8))
1265                         return -1;
1266         }
1267
1268         switch (size) {
1269         case 1:
1270                 tmpw = *((uint8_t *)data);
1271                 break;
1272         case 2:
1273                 tmpw = *((uint16_t *)data);
1274                 break;
1275         case 4:
1276                 tmpw = *((uint32_t *)data);
1277                 break;
1278         case 8:
1279         default:
1280                 tmpw = *((uint64_t *)data);
1281                 break;
1282         }
1283
1284         if (sz[0] == 8) {
1285                 word[startword] = tmpw;
1286         } else {
1287                 word[startword] &=
1288                         ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1289                 word[startword] |= tmpw << (off0 * 8);
1290         }
1291         if (sz[1] != 0) {
1292                 word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1293                 word[startword+1] |= tmpw >> (sz[0] * 8);
1294         }
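        /*
         * Example of the splice above (illustrative): a 2-byte write whose
         * offset has (off & 0xf) == 2 gives off0 = 2, sz[0] = 2, sz[1] = 0
         * and startword = 0, so only bytes 2-3 of word[0] are replaced with
         * tmpw; the remaining bytes keep the values read back earlier.
         */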
1295
1296         /*
1297          * don't lock here - qla82xx_wr_32() takes the lock when needed; netxen legacy:
1298          * write_lock_irqsave(&adapter->adapter_lock, flags);
1299          * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1300          */
1301         for (i = 0; i < loop; i++) {
1302                 temp = off8 + (i << shift_amount);
1303                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1304                 temp = 0;
1305                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1306                 temp = word[i * scale] & 0xffffffff;
1307                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1308                 temp = (word[i * scale] >> 32) & 0xffffffff;
1309                 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1310                 temp = word[i*scale + 1] & 0xffffffff;
1311                 qla82xx_wr_32(ha, mem_crb +
1312                     MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
1313                 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1314                 qla82xx_wr_32(ha, mem_crb +
1315                     MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
1316
1317                 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1318                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1319                 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1320                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1321
1322                 for (j = 0; j < MAX_CTL_CHECK; j++) {
1323                         temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1324                         if ((temp & MIU_TA_CTL_BUSY) == 0)
1325                                 break;
1326                 }
1327
1328                 if (j >= MAX_CTL_CHECK) {
1329                         if (printk_ratelimit())
1330                                 dev_err(&ha->pdev->dev,
1331                                     "failed to write through agent\n");
1332                         ret = -1;
1333                         break;
1334                 }
1335         }
1336
1337         return ret;
1338 }
1339
1340 static int
1341 qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
1342 {
1343         int  i;
1344         long size = 0;
1345         long flashaddr = ha->flt_region_bootload << 2;
1346         long memaddr = BOOTLD_START;
1347         u64 data;
1348         u32 high, low;
1349         size = (IMAGE_START - BOOTLD_START) / 8;
1350
1351         for (i = 0; i < size; i++) {
1352                 if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
1353                     (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
1354                         return -1;
1355                 }
1356                 data = ((u64)high << 32) | low;
1357                 qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
1358                 flashaddr += 8;
1359                 memaddr += 8;
1360
1361                 if (i % 0x1000 == 0)
1362                         msleep(1);
1363         }
1364         udelay(100);
1365         read_lock(&ha->hw_lock);
1366         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1367         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1368         read_unlock(&ha->hw_lock);
1369         return 0;
1370 }
1371
1372 int
1373 qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1374                 u64 off, void *data, int size)
1375 {
1376         int i, j = 0, k, start, end, loop, sz[2], off0[2];
1377         int           shift_amount;
1378         uint32_t      temp;
1379         uint64_t      off8, val, mem_crb, word[2] = {0, 0};
1380
1381         /*
1382          * If not MN, go check for MS or invalid.
1383          */
1384
1385         if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1386                 mem_crb = QLA82XX_CRB_QDR_NET;
1387         else {
1388                 mem_crb = QLA82XX_CRB_DDR_NET;
1389                 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1390                         return qla82xx_pci_mem_read_direct(ha,
1391                             off, data, size);
1392         }
1393
1394         off8 = off & 0xfffffff0;
1395         off0[0] = off & 0xf;
1396         sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
1397         shift_amount = 4;
1398         loop = ((off0[0] + size - 1) >> shift_amount) + 1;
1399         off0[1] = 0;
1400         sz[1] = size - sz[0];
1401
1402         /*
1403          * don't lock here - the CRB accessors take the lock when needed; netxen legacy:
1404          * write_lock_irqsave(&adapter->adapter_lock, flags);
1405          * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1406          */
1407
1408         for (i = 0; i < loop; i++) {
1409                 temp = off8 + (i << shift_amount);
1410                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
1411                 temp = 0;
1412                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
1413                 temp = MIU_TA_CTL_ENABLE;
1414                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1415                 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
1416                 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1417
1418                 for (j = 0; j < MAX_CTL_CHECK; j++) {
1419                         temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1420                         if ((temp & MIU_TA_CTL_BUSY) == 0)
1421                                 break;
1422                 }
1423
1424                 if (j >= MAX_CTL_CHECK) {
1425                         if (printk_ratelimit())
1426                                 dev_err(&ha->pdev->dev,
1427                                     "failed to read through agent\n");
1428                         break;
1429                 }
1430
1431                 start = off0[i] >> 2;
1432                 end   = (off0[i] + sz[i] - 1) >> 2;
1433                 for (k = start; k <= end; k++) {
1434                         temp = qla82xx_rd_32(ha,
1435                                         mem_crb + MIU_TEST_AGT_RDDATA(k));
1436                         word[i] |= ((uint64_t)temp << (32 * (k & 1)));
1437                 }
1438         }
1439
1440         /*
1441          * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
1442          * write_unlock_irqrestore(&adapter->adapter_lock, flags);
1443          */
1444
1445         if (j >= MAX_CTL_CHECK)
1446                 return -1;
1447
1448         if ((off0[0] & 7) == 0) {
1449                 val = word[0];
1450         } else {
1451                 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
1452                         ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
1453         }
1454
1455         switch (size) {
1456         case 1:
1457                 *(uint8_t  *)data = val;
1458                 break;
1459         case 2:
1460                 *(uint16_t *)data = val;
1461                 break;
1462         case 4:
1463                 *(uint32_t *)data = val;
1464                 break;
1465         case 8:
1466                 *(uint64_t *)data = val;
1467                 break;
1468         }
1469         return 0;
1470 }
1471
1472
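/*
 * qla82xx_get_table_desc() - Look up @section in the unified ROM image
 * directory and return its table descriptor, or NULL if not found.
 */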
1473 static struct qla82xx_uri_table_desc *
1474 qla82xx_get_table_desc(const u8 *unirom, int section)
1475 {
1476         uint32_t i;
1477         struct qla82xx_uri_table_desc *directory =
1478                 (struct qla82xx_uri_table_desc *)&unirom[0];
1479         __le32 offset;
1480         __le32 tab_type;
1481         __le32 entries = cpu_to_le32(directory->num_entries);
1482
1483         for (i = 0; i < entries; i++) {
1484                 offset = cpu_to_le32(directory->findex) +
1485                     (i * cpu_to_le32(directory->entry_size));
1486                 tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
1487
1488                 if (tab_type == section)
1489                         return (struct qla82xx_uri_table_desc *)&unirom[offset];
1490         }
1491
1492         return NULL;
1493 }
1494
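/*
 * qla82xx_get_data_desc() - Return the data descriptor for @section,
 * selected via the index stored at @idx_offset in this board's product
 * table entry, or NULL if the section table cannot be found.
 */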
1495 static struct qla82xx_uri_data_desc *
1496 qla82xx_get_data_desc(struct qla_hw_data *ha,
1497         u32 section, u32 idx_offset)
1498 {
1499         const u8 *unirom = ha->hablob->fw->data;
1500         int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
1501         struct qla82xx_uri_table_desc *tab_desc = NULL;
1502         __le32 offset;
1503
1504         tab_desc = qla82xx_get_table_desc(unirom, section);
1505         if (!tab_desc)
1506                 return NULL;
1507
1508         offset = cpu_to_le32(tab_desc->findex) +
1509             (cpu_to_le32(tab_desc->entry_size) * idx);
1510
1511         return (struct qla82xx_uri_data_desc *)&unirom[offset];
1512 }
1513
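/*
 * qla82xx_get_bootld_offset() - Return a pointer to the boot loader image
 * within the firmware blob; unified ROM images carry the offset in their
 * bootld data descriptor, flash-format images start at BOOTLD_START.
 */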
1514 static u8 *
1515 qla82xx_get_bootld_offset(struct qla_hw_data *ha)
1516 {
1517         u32 offset = BOOTLD_START;
1518         struct qla82xx_uri_data_desc *uri_desc = NULL;
1519
1520         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1521                 uri_desc = qla82xx_get_data_desc(ha,
1522                     QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
1523                 if (uri_desc)
1524                         offset = cpu_to_le32(uri_desc->findex);
1525         }
1526
1527         return (u8 *)&ha->hablob->fw->data[offset];
1528 }
1529
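/*
 * qla82xx_get_fw_size() - Return the firmware image size, taken from the
 * URI data descriptor or from the fixed FW_SIZE_OFFSET field in the blob.
 */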
1530 static __le32
1531 qla82xx_get_fw_size(struct qla_hw_data *ha)
1532 {
1533         struct qla82xx_uri_data_desc *uri_desc = NULL;
1534
1535         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1536                 uri_desc =  qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1537                     QLA82XX_URI_FIRMWARE_IDX_OFF);
1538                 if (uri_desc)
1539                         return cpu_to_le32(uri_desc->size);
1540         }
1541
1542         return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
1543 }
1544
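/*
 * qla82xx_get_fw_offs() - Return a pointer to the firmware image within
 * the blob, using the URI data descriptor when one is available.
 */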
1545 static u8 *
1546 qla82xx_get_fw_offs(struct qla_hw_data *ha)
1547 {
1548         u32 offset = IMAGE_START;
1549         struct qla82xx_uri_data_desc *uri_desc = NULL;
1550
1551         if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1552                 uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1553                         QLA82XX_URI_FIRMWARE_IDX_OFF);
1554                 if (uri_desc)
1555                         offset = cpu_to_le32(uri_desc->findex);
1556         }
1557
1558         return (u8 *)&ha->hablob->fw->data[offset];
1559 }
1560
1561 /* PCI related functions */
1562 char *
1563 qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
1564 {
1565         int pcie_reg;
1566         struct qla_hw_data *ha = vha->hw;
1567         char lwstr[6];
1568         uint16_t lnk;
1569
1570         pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
1571         pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
1572         ha->link_width = (lnk >> 4) & 0x3f;
1573
1574         strcpy(str, "PCIe (");
1575         strcat(str, "2.5Gb/s ");
1576         snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
1577         strcat(str, lwstr);
1578         return str;
1579 }
1580
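/*
 * qla82xx_pci_region_offset() - Return the byte offset of PCI region
 * @region; region 1 begins just past the MSI-X table.
 */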
1581 int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
1582 {
1583         unsigned long val = 0;
1584         u32 control;
1585
1586         switch (region) {
1587         case 0:
1588                 val = 0;
1589                 break;
1590         case 1:
1591                 pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
1592                 val = control + QLA82XX_MSIX_TBL_SPACE;
1593                 break;
1594         }
1595         return val;
1596 }
1597
1598
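/*
 * qla82xx_iospace_config() - Reserve PCI regions, map the register window
 * (BAR 0) and set up the doorbell read/write pointers used for request
 * queue updates.
 */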
1599 int
1600 qla82xx_iospace_config(struct qla_hw_data *ha)
1601 {
1602         uint32_t len = 0;
1603
1604         if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
1605                 qla_printk(KERN_WARNING, ha,
1606                         "Failed to reserve selected regions (%s)\n",
1607                         pci_name(ha->pdev));
1608                 goto iospace_error_exit;
1609         }
1610
1611         /* Use MMIO operations for all accesses. */
1612         if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1613                 qla_printk(KERN_ERR, ha,
1614                         "region #0 not an MMIO resource (%s), aborting\n",
1615                         pci_name(ha->pdev));
1616                 goto iospace_error_exit;
1617         }
1618
1619         len = pci_resource_len(ha->pdev, 0);
1620         ha->nx_pcibase =
1621             (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
1622         if (!ha->nx_pcibase) {
1623                 qla_printk(KERN_ERR, ha,
1624                     "cannot remap pcibase MMIO (%s), aborting\n",
1625                     pci_name(ha->pdev));
1626                 pci_release_regions(ha->pdev);
1627                 goto iospace_error_exit;
1628         }
1629
1630         /* Mapping of IO base pointer */
1631         ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
1632             0xbc000 + (ha->pdev->devfn << 11));
1633
1634         if (!ql2xdbwr) {
1635                 ha->nxdb_wr_ptr =
1636                     (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
1637                     (ha->pdev->devfn << 12)), 4);
1638                 if (!ha->nxdb_wr_ptr) {
1639                         qla_printk(KERN_ERR, ha,
1640                             "cannot remap MMIO (%s), aborting\n",
1641                             pci_name(ha->pdev));
1642                         pci_release_regions(ha->pdev);
1643                         goto iospace_error_exit;
1644                 }
1645
1646                 /* Mapping of the doorbell read pointer
1647                  * (the doorbell write pointer was remapped above).
1648                  */
1649                 ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
1650                     (ha->pdev->devfn * 8);
1651         } else {
1652                 ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
1653                         QLA82XX_CAMRAM_DB1 :
1654                         QLA82XX_CAMRAM_DB2);
1655         }
1656
1657         ha->max_req_queues = ha->max_rsp_queues = 1;
1658         ha->msix_count = ha->max_rsp_queues + 1;
1659         return 0;
1660
1661 iospace_error_exit:
1662         return -ENOMEM;
1663 }
1664
1665 /* GS related functions */
1666
1667 /* Initialization related functions */
1668
1669 /**
1670  * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
1671  * @vha: HA context
1672  *
1673  * Returns 0 on success.
1674  */
1675 int
1676 qla82xx_pci_config(scsi_qla_host_t *vha)
1677 {
1678         struct qla_hw_data *ha = vha->hw;
1679         int ret;
1680
1681         pci_set_master(ha->pdev);
1682         ret = pci_set_mwi(ha->pdev);
1683         ha->chip_revision = ha->pdev->revision;
1684         return 0;
1685 }
1686
1687 /**
1688  * qla82xx_reset_chip() - Quiesce the ISP82xx by disabling interrupts.
1689  * @vha: HA context
1690  *
1691  * The actual chip reset is handled separately for the ISP82xx.
1692  */
1693 void
1694 qla82xx_reset_chip(scsi_qla_host_t *vha)
1695 {
1696         struct qla_hw_data *ha = vha->hw;
1697         ha->isp_ops->disable_intrs(ha);
1698 }
1699
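/*
 * qla82xx_config_rings() - Program the request/response ring parameters
 * into the initialization control block and zero the ring index registers.
 */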
1700 void qla82xx_config_rings(struct scsi_qla_host *vha)
1701 {
1702         struct qla_hw_data *ha = vha->hw;
1703         struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1704         struct init_cb_81xx *icb;
1705         struct req_que *req = ha->req_q_map[0];
1706         struct rsp_que *rsp = ha->rsp_q_map[0];
1707
1708         /* Setup ring parameters in initialization control block. */
1709         icb = (struct init_cb_81xx *)ha->init_cb;
1710         icb->request_q_outpointer = __constant_cpu_to_le16(0);
1711         icb->response_q_inpointer = __constant_cpu_to_le16(0);
1712         icb->request_q_length = cpu_to_le16(req->length);
1713         icb->response_q_length = cpu_to_le16(rsp->length);
1714         icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1715         icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1716         icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1717         icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1718
1719         WRT_REG_DWORD((unsigned long  __iomem *)&reg->req_q_out[0], 0);
1720         WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_in[0], 0);
1721         WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_out[0], 0);
1722 }
1723
1724 void qla82xx_reset_adapter(struct scsi_qla_host *vha)
1725 {
1726         struct qla_hw_data *ha = vha->hw;
1727         vha->flags.online = 0;
1728         qla2x00_try_to_stop_firmware(vha);
1729         ha->isp_ops->disable_intrs(ha);
1730 }
1731
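/*
 * qla82xx_fw_load_from_blob() - Copy the boot loader and firmware images
 * from the request_firmware() blob into adapter memory, then write the
 * BDINFO magic and reset registers so the firmware starts initializing.
 */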
1732 static int
1733 qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
1734 {
1735         u64 *ptr64;
1736         u32 i, flashaddr, size;
1737         __le64 data;
1738
1739         size = (IMAGE_START - BOOTLD_START) / 8;
1740
1741         ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
1742         flashaddr = BOOTLD_START;
1743
1744         for (i = 0; i < size; i++) {
1745                 data = cpu_to_le64(ptr64[i]);
1746                 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1747                         return -EIO;
1748                 flashaddr += 8;
1749         }
1750
1751         flashaddr = FLASH_ADDR_START;
1752         size = (__force u32)qla82xx_get_fw_size(ha) / 8;
1753         ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
1754
1755         for (i = 0; i < size; i++) {
1756                 data = cpu_to_le64(ptr64[i]);
1757
1758                 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1759                         return -EIO;
1760                 flashaddr += 8;
1761         }
1762         udelay(100);
1763
1764         /* Write a magic value to the CAMRAM register
1765          * at a specified offset to indicate that all
1766          * of the data has been written and the
1767          * firmware can begin initialization.
1768          */
1769         qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);
1770
1771         read_lock(&ha->hw_lock);
1772         qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1773         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1774         read_unlock(&ha->hw_lock);
1775         return 0;
1776 }
1777
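/*
 * qla82xx_set_product_offset() - Scan the unified ROM image product table
 * for an entry matching this chip revision (and the hardcoded mn_present
 * flag) and record its offset in ha->file_prd_off.
 */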
1778 static int
1779 qla82xx_set_product_offset(struct qla_hw_data *ha)
1780 {
1781         struct qla82xx_uri_table_desc *ptab_desc = NULL;
1782         const uint8_t *unirom = ha->hablob->fw->data;
1783         uint32_t i;
1784         __le32 entries;
1785         __le32 flags, file_chiprev, offset;
1786         uint8_t chiprev = ha->chip_revision;
1787         /* Hardcoding mn_present flag for P3P */
1788         int mn_present = 0;
1789         uint32_t flagbit;
1790
1791         ptab_desc = qla82xx_get_table_desc(unirom,
1792                  QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
1793         if (!ptab_desc)
1794                 return -1;
1795
1796         entries = cpu_to_le32(ptab_desc->num_entries);
1797
1798         for (i = 0; i < entries; i++) {
1799                 offset = cpu_to_le32(ptab_desc->findex) +
1800                         (i * cpu_to_le32(ptab_desc->entry_size));
1801                 flags = cpu_to_le32(*((int *)&unirom[offset] +
1802                         QLA82XX_URI_FLAGS_OFF));
1803                 file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
1804                         QLA82XX_URI_CHIP_REV_OFF));
1805
1806                 flagbit = mn_present ? 1 : 2;
1807
1808                 if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
1809                         ha->file_prd_off = offset;
1810                         return 0;
1811                 }
1812         }
1813         return -1;
1814 }
1815
1816 int
1817 qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
1818 {
1819         __le32 val;
1820         uint32_t min_size;
1821         struct qla_hw_data *ha = vha->hw;
1822         const struct firmware *fw = ha->hablob->fw;
1823
1824         ha->fw_type = fw_type;
1825
1826         if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1827                 if (qla82xx_set_product_offset(ha))
1828                         return -EINVAL;
1829
1830                 min_size = QLA82XX_URI_FW_MIN_SIZE;
1831         } else {
1832                 val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
1833                 if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
1834                         return -EINVAL;
1835
1836                 min_size = QLA82XX_FW_MIN_SIZE;
1837         }
1838
1839         if (fw->size < min_size)
1840                 return -EINVAL;
1841         return 0;
1842 }
1843
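/*
 * qla82xx_check_cmdpeg_state() - Poll CRB_CMDPEG_STATE (up to 60 times,
 * 500 ms apart) until the command PEG reports initialization complete.
 */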
1844 static int
1845 qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1846 {
1847         u32 val = 0;
1848         int retries = 60;
1849
1850         do {
1851                 read_lock(&ha->hw_lock);
1852                 val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
1853                 read_unlock(&ha->hw_lock);
1854
1855                 switch (val) {
1856                 case PHAN_INITIALIZE_COMPLETE:
1857                 case PHAN_INITIALIZE_ACK:
1858                         return QLA_SUCCESS;
1859                 case PHAN_INITIALIZE_FAILED:
1860                         break;
1861                 default:
1862                         break;
1863                 }
1864                 qla_printk(KERN_WARNING, ha,
1865                         "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n",
1866                         val, retries);
1867
1868                 msleep(500);
1869
1870         } while (--retries);
1871
1872         qla_printk(KERN_INFO, ha,
1873             "Cmd Peg initialization failed: 0x%x.\n", val);
1874
1875         qla82xx_check_for_bad_spd(ha);
1876         val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
1877         read_lock(&ha->hw_lock);
1878         qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1879         read_unlock(&ha->hw_lock);
1880         return QLA_FUNCTION_FAILED;
1881 }
1882
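/*
 * qla82xx_check_rcvpeg_state() - Poll CRB_RCVPEG_STATE until the receive
 * PEG reports initialization complete, using the same retry scheme as the
 * command PEG check above.
 */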
1883 static int
1884 qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1885 {
1886         u32 val = 0;
1887         int retries = 60;
1888
1889         do {
1890                 read_lock(&ha->hw_lock);
1891                 val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
1892                 read_unlock(&ha->hw_lock);
1893
1894                 switch (val) {
1895                 case PHAN_INITIALIZE_COMPLETE:
1896                 case PHAN_INITIALIZE_ACK:
1897                         return QLA_SUCCESS;
1898                 case PHAN_INITIALIZE_FAILED:
1899                         break;
1900                 default:
1901                         break;
1902                 }
1903
1904                 qla_printk(KERN_WARNING, ha,
1905                         "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n",
1906                         val, retries);
1907
1908                 msleep(500);
1909
1910         } while (--retries);
1911
1912         qla_printk(KERN_INFO, ha,
1913                 "Rcv Peg initialization failed: 0x%x.\n", val);
1914         read_lock(&ha->hw_lock);
1915         qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
1916         read_unlock(&ha->hw_lock);
1917         return QLA_FUNCTION_FAILED;
1918 }
1919
1920 /* ISR related functions */
1921 uint32_t qla82xx_isr_int_target_mask_enable[8] = {
1922         ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
1923         ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
1924         ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
1925         ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
1926 };
1927
1928 uint32_t qla82xx_isr_int_target_status[8] = {
1929         ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
1930         ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
1931         ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
1932         ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
1933 };
1934
1935 static struct qla82xx_legacy_intr_set legacy_intr[] =
1936         QLA82XX_LEGACY_INTR_CONFIG;
1937
1938 /*
1939  * qla82xx_mbx_completion() - Process mailbox command completions.
1940  * @vha: SCSI driver HA context
1941  * @mb0: Mailbox0 register
1942  */
1943 static void
1944 qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1945 {
1946         uint16_t        cnt;
1947         uint16_t __iomem *wptr;
1948         struct qla_hw_data *ha = vha->hw;
1949         struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1950         wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
1951
1952         /* Load return mailbox registers. */
1953         ha->flags.mbox_int = 1;
1954         ha->mailbox_out[0] = mb0;
1955
1956         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1957                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1958                 wptr++;
1959         }
1960
1961         if (ha->mcp) {
1962                 DEBUG3_11(printk(KERN_INFO "%s(%ld): "
1963                         "Got mailbox completion. cmd=%x.\n",
1964                         __func__, vha->host_no, ha->mcp->mb[0]));
1965         } else {
1966                 qla_printk(KERN_INFO, ha,
1967                         "%s(%ld): MBX pointer ERROR!\n",
1968                         __func__, vha->host_no);
1969         }
1970 }
1971
1972 /*
1973  * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
1974  * @irq: interrupt number
1975  * @dev_id: SCSI driver HA context (response queue pointer)
1976  *
1977  * Called by the system whenever the host adapter generates an interrupt.
1979  *
1980  * Returns handled flag.
1981  */
1982 irqreturn_t
1983 qla82xx_intr_handler(int irq, void *dev_id)
1984 {
1985         scsi_qla_host_t *vha;
1986         struct qla_hw_data *ha;
1987         struct rsp_que *rsp;
1988         struct device_reg_82xx __iomem *reg;
1989         int status = 0, status1 = 0;
1990         unsigned long   flags;
1991         unsigned long   iter;
1992         uint32_t        stat;
1993         uint16_t        mb[4];
1994
1995         rsp = (struct rsp_que *) dev_id;
1996         if (!rsp) {
1997                 printk(KERN_INFO
1998                         "%s(): NULL response queue pointer\n", __func__);
1999                 return IRQ_NONE;
2000         }
2001         ha = rsp->hw;
2002
2003         if (!ha->flags.msi_enabled) {
2004                 status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
2005                 if (!(status & ha->nx_legacy_intr.int_vec_bit))
2006                         return IRQ_NONE;
2007
2008                 status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
2009                 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
2010                         return IRQ_NONE;
2011         }
2012
2013         /* clear the interrupt */
2014         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
2015
2016         /* read twice to ensure write is flushed */
2017         qla82xx_rd_32(ha, ISR_INT_VECTOR);
2018         qla82xx_rd_32(ha, ISR_INT_VECTOR);
2019
2020         reg = &ha->iobase->isp82;
2021
2022         spin_lock_irqsave(&ha->hardware_lock, flags);
2023         vha = pci_get_drvdata(ha->pdev);
2024         for (iter = 1; iter--; ) {
2025
2026                 if (RD_REG_DWORD(&reg->host_int)) {
2027                         stat = RD_REG_DWORD(&reg->host_status);
2028
2029                         switch (stat & 0xff) {
2030                         case 0x1:
2031                         case 0x2:
2032                         case 0x10:
2033                         case 0x11:
2034                                 qla82xx_mbx_completion(vha, MSW(stat));
2035                                 status |= MBX_INTERRUPT;
2036                                 break;
2037                         case 0x12:
2038                                 mb[0] = MSW(stat);
2039                                 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2040                                 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2041                                 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2042                                 qla2x00_async_event(vha, rsp, mb);
2043                                 break;
2044                         case 0x13:
2045                                 qla24xx_process_response_queue(vha, rsp);
2046                                 break;
2047                         default:
2048                                 DEBUG2(printk("scsi(%ld): "
2049                                         " Unrecognized interrupt type (%d).\n",
2050                                         vha->host_no, stat & 0xff));
2051                                 break;
2052                         }
2053                 }
2054                 WRT_REG_DWORD(&reg->host_int, 0);
2055         }
2056         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2057         if (!ha->flags.msi_enabled)
2058                 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2059
2060 #ifdef QL_DEBUG_LEVEL_17
2061         if (!irq && ha->flags.eeh_busy)
2062                 qla_printk(KERN_WARNING, ha,
2063                     "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
2064                     status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2065 #endif
2066
2067         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2068             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2069                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2070                 complete(&ha->mbx_intr_comp);
2071         }
2072         return IRQ_HANDLED;
2073 }
2074
2075 irqreturn_t
2076 qla82xx_msix_default(int irq, void *dev_id)
2077 {
2078         scsi_qla_host_t *vha;
2079         struct qla_hw_data *ha;
2080         struct rsp_que *rsp;
2081         struct device_reg_82xx __iomem *reg;
2082         int status = 0;
2083         unsigned long flags;
2084         uint32_t stat;
2085         uint16_t mb[4];
2086
2087         rsp = (struct rsp_que *) dev_id;
2088         if (!rsp) {
2089                 printk(KERN_INFO
2090                         "%s(): NULL response queue pointer\n", __func__);
2091                 return IRQ_NONE;
2092         }
2093         ha = rsp->hw;
2094
2095         reg = &ha->iobase->isp82;
2096
2097         spin_lock_irqsave(&ha->hardware_lock, flags);
2098         vha = pci_get_drvdata(ha->pdev);
2099         do {
2100                 if (RD_REG_DWORD(&reg->host_int)) {
2101                         stat = RD_REG_DWORD(&reg->host_status);
2102
2103                         switch (stat & 0xff) {
2104                         case 0x1:
2105                         case 0x2:
2106                         case 0x10:
2107                         case 0x11:
2108                                 qla82xx_mbx_completion(vha, MSW(stat));
2109                                 status |= MBX_INTERRUPT;
2110                                 break;
2111                         case 0x12:
2112                                 mb[0] = MSW(stat);
2113                                 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2114                                 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2115                                 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2116                                 qla2x00_async_event(vha, rsp, mb);
2117                                 break;
2118                         case 0x13:
2119                                 qla24xx_process_response_queue(vha, rsp);
2120                                 break;
2121                         default:
2122                                 DEBUG2(printk("scsi(%ld): "
2123                                         " Unrecognized interrupt type (%d).\n",
2124                                         vha->host_no, stat & 0xff));
2125                                 break;
2126                         }
2127                 }
2128                 WRT_REG_DWORD(&reg->host_int, 0);
2129         } while (0);
2130
2131         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2132
2133 #ifdef QL_DEBUG_LEVEL_17
2134         if (!irq && ha->flags.eeh_busy)
2135                 qla_printk(KERN_WARNING, ha,
2136                         "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
2137                         status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2138 #endif
2139
2140         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2141                 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2142                         set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2143                         complete(&ha->mbx_intr_comp);
2144         }
2145         return IRQ_HANDLED;
2146 }
2147
2148 irqreturn_t
2149 qla82xx_msix_rsp_q(int irq, void *dev_id)
2150 {
2151         scsi_qla_host_t *vha;
2152         struct qla_hw_data *ha;
2153         struct rsp_que *rsp;
2154         struct device_reg_82xx __iomem *reg;
2155
2156         rsp = (struct rsp_que *) dev_id;
2157         if (!rsp) {
2158                 printk(KERN_INFO
2159                         "%s(): NULL response queue pointer\n", __func__);
2160                 return IRQ_NONE;
2161         }
2162
2163         ha = rsp->hw;
2164         reg = &ha->iobase->isp82;
2165         spin_lock_irq(&ha->hardware_lock);
2166         vha = pci_get_drvdata(ha->pdev);
2167         qla24xx_process_response_queue(vha, rsp);
2168         WRT_REG_DWORD(&reg->host_int, 0);
2169         spin_unlock_irq(&ha->hardware_lock);
2170         return IRQ_HANDLED;
2171 }
2172
2173 void
2174 qla82xx_poll(int irq, void *dev_id)
2175 {
2176         scsi_qla_host_t *vha;
2177         struct qla_hw_data *ha;
2178         struct rsp_que *rsp;
2179         struct device_reg_82xx __iomem *reg;
2180         int status = 0;
2181         uint32_t stat;
2182         uint16_t mb[4];
2183         unsigned long flags;
2184
2185         rsp = (struct rsp_que *) dev_id;
2186         if (!rsp) {
2187                 printk(KERN_INFO
2188                         "%s(): NULL response queue pointer\n", __func__);
2189                 return;
2190         }
2191         ha = rsp->hw;
2192
2193         reg = &ha->iobase->isp82;
2194         spin_lock_irqsave(&ha->hardware_lock, flags);
2195         vha = pci_get_drvdata(ha->pdev);
2196
2197         if (RD_REG_DWORD(&reg->host_int)) {
2198                 stat = RD_REG_DWORD(&reg->host_status);
2199                 switch (stat & 0xff) {
2200                 case 0x1:
2201                 case 0x2:
2202                 case 0x10:
2203                 case 0x11:
2204                         qla82xx_mbx_completion(vha, MSW(stat));
2205                         status |= MBX_INTERRUPT;
2206                         break;
2207                 case 0x12:
2208                         mb[0] = MSW(stat);
2209                         mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2210                         mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2211                         mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2212                         qla2x00_async_event(vha, rsp, mb);
2213                         break;
2214                 case 0x13:
2215                         qla24xx_process_response_queue(vha, rsp);
2216                         break;
2217                 default:
2218                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2219                                 "(%d).\n",
2220                                 vha->host_no, stat & 0xff));
2221                         break;
2222                 }
2223         }
2224         WRT_REG_DWORD(&reg->host_int, 0);
2225         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2226 }
2227
2228 void
2229 qla82xx_enable_intrs(struct qla_hw_data *ha)
2230 {
2231         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2232         qla82xx_mbx_intr_enable(vha);
2233         spin_lock_irq(&ha->hardware_lock);
2234         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2235         spin_unlock_irq(&ha->hardware_lock);
2236         ha->interrupts_on = 1;
2237 }
2238
2239 void
2240 qla82xx_disable_intrs(struct qla_hw_data *ha)
2241 {
2242         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2243         qla82xx_mbx_intr_disable(vha);
2244         spin_lock_irq(&ha->hardware_lock);
2245         qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
2246         spin_unlock_irq(&ha->hardware_lock);
2247         ha->interrupts_on = 0;
2248 }
2249
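/*
 * qla82xx_init_flags() - Initialize ISP82xx-specific state: CRB window
 * bookkeeping, port number and the per-function legacy interrupt
 * register set.
 */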
2250 void qla82xx_init_flags(struct qla_hw_data *ha)
2251 {
2252         struct qla82xx_legacy_intr_set *nx_legacy_intr;
2253
2254         /* ISP 8021 initializations */
2255         rwlock_init(&ha->hw_lock);
2256         ha->qdr_sn_window = -1;
2257         ha->ddr_mn_window = -1;
2258         ha->curr_window = 255;
2259         ha->portnum = PCI_FUNC(ha->pdev->devfn);
2260         nx_legacy_intr = &legacy_intr[ha->portnum];
2261         ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
2262         ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
2263         ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
2264         ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
2265 }
2266
2267 inline void
2268 qla82xx_set_drv_active(scsi_qla_host_t *vha)
2269 {
2270         uint32_t drv_active;
2271         struct qla_hw_data *ha = vha->hw;
2272
2273         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2274
2275         /* If reset value is all FF's, initialize DRV_ACTIVE */
2276         if (drv_active == 0xffffffff) {
2277                 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
2278                         QLA82XX_DRV_NOT_ACTIVE);
2279                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2280         }
2281         drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2282         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2283 }
2284
2285 inline void
2286 qla82xx_clear_drv_active(struct qla_hw_data *ha)
2287 {
2288         uint32_t drv_active;
2289
2290         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2291         drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2292         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2293 }
2294
2295 static inline int
2296 qla82xx_need_reset(struct qla_hw_data *ha)
2297 {
2298         uint32_t drv_state;
2299         int rval;
2300
2301         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2302         rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2303         return rval;
2304 }
2305
2306 static inline void
2307 qla82xx_set_rst_ready(struct qla_hw_data *ha)
2308 {
2309         uint32_t drv_state;
2310         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2311
2312         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2313
2314         /* If reset value is all FF's, initialize DRV_STATE */
2315         if (drv_state == 0xffffffff) {
2316                 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
2317                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2318         }
2319         drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2320         qla_printk(KERN_INFO, ha,
2321                 "%s(%ld):drv_state = 0x%x\n",
2322                 __func__, vha->host_no, drv_state);
2323         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2324 }
2325
2326 static inline void
2327 qla82xx_clear_rst_ready(struct qla_hw_data *ha)
2328 {
2329         uint32_t drv_state;
2330
2331         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2332         drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2333         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2334 }
2335
2336 static inline void
2337 qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2338 {
2339         uint32_t qsnt_state;
2340
2341         qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2342         qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2343         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2344 }
2345
2346 void
2347 qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
2348 {
2349         struct qla_hw_data *ha = vha->hw;
2350         uint32_t qsnt_state;
2351
2352         qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2353         qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2354         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2355 }
2356
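/*
 * qla82xx_load_fw() - Initialize the CRB from ROM, bring QM and CAMRAM
 * out of reset and load firmware, preferring flash over the
 * request-firmware blob unless ql2xfwloadbin selects the blob.
 */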
2357 static int
2358 qla82xx_load_fw(scsi_qla_host_t *vha)
2359 {
2360         int rst;
2361         struct fw_blob *blob;
2362         struct qla_hw_data *ha = vha->hw;
2363
2364         if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2365                 qla_printk(KERN_ERR, ha,
2366                         "%s: Error during CRB Initialization\n", __func__);
2367                 return QLA_FUNCTION_FAILED;
2368         }
2369         udelay(500);
2370
2371         /* Bring QM and CAMRAM out of reset */
2372         rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
2373         rst &= ~((1 << 28) | (1 << 24));
2374         qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
2375
2376         /*
2377          * FW Load priority:
2378          * 1) Operational firmware residing in flash.
2379          * 2) Firmware via request-firmware interface (.bin file).
2380          */
2381         if (ql2xfwloadbin == 2)
2382                 goto try_blob_fw;
2383
2384         qla_printk(KERN_INFO, ha,
2385                 "Attempting to load firmware from flash\n");
2386
2387         if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2388                 qla_printk(KERN_INFO, ha,
2389                         "Firmware loaded successfully from flash\n");
2390                 return QLA_SUCCESS;
2391         }
2392 try_blob_fw:
2393         qla_printk(KERN_INFO, ha,
2394             "Attempting to load firmware from blob\n");
2395
2396         /* Load firmware blob. */
2397         blob = ha->hablob = qla2x00_request_firmware(vha);
2398         if (!blob) {
2399                 qla_printk(KERN_ERR, ha,
2400                         "Firmware image not present.\n");
2401                 goto fw_load_failed;
2402         }
2403
2404         /* Validating firmware blob */
2405         if (qla82xx_validate_firmware_blob(vha,
2406                 QLA82XX_FLASH_ROMIMAGE)) {
2407                 /* Fallback to URI format */
2408                 if (qla82xx_validate_firmware_blob(vha,
2409                         QLA82XX_UNIFIED_ROMIMAGE)) {
2410                         qla_printk(KERN_ERR, ha,
2411                                 "No valid firmware image found!\n");
2412                         return QLA_FUNCTION_FAILED;
2413                 }
2414         }
2415
2416         if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2417                 qla_printk(KERN_INFO, ha,
2418                         "%s: Firmware loaded successfully "
2419                         "from binary blob\n", __func__);
2420                 return QLA_SUCCESS;
2421         } else {
2422                 qla_printk(KERN_ERR, ha,
2423                     "Firmware load failed from binary blob\n");
2424                 blob->fw = NULL;
2425                 blob = NULL;
2426                 goto fw_load_failed;
2427         }
2428         return QLA_SUCCESS;
2429
2430 fw_load_failed:
2431         return QLA_FUNCTION_FAILED;
2432 }
2433
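/*
 * qla82xx_start_firmware() - Clear stale PEG state, load firmware and
 * handshake with the command and receive PEGs; also records the
 * negotiated PCIe link width.
 */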
2434 int
2435 qla82xx_start_firmware(scsi_qla_host_t *vha)
2436 {
2437         int           pcie_cap;
2438         uint16_t      lnk;
2439         struct qla_hw_data *ha = vha->hw;
2440
2441         /* scrub dma mask expansion register */
2442         qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
2443
2444         /* Put both the PEG CMD and RCV PEG to default state
2445          * of 0 before resetting the hardware
2446          */
2447         qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
2448         qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
2449
2450         /* Overwrite stale initialization register values */
2451         qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
2452         qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2453
2454         if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2455                 qla_printk(KERN_INFO, ha,
2456                         "%s: Error trying to start fw!\n", __func__);
2457                 return QLA_FUNCTION_FAILED;
2458         }
2459
2460         /* Handshake with the card before we register the devices. */
2461         if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2462                 qla_printk(KERN_INFO, ha,
2463                         "%s: Error during card handshake!\n", __func__);
2464                 return QLA_FUNCTION_FAILED;
2465         }
2466
2467         /* Negotiated Link width */
2468         pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
2469         pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
2470         ha->link_width = (lnk >> 4) & 0x3f;
2471
2472         /* Synchronize with Receive peg */
2473         return qla82xx_check_rcvpeg_state(ha);
2474 }
2475
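/*
 * qla2xx_build_scsi_type_6_iocbs() - Build the DSD list chain for a
 * Command Type 6 IOCB, pulling DSD list buffers from the global pool and
 * filling them with the command's scatter/gather entries.
 */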
2476 static inline int
2477 qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2478         uint16_t tot_dsds)
2479 {
2480         uint32_t *cur_dsd = NULL;
2481         scsi_qla_host_t *vha;
2482         struct qla_hw_data *ha;
2483         struct scsi_cmnd *cmd;
2484         struct  scatterlist *cur_seg;
2485         uint32_t *dsd_seg;
2486         void *next_dsd;
2487         uint8_t avail_dsds;
2488         uint8_t first_iocb = 1;
2489         uint32_t dsd_list_len;
2490         struct dsd_dma *dsd_ptr;
2491         struct ct6_dsd *ctx;
2492
2493         cmd = sp->cmd;
2494
2495         /* Update entry type to indicate Command Type 6 IOCB */
2496         *((uint32_t *)(&cmd_pkt->entry_type)) =
2497                 __constant_cpu_to_le32(COMMAND_TYPE_6);
2498
2499         /* No data transfer */
2500         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
2501                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
2502                 return 0;
2503         }
2504
2505         vha = sp->fcport->vha;
2506         ha = vha->hw;
2507
2508         /* Set transfer direction */
2509         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2510                 cmd_pkt->control_flags =
2511                     __constant_cpu_to_le16(CF_WRITE_DATA);
2512                 ha->qla_stats.output_bytes += scsi_bufflen(cmd);
2513         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
2514                 cmd_pkt->control_flags =
2515                     __constant_cpu_to_le16(CF_READ_DATA);
2516                 ha->qla_stats.input_bytes += scsi_bufflen(cmd);
2517         }
2518
2519         cur_seg = scsi_sglist(cmd);
2520         ctx = sp->ctx;
2521
2522         while (tot_dsds) {
2523                 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
2524                     QLA_DSDS_PER_IOCB : tot_dsds;
2525                 tot_dsds -= avail_dsds;
2526                 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
2527
2528                 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
2529                     struct dsd_dma, list);
2530                 next_dsd = dsd_ptr->dsd_addr;
2531                 list_del(&dsd_ptr->list);
2532                 ha->gbl_dsd_avail--;
2533                 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
2534                 ctx->dsd_use_cnt++;
2535                 ha->gbl_dsd_inuse++;
2536
2537                 if (first_iocb) {
2538                         first_iocb = 0;
2539                         dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2540                         *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2541                         *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2542                         *dsd_seg++ = dsd_list_len;
2543                 } else {
2544                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2545                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2546                         *cur_dsd++ = dsd_list_len;
2547                 }
2548                 cur_dsd = (uint32_t *)next_dsd;
2549                 while (avail_dsds) {
2550                         dma_addr_t      sle_dma;
2551
2552                         sle_dma = sg_dma_address(cur_seg);
2553                         *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2554                         *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2555                         *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
2556                         cur_seg++;
2557                         avail_dsds--;
2558                 }
2559         }
2560
2561         /* Null termination */
2562         *cur_dsd++ =  0;
2563         *cur_dsd++ = 0;
2564         *cur_dsd++ = 0;
2565         cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
2566         return 0;
2567 }
2568
2569 /*
2570  * qla82xx_calc_dsd_lists() - Determine the number of DSD lists required
2571  * for Command Type 6.
2572  *
2573  * @dsds: number of data segment descriptors needed
2574  *
2575  * Returns the number of DSD lists needed to store @dsds.
2576  */
2577 inline uint16_t
2578 qla82xx_calc_dsd_lists(uint16_t dsds)
2579 {
2580         uint16_t dsd_lists = 0;
2581
2582         dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
2583         if (dsds % QLA_DSDS_PER_IOCB)
2584                 dsd_lists++;
2585         return dsd_lists;
2586 }
2587
2588 /*
2589  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2590  * @sp: command to send to the ISP
2591  *
2592  * Returns non-zero if a failure occurred, else zero.
2593  */
2594 int
2595 qla82xx_start_scsi(srb_t *sp)
2596 {
2597         int             ret, nseg;
2598         unsigned long   flags;
2599         struct scsi_cmnd *cmd;
2600         uint32_t        *clr_ptr;
2601         uint32_t        index;
2602         uint32_t        handle;
2603         uint16_t        cnt;
2604         uint16_t        req_cnt;
2605         uint16_t        tot_dsds;
2606         struct device_reg_82xx __iomem *reg;
2607         uint32_t dbval;
2608         uint32_t *fcp_dl;
2609         uint8_t additional_cdb_len;
2610         struct ct6_dsd *ctx;
2611         struct scsi_qla_host *vha = sp->fcport->vha;
2612         struct qla_hw_data *ha = vha->hw;
2613         struct req_que *req = NULL;
2614         struct rsp_que *rsp = NULL;
2615
2616         /* Setup device pointers. */
2617         ret = 0;
2618         reg = &ha->iobase->isp82;
2619         cmd = sp->cmd;
2620         req = vha->req;
2621         rsp = ha->rsp_q_map[0];
2622
2623         /* So we know we haven't pci_map'ed anything yet */
2624         tot_dsds = 0;
2625
2626         dbval = 0x04 | (ha->portnum << 5);
2627
2628         /* Send marker if required */
2629         if (vha->marker_needed != 0) {
2630                 if (qla2x00_marker(vha, req,
2631                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2632                         return QLA_FUNCTION_FAILED;
2633                 vha->marker_needed = 0;
2634         }
2635
2636         /* Acquire ring specific lock */
2637         spin_lock_irqsave(&ha->hardware_lock, flags);
2638
2639         /* Check for room in outstanding command list. */
2640         handle = req->current_outstanding_cmd;
2641         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2642                 handle++;
2643                 if (handle == MAX_OUTSTANDING_COMMANDS)
2644                         handle = 1;
2645                 if (!req->outstanding_cmds[handle])
2646                         break;
2647         }
2648         if (index == MAX_OUTSTANDING_COMMANDS)
2649                 goto queuing_error;
2650
2651         /* Map the sg table so we have an accurate count of sg entries needed */
2652         if (scsi_sg_count(cmd)) {
2653                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2654                     scsi_sg_count(cmd), cmd->sc_data_direction);
2655                 if (unlikely(!nseg))
2656                         goto queuing_error;
2657         } else
2658                 nseg = 0;
2659
2660         tot_dsds = nseg;
2661
2662         if (tot_dsds > ql2xshiftctondsd) {
2663                 struct cmd_type_6 *cmd_pkt;
2664                 uint16_t more_dsd_lists = 0;
2665                 struct dsd_dma *dsd_ptr;
2666                 uint16_t i;
2667
2668                 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2669                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN)
2670                         goto queuing_error;
2671
2672                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2673                         goto sufficient_dsds;
2674                 else
2675                         more_dsd_lists -= ha->gbl_dsd_avail;
2676
2677                 for (i = 0; i < more_dsd_lists; i++) {
2678                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2679                         if (!dsd_ptr)
2680                                 goto queuing_error;
2681
2682                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2683                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2684                         if (!dsd_ptr->dsd_addr) {
2685                                 kfree(dsd_ptr);
2686                                 goto queuing_error;
2687                         }
2688                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2689                         ha->gbl_dsd_avail++;
2690                 }
2691
2692 sufficient_dsds:
2693                 req_cnt = 1;
2694
2695                 if (req->cnt < (req_cnt + 2)) {
2696                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2697                                 &reg->req_q_out[0]);
2698                         if (req->ring_index < cnt)
2699                                 req->cnt = cnt - req->ring_index;
2700                         else
2701                                 req->cnt = req->length -
2702                                         (req->ring_index - cnt);
2703                 }
2704
2705                 if (req->cnt < (req_cnt + 2))
2706                         goto queuing_error;
2707
2708                 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2709                 if (!sp->ctx) {
2710                         DEBUG(printk(KERN_INFO
2711                                 "%s(%ld): failed to allocate"
2712                                 " ctx.\n", __func__, vha->host_no));
2713                         goto queuing_error;
2714                 }
2715                 memset(ctx, 0, sizeof(struct ct6_dsd));
2716                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2717                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2718                 if (!ctx->fcp_cmnd) {
2719                         DEBUG2_3(printk("%s(%ld): failed to allocate"
2720                                 " fcp_cmnd.\n", __func__, vha->host_no));
2721                         goto queuing_error_fcp_cmnd;
2722                 }
2723
2724                 /* Initialize the DSD list and dma handle */
2725                 INIT_LIST_HEAD(&ctx->dsd_list);
2726                 ctx->dsd_use_cnt = 0;
2727
2728                 if (cmd->cmd_len > 16) {
2729                         additional_cdb_len = cmd->cmd_len - 16;
2730                         if ((cmd->cmd_len % 4) != 0) {
2731                                 /* SCSI command bigger than 16 bytes must be
2732                                  * multiple of 4
2733                                  */
2734                                 goto queuing_error_fcp_cmnd;
2735                         }
2736                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2737                 } else {
2738                         additional_cdb_len = 0;
2739                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2740                 }
2741
2742                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2743                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2744
2745                 /* Zero out remaining portion of packet. */
2746                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2747                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2748                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2749                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2750
2751                 /* Set NPORT-ID and LUN number*/
2752                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2753                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2754                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2755                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2756                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2757
2758                 /* Build IOCB segments */
2759                 if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2760                         goto queuing_error_fcp_cmnd;
2761
2762                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2763                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2764
2765                 /* build FCP_CMND IU */
2766                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2767                 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2768                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2769
2770                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2771                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2772                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2773                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2774
2775                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2776
2777                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2778                     additional_cdb_len);
2779                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2780
2781                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2782                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2783                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2784                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2785                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2786
2787                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2788                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2789                 /* Set total data segment count. */
2790                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2791                 /* Specify response queue number where
2792                  * completion should happen
2793                  */
2794                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2795         } else {
2796                 struct cmd_type_7 *cmd_pkt;
2797                 req_cnt = qla24xx_calc_iocbs(tot_dsds);
2798                 if (req->cnt < (req_cnt + 2)) {
2799                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2800                             &reg->req_q_out[0]);
2801                         if (req->ring_index < cnt)
2802                                 req->cnt = cnt - req->ring_index;
2803                         else
2804                                 req->cnt = req->length -
2805                                         (req->ring_index - cnt);
2806                 }
2807                 if (req->cnt < (req_cnt + 2))
2808                         goto queuing_error;
2809
2810                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2811                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2812
2813                 /* Zero out remaining portion of packet. */
2814                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2815                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2816                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2817                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2818
2819                 /* Set NPORT-ID and LUN number*/
2820                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2821                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2822                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2823                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2824                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2825
2826                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2827                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2828                         sizeof(cmd_pkt->lun));
2829
2830                 /* Load SCSI command packet. */
2831                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2832                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2833
2834                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2835
2836                 /* Build IOCB segments */
2837                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2838
2839                 /* Set total data segment count. */
2840                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2841                 /* Specify response queue number where
2842                  * completion should happen.
2843                  */
2844                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2845
2846         }
2847         /* Build command packet. */
2848         req->current_outstanding_cmd = handle;
2849         req->outstanding_cmds[handle] = sp;
2850         sp->handle = handle;
2851         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2852         req->cnt -= req_cnt;
2853         wmb();
2854
2855         /* Adjust ring index. */
2856         req->ring_index++;
2857         if (req->ring_index == req->length) {
2858                 req->ring_index = 0;
2859                 req->ring_ptr = req->ring;
2860         } else
2861                 req->ring_ptr++;
2862
2863         sp->flags |= SRB_DMA_VALID;
2864
2865         /* Set chip new ring index. */
2866         /* write, read and verify logic */
2867         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2868         if (ql2xdbwr)
2869                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2870         else {
2871                 WRT_REG_DWORD(
2872                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2873                         dbval);
2874                 wmb();
2875                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2876                         WRT_REG_DWORD(
2877                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2878                                 dbval);
2879                         wmb();
2880                 }
2881         }
2882
2883         /* Manage unprocessed RIO/ZIO commands in response queue. */
2884         if (vha->flags.process_response_queue &&
2885             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2886                 qla24xx_process_response_queue(vha, rsp);
2887
2888         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2889         return QLA_SUCCESS;
2890
2891 queuing_error_fcp_cmnd:
2892         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2893 queuing_error:
2894         if (tot_dsds)
2895                 scsi_dma_unmap(cmd);
2896
2897         if (sp->ctx) {
2898                 mempool_free(sp->ctx, ha->ctx_mempool);
2899                 sp->ctx = NULL;
2900         }
2901         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2902
2903         return QLA_FUNCTION_FAILED;
2904 }
2905
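/*
 * qla82xx_read_flash_data() - Read @length bytes of flash starting at
 * @faddr into @dwptr using dword-sized ROM fast reads.
 */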
2906 static uint32_t *
2907 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2908         uint32_t length)
2909 {
2910         uint32_t i;
2911         uint32_t val;
2912         struct qla_hw_data *ha = vha->hw;
2913
2914         /* Dword reads to flash. */
2915         for (i = 0; i < length/4; i++, faddr += 4) {
2916                 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
2917                         qla_printk(KERN_WARNING, ha,
2918                             "Do ROM fast read failed\n");
2919                         goto done_read;
2920                 }
2921                         dwptr[i] = cpu_to_le32(val);
2922         }
2923 done_read:
2924         return dwptr;
2925 }
2926
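     /*
      * Clear the flash block-protect bits (BLOCK_PROTECT_BITS << 2 in the
      * status register) so sectors can be erased and programmed;
      * qla82xx_protect_flash() below restores them.  Both routines hold the
      * ROM lock (SEM2) while they touch the part.
      */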
2927 static int
2928 qla82xx_unprotect_flash(struct qla_hw_data *ha)
2929 {
2930         int ret;
2931         uint32_t val;
2932
2933         ret = ql82xx_rom_lock_d(ha);
2934         if (ret < 0) {
2935                 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
2936                 return ret;
2937         }
2938
2939         ret = qla82xx_read_status_reg(ha, &val);
2940         if (ret < 0)
2941                 goto done_unprotect;
2942
2943         val &= ~(BLOCK_PROTECT_BITS << 2);
2944         ret = qla82xx_write_status_reg(ha, val);
2945         if (ret < 0) {
2946                 val |= (BLOCK_PROTECT_BITS << 2);
2947                 qla82xx_write_status_reg(ha, val);
2948         }
2949
2950         if (qla82xx_write_disable_flash(ha) != 0)
2951                 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
2952
2953 done_unprotect:
2954         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
2955         return ret;
2956 }
2957
2958 static int
2959 qla82xx_protect_flash(struct qla_hw_data *ha)
2960 {
2961         int ret;
2962         uint32_t val;
2963
2964         ret = ql82xx_rom_lock_d(ha);
2965         if (ret < 0) {
2966                 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
2967                 return ret;
2968         }
2969
2970         ret = qla82xx_read_status_reg(ha, &val);
2971         if (ret < 0)
2972                 goto done_protect;
2973
2974         val |= (BLOCK_PROTECT_BITS << 2);
2975         /* LOCK all sectors */
2976         ret = qla82xx_write_status_reg(ha, val);
2977         if (ret < 0)
2978                 qla_printk(KERN_WARNING, ha, "Write status register failed\n");
2979
2980         if (qla82xx_write_disable_flash(ha) != 0)
2981                 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
2982 done_protect:
2983         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
2984         return ret;
2985 }
2986
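     /*
      * Erase the flash sector containing 'addr': take the ROM lock, enable
      * writes, issue the serial-flash sector-erase opcode (M25P_INSTR_SE)
      * through the ROMUSB interface and wait for the operation to finish.
      */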
2987 static int
2988 qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
2989 {
2990         int ret = 0;
2991
2992         ret = ql82xx_rom_lock_d(ha);
2993         if (ret < 0) {
2994                 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
2995                 return ret;
2996         }
2997
2998         qla82xx_flash_set_write_enable(ha);
2999         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
3000         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
3001         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
3002
3003         if (qla82xx_wait_rom_done(ha)) {
3004                 qla_printk(KERN_WARNING, ha,
3005                     "Error waiting for rom done\n");
3006                 ret = -1;
3007                 goto done;
3008         }
3009         ret = qla82xx_flash_wait_write_finish(ha);
3010 done:
3011         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
3012         return ret;
3013 }
3014
3015 /*
3016  * Address and length are in bytes.
3017  */
3018 uint8_t *
3019 qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3020         uint32_t offset, uint32_t length)
3021 {
3022         scsi_block_requests(vha->host);
3023         qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
3024         scsi_unblock_requests(vha->host);
3025         return buf;
3026 }
3027
3028 static int
3029 qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3030         uint32_t faddr, uint32_t dwords)
3031 {
3032         int ret;
3033         uint32_t liter;
3034         uint32_t sec_mask, rest_addr;
3035         dma_addr_t optrom_dma;
3036         void *optrom = NULL;
3037         int page_mode = 0;
3038         struct qla_hw_data *ha = vha->hw;
3039
3040         ret = -1;
3041
3042         /* Prepare burst-capable write on supported ISPs. */
3043         if (page_mode && !(faddr & 0xfff) &&
3044             dwords > OPTROM_BURST_DWORDS) {
3045                 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
3046                     &optrom_dma, GFP_KERNEL);
3047                 if (!optrom) {
3048                         qla_printk(KERN_DEBUG, ha,
3049                                 "Unable to allocate memory for optrom "
3050                                 "burst write (%x KB).\n",
3051                                 OPTROM_BURST_SIZE / 1024);
3052                 }
3053         }
3054
3055         rest_addr = ha->fdt_block_size - 1;
3056         sec_mask = ~rest_addr;
3057
3058         ret = qla82xx_unprotect_flash(ha);
3059         if (ret) {
3060                 qla_printk(KERN_WARNING, ha,
3061                         "Unable to unprotect flash for update.\n");
3062                 goto write_done;
3063         }
3064
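             /*
              * Program the flash one dword at a time: erase each sector when
              * its first address is reached, and, when a DMA buffer was
              * allocated above and a full burst still fits, hand
              * OPTROM_BURST_DWORDS to the firmware via qla2x00_load_ram()
              * instead, falling back to the slow per-dword path if the burst
              * write fails.
              */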
3065         for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3066                 /* Are we at the beginning of a sector? */
3067                 if ((faddr & rest_addr) == 0) {
3068
3069                         ret = qla82xx_erase_sector(ha, faddr);
3070                         if (ret) {
3071                                 DEBUG9(qla_printk(KERN_ERR, ha,
3072                                     "Unable to erase sector: "
3073                                     "address=%x.\n", faddr));
3074                                 break;
3075                         }
3076                 }
3077
3078                 /* Go with burst-write. */
3079                 if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
3080                         /* Copy data to DMA'ble buffer. */
3081                         memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
3082
3083                         ret = qla2x00_load_ram(vha, optrom_dma,
3084                             (ha->flash_data_off | faddr),
3085                             OPTROM_BURST_DWORDS);
3086                         if (ret != QLA_SUCCESS) {
3087                                 qla_printk(KERN_WARNING, ha,
3088                                     "Unable to burst-write optrom segment "
3089                                     "(%x/%x/%llx).\n", ret,
3090                                     (ha->flash_data_off | faddr),
3091                                     (unsigned long long)optrom_dma);
3092                                 qla_printk(KERN_WARNING, ha,
3093                                     "Reverting to slow-write.\n");
3094
3095                                 dma_free_coherent(&ha->pdev->dev,
3096                                     OPTROM_BURST_SIZE, optrom, optrom_dma);
3097                                 optrom = NULL;
3098                         } else {
3099                                 liter += OPTROM_BURST_DWORDS - 1;
3100                                 faddr += OPTROM_BURST_DWORDS - 1;
3101                                 dwptr += OPTROM_BURST_DWORDS - 1;
3102                                 continue;
3103                         }
3104                 }
3105
3106                 ret = qla82xx_write_flash_dword(ha, faddr,
3107                     cpu_to_le32(*dwptr));
3108                 if (ret) {
3109                         DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program "
3110                             "flash address=%x data=%x.\n", __func__,
3111                             vha->host_no, faddr, *dwptr));
3112                         break;
3113                 }
3114         }
3115
3116         ret = qla82xx_protect_flash(ha);
3117         if (ret)
3118                 qla_printk(KERN_WARNING, ha,
3119                     "Unable to protect flash after update.\n");
3120 write_done:
3121         if (optrom)
3122                 dma_free_coherent(&ha->pdev->dev,
3123                     OPTROM_BURST_SIZE, optrom, optrom_dma);
3124         return ret;
3125 }
3126
3127 int
3128 qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3129         uint32_t offset, uint32_t length)
3130 {
3131         int rval;
3132
3133         /* Suspend HBA. */
3134         scsi_block_requests(vha->host);
3135         rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
3136                 length >> 2);
3137         scsi_unblock_requests(vha->host);
3138
3139         /* Convert ISP82xx return value to a generic return code. */
3140         if (rval)
3141                 rval = QLA_FUNCTION_FAILED;
3142         else
3143                 rval = QLA_SUCCESS;
3144         return rval;
3145 }
3146
3147 void
3148 qla82xx_start_iocbs(srb_t *sp)
3149 {
3150         struct qla_hw_data *ha = sp->fcport->vha->hw;
3151         struct req_que *req = ha->req_q_map[0];
3152         struct device_reg_82xx __iomem *reg;
3153         uint32_t dbval;
3154
3155         /* Adjust ring index. */
3156         req->ring_index++;
3157         if (req->ring_index == req->length) {
3158                 req->ring_index = 0;
3159                 req->ring_ptr = req->ring;
3160         } else
3161                 req->ring_ptr++;
3162
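             /*
              * Compose the request-queue doorbell value: the low byte is built
              * from 0x04 and the port number, byte 1 carries the request queue
              * id and the upper 16 bits carry the new ring index.  The doorbell
              * is then written and verified as in the command-submission path
              * above.
              */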
3163         reg = &ha->iobase->isp82;
3164         dbval = 0x04 | (ha->portnum << 5);
3165
3166         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3167         if (ql2xdbwr)
3168                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
3169         else {
3170                 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
3171                 wmb();
3172                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3173                         WRT_REG_DWORD((unsigned long  __iomem *)ha->nxdb_wr_ptr,
3174                                 dbval);
3175                         wmb();
3176                 }
3177         }
3178 }
3179
3180 void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3181 {
3182         if (qla82xx_rom_lock(ha))
3183                 /* Someone else is holding the lock. */
3184                 qla_printk(KERN_INFO, ha, "Resetting rom_lock\n");
3185
3186         /*
3187          * Either we got the lock, or someone
3188          * else died while holding it.
3189          * In either case, unlock.
3190          */
3191         qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
3192 }
3193
3194 /*
3195  * qla82xx_device_bootstrap
3196  *    Initialize device, set DEV_READY, start fw
3197  *
3198  * Note:
3199  *      IDC lock must be held upon entry
3200  *
3201  * Return:
3202  *    Success : 0
3203  *    Failed  : 1
3204  */
3205 static int
3206 qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3207 {
3208         int rval = QLA_SUCCESS;
3209         int i, timeout;
3210         uint32_t old_count, count;
3211         struct qla_hw_data *ha = vha->hw;
3212         int need_reset = 0, peg_stuck = 1;
3213
3214         need_reset = qla82xx_need_reset(ha);
3215
3216         old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3217
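             /*
              * Sample the peg-alive counter ten times, roughly two seconds in
              * total; if it never advances, the firmware processors are assumed
              * to be stuck and the ROM lock may need to be recovered.
              */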
3218         for (i = 0; i < 10; i++) {
3219                 timeout = msleep_interruptible(200);
3220                 if (timeout) {
3221                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3222                                 QLA82XX_DEV_FAILED);
3223                         return QLA_FUNCTION_FAILED;
3224                 }
3225
3226                 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3227                 if (count != old_count)
3228                         peg_stuck = 0;
3229         }
3230
3231         if (need_reset) {
3232                 /* We are trying to perform a recovery here. */
3233                 if (peg_stuck)
3234                         qla82xx_rom_lock_recovery(ha);
3235                 goto dev_initialize;
3236         } else  {
3237                 /* Start of day for this ha context. */
3238                 if (peg_stuck) {
3239                         /* Either we are the first function up or a recovery is in progress. */
3240                         qla82xx_rom_lock_recovery(ha);
3241                         goto dev_initialize;
3242                 } else
3243                         /* Firmware already running. */
3244                         goto dev_ready;
3245         }
3246
3247         return rval;
3248
3249 dev_initialize:
3250         /* set to DEV_INITIALIZING */
3251         qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
3252         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
3253
3254         /* Driver that sets device state to INITIALIZING also sets IDC version. */
3255         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
3256
3257         qla82xx_idc_unlock(ha);
3258         rval = qla82xx_start_firmware(vha);
3259         qla82xx_idc_lock(ha);
3260
3261         if (rval != QLA_SUCCESS) {
3262                 qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
3263                 qla82xx_clear_drv_active(ha);
3264                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
3265                 return rval;
3266         }
3267
3268 dev_ready:
3269         qla_printk(KERN_INFO, ha, "HW State: READY\n");
3270         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
3271
3272         return QLA_SUCCESS;
3273 }
3274
3275 /*
3276  * qla82xx_need_qsnt_handler
3277  *    Code to start quiescence sequence
3278  *
3279  * Note:
3280  *      IDC lock must be held upon entry
3281  *
3282  * Return: void
3283  */
3284
3285 static void
3286 qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3287 {
3288         struct qla_hw_data *ha = vha->hw;
3289         uint32_t dev_state, drv_state, drv_active;
3290         unsigned long reset_timeout;
3291
3292         if (vha->flags.online) {
3293                 /* Block any further I/O; wait for pending commands to complete. */
3294                 qla82xx_quiescent_state_cleanup(vha);
3295         }
3296
3297         /* Set the quiescence ready bit */
3298         qla82xx_set_qsnt_ready(ha);
3299
3300         /* Wait 30 seconds for other functions to ack. */
3301         reset_timeout = jiffies + (30 * HZ);
3302
3303         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3304         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3305         /* Quiescence ack is written as 2, so shift drv_active up one bit to compare. */
3306         drv_active = drv_active << 0x01;
3307
3308         while (drv_state != drv_active) {
3309
3310                 if (time_after_eq(jiffies, reset_timeout)) {
3311                         /* Quiescence timeout: other functions didn't ack,
3312                          * so change the state back to DEV_READY.
3313                          */
3314                         qla_printk(KERN_INFO, ha,
3315                             "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME);
3316                         qla_printk(KERN_INFO, ha,
3317                             "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active,
3318                             drv_state);
3319                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3320                                                 QLA82XX_DEV_READY);
3321                         qla_printk(KERN_INFO, ha,
3322                             "HW State: DEV_READY\n");
3323                         qla82xx_idc_unlock(ha);
3324                         qla2x00_perform_loop_resync(vha);
3325                         qla82xx_idc_lock(ha);
3326
3327                         qla82xx_clear_qsnt_ready(vha);
3328                         return;
3329                 }
3330
3331                 qla82xx_idc_unlock(ha);
3332                 msleep(1000);
3333                 qla82xx_idc_lock(ha);
3334
3335                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3336                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3337                 drv_active = drv_active << 0x01;
3338         }
3339         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3340         /* Everyone acked, so set the state to DEV_QUIESCENT. */
3341         if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
3342                 qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n");
3343                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
3344         }
3345 }
3346
3347 /*
3348  * qla82xx_wait_for_state_change
3349  *    Wait for device state to change from given current state
3350  *
3351  * Note:
3352  *     IDC lock must not be held upon entry
3353  *
3354  * Return:
3355  *    Changed device state.
3356  */
3357 uint32_t
3358 qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
3359 {
3360         struct qla_hw_data *ha = vha->hw;
3361         uint32_t dev_state;
3362
3363         do {
3364                 msleep(1000);
3365                 qla82xx_idc_lock(ha);
3366                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3367                 qla82xx_idc_unlock(ha);
3368         } while (dev_state == curr_state);
3369
3370         return dev_state;
3371 }
3372
3373 static void
3374 qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3375 {
3376         struct qla_hw_data *ha = vha->hw;
3377
3378         /* Disable the board */
3379         qla_printk(KERN_INFO, ha, "Disabling the board\n");
3380
3381         qla82xx_idc_lock(ha);
3382         qla82xx_clear_drv_active(ha);
3383         qla82xx_idc_unlock(ha);
3384
3385         /* Set DEV_FAILED flag to disable timer */
3386         vha->device_flags |= DFLG_DEV_FAILED;
3387         qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3388         qla2x00_mark_all_devices_lost(vha, 0);
3389         vha->flags.online = 0;
3390         vha->flags.init_done = 0;
3391 }
3392
3393 /*
3394  * qla82xx_need_reset_handler
3395  *    Code to start reset sequence
3396  *
3397  * Note:
3398  *      IDC lock must be held upon entry
3399  *
3400  * Return: void
3403  */
3404 static void
3405 qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3406 {
3407         uint32_t dev_state, drv_state, drv_active;
3408         unsigned long reset_timeout;
3409         struct qla_hw_data *ha = vha->hw;
3410         struct req_que *req = ha->req_q_map[0];
3411
3412         if (vha->flags.online) {
3413                 qla82xx_idc_unlock(ha);
3414                 qla2x00_abort_isp_cleanup(vha);
3415                 ha->isp_ops->get_flash_version(vha, req->ring);
3416                 ha->isp_ops->nvram_config(vha);
3417                 qla82xx_idc_lock(ha);
3418         }
3419
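             /*
              * Advertise that this function is ready for reset, then wait for
              * every active function to do the same (DRV_STATE matching
              * DRV_ACTIVE) before forcing the device state to COLD for re-init.
              */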
3420         qla82xx_set_rst_ready(ha);
3421
3422         /* Wait ha->nx_reset_timeout seconds for reset ack from all functions. */
3423         reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
3424
3425         drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3426         drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3427
3428         while (drv_state != drv_active) {
3429                 if (time_after_eq(jiffies, reset_timeout)) {
3430                         qla_printk(KERN_INFO, ha,
3431                                 "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME);
3432                         break;
3433                 }
3434                 qla82xx_idc_unlock(ha);
3435                 msleep(1000);
3436                 qla82xx_idc_lock(ha);
3437                 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3438                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3439         }
3440
3441         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3442         qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
3443                 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3444
3445         /* Force to DEV_COLD unless someone else is starting a reset */
3446         if (dev_state != QLA82XX_DEV_INITIALIZING) {
3447                 qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
3448                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3449         }
3450 }
3451
3452 static void
3453 qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3454 {
3455         uint32_t fw_heartbeat_counter, halt_status;
3456         struct qla_hw_data *ha = vha->hw;
3457
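             /*
              * The firmware bumps the peg-alive counter as a heartbeat.  This
              * routine is called from qla82xx_watchdog(); if the counter has not
              * advanced for two consecutive calls, the firmware is treated as
              * hung and the DPC thread is woken to recover or fail the adapter.
              */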
3458         fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3459         /* all 0xff, assume AER/EEH in progress, ignore */
3460         if (fw_heartbeat_counter == 0xffffffff)
3461                 return;
3462         if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3463                 vha->seconds_since_last_heartbeat++;
3464                 /* FW not alive after 2 seconds */
3465                 if (vha->seconds_since_last_heartbeat == 2) {
3466                         vha->seconds_since_last_heartbeat = 0;
3467                         halt_status = qla82xx_rd_32(ha,
3468                                 QLA82XX_PEG_HALT_STATUS1);
3469                         if (halt_status & HALT_STATUS_UNRECOVERABLE) {
3470                                 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
3471                         } else {
3472                                 qla_printk(KERN_INFO, ha,
3473                                         "scsi(%ld): %s - detect abort needed\n",
3474                                         vha->host_no, __func__);
3475                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3476                         }
3477                         qla2xxx_wake_dpc(vha);
3478                         ha->flags.fw_hung = 1;
3479                         if (ha->flags.mbox_busy) {
3480                                 ha->flags.mbox_int = 1;
3481                                 DEBUG2(qla_printk(KERN_ERR, ha,
3482                                         "Due to fw hung, doing premature "
3483                                         "completion of mbx command\n"));
3484                                 if (test_bit(MBX_INTR_WAIT,
3485                                         &ha->mbx_cmd_flags))
3486                                         complete(&ha->mbx_intr_comp);
3487                         }
3488                 }
3489         } else
3490                 vha->seconds_since_last_heartbeat = 0;
3491         vha->fw_heartbeat_counter = fw_heartbeat_counter;
3492 }
3493
3494 /*
3495  * qla82xx_device_state_handler
3496  *      Main state handler
3497  *
3498  * Note:
3499  *      IDC lock must be held upon entry
3500  *
3501  * Return:
3502  *    Success : 0
3503  *    Failed  : 1
3504  */
3505 int
3506 qla82xx_device_state_handler(scsi_qla_host_t *vha)
3507 {
3508         uint32_t dev_state;
3509         int rval = QLA_SUCCESS;
3510         unsigned long dev_init_timeout;
3511         struct qla_hw_data *ha = vha->hw;
3512
3513         qla82xx_idc_lock(ha);
3514         if (!vha->flags.init_done)
3515                 qla82xx_set_drv_active(vha);
3516
3517         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3518         qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
3519                 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3520
3521         /* Wait up to ha->nx_dev_init_timeout seconds for the device to go ready. */
3522         dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
3523
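             /*
              * IDC state machine: poll CRB_DEV_STATE under the IDC lock and act
              * on it: bootstrap from COLD, wait out INITIALIZING, run the reset
              * or quiescence handlers when requested, and give up if the device
              * is FAILED or the nx_dev_init_timeout deadline expires.
              */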
3524         while (1) {
3525
3526                 if (time_after_eq(jiffies, dev_init_timeout)) {
3527                         DEBUG(qla_printk(KERN_INFO, ha,
3528                                 "%s: device init failed!\n",
3529                                 QLA2XXX_DRIVER_NAME));
3530                         rval = QLA_FUNCTION_FAILED;
3531                         break;
3532                 }
3533                 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3534                 qla_printk(KERN_INFO, ha,
3535                         "2:Device state is 0x%x = %s\n", dev_state,
3536                         dev_state < MAX_STATES ?
3537                         qdev_state[dev_state] : "Unknown");
3538
3539                 switch (dev_state) {
3540                 case QLA82XX_DEV_READY:
3541                         goto exit;
3542                 case QLA82XX_DEV_COLD:
3543                         rval = qla82xx_device_bootstrap(vha);
3544                         goto exit;
3545                 case QLA82XX_DEV_INITIALIZING:
3546                         qla82xx_idc_unlock(ha);
3547                         msleep(1000);
3548                         qla82xx_idc_lock(ha);
3549                         break;
3550                 case QLA82XX_DEV_NEED_RESET:
3551                         if (!ql2xdontresethba)
3552                                 qla82xx_need_reset_handler(vha);
3553                         break;
3554                 case QLA82XX_DEV_NEED_QUIESCENT:
3555                         qla82xx_need_qsnt_handler(vha);
3556                         /* Reset timeout value after quiescence handler */
3557                         dev_init_timeout = jiffies +
3558                             (ha->nx_dev_init_timeout * HZ);
3559                         break;
3560                 case QLA82XX_DEV_QUIESCENT:
3561                         /* The quiescence owner exits; others wait for the
3562                          * state to change.
3563                          */
3564                         if (ha->flags.quiesce_owner)
3565                                 goto exit;
3566
3567                         qla82xx_idc_unlock(ha);
3568                         msleep(1000);
3569                         qla82xx_idc_lock(ha);
3570
3571                         /* Reset timeout value after quiescence handler */
3572                         dev_init_timeout = jiffies +
3573                             (ha->nx_dev_init_timeout * HZ);
3574                         break;
3575                 case QLA82XX_DEV_FAILED:
3576                         qla82xx_dev_failed_handler(vha);
3577                         rval = QLA_FUNCTION_FAILED;
3578                         goto exit;
3579                 default:
3580                         qla82xx_idc_unlock(ha);
3581                         msleep(1000);
3582                         qla82xx_idc_lock(ha);
3583                 }
3584         }
3585 exit:
3586         qla82xx_idc_unlock(ha);
3587         return rval;
3588 }
3589
3590 void qla82xx_watchdog(scsi_qla_host_t *vha)
3591 {
3592         uint32_t dev_state;
3593         struct qla_hw_data *ha = vha->hw;
3594
3595         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3596
3597         /* don't poll if reset is going on */
3598         if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
3599                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
3600                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) {
3601                 if (dev_state == QLA82XX_DEV_NEED_RESET) {
3602                         qla_printk(KERN_WARNING, ha,
3603                                 "%s(): Adapter reset needed!\n", __func__);
3604                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3605                         qla2xxx_wake_dpc(vha);
3606                         ha->flags.fw_hung = 1;
3607                         if (ha->flags.mbox_busy) {
3608                                 ha->flags.mbox_int = 1;
3609                                 DEBUG2(qla_printk(KERN_ERR, ha,
3610                                         "Need reset, doing premature "
3611                                         "completion of mbx command\n"));
3612                                 if (test_bit(MBX_INTR_WAIT,
3613                                         &ha->mbx_cmd_flags))
3614                                         complete(&ha->mbx_intr_comp);
3615                         }
3616                 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
3617                         !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3618                         DEBUG(qla_printk(KERN_INFO, ha,
3619                                 "scsi(%ld) %s - detected quiescence needed\n",
3620                                 vha->host_no, __func__));
3621                         set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3622                         qla2xxx_wake_dpc(vha);
3623                 } else {
3624                         qla82xx_check_fw_alive(vha);
3625                 }
3626         }
3627 }
3628
3629 int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3630 {
3631         int rval;
3632         rval = qla82xx_device_state_handler(vha);
3633         return rval;
3634 }
3635
3636 /*
3637  *  qla82xx_abort_isp
3638  *      Resets ISP and aborts all outstanding commands.
3639  *
3640  * Input:
3641  *      ha           = adapter block pointer.
3642  *
3643  * Returns:
3644  *      0 = success
3645  */
3646 int
3647 qla82xx_abort_isp(scsi_qla_host_t *vha)
3648 {
3649         int rval;
3650         struct qla_hw_data *ha = vha->hw;
3651         uint32_t dev_state;
3652
3653         if (vha->device_flags & DFLG_DEV_FAILED) {
3654                 qla_printk(KERN_WARNING, ha,
3655                         "%s(%ld): Device in failed state, "
3656                         "Exiting.\n", __func__, vha->host_no);
3657                 return QLA_SUCCESS;
3658         }
3659
3660         qla82xx_idc_lock(ha);
3661         dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3662         if (dev_state == QLA82XX_DEV_READY) {
3663                 qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
3664                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3665                         QLA82XX_DEV_NEED_RESET);
3666         } else
3667                 qla_printk(KERN_INFO, ha, "HW State: %s\n",
3668                         dev_state < MAX_STATES ?
3669                         qdev_state[dev_state] : "Unknown");
3670         qla82xx_idc_unlock(ha);
3671
3672         rval = qla82xx_device_state_handler(vha);
3673
3674         qla82xx_idc_lock(ha);
3675         qla82xx_clear_rst_ready(ha);
3676         qla82xx_idc_unlock(ha);
3677
3678         if (rval == QLA_SUCCESS) {
3679                 ha->flags.fw_hung = 0;
3680                 qla82xx_restart_isp(vha);
3681         }
3682
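             /*
              * On failure, retry the ISP abort up to MAX_RETRIES_OF_ISP_ABORT
              * times via the ISP_ABORT_RETRY dpc flag; once the retries are
              * exhausted, reset the adapter and leave the board disabled.
              */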
3683         if (rval) {
3684                 vha->flags.online = 1;
3685                 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3686                         if (ha->isp_abort_cnt == 0) {
3687                                 qla_printk(KERN_WARNING, ha,
3688                                     "ISP error recovery failed - "
3689                                     "board disabled\n");
3690                                 /*
3691                                  * The next call disables the board
3692                                  * completely.
3693                                  */
3694                                 ha->isp_ops->reset_adapter(vha);
3695                                 vha->flags.online = 0;
3696                                 clear_bit(ISP_ABORT_RETRY,
3697                                     &vha->dpc_flags);
3698                                 rval = QLA_SUCCESS;
3699                         } else { /* schedule another ISP abort */
3700                                 ha->isp_abort_cnt--;
3701                                 DEBUG(qla_printk(KERN_INFO, ha,
3702                                     "qla%ld: ISP abort - retry remaining %d\n",
3703                                     vha->host_no, ha->isp_abort_cnt));
3704                                 rval = QLA_FUNCTION_FAILED;
3705                         }
3706                 } else {
3707                         ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3708                         DEBUG(qla_printk(KERN_INFO, ha,
3709                             "(%ld): ISP error recovery - retrying (%d) "
3710                             "more times\n", vha->host_no, ha->isp_abort_cnt));
3711                         set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3712                         rval = QLA_FUNCTION_FAILED;
3713                 }
3714         }
3715         return rval;
3716 }
3717
3718 /*
3719  *  qla82xx_fcoe_ctx_reset
3720  *      Perform a quick reset and aborts all outstanding commands.
3721  *      This will only perform an FCoE context reset and avoids a full blown
3722  *      chip reset.
3723  *
3724  * Input:
3725  *      ha = adapter block pointer.
3726  *      is_reset_path = flag for identifying the reset path.
3727  *
3728  * Returns:
3729  *      0 = success
3730  */
3731 int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
3732 {
3733         int rval = QLA_FUNCTION_FAILED;
3734
3735         if (vha->flags.online) {
3736                 /* Abort all outstanding commands so they can be requeued later. */
3737                 qla2x00_abort_isp_cleanup(vha);
3738         }
3739
3740         /* Stop currently executing firmware.
3741          * This will destroy existing FCoE context at the F/W end.
3742          */
3743         qla2x00_try_to_stop_firmware(vha);
3744
3745         /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
3746         rval = qla82xx_restart_isp(vha);
3747
3748         return rval;
3749 }
3750
3751 /*
3752  * qla2x00_wait_for_fcoe_ctx_reset
3753  *    Wait till the FCoE context is reset.
3754  *
3755  * Note:
3756  *    Does context switching here.
3757  *    Release SPIN_LOCK (if any) before calling this routine.
3758  *
3759  * Return:
3760  *    Success (fcoe_ctx reset is done) : 0
3761  *    Failed  (fcoe_ctx reset not completed within max loop timeout) : 1
3762  */
3763 int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
3764 {
3765         int status = QLA_FUNCTION_FAILED;
3766         unsigned long wait_reset;
3767
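             /*
              * Poll the FCOE_CTX_RESET_NEEDED and ABORT_ISP_ACTIVE dpc flags
              * once a second for up to MAX_LOOP_TIMEOUT seconds.
              */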
3768         wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
3769         while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3770             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
3771             && time_before(jiffies, wait_reset)) {
3772
3773                 set_current_state(TASK_UNINTERRUPTIBLE);
3774                 schedule_timeout(HZ);
3775
3776                 if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
3777                     !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3778                         status = QLA_SUCCESS;
3779                         break;
3780                 }
3781         }
3782         DEBUG2(printk(KERN_INFO
3783             "%s status=%d\n", __func__, status));
3784
3785         return status;
3786 }