drivers/scsi/bfa/bfa_ioc_ct.c
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfa_ioc.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"

BFA_TRC_FILE(CNA, IOC_CT);

/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);

/*
 * Catapult (CT) hardware interface; populated by bfa_ioc_set_ct_hwif().
 */
struct bfa_ioc_hwif_s hwif_ct;

/*
 * Called from bfa_ioc_attach() to map ASIC specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
        hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
        hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
        hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
        hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
        hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
        hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
        hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
        hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;

        ioc->ioc_hwif = &hwif_ct;
}

/*
 * Return true if firmware of current driver matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
        enum bfi_ioc_state ioc_fwstate;
        u32 usecnt;
        struct bfi_ioc_image_hdr_s fwhdr;

        /*
         * Firmware match check is relevant only for CNA.
         */
        if (!ioc->cna)
                return BFA_TRUE;

        /*
         * If bios boot (flash based) -- do not increment usage count
         */
        if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return BFA_TRUE;

        bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

        /*
         * If usage count is 0, always return TRUE.
         */
        if (usecnt == 0) {
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                bfa_trc(ioc, usecnt);
                return BFA_TRUE;
        }

        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
        bfa_trc(ioc, ioc_fwstate);

        /*
         * The use count cannot be non-zero while the chip is uninitialized.
         */
        bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);

        /*
         * Check if another driver with a different firmware is active
         */
        bfa_ioc_fwver_get(ioc, &fwhdr);
        if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
                bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                bfa_trc(ioc, usecnt);
                return BFA_FALSE;
        }

        /*
         * Same firmware version. Increment the reference count.
         */
        usecnt++;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        bfa_trc(ioc, usecnt);
        return BFA_TRUE;
}

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
        u32 usecnt;

        /*
         * Firmware lock is relevant only for CNA.
         */
        if (!ioc->cna)
                return;

        /*
         * If bios boot (flash based) -- do not decrement usage count
         */
        if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return;

        /*
         * decrement usage count
         */
        bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
        bfa_assert(usecnt > 0);

        usecnt--;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_trc(ioc, usecnt);

        bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/*
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
{
        if (ioc->cna) {
                writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
                /* Wait for halt to take effect */
                readl(ioc->ioc_regs.ll_halt);
        } else {
                writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
                readl(ioc->ioc_regs.err_set);
        }
}

/*
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
        { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/*
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
        { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
        { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
        { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
        { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};

/*
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
        { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
        { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
        { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
        { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
};

static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
        void __iomem *rb;
        int             pcifn = bfa_ioc_pcifn(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
        } else {
                ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
                ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
                ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
        ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
        ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
        ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
        ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
        ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
        ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);

        /*
         * SRAM memory access
         */
        ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * Error set register: used to notify heartbeat failure in FC mode.
         */
        ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/*
 * Initialize IOC to port mapping.
 */

#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32;

        /*
         * For Catapult, base port id on personality register and IOC type
         */
        r32 = readl(rb + FNC_PERS_REG);
        r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
        ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

        bfa_trc(ioc, bfa_ioc_pcifn(ioc));
        bfa_trc(ioc, ioc->port_id);
}

/*
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32, mode;

        r32 = readl(rb + FNC_PERS_REG);
        bfa_trc(ioc, r32);

        mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
                __F0_INTX_STATUS;

        /*
         * If already in desired mode, do not change anything
         */
        if (!msix && mode)
                return;

        if (msix)
                mode = __F0_INTX_STATUS_MSIX;
        else
                mode = __F0_INTX_STATUS_INTA;

        r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        bfa_trc(ioc, r32);

        writel(r32, rb + FNC_PERS_REG);
}

/*
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{
        if (ioc->cna) {
                bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_usage_reg);
                bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        }

        /*
         * Read the hw sem reg to make sure that it is locked
         * before we clear it. If it is not locked, writing 1
         * will lock it instead of clearing it.
         */
        readl(ioc->ioc_regs.ioc_sem_reg);
        bfa_ioc_hw_sem_release(ioc);
}


/*
 * Check the firmware state to know if pll_init has been completed already
 */
bfa_boolean_t
bfa_ioc_ct_pll_init_complete(void __iomem *rb)
{
        if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
            (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
                return BFA_TRUE;

        return BFA_FALSE;
}

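/*
 * Set up the ASIC: select the operating mode (FC vs. FCoE), bring the
 * application PLLs out of reset and run the memory BIST.  Always returns
 * BFA_STATUS_OK.
 */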
bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
{
        u32     pll_sclk, pll_fclk, r32;

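        /*
         * Compose the control words for the two application PLLs
         * (slow clock via APP_PLL_312_CTL_REG, fast clock via
         * APP_PLL_425_CTL_REG).
         */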
        pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
                __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
                __APP_PLL_312_JITLMT0_1(3U) |
                __APP_PLL_312_CNTLMT0_1(1U);
        pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
                __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
                __APP_PLL_425_JITLMT0_1(3U) |
                __APP_PLL_425_CNTLMT0_1(1U);
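        /*
         * Select the operating mode: FC mode clears OP_MODE, FCoE/Ethernet
         * mode sets __GLOBAL_FCOE_MODE.  ETH_MAC_SER_REG is programmed with
         * the matching reference clock buffer selection.
         */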
        if (fcmode) {
                writel(0, (rb + OP_MODE));
                writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
                         __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
        } else {
                writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
                writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
        }
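        /*
         * Force both IOC firmware state registers to UNINIT and mask and
         * clear all host function interrupts before touching the PLLs.
         */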
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
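        /*
         * Assert the logic soft reset on both PLLs, then enable them with
         * the soft reset still held.
         */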
        writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET,
                        rb + APP_PLL_312_CTL_REG);
        writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET,
                        rb + APP_PLL_425_CTL_REG);
        writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
                        rb + APP_PLL_312_CTL_REG);
        writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
                        rb + APP_PLL_425_CTL_REG);
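        /*
         * The readl() flushes the posted writes; the delay gives the PLLs
         * time to settle before interrupt status is cleared and the soft
         * reset is released.
         */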
        readl(rb + HOSTFN0_INT_MSK);
        udelay(2000);
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG);
        writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG);
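        /*
         * In FCoE/Ethernet mode, hold the per-port PMM 1T memories in reset
         * while the PSS LMEM reset is released, then bring them back out of
         * reset.
         */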
        if (!fcmode) {
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
        }
        r32 = readl((rb + PSS_CTL_REG));
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, (rb + PSS_CTL_REG));
        udelay(1000);
        if (!fcmode) {
                writel(0, (rb + PMM_1T_RESET_REG_P0));
                writel(0, (rb + PMM_1T_RESET_REG_P1));
        }

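        /*
         * Kick off the eDRAM built-in memory self test, give it time to run,
         * then stop it.  The status register is read back but the result is
         * not checked here.
         */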
        writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
        udelay(1000);
        r32 = readl((rb + MBIST_STAT_REG));
        writel(0, (rb + MBIST_CTL_REG));
        return BFA_STATUS_OK;
}