/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"

/**
 * IOC local definitions
 */

/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)                  \
                        ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)                      \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)               \
                        ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)                        \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)                       \
                        ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                         \
                        ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)         \
                        (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
                        readl((__ioc)->ioc_regs.hfn_mbox_cmd))

static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
                         u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
                                                char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
                                                char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
                                                char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
                                                char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
                                                char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);

/**
 * IOC state machine definitions/declarations
 */
enum ioc_event {
        IOC_E_RESET             = 1,    /*!< IOC reset request          */
        IOC_E_ENABLE            = 2,    /*!< IOC enable request         */
        IOC_E_DISABLE           = 3,    /*!< IOC disable request        */
        IOC_E_DETACH            = 4,    /*!< driver detach cleanup      */
        IOC_E_ENABLED           = 5,    /*!< f/w enabled                */
        IOC_E_FWRSP_GETATTR     = 6,    /*!< IOC get attribute response */
        IOC_E_DISABLED          = 7,    /*!< f/w disabled               */
        IOC_E_INITFAILED        = 8,    /*!< failure notice by iocpf sm */
        IOC_E_PFAILED           = 9,    /*!< failure notice by iocpf sm */
        IOC_E_HBFAIL            = 10,   /*!< heartbeat failure          */
        IOC_E_HWERROR           = 11,   /*!< hardware error interrupt   */
        IOC_E_TIMEOUT           = 12,   /*!< timeout                    */
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);

static struct bfa_sm_table ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
        {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
        {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
        {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
        {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
        {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
        {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};

/**
 * IOCPF state machine definitions/declarations
 */

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);

/**
 * IOCPF state machine events
 */
enum iocpf_event {
        IOCPF_E_ENABLE          = 1,    /*!< IOCPF enable request       */
        IOCPF_E_DISABLE         = 2,    /*!< IOCPF disable request      */
        IOCPF_E_STOP            = 3,    /*!< stop on driver detach      */
        IOCPF_E_FWREADY         = 4,    /*!< f/w initialization done    */
        IOCPF_E_FWRSP_ENABLE    = 5,    /*!< enable f/w response        */
        IOCPF_E_FWRSP_DISABLE   = 6,    /*!< disable f/w response       */
        IOCPF_E_FAIL            = 7,    /*!< failure notice by ioc sm   */
        IOCPF_E_INITFAIL        = 8,    /*!< init fail notice by ioc sm */
        IOCPF_E_GETATTRFAIL     = 9,    /*!< init fail notice by ioc sm */
        IOCPF_E_SEMLOCKED       = 10,   /*!< h/w semaphore is locked    */
        IOCPF_E_TIMEOUT         = 11,   /*!< f/w response timeout       */
};

/**
 * IOCPF states
 */
enum bfa_iocpf_state {
        BFA_IOCPF_RESET         = 1,    /*!< IOC is in reset state */
        BFA_IOCPF_SEMWAIT       = 2,    /*!< Waiting for IOC h/w semaphore */
        BFA_IOCPF_HWINIT        = 3,    /*!< IOC h/w is being initialized */
        BFA_IOCPF_READY         = 4,    /*!< IOCPF is initialized */
        BFA_IOCPF_INITFAIL      = 5,    /*!< IOCPF failed */
        BFA_IOCPF_FAIL          = 6,    /*!< IOCPF failed */
        BFA_IOCPF_DISABLING     = 7,    /*!< IOCPF is being disabled */
        BFA_IOCPF_DISABLED      = 8,    /*!< IOCPF is disabled */
        BFA_IOCPF_FWMISMATCH    = 9,    /*!< IOC f/w different from driver's */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

static struct bfa_sm_table iocpf_sm_table[] = {
        {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
        {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
        {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
        {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/**
 * IOC State Machine
 */

/**
 * Beginning state. IOC uninit state.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}

/**
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_RESET:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
        bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/**
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_disable_comp(ioc);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

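/**
 * Enabling state entry -- kick off IOCPF enable.
 */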
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
        bfa_iocpf_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFAILED:
                /* !!! fall through !!! */
        case IOC_E_HWERROR:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                if (event != IOC_E_PFAILED)
                        bfa_iocpf_initfail(ioc);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * Request IOC attributes from firmware; IOC timer is armed.
 */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
        mod_timer(&ioc->ioc_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_FWRSP_GETATTR:
                del_timer(&ioc->ioc_timer);
                bfa_ioc_check_attr_wwns(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;

        case IOC_E_PFAILED:
        case IOC_E_HWERROR:
                del_timer(&ioc->ioc_timer);
                /* fall through */
        case IOC_E_TIMEOUT:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                if (event != IOC_E_PFAILED)
                        bfa_iocpf_getattrfail(ioc);
                break;

        case IOC_E_DISABLE:
                del_timer(&ioc->ioc_timer);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

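/**
 * Operational state entry -- report enable completion to the driver
 * and start heartbeat monitoring.
 */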
static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
        bfa_ioc_hb_monitor(ioc);
}

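/**
 * IOC is operational. Heartbeat monitor is running.
 */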
static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_ioc_hb_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_PFAILED:
        case IOC_E_HWERROR:
                bfa_ioc_hb_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_HBFAIL:
                bfa_ioc_fail_notify(ioc);
                if (ioc->iocpf.auto_recover)
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                else
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

                if (event != IOC_E_PFAILED)
                        bfa_iocpf_fail(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
        bfa_iocpf_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_DISABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        case IOC_E_HWERROR:
                /*
                 * No state change.  Will move to disabled state
                 * after iocpf sm completes failure processing and
                 * moves to disabled state.
                 */
                bfa_iocpf_fail(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
        bfa_ioc_disable_comp(ioc);
}

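/**
 * IOC is disabled. Awaiting re-enable or detach.
 */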
static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                ioc->cbfn->disable_cbfn(ioc->bfa);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

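/**
 * Init failure retry entry -- no local action, IOCPF drives the retry.
 */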
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}

/**
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFAILED:
        case IOC_E_HWERROR:
                /**
                 * Initialization retry failed.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                if (event != IOC_E_PFAILED)
                        bfa_iocpf_initfail(ioc);
                break;

        case IOC_E_INITFAILED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                break;

        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

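/**
 * Failure state entry -- nothing to do.
 */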
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}

/**
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        case IOC_E_HWERROR:
                /* HB failure notification, ignore. */
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * IOCPF State Machine
 */

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
        iocpf->retry_count = 0;
        iocpf->auto_recover = bfa_nw_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        switch (event) {
        case IOCPF_E_ENABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_STOP:
                break;

        default:
                bfa_sm_fault(iocpf->ioc, event);
        }
}

/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        if (bfa_ioc_sync_start(ioc)) {
                                iocpf->retry_count = 0;
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        } else {
                                bfa_ioc_firmware_unlock(ioc);
                                bfa_nw_ioc_hw_sem_release(ioc);
                                mod_timer(&ioc->sem_timer, jiffies +
                                        msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
                        }
                } else {
                        bfa_nw_ioc_hw_sem_release(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
                }
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_ioc_pf_disabled(ioc);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * Notify enable completion callback
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
        /* Call only the first time sm enters fwmismatch state. */
        if (iocpf->retry_count == 0)
                bfa_ioc_pf_fwmismatch(iocpf->ioc);

        iocpf->retry_count++;
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_TIMEOUT:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_ioc_pf_disabled(ioc);
                break;

        case IOCPF_E_STOP:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_sync_complete(ioc)) {
                        bfa_ioc_sync_join(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                } else {
                        bfa_nw_ioc_hw_sem_release(ioc);
                        mod_timer(&ioc->sem_timer, jiffies +
                                msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
                }
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

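/**
 * Arm the IOCPF timer and start h/w initialization.
 */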
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_reset(iocpf->ioc, false);
}

/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_FWREADY:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
                break;

        case IOCPF_E_INITFAIL:
                del_timer(&ioc->iocpf_timer);
                /*
                 * !!! fall through !!!
                 */

        case IOCPF_E_TIMEOUT:
                bfa_nw_ioc_hw_sem_release(ioc);
                if (event == IOCPF_E_TIMEOUT)
                        bfa_ioc_pf_failed(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_ioc_sync_leave(ioc);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

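/**
 * Arm the IOCPF timer and send an enable request to firmware.
 */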
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_send_enable(iocpf->ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_FWRSP_ENABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
                break;

        case IOCPF_E_INITFAIL:
                del_timer(&ioc->iocpf_timer);
                /*
                 * !!! fall through !!!
                 */
        case IOCPF_E_TIMEOUT:
                bfa_nw_ioc_hw_sem_release(ioc);
                if (event == IOCPF_E_TIMEOUT)
                        bfa_ioc_pf_failed(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        case IOCPF_E_FWREADY:
                bfa_ioc_send_enable(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

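/**
 * Returns true if the IOC state machine is in the operational state.
 */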
static bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

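/**
 * Ready state entry -- notify the IOC that enable completed.
 */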
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        case IOCPF_E_GETATTRFAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_FAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
                break;

        case IOCPF_E_FWREADY:
                bfa_ioc_pf_failed(ioc);
                if (bfa_nw_ioc_is_operational(ioc))
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
                else
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

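/**
 * Disabling state entry -- arm the IOCPF timer and send a disable
 * request to firmware.
 */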
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_send_disable(iocpf->ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_FWRSP_DISABLE:
        case IOCPF_E_FWREADY:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                del_timer(&ioc->iocpf_timer);
                /*
                 * !!! fall through !!!
                 */

        case IOCPF_E_TIMEOUT:
                writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FWRSP_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_sync_leave(ioc);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_pf_disabled(iocpf->ioc);
}

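/**
 * IOCPF is disabled. Awaiting re-enable or stop.
 */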
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_ENABLE:
                iocpf->retry_count = 0;
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_notify_fail(ioc);
                bfa_ioc_sync_ack(ioc);
                iocpf->retry_count++;
                if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
                        bfa_ioc_sync_leave(ioc);
                        bfa_nw_ioc_hw_sem_release(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
                } else {
                        if (bfa_ioc_sync_complete(ioc))
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        else {
                                bfa_nw_ioc_hw_sem_release(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                        }
                }
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_pf_initfailed(iocpf->ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
        /**
         * Mark IOC as failed in hardware and stop firmware.
         */
        bfa_ioc_lpu_stop(iocpf->ioc);

        /**
         * Flush any queued up mailbox requests.
         */
        bfa_ioc_mbox_hbfail(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                iocpf->retry_count = 0;
                bfa_ioc_sync_ack(ioc);
                bfa_ioc_notify_fail(ioc);
                if (!iocpf->auto_recover) {
                        bfa_ioc_sync_leave(ioc);
                        bfa_nw_ioc_hw_sem_release(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                } else {
                        if (bfa_ioc_sync_complete(ioc))
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        else {
                                bfa_nw_ioc_hw_sem_release(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                        }
                }
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}

/**
 * @brief
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(iocpf->ioc, event);
        }
}

/**
 * BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
        struct list_head                        *qe;
        struct bfa_ioc_hbfail_notify *notify;

        ioc->cbfn->disable_cbfn(ioc->bfa);

        /**
         * Notify common modules registered for notification.
         */
        list_for_each(qe, &ioc->hb_notify_q) {
                notify = (struct bfa_ioc_hbfail_notify *) qe;
                notify->cbfn(notify->cbarg);
        }
}

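/**
 * Spin (up to BFA_SEM_SPINCNT iterations, ~2us each) until the h/w
 * semaphore register reads 0 (owned). BUG()s if the semaphore cannot
 * be acquired within the spin budget.
 *
 * A minimal usage sketch, mirroring the pattern bfa_ioc_pll_init()
 * uses below (caller context shown is illustrative only):
 *
 *      bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
 *      ... exclusive access to chip init registers ...
 *      bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 */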
bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
        u32 r32;
        int cnt = 0;
#define BFA_SEM_SPINCNT 3000

        r32 = readl(sem_reg);

        while (r32 && (cnt < BFA_SEM_SPINCNT)) {
                cnt++;
                udelay(2);
                r32 = readl(sem_reg);
        }

        if (r32 == 0)
                return true;

        BUG_ON(!(cnt < BFA_SEM_SPINCNT));
        return false;
}

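/**
 * Release a semaphore taken with bfa_nw_ioc_sem_get().
 */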
void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
        writel(1, sem_reg);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
        u32     r32;

        /**
         * First read to the semaphore register will return 0, subsequent reads
         * will return 1. Semaphore is released by writing 1 to the register
         */
        r32 = readl(ioc->ioc_regs.ioc_sem_reg);
        if (r32 == 0) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
                return;
        }

        mod_timer(&ioc->sem_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
        writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
        del_timer(&ioc->sem_timer);
}

/**
 * @brief
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
        u32     pss_ctl;
        int             i;
#define PSS_LMEM_INIT_TIME  10000

        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LMEM_RESET;
        pss_ctl |= __PSS_LMEM_INIT_EN;

        /*
         * i2c workaround 12.5khz clock
         */
        pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

        /**
         * wait for memory initialization to be complete
         */
        i = 0;
        do {
                pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
                i++;
        } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

        /**
         * If memory initialization is not successful, IOC timeout will catch
         * such failures.
         */
        BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

        pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
        u32     pss_ctl;

        /**
         * Take processor out of reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LPU0_RESET;

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
        u32     pss_ctl;

        /**
         * Put processors in reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/**
 * Get driver and firmware versions.
 */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
        u32     pgnum, pgoff;
        u32     loff = 0;
        int             i;
        u32     *fwsig = (u32 *) fwhdr;

        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
        pgoff = bfa_ioc_smem_pgoff(ioc, loff);
        writel(pgnum, ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
             i++) {
                fwsig[i] =
                        swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
                loff += sizeof(u32);
        }
}

/**
 * Returns TRUE if same.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
        struct bfi_ioc_image_hdr *drv_fwhdr;
        int i;

        drv_fwhdr = (struct bfi_ioc_image_hdr *)
                bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

        for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
                if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
                        return false;
        }

        return true;
}

/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
        struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;

        bfa_nw_ioc_fwver_get(ioc, &fwhdr);
        drv_fwhdr = (struct bfi_ioc_image_hdr *)
                bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

        if (fwhdr.signature != drv_fwhdr->signature)
                return false;

        if (swab32(fwhdr.param) != boot_env)
                return false;

        return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}

/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
        u32     r32;

        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
        if (r32)
                writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

/**
 * @img ioc_init_logic.jpg
 */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
        enum bfi_ioc_state ioc_fwstate;
        bool fwvalid;
        u32 boot_env;

        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        boot_env = BFI_BOOT_LOADER_OS;

        if (force)
                ioc_fwstate = BFI_IOC_UNINIT;

        /**
         * check if firmware is valid
         */
        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
                false : bfa_ioc_fwver_valid(ioc, boot_env);

        if (!fwvalid) {
                bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
                return;
        }

        /**
         * If hardware initialization is in progress (initialized by other IOC),
         * just wait for an initialization completion interrupt.
         */
        if (ioc_fwstate == BFI_IOC_INITING) {
                ioc->cbfn->reset_cbfn(ioc->bfa);
                return;
        }

        /**
         * If IOC function is disabled and firmware version is same,
         * just re-enable IOC.
         */
        if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
                /**
                 * When using MSI-X any pending firmware ready event should
                 * be flushed. Otherwise MSI-X interrupts are not delivered.
                 */
                bfa_ioc_msgflush(ioc);
                ioc->cbfn->reset_cbfn(ioc->bfa);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
                return;
        }

        /**
         * Initialize the h/w for any other states.
         */
        bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
}

void
bfa_nw_ioc_timeout(void *ioc_arg)
{
        struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

        bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

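/**
 * Copy a h2i message into the host-to-LPU mailbox and ring the
 * command doorbell. Assumes the previous command has already been
 * consumed by firmware (callers check hfn_mbox_cmd first).
 */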
static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
        u32 *msgp = (u32 *) ioc_msg;
        u32 i;

        BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

        /*
         * first write msg to mailbox registers
         */
        for (i = 0; i < len / sizeof(u32); i++)
                writel(cpu_to_le32(msgp[i]),
                              ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

        for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
                writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

        /*
         * write 1 to mailbox CMD to trigger LPU event
         */
        writel(1, ioc->ioc_regs.hfn_mbox_cmd);
        (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

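/**
 * Send an IOC enable request to firmware, stamped with the current time.
 */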
static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
        struct bfi_ioc_ctrl_req enable_req;
        struct timeval tv;

        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
                    bfa_ioc_portid(ioc));
        enable_req.ioc_class = ioc->ioc_mc;
        do_gettimeofday(&tv);
        enable_req.tv_sec = ntohl(tv.tv_sec);
        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

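/**
 * Send an IOC disable request to firmware.
 */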
static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
        struct bfi_ioc_ctrl_req disable_req;

        bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
                    bfa_ioc_portid(ioc));
        bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}

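/**
 * Request IOC attributes; firmware DMAs them to attr_dma.pa.
 */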
static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
        struct bfi_ioc_getattr_req attr_req;

        bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
                    bfa_ioc_portid(ioc));
        bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
        bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

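/**
 * Heartbeat timer callback -- initiate recovery if the firmware
 * heartbeat count has not advanced since the last check.
 */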
void
bfa_nw_ioc_hb_check(void *cbarg)
{
        struct bfa_ioc *ioc = cbarg;
        u32     hb_count;

        hb_count = readl(ioc->ioc_regs.heartbeat);
        if (ioc->hb_count == hb_count) {
                bfa_ioc_recover(ioc);
                return;
        } else {
                ioc->hb_count = hb_count;
        }

        bfa_ioc_mbox_poll(ioc);
        mod_timer(&ioc->hb_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_HB_TOV));
}

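/**
 * Sample the heartbeat count and start the heartbeat timer.
 */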
1494 static void
1495 bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1496 {
1497         ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1498         mod_timer(&ioc->hb_timer, jiffies +
1499                 msecs_to_jiffies(BFA_IOC_HB_TOV));
1500 }
1501
1502 static void
1503 bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1504 {
1505         del_timer(&ioc->hb_timer);
1506 }
1507
1508 /**
1509  * @brief
1510  *      Initiate a full firmware download.
1511  */
1512 static void
1513 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1514                     u32 boot_env)
1515 {
1516         u32 *fwimg;
1517         u32 pgnum, pgoff;
1518         u32 loff = 0;
1519         u32 chunkno = 0;
1520         u32 i;
1521
1522         /**
1523          * Initialize LMEM first before code download
1524          */
1525         bfa_ioc_lmem_init(ioc);
1526
1527         fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1528
1529         pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1530         pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1531
1532         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1533
1534         for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1535                 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1536                         chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1537                         fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1538                                         BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1539                 }
1540
1541                 /**
1542                  * write smem
1543                  */
1544                 writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
1545                               ((ioc->ioc_regs.smem_page_start) + (loff)));
1546
1547                 loff += sizeof(u32);
1548
1549                 /**
1550                  * handle page offset wrap around
1551                  */
1552                 loff = PSS_SMEM_PGOFF(loff);
1553                 if (loff == 0) {
1554                         pgnum++;
1555                         writel(pgnum,
1556                                       ioc->ioc_regs.host_page_num_fn);
1557                 }
1558         }
1559
1560         writel(bfa_ioc_smem_pgnum(ioc, 0),
1561                       ioc->ioc_regs.host_page_num_fn);
1562
1563         /*
1564          * Set boot type and boot param at the end.
1565         */
1566         writel(boot_type, ((ioc->ioc_regs.smem_page_start)
1567                         + (BFI_BOOT_TYPE_OFF)));
1568         writel(boot_env, ((ioc->ioc_regs.smem_page_start)
1569                         + (BFI_BOOT_LOADER_OFF)));
1570 }
1571
1572 static void
1573 bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1574 {
1575         bfa_ioc_hwinit(ioc, force);
1576 }
1577
1578 /**
1579  * @brief
1580  * Update BFA configuration from firmware configuration.
1581  */
1582 static void
1583 bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1584 {
1585         struct bfi_ioc_attr *attr = ioc->attr;
1586
1587         attr->adapter_prop  = ntohl(attr->adapter_prop);
1588         attr->card_type     = ntohl(attr->card_type);
1589         attr->maxfrsize     = ntohs(attr->maxfrsize);
1590
1591         bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1592 }
1593
1594 /**
1595  * Attach time initialization of mbox logic.
1596  */
1597 static void
1598 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1599 {
1600         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1601         int     mc;
1602
1603         INIT_LIST_HEAD(&mod->cmd_q);
1604         for (mc = 0; mc < BFI_MC_MAX; mc++) {
1605                 mod->mbhdlr[mc].cbfn = NULL;
1606                 mod->mbhdlr[mc].cbarg = ioc->bfa;
1607         }
1608 }
1609
1610 /**
1611  * Mbox poll timer -- restarts any pending mailbox requests.
1612  */
1613 static void
1614 bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1615 {
1616         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1617         struct bfa_mbox_cmd *cmd;
1618         u32                     stat;
1619
1620         /**
1621          * If no command pending, do nothing
1622          */
1623         if (list_empty(&mod->cmd_q))
1624                 return;
1625
1626         /**
1627          * If previous command is not yet fetched by firmware, do nothing
1628          */
1629         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1630         if (stat)
1631                 return;
1632
1633         /**
1634          * Enqueue command to firmware.
1635          */
1636         bfa_q_deq(&mod->cmd_q, &cmd);
1637         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1638 }
1639
1640 /**
1641  * Cleanup any pending requests.
1642  */
1643 static void
1644 bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
1645 {
1646         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1647         struct bfa_mbox_cmd *cmd;
1648
1649         while (!list_empty(&mod->cmd_q))
1650                 bfa_q_deq(&mod->cmd_q, &cmd);
1651 }
1652
1653 static void
1654 bfa_ioc_fail_notify(struct bfa_ioc *ioc)
1655 {
1656         struct list_head                *qe;
1657         struct bfa_ioc_hbfail_notify    *notify;
1658
1659         /**
1660          * Notify driver and common modules registered for notification.
1661          */
1662         ioc->cbfn->hbfail_cbfn(ioc->bfa);
1663         list_for_each(qe, &ioc->hb_notify_q) {
1664                 notify = (struct bfa_ioc_hbfail_notify *) qe;
1665                 notify->cbfn(notify->cbarg);
1666         }
1667 }
1668
1669 static void
1670 bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
1671 {
1672         bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1673 }
1674
1675 static void
1676 bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
1677 {
1678         bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1679 }
1680
1681 static void
1682 bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
1683 {
1684         bfa_fsm_send_event(ioc, IOC_E_INITFAILED);
1685 }
1686
1687 static void
1688 bfa_ioc_pf_failed(struct bfa_ioc *ioc)
1689 {
1690         bfa_fsm_send_event(ioc, IOC_E_PFAILED);
1691 }
1692
1693 static void
1694 bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
1695 {
1696         /**
1697          * Provide enable completion callback and AEN notification.
1698          */
1699         ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1700 }
1701
1702 /**
1703  * IOC public
1704  */
1705 static enum bfa_status
1706 bfa_ioc_pll_init(struct bfa_ioc *ioc)
1707 {
1708         /*
1709          *  Hold semaphore so that nobody can access the chip during init.
1710          */
1711         bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1712
1713         bfa_ioc_pll_init_asic(ioc);
1714
1715         ioc->pllinit = true;
1716         /*
1717          *  release semaphore.
1718          */
1719         bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1720
1721         return BFA_STATUS_OK;
1722 }
1723
1724 /**
1725  * Interface used by diag module to do firmware boot with memory test
1726  * as the entry vector.
1727  */
1728 static void
1729 bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
1730 {
1731         void __iomem *rb;
1732
1733         bfa_ioc_stats(ioc, ioc_boots);
1734
1735         if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1736                 return;
1737
1738         /**
1739          * Initialize IOC state of all functions on a chip reset.
1740          */
1741         rb = ioc->pcidev.pci_bar_kva;
1742         if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
1743                 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
1744                 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
1745         } else {
1746                 writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
1747                 writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
1748         }
1749
1750         bfa_ioc_msgflush(ioc);
1751         bfa_ioc_download_fw(ioc, boot_type, boot_env);
1752
1753         /**
1754          * Enable interrupts just before starting LPU
1755          */
1756         ioc->cbfn->reset_cbfn(ioc->bfa);
1757         bfa_ioc_lpu_start(ioc);
1758 }
1759
1760 /**
1761  * Enable/disable IOC failure auto recovery.
1762  */
1763 void
1764 bfa_nw_ioc_auto_recover(bool auto_recover)
1765 {
1766         bfa_nw_auto_recover = auto_recover;
1767 }
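
/*
 * Usage note -- a minimal sketch, for illustration only: the driver is
 * expected to call this once at module init, before any IOC is enabled.
 * The module-parameter name below is an assumption, not part of this file:
 *
 *	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
 */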
1768
1769 static void
1770 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
1771 {
1772         u32     *msgp = mbmsg;
1773         u32     r32;
1774         int             i;
1775
1776         /**
1777          * read the MBOX msg
1778          */
1779         for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1780              i++) {
1781                 r32 = readl(ioc->ioc_regs.lpu_mbox +
1782                                    i * sizeof(u32));
1783                 msgp[i] = htonl(r32);
1784         }
1785
1786         /**
1787          * turn off mailbox interrupt by clearing mailbox status
1788          */
1789         writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1790         readl(ioc->ioc_regs.lpu_mbox_cmd);
1791 }
1792
1793 static void
1794 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1795 {
1796         union bfi_ioc_i2h_msg_u *msg;
1797         struct bfa_iocpf *iocpf = &ioc->iocpf;
1798
1799         msg = (union bfi_ioc_i2h_msg_u *) m;
1800
1801         bfa_ioc_stats(ioc, ioc_isrs);
1802
1803         switch (msg->mh.msg_id) {
1804         case BFI_IOC_I2H_HBEAT:
1805                 break;
1806
1807         case BFI_IOC_I2H_READY_EVENT:
1808                 bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
1809                 break;
1810
1811         case BFI_IOC_I2H_ENABLE_REPLY:
1812                 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1813                 break;
1814
1815         case BFI_IOC_I2H_DISABLE_REPLY:
1816                 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
1817                 break;
1818
1819         case BFI_IOC_I2H_GETATTR_REPLY:
1820                 bfa_ioc_getattr_reply(ioc);
1821                 break;
1822
1823         default:
1824                 BUG_ON(1);
1825         }
1826 }
1827
1828 /**
1829  * IOC attach time initialization and setup.
1830  * @param[in]   ioc     memory for IOC
1831  * @param[in]   bfa     driver instance structure
1832  * @param[in]   cbfn    callbacks used to notify the driver of IOC events
1833  */
1834 void
1835 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
1836 {
1837         ioc->bfa        = bfa;
1838         ioc->cbfn       = cbfn;
1839         ioc->fcmode     = false;
1840         ioc->pllinit    = false;
1841         ioc->dbg_fwsave_once = true;
1842         ioc->iocpf.ioc  = ioc;
1843
1844         bfa_ioc_mbox_attach(ioc);
1845         INIT_LIST_HEAD(&ioc->hb_notify_q);
1846
1847         bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1848         bfa_fsm_send_event(ioc, IOC_E_RESET);
1849 }
1850
1851 /**
1852  * Driver detach time IOC cleanup.
1853  */
1854 void
1855 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
1856 {
1857         bfa_fsm_send_event(ioc, IOC_E_DETACH);
1858 }
1859
1860 /**
1861  * Setup IOC PCI properties.
1862  * @param[in]   pcidev  PCI device information for this IOC
1863  * @param[in]   mc      mailbox message class of this IOC function
1864  */
1865 void
1866 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
1867                  enum bfi_mclass mc)
1868 {
1869         ioc->ioc_mc     = mc;
1870         ioc->pcidev     = *pcidev;
1871         ioc->ctdev      = bfa_asic_id_ct(ioc->pcidev.device_id);
1872         ioc->cna        = ioc->ctdev && !ioc->fcmode;
1873
1874         bfa_nw_ioc_set_ct_hwif(ioc);
1875
1876         bfa_ioc_map_port(ioc);
1877         bfa_ioc_reg_init(ioc);
1878 }
1879
1880 /**
1881  * Initialize IOC dma memory
1882  *
1883  * @param[in]   dm_kva  kernel virtual address of IOC dma memory
1884  * @param[in]   dm_pa   physical address of IOC dma memory
1885  */
1886 void
1887 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
1888 {
1889         /**
1890          * dma memory for firmware attribute
1891          */
1892         ioc->attr_dma.kva = dm_kva;
1893         ioc->attr_dma.pa = dm_pa;
1894         ioc->attr = (struct bfi_ioc_attr *) dm_kva;
1895 }
1896
1897 /**
1898  * Return size of dma memory required.
1899  */
1900 u32
1901 bfa_nw_ioc_meminfo(void)
1902 {
1903         return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
1904 }
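
/*
 * Typical bring-up sequence -- a minimal sketch for illustration only.
 * "bfa", "my_cbfn", "pcidev" and "pdev" are assumed to be provided by the
 * caller; error handling is omitted.  Completion of the enable is reported
 * asynchronously through cbfn->enable_cbfn:
 *
 *	dma_addr_t dma_pa;
 *	u8 *dma_kva;
 *
 *	bfa_nw_ioc_attach(ioc, bfa, &my_cbfn);
 *	bfa_nw_ioc_pci_init(ioc, &pcidev, BFI_MC_LL);
 *	dma_kva = dma_alloc_coherent(&pdev->dev, bfa_nw_ioc_meminfo(),
 *				     &dma_pa, GFP_KERNEL);
 *	bfa_nw_ioc_mem_claim(ioc, dma_kva, dma_pa);
 *	bfa_nw_ioc_enable(ioc);
 */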
1905
1906 void
1907 bfa_nw_ioc_enable(struct bfa_ioc *ioc)
1908 {
1909         bfa_ioc_stats(ioc, ioc_enables);
1910         ioc->dbg_fwsave_once = true;
1911
1912         bfa_fsm_send_event(ioc, IOC_E_ENABLE);
1913 }
1914
1915 void
1916 bfa_nw_ioc_disable(struct bfa_ioc *ioc)
1917 {
1918         bfa_ioc_stats(ioc, ioc_disables);
1919         bfa_fsm_send_event(ioc, IOC_E_DISABLE);
1920 }
1921
1922 static u32
1923 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
1924 {
1925         return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
1926 }
1927
1928 static u32
1929 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
1930 {
1931         return PSS_SMEM_PGOFF(fmaddr);
1932 }
1933
1934 /**
1935  * Register a mailbox message handler function, for use by common modules
1936  */
1937 void
1938 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
1939                     bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
1940 {
1941         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1942
1943         mod->mbhdlr[mc].cbfn    = cbfn;
1944         mod->mbhdlr[mc].cbarg = cbarg;
1945 }
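
/*
 * Registration sketch -- illustrative only; the handler name and cbarg are
 * hypothetical.  bfa_nw_ioc_mbox_isr() below invokes the handler for every
 * received message of the registered class:
 *
 *	static void my_mc_isr(void *cbarg, struct bfi_mbmsg *m)
 *	{
 *		... dispatch on m->mh.msg_id ...
 *	}
 *
 *	bfa_nw_ioc_mbox_regisr(ioc, BFI_MC_LL, my_mc_isr, my_mod);
 */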
1946
1947 /**
1948  * Queue a mailbox command request to firmware. If the mailbox is busy,
1949  * the command is queued and sent later by the poll timer.
1950  * It is the caller's responsibility to serialize requests.
1951  * @param[in]   ioc     IOC instance
1952  * @param[in]   cmd     Mailbox command
1953  */
1954 void
1955 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
1956 {
1957         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1958         u32                     stat;
1959
1960         /**
1961          * If a previous command is pending, queue new command
1962          */
1963         if (!list_empty(&mod->cmd_q)) {
1964                 list_add_tail(&cmd->qe, &mod->cmd_q);
1965                 return;
1966         }
1967
1968         /**
1969          * If mailbox is busy, queue command for poll timer
1970          */
1971         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1972         if (stat) {
1973                 list_add_tail(&cmd->qe, &mod->cmd_q);
1974                 return;
1975         }
1976
1977         /**
1978          * mailbox is free -- queue command to firmware
1979          */
1980         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1981 }
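
/*
 * Queueing sketch -- illustrative only.  Since the command may be linked
 * into cmd_q through cmd->qe, the bfa_mbox_cmd must remain valid until it
 * is sent; callers embed it in their own module structure rather than
 * placing it on the stack.  The bfi_h2i_set() helper and the request
 * layout are assumed from bfi.h:
 *
 *	struct bfi_ioc_ctrl_req *req = (struct bfi_ioc_ctrl_req *)my_cmd->msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
 *		    bfa_ioc_portid(ioc));
 *	bfa_nw_ioc_mbox_queue(ioc, my_cmd);
 */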
1982
1983 /**
1984  * Handle mailbox interrupts
1985  */
1986 void
1987 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
1988 {
1989         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1990         struct bfi_mbmsg m;
1991         int                             mc;
1992
1993         bfa_ioc_msgget(ioc, &m);
1994
1995         /**
1996          * Treat IOC message class as special.
1997          */
1998         mc = m.mh.msg_class;
1999         if (mc == BFI_MC_IOC) {
2000                 bfa_ioc_isr(ioc, &m);
2001                 return;
2002         }
2003
2004         if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2005                 return;
2006
2007         mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2008 }
2009
2010 void
2011 bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
2012 {
2013         bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2014 }
2015
2016 /**
2017  * Add to IOC heartbeat failure notification queue. To be used by common
2018  * modules such as cee, port, diag.
2019  */
2020 void
2021 bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
2022                         struct bfa_ioc_hbfail_notify *notify)
2023 {
2024         list_add_tail(&notify->qe, &ioc->hb_notify_q);
2025 }
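
/*
 * Registration sketch -- illustrative only; "my_mod" and the handler are
 * hypothetical.  The notify structure is linked into hb_notify_q, so it
 * must remain valid for the lifetime of the IOC:
 *
 *	my_mod->hbfail.cbfn  = my_hbfail_handler;
 *	my_mod->hbfail.cbarg = my_mod;
 *	bfa_nw_ioc_hbfail_register(ioc, &my_mod->hbfail);
 */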
2026
2027 #define BFA_MFG_NAME "Brocade"
2028 static void
2029 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
2030                          struct bfa_adapter_attr *ad_attr)
2031 {
2032         struct bfi_ioc_attr *ioc_attr;
2033
2034         ioc_attr = ioc->attr;
2035
2036         bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2037         bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2038         bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2039         bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2040         memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2041                       sizeof(struct bfa_mfg_vpd));
2042
2043         ad_attr->nports = bfa_ioc_get_nports(ioc);
2044         ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2045
2046         bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2047         /* For now, model descr uses same model string */
2048         bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2049
2050         ad_attr->card_type = ioc_attr->card_type;
2051         ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2052
2053         if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2054                 ad_attr->prototype = 1;
2055         else
2056                 ad_attr->prototype = 0;
2057
2058         ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2059         ad_attr->mac  = bfa_nw_ioc_get_mac(ioc);
2060
2061         ad_attr->pcie_gen = ioc_attr->pcie_gen;
2062         ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2063         ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2064         ad_attr->asic_rev = ioc_attr->asic_rev;
2065
2066         bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2067
2068         ad_attr->cna_capable = ioc->cna;
2069         ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
2070 }
2071
2072 static enum bfa_ioc_type
2073 bfa_ioc_get_type(struct bfa_ioc *ioc)
2074 {
2075         if (!ioc->ctdev || ioc->fcmode)
2076                 return BFA_IOC_TYPE_FC;
2077         else if (ioc->ioc_mc == BFI_MC_IOCFC)
2078                 return BFA_IOC_TYPE_FCoE;
2079         else if (ioc->ioc_mc == BFI_MC_LL)
2080                 return BFA_IOC_TYPE_LL;
2081         else {
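                /* not reached -- every supported message class is handled above */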
2082                 BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
2083                 return BFA_IOC_TYPE_LL;
2084         }
2085 }
2086
2087 static void
2088 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2089 {
2090         memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2091         memcpy(serial_num,
2092                         (void *)ioc->attr->brcd_serialnum,
2093                         BFA_ADAPTER_SERIAL_NUM_LEN);
2094 }
2095
2096 static void
2097 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2098 {
2099         memset(fw_ver, 0, BFA_VERSION_LEN);
2100         memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2101 }
2102
2103 static void
2104 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2105 {
2106         BUG_ON(!(chip_rev));
2107
2108         memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2109
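        /* build the fixed "Rev-<asic_rev>" string, e.g. "Rev-B" */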
2110         chip_rev[0] = 'R';
2111         chip_rev[1] = 'e';
2112         chip_rev[2] = 'v';
2113         chip_rev[3] = '-';
2114         chip_rev[4] = ioc->attr->asic_rev;
2115         chip_rev[5] = '\0';
2116 }
2117
2118 static void
2119 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2120 {
2121         memset(optrom_ver, 0, BFA_VERSION_LEN);
2122         memcpy(optrom_ver, ioc->attr->optrom_version,
2123                       BFA_VERSION_LEN);
2124 }
2125
2126 static void
2127 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2128 {
2129         memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2130         memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2131 }
2132
2133 static void
2134 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
2135 {
2136         struct bfi_ioc_attr *ioc_attr;
2137
2138         BUG_ON(!(model));
2139         memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2140
2141         ioc_attr = ioc->attr;
2142
2143         /**
2144          * model name: "<manufacturer>-<card type>", e.g. "Brocade-10"
2145          */
2146         snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2147                 BFA_MFG_NAME, ioc_attr->card_type);
2148 }
2149
2150 static enum bfa_ioc_state
2151 bfa_ioc_get_state(struct bfa_ioc *ioc)
2152 {
2153         enum bfa_iocpf_state iocpf_st;
2154         enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2155
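        /* refine with the IOCPF state while the IOC FSM is in a transitional state */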
2156         if (ioc_st == BFA_IOC_ENABLING ||
2157                 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2158
2159                 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2160
2161                 switch (iocpf_st) {
2162                 case BFA_IOCPF_SEMWAIT:
2163                         ioc_st = BFA_IOC_SEMWAIT;
2164                         break;
2165
2166                 case BFA_IOCPF_HWINIT:
2167                         ioc_st = BFA_IOC_HWINIT;
2168                         break;
2169
2170                 case BFA_IOCPF_FWMISMATCH:
2171                         ioc_st = BFA_IOC_FWMISMATCH;
2172                         break;
2173
2174                 case BFA_IOCPF_FAIL:
2175                         ioc_st = BFA_IOC_FAIL;
2176                         break;
2177
2178                 case BFA_IOCPF_INITFAIL:
2179                         ioc_st = BFA_IOC_INITFAIL;
2180                         break;
2181
2182                 default:
2183                         break;
2184                 }
2185         }
2186         return ioc_st;
2187 }
2188
2189 void
2190 bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2191 {
2192         memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2193
2194         ioc_attr->state = bfa_ioc_get_state(ioc);
2195         ioc_attr->port_id = ioc->port_id;
2196
2197         ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2198
2199         bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2200
2201         ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2202         ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2203         bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2204 }
2205
2206 /**
2207  * WWN public
2208  */
2209 static u64
2210 bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2211 {
2212         return ioc->attr->pwwn;
2213 }
2214
2215 mac_t
2216 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
2217 {
2218         return ioc->attr->mac;
2219 }
2220
2221 /**
2222  * Firmware failure detected. Start recovery actions.
2223  */
2224 static void
2225 bfa_ioc_recover(struct bfa_ioc *ioc)
2226 {
2227         pr_crit("Heartbeat of IOC has failed\n");
2228         bfa_ioc_stats(ioc, ioc_hbfails);
2229         bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2230 }
2231
2232 static void
2233 bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
2234 {
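        /* LL (Ethernet) functions have no WWNs to check; no validation is done here */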
2235         if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2236                 return;
2237 }
2238
2239 /**
2240  * @dg hal_iocpf_pvt BFA IOC PF private functions
2241  * @{
2242  */
2243
2244 static void
2245 bfa_iocpf_enable(struct bfa_ioc *ioc)
2246 {
2247         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2248 }
2249
2250 static void
2251 bfa_iocpf_disable(struct bfa_ioc *ioc)
2252 {
2253         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2254 }
2255
2256 static void
2257 bfa_iocpf_fail(struct bfa_ioc *ioc)
2258 {
2259         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2260 }
2261
2262 static void
2263 bfa_iocpf_initfail(struct bfa_ioc *ioc)
2264 {
2265         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2266 }
2267
2268 static void
2269 bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2270 {
2271         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2272 }
2273
2274 static void
2275 bfa_iocpf_stop(struct bfa_ioc *ioc)
2276 {
2277         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2278 }
2279
2280 void
2281 bfa_nw_iocpf_timeout(void *ioc_arg)
2282 {
2283         struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
2284
2285         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2286 }
2287
2288 void
2289 bfa_nw_iocpf_sem_timeout(void *ioc_arg)
2290 {
2291         struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
2292
2293         bfa_ioc_hw_sem_get(ioc);
2294 }