drivers/scsi/bfa/bfa_fcpim.c
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);

/*
 *  BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);

#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
        (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
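
/*
 * Note: the lookup above masks the tag with (num_itnims - 1), which
 * indexes the itnim array correctly only when num_itnims is a power
 * of two; the tag is effectively reduced modulo the array size.
 */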

#define bfa_fcpim_additn(__itnim)                                       \
        list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
#define bfa_fcpim_delitn(__itnim)       do {                            \
        WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));   \
        bfa_itnim_update_del_itn_stats(__itnim);      \
        list_del(&(__itnim)->qe);      \
        WARN_ON(!list_empty(&(__itnim)->io_q));                         \
        WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));                 \
        WARN_ON(!list_empty(&(__itnim)->pending_q));                    \
} while (0)

#define bfa_itnim_online_cb(__itnim) do {                               \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_online((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_online, (__itnim));      \
        }                                                               \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do {                              \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_offline((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_offline, (__itnim));      \
        }                                                               \
} while (0)

#define bfa_itnim_sler_cb(__itnim) do {                                 \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_sler((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_sler, (__itnim));      \
        }                                                               \
} while (0)

/*
 *  itnim state machine event
 */
enum bfa_itnim_event {
        BFA_ITNIM_SM_CREATE = 1,        /*  itnim is created */
        BFA_ITNIM_SM_ONLINE = 2,        /*  itnim is online */
        BFA_ITNIM_SM_OFFLINE = 3,       /*  itnim is offline */
        BFA_ITNIM_SM_FWRSP = 4,         /*  firmware response */
        BFA_ITNIM_SM_DELETE = 5,        /*  deleting an existing itnim */
        BFA_ITNIM_SM_CLEANUP = 6,       /*  IO cleanup completion */
        BFA_ITNIM_SM_SLER = 7,          /*  second level error recovery */
        BFA_ITNIM_SM_HWFAIL = 8,        /*  IOC h/w failure event */
        BFA_ITNIM_SM_QRESUME = 9,       /*  queue space available */
};
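
/*
 * Typical itnim lifecycle, as driven by the state machine functions
 * below: CREATE, then ONLINE (a firmware create is sent), FWRSP when
 * firmware acknowledges, later OFFLINE with CLEANUP of active IOs,
 * FWRSP for the firmware delete, and finally DELETE. QRESUME retries
 * a send that earlier found the request queue full.
 */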

/*
 *  BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {                            \
        list_del(&(__ioim)->qe);                                        \
        list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);    \
} while (0)


#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {                  \
        if ((__fcpim)->profile_comp)                                    \
                (__fcpim)->profile_comp(__ioim);                        \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {                 \
        if ((__fcpim)->profile_start)                                   \
                (__fcpim)->profile_start(__ioim);                       \
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
        BFA_IOIM_SM_START       = 1,    /*  io start request from host */
        BFA_IOIM_SM_COMP_GOOD   = 2,    /*  io good comp, resource free */
        BFA_IOIM_SM_COMP        = 3,    /*  io comp, resource is free */
        BFA_IOIM_SM_COMP_UTAG   = 4,    /*  io comp with unknown tag, resource is free */
        BFA_IOIM_SM_DONE        = 5,    /*  io comp, resource not free */
        BFA_IOIM_SM_FREE        = 6,    /*  io resource is freed */
        BFA_IOIM_SM_ABORT       = 7,    /*  abort request from scsi stack */
        BFA_IOIM_SM_ABORT_COMP  = 8,    /*  abort from f/w */
        BFA_IOIM_SM_ABORT_DONE  = 9,    /*  abort completion from f/w */
        BFA_IOIM_SM_QRESUME     = 10,   /*  CQ space available to queue IO */
        BFA_IOIM_SM_SGALLOCED   = 11,   /*  SG page allocation successful */
        BFA_IOIM_SM_SQRETRY     = 12,   /*  sequence recovery retry */
        BFA_IOIM_SM_HCB         = 13,   /*  bfa callback complete */
        BFA_IOIM_SM_CLEANUP     = 14,   /*  IO cleanup from itnim */
        BFA_IOIM_SM_TMSTART     = 15,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_TMDONE      = 16,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_HWFAIL      = 17,   /*  IOC h/w failure event */
        BFA_IOIM_SM_IOTOV       = 18,   /*  ITN offline TOV */
};
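
/*
 * On the normal path an IO sees START (request queued to firmware),
 * COMP_GOOD on successful completion, and HCB once the bfa completion
 * callback has run; the abort, cleanup and qfull events cover the
 * error and resource-exhaustion paths handled below.
 */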


/*
 *  BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {                           \
        bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
        bfa_tskim_notify_comp(__tskim);      \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do {                             \
        if ((__tskim)->notify)                                          \
                bfa_itnim_tskdone((__tskim)->itnim);      \
} while (0)


enum bfa_tskim_event {
        BFA_TSKIM_SM_START      = 1,    /*  TM command start            */
        BFA_TSKIM_SM_DONE       = 2,    /*  TM completion               */
        BFA_TSKIM_SM_QRESUME    = 3,    /*  resume after qfull          */
        BFA_TSKIM_SM_HWFAIL     = 5,    /*  IOC h/w failure event       */
        BFA_TSKIM_SM_HCB        = 6,    /*  BFA callback completion     */
        BFA_TSKIM_SM_IOS_DONE   = 7,    /*  IO and sub TM completions   */
        BFA_TSKIM_SM_CLEANUP    = 8,    /*  TM cleanup on ITN offline   */
        BFA_TSKIM_SM_CLEANUP_DONE = 9,  /*  TM abort completion */
};
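
/*
 * A TM command normally sees START, DONE when firmware completes it,
 * IOS_DONE once the affected IOs and sub-TMs have completed, and HCB
 * when the BFA callback has run; CLEANUP/CLEANUP_DONE cover the ITN
 * offline path.
 */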

/*
 * forward declaration for BFA ITNIM functions
 */
static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void     bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void     bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void     __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void     bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov(void *itnim_arg);
static void     bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t    bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void             bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);

/*
 * forward declaration of BFA IO state machine
 */
static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
/*
 * forward declaration for BFA TSKIM functions
 */
static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
                                        struct scsi_lun lun);
static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
/*
 *  BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        bfa_itnim_meminfo(cfg, km_len);

        /*
         * IO memory
         */
        *km_len += cfg->fwcfg.num_ioim_reqs *
          (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

        /*
         * task management command memory
         */
        if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
                cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
        *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
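
/*
 * Example (illustrative numbers): with num_ioim_reqs = 512 and a
 * configured num_tskim_reqs below BFA_TSKIM_MIN, *km_len grows by the
 * itnim memory, plus 512 * (sizeof(struct bfa_ioim_s) +
 * sizeof(struct bfa_ioim_sp_s)), plus BFA_TSKIM_MIN *
 * sizeof(struct bfa_tskim_s).
 */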


static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
                struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_s *bfa = fcp->bfa;

        bfa_trc(bfa, cfg->drvcfg.path_tov);
        bfa_trc(bfa, cfg->fwcfg.num_rports);
        bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
        bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

        fcpim->fcp              = fcp;
        fcpim->bfa              = bfa;
        fcpim->num_itnims       = cfg->fwcfg.num_rports;
        fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
        fcpim->path_tov         = cfg->drvcfg.path_tov;
        fcpim->delay_comp       = cfg->drvcfg.delay_comp;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;

        bfa_itnim_attach(fcpim);
        bfa_tskim_attach(fcpim);
        bfa_ioim_attach(fcpim);
}

static void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_itnim_s *itnim;
        struct list_head *qe, *qen;

        /* Enqueue unused tskim resources to tskim_free_q */
        list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_iocdisable(itnim);
        }
}

void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        fcpim->path_tov = path_tov * 1000;
        if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
                fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}
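
/*
 * path_tov is supplied in seconds and stored internally in
 * milliseconds, capped at BFA_FCPIM_PATHTOV_MAX;
 * bfa_fcpim_path_tov_get() below converts it back to seconds.
 */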

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->path_tov / 1000;
}

#define bfa_fcpim_add_iostats(__l, __r, __stats)        \
        (__l->__stats += __r->__stats)

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
                struct bfa_itnim_iostats_s *rstats)
{
        bfa_fcpim_add_iostats(lstats, rstats, total_ios);
        bfa_fcpim_add_iostats(lstats, rstats, qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
        bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
        bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
        bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
        bfa_fcpim_add_iostats(lstats, rstats, onlines);
        bfa_fcpim_add_iostats(lstats, rstats, offlines);
        bfa_fcpim_add_iostats(lstats, rstats, creates);
        bfa_fcpim_add_iostats(lstats, rstats, deletes);
        bfa_fcpim_add_iostats(lstats, rstats, create_comps);
        bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
        bfa_fcpim_add_iostats(lstats, rstats, sler_events);
        bfa_fcpim_add_iostats(lstats, rstats, fw_create);
        bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
        bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
        bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
        bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_success);
        bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
        bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}

bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
                struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;
        struct bfa_itnim_s *itnim;

        /* accumulate IO stats from itnim */
        memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                if (itnim->rport->rport_info.lp_tag != lp_tag)
                        continue;
                bfa_fcpim_add_stats(stats, &(itnim->stats));
        }
        return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->q_depth;
}

/*
 *  BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CREATE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_created);
                itnim->is_online = BFA_FALSE;
                bfa_fcpim_additn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_online);
                itnim->is_online = BFA_TRUE;
                bfa_itnim_iotov_online(itnim);
                bfa_itnim_online_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                bfa_itnim_send_fwcreate(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_sler_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Second level error recovery (SLER) is needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_cleanup(itnim);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                 enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Rport is offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_tskim_s *tskim;
        struct bfa_ioim_s *ioim;
        struct list_head        *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_tskim_iocdisable(tskim);
        }

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }

        /*
         * For IO requests still on the pending queue (never sent to
         * firmware), pretend an early timeout so they complete with
         * path TOV status.
         */
        list_for_each_safe(qe, qen, &itnim->pending_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_tov(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
        struct bfa_itnim_s *itnim = itnim_cbarg;

        bfa_stats(itnim, cleanup_comps);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s  *ioim;
        struct bfa_tskim_s *tskim;
        struct list_head        *qe, *qen;

        bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;

                /*
                 * Move IO to a cleanup queue from active queue so that a later
                 * TM will not pickup this IO.
                 */
                list_del(&ioim->qe);
                list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

                bfa_wc_up(&itnim->wc);
                bfa_ioim_cleanup(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_wc_up(&itnim->wc);
                bfa_tskim_cleanup(tskim);
        }

        bfa_wc_wait(&itnim->wc);
}
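
/*
 * The worker-counter pattern above: bfa_wc_init() registers
 * bfa_itnim_cleanp_comp() as the completion callback, bfa_wc_up() is
 * taken once per IO/TM being cleaned up, and bfa_itnim_iodone() /
 * bfa_itnim_tskdone() below drop the count; once it drains,
 * bfa_itnim_cleanp_comp() delivers the CLEANUP event to the itnim
 * state machine.
 */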

static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
        struct bfa_itnim_s *itnim = cbarg;

        bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 *  bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        /*
         * ITN memory
         */
        *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
        struct bfa_s    *bfa = fcpim->bfa;
        struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
        struct bfa_itnim_s *itnim;
        int     i, j;

        INIT_LIST_HEAD(&fcpim->itnim_q);

        itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
        fcpim->itnim_arr = itnim;

        for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
                memset(itnim, 0, sizeof(struct bfa_itnim_s));
                itnim->bfa = bfa;
                itnim->fcpim = fcpim;
                itnim->reqq = BFA_REQQ_QOS_LO;
                itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
                itnim->iotov_active = BFA_FALSE;
                bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

                INIT_LIST_HEAD(&itnim->io_q);
                INIT_LIST_HEAD(&itnim->io_cleanup_q);
                INIT_LIST_HEAD(&itnim->pending_q);
                INIT_LIST_HEAD(&itnim->tsk_q);
                INIT_LIST_HEAD(&itnim->delay_comp_q);
                for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                        itnim->ioprofile.io_latency.min[j] = ~0;
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
        }

        bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, ioc_disabled);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}

static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_create_req_s *m;

        itnim->msg_no++;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        m->class = FC_CLASS_3;
        m->seq_rec = itnim->seq_rec;
        m->msg_no = itnim->msg_no;
        bfa_stats(itnim, fw_create);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}
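
/*
 * Request-queue pattern used by the send routines above and below:
 * bfa_reqq_next() returns a message slot if the request queue has
 * room; on queue-full the itnim parks on reqq_wait (initialized with
 * bfa_itnim_qresume in bfa_itnim_attach()), and the resulting QRESUME
 * event retries the send from the matching _qfull state.
 */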

static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_delete_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        bfa_stats(itnim, fw_delete);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}

/*
 * Complete all failed inflight requests parked on the delayed
 * completion queue.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
        struct bfa_ioim_s *ioim;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
                ioim = (struct bfa_ioim_s *)qe;
                bfa_ioim_delayed_comp(ioim, iotov);
        }
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        bfa_itnim_iotov_stop(itnim);

        /*
         * Abort all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_FALSE);

        /*
         * Start all pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &itnim->io_q);
                bfa_ioim_start(ioim);
        }
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        /*
         * Fail all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_TRUE);

        /*
         * Fail any pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
                bfa_ioim_tov(ioim);
        }
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
        struct bfa_itnim_s *itnim = itnim_arg;

        itnim->iotov_active = BFA_FALSE;

        bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
        if (itnim->fcpim->path_tov > 0) {

                itnim->iotov_active = BFA_TRUE;
                WARN_ON(!bfa_itnim_hold_io(itnim));
                bfa_timer_start(itnim->bfa, &itnim->timer,
                        bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
        }
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
        if (itnim->iotov_active) {
                itnim->iotov_active = BFA_FALSE;
                bfa_timer_stop(&itnim->timer);
        }
}

/*
 * Stop the IO TOV timer and fail back any pending IO requests; called
 * when the itnim is being deleted.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
        bfa_boolean_t pathtov_active = BFA_FALSE;

        if (itnim->iotov_active)
                pathtov_active = BFA_TRUE;

        bfa_itnim_iotov_stop(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov(itnim->ditn);
}

static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
        fcpim->del_itn_stats.del_itn_iocomp_aborted +=
                itnim->stats.iocomp_aborted;
        fcpim->del_itn_stats.del_itn_iocomp_timedout +=
                itnim->stats.iocomp_timedout;
        fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
                itnim->stats.iocom_sqer_needed;
        fcpim->del_itn_stats.del_itn_iocom_res_free +=
                itnim->stats.iocom_res_free;
        fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
                itnim->stats.iocom_hostabrts;
        fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
        fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
        fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}

/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        union bfi_itn_i2h_msg_u msg;
        struct bfa_itnim_s *itnim;

        bfa_trc(bfa, m->mhdr.msg_id);

        msg.msg = m;

        switch (m->mhdr.msg_id) {
        case BFI_ITN_I2H_CREATE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.create_rsp->bfa_handle);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, create_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_DELETE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, delete_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_SLER_EVENT:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.sler_event->bfa_handle);
                bfa_stats(itnim, sler_events);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
                break;

        default:
                bfa_trc(bfa, m->mhdr.msg_id);
                WARN_ON(1);
        }
}

/*
 * bfa_itnim_api
 */

struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_itnim_s *itnim;

        bfa_itn_create(bfa, rport, bfa_itnim_isr);

        itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
        WARN_ON(itnim->rport != rport);

        itnim->ditn = ditn;

        bfa_stats(itnim, creates);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

        return itnim;
}

void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, deletes);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
        itnim->seq_rec = seq_rec;
        bfa_stats(itnim, onlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, offlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}

/*
 * Return true if itnim is considered offline for holding off IO requests.
 * IO is not held if the itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
        return itnim->fcpim->path_tov && itnim->iotov_active &&
                (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
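
/*
 * IO requests arriving while this returns true are parked on the
 * itnim pending queue by bfa_ioim_sm_uninit(); bfa_itnim_iotov_online()
 * restarts them, or bfa_itnim_iotov() fails them back if the path TOV
 * timer expires first.
 */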

void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
        int j;
        memset(&itnim->stats, 0, sizeof(itnim->stats));
        memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
        for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                itnim->ioprofile.io_latency.min[j] = ~0;
}
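
/*
 * io_latency.min[] is seeded with ~0 (all ones) here and in
 * bfa_itnim_attach() so that the first recorded latency sample in
 * each bucket becomes the running minimum.
 */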

/*
 *  BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_START:
                if (!bfa_itnim_is_online(ioim->itnim)) {
                        if (!bfa_itnim_hold_io(ioim->itnim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->fcpim->ioim_comp_q);
                                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                                __bfa_cb_ioim_pathtov, ioim);
                        } else {
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->itnim->pending_q);
                        }
                        break;
                }

                if (ioim->nsges > BFI_SGE_INLINE) {
                        if (!bfa_ioim_sgpg_alloc(ioim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
                                return;
                        }
                }

                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }

                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_IOTOV:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_pathtov, ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                /*
                 * IO in pending queue can get abort requests. Complete abort
                 * requests immediately.
                 */
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_abort, ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}

/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        bfa_trc(ioim->bfa, ioim->iotag);
        bfa_trc(ioim->bfa, event);

        switch (event) {
        case BFA_IOIM_SM_SGALLOCED:
                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }
                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_CLEANUP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}

/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_COMP_GOOD:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                              __bfa_cb_ioim_good_comp, ioim);
                break;

        case BFA_IOIM_SM_COMP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
                              ioim);
                break;

        case BFA_IOIM_SM_DONE:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
                              ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                ioim->iosp->abort_explicit = BFA_TRUE;
                ioim->io_cbfn = __bfa_cb_ioim_abort;

                if (bfa_ioim_send_abort(ioim))
                        bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
                else {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
                        bfa_stats(ioim->itnim, qwait);
                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
                                          &ioim->iosp->reqq_wait);
                }
                break;

        case BFA_IOIM_SM_CLEANUP:
                ioim->iosp->abort_explicit = BFA_FALSE;
                ioim->io_cbfn = __bfa_cb_ioim_failed;

                if (bfa_ioim_send_abort(ioim))
                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
                else {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
                        bfa_stats(ioim->itnim, qwait);
                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
                                          &ioim->iosp->reqq_wait);
                }
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1587                               ioim);
1588                 break;
1589
1590         case BFA_IOIM_SM_SQRETRY:
1591                 if (bfa_ioim_maxretry_reached(ioim)) {
1592                         /* max retry reached, free IO */
1593                         bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1594                         bfa_ioim_move_to_comp_q(ioim);
1595                         bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1596                                         __bfa_cb_ioim_failed, ioim);
1597                         break;
1598                 }
1599                 /* wait for the IO tag resource to be freed */
1600                 bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
1601                 break;
1602
1603         default:
1604                 bfa_sm_fault(ioim->bfa, event);
1605         }
1606 }
1607
1608 /*
1609  * IO is retried with a new tag.
1610  */
1611 static void
1612 bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1613 {
1614         switch (event) {
1615         case BFA_IOIM_SM_FREE:
1616                 /* ABTS and RRQ done; now retry the IO with a new tag */
1617                 bfa_ioim_update_iotag(ioim);
1618                 if (!bfa_ioim_send_ioreq(ioim)) {
1619                         bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1620                         break;
1621                 }
1622                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1623                 break;
1624
1625         case BFA_IOIM_SM_CLEANUP:
1626                 ioim->iosp->abort_explicit = BFA_FALSE;
1627                 ioim->io_cbfn = __bfa_cb_ioim_failed;
1628
1629                 if (bfa_ioim_send_abort(ioim))
1630                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1631                 else {
1632                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1633                         bfa_stats(ioim->itnim, qwait);
1634                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1635                                           &ioim->iosp->reqq_wait);
1636                 }
1637                 break;
1638
1639         case BFA_IOIM_SM_HWFAIL:
1640                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1641                 bfa_ioim_move_to_comp_q(ioim);
1642                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1643                          __bfa_cb_ioim_failed, ioim);
1644                 break;
1645
1646         case BFA_IOIM_SM_ABORT:
1647                 /* In this state the IO abort is already done;
1648                  * wait for the IO tag resource to be freed.
1649                  */
1650                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1651                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1652                               ioim);
1653                 break;
1654
1655         default:
1656                 bfa_sm_fault(ioim->bfa, event);
1657         }
1658 }
1659
1660 /*
1661  * IO is being aborted, waiting for completion from firmware.
1662  */
1663 static void
1664 bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1665 {
1666         bfa_trc(ioim->bfa, ioim->iotag);
1667         bfa_trc(ioim->bfa, event);
1668
1669         switch (event) {
1670         case BFA_IOIM_SM_COMP_GOOD:
1671         case BFA_IOIM_SM_COMP:
1672         case BFA_IOIM_SM_DONE:
1673         case BFA_IOIM_SM_FREE:
1674                 break;
1675
1676         case BFA_IOIM_SM_ABORT_DONE:
1677                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1678                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1679                               ioim);
1680                 break;
1681
1682         case BFA_IOIM_SM_ABORT_COMP:
1683                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1684                 bfa_ioim_move_to_comp_q(ioim);
1685                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1686                               ioim);
1687                 break;
1688
1689         case BFA_IOIM_SM_COMP_UTAG:
1690                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1691                 bfa_ioim_move_to_comp_q(ioim);
1692                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1693                               ioim);
1694                 break;
1695
1696         case BFA_IOIM_SM_CLEANUP:
1697                 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1698                 ioim->iosp->abort_explicit = BFA_FALSE;
1699
1700                 if (bfa_ioim_send_abort(ioim))
1701                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1702                 else {
1703                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1704                         bfa_stats(ioim->itnim, qwait);
1705                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1706                                           &ioim->iosp->reqq_wait);
1707                 }
1708                 break;
1709
1710         case BFA_IOIM_SM_HWFAIL:
1711                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1712                 bfa_ioim_move_to_comp_q(ioim);
1713                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1714                               ioim);
1715                 break;
1716
1717         default:
1718                 bfa_sm_fault(ioim->bfa, event);
1719         }
1720 }
1721
1722 /*
1723  * IO is being cleaned up (implicit abort), waiting for completion from
1724  * firmware.
1725  */
1726 static void
1727 bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1728 {
1729         bfa_trc(ioim->bfa, ioim->iotag);
1730         bfa_trc(ioim->bfa, event);
1731
1732         switch (event) {
1733         case BFA_IOIM_SM_COMP_GOOD:
1734         case BFA_IOIM_SM_COMP:
1735         case BFA_IOIM_SM_DONE:
1736         case BFA_IOIM_SM_FREE:
1737                 break;
1738
1739         case BFA_IOIM_SM_ABORT:
1740                 /*
1741                  * IO is already being aborted implicitly
1742                  */
1743                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1744                 break;
1745
1746         case BFA_IOIM_SM_ABORT_DONE:
1747                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1748                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1749                 bfa_ioim_notify_cleanup(ioim);
1750                 break;
1751
1752         case BFA_IOIM_SM_ABORT_COMP:
1753                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1754                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1755                 bfa_ioim_notify_cleanup(ioim);
1756                 break;
1757
1758         case BFA_IOIM_SM_COMP_UTAG:
1759                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1760                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1761                 bfa_ioim_notify_cleanup(ioim);
1762                 break;
1763
1764         case BFA_IOIM_SM_HWFAIL:
1765                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1766                 bfa_ioim_move_to_comp_q(ioim);
1767                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1768                               ioim);
1769                 break;
1770
1771         case BFA_IOIM_SM_CLEANUP:
1772                 /*
1773                  * IO can be in cleanup state already due to TM command.
1774                  * A second cleanup request can arrive from the ITN offline event.
1775                  */
1776                 break;
1777
1778         default:
1779                 bfa_sm_fault(ioim->bfa, event);
1780         }
1781 }
1782
1783 /*
1784  * IO is waiting for room in request CQ
1785  */
1786 static void
1787 bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1788 {
1789         bfa_trc(ioim->bfa, ioim->iotag);
1790         bfa_trc(ioim->bfa, event);
1791
1792         switch (event) {
1793         case BFA_IOIM_SM_QRESUME:
1794                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1795                 bfa_ioim_send_ioreq(ioim);
1796                 break;
1797
1798         case BFA_IOIM_SM_ABORT:
1799                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1800                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1801                 bfa_ioim_move_to_comp_q(ioim);
1802                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1803                               ioim);
1804                 break;
1805
1806         case BFA_IOIM_SM_CLEANUP:
1807                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1808                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1809                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1810                               ioim);
1811                 bfa_ioim_notify_cleanup(ioim);
1812                 break;
1813
1814         case BFA_IOIM_SM_HWFAIL:
1815                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1816                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1817                 bfa_ioim_move_to_comp_q(ioim);
1818                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1819                               ioim);
1820                 break;
1821
1822         default:
1823                 bfa_sm_fault(ioim->bfa, event);
1824         }
1825 }
1826
1827 /*
1828  * Active IO is being aborted, waiting for room in request CQ.
1829  */
1830 static void
1831 bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1832 {
1833         bfa_trc(ioim->bfa, ioim->iotag);
1834         bfa_trc(ioim->bfa, event);
1835
1836         switch (event) {
1837         case BFA_IOIM_SM_QRESUME:
1838                 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1839                 bfa_ioim_send_abort(ioim);
1840                 break;
1841
1842         case BFA_IOIM_SM_CLEANUP:
1843                 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1844                 ioim->iosp->abort_explicit = BFA_FALSE;
1845                 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1846                 break;
1847
1848         case BFA_IOIM_SM_COMP_GOOD:
1849         case BFA_IOIM_SM_COMP:
1850                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1851                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1852                 bfa_ioim_move_to_comp_q(ioim);
1853                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1854                               ioim);
1855                 break;
1856
1857         case BFA_IOIM_SM_DONE:
1858                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1859                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1860                 bfa_ioim_move_to_comp_q(ioim);
1861                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1862                               ioim);
1863                 break;
1864
1865         case BFA_IOIM_SM_HWFAIL:
1866                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1867                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1868                 bfa_ioim_move_to_comp_q(ioim);
1869                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1870                               ioim);
1871                 break;
1872
1873         default:
1874                 bfa_sm_fault(ioim->bfa, event);
1875         }
1876 }
1877
1878 /*
1879  * Active IO is being cleaned up, waiting for room in request CQ.
1880  */
1881 static void
1882 bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1883 {
1884         bfa_trc(ioim->bfa, ioim->iotag);
1885         bfa_trc(ioim->bfa, event);
1886
1887         switch (event) {
1888         case BFA_IOIM_SM_QRESUME:
1889                 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1890                 bfa_ioim_send_abort(ioim);
1891                 break;
1892
1893         case BFA_IOIM_SM_ABORT:
1894                 /*
1895                  * IO is already being cleaned up implicitly
1896                  */
1897                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1898                 break;
1899
1900         case BFA_IOIM_SM_COMP_GOOD:
1901         case BFA_IOIM_SM_COMP:
1902                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1903                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1904                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1905                 bfa_ioim_notify_cleanup(ioim);
1906                 break;
1907
1908         case BFA_IOIM_SM_DONE:
1909                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1910                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1911                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1912                 bfa_ioim_notify_cleanup(ioim);
1913                 break;
1914
1915         case BFA_IOIM_SM_HWFAIL:
1916                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1917                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1918                 bfa_ioim_move_to_comp_q(ioim);
1919                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1920                               ioim);
1921                 break;
1922
1923         default:
1924                 bfa_sm_fault(ioim->bfa, event);
1925         }
1926 }
1927
1928 /*
1929  * IO bfa callback is pending.
1930  */
1931 static void
1932 bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1933 {
1934         switch (event) {
1935         case BFA_IOIM_SM_HCB:
1936                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
1937                 bfa_ioim_free(ioim);
1938                 break;
1939
1940         case BFA_IOIM_SM_CLEANUP:
1941                 bfa_ioim_notify_cleanup(ioim);
1942                 break;
1943
1944         case BFA_IOIM_SM_HWFAIL:
1945                 break;
1946
1947         default:
1948                 bfa_sm_fault(ioim->bfa, event);
1949         }
1950 }
1951
1952 /*
1953  * IO bfa callback is pending; the IO resource cannot be freed yet.
1954  */
1955 static void
1956 bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1957 {
1958         bfa_trc(ioim->bfa, ioim->iotag);
1959         bfa_trc(ioim->bfa, event);
1960
1961         switch (event) {
1962         case BFA_IOIM_SM_HCB:
1963                 bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
1964                 list_del(&ioim->qe);
1965                 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
1966                 break;
1967
1968         case BFA_IOIM_SM_FREE:
1969                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1970                 break;
1971
1972         case BFA_IOIM_SM_CLEANUP:
1973                 bfa_ioim_notify_cleanup(ioim);
1974                 break;
1975
1976         case BFA_IOIM_SM_HWFAIL:
1977                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1978                 break;
1979
1980         default:
1981                 bfa_sm_fault(ioim->bfa, event);
1982         }
1983 }
1984
1985 /*
1986  * IO is completed; waiting for the firmware to free the IO resource.
1987  */
1988 static void
1989 bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1990 {
1991         bfa_trc(ioim->bfa, ioim->iotag);
1992         bfa_trc(ioim->bfa, event);
1993
1994         switch (event) {
1995         case BFA_IOIM_SM_FREE:
1996                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
1997                 bfa_ioim_free(ioim);
1998                 break;
1999
2000         case BFA_IOIM_SM_CLEANUP:
2001                 bfa_ioim_notify_cleanup(ioim);
2002                 break;
2003
2004         case BFA_IOIM_SM_HWFAIL:
2005                 break;
2006
2007         default:
2008                 bfa_sm_fault(ioim->bfa, event);
2009         }
2010 }
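
/*
 * Lifecycle summary: the normal path is uninit -> active -> hcb ->
 * uninit. hcb_free and resfree are interposed when the firmware still
 * owns the IO tag at completion time, while sgalloc, qfull, abort_qfull
 * and cleanup_qfull act as wait states for SG pages or request-queue
 * space.
 */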
2011
2012
2013 static void
2014 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2015 {
2016         struct bfa_ioim_s *ioim = cbarg;
2017
2018         if (!complete) {
2019                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2020                 return;
2021         }
2022
2023         bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2024 }
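
/*
 * All __bfa_cb_ioim_* handlers here share one contract: they are
 * scheduled with bfa_cb_queue() and invoked with complete == BFA_TRUE
 * when the callback is actually delivered to the driver, or BFA_FALSE
 * when the callback queue is flushed, in which case they only post
 * BFA_IOIM_SM_HCB so the state machine can release the IO.
 */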
2025
2026 static void
2027 __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2028 {
2029         struct bfa_ioim_s       *ioim = cbarg;
2030         struct bfi_ioim_rsp_s *m;
2031         u8      *snsinfo = NULL;
2032         u8      sns_len = 0;
2033         s32     residue = 0;
2034
2035         if (!complete) {
2036                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2037                 return;
2038         }
2039
2040         m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2041         if (m->io_status == BFI_IOIM_STS_OK) {
2042                 /*
2043                  * set up sense information, if present
2044                  */
2045                 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
2046                                         m->sns_len) {
2047                         sns_len = m->sns_len;
2048                         snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2049                                                 ioim->iotag);
2050                 }
2051
2052                 /*
2053                  * set up the residue value correctly for normal completions
2054                  */
2055                 if (m->resid_flags == FCP_RESID_UNDER) {
2056                         residue = be32_to_cpu(m->residue);
2057                         bfa_stats(ioim->itnim, iocomp_underrun);
2058                 }
2059                 if (m->resid_flags == FCP_RESID_OVER) {
2060                         residue = be32_to_cpu(m->residue);
2061                         residue = -residue;
2062                         bfa_stats(ioim->itnim, iocomp_overrun);
2063                 }
2064         }
2065
2066         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
2067                           m->scsi_status, sns_len, snsinfo, residue);
2068 }
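
/*
 * Worked example for the residue handling above: a 4096-byte read that
 * transfers only 3584 bytes is reported as FCP_RESID_UNDER with residue
 * 512 and completed with +512; an overrun of the same amount is
 * reported as FCP_RESID_OVER and completed with -512.
 */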
2069
2070 static void
2071 __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2072 {
2073         struct bfa_ioim_s *ioim = cbarg;
2074
2075         if (!complete) {
2076                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2077                 return;
2078         }
2079
2080         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2081                           0, 0, NULL, 0);
2082 }
2083
2084 static void
2085 __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2086 {
2087         struct bfa_ioim_s *ioim = cbarg;
2088
2089         bfa_stats(ioim->itnim, path_tov_expired);
2090         if (!complete) {
2091                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2092                 return;
2093         }
2094
2095         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2096                           0, 0, NULL, 0);
2097 }
2098
2099 static void
2100 __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2101 {
2102         struct bfa_ioim_s *ioim = cbarg;
2103
2104         if (!complete) {
2105                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2106                 return;
2107         }
2108
2109         bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2110 }
2111
2112 static void
2113 bfa_ioim_sgpg_alloced(void *cbarg)
2114 {
2115         struct bfa_ioim_s *ioim = cbarg;
2116
2117         ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2118         list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
2119         ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2120         bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2121 }
2122
2123 /*
2124  * Send I/O request to firmware.
2125  */
2126 static  bfa_boolean_t
2127 bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2128 {
2129         struct bfa_itnim_s *itnim = ioim->itnim;
2130         struct bfi_ioim_req_s *m;
2131         static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
2132         struct bfi_sge_s *sge, *sgpge;
2133         u32     pgdlen = 0;
2134         u32     fcp_dl;
2135         u64 addr;
2136         struct scatterlist *sg;
2137         struct bfa_sgpg_s *sgpg;
2138         struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2139         u32 i, sge_id, pgcumsz;
2140         enum dma_data_direction dmadir;
2141
2142         /*
2143          * check for room in queue to send request now
2144          */
2145         m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2146         if (!m) {
2147                 bfa_stats(ioim->itnim, qwait);
2148                 bfa_reqq_wait(ioim->bfa, ioim->reqq,
2149                                   &ioim->iosp->reqq_wait);
2150                 return BFA_FALSE;
2151         }
2152
2153         /*
2154          * build i/o request message next
2155          */
2156         m->io_tag = cpu_to_be16(ioim->iotag);
2157         m->rport_hdl = ioim->itnim->rport->fw_handle;
2158         m->io_timeout = 0;
2159
2160         sge = &m->sges[0];
2161         sgpg = ioim->sgpg;
2162         sge_id = 0;
2163         sgpge = NULL;
2164         pgcumsz = 0;
2165         scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
2166                 if (i == 0) {
2167                         /* build inline IO SG element */
2168                         addr = bfa_sgaddr_le(sg_dma_address(sg));
2169                         sge->sga = *(union bfi_addr_u *) &addr;
2170                         pgdlen = sg_dma_len(sg);
2171                         sge->sg_len = pgdlen;
2172                         sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
2173                                         BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2174                         bfa_sge_to_be(sge);
2175                         sge++;
2176                 } else {
2177                         if (sge_id == 0)
2178                                 sgpge = sgpg->sgpg->sges;
2179
2180                         addr = bfa_sgaddr_le(sg_dma_address(sg));
2181                         sgpge->sga = *(union bfi_addr_u *) &addr;
2182                         sgpge->sg_len = sg_dma_len(sg);
2183                         pgcumsz += sgpge->sg_len;
2184
2185                         /* set flags */
2186                         if (i < (ioim->nsges - 1) &&
2187                                         sge_id < (BFI_SGPG_DATA_SGES - 1))
2188                                 sgpge->flags = BFI_SGE_DATA;
2189                         else if (i < (ioim->nsges - 1))
2190                                 sgpge->flags = BFI_SGE_DATA_CPL;
2191                         else
2192                                 sgpge->flags = BFI_SGE_DATA_LAST;
2193
2194                         bfa_sge_to_le(sgpge);
2195
2196                         sgpge++;
2197                         if (i == (ioim->nsges - 1)) {
2198                                 sgpge->flags = BFI_SGE_PGDLEN;
2199                                 sgpge->sga.a32.addr_lo = 0;
2200                                 sgpge->sga.a32.addr_hi = 0;
2201                                 sgpge->sg_len = pgcumsz;
2202                                 bfa_sge_to_le(sgpge);
2203                         } else if (++sge_id == BFI_SGPG_DATA_SGES) {
2204                                 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2205                                 sgpge->flags = BFI_SGE_LINK;
2206                                 sgpge->sga = sgpg->sgpg_pa;
2207                                 sgpge->sg_len = pgcumsz;
2208                                 bfa_sge_to_le(sgpge);
2209                                 sge_id = 0;
2210                                 pgcumsz = 0;
2211                         }
2212                 }
2213         }
2214
2215         if (ioim->nsges > BFI_SGE_INLINE) {
2216                 sge->sga = ioim->sgpg->sgpg_pa;
2217         } else {
2218                 sge->sga.a32.addr_lo = 0;
2219                 sge->sga.a32.addr_hi = 0;
2220         }
2221         sge->sg_len = pgdlen;
2222         sge->flags = BFI_SGE_PGDLEN;
2223         bfa_sge_to_be(sge);
2224
2225         /*
2226          * set up I/O command parameters
2227          */
2228         m->cmnd = cmnd_z0;
2229         int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
2230         dmadir = cmnd->sc_data_direction;
2231         if (dmadir == DMA_TO_DEVICE)
2232                 m->cmnd.iodir = FCP_IODIR_WRITE;
2233         else if (dmadir == DMA_FROM_DEVICE)
2234                 m->cmnd.iodir = FCP_IODIR_READ;
2235         else
2236                 m->cmnd.iodir = FCP_IODIR_NONE;
2237
2238         m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
2239         fcp_dl = scsi_bufflen(cmnd);
2240         m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
2241
2242         /*
2243          * set up I/O message header
2244          */
2245         switch (m->cmnd.iodir) {
2246         case FCP_IODIR_READ:
2247                 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
2248                 bfa_stats(itnim, input_reqs);
2249                 ioim->itnim->stats.rd_throughput += fcp_dl;
2250                 break;
2251         case FCP_IODIR_WRITE:
2252                 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
2253                 bfa_stats(itnim, output_reqs);
2254                 ioim->itnim->stats.wr_throughput += fcp_dl;
2255                 break;
2256         case FCP_IODIR_RW:
2257                 bfa_stats(itnim, input_reqs);
2258                 bfa_stats(itnim, output_reqs);
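                /* fall through - RW requests are also sent as BFI_MC_IOIM_IO */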
2259         default:
2260                 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2261         }
2262         if (itnim->seq_rec ||
2263             (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
2264                 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2265
2266         /*
2267          * queue I/O message to firmware
2268          */
2269         bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2270         return BFA_TRUE;
2271 }
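
/*
 * SG chaining sketch for the builder above: the first host SG entry
 * rides inline in the request (DATA_CPL when more entries follow,
 * DATA_LAST otherwise); remaining entries fill SG pages with
 * BFI_SGPG_DATA_SGES data slots each. A full page is closed with a
 * LINK element chaining to the next page, the page holding the final
 * data entry is closed with a PGDLEN element carrying that page's
 * cumulative length, and the request's second inline SGE points at the
 * first SG page.
 */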
2272
2273 /*
2274  * Set up any additional SG pages needed. The inline SG element is
2275  * set up at queuing time.
2276  */
2277 static bfa_boolean_t
2278 bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
2279 {
2280         u16     nsgpgs;
2281
2282         WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
2283
2284         /*
2285          * allocate SG pages needed
2286          */
2287         nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2288         if (!nsgpgs)
2289                 return BFA_TRUE;
2290
2291         if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2292             != BFA_STATUS_OK) {
2293                 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2294                 return BFA_FALSE;
2295         }
2296
2297         ioim->nsgpgs = nsgpgs;
2298         ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2299
2300         return BFA_TRUE;
2301 }
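
/*
 * Example (illustrative): with the first SG entry carried inline and
 * BFI_SGPG_DATA_SGES data slots per SG page, an IO with nsges host SG
 * entries needs BFA_SGPG_NPAGE(nsges) pages. When bfa_sgpg_malloc()
 * cannot supply them, the IO blocks on sgpg_wqe and resumes through
 * bfa_ioim_sgpg_alloced() once pages are returned to the pool.
 */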
2302
2303 /*
2304  * Send I/O abort request to firmware.
2305  */
2306 static  bfa_boolean_t
2307 bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2308 {
2309         struct bfi_ioim_abort_req_s *m;
2310         enum bfi_ioim_h2i       msgop;
2311
2312         /*
2313          * check for room in queue to send request now
2314          */
2315         m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2316         if (!m)
2317                 return BFA_FALSE;
2318
2319         /*
2320          * build i/o request message next
2321          */
2322         if (ioim->iosp->abort_explicit)
2323                 msgop = BFI_IOIM_H2I_IOABORT_REQ;
2324         else
2325                 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2326
2327         bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
2328         m->io_tag    = cpu_to_be16(ioim->iotag);
2329         m->abort_tag = ++ioim->abort_tag;
2330
2331         /*
2332          * queue I/O message to firmware
2333          */
2334         bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2335         return BFA_TRUE;
2336 }
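
/*
 * The abort_tag increment above lets bfa_ioim_isr() discard stale
 * BFI_IOIM_STS_HOST_ABORTED responses: only the response carrying the
 * most recent abort_tag advances the state machine.
 */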
2337
2338 /*
2339  * Callback to resume any I/O requests waiting for room in the request queue.
2340  */
2341 static void
2342 bfa_ioim_qresume(void *cbarg)
2343 {
2344         struct bfa_ioim_s *ioim = cbarg;
2345
2346         bfa_stats(ioim->itnim, qresumes);
2347         bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2348 }
2349
2350
2351 static void
2352 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2353 {
2354         /*
2355          * Move IO from itnim queue to fcpim global queue since itnim will be
2356          * freed.
2357          */
2358         list_del(&ioim->qe);
2359         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2360
2361         if (!ioim->iosp->tskim) {
2362                 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
2363                         bfa_cb_dequeue(&ioim->hcb_qe);
2364                         list_del(&ioim->qe);
2365                         list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2366                 }
2367                 bfa_itnim_iodone(ioim->itnim);
2368         } else
2369                 bfa_wc_down(&ioim->iosp->tskim->wc);
2370 }
2371
2372 static bfa_boolean_t
2373 bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2374 {
2375         if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2376             (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)))    ||
2377             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))         ||
2378             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))   ||
2379             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))           ||
2380             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))      ||
2381             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2382                 return BFA_FALSE;
2383
2384         return BFA_TRUE;
2385 }
2386
2387 void
2388 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2389 {
2390         /*
2391          * If the path TOV timer expired, fail back with PATHTOV status;
2392          * these IO requests are not normally retried by the IO stack.
2393          *
2394          * Otherwise the device came back online; fail the IO with normal
2395          * failed status so that the IO stack retries it.
2396          */
2397         if (iotov)
2398                 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2399         else {
2400                 ioim->io_cbfn = __bfa_cb_ioim_failed;
2401                 bfa_stats(ioim->itnim, iocom_nexus_abort);
2402         }
2403         bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2404
2405         /*
2406          * Move IO to fcpim global queue since itnim will be
2407          * freed.
2408          */
2409         list_del(&ioim->qe);
2410         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2411 }
2412
2413
2414 /*
2415  * Memory allocation and initialization.
2416  */
2417 void
2418 bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
2419 {
2420         struct bfa_ioim_s               *ioim;
2421         struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
2422         struct bfa_ioim_sp_s    *iosp;
2423         u16             i;
2424
2425         /*
2426          * claim memory first
2427          */
2428         ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
2429         fcpim->ioim_arr = ioim;
2430         bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
2431
2432         iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
2433         fcpim->ioim_sp_arr = iosp;
2434         bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
2435
2436         /*
2437          * Initialize ioim free queues
2438          */
2439         INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
2440         INIT_LIST_HEAD(&fcpim->ioim_comp_q);
2441
2442         for (i = 0; i < fcpim->fcp->num_ioim_reqs;
2443              i++, ioim++, iosp++) {
2444                 /*
2445                  * initialize IOIM
2446                  */
2447                 memset(ioim, 0, sizeof(struct bfa_ioim_s));
2448                 ioim->iotag   = i;
2449                 ioim->bfa     = fcpim->bfa;
2450                 ioim->fcpim   = fcpim;
2451                 ioim->iosp    = iosp;
2452                 INIT_LIST_HEAD(&ioim->sgpg_q);
2453                 bfa_reqq_winit(&ioim->iosp->reqq_wait,
2454                                    bfa_ioim_qresume, ioim);
2455                 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
2456                                    bfa_ioim_sgpg_alloced, ioim);
2457                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2458         }
2459 }
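
/*
 * KVA layout claimed above: num_ioim_reqs bfa_ioim_s entries followed
 * immediately by num_ioim_reqs bfa_ioim_sp_s entries, both carved from
 * the fcp module's KVA region; ioim->iotag is simply the array index.
 */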
2460
2461 void
2462 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2463 {
2464         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2465         struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2466         struct bfa_ioim_s *ioim;
2467         u16     iotag;
2468         enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2469
2470         iotag = be16_to_cpu(rsp->io_tag);
2471
2472         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2473         WARN_ON(ioim->iotag != iotag);
2474
2475         bfa_trc(ioim->bfa, ioim->iotag);
2476         bfa_trc(ioim->bfa, rsp->io_status);
2477         bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2478
2479         if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
2480                 ioim->iosp->comp_rspmsg = *m;
2481
2482         switch (rsp->io_status) {
2483         case BFI_IOIM_STS_OK:
2484                 bfa_stats(ioim->itnim, iocomp_ok);
2485                 if (rsp->reuse_io_tag == 0)
2486                         evt = BFA_IOIM_SM_DONE;
2487                 else
2488                         evt = BFA_IOIM_SM_COMP;
2489                 break;
2490
2491         case BFI_IOIM_STS_TIMEDOUT:
2492                 bfa_stats(ioim->itnim, iocomp_timedout);
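                /* fall through - a timed-out IO completes as aborted */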
2493         case BFI_IOIM_STS_ABORTED:
2494                 rsp->io_status = BFI_IOIM_STS_ABORTED;
2495                 bfa_stats(ioim->itnim, iocomp_aborted);
2496                 if (rsp->reuse_io_tag == 0)
2497                         evt = BFA_IOIM_SM_DONE;
2498                 else
2499                         evt = BFA_IOIM_SM_COMP;
2500                 break;
2501
2502         case BFI_IOIM_STS_PROTO_ERR:
2503                 bfa_stats(ioim->itnim, iocom_proto_err);
2504                 WARN_ON(!rsp->reuse_io_tag);
2505                 evt = BFA_IOIM_SM_COMP;
2506                 break;
2507
2508         case BFI_IOIM_STS_SQER_NEEDED:
2509                 bfa_stats(ioim->itnim, iocom_sqer_needed);
2510                 WARN_ON(rsp->reuse_io_tag != 0);
2511                 evt = BFA_IOIM_SM_SQRETRY;
2512                 break;
2513
2514         case BFI_IOIM_STS_RES_FREE:
2515                 bfa_stats(ioim->itnim, iocom_res_free);
2516                 evt = BFA_IOIM_SM_FREE;
2517                 break;
2518
2519         case BFI_IOIM_STS_HOST_ABORTED:
2520                 bfa_stats(ioim->itnim, iocom_hostabrts);
2521                 if (rsp->abort_tag != ioim->abort_tag) {
2522                         bfa_trc(ioim->bfa, rsp->abort_tag);
2523                         bfa_trc(ioim->bfa, ioim->abort_tag);
2524                         return;
2525                 }
2526
2527                 if (rsp->reuse_io_tag)
2528                         evt = BFA_IOIM_SM_ABORT_COMP;
2529                 else
2530                         evt = BFA_IOIM_SM_ABORT_DONE;
2531                 break;
2532
2533         case BFI_IOIM_STS_UTAG:
2534                 bfa_stats(ioim->itnim, iocom_utags);
2535                 evt = BFA_IOIM_SM_COMP_UTAG;
2536                 break;
2537
2538         default:
2539                 WARN_ON(1);
2540         }
2541
2542         bfa_sm_send_event(ioim, evt);
2543 }
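
/*
 * Status-to-event mapping used above: OK and ABORTED (including
 * TIMEDOUT) map to SM_DONE when the firmware releases the IO tag
 * (reuse_io_tag == 0) and to SM_COMP when it keeps it; PROTO_ERR always
 * reuses the tag, SQER_NEEDED never does, RES_FREE posts SM_FREE,
 * HOST_ABORTED posts SM_ABORT_COMP or SM_ABORT_DONE depending on tag
 * reuse, and UTAG posts SM_COMP_UTAG.
 */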
2544
2545 void
2546 bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2547 {
2548         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2549         struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2550         struct bfa_ioim_s *ioim;
2551         u16     iotag;
2552
2553         iotag = be16_to_cpu(rsp->io_tag);
2554
2555         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2556         WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
2557
2558         bfa_ioim_cb_profile_comp(fcpim, ioim);
2559         bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2560 }
2561
2562 /*
2563  * Called by itnim to clean up IO while going offline.
2564  */
2565 void
2566 bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
2567 {
2568         bfa_trc(ioim->bfa, ioim->iotag);
2569         bfa_stats(ioim->itnim, io_cleanups);
2570
2571         ioim->iosp->tskim = NULL;
2572         bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2573 }
2574
2575 void
2576 bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2577 {
2578         bfa_trc(ioim->bfa, ioim->iotag);
2579         bfa_stats(ioim->itnim, io_tmaborts);
2580
2581         ioim->iosp->tskim = tskim;
2582         bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2583 }
2584
2585 /*
2586  * IOC failure handling.
2587  */
2588 void
2589 bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2590 {
2591         bfa_trc(ioim->bfa, ioim->iotag);
2592         bfa_stats(ioim->itnim, io_iocdowns);
2593         bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2594 }
2595
2596 /*
2597  * IO offline TOV popped. Fail the pending IO.
2598  */
2599 void
2600 bfa_ioim_tov(struct bfa_ioim_s *ioim)
2601 {
2602         bfa_trc(ioim->bfa, ioim->iotag);
2603         bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
2604 }
2605
2606
2607 /*
2608  * Allocate IOIM resource for initiator mode I/O request.
2609  */
2610 struct bfa_ioim_s *
2611 bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2612                 struct bfa_itnim_s *itnim, u16 nsges)
2613 {
2614         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2615         struct bfa_ioim_s *ioim;
2616         struct bfa_iotag_s *iotag = NULL;
2617
2618         /*
2619          * allocate IOIM resource
2620          */
2621         bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
2622         if (!iotag) {
2623                 bfa_stats(itnim, no_iotags);
2624                 return NULL;
2625         }
2626
2627         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
2628
2629         ioim->dio = dio;
2630         ioim->itnim = itnim;
2631         ioim->nsges = nsges;
2632         ioim->nsgpgs = 0;
2633
2634         bfa_stats(itnim, total_ios);
2635         fcpim->ios_active++;
2636
2637         list_add_tail(&ioim->qe, &itnim->io_q);
2638
2639         return ioim;
2640 }
2641
2642 void
2643 bfa_ioim_free(struct bfa_ioim_s *ioim)
2644 {
2645         struct bfa_fcpim_s *fcpim = ioim->fcpim;
2646         struct bfa_iotag_s *iotag;
2647
2648         if (ioim->nsgpgs > 0)
2649                 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2650
2651         bfa_stats(ioim->itnim, io_comps);
2652         fcpim->ios_active--;
2653
2654         ioim->iotag &= BFA_IOIM_IOTAG_MASK;
2655
2656         WARN_ON(!(ioim->iotag <
2657                 (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
2658         iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
2659
2660         if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
2661                 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
2662         else
2663                 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
2664
2665         list_del(&ioim->qe);
2666 }
2667
2668 void
2669 bfa_ioim_start(struct bfa_ioim_s *ioim)
2670 {
2671         bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2672
2673         /*
2674          * Obtain the queue over which this request has to be issued
2675          */
2676         ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
2677                         BFA_FALSE : bfa_itnim_get_reqq(ioim);
2678
2679         bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
2680 }
2681
2682 /*
2683  * Driver I/O abort request.
2684  */
2685 bfa_status_t
2686 bfa_ioim_abort(struct bfa_ioim_s *ioim)
2687 {
2688
2689         bfa_trc(ioim->bfa, ioim->iotag);
2690
2691         if (!bfa_ioim_is_abortable(ioim))
2692                 return BFA_STATUS_FAILED;
2693
2694         bfa_stats(ioim->itnim, io_aborts);
2695         bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
2696
2697         return BFA_STATUS_OK;
2698 }
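
/*
 * Illustrative usage sketch (not part of the driver): a minimal
 * bfad-layer submit/abort sequence over the entry points above,
 * assuming a valid bfa instance, dio wrapper and itnim; the helper
 * name is hypothetical.
 */
static void __maybe_unused
bfa_ioim_example_submit(struct bfa_s *bfa, struct bfad_ioim_s *dio,
                        struct bfa_itnim_s *itnim, u16 nsges)
{
        struct bfa_ioim_s *ioim;

        ioim = bfa_ioim_alloc(bfa, dio, itnim, nsges);
        if (!ioim)
                return;         /* out of IO tags; caller must retry */

        bfa_ioim_start(ioim);

        /* later, e.g. from a SCSI midlayer abort handler: */
        if (bfa_ioim_abort(ioim) != BFA_STATUS_OK) {
                /* IO is no longer abortable; it is already completing */
        }
}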
2699
2700 /*
2701  *  BFA TSKIM state machine functions
2702  */
2703
2704 /*
2705  * Task management command beginning state.
2706  */
2707 static void
2708 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2709 {
2710         bfa_trc(tskim->bfa, event);
2711
2712         switch (event) {
2713         case BFA_TSKIM_SM_START:
2714                 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
2715                 bfa_tskim_gather_ios(tskim);
2716
2717                 /*
2718                  * If device is offline, do not send TM on wire. Just cleanup
2719                  * any pending IO requests and complete TM request.
2720                  */
2721                 if (!bfa_itnim_is_online(tskim->itnim)) {
2722                         bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2723                         tskim->tsk_status = BFI_TSKIM_STS_OK;
2724                         bfa_tskim_cleanup_ios(tskim);
2725                         return;
2726                 }
2727
2728                 if (!bfa_tskim_send(tskim)) {
2729                         bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
2730                         bfa_stats(tskim->itnim, tm_qwait);
2731                         bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
2732                                           &tskim->reqq_wait);
2733                 }
2734                 break;
2735
2736         default:
2737                 bfa_sm_fault(tskim->bfa, event);
2738         }
2739 }
2740
2741 /*
2742  * TM command is active, awaiting completion from firmware to
2743  * clean up IO requests in TM scope.
2744  */
2745 static void
2746 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2747 {
2748         bfa_trc(tskim->bfa, event);
2749
2750         switch (event) {
2751         case BFA_TSKIM_SM_DONE:
2752                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2753                 bfa_tskim_cleanup_ios(tskim);
2754                 break;
2755
2756         case BFA_TSKIM_SM_CLEANUP:
2757                 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
2758                 if (!bfa_tskim_send_abort(tskim)) {
2759                         bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
2760                         bfa_stats(tskim->itnim, tm_qwait);
2761                         bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
2762                                 &tskim->reqq_wait);
2763                 }
2764                 break;
2765
2766         case BFA_TSKIM_SM_HWFAIL:
2767                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2768                 bfa_tskim_iocdisable_ios(tskim);
2769                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2770                 break;
2771
2772         default:
2773                 bfa_sm_fault(tskim->bfa, event);
2774         }
2775 }
2776
2777 /*
2778  * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
2779  * completion event from firmware.
2780  */
2781 static void
2782 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2783 {
2784         bfa_trc(tskim->bfa, event);
2785
2786         switch (event) {
2787         case BFA_TSKIM_SM_DONE:
2788                 /*
2789                  * Ignore and wait for ABORT completion from firmware.
2790                  */
2791                 break;
2792
2793         case BFA_TSKIM_SM_CLEANUP_DONE:
2794                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2795                 bfa_tskim_cleanup_ios(tskim);
2796                 break;
2797
2798         case BFA_TSKIM_SM_HWFAIL:
2799                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2800                 bfa_tskim_iocdisable_ios(tskim);
2801                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2802                 break;
2803
2804         default:
2805                 bfa_sm_fault(tskim->bfa, event);
2806         }
2807 }
2808
2809 static void
2810 bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2811 {
2812         bfa_trc(tskim->bfa, event);
2813
2814         switch (event) {
2815         case BFA_TSKIM_SM_IOS_DONE:
2816                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2817                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
2818                 break;
2819
2820         case BFA_TSKIM_SM_CLEANUP:
2821                 /*
2822                  * Ignore, TM command completed on wire.
2823                  * Notify TM completion on IO cleanup completion.
2824                  */
2825                 break;
2826
2827         case BFA_TSKIM_SM_HWFAIL:
2828                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2829                 bfa_tskim_iocdisable_ios(tskim);
2830                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2831                 break;
2832
2833         default:
2834                 bfa_sm_fault(tskim->bfa, event);
2835         }
2836 }
2837
2838 /*
2839  * Task management command is waiting for room in request CQ
2840  */
2841 static void
2842 bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2843 {
2844         bfa_trc(tskim->bfa, event);
2845
2846         switch (event) {
2847         case BFA_TSKIM_SM_QRESUME:
2848                 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
2849                 bfa_tskim_send(tskim);
2850                 break;
2851
2852         case BFA_TSKIM_SM_CLEANUP:
2853                 /*
2854                  * No need to send TM on wire since ITN is offline.
2855                  */
2856                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2857                 bfa_reqq_wcancel(&tskim->reqq_wait);
2858                 bfa_tskim_cleanup_ios(tskim);
2859                 break;
2860
2861         case BFA_TSKIM_SM_HWFAIL:
2862                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2863                 bfa_reqq_wcancel(&tskim->reqq_wait);
2864                 bfa_tskim_iocdisable_ios(tskim);
2865                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2866                 break;
2867
2868         default:
2869                 bfa_sm_fault(tskim->bfa, event);
2870         }
2871 }
2872
2873 /*
2874  * Task management command is active, awaiting room in the request CQ
2875  * to send the cleanup request.
2876  */
2877 static void
2878 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
2879                 enum bfa_tskim_event event)
2880 {
2881         bfa_trc(tskim->bfa, event);
2882
2883         switch (event) {
2884         case BFA_TSKIM_SM_DONE:
2885                 bfa_reqq_wcancel(&tskim->reqq_wait);
2886                 /*
2887                  * Fall through !!!
2888                  */
2889         case BFA_TSKIM_SM_QRESUME:
2890                 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
2891                 bfa_tskim_send_abort(tskim);
2892                 break;
2893
2894         case BFA_TSKIM_SM_HWFAIL:
2895                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
2896                 bfa_reqq_wcancel(&tskim->reqq_wait);
2897                 bfa_tskim_iocdisable_ios(tskim);
2898                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
2899                 break;
2900
2901         default:
2902                 bfa_sm_fault(tskim->bfa, event);
2903         }
2904 }
2905
2906 /*
2907  * BFA callback is pending
2908  */
2909 static void
2910 bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2911 {
2912         bfa_trc(tskim->bfa, event);
2913
2914         switch (event) {
2915         case BFA_TSKIM_SM_HCB:
2916                 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
2917                 bfa_tskim_free(tskim);
2918                 break;
2919
2920         case BFA_TSKIM_SM_CLEANUP:
2921                 bfa_tskim_notify_comp(tskim);
2922                 break;
2923
2924         case BFA_TSKIM_SM_HWFAIL:
2925                 break;
2926
2927         default:
2928                 bfa_sm_fault(tskim->bfa, event);
2929         }
2930 }
2931
2932 static void
2933 __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
2934 {
2935         struct bfa_tskim_s *tskim = cbarg;
2936
2937         if (!complete) {
2938                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
2939                 return;
2940         }
2941
2942         bfa_stats(tskim->itnim, tm_success);
2943         bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
2944 }
2945
2946 static void
2947 __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
2948 {
2949         struct bfa_tskim_s *tskim = cbarg;
2950
2951         if (!complete) {
2952                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
2953                 return;
2954         }
2955
2956         bfa_stats(tskim->itnim, tm_failures);
2957         bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
2958                                 BFI_TSKIM_STS_FAILED);
2959 }
2960
2961 static bfa_boolean_t
2962 bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
2963 {
2964         switch (tskim->tm_cmnd) {
2965         case FCP_TM_TARGET_RESET:
2966                 return BFA_TRUE;
2967
2968         case FCP_TM_ABORT_TASK_SET:
2969         case FCP_TM_CLEAR_TASK_SET:
2970         case FCP_TM_LUN_RESET:
2971         case FCP_TM_CLEAR_ACA:
2972                 return !memcmp(&tskim->lun, &lun, sizeof(lun));
2973
2974         default:
2975                 WARN_ON(1);
2976         }
2977
2978         return BFA_FALSE;
2979 }
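
/*
 * Scope semantics above: a target reset covers every LUN behind the
 * itnim, while abort/clear task set, LUN reset and clear ACA apply only
 * to IOs whose LUN matches the TM command's LUN exactly.
 */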
2980
2981 /*
2982  * Gather all IO requests that fall within the task management command's scope.
2983  */
2984 static void
2985 bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
2986 {
2987         struct bfa_itnim_s *itnim = tskim->itnim;
2988         struct bfa_ioim_s *ioim;
2989         struct list_head *qe, *qen;
2990         struct scsi_cmnd *cmnd;
2991         struct scsi_lun scsilun;
2992
2993         INIT_LIST_HEAD(&tskim->io_q);
2994
2995         /*
2996          * Gather any active IO requests first.
2997          */
2998         list_for_each_safe(qe, qen, &itnim->io_q) {
2999                 ioim = (struct bfa_ioim_s *) qe;
3000                 cmnd = (struct scsi_cmnd *) ioim->dio;
3001                 int_to_scsilun(cmnd->device->lun, &scsilun);
3002                 if (bfa_tskim_match_scope(tskim, scsilun)) {
3003                         list_del(&ioim->qe);
3004                         list_add_tail(&ioim->qe, &tskim->io_q);
3005                 }
3006         }
3007
3008         /*
3009          * Fail back any pending IO requests immediately.
3010          */
3011         list_for_each_safe(qe, qen, &itnim->pending_q) {
3012                 ioim = (struct bfa_ioim_s *) qe;
3013                 cmnd = (struct scsi_cmnd *) ioim->dio;
3014                 int_to_scsilun(cmnd->device->lun, &scsilun);
3015                 if (bfa_tskim_match_scope(tskim, scsilun)) {
3016                         list_del(&ioim->qe);
3017                         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3018                         bfa_ioim_tov(ioim);
3019                 }
3020         }
3021 }
3022
3023 /*
3024  * IO cleanup completion
3025  */
3026 static void
3027 bfa_tskim_cleanp_comp(void *tskim_cbarg)
3028 {
3029         struct bfa_tskim_s *tskim = tskim_cbarg;
3030
3031         bfa_stats(tskim->itnim, tm_io_comps);
3032         bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3033 }
3034
3035 /*
3036  * Clean up all IO requests gathered under this task management command.
3037  */
3038 static void
3039 bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3040 {
3041         struct bfa_ioim_s *ioim;
3042         struct list_head        *qe, *qen;
3043
3044         bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
3045
3046         list_for_each_safe(qe, qen, &tskim->io_q) {
3047                 ioim = (struct bfa_ioim_s *) qe;
3048                 bfa_wc_up(&tskim->wc);
3049                 bfa_ioim_cleanup_tm(ioim, tskim);
3050         }
3051
3052         bfa_wc_wait(&tskim->wc);
3053 }
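
/*
 * The bfa_wc_* calls above implement a simple completion counter:
 * bfa_wc_init() registers bfa_tskim_cleanp_comp() and takes an initial
 * reference, each gathered IO takes one via bfa_wc_up() and drops it
 * when its cleanup completes, and bfa_wc_wait() releases the initial
 * reference so the callback fires once all IOs are done.
 */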
3054
3055 /*
3056  * Send task management request to firmware.
3057  */
3058 static bfa_boolean_t
3059 bfa_tskim_send(struct bfa_tskim_s *tskim)
3060 {
3061         struct bfa_itnim_s *itnim = tskim->itnim;
3062         struct bfi_tskim_req_s *m;
3063
3064         /*
3065          * check for room in queue to send request now
3066          */
3067         m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3068         if (!m)
3069                 return BFA_FALSE;
3070
3071         /*
3072          * build i/o request message next
3073          */
3074         bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3075                         bfa_fn_lpu(tskim->bfa));
3076
3077         m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3078         m->itn_fhdl = tskim->itnim->rport->fw_handle;
3079         m->t_secs = tskim->tsecs;
3080         m->lun = tskim->lun;
3081         m->tm_flags = tskim->tm_cmnd;
3082
3083         /*
3084          * queue I/O message to firmware
3085          */
3086         bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3087         return BFA_TRUE;
3088 }
3089
3090 /*
3091  * Send an abort request to firmware to clean up an active TM.
3092  */
3093 static bfa_boolean_t
3094 bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3095 {
3096         struct bfa_itnim_s      *itnim = tskim->itnim;
3097         struct bfi_tskim_abortreq_s     *m;
3098
3099         /*
3100          * check for room in queue to send request now
3101          */
3102         m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3103         if (!m)
3104                 return BFA_FALSE;
3105
3106         /*
3107          * build i/o request message next
3108          */
3109         bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3110                         bfa_fn_lpu(tskim->bfa));
3111
3112         m->tsk_tag  = cpu_to_be16(tskim->tsk_tag);
3113
3114         /*
3115          * queue I/O message to firmware
3116          */
3117         bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3118         return BFA_TRUE;
3119 }

/*
 * Callback to resume a task management command waiting for room in the
 * request queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}

/*
 * Cleanup IOs associated with a task management command on IOC failures.
 */
static void
bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}

/*
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	bfa_wc_down(&tskim->wc);
}

/*
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}

/*
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}

/*
 * Memory allocation and initialization.
 */
void
bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_fcp_mod_s *fcp = fcpim->fcp;
	u16	i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);
	INIT_LIST_HEAD(&fcpim->tskim_unused_q);

	tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
	fcpim->tskim_arr = tskim;

	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
		/*
		 * initialize TSKIM
		 */
		memset(tskim, 0, sizeof(struct bfa_tskim_s));
		tskim->tsk_tag = i;
		tskim->bfa = fcpim->bfa;
		tskim->fcpim = fcpim;
		tskim->notify = BFA_FALSE;
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
					tskim);
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);

		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) tskim;
}
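
/*
 * Note on the KVA carve-out above: bfa_mem_kva_curp(fcp) acts as a
 * bump allocator over one pre-sized KVA block. A sketch of the idiom
 * (the real sizes come from bfa_fcpim_meminfo()):
 *
 *	tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
 *	// ... consume num_tskim_reqs structures ...
 *	bfa_mem_kva_curp(fcp) = (u8 *) tskim;	// advance the cursor
 *
 * Every array carved this way (tskim_arr here, itn_arr and iotag_arr
 * later in this file) must have been accounted for when the KVA length
 * was computed, or the cursor would run past the block.
 */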

void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16	tsk_tag = be16_to_cpu(rsp->tsk_tag);

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	WARN_ON(tskim->tsk_tag != tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/*
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}

struct bfa_tskim_s *
bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_tskim_s *tskim;

	bfa_q_deq(&fcpim->tskim_free_q, &tskim);

	if (tskim)
		tskim->dtsk = dtsk;

	return tskim;
}

void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}

/*
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	LUN, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	tsecs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
			struct scsi_lun lun,
			enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim = itnim;
	tskim->lun = lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs = tsecs;
	tskim->notify = BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}
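
/*
 * Typical lifecycle, as a minimal sketch (values are illustrative;
 * FCP_TM_LUN_RESET is assumed to be one of the enum fcp_tm_cmnd codes,
 * and dtsk/itnim/scsilun come from the driver layer):
 *
 *	tskim = bfa_tskim_alloc(bfa, dtsk);
 *	if (tskim)
 *		bfa_tskim_start(tskim, itnim, scsilun,
 *				FCP_TM_LUN_RESET, 60);
 *
 * Completion is reported through the state machine; once the command
 * is done the driver returns the structure with bfa_tskim_free().
 */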

void
bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe;
	int	i;

	for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
		bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
		list_add_tail(qe, &fcpim->tskim_unused_q);
	}
}
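
/*
 * Worked example for the reconfiguration above (numbers are
 * hypothetical): if the host allocated num_tskim_reqs = 128 task
 * management structures but the firmware reports support for only
 * num_tskim_fw = 64, the loop parks the 64 excess entries on
 * tskim_unused_q so they can never be handed out by bfa_tskim_alloc().
 */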

/* BFA FCP module - parent module for fcpim */

BFA_MODULE(fcp);

static void
bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_ios, num_io_req;
	u32	km_len = 0;

	/*
	 * ZERO is an allowed config value for num_ioim_reqs and
	 * num_fwtio_reqs. So if the values are non-zero, clamp them to
	 * the supported range.
	 */
	if (cfg->fwcfg.num_ioim_reqs &&
	    cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;

	if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
		cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;

	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	if (num_io_req > BFA_IO_MAX) {
		if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
			cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX / 2;
			cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX / 2;
		} else if (cfg->fwcfg.num_fwtio_reqs) {
			cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
		} else {
			cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
		}
	}

	bfa_fcpim_meminfo(cfg, &km_len);

	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	km_len += num_io_req * sizeof(struct bfa_iotag_s);
	km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
	per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
		if (num_io_req >= per_seg_ios) {
			num_io_req -= per_seg_ios;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_ios * BFI_IOIM_SNSLEN);
		} else {
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_io_req * BFI_IOIM_SNSLEN);
		}
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcp_kva, km_len);
}
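
/*
 * Worked example for the sense-buffer DMA sizing above (all numbers
 * hypothetical): with num_io_req = 1000 requests and per_seg_ios = 512
 * sense buffers fitting in one DMA segment, two segments are needed;
 * the iterator sizes the first at 512 * BFI_IOIM_SNSLEN bytes and the
 * last at the remaining 488 * BFI_IOIM_SNSLEN bytes.
 */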

static void
bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	idx, nsegs, num_io_req;

	fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
	fcp->num_itns = cfg->fwcfg.num_rports;
	fcp->bfa = bfa;

	/*
	 * Setup the pool of snsbase addresses that is passed to fw as
	 * part of bfi_iocfc_cfg_s.
	 */
	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
		fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
		bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
	}

	bfa_fcpim_attach(fcp, bfad, cfg, pcidev);

	bfa_iotag_attach(fcp);

	fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
	bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
			(fcp->num_itns * sizeof(struct bfa_itn_s));
	memset(fcp->itn_arr, 0,
			(fcp->num_itns * sizeof(struct bfa_itn_s)));
}

static void
bfa_fcp_detach(struct bfa_s *bfa)
{
}

static void
bfa_fcp_start(struct bfa_s *bfa)
{
}

static void
bfa_fcp_stop(struct bfa_s *bfa)
{
}

static void
bfa_fcp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

	/* Enqueue unused ioim resources to free_q */
	list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);

	bfa_fcpim_iocdisable(fcp);
}

void
bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw)
{
	struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
	struct list_head *qe;
	int	i;

	for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
		bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
		list_add_tail(qe, &mod->iotag_unused_q);
	}
}

void
bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
		void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_itn_s *itn;

	itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
	itn->isr = isr;
}

/*
 * Itn interrupt processing.
 */
void
bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	union bfi_itn_i2h_msg_u msg;
	struct bfa_itn_s *itn;

	msg.msg = m;
	itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);

	if (itn->isr)
		itn->isr(bfa, m);
	else
		WARN_ON(1);
}
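
/*
 * The two functions above form a small registration/dispatch pair: a
 * client registers a per-rport handler with bfa_itn_create(), and
 * bfa_itn_isr() demultiplexes incoming messages back to it by tag. A
 * minimal sketch, assuming a hypothetical client handler name:
 *
 *	static void my_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
 *
 *	bfa_itn_create(bfa, rport, my_itnim_isr);
 *
 * Messages carrying that rport's bfa_handle are then delivered to
 * my_itnim_isr(); an unregistered handler trips the WARN_ON(1).
 */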

void
bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
{
	struct bfa_iotag_s *iotag;
	u16	num_io_req, i;

	iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
	fcp->iotag_arr = iotag;

	INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
	INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
	INIT_LIST_HEAD(&fcp->iotag_unused_q);

	num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
	for (i = 0; i < num_io_req; i++, iotag++) {
		memset(iotag, 0, sizeof(struct bfa_iotag_s));
		iotag->tag = i;
		if (i < fcp->num_ioim_reqs)
			list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
		else
			list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) iotag;
}
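
/*
 * Worked example for the tag split above (counts are hypothetical):
 * with num_ioim_reqs = 4 and num_fwtio_reqs = 2, tags 0..3 land on
 * iotag_ioim_free_q for host-initiated IO and tags 4..5 land on
 * iotag_tio_free_q for firmware target IO, so the two pools can never
 * hand out the same tag.
 */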