[pandora-kernel.git] drivers/scsi/qla2xxx/qla_bsg.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12
13 /* BSG support for ELS/CT pass through */
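/*
 * Allocate an SRB from the srb mempool along with a zeroed context of
 * @size bytes; attach @fcport and the context to the SRB.  Returns NULL
 * if either allocation fails.
 */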
14 inline srb_t *
15 qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
16 {
17         srb_t *sp;
18         struct qla_hw_data *ha = vha->hw;
19         struct srb_ctx *ctx;
20
21         sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
22         if (!sp)
23                 goto done;
24         ctx = kzalloc(size, GFP_KERNEL);
25         if (!ctx) {
26                 mempool_free(sp, ha->srb_mempool);
27                 sp = NULL;
28                 goto done;
29         }
30
31         memset(sp, 0, sizeof(*sp));
32         sp->fcport = fcport;
33         sp->ctx = ctx;
34 done:
35         return sp;
36 }
37
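/*
 * Validate the FCP priority configuration read from flash: a first word of
 * 0xFFFFFFFF means no data has been programmed, the buffer must begin with
 * the "HQOS" signature and, when @flag is set, at least one entry must have
 * FCP_PRIO_ENTRY_TAG_VALID set.  Returns 1 if the data is usable, 0 if not.
 */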
38 int
39 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
40         struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
41 {
42         int i, ret, num_valid;
43         uint8_t *bcode;
44         struct qla_fcp_prio_entry *pri_entry;
45         uint32_t *bcode_val_ptr, bcode_val;
46
47         ret = 1;
48         num_valid = 0;
49         bcode = (uint8_t *)pri_cfg;
50         bcode_val_ptr = (uint32_t *)pri_cfg;
51         bcode_val = (uint32_t)(*bcode_val_ptr);
52
53         if (bcode_val == 0xFFFFFFFF) {
54                 /* No FCP Priority config data in flash */
55                 ql_dbg(ql_dbg_user, vha, 0x7051,
56                     "No FCP Priority config data.\n");
57                 return 0;
58         }
59
60         if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
61                         bcode[3] != 'S') {
62                 /* Invalid FCP priority data header */
63                 ql_dbg(ql_dbg_user, vha, 0x7052,
64                     "Invalid FCP Priority data header. bcode=0x%x.\n",
65                     bcode_val);
66                 return 0;
67         }
68         if (flag != 1)
69                 return ret;
70
71         pri_entry = &pri_cfg->entry[0];
72         for (i = 0; i < pri_cfg->num_entries; i++) {
73                 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
74                         num_valid++;
75                 pri_entry++;
76         }
77
78         if (num_valid == 0) {
79                 /* No valid FCP priority data entries */
80                 ql_dbg(ql_dbg_user, vha, 0x7053,
81                     "No valid FCP Priority data entries.\n");
82                 ret = 0;
83         } else {
84                 /* FCP priority data is valid */
85                 ql_dbg(ql_dbg_user, vha, 0x7054,
86                     "Valid FCP priority data. num entries = %d.\n",
87                     num_valid);
88         }
89
90         return ret;
91 }
92
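/*
 * BSG vendor command handler for the FCP priority feature on ISP24xx/25xx
 * adapters: enable or disable the feature, or get/set the configuration
 * data carried in the BSG reply/request payload.
 */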
93 static int
94 qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
95 {
96         struct Scsi_Host *host = bsg_job->shost;
97         scsi_qla_host_t *vha = shost_priv(host);
98         struct qla_hw_data *ha = vha->hw;
99         int ret = 0;
100         uint32_t len;
101         uint32_t oper;
102
103         bsg_job->reply->reply_payload_rcv_len = 0;
104
105         if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) {
106                 ret = -EINVAL;
107                 goto exit_fcp_prio_cfg;
108         }
109
110         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
111                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
112                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
113                 ret = -EBUSY;
114                 goto exit_fcp_prio_cfg;
115         }
116
117         /* Get the sub command */
118         oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
119
120         /* Only set config is allowed if config memory is not allocated */
121         if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
122                 ret = -EINVAL;
123                 goto exit_fcp_prio_cfg;
124         }
125         switch (oper) {
126         case QLFC_FCP_PRIO_DISABLE:
127                 if (ha->flags.fcp_prio_enabled) {
128                         ha->flags.fcp_prio_enabled = 0;
129                         ha->fcp_prio_cfg->attributes &=
130                                 ~FCP_PRIO_ATTR_ENABLE;
131                         qla24xx_update_all_fcp_prio(vha);
132                         bsg_job->reply->result = DID_OK;
133                 } else {
134                         ret = -EINVAL;
135                         bsg_job->reply->result = (DID_ERROR << 16);
136                         goto exit_fcp_prio_cfg;
137                 }
138                 break;
139
140         case QLFC_FCP_PRIO_ENABLE:
141                 if (!ha->flags.fcp_prio_enabled) {
142                         if (ha->fcp_prio_cfg) {
143                                 ha->flags.fcp_prio_enabled = 1;
144                                 ha->fcp_prio_cfg->attributes |=
145                                     FCP_PRIO_ATTR_ENABLE;
146                                 qla24xx_update_all_fcp_prio(vha);
147                                 bsg_job->reply->result = DID_OK;
148                         } else {
149                                 ret = -EINVAL;
150                                 bsg_job->reply->result = (DID_ERROR << 16);
151                                 goto exit_fcp_prio_cfg;
152                         }
153                 }
154                 break;
155
156         case QLFC_FCP_PRIO_GET_CONFIG:
157                 len = bsg_job->reply_payload.payload_len;
158                 if (!len || len > FCP_PRIO_CFG_SIZE) {
159                         ret = -EINVAL;
160                         bsg_job->reply->result = (DID_ERROR << 16);
161                         goto exit_fcp_prio_cfg;
162                 }
163
164                 bsg_job->reply->result = DID_OK;
165                 bsg_job->reply->reply_payload_rcv_len =
166                         sg_copy_from_buffer(
167                         bsg_job->reply_payload.sg_list,
168                         bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
169                         len);
170
171                 break;
172
173         case QLFC_FCP_PRIO_SET_CONFIG:
174                 len = bsg_job->request_payload.payload_len;
175                 if (!len || len > FCP_PRIO_CFG_SIZE) {
176                         bsg_job->reply->result = (DID_ERROR << 16);
177                         ret = -EINVAL;
178                         goto exit_fcp_prio_cfg;
179                 }
180
181                 if (!ha->fcp_prio_cfg) {
182                         ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
183                         if (!ha->fcp_prio_cfg) {
184                                 ql_log(ql_log_warn, vha, 0x7050,
185                                     "Unable to allocate memory for fcp prio "
186                                     "config data (%x).\n", FCP_PRIO_CFG_SIZE);
187                                 bsg_job->reply->result = (DID_ERROR << 16);
188                                 ret = -ENOMEM;
189                                 goto exit_fcp_prio_cfg;
190                         }
191                 }
192
193                 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
194                 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
195                 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
196                         FCP_PRIO_CFG_SIZE);
197
198                 /* validate fcp priority data */
199
200                 if (!qla24xx_fcp_prio_cfg_valid(vha,
201                     (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
202                         bsg_job->reply->result = (DID_ERROR << 16);
203                         ret = -EINVAL;
204                         /* If the FCP priority data is invalid,
205                          * fcp_prio_cfg is of no use
206                          */
207                         vfree(ha->fcp_prio_cfg);
208                         ha->fcp_prio_cfg = NULL;
209                         goto exit_fcp_prio_cfg;
210                 }
211
212                 ha->flags.fcp_prio_enabled = 0;
213                 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
214                         ha->flags.fcp_prio_enabled = 1;
215                 qla24xx_update_all_fcp_prio(vha);
216                 bsg_job->reply->result = DID_OK;
217                 break;
218         default:
219                 ret = -EINVAL;
220                 break;
221         }
222 exit_fcp_prio_cfg:
223         bsg_job->job_done(bsg_job);
224         return ret;
225 }
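
/*
 * Handle an ELS pass-through request, either directed at an rport
 * (FC_BSG_RPT_ELS) or host-based without a prior login
 * (FC_BSG_HST_ELS_NOLOGIN).  The request and reply scatterlists are DMA
 * mapped, an SRB is built around the BSG job and issued with
 * qla2x00_start_sp().
 */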
226 static int
227 qla2x00_process_els(struct fc_bsg_job *bsg_job)
228 {
229         struct fc_rport *rport;
230         fc_port_t *fcport = NULL;
231         struct Scsi_Host *host;
232         scsi_qla_host_t *vha;
233         struct qla_hw_data *ha;
234         srb_t *sp;
235         const char *type;
236         int req_sg_cnt, rsp_sg_cnt;
237         int rval =  (DRIVER_ERROR << 16);
238         uint16_t nextlid = 0;
239         struct srb_ctx *els;
240
241         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
242                 rport = bsg_job->rport;
243                 fcport = *(fc_port_t **) rport->dd_data;
244                 host = rport_to_shost(rport);
245                 vha = shost_priv(host);
246                 ha = vha->hw;
247                 type = "FC_BSG_RPT_ELS";
248         } else {
249                 host = bsg_job->shost;
250                 vha = shost_priv(host);
251                 ha = vha->hw;
252                 type = "FC_BSG_HST_ELS_NOLOGIN";
253         }
254
255         /* pass through is supported only for ISP 4Gb or higher */
256         if (!IS_FWI2_CAPABLE(ha)) {
257                 ql_dbg(ql_dbg_user, vha, 0x7001,
258                     "ELS passthru not supported for ISP23xx based adapters.\n");
259                 rval = -EPERM;
260                 goto done;
261         }
262
263         /*  Multiple SG's are not supported for ELS requests */
264         if (bsg_job->request_payload.sg_cnt > 1 ||
265                 bsg_job->reply_payload.sg_cnt > 1) {
266                 ql_dbg(ql_dbg_user, vha, 0x7002,
267                     "Multiple SG's are not supported for ELS requests, "
268                     "request_sg_cnt=%x reply_sg_cnt=%x.\n",
269                     bsg_job->request_payload.sg_cnt,
270                     bsg_job->reply_payload.sg_cnt);
271                 rval = -EPERM;
272                 goto done;
273         }
274
275         /* ELS request for rport */
276         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
277                 /* make sure the rport is logged in,
278                  * if not perform fabric login
279                  */
280                 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
281                         ql_dbg(ql_dbg_user, vha, 0x7003,
282                             "Failed to login port %06X for ELS passthru.\n",
283                             fcport->d_id.b24);
284                         rval = -EIO;
285                         goto done;
286                 }
287         } else {
288                 /* Allocate a dummy fcport structure, since functions
289                  * preparing the IOCB and mailbox command retrieves port
290                  * specific information from fcport structure. For Host based
291                  * ELS commands there will be no fcport structure allocated
292                  */
293                 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
294                 if (!fcport) {
295                         rval = -ENOMEM;
296                         goto done;
297                 }
298
299                 /* Initialize all required fields of fcport */
300                 fcport->vha = vha;
301                 fcport->vp_idx = vha->vp_idx;
302                 fcport->d_id.b.al_pa =
303                         bsg_job->request->rqst_data.h_els.port_id[0];
304                 fcport->d_id.b.area =
305                         bsg_job->request->rqst_data.h_els.port_id[1];
306                 fcport->d_id.b.domain =
307                         bsg_job->request->rqst_data.h_els.port_id[2];
308                 fcport->loop_id =
309                         (fcport->d_id.b.al_pa == 0xFD) ?
310                         NPH_FABRIC_CONTROLLER : NPH_F_PORT;
311         }
312
313         if (!vha->flags.online) {
314                 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
315                 rval = -EIO;
316                 goto done;
317         }
318
319         req_sg_cnt =
320                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
321                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
322         if (!req_sg_cnt) {
323                 rval = -ENOMEM;
324                 goto done_free_fcport;
325         }
326
327         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
328                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
329         if (!rsp_sg_cnt) {
330                 rval = -ENOMEM;
331                 goto done_free_fcport;
332         }
333
334         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
335                 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
336                 ql_log(ql_log_warn, vha, 0x7008,
337                     "dma mapping resulted in different sg counts, "
338                     "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
339                     "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
340                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
341                 rval = -EAGAIN;
342                 goto done_unmap_sg;
343         }
344
345         /* Alloc SRB structure */
346         sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
347         if (!sp) {
348                 rval = -ENOMEM;
349                 goto done_unmap_sg;
350         }
351
352         els = sp->ctx;
353         els->type =
354                 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
355                 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
356         els->name =
357                 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
358                 "bsg_els_rpt" : "bsg_els_hst");
359         els->u.bsg_job = bsg_job;
360
361         ql_dbg(ql_dbg_user, vha, 0x700a,
362             "bsg rqst type: %s els type: %x - loop-id=%x "
363             "portid=%02x%02x%02x.\n", type,
364             bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
365             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
366
367         rval = qla2x00_start_sp(sp);
368         if (rval != QLA_SUCCESS) {
369                 ql_log(ql_log_warn, vha, 0x700e,
370                     "qla2x00_start_sp failed = %d\n", rval);
371                 kfree(sp->ctx);
372                 mempool_free(sp, ha->srb_mempool);
373                 rval = -EIO;
374                 goto done_unmap_sg;
375         }
376         return rval;
377
378 done_unmap_sg:
379         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
380                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
381         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
382                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
383         goto done_free_fcport;
384
385 done_free_fcport:
386         if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
387                 kfree(fcport);
388 done:
389         return rval;
390 }
391
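/*
 * Handle a CT pass-through request (FC_BSG_HST_CT): map the request and
 * reply scatterlists, derive the loop ID from the CT preamble (0xFC ->
 * SNS, 0xFA -> management server), build a dummy fcport and an SRB, and
 * issue it with qla2x00_start_sp().
 */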
392 static int
393 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
394 {
395         srb_t *sp;
396         struct Scsi_Host *host = bsg_job->shost;
397         scsi_qla_host_t *vha = shost_priv(host);
398         struct qla_hw_data *ha = vha->hw;
399         int rval = (DRIVER_ERROR << 16);
400         int req_sg_cnt, rsp_sg_cnt;
401         uint16_t loop_id;
402         struct fc_port *fcport;
403         char  *type = "FC_BSG_HST_CT";
404         struct srb_ctx *ct;
405
406         req_sg_cnt =
407                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
408                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
409         if (!req_sg_cnt) {
410                 ql_log(ql_log_warn, vha, 0x700f,
411                     "dma_map_sg returned %d for request.\n", req_sg_cnt);
412                 rval = -ENOMEM;
413                 goto done;
414         }
415
416         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
417                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
418         if (!rsp_sg_cnt) {
419                 ql_log(ql_log_warn, vha, 0x7010,
420                     "dma_map_sg returned %d for reply.\n", rsp_sg_cnt);
421                 rval = -ENOMEM;
422                 goto done;
423         }
424
425         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
426             (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
427                 ql_log(ql_log_warn, vha, 0x7011,
428                     "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
429                     "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
430                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
431                 rval = -EAGAIN;
432                 goto done_unmap_sg;
433         }
434
435         if (!vha->flags.online) {
436                 ql_log(ql_log_warn, vha, 0x7012,
437                     "Host is not online.\n");
438                 rval = -EIO;
439                 goto done_unmap_sg;
440         }
441
442         loop_id =
443                 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
444                         >> 24;
445         switch (loop_id) {
446         case 0xFC:
447                 loop_id = cpu_to_le16(NPH_SNS);
448                 break;
449         case 0xFA:
450                 loop_id = vha->mgmt_svr_loop_id;
451                 break;
452         default:
453                 ql_dbg(ql_dbg_user, vha, 0x7013,
454                     "Unknown loop id: %x.\n", loop_id);
455                 rval = -EINVAL;
456                 goto done_unmap_sg;
457         }
458
459         /* Allocate a dummy fcport structure, since functions preparing the
460          * IOCB and mailbox command retrieves port specific information
461          * from fcport structure. For Host based ELS commands there will be
462          * no fcport structure allocated
463          */
464         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
465         if (!fcport) {
466                 ql_log(ql_log_warn, vha, 0x7014,
467                     "Failed to allocate fcport.\n");
468                 rval = -ENOMEM;
469                 goto done_unmap_sg;
470         }
471
472         /* Initialize all required fields of fcport */
473         fcport->vha = vha;
474         fcport->vp_idx = vha->vp_idx;
475         fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
476         fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
477         fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
478         fcport->loop_id = loop_id;
479
480         /* Alloc SRB structure */
481         sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
482         if (!sp) {
483                 ql_log(ql_log_warn, vha, 0x7015,
484                     "qla2x00_get_ctx_bsg_sp failed.\n");
485                 rval = -ENOMEM;
486                 goto done_free_fcport;
487         }
488
489         ct = sp->ctx;
490         ct->type = SRB_CT_CMD;
491         ct->name = "bsg_ct";
492         ct->u.bsg_job = bsg_job;
493
494         ql_dbg(ql_dbg_user, vha, 0x7016,
495             "bsg rqst type: %s els type: %x - "
496             "loop-id=%x portid=%02x%02x%02x.\n", type,
497             (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
498             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
499             fcport->d_id.b.al_pa);
500
501         rval = qla2x00_start_sp(sp);
502         if (rval != QLA_SUCCESS) {
503                 ql_log(ql_log_warn, vha, 0x7017,
504                     "qla2x00_start_sp failed=%d.\n", rval);
505                 kfree(sp->ctx);
506                 mempool_free(sp, ha->srb_mempool);
507                 rval = -EIO;
508                 goto done_free_fcport;
509         }
510         return rval;
511
512 done_free_fcport:
513         kfree(fcport);
514 done_unmap_sg:
515         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
516                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
517         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
518                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
519 done:
520         return rval;
521 }
522
523 /* Set the port configuration to enable the
524  * internal loopback on ISP81XX
525  */
526 static inline int
527 qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
528     uint16_t *new_config)
529 {
530         int ret = 0;
531         int rval = 0;
532         struct qla_hw_data *ha = vha->hw;
533
534         if (!IS_QLA81XX(ha))
535                 goto done_set_internal;
536
537         new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
538         memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
539
540         ha->notify_dcbx_comp = 1;
541         ret = qla81xx_set_port_config(vha, new_config);
542         if (ret != QLA_SUCCESS) {
543                 ql_log(ql_log_warn, vha, 0x7021,
544                     "set port config failed.\n");
545                 ha->notify_dcbx_comp = 0;
546                 rval = -EINVAL;
547                 goto done_set_internal;
548         }
549
550         /* Wait for DCBX complete event */
551         if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
552                 ql_dbg(ql_dbg_user, vha, 0x7022,
553                     "State change notification not received.\n");
554         } else
555                 ql_dbg(ql_dbg_user, vha, 0x7023,
556                     "State change received.\n");
557
558         ha->notify_dcbx_comp = 0;
559
560 done_set_internal:
561         return rval;
562 }
563
564 /* Set the port configuration to disable the
565  * internal loopback on ISP81XX
566  */
567 static inline int
568 qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
569     int wait)
570 {
571         int ret = 0;
572         int rval = 0;
573         uint16_t new_config[4];
574         struct qla_hw_data *ha = vha->hw;
575
576         if (!IS_QLA81XX(ha))
577                 goto done_reset_internal;
578
579         memset(new_config, 0, sizeof(new_config));
580         if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
581                         ENABLE_INTERNAL_LOOPBACK) {
582                 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
583                 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
584
585                 ha->notify_dcbx_comp = wait;
586                 ret = qla81xx_set_port_config(vha, new_config);
587                 if (ret != QLA_SUCCESS) {
588                         ql_log(ql_log_warn, vha, 0x7025,
589                             "Set port config failed.\n");
590                         ha->notify_dcbx_comp = 0;
591                         rval = -EINVAL;
592                         goto done_reset_internal;
593                 }
594
595                 /* Wait for DCBX complete event */
596                 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
597                         (20 * HZ))) {
598                         ql_dbg(ql_dbg_user, vha, 0x7026,
599                             "State change notification not received.\n");
600                         ha->notify_dcbx_comp = 0;
601                         rval = -EINVAL;
602                         goto done_reset_internal;
603                 } else
604                         ql_dbg(ql_dbg_user, vha, 0x7027,
605                             "State change received.\n");
606
607                 ha->notify_dcbx_comp = 0;
608         }
609 done_reset_internal:
610         return rval;
611 }
612
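/*
 * Handle the loopback/echo diagnostic vendor command: copy the request
 * payload into a coherent DMA buffer, run either an ECHO test or a
 * loopback test (configuring internal/external loopback on ISP81xx as
 * needed), and place the mailbox status words after the fc_bsg_reply in
 * the sense buffer.
 */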
613 static int
614 qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
615 {
616         struct Scsi_Host *host = bsg_job->shost;
617         scsi_qla_host_t *vha = shost_priv(host);
618         struct qla_hw_data *ha = vha->hw;
619         int rval;
620         uint8_t command_sent;
621         char *type;
622         struct msg_echo_lb elreq;
623         uint16_t response[MAILBOX_REGISTER_COUNT];
624         uint16_t config[4], new_config[4];
625         uint8_t *fw_sts_ptr;
626         uint8_t *req_data = NULL;
627         dma_addr_t req_data_dma;
628         uint32_t req_data_len;
629         uint8_t *rsp_data = NULL;
630         dma_addr_t rsp_data_dma;
631         uint32_t rsp_data_len;
632
633         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
634                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
635                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
636                 ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n");
637                 return -EBUSY;
638         }
639
640         if (!vha->flags.online) {
641                 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
642                 return -EIO;
643         }
644
645         elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
646                 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
647                 DMA_TO_DEVICE);
648
649         if (!elreq.req_sg_cnt) {
650                 ql_log(ql_log_warn, vha, 0x701a,
651                     "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
652                 return -ENOMEM;
653         }
654
655         elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
656                 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
657                 DMA_FROM_DEVICE);
658
659         if (!elreq.rsp_sg_cnt) {
660                 ql_log(ql_log_warn, vha, 0x701b,
661                     "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
662                 rval = -ENOMEM;
663                 goto done_unmap_req_sg;
664         }
665
666         if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
667                 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
668                 ql_log(ql_log_warn, vha, 0x701c,
669                     "dma mapping resulted in different sg counts, "
670                     "request_sg_cnt: %x dma_request_sg_cnt: %x "
671                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
672                     bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
673                     bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
674                 rval = -EAGAIN;
675                 goto done_unmap_sg;
676         }
677         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
678         req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
679                 &req_data_dma, GFP_KERNEL);
680         if (!req_data) {
681                 ql_log(ql_log_warn, vha, 0x701d,
682                     "dma alloc failed for req_data.\n");
683                 rval = -ENOMEM;
684                 goto done_unmap_sg;
685         }
686
687         rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
688                 &rsp_data_dma, GFP_KERNEL);
689         if (!rsp_data) {
690                 ql_log(ql_log_warn, vha, 0x7004,
691                     "dma alloc failed for rsp_data.\n");
692                 rval = -ENOMEM;
693                 goto done_free_dma_req;
694         }
695
696         /* Copy the request buffer in req_data now */
697         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
698                 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
699
700         elreq.send_dma = req_data_dma;
701         elreq.rcv_dma = rsp_data_dma;
702         elreq.transfer_size = req_data_len;
703
704         elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
705
706         if ((ha->current_topology == ISP_CFG_F ||
707             (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
708             (IS_QLA81XX(ha) &&
709             le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
710             && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
711                 elreq.options == EXTERNAL_LOOPBACK) {
712                 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
713                 ql_dbg(ql_dbg_user, vha, 0x701e,
714                     "BSG request type: %s.\n", type);
715                 command_sent = INT_DEF_LB_ECHO_CMD;
716                 rval = qla2x00_echo_test(vha, &elreq, response);
717         } else {
718                 if (IS_QLA81XX(ha)) {
719                         memset(config, 0, sizeof(config));
720                         memset(new_config, 0, sizeof(new_config));
721                         if (qla81xx_get_port_config(vha, config)) {
722                                 ql_log(ql_log_warn, vha, 0x701f,
723                                     "Get port config failed.\n");
724                                 bsg_job->reply->reply_payload_rcv_len = 0;
725                                 bsg_job->reply->result = (DID_ERROR << 16);
726                                 rval = -EPERM;
727                                 goto done_free_dma_req;
728                         }
729
730                         if (elreq.options != EXTERNAL_LOOPBACK) {
731                                 ql_dbg(ql_dbg_user, vha, 0x7020,
732                                     "Internal: current port config = %x\n",
733                                     config[0]);
734                                 if (qla81xx_set_internal_loopback(vha, config,
735                                         new_config)) {
736                                         ql_log(ql_log_warn, vha, 0x7024,
737                                             "Internal loopback failed.\n");
738                                         bsg_job->reply->reply_payload_rcv_len =
739                                                 0;
740                                         bsg_job->reply->result =
741                                                 (DID_ERROR << 16);
742                                         rval = -EPERM;
743                                         goto done_free_dma_req;
744                                 }
745                         } else {
746                                 /* For external loopback to work
747                                  * ensure internal loopback is disabled
748                                  */
749                                 if (qla81xx_reset_internal_loopback(vha,
750                                         config, 1)) {
751                                         bsg_job->reply->reply_payload_rcv_len =
752                                                 0;
753                                         bsg_job->reply->result =
754                                                 (DID_ERROR << 16);
755                                         rval = -EPERM;
756                                         goto done_free_dma_req;
757                                 }
758                         }
759
760                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
761                         ql_dbg(ql_dbg_user, vha, 0x7028,
762                             "BSG request type: %s.\n", type);
763
764                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
765                         rval = qla2x00_loopback_test(vha, &elreq, response);
766
767                         if (new_config[0]) {
768                                 /* Revert back to original port config
769                                  * Also clear internal loopback
770                                  */
771                                 qla81xx_reset_internal_loopback(vha,
772                                     new_config, 0);
773                         }
774
775                         if (response[0] == MBS_COMMAND_ERROR &&
776                                         response[1] == MBS_LB_RESET) {
777                                 ql_log(ql_log_warn, vha, 0x7029,
778                                     "MBX command error, Aborting ISP.\n");
779                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
780                                 qla2xxx_wake_dpc(vha);
781                                 qla2x00_wait_for_chip_reset(vha);
782                                 /* Also reset the MPI */
783                                 if (qla81xx_restart_mpi_firmware(vha) !=
784                                     QLA_SUCCESS) {
785                                         ql_log(ql_log_warn, vha, 0x702a,
786                                             "MPI reset failed.\n");
787                                 }
788
789                                 bsg_job->reply->reply_payload_rcv_len = 0;
790                                 bsg_job->reply->result = (DID_ERROR << 16);
791                                 rval = -EIO;
792                                 goto done_free_dma_req;
793                         }
794                 } else {
795                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
796                         ql_dbg(ql_dbg_user, vha, 0x702b,
797                             "BSG request type: %s.\n", type);
798                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
799                         rval = qla2x00_loopback_test(vha, &elreq, response);
800                 }
801         }
802
803         if (rval) {
804                 ql_log(ql_log_warn, vha, 0x702c,
805                     "Vendor request %s failed.\n", type);
806
807                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
808                     sizeof(struct fc_bsg_reply);
809
810                 memcpy(fw_sts_ptr, response, sizeof(response));
811                 fw_sts_ptr += sizeof(response);
812                 *fw_sts_ptr = command_sent;
813                 rval = 0;
814                 bsg_job->reply->reply_payload_rcv_len = 0;
815                 bsg_job->reply->result = (DID_ERROR << 16);
816         } else {
817                 ql_dbg(ql_dbg_user, vha, 0x702d,
818                     "Vendor request %s completed.\n", type);
819
820                 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
821                         sizeof(response) + sizeof(uint8_t);
822                 bsg_job->reply->reply_payload_rcv_len =
823                         bsg_job->reply_payload.payload_len;
824                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
825                         sizeof(struct fc_bsg_reply);
826                 memcpy(fw_sts_ptr, response, sizeof(response));
827                 fw_sts_ptr += sizeof(response);
828                 *fw_sts_ptr = command_sent;
829                 bsg_job->reply->result = DID_OK;
830                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
831                         bsg_job->reply_payload.sg_cnt, rsp_data,
832                         rsp_data_len);
833         }
834         bsg_job->job_done(bsg_job);
835
836         dma_free_coherent(&ha->pdev->dev, rsp_data_len,
837                 rsp_data, rsp_data_dma);
838 done_free_dma_req:
839         dma_free_coherent(&ha->pdev->dev, req_data_len,
840                 req_data, req_data_dma);
841 done_unmap_sg:
842         dma_unmap_sg(&ha->pdev->dev,
843             bsg_job->reply_payload.sg_list,
844             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
845 done_unmap_req_sg:
846         dma_unmap_sg(&ha->pdev->dev,
847             bsg_job->request_payload.sg_list,
848             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
849         return rval;
850 }
851
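/*
 * Vendor command: reset the ISP84xx chip, optionally selecting diagnostic
 * firmware (A84_ISSUE_RESET_DIAG_FW).
 */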
852 static int
853 qla84xx_reset(struct fc_bsg_job *bsg_job)
854 {
855         struct Scsi_Host *host = bsg_job->shost;
856         scsi_qla_host_t *vha = shost_priv(host);
857         struct qla_hw_data *ha = vha->hw;
858         int rval = 0;
859         uint32_t flag;
860
861         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
862             test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
863             test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
864                 ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n");
865                 return -EBUSY;
866         }
867
868         if (!IS_QLA84XX(ha)) {
869                 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
870                 return -EINVAL;
871         }
872
873         flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
874
875         rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
876
877         if (rval) {
878                 ql_log(ql_log_warn, vha, 0x7030,
879                     "Vendor request 84xx reset failed.\n");
880                 rval = bsg_job->reply->reply_payload_rcv_len = 0;
881                 bsg_job->reply->result = (DID_ERROR << 16);
882
883         } else {
884                 ql_dbg(ql_dbg_user, vha, 0x7031,
885                     "Vendor request 84xx reset completed.\n");
886                 bsg_job->reply->result = DID_OK;
887         }
888
889         bsg_job->job_done(bsg_job);
890         return rval;
891 }
892
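/*
 * Vendor command: download a firmware image to the ISP84xx.  The image is
 * copied from the request payload into a coherent DMA buffer and handed
 * to the chip via a VERIFY CHIP IOCB, optionally flagged as diagnostic
 * firmware.
 */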
893 static int
894 qla84xx_updatefw(struct fc_bsg_job *bsg_job)
895 {
896         struct Scsi_Host *host = bsg_job->shost;
897         scsi_qla_host_t *vha = shost_priv(host);
898         struct qla_hw_data *ha = vha->hw;
899         struct verify_chip_entry_84xx *mn = NULL;
900         dma_addr_t mn_dma, fw_dma;
901         void *fw_buf = NULL;
902         int rval = 0;
903         uint32_t sg_cnt;
904         uint32_t data_len;
905         uint16_t options;
906         uint32_t flag;
907         uint32_t fw_ver;
908
909         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
910                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
911                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
912                 return -EBUSY;
913
914         if (!IS_QLA84XX(ha)) {
915                 ql_dbg(ql_dbg_user, vha, 0x7032,
916                     "Not 84xx, exiting.\n");
917                 return -EINVAL;
918         }
919
920         sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
921                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
922         if (!sg_cnt) {
923                 ql_log(ql_log_warn, vha, 0x7033,
924                     "dma_map_sg returned %d for request.\n", sg_cnt);
925                 return -ENOMEM;
926         }
927
928         if (sg_cnt != bsg_job->request_payload.sg_cnt) {
929                 ql_log(ql_log_warn, vha, 0x7034,
930                     "DMA mapping resulted in different sg counts, "
931                     "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
932                     bsg_job->request_payload.sg_cnt, sg_cnt);
933                 rval = -EAGAIN;
934                 goto done_unmap_sg;
935         }
936
937         data_len = bsg_job->request_payload.payload_len;
938         fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
939                 &fw_dma, GFP_KERNEL);
940         if (!fw_buf) {
941                 ql_log(ql_log_warn, vha, 0x7035,
942                     "DMA alloc failed for fw_buf.\n");
943                 rval = -ENOMEM;
944                 goto done_unmap_sg;
945         }
946
947         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
948                 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
949
950         mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
951         if (!mn) {
952                 ql_log(ql_log_warn, vha, 0x7036,
953                     "DMA alloc failed for fw buffer.\n");
954                 rval = -ENOMEM;
955                 goto done_free_fw_buf;
956         }
957
958         flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
959         fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
960
961         memset(mn, 0, sizeof(struct access_chip_84xx));
962         mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
963         mn->entry_count = 1;
964
965         options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
966         if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
967                 options |= VCO_DIAG_FW;
968
969         mn->options = cpu_to_le16(options);
970         mn->fw_ver =  cpu_to_le32(fw_ver);
971         mn->fw_size =  cpu_to_le32(data_len);
972         mn->fw_seq_size =  cpu_to_le32(data_len);
973         mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
974         mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
975         mn->dseg_length = cpu_to_le32(data_len);
976         mn->data_seg_cnt = cpu_to_le16(1);
977
978         rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
979
980         if (rval) {
981                 ql_log(ql_log_warn, vha, 0x7037,
982                     "Vendor request 84xx updatefw failed.\n");
983
984                 rval = bsg_job->reply->reply_payload_rcv_len = 0;
985                 bsg_job->reply->result = (DID_ERROR << 16);
986
987         } else {
988                 ql_dbg(ql_dbg_user, vha, 0x7038,
989                     "Vendor request 84xx updatefw completed.\n");
990
991                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
992                 bsg_job->reply->result = DID_OK;
993         }
994
995         bsg_job->job_done(bsg_job);
996         dma_pool_free(ha->s_dma_pool, mn, mn_dma);
997
998 done_free_fw_buf:
999         dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1000
1001 done_unmap_sg:
1002         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1003                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1004
1005         return rval;
1006 }
1007
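/*
 * Vendor command: ISP84xx management pass-through.  Depending on the
 * sub-command this reads or writes chip memory, retrieves information or
 * changes a configuration parameter, using an ACCESS CHIP IOCB and a
 * coherent bounce buffer for the data phase.
 */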
1008 static int
1009 qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1010 {
1011         struct Scsi_Host *host = bsg_job->shost;
1012         scsi_qla_host_t *vha = shost_priv(host);
1013         struct qla_hw_data *ha = vha->hw;
1014         struct access_chip_84xx *mn = NULL;
1015         dma_addr_t mn_dma, mgmt_dma;
1016         void *mgmt_b = NULL;
1017         int rval = 0;
1018         struct qla_bsg_a84_mgmt *ql84_mgmt;
1019         uint32_t sg_cnt;
1020         uint32_t data_len = 0;
1021         uint32_t dma_direction = DMA_NONE;
1022
1023         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1024                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1025                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1026                 ql_log(ql_log_warn, vha, 0x7039,
1027                     "Abort active or needed.\n");
1028                 return -EBUSY;
1029         }
1030
1031         if (!IS_QLA84XX(ha)) {
1032                 ql_log(ql_log_warn, vha, 0x703a,
1033                     "Not 84xx, exiting.\n");
1034                 return -EINVAL;
1035         }
1036
1037         ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
1038                 sizeof(struct fc_bsg_request));
1039         if (!ql84_mgmt) {
1040                 ql_log(ql_log_warn, vha, 0x703b,
1041                     "MGMT header not provided, exiting.\n");
1042                 return -EINVAL;
1043         }
1044
1045         mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1046         if (!mn) {
1047                 ql_log(ql_log_warn, vha, 0x703c,
1048                     "DMA alloc failed for fw buffer.\n");
1049                 return -ENOMEM;
1050         }
1051
1052         memset(mn, 0, sizeof(struct access_chip_84xx));
1053         mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1054         mn->entry_count = 1;
1055
1056         switch (ql84_mgmt->mgmt.cmd) {
1057         case QLA84_MGMT_READ_MEM:
1058         case QLA84_MGMT_GET_INFO:
1059                 sg_cnt = dma_map_sg(&ha->pdev->dev,
1060                         bsg_job->reply_payload.sg_list,
1061                         bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1062                 if (!sg_cnt) {
1063                         ql_log(ql_log_warn, vha, 0x703d,
1064                             "dma_map_sg returned %d for reply.\n", sg_cnt);
1065                         rval = -ENOMEM;
1066                         goto exit_mgmt;
1067                 }
1068
1069                 dma_direction = DMA_FROM_DEVICE;
1070
1071                 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1072                         ql_log(ql_log_warn, vha, 0x703e,
1073                             "DMA mapping resulted in different sg counts, "
1074                             "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1075                             bsg_job->reply_payload.sg_cnt, sg_cnt);
1076                         rval = -EAGAIN;
1077                         goto done_unmap_sg;
1078                 }
1079
1080                 data_len = bsg_job->reply_payload.payload_len;
1081
1082                 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1083                     &mgmt_dma, GFP_KERNEL);
1084                 if (!mgmt_b) {
1085                         ql_log(ql_log_warn, vha, 0x703f,
1086                             "DMA alloc failed for mgmt_b.\n");
1087                         rval = -ENOMEM;
1088                         goto done_unmap_sg;
1089                 }
1090
1091                 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1092                         mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1093                         mn->parameter1 =
1094                                 cpu_to_le32(
1095                                 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1096
1097                 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1098                         mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1099                         mn->parameter1 =
1100                                 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1101
1102                         mn->parameter2 =
1103                                 cpu_to_le32(
1104                                 ql84_mgmt->mgmt.mgmtp.u.info.context);
1105                 }
1106                 break;
1107
1108         case QLA84_MGMT_WRITE_MEM:
1109                 sg_cnt = dma_map_sg(&ha->pdev->dev,
1110                         bsg_job->request_payload.sg_list,
1111                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1112
1113                 if (!sg_cnt) {
1114                         ql_log(ql_log_warn, vha, 0x7040,
1115                             "dma_map_sg returned %d.\n", sg_cnt);
1116                         rval = -ENOMEM;
1117                         goto exit_mgmt;
1118                 }
1119
1120                 dma_direction = DMA_TO_DEVICE;
1121
1122                 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1123                         ql_log(ql_log_warn, vha, 0x7041,
1124                             "DMA mapping resulted in different sg counts, "
1125                             "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1126                             bsg_job->request_payload.sg_cnt, sg_cnt);
1127                         rval = -EAGAIN;
1128                         goto done_unmap_sg;
1129                 }
1130
1131                 data_len = bsg_job->request_payload.payload_len;
1132                 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1133                         &mgmt_dma, GFP_KERNEL);
1134                 if (!mgmt_b) {
1135                         ql_log(ql_log_warn, vha, 0x7042,
1136                             "DMA alloc failed for mgmt_b.\n");
1137                         rval = -ENOMEM;
1138                         goto done_unmap_sg;
1139                 }
1140
1141                 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1142                         bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1143
1144                 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1145                 mn->parameter1 =
1146                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1147                 break;
1148
1149         case QLA84_MGMT_CHNG_CONFIG:
1150                 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1151                 mn->parameter1 =
1152                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1153
1154                 mn->parameter2 =
1155                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1156
1157                 mn->parameter3 =
1158                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1159                 break;
1160
1161         default:
1162                 rval = -EIO;
1163                 goto exit_mgmt;
1164         }
1165
1166         if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1167                 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1168                 mn->dseg_count = cpu_to_le16(1);
1169                 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
1170                 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
1171                 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1172         }
1173
1174         rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1175
1176         if (rval) {
1177                 ql_log(ql_log_warn, vha, 0x7043,
1178                     "Vendor request 84xx mgmt failed.\n");
1179
1180                 rval = bsg_job->reply->reply_payload_rcv_len = 0;
1181                 bsg_job->reply->result = (DID_ERROR << 16);
1182
1183         } else {
1184                 ql_dbg(ql_dbg_user, vha, 0x7044,
1185                     "Vendor request 84xx mgmt completed.\n");
1186
1187                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1188                 bsg_job->reply->result = DID_OK;
1189
1190                 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1191                         (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1192                         bsg_job->reply->reply_payload_rcv_len =
1193                                 bsg_job->reply_payload.payload_len;
1194
1195                         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1196                                 bsg_job->reply_payload.sg_cnt, mgmt_b,
1197                                 data_len);
1198                 }
1199         }
1200
1201         bsg_job->job_done(bsg_job);
1202
1203 done_unmap_sg:
1204         if (mgmt_b)
1205                 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1206
1207         if (dma_direction == DMA_TO_DEVICE)
1208                 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1209                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1210         else if (dma_direction == DMA_FROM_DEVICE)
1211                 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1212                         bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1213
1214 exit_mgmt:
1215         dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1216
1217         return rval;
1218 }
1219
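/*
 * Vendor command: get or set the iiDMA speed of a target port identified
 * by WWPN.  port_param->mode selects set (non-zero) or get (zero); on a
 * get, the updated qla_port_param is copied back after the fc_bsg_reply.
 */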
1220 static int
1221 qla24xx_iidma(struct fc_bsg_job *bsg_job)
1222 {
1223         struct Scsi_Host *host = bsg_job->shost;
1224         scsi_qla_host_t *vha = shost_priv(host);
1225         int rval = 0;
1226         struct qla_port_param *port_param = NULL;
1227         fc_port_t *fcport = NULL;
1228         uint16_t mb[MAILBOX_REGISTER_COUNT];
1229         uint8_t *rsp_ptr = NULL;
1230
1231         bsg_job->reply->reply_payload_rcv_len = 0;
1232
1233         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1234                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1235                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1236                 ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n");
1237                 return -EBUSY;
1238         }
1239
1240         if (!IS_IIDMA_CAPABLE(vha->hw)) {
1241                 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1242                 return -EINVAL;
1243         }
1244
1245         port_param = (struct qla_port_param *)((char *)bsg_job->request +
1246                 sizeof(struct fc_bsg_request));
1247         if (!port_param) {
1248                 ql_log(ql_log_warn, vha, 0x7047,
1249                     "port_param header not provided.\n");
1250                 return -EINVAL;
1251         }
1252
1253         if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1254                 ql_log(ql_log_warn, vha, 0x7048,
1255                     "Invalid destination type.\n");
1256                 return -EINVAL;
1257         }
1258
1259         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1260                 if (fcport->port_type != FCT_TARGET)
1261                         continue;
1262
1263                 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1264                         fcport->port_name, sizeof(fcport->port_name)))
1265                         continue;
1266                 break;
1267         }
1268
1269         if (!fcport) {
1270                 ql_log(ql_log_warn, vha, 0x7049,
1271                     "Failed to find port.\n");
1272                 return -EINVAL;
1273         }
1274
1275         if (atomic_read(&fcport->state) != FCS_ONLINE) {
1276                 ql_log(ql_log_warn, vha, 0x704a,
1277                     "Port is not online.\n");
1278                 return -EINVAL;
1279         }
1280
1281         if (fcport->flags & FCF_LOGIN_NEEDED) {
1282                 ql_log(ql_log_warn, vha, 0x704b,
1283                     "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1284                 return -EINVAL;
1285         }
1286
1287         if (port_param->mode)
1288                 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1289                         port_param->speed, mb);
1290         else
1291                 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1292                         &port_param->speed, mb);
1293
1294         if (rval) {
1295                 ql_log(ql_log_warn, vha, 0x704c,
1296                     "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1297                     "%04x %x %04x %04x.\n", fcport->port_name[0],
1298                     fcport->port_name[1], fcport->port_name[2],
1299                     fcport->port_name[3], fcport->port_name[4],
1300                     fcport->port_name[5], fcport->port_name[6],
1301                     fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
1302                 rval = 0;
1303                 bsg_job->reply->result = (DID_ERROR << 16);
1304
1305         } else {
1306                 if (!port_param->mode) {
1307                         bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1308                                 sizeof(struct qla_port_param);
1309
1310                         rsp_ptr = ((uint8_t *)bsg_job->reply) +
1311                                 sizeof(struct fc_bsg_reply);
1312
1313                         memcpy(rsp_ptr, port_param,
1314                                 sizeof(struct qla_port_param));
1315                 }
1316
1317                 bsg_job->reply->result = DID_OK;
1318         }
1319
1320         bsg_job->job_done(bsg_job);
1321         return rval;
1322 }
1323
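/*
 * Common setup for option ROM read/update requests: validate the starting
 * offset (and, for an update, that it targets a region the driver allows
 * to be written), compute the region size, allocate the staging buffer
 * and move optrom_state to QLA_SREADING or QLA_SWRITING.
 */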
1324 static int
1325 qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1326         uint8_t is_update)
1327 {
1328         uint32_t start = 0;
1329         int valid = 0;
1330         struct qla_hw_data *ha = vha->hw;
1331
1332         bsg_job->reply->reply_payload_rcv_len = 0;
1333
1334         if (unlikely(pci_channel_offline(ha->pdev)))
1335                 return -EINVAL;
1336
1337         start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1338         if (start > ha->optrom_size) {
1339                 ql_log(ql_log_warn, vha, 0x7055,
1340                     "start %d > optrom_size %d.\n", start, ha->optrom_size);
1341                 return -EINVAL;
1342         }
1343
1344         if (ha->optrom_state != QLA_SWAITING) {
1345                 ql_log(ql_log_info, vha, 0x7056,
1346                     "optrom_state %d.\n", ha->optrom_state);
1347                 return -EBUSY;
1348         }
1349
1350         ha->optrom_region_start = start;
1351         ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1352         if (is_update) {
1353                 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1354                         valid = 1;
1355                 else if (start == (ha->flt_region_boot * 4) ||
1356                     start == (ha->flt_region_fw * 4))
1357                         valid = 1;
1358                 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1359                     IS_QLA8XXX_TYPE(ha))
1360                         valid = 1;
1361                 if (!valid) {
1362                         ql_log(ql_log_warn, vha, 0x7058,
1363                             "Invalid start region 0x%x/0x%x.\n", start,
1364                             bsg_job->request_payload.payload_len);
1365                         return -EINVAL;
1366                 }
1367
1368                 ha->optrom_region_size = (start +
1369                     bsg_job->request_payload.payload_len > ha->optrom_size) ?
1370                     ha->optrom_size - start :
1371                     bsg_job->request_payload.payload_len;
1372                 ha->optrom_state = QLA_SWRITING;
1373         } else {
1374                 ha->optrom_region_size = (start +
1375                     bsg_job->reply_payload.payload_len > ha->optrom_size) ?
1376                     ha->optrom_size - start :
1377                     bsg_job->reply_payload.payload_len;
1378                 ha->optrom_state = QLA_SREADING;
1379         }
1380
1381         ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1382         if (!ha->optrom_buffer) {
1383                 ql_log(ql_log_warn, vha, 0x7059,
1384                     "Unable to allocate memory for optrom retrieval "
1385                     "(%x).\n", ha->optrom_region_size);
1386
1387                 ha->optrom_state = QLA_SWAITING;
1388                 return -ENOMEM;
1389         }
1390
1391         memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1392         return 0;
1393 }
1394
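/*
 * QL_VND_READ_FLASH handler: stage the requested flash region into the
 * vmalloc buffer via isp_ops->read_optrom(), copy it into the reply
 * scatter/gather list, then release the buffer and return the flash state
 * machine to QLA_SWAITING before completing the job.
 */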
1395 static int
1396 qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1397 {
1398         struct Scsi_Host *host = bsg_job->shost;
1399         scsi_qla_host_t *vha = shost_priv(host);
1400         struct qla_hw_data *ha = vha->hw;
1401         int rval = 0;
1402
1403         rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1404         if (rval)
1405                 return rval;
1406
1407         ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1408             ha->optrom_region_start, ha->optrom_region_size);
1409
1410         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1411             bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1412             ha->optrom_region_size);
1413
1414         bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
1415         bsg_job->reply->result = DID_OK;
1416         vfree(ha->optrom_buffer);
1417         ha->optrom_buffer = NULL;
1418         ha->optrom_state = QLA_SWAITING;
1419         bsg_job->job_done(bsg_job);
1420         return rval;
1421 }
1422
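/*
 * QL_VND_UPDATE_FLASH handler: gather the request payload into the staging
 * buffer and burn it to flash with isp_ops->write_optrom(), then release the
 * buffer and return the flash state machine to QLA_SWAITING.
 */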
1423 static int
1424 qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1425 {
1426         struct Scsi_Host *host = bsg_job->shost;
1427         scsi_qla_host_t *vha = shost_priv(host);
1428         struct qla_hw_data *ha = vha->hw;
1429         int rval = 0;
1430
1431         rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1432         if (rval)
1433                 return rval;
1434
1435         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1436             bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1437             ha->optrom_region_size);
1438
1439         ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1440             ha->optrom_region_start, ha->optrom_region_size);
1441
1442         bsg_job->reply->result = DID_OK;
1443         vfree(ha->optrom_buffer);
1444         ha->optrom_buffer = NULL;
1445         ha->optrom_state = QLA_SWAITING;
1446         bsg_job->job_done(bsg_job);
1447         return rval;
1448 }
1449
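/*
 * QL_VND_SET_FRU_VERSION handler: the request payload holds a
 * qla_image_version_list; each entry's field_info is copied into a buffer
 * from the DMA pool and programmed through qla2x00_write_sfp() at the
 * device/offset named by its field_address.  Per-command status is returned
 * in vendor_rsp[0] (EXT_STATUS_*), while reply->result is always DID_OK.
 */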
1450 static int
1451 qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1452 {
1453         struct Scsi_Host *host = bsg_job->shost;
1454         scsi_qla_host_t *vha = shost_priv(host);
1455         struct qla_hw_data *ha = vha->hw;
1456         int rval = 0;
1457         uint8_t bsg[DMA_POOL_SIZE];
1458         struct qla_image_version_list *list = (void *)bsg;
1459         struct qla_image_version *image;
1460         uint32_t count;
1461         dma_addr_t sfp_dma;
1462         void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1463         if (!sfp) {
1464                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1465                     EXT_STATUS_NO_MEMORY;
1466                 goto done;
1467         }
1468
1469         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1470             bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1471
1472         image = list->version;
1473         count = list->count;
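        /*
         * Program the entries one at a time; note that 'count' is taken
         * directly from the caller-supplied payload.
         */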
1474         while (count--) {
1475                 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1476                 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1477                     image->field_address.device, image->field_address.offset,
1478                     sizeof(image->field_info), image->field_address.option);
1479                 if (rval) {
1480                         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1481                             EXT_STATUS_MAILBOX;
1482                         goto dealloc;
1483                 }
1484                 image++;
1485         }
1486
1487         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1488
1489 dealloc:
1490         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1491
1492 done:
1493         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1494         bsg_job->reply->result = DID_OK << 16;
1495         bsg_job->job_done(bsg_job);
1496
1497         return 0;
1498 }
1499
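/*
 * QL_VND_READ_FRU_STATUS handler: read the status register addressed by the
 * qla_status_reg in the request payload via qla2x00_read_sfp() and hand the
 * updated structure back in the reply payload.  Mailbox failures are
 * reported through vendor_rsp[0] as EXT_STATUS_MAILBOX.
 */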
1500 static int
1501 qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1502 {
1503         struct Scsi_Host *host = bsg_job->shost;
1504         scsi_qla_host_t *vha = shost_priv(host);
1505         struct qla_hw_data *ha = vha->hw;
1506         int rval = 0;
1507         uint8_t bsg[DMA_POOL_SIZE];
1508         struct qla_status_reg *sr = (void *)bsg;
1509         dma_addr_t sfp_dma;
1510         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1511         if (!sfp) {
1512                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1513                     EXT_STATUS_NO_MEMORY;
1514                 goto done;
1515         }
1516
1517         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1518             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1519
1520         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1521             sr->field_address.device, sr->field_address.offset,
1522             sizeof(sr->status_reg), sr->field_address.option);
1523         sr->status_reg = *sfp;
1524
1525         if (rval) {
1526                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1527                     EXT_STATUS_MAILBOX;
1528                 goto dealloc;
1529         }
1530
1531         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1532             bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1533
1534         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1535
1536 dealloc:
1537         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1538
1539 done:
1540         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1541         bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
1542         bsg_job->reply->result = DID_OK << 16;
1543         bsg_job->job_done(bsg_job);
1544
1545         return 0;
1546 }
1547
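/*
 * QL_VND_WRITE_FRU_STATUS handler: mirror of the read path - the status
 * register value supplied in the request payload is written out through
 * qla2x00_write_sfp() to the addressed device/offset.
 */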
1548 static int
1549 qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1550 {
1551         struct Scsi_Host *host = bsg_job->shost;
1552         scsi_qla_host_t *vha = shost_priv(host);
1553         struct qla_hw_data *ha = vha->hw;
1554         int rval = 0;
1555         uint8_t bsg[DMA_POOL_SIZE];
1556         struct qla_status_reg *sr = (void *)bsg;
1557         dma_addr_t sfp_dma;
1558         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1559         if (!sfp) {
1560                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1561                     EXT_STATUS_NO_MEMORY;
1562                 goto done;
1563         }
1564
1565         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1566             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1567
1568         *sfp = sr->status_reg;
1569         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1570             sr->field_address.device, sr->field_address.offset,
1571             sizeof(sr->status_reg), sr->field_address.option);
1572
1573         if (rval) {
1574                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1575                     EXT_STATUS_MAILBOX;
1576                 goto dealloc;
1577         }
1578
1579         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1580
1581 dealloc:
1582         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1583
1584 done:
1585         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1586         bsg_job->reply->result = DID_OK << 16;
1587         bsg_job->job_done(bsg_job);
1588
1589         return 0;
1590 }
1591
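/*
 * Dispatch FC_BSG_HST_VENDOR requests from the FC transport bsg node based
 * on the QL_VND_* code in vendor_cmd[0].  Unknown codes complete the job
 * with DID_ERROR and return -ENOSYS.
 */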
1592 static int
1593 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1594 {
1595         switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
1596         case QL_VND_LOOPBACK:
1597                 return qla2x00_process_loopback(bsg_job);
1598
1599         case QL_VND_A84_RESET:
1600                 return qla84xx_reset(bsg_job);
1601
1602         case QL_VND_A84_UPDATE_FW:
1603                 return qla84xx_updatefw(bsg_job);
1604
1605         case QL_VND_A84_MGMT_CMD:
1606                 return qla84xx_mgmt_cmd(bsg_job);
1607
1608         case QL_VND_IIDMA:
1609                 return qla24xx_iidma(bsg_job);
1610
1611         case QL_VND_FCP_PRIO_CFG_CMD:
1612                 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1613
1614         case QL_VND_READ_FLASH:
1615                 return qla2x00_read_optrom(bsg_job);
1616
1617         case QL_VND_UPDATE_FLASH:
1618                 return qla2x00_update_optrom(bsg_job);
1619
1620         case QL_VND_SET_FRU_VERSION:
1621                 return qla2x00_update_fru_versions(bsg_job);
1622
1623         case QL_VND_READ_FRU_STATUS:
1624                 return qla2x00_read_fru_status(bsg_job);
1625
1626         case QL_VND_WRITE_FRU_STATUS:
1627                 return qla2x00_write_fru_status(bsg_job);
1628
1629         default:
1630                 bsg_job->reply->result = (DID_ERROR << 16);
1631                 bsg_job->job_done(bsg_job);
1632                 return -ENOSYS;
1633         }
1634 }
1635
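/*
 * Entry point for all bsg requests routed to this HBA by the FC transport.
 * For rport-directed ELS requests the fcport and host are taken from the
 * rport's dd_data; everything else resolves the host from bsg_job->shost.
 * The request is then dispatched on msgcode.
 */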
1636 int
1637 qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1638 {
1639         int ret = -EINVAL;
1640         struct fc_rport *rport;
1641         fc_port_t *fcport = NULL;
1642         struct Scsi_Host *host;
1643         scsi_qla_host_t *vha;
1644
1645         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1646                 rport = bsg_job->rport;
1647                 fcport = *(fc_port_t **) rport->dd_data;
1648                 host = rport_to_shost(rport);
1649                 vha = shost_priv(host);
1650         } else {
1651                 host = bsg_job->shost;
1652                 vha = shost_priv(host);
1653         }
1654
1655         ql_dbg(ql_dbg_user, vha, 0x7000,
1656             "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
1657
1658         switch (bsg_job->request->msgcode) {
1659         case FC_BSG_RPT_ELS:
1660         case FC_BSG_HST_ELS_NOLOGIN:
1661                 ret = qla2x00_process_els(bsg_job);
1662                 break;
1663         case FC_BSG_HST_CT:
1664                 ret = qla2x00_process_ct(bsg_job);
1665                 break;
1666         case FC_BSG_HST_VENDOR:
1667                 ret = qla2x00_process_vendor_specific(bsg_job);
1668                 break;
1669         case FC_BSG_HST_ADD_RPORT:
1670         case FC_BSG_HST_DEL_RPORT:
1671         case FC_BSG_RPT_CT:
1672         default:
1673                 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
1674                 break;
1675         }
1676         return ret;
1677 }
1678
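/*
 * Called by the FC transport when a bsg request times out.  Walk the
 * outstanding commands of every request queue under the hardware lock
 * looking for the ELS/CT SRB that owns this job and abort it in firmware.
 * For host CT pass-through the temporary fcport allocated for the command
 * is freed here along with the SRB context.
 */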
1679 int
1680 qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1681 {
1682         scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
1683         struct qla_hw_data *ha = vha->hw;
1684         srb_t *sp;
1685         int cnt, que;
1686         unsigned long flags;
1687         struct req_que *req;
1688         struct srb_ctx *sp_bsg;
1689
1690         /* find the bsg job from the active list of commands */
1691         spin_lock_irqsave(&ha->hardware_lock, flags);
1692         for (que = 0; que < ha->max_req_queues; que++) {
1693                 req = ha->req_q_map[que];
1694                 if (!req)
1695                         continue;
1696
1697                 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1698                         sp = req->outstanding_cmds[cnt];
1699                         if (sp) {
1700                                 sp_bsg = sp->ctx;
1701
1702                                 if (((sp_bsg->type == SRB_CT_CMD) ||
1703                                         (sp_bsg->type == SRB_ELS_CMD_HST))
1704                                         && (sp_bsg->u.bsg_job == bsg_job)) {
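                                        /*
                                         * abort_command() issues a mailbox
                                         * command and may sleep, so drop
                                         * the hardware lock around it.
                                         */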
1705                                         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1706                                         if (ha->isp_ops->abort_command(sp)) {
1707                                                 ql_log(ql_log_warn, vha, 0x7089,
1708                                                     "mbx abort_command "
1709                                                     "failed.\n");
1710                                                 bsg_job->req->errors =
1711                                                 bsg_job->reply->result = -EIO;
1712                                         } else {
1713                                                 ql_dbg(ql_dbg_user, vha, 0x708a,
1714                                                     "mbx abort_command "
1715                                                     "success.\n");
1716                                                 bsg_job->req->errors =
1717                                                 bsg_job->reply->result = 0;
1718                                         }
1719                                         spin_lock_irqsave(&ha->hardware_lock, flags);
1720                                         goto done;
1721                                 }
1722                         }
1723                 }
1724         }
1725         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1726         ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
1727         bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
1728         return 0;
1729
1730 done:
1731         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1732         if (bsg_job->request->msgcode == FC_BSG_HST_CT)
1733                 kfree(sp->fcport);
1734         kfree(sp->ctx);
1735         mempool_free(sp, ha->srb_mempool);
1736         return 0;
1737 }