1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
21 #include <linux/interrupt.h>
22 #include <linux/mempool.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_transport_fc.h>
30 #include <scsi/scsi_bsg_fc.h>
31 #include <scsi/fc/fc_fs.h>
36 #include "lpfc_sli4.h"
39 #include "lpfc_disc.h"
40 #include "lpfc_scsi.h"
42 #include "lpfc_logmsg.h"
43 #include "lpfc_crtn.h"
44 #include "lpfc_vport.h"
45 #include "lpfc_version.h"
47 struct lpfc_bsg_event {
48 struct list_head node;
52 /* Event type and waiter identifiers */
57 /* next two flags are here for the auto-delete logic */
58 unsigned long wait_time_stamp;
61 /* seen and not seen events */
62 struct list_head events_to_get;
63 struct list_head events_to_see;
65 /* job waiting for this event to finish */
66 struct fc_bsg_job *set_job;
69 struct lpfc_bsg_iocb {
70 struct lpfc_iocbq *cmdiocbq;
71 struct lpfc_iocbq *rspiocbq;
72 struct lpfc_dmabuf *bmp;
73 struct lpfc_nodelist *ndlp;
75 /* job waiting for this iocb to finish */
76 struct fc_bsg_job *set_job;
79 struct lpfc_bsg_mbox {
82 struct lpfc_dmabuf *rxbmp; /* for BIU diags */
83 struct lpfc_dmabufext *dmp; /* for BIU diags */
84 uint8_t *ext; /* extended mailbox data */
85 uint32_t mbOffset; /* from app */
86 uint32_t inExtWLen; /* from app */
87 uint32_t outExtWLen; /* from app */
89 /* job waiting for this mbox command to finish */
90 struct fc_bsg_job *set_job;
93 #define MENLO_DID 0x0000FC0E
95 struct lpfc_bsg_menlo {
96 struct lpfc_iocbq *cmdiocbq;
97 struct lpfc_iocbq *rspiocbq;
98 struct lpfc_dmabuf *bmp;
100 /* job waiting for this iocb to finish */
101 struct fc_bsg_job *set_job;
108 struct bsg_job_data {
111 struct lpfc_bsg_event *evt;
112 struct lpfc_bsg_iocb iocb;
113 struct lpfc_bsg_mbox mbox;
114 struct lpfc_bsg_menlo menlo;
119 struct list_head node;
126 #define BUF_SZ_4K 4096
127 #define SLI_CT_ELX_LOOPBACK 0x10
129 enum ELX_LOOPBACK_CMD {
130 ELX_LOOPBACK_XRI_SETUP,
134 #define ELX_LOOPBACK_HEADER_SZ \
135 (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
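/*
 * Note: the cast-through-NULL expression above is a hand-rolled
 * offsetof(); the header size is equivalently
 *
 *	offsetof(struct lpfc_sli_ct_request, un)
 *
 * i.e. the number of CT request bytes that precede the payload union.
 */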
137 struct lpfc_dmabufext {
138 struct lpfc_dmabuf dma;
144 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
145 * @phba: Pointer to HBA context object.
146 * @cmdiocbq: Pointer to command iocb.
147 * @rspiocbq: Pointer to response iocb.
149 * This function is the completion handler for iocbs issued using
150 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
151 * ring event handler function without any lock held. This function
152 * can be called from both worker thread context and interrupt
153 * context. This function also can be called from another thread which
154 * cleans up the SLI layer objects.
155 * This function copies the contents of the response iocb to the
156 * response iocb memory object provided by the caller of
157 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
158 * sleeps for the iocb completion.
161 lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
162 struct lpfc_iocbq *cmdiocbq,
163 struct lpfc_iocbq *rspiocbq)
165 unsigned long iflags;
166 struct bsg_job_data *dd_data;
167 struct fc_bsg_job *job;
169 struct lpfc_dmabuf *bmp;
170 struct lpfc_nodelist *ndlp;
171 struct lpfc_bsg_iocb *iocb;
175 spin_lock_irqsave(&phba->ct_ev_lock, flags);
176 dd_data = cmdiocbq->context1;
178 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
182 iocb = &dd_data->context_un.iocb;
184 job->dd_data = NULL; /* so timeout handler does not reply */
186 spin_lock_irqsave(&phba->hbalock, iflags);
187 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
188 if (cmdiocbq->context2 && rspiocbq)
189 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
190 &rspiocbq->iocb, sizeof(IOCB_t));
191 spin_unlock_irqrestore(&phba->hbalock, iflags);
194 rspiocbq = iocb->rspiocbq;
195 rsp = &rspiocbq->iocb;
198 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
199 job->request_payload.sg_cnt, DMA_TO_DEVICE);
200 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
201 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
203 if (rsp->ulpStatus) {
204 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
205 switch (rsp->un.ulpWord[4] & 0xff) {
206 case IOERR_SEQUENCE_TIMEOUT:
209 case IOERR_INVALID_RPI:
219 job->reply->reply_payload_rcv_len =
220 rsp->un.genreq64.bdl.bdeSize;
222 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
223 lpfc_sli_release_iocbq(phba, rspiocbq);
224 lpfc_sli_release_iocbq(phba, cmdiocbq);
228 /* make error code available to userspace */
229 job->reply->result = rc;
230 /* complete the job back to userspace */
232 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
237 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
238 * @job: fc_bsg_job to handle
241 lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
243 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
244 struct lpfc_hba *phba = vport->phba;
245 struct lpfc_rport_data *rdata = job->rport->dd_data;
246 struct lpfc_nodelist *ndlp = rdata->pnode;
247 struct ulp_bde64 *bpl = NULL;
249 struct lpfc_iocbq *cmdiocbq = NULL;
250 struct lpfc_iocbq *rspiocbq = NULL;
253 struct lpfc_dmabuf *bmp = NULL;
256 struct scatterlist *sgel = NULL;
259 struct bsg_job_data *dd_data;
264 /* in case no data is transferred */
265 job->reply->reply_payload_rcv_len = 0;
267 /* allocate our bsg tracking structure */
268 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
270 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
271 "2733 Failed allocation of dd_data\n");
276 if (!lpfc_nlp_get(ndlp)) {
281 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
287 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
292 cmdiocbq = lpfc_sli_get_iocbq(phba);
298 cmd = &cmdiocbq->iocb;
299 rspiocbq = lpfc_sli_get_iocbq(phba);
305 rsp = &rspiocbq->iocb;
306 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
312 INIT_LIST_HEAD(&bmp->list);
313 bpl = (struct ulp_bde64 *) bmp->virt;
314 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
315 job->request_payload.sg_cnt, DMA_TO_DEVICE);
316 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
317 busaddr = sg_dma_address(sgel);
318 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
319 bpl->tus.f.bdeSize = sg_dma_len(sgel);
320 bpl->tus.w = cpu_to_le32(bpl->tus.w);
321 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
322 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
326 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
327 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
328 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
329 busaddr = sg_dma_address(sgel);
330 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
331 bpl->tus.f.bdeSize = sg_dma_len(sgel);
332 bpl->tus.w = cpu_to_le32(bpl->tus.w);
333 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
334 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
338 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
339 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
340 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
341 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
342 cmd->un.genreq64.bdl.bdeSize =
343 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
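/*
 * A single BLP_64 BDE hands the adapter the buffer pointer list built
 * above: the request BDEs (BUFF_TYPE_BDE_64, transmit) come first,
 * followed by the reply BDEs (BUFF_TYPE_BDE_64I, receive), all within
 * the one mbuf at bmp->phys.
 */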
344 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
345 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
346 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
347 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
348 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
349 cmd->ulpBdeCount = 1;
351 cmd->ulpClass = CLASS3;
352 cmd->ulpContext = ndlp->nlp_rpi;
353 cmd->ulpOwner = OWN_CHIP;
354 cmdiocbq->vport = phba->pport;
355 cmdiocbq->context3 = bmp;
356 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
357 timeout = phba->fc_ratov * 2;
358 cmd->ulpTimeout = timeout;
360 cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
361 cmdiocbq->context1 = dd_data;
362 cmdiocbq->context2 = rspiocbq;
363 dd_data->type = TYPE_IOCB;
364 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
365 dd_data->context_un.iocb.rspiocbq = rspiocbq;
366 dd_data->context_un.iocb.set_job = job;
367 dd_data->context_un.iocb.bmp = bmp;
368 dd_data->context_un.iocb.ndlp = ndlp;
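/*
 * Everything the completion side needs is now reachable through
 * dd_data via cmdiocbq->context1; lpfc_bsg_send_mgmt_cmd_cmp() and the
 * transport timeout path look the resources up there, which is why
 * this function can return before the iocb actually completes.
 */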
370 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
371 creg_val = readl(phba->HCregaddr);
372 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
373 writel(creg_val, phba->HCregaddr);
374 readl(phba->HCregaddr); /* flush */
377 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
378 if (iocb_stat == IOCB_SUCCESS)
379 return 0; /* done for now */
380 else if (iocb_stat == IOCB_BUSY)
386 /* iocb failed so cleanup */
387 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
388 job->request_payload.sg_cnt, DMA_TO_DEVICE);
389 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
390 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
392 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
395 lpfc_sli_release_iocbq(phba, rspiocbq);
397 lpfc_sli_release_iocbq(phba, cmdiocbq);
405 /* make error code available to userspace */
406 job->reply->result = rc;
412 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
413 * @phba: Pointer to HBA context object.
414 * @cmdiocbq: Pointer to command iocb.
415 * @rspiocbq: Pointer to response iocb.
417 * This function is the completion handler for iocbs issued using
418 * lpfc_bsg_rport_els function. This function is called by the
419 * ring event handler function without any lock held. This function
420 * can be called from both worker thread context and interrupt
421 * context. This function also can be called from another thread which
422 * cleans up the SLI layer objects.
423 * This function copies the contents of the response iocb to the
424 * response iocb memory object provided by the caller of
425 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
426 * sleeps for the iocb completion.
429 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
430 struct lpfc_iocbq *cmdiocbq,
431 struct lpfc_iocbq *rspiocbq)
433 struct bsg_job_data *dd_data;
434 struct fc_bsg_job *job;
436 struct lpfc_nodelist *ndlp;
437 struct lpfc_dmabuf *pbuflist = NULL;
438 struct fc_bsg_ctels_reply *els_reply;
443 spin_lock_irqsave(&phba->ct_ev_lock, flags);
444 dd_data = cmdiocbq->context1;
445 /* normal completion and timeout crossed paths, already done */
447 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
451 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
452 if (cmdiocbq->context2 && rspiocbq)
453 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
454 &rspiocbq->iocb, sizeof(IOCB_t));
456 job = dd_data->context_un.iocb.set_job;
457 cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
458 rspiocbq = dd_data->context_un.iocb.rspiocbq;
459 rsp = &rspiocbq->iocb;
460 ndlp = dd_data->context_un.iocb.ndlp;
462 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
463 job->request_payload.sg_cnt, DMA_TO_DEVICE);
464 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
465 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
467 if (job->reply->result == -EAGAIN)
469 else if (rsp->ulpStatus == IOSTAT_SUCCESS)
470 job->reply->reply_payload_rcv_len =
471 rsp->un.elsreq64.bdl.bdeSize;
472 else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
473 job->reply->reply_payload_rcv_len =
474 sizeof(struct fc_bsg_ctels_reply);
475 /* LS_RJT data returned in word 4 */
476 rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
477 els_reply = &job->reply->reply_data.ctels_reply;
478 els_reply->status = FC_CTELS_STATUS_REJECT;
479 els_reply->rjt_data.action = rjt_data[3];
480 els_reply->rjt_data.reason_code = rjt_data[2];
481 els_reply->rjt_data.reason_explanation = rjt_data[1];
482 els_reply->rjt_data.vendor_unique = rjt_data[0];
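/*
 * ulpWord[4] packs the four one-byte LS_RJT fields into a single word;
 * viewing the word as a byte array and indexing rjt_data[3] down to
 * rjt_data[0] unpacks the action, reason code, reason explanation and
 * vendor unique fields for the bsg reply.
 */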
486 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
487 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
488 lpfc_sli_release_iocbq(phba, rspiocbq);
489 lpfc_sli_release_iocbq(phba, cmdiocbq);
492 /* make error code available to userspace */
493 job->reply->result = rc;
495 /* complete the job back to userspace */
497 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
502 * lpfc_bsg_rport_els - send an ELS command from a bsg request
503 * @job: fc_bsg_job to handle
506 lpfc_bsg_rport_els(struct fc_bsg_job *job)
508 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
509 struct lpfc_hba *phba = vport->phba;
510 struct lpfc_rport_data *rdata = job->rport->dd_data;
511 struct lpfc_nodelist *ndlp = rdata->pnode;
515 struct lpfc_iocbq *rspiocbq;
516 struct lpfc_iocbq *cmdiocbq;
519 struct lpfc_dmabuf *pcmd;
520 struct lpfc_dmabuf *prsp;
521 struct lpfc_dmabuf *pbuflist = NULL;
522 struct ulp_bde64 *bpl;
525 struct scatterlist *sgel = NULL;
528 struct bsg_job_data *dd_data;
532 /* in case no data is transferred */
533 job->reply->reply_payload_rcv_len = 0;
535 /* allocate our bsg tracking structure */
536 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
538 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
539 "2735 Failed allocation of dd_data\n");
544 if (!lpfc_nlp_get(ndlp)) {
549 elscmd = job->request->rqst_data.r_els.els_code;
550 cmdsize = job->request_payload.payload_len;
551 rspsize = job->reply_payload.payload_len;
552 rspiocbq = lpfc_sli_get_iocbq(phba);
559 rsp = &rspiocbq->iocb;
562 cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
563 ndlp->nlp_DID, elscmd);
569 /* prep els iocb sets context1 to the ndlp, context2 to the command
570 * dmabuf and context3 to the data dmabuf
572 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
573 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
574 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
576 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
578 cmdiocbq->context2 = NULL;
580 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
581 bpl = (struct ulp_bde64 *) pbuflist->virt;
583 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
584 job->request_payload.sg_cnt, DMA_TO_DEVICE);
585 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
586 busaddr = sg_dma_address(sgel);
587 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
588 bpl->tus.f.bdeSize = sg_dma_len(sgel);
589 bpl->tus.w = cpu_to_le32(bpl->tus.w);
590 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
591 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
595 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
596 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
597 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
598 busaddr = sg_dma_address(sgel);
599 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
600 bpl->tus.f.bdeSize = sg_dma_len(sgel);
601 bpl->tus.w = cpu_to_le32(bpl->tus.w);
602 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
603 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
606 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
607 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
608 cmdiocbq->iocb.ulpContext = rpi;
609 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
610 cmdiocbq->context1 = NULL;
611 cmdiocbq->context2 = NULL;
613 cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
614 cmdiocbq->context1 = dd_data;
615 cmdiocbq->context2 = rspiocbq;
616 dd_data->type = TYPE_IOCB;
617 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
618 dd_data->context_un.iocb.rspiocbq = rspiocbq;
619 dd_data->context_un.iocb.set_job = job;
620 dd_data->context_un.iocb.bmp = NULL;
621 dd_data->context_un.iocb.ndlp = ndlp;
623 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
624 creg_val = readl(phba->HCregaddr);
625 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
626 writel(creg_val, phba->HCregaddr);
627 readl(phba->HCregaddr); /* flush */
629 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
631 if (rc == IOCB_SUCCESS)
632 return 0; /* done for now */
633 else if (rc == IOCB_BUSY)
638 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
639 job->request_payload.sg_cnt, DMA_TO_DEVICE);
640 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
641 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
643 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
645 lpfc_sli_release_iocbq(phba, cmdiocbq);
648 lpfc_sli_release_iocbq(phba, rspiocbq);
654 /* make error code available to userspace */
655 job->reply->result = rc;
661 * lpfc_bsg_event_free - frees an allocated event structure
662 * @kref: Pointer to a kref.
664 * Called from kref_put. Back cast the kref into an event structure address.
665 * Free any events on the events_to_get list, then any on the events_to_see
666 * list, freeing each event's data, then free the event itself.
669 lpfc_bsg_event_free(struct kref *kref)
671 struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
673 struct event_data *ed;
675 list_del(&evt->node);
677 while (!list_empty(&evt->events_to_get)) {
678 ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
684 while (!list_empty(&evt->events_to_see)) {
685 ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
695 * lpfc_bsg_event_ref - increments the kref for an event
696 * @evt: Pointer to an event structure.
699 lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
701 kref_get(&evt->kref);
705 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
706 * @evt: Pointer to an event structure.
709 lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
711 kref_put(&evt->kref, lpfc_bsg_event_free);
715 * lpfc_bsg_event_new - allocate and initialize an event structure
716 * @ev_mask: Mask of events.
717 * @ev_reg_id: Event reg id.
718 * @ev_req_id: Event request id.
720 static struct lpfc_bsg_event *
721 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
723 struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
728 INIT_LIST_HEAD(&evt->events_to_get);
729 INIT_LIST_HEAD(&evt->events_to_see);
730 evt->type_mask = ev_mask;
731 evt->req_id = ev_req_id;
732 evt->reg_id = ev_reg_id;
733 evt->wait_time_stamp = jiffies;
734 init_waitqueue_head(&evt->wq);
735 kref_init(&evt->kref);
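/*
 * A minimal sketch of the intended reference pattern, matching the
 * callers in this file:
 *
 *	evt = lpfc_bsg_event_new(mask, reg_id, req_id);	  kref == 1
 *	lpfc_bsg_event_ref(evt);	+1 while queued on ct_ev_waiters
 *	...
 *	lpfc_bsg_event_unref(evt);	drop the waiter reference
 *	lpfc_bsg_event_unref(evt);	final put calls lpfc_bsg_event_free()
 */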
740 * diag_cmd_data_free - Frees an lpfc dma buffer extension
741 * @phba: Pointer to HBA context object.
742 * @mlist: Pointer to an lpfc dma buffer extension.
745 diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
747 struct lpfc_dmabufext *mlast;
748 struct pci_dev *pcidev;
749 struct list_head head, *curr, *next;
751 if ((!mlist) || (!lpfc_is_link_up(phba) &&
752 (phba->link_flag & LS_LOOPBACK_MODE))) {
756 pcidev = phba->pcidev;
757 list_add_tail(&head, &mlist->dma.list);
759 list_for_each_safe(curr, next, &head) {
760 mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
762 dma_free_coherent(&pcidev->dev,
772 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
777 * This function is called when an unsolicited CT command is received. It
778 * forwards the event to any processes registered to receive CT events.
781 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
782 struct lpfc_iocbq *piocbq)
784 uint32_t evt_req_id = 0;
787 struct lpfc_dmabuf *dmabuf = NULL;
788 struct lpfc_bsg_event *evt;
789 struct event_data *evt_dat = NULL;
790 struct lpfc_iocbq *iocbq;
792 struct list_head head;
793 struct ulp_bde64 *bde;
796 struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
797 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
798 struct lpfc_hbq_entry *hbqe;
799 struct lpfc_sli_ct_request *ct_req;
800 struct fc_bsg_job *job = NULL;
804 INIT_LIST_HEAD(&head);
805 list_add_tail(&head, &piocbq->list);
807 if (piocbq->iocb.ulpBdeCount == 0 ||
808 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
809 goto error_ct_unsol_exit;
811 if (phba->link_state == LPFC_HBA_ERROR ||
812 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
813 goto error_ct_unsol_exit;
815 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
818 dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
819 piocbq->iocb.un.cont64[0].addrLow);
820 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
823 goto error_ct_unsol_exit;
824 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
825 evt_req_id = ct_req->FsType;
826 cmd = ct_req->CommandResponse.bits.CmdRsp;
827 len = ct_req->CommandResponse.bits.Size;
828 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
829 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
831 spin_lock_irqsave(&phba->ct_ev_lock, flags);
832 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
833 if (!(evt->type_mask & FC_REG_CT_EVENT) ||
834 evt->req_id != evt_req_id)
837 lpfc_bsg_event_ref(evt);
838 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
839 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
840 if (evt_dat == NULL) {
841 spin_lock_irqsave(&phba->ct_ev_lock, flags);
842 lpfc_bsg_event_unref(evt);
843 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
844 "2614 Memory allocation failed for "
849 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
850 /* take accumulated byte count from the last iocbq */
851 iocbq = list_entry(head.prev, typeof(*iocbq), list);
852 evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
854 list_for_each_entry(iocbq, &head, list) {
855 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
857 iocbq->iocb.un.cont64[i].tus.f.bdeSize;
861 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
862 if (evt_dat->data == NULL) {
863 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
864 "2615 Memory allocation failed for "
865 "CT event data, size %d\n",
868 spin_lock_irqsave(&phba->ct_ev_lock, flags);
869 lpfc_bsg_event_unref(evt);
870 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
871 goto error_ct_unsol_exit;
874 list_for_each_entry(iocbq, &head, list) {
876 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
877 bdeBuf1 = iocbq->context2;
878 bdeBuf2 = iocbq->context3;
880 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
881 if (phba->sli3_options &
882 LPFC_SLI3_HBQ_ENABLED) {
884 hbqe = (struct lpfc_hbq_entry *)
885 &iocbq->iocb.un.ulpWord[0];
886 size = hbqe->bde.tus.f.bdeSize;
889 hbqe = (struct lpfc_hbq_entry *)
892 size = hbqe->bde.tus.f.bdeSize;
895 if ((offset + size) > evt_dat->len)
896 size = evt_dat->len - offset;
898 size = iocbq->iocb.un.cont64[i].
900 bde = &iocbq->iocb.un.cont64[i];
901 dma_addr = getPaddr(bde->addrHigh,
903 dmabuf = lpfc_sli_ringpostbuf_get(phba,
907 lpfc_printf_log(phba, KERN_ERR,
908 LOG_LIBDFC, "2616 No dmabuf "
909 "found for iocbq 0x%p\n",
911 kfree(evt_dat->data);
913 spin_lock_irqsave(&phba->ct_ev_lock,
915 lpfc_bsg_event_unref(evt);
916 spin_unlock_irqrestore(
917 &phba->ct_ev_lock, flags);
918 goto error_ct_unsol_exit;
920 memcpy((char *)(evt_dat->data) + offset,
923 if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
924 !(phba->sli3_options &
925 LPFC_SLI3_HBQ_ENABLED)) {
926 lpfc_sli_ringpostbuf_put(phba, pring,
930 case ELX_LOOPBACK_DATA:
931 diag_cmd_data_free(phba,
932 (struct lpfc_dmabufext *)
935 case ELX_LOOPBACK_XRI_SETUP:
936 if ((phba->sli_rev ==
938 (phba->sli3_options &
939 LPFC_SLI3_HBQ_ENABLED
941 lpfc_in_buf_free(phba,
944 lpfc_post_buffer(phba,
950 if (!(phba->sli3_options &
951 LPFC_SLI3_HBQ_ENABLED))
952 lpfc_post_buffer(phba,
961 spin_lock_irqsave(&phba->ct_ev_lock, flags);
962 if (phba->sli_rev == LPFC_SLI_REV4) {
963 evt_dat->immed_dat = phba->ctx_idx;
964 phba->ctx_idx = (phba->ctx_idx + 1) % 64;
965 /* Provide warning for over-run of the ct_ctx array */
966 if (phba->ct_ctx[evt_dat->immed_dat].flags &
968 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
969 "2717 CT context array entry "
970 "[%d] over-run: oxid:x%x, "
971 "sid:x%x\n", phba->ctx_idx,
973 evt_dat->immed_dat].oxid,
975 evt_dat->immed_dat].SID);
976 phba->ct_ctx[evt_dat->immed_dat].oxid =
977 piocbq->iocb.ulpContext;
978 phba->ct_ctx[evt_dat->immed_dat].SID =
979 piocbq->iocb.un.rcvels.remoteID;
980 phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
982 evt_dat->immed_dat = piocbq->iocb.ulpContext;
984 evt_dat->type = FC_REG_CT_EVENT;
985 list_add(&evt_dat->node, &evt->events_to_see);
986 if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
987 wake_up_interruptible(&evt->wq);
988 lpfc_bsg_event_unref(evt);
992 list_move(evt->events_to_see.prev, &evt->events_to_get);
993 lpfc_bsg_event_unref(evt);
998 job->reply->reply_payload_rcv_len = size;
999 /* make error code available to userspace */
1000 job->reply->result = 0;
1001 job->dd_data = NULL;
1002 /* complete the job back to userspace */
1003 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1005 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1008 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1010 error_ct_unsol_exit:
1011 if (!list_empty(&head))
1013 if (evt_req_id == SLI_CT_ELX_LOOPBACK)
1019 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
1020 * @job: SET_EVENT fc_bsg_job
1023 lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
1025 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1026 struct lpfc_hba *phba = vport->phba;
1027 struct set_ct_event *event_req;
1028 struct lpfc_bsg_event *evt;
1030 struct bsg_job_data *dd_data = NULL;
1032 unsigned long flags;
1034 if (job->request_len <
1035 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
1036 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1037 "2612 Received SET_CT_EVENT below minimum "
1043 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1044 if (dd_data == NULL) {
1045 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1046 "2734 Failed allocation of dd_data\n");
1051 event_req = (struct set_ct_event *)
1052 job->request->rqst_data.h_vendor.vendor_cmd;
1053 ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
1055 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1056 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1057 if (evt->reg_id == event_req->ev_reg_id) {
1058 lpfc_bsg_event_ref(evt);
1059 evt->wait_time_stamp = jiffies;
1063 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1065 if (&evt->node == &phba->ct_ev_waiters) {
1066 /* no event waiting struct yet - first call */
1067 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1068 event_req->ev_req_id);
1070 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1071 "2617 Failed allocation of event "
1077 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1078 list_add(&evt->node, &phba->ct_ev_waiters);
1079 lpfc_bsg_event_ref(evt);
1080 evt->wait_time_stamp = jiffies;
1081 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1084 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1086 dd_data->type = TYPE_EVT;
1087 dd_data->context_un.evt = evt;
1088 evt->set_job = job; /* for unsolicited command */
1089 job->dd_data = dd_data; /* for fc transport timeout callback */
1090 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1091 return 0; /* call job done later */
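/*
 * Note that SET_CT_EVENT is not completed here: the job stays pending,
 * linked through dd_data, until lpfc_bsg_ct_unsol_event() matches an
 * incoming CT event and completes it back to userspace.
 */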
1094 if (dd_data != NULL)
1097 job->dd_data = NULL;
1102 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1103 * @job: GET_EVENT fc_bsg_job
1106 lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
1108 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1109 struct lpfc_hba *phba = vport->phba;
1110 struct get_ct_event *event_req;
1111 struct get_ct_event_reply *event_reply;
1112 struct lpfc_bsg_event *evt;
1113 struct event_data *evt_dat = NULL;
1114 unsigned long flags;
1117 if (job->request_len <
1118 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1119 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1120 "2613 Received GET_CT_EVENT request below "
1126 event_req = (struct get_ct_event *)
1127 job->request->rqst_data.h_vendor.vendor_cmd;
1129 event_reply = (struct get_ct_event_reply *)
1130 job->reply->reply_data.vendor_reply.vendor_rsp;
1131 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1132 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1133 if (evt->reg_id == event_req->ev_reg_id) {
1134 if (list_empty(&evt->events_to_get))
1136 lpfc_bsg_event_ref(evt);
1137 evt->wait_time_stamp = jiffies;
1138 evt_dat = list_entry(evt->events_to_get.prev,
1139 struct event_data, node);
1140 list_del(&evt_dat->node);
1144 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1146 /* The app may continue to ask for event data until it gets
1147 * an error indicating that there is no more
1149 if (evt_dat == NULL) {
1150 job->reply->reply_payload_rcv_len = 0;
1155 if (evt_dat->len > job->request_payload.payload_len) {
1156 evt_dat->len = job->request_payload.payload_len;
1157 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1158 "2618 Truncated event data at %d "
1160 job->request_payload.payload_len);
1163 event_reply->type = evt_dat->type;
1164 event_reply->immed_data = evt_dat->immed_dat;
1165 if (evt_dat->len > 0)
1166 job->reply->reply_payload_rcv_len =
1167 sg_copy_from_buffer(job->request_payload.sg_list,
1168 job->request_payload.sg_cnt,
1169 evt_dat->data, evt_dat->len);
1171 job->reply->reply_payload_rcv_len = 0;
1174 kfree(evt_dat->data);
1178 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1179 lpfc_bsg_event_unref(evt);
1180 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1181 job->dd_data = NULL;
1182 job->reply->result = 0;
1187 job->dd_data = NULL;
1188 job->reply->result = rc;
1193 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1194 * @phba: Pointer to HBA context object.
1195 * @cmdiocbq: Pointer to command iocb.
1196 * @rspiocbq: Pointer to response iocb.
1198 * This function is the completion handler for iocbs issued using
1199 * lpfc_issue_ct_rsp function. This function is called by the
1200 * ring event handler function without any lock held. This function
1201 * can be called from both worker thread context and interrupt
1202 * context. This function also can be called from another thread which
1203 * cleans up the SLI layer objects.
1204 * This function copies the contents of the response iocb to the
1205 * response iocb memory object provided by the caller of
1206 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
1207 * sleeps for the iocb completion.
1210 lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1211 struct lpfc_iocbq *cmdiocbq,
1212 struct lpfc_iocbq *rspiocbq)
1214 struct bsg_job_data *dd_data;
1215 struct fc_bsg_job *job;
1217 struct lpfc_dmabuf *bmp;
1218 struct lpfc_nodelist *ndlp;
1219 unsigned long flags;
1222 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1223 dd_data = cmdiocbq->context1;
1224 /* normal completion and timeout crossed paths, already done */
1226 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1230 job = dd_data->context_un.iocb.set_job;
1231 bmp = dd_data->context_un.iocb.bmp;
1232 rsp = &rspiocbq->iocb;
1233 ndlp = dd_data->context_un.iocb.ndlp;
1235 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1236 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1238 if (rsp->ulpStatus) {
1239 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1240 switch (rsp->un.ulpWord[4] & 0xff) {
1241 case IOERR_SEQUENCE_TIMEOUT:
1244 case IOERR_INVALID_RPI:
1254 job->reply->reply_payload_rcv_len =
1255 rsp->un.genreq64.bdl.bdeSize;
1257 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1258 lpfc_sli_release_iocbq(phba, cmdiocbq);
1262 /* make error code available to userspace */
1263 job->reply->result = rc;
1264 job->dd_data = NULL;
1265 /* complete the job back to userspace */
1267 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1272 * lpfc_issue_ct_rsp - issue a ct response
1273 * @phba: Pointer to HBA context object.
1274 * @job: Pointer to the job object.
1275 * @tag: tag index value into the ports context exchange array.
1276 * @bmp: Pointer to a dma buffer descriptor.
1277 * @num_entry: Number of entries in the bde.
1280 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1281 struct lpfc_dmabuf *bmp, int num_entry)
1284 struct lpfc_iocbq *ctiocb = NULL;
1286 struct lpfc_nodelist *ndlp = NULL;
1287 struct bsg_job_data *dd_data;
1290 /* allocate our bsg tracking structure */
1291 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1293 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1294 "2736 Failed allocation of dd_data\n");
1299 /* Allocate buffer for command iocb */
1300 ctiocb = lpfc_sli_get_iocbq(phba);
1306 icmd = &ctiocb->iocb;
1307 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1308 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1309 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1310 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1311 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
1312 icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1313 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1314 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1315 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1317 /* Fill in rest of iocb */
1318 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
1319 icmd->ulpBdeCount = 1;
1321 icmd->ulpClass = CLASS3;
1322 if (phba->sli_rev == LPFC_SLI_REV4) {
1323 /* Do not issue unsol response if oxid not marked as valid */
1324 if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
1326 goto issue_ct_rsp_exit;
1328 icmd->ulpContext = phba->ct_ctx[tag].oxid;
1329 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1331 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1332 "2721 ndlp null for oxid %x SID %x\n",
1334 phba->ct_ctx[tag].SID);
1336 goto issue_ct_rsp_exit;
1339 /* Check if the ndlp is active */
1340 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1342 goto issue_ct_rsp_exit;
1345 /* get a reference count so the ndlp doesn't go away while
1348 if (!lpfc_nlp_get(ndlp)) {
1350 goto issue_ct_rsp_exit;
1353 icmd->un.ulpWord[3] = ndlp->nlp_rpi;
1354 /* The exchange is done, mark the entry as invalid */
1355 phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
1357 icmd->ulpContext = (ushort) tag;
1359 icmd->ulpTimeout = phba->fc_ratov * 2;
1361 /* Xmit CT response on exchange <xid> */
1362 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1363 "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
1364 icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
1366 ctiocb->iocb_cmpl = NULL;
1367 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1368 ctiocb->vport = phba->pport;
1369 ctiocb->context3 = bmp;
1371 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1372 ctiocb->context1 = dd_data;
1373 ctiocb->context2 = NULL;
1374 dd_data->type = TYPE_IOCB;
1375 dd_data->context_un.iocb.cmdiocbq = ctiocb;
1376 dd_data->context_un.iocb.rspiocbq = NULL;
1377 dd_data->context_un.iocb.set_job = job;
1378 dd_data->context_un.iocb.bmp = bmp;
1379 dd_data->context_un.iocb.ndlp = ndlp;
1381 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1382 creg_val = readl(phba->HCregaddr);
1383 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1384 writel(creg_val, phba->HCregaddr);
1385 readl(phba->HCregaddr); /* flush */
1388 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1390 if (rc == IOCB_SUCCESS)
1391 return 0; /* done for now */
1394 lpfc_sli_release_iocbq(phba, ctiocb);
1402 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1403 * @job: SEND_MGMT_RESP fc_bsg_job
1406 lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1408 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1409 struct lpfc_hba *phba = vport->phba;
1410 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1411 job->request->rqst_data.h_vendor.vendor_cmd;
1412 struct ulp_bde64 *bpl;
1413 struct lpfc_dmabuf *bmp = NULL;
1414 struct scatterlist *sgel = NULL;
1418 uint32_t tag = mgmt_resp->tag;
1419 unsigned long reqbfrcnt =
1420 (unsigned long)job->request_payload.payload_len;
1423 /* in case no data is transferred */
1424 job->reply->reply_payload_rcv_len = 0;
1426 if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1428 goto send_mgmt_rsp_exit;
1431 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1434 goto send_mgmt_rsp_exit;
1437 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1440 goto send_mgmt_rsp_free_bmp;
1443 INIT_LIST_HEAD(&bmp->list);
1444 bpl = (struct ulp_bde64 *) bmp->virt;
1445 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
1446 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1447 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
1448 busaddr = sg_dma_address(sgel);
1449 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1450 bpl->tus.f.bdeSize = sg_dma_len(sgel);
1451 bpl->tus.w = cpu_to_le32(bpl->tus.w);
1452 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
1453 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
1457 rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
1459 if (rc == IOCB_SUCCESS)
1460 return 0; /* done for now */
1462 /* TBD need to handle a timeout */
1463 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1464 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1466 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1468 send_mgmt_rsp_free_bmp:
1471 /* make error code available to userspace */
1472 job->reply->result = rc;
1473 job->dd_data = NULL;
1478 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
1479 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1481 * This function is responsible for placing a port into diagnostic loopback
1482 * mode in order to perform a diagnostic loopback test.
1483 * All new scsi requests are blocked, a small delay is used to allow the
1484 * scsi requests to complete, then the link is brought down. If the link
1485 * is placed in loopback mode then scsi requests are again allowed
1486 * so the scsi mid-layer doesn't give up on the port.
1487 * All of this is done in-line.
1490 lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1492 struct Scsi_Host *shost = job->shost;
1493 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1494 struct lpfc_hba *phba = vport->phba;
1495 struct diag_mode_set *loopback_mode;
1496 struct lpfc_sli *psli = &phba->sli;
1497 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1498 uint32_t link_flags;
1500 struct lpfc_vport **vports;
1501 LPFC_MBOXQ_t *pmboxq;
1506 /* no data to return, just the return code */
1507 job->reply->reply_payload_rcv_len = 0;
1509 if (job->request_len <
1510 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
1511 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1512 "2738 Received DIAG MODE request below minimum "
1518 loopback_mode = (struct diag_mode_set *)
1519 job->request->rqst_data.h_vendor.vendor_cmd;
1520 link_flags = loopback_mode->type;
1521 timeout = loopback_mode->timeout * 100;
1523 if ((phba->link_state == LPFC_HBA_ERROR) ||
1524 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1525 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
1530 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1536 vports = lpfc_create_vport_work_array(phba);
1538 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1539 shost = lpfc_shost_from_vport(vports[i]);
1540 scsi_block_requests(shost);
1543 lpfc_destroy_vport_work_array(phba, vports);
1545 shost = lpfc_shost_from_vport(phba->pport);
1546 scsi_block_requests(shost);
1549 while (pring->txcmplq_cnt) {
1550 if (i++ > 500) /* wait up to 5 seconds */
1556 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1557 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1558 pmboxq->u.mb.mbxOwner = OWN_HOST;
1560 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1562 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1563 /* wait for link down before proceeding */
1565 while (phba->link_state != LPFC_LINK_DOWN) {
1566 if (i++ > timeout) {
1568 goto loopback_mode_exit;
1574 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1575 if (link_flags == INTERNAL_LOOP_BACK)
1576 pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1578 pmboxq->u.mb.un.varInitLnk.link_flags =
1579 FLAGS_TOPOLOGY_MODE_LOOP;
1581 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1582 pmboxq->u.mb.mbxOwner = OWN_HOST;
1584 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1587 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1590 phba->link_flag |= LS_LOOPBACK_MODE;
1591 /* wait for the link attention interrupt */
1595 while (phba->link_state != LPFC_HBA_READY) {
1596 if (i++ > timeout) {
1609 vports = lpfc_create_vport_work_array(phba);
1611 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1612 shost = lpfc_shost_from_vport(vports[i]);
1613 scsi_unblock_requests(shost);
1615 lpfc_destroy_vport_work_array(phba, vports);
1617 shost = lpfc_shost_from_vport(phba->pport);
1618 scsi_unblock_requests(shost);
1622 * Let SLI layer release mboxq if mbox command completed after timeout.
1624 if (mbxstatus != MBX_TIMEOUT)
1625 mempool_free(pmboxq, phba->mbox_mem_pool);
1628 /* make error code available to userspace */
1629 job->reply->result = rc;
1630 /* complete the job back to userspace if no error */
1637 * lpfcdiag_loop_self_reg - obtains a remote port login id
1638 * @phba: Pointer to HBA context object
1639 * @rpi: Pointer to a remote port login id
1641 * This function obtains a remote port login id so the diag loopback test
1642 * can send and receive its own unsolicited CT command.
1644 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
1647 struct lpfc_dmabuf *dmabuff;
1650 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1654 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
1655 (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
1657 mempool_free(mbox, phba->mbox_mem_pool);
1661 dmabuff = (struct lpfc_dmabuf *) mbox->context1;
1662 mbox->context1 = NULL;
1663 mbox->context2 = NULL;
1664 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1666 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1667 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
1669 if (status != MBX_TIMEOUT)
1670 mempool_free(mbox, phba->mbox_mem_pool);
1674 *rpi = mbox->u.mb.un.varWords[0];
1676 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
1678 mempool_free(mbox, phba->mbox_mem_pool);
1683 * lpfcdiag_loop_self_unreg - unregs from the rpi
1684 * @phba: Pointer to HBA context object
1685 * @rpi: Remote port login id
1687 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
1689 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
1694 /* Allocate mboxq structure */
1695 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1699 lpfc_unreg_login(phba, 0, rpi, mbox);
1700 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1702 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1703 if (status != MBX_TIMEOUT)
1704 mempool_free(mbox, phba->mbox_mem_pool);
1708 mempool_free(mbox, phba->mbox_mem_pool);
1713 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
1714 * @phba: Pointer to HBA context object
1715 * @rpi: Remote port login id
1716 * @txxri: Pointer to transmit exchange id
1717 * @rxxri: Pointer to receive exchange id
1719 * This function obtains the transmit and receive ids required to send
1720 * an unsolicited ct command with a payload. Special lpfc FsType and CmdRsp
1721 * values are used so that the unsolicited response handler is able to process
1722 * the ct command sent on the same port.
1724 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1725 uint16_t *txxri, uint16_t *rxxri)
1727 struct lpfc_bsg_event *evt;
1728 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
1730 struct lpfc_dmabuf *dmabuf;
1731 struct ulp_bde64 *bpl = NULL;
1732 struct lpfc_sli_ct_request *ctreq = NULL;
1736 unsigned long flags;
1740 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
1741 SLI_CT_ELX_LOOPBACK);
1745 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1746 list_add(&evt->node, &phba->ct_ev_waiters);
1747 lpfc_bsg_event_ref(evt);
1748 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1750 cmdiocbq = lpfc_sli_get_iocbq(phba);
1751 rspiocbq = lpfc_sli_get_iocbq(phba);
1753 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1755 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
1757 INIT_LIST_HEAD(&dmabuf->list);
1758 bpl = (struct ulp_bde64 *) dmabuf->virt;
1759 memset(bpl, 0, sizeof(*bpl));
1760 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
1762 le32_to_cpu(putPaddrHigh(dmabuf->phys +
1765 le32_to_cpu(putPaddrLow(dmabuf->phys +
1767 bpl->tus.f.bdeFlags = 0;
1768 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
1769 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1773 if (cmdiocbq == NULL || rspiocbq == NULL ||
1774 dmabuf == NULL || bpl == NULL || ctreq == NULL ||
1775 dmabuf->virt == NULL) {
1777 goto err_get_xri_exit;
1780 cmd = &cmdiocbq->iocb;
1781 rsp = &rspiocbq->iocb;
1783 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
1785 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
1786 ctreq->RevisionId.bits.InId = 0;
1787 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
1788 ctreq->FsSubType = 0;
1789 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
1790 ctreq->CommandResponse.bits.Size = 0;
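/*
 * FsType SLI_CT_ELX_LOOPBACK with CmdRsp ELX_LOOPBACK_XRI_SETUP tags
 * this CT frame so that, when it loops back, lpfc_bsg_ct_unsol_event()
 * matches it to the event registered above rather than treating it as
 * an ordinary unsolicited command.
 */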
1793 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
1794 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
1795 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1796 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
1798 cmd->un.xseq64.w5.hcsw.Fctl = LA;
1799 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
1800 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
1801 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1803 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
1804 cmd->ulpBdeCount = 1;
1806 cmd->ulpClass = CLASS3;
1807 cmd->ulpContext = rpi;
1809 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
1810 cmdiocbq->vport = phba->pport;
1812 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
1814 (phba->fc_ratov * 2)
1815 + LPFC_DRVR_TIMEOUT);
1818 goto err_get_xri_exit;
1820 *txxri = rsp->ulpContext;
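/*
 * The transmit exchange id comes straight from the solicited response;
 * the receive exchange id travels back through the loopback event and
 * is pulled off the events_to_get list below.
 */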
1823 evt->wait_time_stamp = jiffies;
1824 time_left = wait_event_interruptible_timeout(
1825 evt->wq, !list_empty(&evt->events_to_see),
1826 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
1827 if (list_empty(&evt->events_to_see))
1828 ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
1830 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1831 list_move(evt->events_to_see.prev, &evt->events_to_get);
1832 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1833 *rxxri = (list_entry(evt->events_to_get.prev,
1834 typeof(struct event_data),
1840 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1841 lpfc_bsg_event_unref(evt); /* release ref */
1842 lpfc_bsg_event_unref(evt); /* delete */
1843 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1847 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
1851 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
1852 lpfc_sli_release_iocbq(phba, cmdiocbq);
1854 lpfc_sli_release_iocbq(phba, rspiocbq);
1859 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
1860 * @phba: Pointer to HBA context object
1861 * @bpl: Pointer to 64 bit bde structure
1862 * @size: Number of bytes to process
1863 * @nocopydata: Flag to skip zeroing the allocated buffers
1865 * This function allocates page size buffers and populates an lpfc_dmabufext
1866 * chain; when @nocopydata is clear each buffer is zeroed before use, otherwise
1867 * it is left untouched. The chained list of page size buffers is returned.
1869 static struct lpfc_dmabufext *
1870 diag_cmd_data_alloc(struct lpfc_hba *phba,
1871 struct ulp_bde64 *bpl, uint32_t size,
1874 struct lpfc_dmabufext *mlist = NULL;
1875 struct lpfc_dmabufext *dmp;
1876 int cnt, offset = 0, i = 0;
1877 struct pci_dev *pcidev;
1879 pcidev = phba->pcidev;
1882 /* We get chunks of 4K */
1883 if (size > BUF_SZ_4K)
1888 /* allocate struct lpfc_dmabufext buffer header */
1889 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
1893 INIT_LIST_HEAD(&dmp->dma.list);
1895 /* Queue it to a linked list */
1897 list_add_tail(&dmp->dma.list, &mlist->dma.list);
1901 /* allocate buffer */
1902 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
1913 bpl->tus.f.bdeFlags = 0;
1914 pci_dma_sync_single_for_device(phba->pcidev,
1915 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1918 memset((uint8_t *)dmp->dma.virt, 0, cnt);
1919 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1922 /* build buffer ptr list for IOCB */
1923 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
1924 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
1925 bpl->tus.f.bdeSize = (ushort) cnt;
1926 bpl->tus.w = le32_to_cpu(bpl->tus.w);
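/*
 * One BDE per chunk: address split high/low plus the flags/size word.
 * le32_to_cpu() is symmetric with cpu_to_le32() (both are a byte swap
 * on big-endian hosts and a no-op on little-endian), so the effect is
 * the little-endian layout the adapter expects.
 */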
1937 diag_cmd_data_free(phba, mlist);
1942 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
1943 * @phba: Pointer to HBA context object
1944 * @rxxri: Receive exchange id
1945 * @len: Number of data bytes
1947 * This function allocates and posts a data buffer of sufficient size to receive
1948 * an unsolicited CT command.
1950 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
1953 struct lpfc_sli *psli = &phba->sli;
1954 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1955 struct lpfc_iocbq *cmdiocbq;
1957 struct list_head head, *curr, *next;
1958 struct lpfc_dmabuf *rxbmp;
1959 struct lpfc_dmabuf *dmp;
1960 struct lpfc_dmabuf *mp[2] = {NULL, NULL};
1961 struct ulp_bde64 *rxbpl = NULL;
1963 struct lpfc_dmabufext *rxbuffer = NULL;
1968 cmdiocbq = lpfc_sli_get_iocbq(phba);
1969 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1970 if (rxbmp != NULL) {
1971 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
1973 INIT_LIST_HEAD(&rxbmp->list);
1974 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
1975 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
1979 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
1981 goto err_post_rxbufs_exit;
1984 /* Queue buffers for the receive exchange */
1985 num_bde = (uint32_t)rxbuffer->flag;
1986 dmp = &rxbuffer->dma;
1988 cmd = &cmdiocbq->iocb;
1991 INIT_LIST_HEAD(&head);
1992 list_add_tail(&head, &dmp->list);
1993 list_for_each_safe(curr, next, &head) {
1994 mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
1997 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
1998 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
1999 cmd->un.quexri64cx.buff.bde.addrHigh =
2000 putPaddrHigh(mp[i]->phys);
2001 cmd->un.quexri64cx.buff.bde.addrLow =
2002 putPaddrLow(mp[i]->phys);
2003 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2004 ((struct lpfc_dmabufext *)mp[i])->size;
2005 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2006 cmd->ulpCommand = CMD_QUE_XRI64_CX;
2009 cmd->ulpBdeCount = 1;
2010 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2013 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2014 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2015 cmd->un.cont64[i].tus.f.bdeSize =
2016 ((struct lpfc_dmabufext *)mp[i])->size;
2017 cmd->ulpBdeCount = ++i;
2019 if ((--num_bde > 0) && (i < 2))
2022 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
2026 cmd->ulpClass = CLASS3;
2027 cmd->ulpContext = rxxri;
2029 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2031 if (iocb_stat == IOCB_ERROR) {
2032 diag_cmd_data_free(phba,
2033 (struct lpfc_dmabufext *)mp[0]);
2035 diag_cmd_data_free(phba,
2036 (struct lpfc_dmabufext *)mp[1]);
2037 dmp = list_entry(next, struct lpfc_dmabuf, list);
2039 goto err_post_rxbufs_exit;
2042 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2044 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2048 /* The iocb was freed by lpfc_sli_issue_iocb */
2049 cmdiocbq = lpfc_sli_get_iocbq(phba);
2051 dmp = list_entry(next, struct lpfc_dmabuf, list);
2053 goto err_post_rxbufs_exit;
2056 cmd = &cmdiocbq->iocb;
2061 err_post_rxbufs_exit:
2065 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2070 lpfc_sli_release_iocbq(phba, cmdiocbq);
2075 * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
2076 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2078 * This function receives a user data buffer to be transmitted and received on
2079 * the same port, the link must be up and in loopback mode prior
2081 * 1. A kernel buffer is allocated to copy the user data into.
2082 * 2. The port registers with "itself".
2083 * 3. The transmit and receive exchange ids are obtained.
2084 * 4. The receive exchange id is posted.
2085 * 5. A new els loopback event is created.
2086 * 6. The command and response iocbs are allocated.
2087 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
2089 * This function is meant to be called n times while the port is in loopback
2090 * so it is the app's responsibility to issue a reset to take the port out
2094 lpfc_bsg_diag_test(struct fc_bsg_job *job)
2096 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2097 struct lpfc_hba *phba = vport->phba;
2098 struct diag_mode_test *diag_mode;
2099 struct lpfc_bsg_event *evt;
2100 struct event_data *evdat;
2101 struct lpfc_sli *psli = &phba->sli;
2104 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2106 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2108 struct lpfc_sli_ct_request *ctreq;
2109 struct lpfc_dmabuf *txbmp;
2110 struct ulp_bde64 *txbpl = NULL;
2111 struct lpfc_dmabufext *txbuffer = NULL;
2112 struct list_head head;
2113 struct lpfc_dmabuf *curr;
2114 uint16_t txxri, rxxri;
2116 uint8_t *ptr = NULL, *rx_databuf = NULL;
2120 unsigned long flags;
2121 void *dataout = NULL;
2124 /* in case no data is returned, return just the return code */
2125 job->reply->reply_payload_rcv_len = 0;
2127 if (job->request_len <
2128 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2129 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2130 "2739 Received DIAG TEST request below minimum "
2133 goto loopback_test_exit;
2136 if (job->request_payload.payload_len !=
2137 job->reply_payload.payload_len) {
2139 goto loopback_test_exit;
2142 diag_mode = (struct diag_mode_test *)
2143 job->request->rqst_data.h_vendor.vendor_cmd;
2145 if ((phba->link_state == LPFC_HBA_ERROR) ||
2146 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2147 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2149 goto loopback_test_exit;
2152 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2154 goto loopback_test_exit;
2157 size = job->request_payload.payload_len;
2158 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
2160 if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2162 goto loopback_test_exit;
2165 if (size >= BUF_SZ_4K) {
2167 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2168 * then we allocate 64k and re-use that buffer over and over to
2169 * xfer the whole block. This is because the Linux kernel has a
2170 * problem allocating more than 120k of kernel space memory. Saw a
2171 * problem with GET_FCPTARGETMAPPING...
2173 if (size <= (64 * 1024))
2176 total_mem = 64 * 1024;
2178 /* Allocate memory for ioctl data */
2179 total_mem = BUF_SZ_4K;
2181 dataout = kmalloc(total_mem, GFP_KERNEL);
2182 if (dataout == NULL) {
2184 goto loopback_test_exit;
2188 ptr += ELX_LOOPBACK_HEADER_SZ;
2189 sg_copy_to_buffer(job->request_payload.sg_list,
2190 job->request_payload.sg_cnt,
2193 rc = lpfcdiag_loop_self_reg(phba, &rpi);
2195 goto loopback_test_exit;
2197 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2199 lpfcdiag_loop_self_unreg(phba, rpi);
2200 goto loopback_test_exit;
2203 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2205 lpfcdiag_loop_self_unreg(phba, rpi);
2206 goto loopback_test_exit;
2209 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2210 SLI_CT_ELX_LOOPBACK);
2212 lpfcdiag_loop_self_unreg(phba, rpi);
2214 goto loopback_test_exit;
2217 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2218 list_add(&evt->node, &phba->ct_ev_waiters);
2219 lpfc_bsg_event_ref(evt);
2220 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2222 cmdiocbq = lpfc_sli_get_iocbq(phba);
2223 rspiocbq = lpfc_sli_get_iocbq(phba);
2224 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2227 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
2229 INIT_LIST_HEAD(&txbmp->list);
2230 txbpl = (struct ulp_bde64 *) txbmp->virt;
2231 txbuffer = diag_cmd_data_alloc(phba,
2232 txbpl, full_size, 0);
2236 if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
2239 goto err_loopback_test_exit;
2242 cmd = &cmdiocbq->iocb;
2243 rsp = &rspiocbq->iocb;
2245 INIT_LIST_HEAD(&head);
2246 list_add_tail(&head, &txbuffer->dma.list);
	list_for_each_entry(curr, &head, list) {
		segment_len = ((struct lpfc_dmabufext *)curr)->size;
		if (current_offset == 0) {
			ctreq = curr->virt;
			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
			ctreq->RevisionId.bits.InId = 0;
			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
			ctreq->FsSubType = 0;
			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
			ctreq->CommandResponse.bits.Size = size;
			segment_offset = ELX_LOOPBACK_HEADER_SZ;
		} else
			segment_offset = 0;

		BUG_ON(segment_offset >= segment_len);
		memcpy(curr->virt + segment_offset,
		       ptr + current_offset,
		       segment_len - segment_offset);

		current_offset += segment_len - segment_offset;
		BUG_ON(current_offset > size);
	}
	list_del(&head);
	/* Build the XMIT_SEQUENCE iocb */
	num_bde = (uint32_t)txbuffer->flag;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));

	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = txxri;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
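	/*
	 * Transmit on the tx exchange; since the link is in loopback mode
	 * the sequence returns as unsolicited CT data on the rx exchange
	 * whose buffers were posted by lpfcdiag_loop_post_rxbufs above.
	 */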
	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					     rspiocbq, (phba->fc_ratov * 2) +
					     LPFC_DRVR_TIMEOUT);

	if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
		rc = -EIO;
		goto err_loopback_test_exit;
	}
	evt->waiting = 1;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
	evt->waiting = 0;
	if (list_empty(&evt->events_to_see))
		rc = (time_left) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		evdat = list_entry(evt->events_to_get.prev,
				   typeof(*evdat), node);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		rx_databuf = evdat->data;
		if (evdat->len != full_size) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"1603 Loopback test did not receive expected "
				"data length. actual length 0x%x expected "
				"length 0x%x\n",
				evdat->len, full_size);
			rc = -EIO;
		} else if (rx_databuf == NULL)
			rc = -EIO;
		else {
			rc = IOCB_SUCCESS;
			/* skip over elx loopback header */
			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
			job->reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    rx_databuf, size);
			job->reply->reply_payload_rcv_len = size;
		}
	}
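	/*
	 * Common unwind path for both success and failure: drop the self
	 * login, release the event and the iocbs, and free the transmit
	 * buffer chain before completing the job.
	 */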
err_loopback_test_exit:
	lpfcdiag_loop_self_unreg(phba, rpi);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (cmdiocbq != NULL)
		lpfc_sli_release_iocbq(phba, cmdiocbq);

	if (rspiocbq != NULL)
		lpfc_sli_release_iocbq(phba, rspiocbq);

	if (txbmp != NULL) {
		if (txbpl != NULL) {
			if (txbuffer != NULL)
				diag_cmd_data_free(phba, txbuffer);
			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
		}
		kfree(txbmp);
	}

loopback_test_exit:
	kfree(dataout);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
 * @job: GET_DFC_REV fc_bsg_job
 **/
static int
lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_mgmt_rev *event_req;
	struct get_mgmt_rev_reply *event_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2740 Received GET_DFC_REV request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_mgmt_rev *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_mgmt_rev_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2741 Received GET_DFC_REV reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;

job_error:
	job->reply->result = rc;
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler for mailbox commands issued from the
 * lpfc_bsg_issue_mbox function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the wait queue pointed to by
 * context1 of the mailbox.
 **/
void
lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	uint32_t size;
	unsigned long flags;
	uint8_t *to;
	uint8_t *from;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = pmboxq->context1;
	/* job already timed out? */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	/* build the outgoing buffer to do an sg copy
	 * the format is the response mailbox followed by any extended
	 * mailbox data
	 */
	from = (uint8_t *)&pmboxq->u.mb;
	to = (uint8_t *)dd_data->context_un.mbox.mb;
	memcpy(to, from, sizeof(MAILBOX_t));
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
		/* copy the extended data if any, count is in words */
		if (dd_data->context_un.mbox.outExtWLen) {
			from = (uint8_t *)dd_data->context_un.mbox.ext;
			to += sizeof(MAILBOX_t);
			size = dd_data->context_un.mbox.outExtWLen *
					sizeof(uint32_t);
			memcpy(to, from, size);
		} else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
			from = (uint8_t *)dd_data->context_un.mbox.
						dmp->dma.virt;
			to += sizeof(MAILBOX_t);
			size = dd_data->context_un.mbox.dmp->size;
			memcpy(to, from, size);
		} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
			(pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
			from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
						virt;
			to += sizeof(MAILBOX_t);
			size = pmboxq->u.mb.un.varWords[5];
			memcpy(to, from, size);
		} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
			(pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
			struct lpfc_mbx_nembed_cmd *nembed_sge =
				(struct lpfc_mbx_nembed_cmd *)
				&pmboxq->u.mb.un.varWords[0];

			from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
						virt;
			to += sizeof(MAILBOX_t);
			size = nembed_sge->sge[0].length;
			memcpy(to, from, size);
		} else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
			from = (uint8_t *)dd_data->context_un.
						mbox.dmp->dma.virt;
			to += sizeof(MAILBOX_t);
			size = dd_data->context_un.mbox.dmp->size;
			memcpy(to, from, size);
		}
	}
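	/*
	 * Each branch above located where the firmware deposited the
	 * command's extra output (mailbox extension or separate DMA buffer)
	 * and appended it behind the response mailbox, so one contiguous
	 * sg copy can return everything to the application.
	 */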
	from = (uint8_t *)dd_data->context_un.mbox.mb;
	job = dd_data->context_un.mbox.set_job;
	size = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    from, size);
	job->reply->result = 0;

	dd_data->context_un.mbox.set_job = NULL;
	job->dd_data = NULL;
	job->job_done(job);
	/* need to hold the lock until we call job done to hold off
	 * the timeout handler returning to the midlayer while
	 * we are still processing the job
	 */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	kfree(dd_data->context_un.mbox.mb);
	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
	kfree(dd_data->context_un.mbox.ext);
	if (dd_data->context_un.mbox.dmp) {
		dma_free_coherent(&phba->pcidev->dev,
			dd_data->context_un.mbox.dmp->size,
			dd_data->context_un.mbox.dmp->dma.virt,
			dd_data->context_un.mbox.dmp->dma.phys);
		kfree(dd_data->context_un.mbox.dmp);
	}
	if (dd_data->context_un.mbox.rxbmp) {
		lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
			       dd_data->context_un.mbox.rxbmp->phys);
		kfree(dd_data->context_un.mbox.rxbmp);
	}
	kfree(dd_data);
	return;
}

/**
 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
 * @phba: Pointer to HBA context object.
 * @mb: Pointer to a mailbox object.
 * @vport: Pointer to a vport object.
 *
 * Some commands require the port to be offline, some may not be called from
 * the application.
 **/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
	MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_SET_MASK:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2743 Command 0x%x is illegal in on-line "
				"state\n",
				mb->mbxCommand);
			return -EPERM;
		}
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_BEACON:
	case MBX_DEL_LD_ENTRY:
	case MBX_SET_DEBUG:
	case MBX_WRITE_WWN:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_RUN_BIU_DIAG64:
		break;
	case MBX_SET_VARIABLE:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1226 mbox: set_variable 0x%x, 0x%x\n",
			mb->un.varWords[0],
			mb->un.varWords[1]);
		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
			&& (mb->un.varWords[1] == 1)) {
			phba->wait_4_mlo_maint_flg = 1;
		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
			phba->link_flag &= ~LS_LOOPBACK_MODE;
			phba->fc_topology = TOPOLOGY_PT_PT;
		}
		break;
	case MBX_READ_SPARM64:
	case MBX_READ_LA:
	case MBX_READ_LA64:
	case MBX_REG_LOGIN:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
			"2742 Unknown Command 0x%x\n",
			mb->mbxCommand);
		return -EPERM;
	}

	return 0; /* ok */
}

/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the bsg job carrying the mailbox command.
 * @vport: Pointer to a vport object.
 *
 * Allocate a tracking object, mailbox command memory, get a mailbox
 * from the mailbox pool, copy the caller mailbox command.
 *
 * If offline and the sli is active we need to poll for the command (port is
 * being reset) and complete the job, otherwise issue the mailbox command and
 * let our completion handler finish the command.
 **/
static uint32_t
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
	struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
	/* a 4k buffer to hold the mb and extended data from/to the bsg */
	MAILBOX_t *mb = NULL;
	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
	uint32_t size;
	struct lpfc_dmabuf *rxbmp = NULL; /* for BIU diags */
	struct lpfc_dmabufext *dmp = NULL; /* for BIU diags */
	struct ulp_bde64 *rxbpl = NULL;
	struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	uint8_t *ext = NULL;
	int rc = 0;
	uint8_t *from;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;
	/* check if requested extended data lengths are valid */
	if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
	    (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) {
		rc = -ERANGE;
		goto job_done;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2727 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_done;
	}

	mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
	if (!mb) {
		rc = -ENOMEM;
		goto job_done;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_done;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  mb, size);

	rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
	if (rc != 0)
		goto job_done; /* must be negative */
	pmb = &pmboxq->u.mb;
	memcpy(pmb, mb, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = vport;

	/* If HBA encountered an error attention, allow only DUMP
	 * or RESTART mailbox commands until the HBA is restarted.
	 */
	if (phba->pport->stopped &&
	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
	    pmb->mbxCommand != MBX_RESTART &&
	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
	    pmb->mbxCommand != MBX_WRITE_WWN)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"2797 mbox: Issued mailbox cmd "
				"0x%x while in stopped state.\n",
				pmb->mbxCommand);

	/* Don't allow mailbox commands to be sent when blocked
	 * or when in the middle of discovery
	 */
	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_done;
	}
	/* extended mailbox commands will need an extended buffer */
	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
		ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
		if (!ext) {
			rc = -ENOMEM;
			goto job_done;
		}

		/* any data for the device? */
		if (mbox_req->inExtWLen) {
			from = (uint8_t *)mb;
			from += sizeof(MAILBOX_t);
			memcpy((uint8_t *)ext, from,
			       mbox_req->inExtWLen * sizeof(uint32_t));
		}

		pmboxq->context2 = ext;
		pmboxq->in_ext_byte_len =
			mbox_req->inExtWLen * sizeof(uint32_t);
		pmboxq->out_ext_byte_len =
			mbox_req->outExtWLen * sizeof(uint32_t);
		pmboxq->mbox_offset_word = mbox_req->mbOffset;
	}
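	/*
	 * in_ext_byte_len/out_ext_byte_len tell the SLI layer how many
	 * bytes of extension data to shuttle between the context2 buffer
	 * and the port's mailbox extension area at issue and completion.
	 */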
	/* biu diag will need a kernel buffer to transfer the data
	 * allocate our own buffer and setup the mailbox command to
	 * use ours
	 */
	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
		uint32_t transmit_length = pmb->un.varWords[1];
		uint32_t receive_length = pmb->un.varWords[4];
		/* transmit length cannot be greater than receive length or
		 * mailbox extension size
		 */
		if ((transmit_length > receive_length) ||
		    (transmit_length > MAILBOX_EXT_SIZE)) {
			rc = -ERANGE;
			goto job_done;
		}

		rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!rxbmp) {
			rc = -ENOMEM;
			goto job_done;
		}

		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
		if (!rxbmp->virt) {
			rc = -ENOMEM;
			goto job_done;
		}

		INIT_LIST_HEAD(&rxbmp->list);
		rxbpl = (struct ulp_bde64 *) rxbmp->virt;
		dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
		if (!dmp) {
			rc = -ENOMEM;
			goto job_done;
		}

		INIT_LIST_HEAD(&dmp->dma.list);
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
			putPaddrHigh(dmp->dma.phys);
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
			putPaddrLow(dmp->dma.phys);

		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
			putPaddrHigh(dmp->dma.phys +
				pmb->un.varBIUdiag.un.s2.
					xmit_bde64.tus.f.bdeSize);
		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
			putPaddrLow(dmp->dma.phys +
				pmb->un.varBIUdiag.un.s2.
					xmit_bde64.tus.f.bdeSize);

		/* copy the transmit data found in the mailbox extension area */
		from = (uint8_t *)mb;
		from += sizeof(MAILBOX_t);
		memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
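		/*
		 * The single DMA block set up above is split in two: the
		 * transmit data sits at the start and the receive BDE
		 * points just past it, so the diagnostic's returned data
		 * lands in the same allocation.
		 */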
	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
		struct READ_EVENT_LOG_VAR *rdEventLog =
			&pmb->un.varRdEventLog;
		uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
		uint32_t mode = bf_get(lpfc_event_log, rdEventLog);

		/* receive length cannot be greater than mailbox
		 * extension size
		 */
		if (receive_length > MAILBOX_EXT_SIZE) {
			rc = -ERANGE;
			goto job_done;
		}

		/* mode zero uses a bde like biu diags command */
		if (mode == 0) {
			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			rxbmp = kmalloc(sizeof(struct lpfc_dmabuf),
					GFP_KERNEL);
			if (!rxbmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			if (rxbpl) {
				INIT_LIST_HEAD(&rxbmp->list);
				dmp = diag_cmd_data_alloc(phba, rxbpl,
							  receive_length, 0);
			}
			if (!dmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&dmp->dma.list);
			pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
			pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			uint32_t receive_length = pmb->un.varWords[2];
			/* receive length cannot be greater than mailbox
			 * extension size
			 */
			if ((receive_length == 0) ||
			    (receive_length > MAILBOX_EXT_SIZE)) {
				rc = -ERANGE;
				goto job_done;
			}

			rxbmp = kmalloc(sizeof(struct lpfc_dmabuf),
					GFP_KERNEL);
			if (!rxbmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
			if (!rxbmp->virt) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			dmp = diag_cmd_data_alloc(phba, rxbpl,
						  receive_length, 0);
			if (!dmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&dmp->dma.list);
			pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
			pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
			   pmb->un.varUpdateCfg.co) {
			struct ulp_bde64 *bde =
				(struct ulp_bde64 *)&pmb->un.varWords[4];

			/* bde size cannot be greater than mailbox ext size */
			if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
				rc = -ERANGE;
				goto job_done;
			}

			rxbmp = kmalloc(sizeof(struct lpfc_dmabuf),
					GFP_KERNEL);
			if (!rxbmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
			if (!rxbmp->virt) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			dmp = diag_cmd_data_alloc(phba, rxbpl,
						  bde->tus.f.bdeSize, 0);
			if (!dmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&dmp->dma.list);
			bde->addrHigh = putPaddrHigh(dmp->dma.phys);
			bde->addrLow = putPaddrLow(dmp->dma.phys);

			/* copy the transmit data found in the mailbox
			 * extension area
			 */
			from = (uint8_t *)mb;
			from += sizeof(MAILBOX_t);
			memcpy((uint8_t *)dmp->dma.virt, from,
			       bde->tus.f.bdeSize);
		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
			struct lpfc_mbx_nembed_cmd *nembed_sge;
			struct mbox_header *header;
			uint32_t receive_length;

			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			header = (struct mbox_header *)&pmb->un.varWords[0];
			nembed_sge = (struct lpfc_mbx_nembed_cmd *)
				&pmb->un.varWords[0];
			receive_length = nembed_sge->sge[0].length;

			/* receive length cannot be greater than mailbox
			 * extension size
			 */
			if ((receive_length == 0) ||
			    (receive_length > MAILBOX_EXT_SIZE)) {
				rc = -ERANGE;
				goto job_done;
			}

			rxbmp = kmalloc(sizeof(struct lpfc_dmabuf),
					GFP_KERNEL);
			if (!rxbmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
			if (!rxbmp->virt) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			dmp = diag_cmd_data_alloc(phba, rxbpl,
						  receive_length, 0);
			if (!dmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&dmp->dma.list);
			nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
			nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
			/* copy the transmit data found in the mailbox
			 * extension area
			 */
			from = (uint8_t *)mb;
			from += sizeof(MAILBOX_t);
			memcpy((uint8_t *)dmp->dma.virt, from,
			       header->cfg_mhdr.payload_length);
		}
	}
	dd_data->context_un.mbox.rxbmp = rxbmp;
	dd_data->context_un.mbox.dmp = dmp;

	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;

	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = mb;
	dd_data->context_un.mbox.set_job = job;
	dd_data->context_un.mbox.ext = ext;
	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
	job->dd_data = dd_data;
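	/*
	 * An offline port (or inactive SLI) cannot deliver interrupt-driven
	 * mailbox completions, so poll the command inline and finish the
	 * job here; otherwise issue it asynchronously and let
	 * lpfc_bsg_wake_mbox_wait complete the job.
	 */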
	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
			goto job_done;
		}

		/* job finished, copy the data */
		memcpy(mb, pmb, sizeof(*pmb));
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    mb, size);
		/* not waiting, mbox already done */
		rc = 0;
		goto job_done;
	}

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
		return 1; /* job started */

job_done:
	/* common exit for error or job completed inline */
	kfree(mb);
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	kfree(ext);
	if (dmp) {
		dma_free_coherent(&phba->pcidev->dev,
				  dmp->size, dmp->dma.virt,
				  dmp->dma.phys);
		kfree(dmp);
	}
	if (rxbmp) {
		lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
		kfree(rxbmp);
	}
	kfree(dd_data);

	return rc;
}

/**
 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
 **/
static int
lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2737 Received MBOX_REQ request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
		rc = -EINVAL;
		goto job_error;
	}

	if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
		rc = -EINVAL;
		goto job_error;
	}

	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_error;
	}

	rc = lpfc_bsg_issue_mbox(phba, job, vport);

	if (rc == 0) {
		/* job done */
		job->reply->result = 0;
		job->dd_data = NULL;
		job->job_done(job);
	} else if (rc == 1)
		/* job submitted, will complete later */
		rc = 0; /* return zero, no error */
	else {
		/* some error occurred */
		job->reply->result = rc;
		job->dd_data = NULL;
	}

job_error:
	return rc;
}

/**
 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using the
 * lpfc_menlo_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_bsg_menlo *menlo;
	unsigned long flags;
	struct menlo_response *menlo_resp;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	menlo = &dd_data->context_un.menlo;
	job = menlo->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	spin_lock(&phba->hbalock);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock(&phba->hbalock);

	bmp = menlo->bmp;
	rspiocbq = menlo->rspiocbq;
	rsp = &rspiocbq->iocb;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	/* always return the xri, this would be used in the case
	 * of a menlo download to allow the data to be sent as a continuation
	 * of the exchange.
	 */
	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	menlo_resp->xri = rsp->ulpContext;
	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}

/**
 * lpfc_menlo_cmd - send an ioctl for menlo hardware
 * @job: fc_bsg_job to handle
 *
 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
 * all the command completions will return the xri for the command.
 * For menlo data requests a gen request 64 CX is used to continue the exchange
 * supplied in the menlo request header xri field.
 **/
static int
lpfc_menlo_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	int rc = 0;
	struct menlo_command *menlo_cmd;
	struct menlo_response *menlo_resp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	struct ulp_bde64 *bpl = NULL;

	/* in case no data is returned, return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct menlo_command)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2784 Received MENLO_CMD request below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2785 Received MENLO_CMD reply below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2786 Adapter does not support menlo "
				"commands\n");
		rc = -EPERM;
		goto no_dd_data;
	}

	menlo_cmd = (struct menlo_command *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2787 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_dd;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}
	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
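	/*
	 * The BPL now holds the request BDEs immediately followed by the
	 * reply BDEs; the combined count sizes the BDL in the iocb below.
	 */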
	cmd = &cmdiocbq->iocb;
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpLe = 1; /* Limited Edition */
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	/* We want the firmware to timeout before we do */
	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
	cmdiocbq->context3 = bmp;
	cmdiocbq->context2 = rspiocbq;
	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
	cmdiocbq->context1 = dd_data;
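	/*
	 * MENLO_CMD opens a new exchange to the menlo DID with a
	 * GEN_REQUEST64_CR; MENLO_DATA continues an existing exchange
	 * (e.g. a menlo download) with GEN_REQUEST64_CX using the xri
	 * returned by the earlier command.
	 */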
	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
		cmd->ulpPU = MENLO_PU; /* 3 */
		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
	} else {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
		cmd->ulpPU = 1;
		cmd->un.ulpWord[4] = 0;
		cmd->ulpContext = menlo_cmd->xri;
	}

	dd_data->type = TYPE_MENLO;
	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
	dd_data->context_un.menlo.rspiocbq = rspiocbq;
	dd_data->context_un.menlo.set_job = job;
	dd_data->context_un.menlo.bmp = bmp;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
				 MENLO_TIMEOUT - 5);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_TEST:
		rc = lpfc_bsg_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc;

	msgcode = job->request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB. The aborted IOCB will return to
 * the waiting function, which will handle passing the error back to userspace.
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_bsg_event *evt;
	struct lpfc_bsg_iocb *iocb;
	struct lpfc_bsg_mbox *mbox;
	struct lpfc_bsg_menlo *menlo;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	/* timeout and completion crossed paths if no dd_data */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}
	switch (dd_data->type) {
	case TYPE_IOCB:
		iocb = &dd_data->context_un.iocb;
		cmdiocb = iocb->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	case TYPE_EVT:
		evt = dd_data->context_un.evt;
		/* this event has no job anymore */
		evt->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* Return -EAGAIN which is our way of signaling the
		 * app to retry.
		 */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MBOX:
		mbox = &dd_data->context_un.mbox;
		/* this mbox has no job anymore */
		mbox->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		job->reply->result = -EAGAIN;
		/* the mbox completion handler can now be run */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MENLO:
		menlo = &dd_data->context_un.menlo;
		cmdiocb = menlo->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console
	 * so always return success (zero)
	 */
	return 0;
}