/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/* SCSI protection op as a string, indexed by scsi_get_prot_op() */
static char *dif_op_str[] = {
	"SCSI_PROT_NORMAL",
	"SCSI_PROT_READ_INSERT",
	"SCSI_PROT_WRITE_STRIP",
	"SCSI_PROT_READ_STRIP",
	"SCSI_PROT_WRITE_INSERT",
	"SCSI_PROT_READ_PASS",
	"SCSI_PROT_WRITE_PASS",
};
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);

static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}
	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}
	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
				struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;

	if (sgl) {
		/* the response sge is the second entry, after the command */
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}
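
	/*
	 * Bucket the measured latency (in ms) into one of
	 * LPFC_MAX_BUCKET_COUNT histogram slots.  With LPFC_LINEAR_BUCKET
	 * each slot covers bucket_step ms starting at bucket_base; otherwise
	 * slot i covers latencies up to bucket_base + (1 << i) * bucket_step.
	 * For example (illustrative values only): base 0, step 10 puts a
	 * 35 ms command in linear slot 4 (rounded up) or power-of-2 slot 2
	 * (10 * 2^2 = 40 >= 35).
	 */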
	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_change_queue_depth - Alter scsi device queue depth
 * @sdev: Pointer the scsi device on which to change the queue depth.
 * @qdepth: New queue depth to set the sdev to.
 * @reason: The reason for the queue depth change.
 *
 * This function is called by the midlayer and the LLD to alter the queue
 * depth for a scsi device. This function sets the queue depth to the new
 * value and sends an event out to log the queue depth change.
 **/
static int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	unsigned long new_queue_depth, old_queue_depth;

	old_queue_depth = sdev->queue_depth;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	new_queue_depth = sdev->queue_depth;
	rdata = sdev->hostdata;
	if (rdata)
		lpfc_send_sdev_queuedepth_change_event(phba, vport,
						       rdata->pnode, sdev->lun,
						       old_queue_depth,
						       new_queue_depth);
	return sdev->queue_depth;
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is resource error in driver or firmware.
 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up worker thread of
 * @phba to process WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for @phba vport. This
 * routine posts at most 1 event every 5 minutes after last_ramp_up_time or
 * last_rsrc_error_time. This routine wakes up worker thread of @phba
 * to process WORKER_RAMP_UP_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			uint32_t queue_depth)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;

	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (time_before(jiffies,
			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
	    time_before(jiffies,
			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker
 * thread. This routine reduces queue depth for all scsi devices on each vport
 * associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
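				/*
				 * Shrink the depth in proportion to the
				 * share of recent commands that hit a
				 * resource error.  E.g. depth 30 with 10
				 * resource errors against 20 successes cuts
				 * 30 * 10 / 30 = 10, giving a new depth of
				 * 20 (values illustrative only).
				 */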
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
							  new_queue_depth;
				lpfc_change_queue_depth(sdev, new_queue_depth,
							SCSI_QDEPTH_DEFAULT);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker
 * thread. This routine increases queue depth for all scsi devices on each
 * vport associated with @phba by 1. This routine also sets @phba num_rsrc_err
 * and num_cmd_success to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				lpfc_change_queue_depth(sdev,
							sdev->queue_depth+1,
							SCSI_QDEPTH_RAMP_UP);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking fc_remote_port_delete() routine. This function is invoked
 * with EEH when device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt;

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
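
		/*
		 * Note: the BPL is consumed by the adapter in little-endian
		 * layout; the le32_to_cpu()/cpu_to_le32() pairs around the
		 * address and tus words make the byte swap explicit on
		 * big-endian hosts and compile away on little-endian ones.
		 */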

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		lpfc_release_scsi_buf_s3(phba, psb);
	}

	return bcnt;
}

/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_release_scsi_buf_s4(phba, psb);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
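
	/*
	 * The buffer was not on the aborted list; fall back to scanning the
	 * active iotag lookup table for an outstanding FCP command that
	 * still holds this XRI and clear its exchange-busy flag there.
	 */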
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
		    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (pring->txq_cnt)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * reposts them to the HBA by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb;
	int index, status, bcnt = 0, rcnt = 0, rc = 0;
	LIST_HEAD(sblist);

	for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
		psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
		if (psb) {
			/* Remove from SCSI buffer list */
			list_del(&psb->list);
			/* Add it to a local SCSI buffer list */
			list_add_tail(&psb->list, &sblist);
			if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				bcnt = rcnt;
				rcnt = 0;
			}
		} else
			/* A hole present in the XRI array, need to skip */
			bcnt = rcnt;

		if (index == phba->sli4_hba.scsi_xri_cnt - 1)
			/* End of XRI array for SCSI buffer, complete */
			bcnt = rcnt;

		/* Continue until collect up to a nembed page worth of sgls */
		if (bcnt == 0)
			continue;
		/* Now, post the SCSI buffer list sgls as a block */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
		/* Reset SCSI buffer count for next round of posting */
		bcnt = 0;
		while (!list_empty(&sblist)) {
			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
					 list);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
				rc++;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
		}
	}
	return rc;
}

/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-4 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
	uint16_t iotag, last_xritag = NO_XRI;
	int status = 0, index;
	int bcnt;
	int non_sequential_xri = 0;
	LIST_HEAD(sblist);
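
	/*
	 * SGL block posting requires the XRIs assigned to the buffers in a
	 * block to be consecutive; if any allocation below breaks the
	 * sequence, the sgl is posted individually via lpfc_sli4_post_sgl()
	 * instead.
	 */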
	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci bus
		 * space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
		if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		if (last_xritag != NO_XRI
			&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
			non_sequential_xri = 1;
		} else
			list_add_tail(&psb->list, &sblist);
		last_xritag = psb->cur_iocbq.sli4_xritag;

		index = phba->sli4_hba.scsi_xri_cnt++;
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd =
			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, a bpl entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		psb->dma_phys_bpl = pdma_phys_bpl;
		phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
		if (non_sequential_xri) {
			status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
			break;
		}
	}
	if (bcnt) {
		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
		/* Reset SCSI buffer count for next round of posting */
		while (!list_empty(&sblist)) {
			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
					 list);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
		}
	}

	return bcnt + non_sequential_xri;
}

/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list
 * list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {
		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	}
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{

	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the bdes. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

/*
 * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 */
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
	uint8_t ret = 0;

	if (guard_type == SHOST_DIX_GUARD_IP) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			ret = 1;
			break;

		}
	} else if (guard_type == SHOST_DIX_GUARD_CRC) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*txop = BG_OP_IN_CRC_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			ret = 1;
			break;
		}
	} else {
		/* unsupported format */
		BUG();
	}

	return ret;
}

struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

/**
 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
 * @sc:             in: SCSI command
 * @apptagmask:     out: app tag mask
 * @apptagval:      out: app tag value
 * @reftag:         out: ref tag (reference tag)
 *
 * Description:
 *   Extract DIF parameters from the command if possible.  Otherwise,
 *   use default parameters.
 *
 **/
static inline void
lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
		uint16_t *apptagval, uint32_t *reftag)
{
	struct scsi_dif_tuple *spt;
	unsigned char op = scsi_get_prot_op(sc);
	unsigned int protcnt = scsi_prot_sg_count(sc);

	if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
	    op == SCSI_PROT_WRITE_PASS)) {

		/* get the ref tag from the first DIF tuple in the prot sgl */
		spt = page_address(sg_page(scsi_prot_sglist(sc))) +
			scsi_prot_sglist(sc)[0].offset;
		*apptagmask = 0;
		*apptagval = 0;
		*reftag = cpu_to_be32(spt->ref_tag);

	} else {
		/* SBC defines ref tag to be lower 32bits of LBA */
		*reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
		*apptagmask = 0;
		*apptagval = 0;
	}
}

/*
 * This function sets up buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * Note: Data s/g buffers have been dma mapped
 */
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;
	uint8_t txop, rxop;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
	pde5->reftag = reftag;

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(pde5->reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);
	if (datadir == DMA_FROM_DEVICE) {
		bf_set(pde6_ce, pde6, 1);
		bf_set(pde6_re, pde6, 1);
		bf_set(pde6_ae, pde6, 1);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_apptagval, pde6, apptagval);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}

/*
 * This function sets up buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF_BUF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new prot group  -->     |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       dma mapped
 */
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct ulp_bde64 *prot_bde = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset, protgroup_len;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	split_offset = 0;
	do {
		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
		pde5->reftag = reftag;

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(pde5->reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);
		bf_set(pde6_ce, pde6, 1);
		bf_set(pde6_re, pde6, 1);
		bf_set(pde6_ae, pde6, 1);
		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_apptagval, pde6, apptagval);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		prot_bde = (struct ulp_bde64 *) bpl;
		protphysaddr = sg_dma_address(sgpe);
		prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
		protgroup_len = sg_dma_len(sgpe);

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		prot_bde->tus.f.bdeSize = protgroup_len;
		prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR;
		prot_bde->tus.w = le32_to_cpu(bpl->tus.w);

		curr_prot++;
		num_bde++;
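
		/*
		 * Each prot group covers protgrp_blks logical blocks, i.e.
		 * protgrp_bytes of data (one 8-byte DIF tuple per block).
		 * The data BDEs below are carved to that boundary, so a
		 * single data s/g entry may be split across two prot groups.
		 */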
		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:
	return num_bde;
}

/*
 * Given a SCSI command that supports DIF, determine the composition of
 * protection groups involved in setting up buffer lists
 *
 * Returns:
 *   LPFC_PG_TYPE_NO_DIF or LPFC_PG_TYPE_DIF_BUF for DIF (for both read
 *   and write); LPFC_PG_TYPE_INVALID otherwise
 */
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9021 Unsupported protection op:%d\n", op);
		break;
	}

	return ret;
}

/*
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 */
static int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int diflen, fcpdl;
	unsigned blksize;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 * fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9067 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg.  Config %d, seg_cnt"
					" %d\n",
					__func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;
		case LPFC_PG_TYPE_DIF_BUF:{
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			if (lpfc_cmd->prot_seg_cnt
			    > phba->cfg_prot_sg_seg_cnt) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9068 BLKGRD: %s: Too many prot sg "
					"segments from dma_map_sg.  Config %d,"
					"prot_seg_cnt %d\n", __func__,
					phba->cfg_prot_sg_seg_cnt,
					lpfc_cmd->prot_seg_cnt);
				dma_unmap_sg(&phba->pcidev->dev,
					     scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     datadir);
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if (num_bde < 3)
				goto err;
			break;
		}
		case LPFC_PG_TYPE_INVALID:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = scsi_bufflen(scsi_cmnd);

	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
		/*
		 * We are in DIF Type 1 mode
		 * Every data block has a 8 byte DIF (trailer)
		 * attached to it.  Must adjust FCP data length
		 */
		blksize = lpfc_cmd_blksize(scsi_cmnd);
		diflen = (fcpdl / blksize) * 8;
		fcpdl += diflen;
	}
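
	/*
	 * E.g. a 4096-byte transfer on a 512-byte-sector Type 1 device
	 * covers 8 blocks, so 8 * 8 = 64 bytes of DIF are added and the
	 * wire-level FCP data length becomes 4096 + 64 = 4160 bytes.
	 */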
	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Could not setup all needed BDE's "
			"prot_group_type=%d, num_bde=%d\n",
			prot_group_type, num_bde);
	return 1;
}

/*
 * This function checks for BlockGuard errors detected by
 * the HBA.  In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
			" 0x%x lba 0x%llx blk cnt 0x%x "
			"bgstat=0x%x bghm=0x%x\n",
			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
			blk_rq_sectors(cmd->request), bgstat, bghm);

	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
			" BlockGuard profile. bgstat:0x%x\n",
			bgstat);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
			"Invalid BlockGuard DIF Block. bgstat:0x%x\n",
			bgstat);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9055 BLKGRD: guard_tag error\n");
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9056 BLKGRD: ref_tag error\n");
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9061 BLKGRD: app_tag error\n");
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it
		 */
		cmd->sense_buffer[8] = 0;     /* Information */
		cmd->sense_buffer[9] = 0xa;   /* Add. length */
		bghm /= cmd->device->sector_size;

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9057 BLKGRD: no errors reported!\n");
	}

out:
	return ret;
}

/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(!nseg))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the sge's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
			sgl++;
		}
	} else {
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

/**
 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
	    (cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param and
		 * there is valid fcpi_parm, then there is a read_check error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
2107 lpfc_worker_wake_up(phba);
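/*
 * Sketch of the deferred-event idiom used above (example_post_evt() is a
 * hypothetical helper; hbalock, work_list and lpfc_worker_wake_up() are
 * the real driver objects): queue the event on the work list under the
 * hba lock, then kick the worker thread to process it.
 */
static void example_post_evt(struct lpfc_hba *hba, struct list_head *evt)
{
	unsigned long flags;

	spin_lock_irqsave(&hba->hbalock, flags);
	list_add_tail(evt, &hba->work_list);
	spin_unlock_irqrestore(&hba->hbalock, flags);
	lpfc_worker_wake_up(hba);	/* worker thread drains work_list */
}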
2112 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
2113 * @phba: The HBA for which this call is being executed.
2114 * @psb: The scsi buffer which is going to be un-mapped.
2116 * This routine does DMA un-mapping of the scatter gather list of the scsi
2117 * command field of @psb.
2120 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2123 * There are only two special cases to consider. (1) the scsi command
2124 * requested scatter-gather usage or (2) the scsi command allocated
2125 * a request buffer, but did not request use_sg. There is a third
2126 * case, but it does not require resource deallocation.
2128 if (psb->seg_cnt > 0)
2129 scsi_dma_unmap(psb->pCmd);
2130 if (psb->prot_seg_cnt > 0)
2131 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
2132 scsi_prot_sg_count(psb->pCmd),
2133 psb->pCmd->sc_data_direction);
2137 * lpfc_handle_fcp_err - FCP response handler
2138 * @vport: The virtual port for which this call is being executed.
2139 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2140 * @rsp_iocb: The response IOCB which contains FCP error.
2142 * This routine is called to process response IOCB with status field
2143 * IOSTAT_FCP_RSP_ERROR. This routine sets the result field of the scsi
2144 * command based upon the SCSI and FCP error.
2147 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2148 struct lpfc_iocbq *rsp_iocb)
2150 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
2151 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
2152 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
2153 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
2154 uint32_t resp_info = fcprsp->rspStatus2;
2155 uint32_t scsi_status = fcprsp->rspStatus3;
2157 uint32_t host_status = DID_OK;
2158 uint32_t rsplen = 0;
2159 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
2163 * If this is a task management command, there is no
2164 * scsi packet associated with this lpfc_cmd. The driver consumes it.
2167 if (fcpcmd->fcpCntl2) {
2172 if (resp_info & RSP_LEN_VALID) {
2173 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2174 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
2175 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2176 "2719 Invalid response length: "
2177 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
2179 cmnd->device->lun, cmnd->cmnd[0],
2181 host_status = DID_ERROR;
2184 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
2185 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2186 "2757 Protocol failure detected during "
2187 "processing of FCP I/O op: "
2188 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
2190 cmnd->device->lun, cmnd->cmnd[0],
2192 host_status = DID_ERROR;
2197 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
2198 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
2199 if (snslen > SCSI_SENSE_BUFFERSIZE)
2200 snslen = SCSI_SENSE_BUFFERSIZE;
2202 if (resp_info & RSP_LEN_VALID)
2203 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2204 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
2206 lp = (uint32_t *)cmnd->sense_buffer;
2208 if (!scsi_status && (resp_info & RESID_UNDER))
2211 lpfc_printf_vlog(vport, KERN_WARNING, logit,
2212 "9024 FCP command x%x failed: x%x SNS x%x x%x "
2213 "Data: x%x x%x x%x x%x x%x\n",
2214 cmnd->cmnd[0], scsi_status,
2215 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
2216 be32_to_cpu(fcprsp->rspResId),
2217 be32_to_cpu(fcprsp->rspSnsLen),
2218 be32_to_cpu(fcprsp->rspRspLen),
2221 scsi_set_resid(cmnd, 0);
2222 if (resp_info & RESID_UNDER) {
2223 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
2225 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2226 "9025 FCP Read Underrun, expected %d, "
2227 "residual %d Data: x%x x%x x%x\n",
2228 be32_to_cpu(fcpcmd->fcpDl),
2229 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
2233 * If there is an underrun, check whether the underrun reported by the
2234 * storage array is the same as the underrun reported by the HBA.
2235 * If they are not the same, there is a dropped frame.
2237 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
2239 (scsi_get_resid(cmnd) != fcpi_parm)) {
2240 lpfc_printf_vlog(vport, KERN_WARNING,
2241 LOG_FCP | LOG_FCP_ERROR,
2242 "9026 FCP Read Check Error "
2243 "and Underrun Data: x%x x%x x%x x%x\n",
2244 be32_to_cpu(fcpcmd->fcpDl),
2245 scsi_get_resid(cmnd), fcpi_parm,
2247 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
2248 host_status = DID_ERROR;
2251 * The cmnd->underflow is the minimum number of bytes that must
2252 * be transferred for this command. Provided a sense condition
2253 * is not present, make sure the actual amount transferred is at
2254 * least the underflow value or fail.
2256 if (!(resp_info & SNS_LEN_VALID) &&
2257 (scsi_status == SAM_STAT_GOOD) &&
2258 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
2259 < cmnd->underflow)) {
2260 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2261 "9027 FCP command x%x residual "
2262 "underrun converted to error "
2263 "Data: x%x x%x x%x\n",
2264 cmnd->cmnd[0], scsi_bufflen(cmnd),
2265 scsi_get_resid(cmnd), cmnd->underflow);
2266 host_status = DID_ERROR;
2268 } else if (resp_info & RESID_OVER) {
2269 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2270 "9028 FCP command x%x residual overrun error. "
2271 "Data: x%x x%x\n", cmnd->cmnd[0],
2272 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
2273 host_status = DID_ERROR;
2276 * Check SLI validation that all the transfer was actually done
2277 * (fcpi_parm should be zero). Apply check only to reads.
2279 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
2280 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
2281 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
2282 "9029 FCP Read Check Error Data: "
2283 "x%x x%x x%x x%x\n",
2284 be32_to_cpu(fcpcmd->fcpDl),
2285 be32_to_cpu(fcprsp->rspResId),
2286 fcpi_parm, cmnd->cmnd[0]);
2287 host_status = DID_ERROR;
2288 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
2292 cmnd->result = ScsiResult(host_status, scsi_status);
2293 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
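/*
 * The underflow conversion above in plain form -- a hypothetical helper,
 * not driver code: a command is failed when no sense data is present,
 * the target returned GOOD status, and the bytes actually transferred
 * (bufflen - resid) fall short of the midlayer's ->underflow minimum.
 */
static inline int example_underflow_error(uint32_t bufflen, uint32_t resid,
					  uint32_t underflow, int sense_valid,
					  uint32_t scsi_status)
{
	return !sense_valid && (scsi_status == SAM_STAT_GOOD) &&
	       ((bufflen - resid) < underflow);
}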
2297 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
2298 * @phba: The Hba for which this call is being executed.
2299 * @pIocbIn: The command IOCBQ for the scsi cmnd.
2300 * @pIocbOut: The response IOCBQ for the scsi cmnd.
2302 * This routine assigns the scsi command result by examining the response
2303 * IOCB status field. It also handles the QUEUE FULL condition by ramping
2304 * down the device queue depth.
2307 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2308 struct lpfc_iocbq *pIocbOut)
2310 struct lpfc_scsi_buf *lpfc_cmd =
2311 (struct lpfc_scsi_buf *) pIocbIn->context1;
2312 struct lpfc_vport *vport = pIocbIn->vport;
2313 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2314 struct lpfc_nodelist *pnode = rdata->pnode;
2315 struct scsi_cmnd *cmd;
2317 struct scsi_device *tmp_sdev;
2319 unsigned long flags;
2320 struct lpfc_fast_path_event *fast_path_evt;
2321 struct Scsi_Host *shost;
2322 uint32_t queue_depth, scsi_id;
2324 /* Sanity check on return of outstanding command */
2325 if (!(lpfc_cmd->pCmd))
2327 cmd = lpfc_cmd->pCmd;
2328 shost = cmd->device->host;
2330 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
2331 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
2332 /* pick up SLI4 exchange busy status from HBA */
2333 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
2335 if (pnode && NLP_CHK_NODE_ACT(pnode))
2336 atomic_dec(&pnode->cmd_pending);
2338 if (lpfc_cmd->status) {
2339 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
2340 (lpfc_cmd->result & IOERR_DRVR_MASK))
2341 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2342 else if (lpfc_cmd->status >= IOSTAT_CNT)
2343 lpfc_cmd->status = IOSTAT_DEFAULT;
2345 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2346 "9030 FCP cmd x%x failed <%d/%d> "
2347 "status: x%x result: x%x Data: x%x x%x\n",
2349 cmd->device ? cmd->device->id : 0xffff,
2350 cmd->device ? cmd->device->lun : 0xffff,
2351 lpfc_cmd->status, lpfc_cmd->result,
2352 pIocbOut->iocb.ulpContext,
2353 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
2355 switch (lpfc_cmd->status) {
2356 case IOSTAT_FCP_RSP_ERROR:
2357 /* Call FCP RSP handler to determine result */
2358 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
2360 case IOSTAT_NPORT_BSY:
2361 case IOSTAT_FABRIC_BSY:
2362 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2363 fast_path_evt = lpfc_alloc_fast_evt(phba);
2366 fast_path_evt->un.fabric_evt.event_type =
2367 FC_REG_FABRIC_EVENT;
2368 fast_path_evt->un.fabric_evt.subcategory =
2369 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
2370 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
2371 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2372 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
2373 &pnode->nlp_portname,
2374 sizeof(struct lpfc_name));
2375 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
2376 &pnode->nlp_nodename,
2377 sizeof(struct lpfc_name));
2379 fast_path_evt->vport = vport;
2380 fast_path_evt->work_evt.evt =
2381 LPFC_EVT_FASTPATH_MGMT_EVT;
2382 spin_lock_irqsave(&phba->hbalock, flags);
2383 list_add_tail(&fast_path_evt->work_evt.evt_listp,
2385 spin_unlock_irqrestore(&phba->hbalock, flags);
2386 lpfc_worker_wake_up(phba);
2388 case IOSTAT_LOCAL_REJECT:
2389 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
2390 lpfc_cmd->result == IOERR_NO_RESOURCES ||
2391 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
2392 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
2393 cmd->result = ScsiResult(DID_REQUEUE, 0);
2397 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
2398 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
2399 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
2400 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2402 * This is a response for a BG enabled
2403 * cmd. Parse BG error
2405 lpfc_parse_bg_err(phba, lpfc_cmd,
2409 lpfc_printf_vlog(vport, KERN_WARNING,
2411 "9031 non-zero BGSTAT "
2412 "on unprotected cmd\n");
2416 /* else: fall through */
2418 cmd->result = ScsiResult(DID_ERROR, 0);
2422 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
2423 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
2424 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
2427 cmd->result = ScsiResult(DID_OK, 0);
2430 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
2431 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
2433 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2434 "0710 Iodone <%d/%d> cmd %p, error "
2435 "x%x SNS x%x x%x Data: x%x x%x\n",
2436 cmd->device->id, cmd->device->lun, cmd,
2437 cmd->result, *lp, *(lp + 3), cmd->retries,
2438 scsi_get_resid(cmd));
2441 lpfc_update_stats(phba, lpfc_cmd);
2442 result = cmd->result;
2443 if (vport->cfg_max_scsicmpl_time &&
2444 time_after(jiffies, lpfc_cmd->start_time +
2445 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
2446 spin_lock_irqsave(shost->host_lock, flags);
2447 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2448 if (pnode->cmd_qdepth >
2449 atomic_read(&pnode->cmd_pending) &&
2450 (atomic_read(&pnode->cmd_pending) >
2451 LPFC_MIN_TGT_QDEPTH) &&
2452 ((cmd->cmnd[0] == READ_10) ||
2453 (cmd->cmnd[0] == WRITE_10)))
2455 atomic_read(&pnode->cmd_pending);
2457 pnode->last_change_time = jiffies;
2459 spin_unlock_irqrestore(shost->host_lock, flags);
2460 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2461 if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
2462 time_after(jiffies, pnode->last_change_time +
2463 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
2464 spin_lock_irqsave(shost->host_lock, flags);
2465 depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
2467 depth = depth ? depth : 1;
2468 pnode->cmd_qdepth += depth;
2469 if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
2470 pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
2471 pnode->last_change_time = jiffies;
2472 spin_unlock_irqrestore(shost->host_lock, flags);
2476 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
2478 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
2479 queue_depth = cmd->device->queue_depth;
2480 scsi_id = cmd->device->id;
2481 cmd->scsi_done(cmd);
2483 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2485 * If there is a thread waiting for command completion
2486 * wake up the thread.
2488 spin_lock_irqsave(shost->host_lock, flags);
2489 lpfc_cmd->pCmd = NULL;
2490 if (lpfc_cmd->waitq)
2491 wake_up(lpfc_cmd->waitq);
2492 spin_unlock_irqrestore(shost->host_lock, flags);
2493 lpfc_release_scsi_buf(phba, lpfc_cmd);
2498 lpfc_rampup_queue_depth(vport, queue_depth);
2501 * Check for queue full. If the lun is reporting queue full, then
2502 * back off the lun queue depth to prevent target overloads.
2504 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
2505 NLP_CHK_NODE_ACT(pnode)) {
2506 shost_for_each_device(tmp_sdev, shost) {
2507 if (tmp_sdev->id != scsi_id)
2509 depth = scsi_track_queue_full(tmp_sdev,
2510 tmp_sdev->queue_depth-1);
2513 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2514 "0711 detected queue full - lun queue "
2515 "depth adjusted to %d.\n", depth);
2516 lpfc_send_sdev_queuedepth_change_event(phba, vport,
2524 * If there is a thread waiting for command completion
2525 * wake up the thread.
2527 spin_lock_irqsave(shost->host_lock, flags);
2528 lpfc_cmd->pCmd = NULL;
2529 if (lpfc_cmd->waitq)
2530 wake_up(lpfc_cmd->waitq);
2531 spin_unlock_irqrestore(shost->host_lock, flags);
2533 lpfc_release_scsi_buf(phba, lpfc_cmd);
2537 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
2538 * @data: A pointer to the immediate command data portion of the IOCB.
2539 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
2541 * The routine copies the entire FCP command from @fcp_cmnd to @data while
2542 * byte swapping the data to big endian format for transmission on the wire.
2545 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
2548 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
2549 i += sizeof(uint32_t), j++) {
2550 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
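/*
 * Worked example of the word copy above (example_copy_be32() is a
 * hypothetical helper): each 32-bit word is converted with cpu_to_be32(),
 * so on a little-endian host the value 0x11223344 leaves the wire as the
 * byte sequence 11 22 33 44.
 */
static inline void example_copy_be32(__be32 *dst, const uint32_t *src,
				     size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		dst[i] = cpu_to_be32(src[i]);	/* byte-swap on LE hosts */
}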
2555 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
2556 * @vport: The virtual port for which this call is being executed.
2557 * @lpfc_cmd: The scsi command which needs to be sent.
2558 * @pnode: Pointer to lpfc_nodelist.
2560 * This routine initializes the fcp_cmnd and iocb data structures from the
2561 * scsi command for transfer to a device with the SLI-3 interface spec.
2564 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2565 struct lpfc_nodelist *pnode)
2567 struct lpfc_hba *phba = vport->phba;
2568 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2569 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2570 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2571 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
2572 int datadir = scsi_cmnd->sc_data_direction;
2575 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
2578 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
2579 /* clear task management bits */
2580 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
2582 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
2583 &lpfc_cmd->fcp_cmnd->fcp_lun);
2585 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
2587 if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
2589 case HEAD_OF_QUEUE_TAG:
2590 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
2592 case ORDERED_QUEUE_TAG:
2593 fcp_cmnd->fcpCntl1 = ORDERED_Q;
2596 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
2600 fcp_cmnd->fcpCntl1 = 0;
2603 * There are three possibilities here - use scatter-gather segment, use
2604 * the single mapping, or neither. Start the lpfc command prep by
2605 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first data bde entry.
2608 if (scsi_sg_count(scsi_cmnd)) {
2609 if (datadir == DMA_TO_DEVICE) {
2610 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2611 if (phba->sli_rev < LPFC_SLI_REV4) {
2612 iocb_cmd->un.fcpi.fcpi_parm = 0;
2613 iocb_cmd->ulpPU = 0;
2615 iocb_cmd->ulpPU = PARM_READ_CHECK;
2616 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2617 phba->fc4OutputRequests++;
2619 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
2620 iocb_cmd->ulpPU = PARM_READ_CHECK;
2621 fcp_cmnd->fcpCntl3 = READ_DATA;
2622 phba->fc4InputRequests++;
2625 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
2626 iocb_cmd->un.fcpi.fcpi_parm = 0;
2627 iocb_cmd->ulpPU = 0;
2628 fcp_cmnd->fcpCntl3 = 0;
2629 phba->fc4ControlRequests++;
2631 if (phba->sli_rev == 3 &&
2632 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2633 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
2635 * Finish initializing those IOCB fields that are independent
2636 * of the scsi_cmnd request_buffer
2638 piocbq->iocb.ulpContext = pnode->nlp_rpi;
2639 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
2640 piocbq->iocb.ulpFCP2Rcvy = 1;
2642 piocbq->iocb.ulpFCP2Rcvy = 0;
2644 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
2645 piocbq->context1 = lpfc_cmd;
2646 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2647 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
2648 piocbq->vport = vport;
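/*
 * Sketch of the direction decision implemented above (example_fcp_cntl3()
 * is a hypothetical helper; WRITE_DATA and READ_DATA are the real
 * fcpCntl3 values used by the routine): commands without data become
 * control requests, otherwise the DMA direction picks read or write.
 */
static inline uint8_t example_fcp_cntl3(int datadir, int has_data)
{
	if (!has_data)
		return 0;			/* control command */
	return (datadir == DMA_TO_DEVICE) ? WRITE_DATA : READ_DATA;
}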
2652 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
2653 * @vport: The virtual port for which this call is being executed.
2654 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2655 * @lun: Logical unit number.
2656 * @task_mgmt_cmd: SCSI task management command.
2658 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2659 * for device with SLI-3 interface spec.
2666 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2667 struct lpfc_scsi_buf *lpfc_cmd,
2669 uint8_t task_mgmt_cmd)
2671 struct lpfc_iocbq *piocbq;
2673 struct fcp_cmnd *fcp_cmnd;
2674 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2675 struct lpfc_nodelist *ndlp = rdata->pnode;
2677 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2678 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
2681 piocbq = &(lpfc_cmd->cur_iocbq);
2682 piocbq->vport = vport;
2684 piocb = &piocbq->iocb;
2686 fcp_cmnd = lpfc_cmd->fcp_cmnd;
2687 /* Clear out any old data in the FCP command area */
2688 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2689 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
2690 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
2691 if (vport->phba->sli_rev == 3 &&
2692 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2693 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
2694 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
2695 piocb->ulpContext = ndlp->nlp_rpi;
2696 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2697 piocb->ulpFCP2Rcvy = 1;
2699 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
2701 /* ulpTimeout is only one byte */
2702 if (lpfc_cmd->timeout > 0xff) {
2704 * Do not timeout the command at the firmware level.
2705 * The driver will provide the timeout mechanism.
2707 piocb->ulpTimeout = 0;
2709 piocb->ulpTimeout = lpfc_cmd->timeout;
2711 if (vport->phba->sli_rev == LPFC_SLI_REV4)
2712 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
2718 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
2719 * @phba: The hba struct for which this call is being executed.
2720 * @dev_grp: The HBA PCI-Device group number.
2722 * This routine sets up the SCSI interface API function jump table in the @phba struct.
2724 * Returns: 0 - success, -ENODEV - failure.
2727 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2730 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
2731 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
2732 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2735 case LPFC_PCI_DEV_LP:
2736 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2737 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2738 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2740 case LPFC_PCI_DEV_OC:
2741 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2742 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2743 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2747 "1418 Invalid HBA PCI-device group: 0x%x\n",
2752 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2753 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2754 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
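/*
 * Minimal sketch of the jump-table idiom above, with hypothetical names:
 * the SLI-revision-specific implementation is selected once at probe
 * time, and every hot-path caller dispatches through the stored pointer.
 */
struct demo_scsi_ops {
	int (*prep_dma_buf)(void *ctx);
};

static int demo_prep_s3(void *ctx) { return 0; }	/* SLI-3 flavour */
static int demo_prep_s4(void *ctx) { return 0; }	/* SLI-4 flavour */

static void demo_scsi_ops_setup(struct demo_scsi_ops *ops, int is_sli4)
{
	ops->prep_dma_buf = is_sli4 ? demo_prep_s4 : demo_prep_s3;
}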
2759 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
2760 * @phba: The Hba for which this call is being executed.
2761 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
2762 * @rspiocbq: Pointer to lpfc_iocbq data structure.
2764 * This routine is the IOCB completion routine for the device reset and
2765 * target reset routines. It releases the scsi buffer associated with lpfc_cmd.
2768 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2769 struct lpfc_iocbq *cmdiocbq,
2770 struct lpfc_iocbq *rspiocbq)
2772 struct lpfc_scsi_buf *lpfc_cmd =
2773 (struct lpfc_scsi_buf *) cmdiocbq->context1;
2775 lpfc_release_scsi_buf(phba, lpfc_cmd);
2780 * lpfc_info - Info entry point of scsi_host_template data structure
2781 * @host: The scsi host for which this call is being executed.
2783 * This routine provides module information about the HBA.
2786 * Pointer to char - Success.
2789 lpfc_info(struct Scsi_Host *host)
2791 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
2792 struct lpfc_hba *phba = vport->phba;
2794 static char lpfcinfobuf[384];
2796 memset(lpfcinfobuf,0,384);
2797 if (phba && phba->pcidev){
2798 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
2799 len = strlen(lpfcinfobuf);
2800 snprintf(lpfcinfobuf + len,
2802 " on PCI bus %02x device %02x irq %d",
2803 phba->pcidev->bus->number,
2804 phba->pcidev->devfn,
2806 len = strlen(lpfcinfobuf);
2807 if (phba->Port[0]) {
2808 snprintf(lpfcinfobuf + len,
2813 len = strlen(lpfcinfobuf);
2814 if (phba->sli4_hba.link_state.logical_speed) {
2815 snprintf(lpfcinfobuf + len,
2817 " Logical Link Speed: %d Mbps",
2818 phba->sli4_hba.link_state.logical_speed * 10);
2825 * lpfc_poll_rearm_timer - Routine to modify the fcp_poll timer of the hba
2826 * @phba: The Hba for which this call is being executed.
2828 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
2829 * The default value of cfg_poll_tmo is 10 milliseconds.
2831 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
2833 unsigned long poll_tmo_expires =
2834 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
2836 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
2837 mod_timer(&phba->fcp_poll_timer,
2842 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
2843 * @phba: The Hba for which this call is being executed.
2845 * This routine starts the fcp_poll_timer of @phba.
2847 void lpfc_poll_start_timer(struct lpfc_hba * phba)
2849 lpfc_poll_rearm_timer(phba);
2853 * lpfc_poll_timeout - Restart polling timer
2854 * @ptr: Pointer to the lpfc_hba data structure (passed as an unsigned long).
2856 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
2857 * and the FCP ring interrupt is disabled.
2860 void lpfc_poll_timeout(unsigned long ptr)
2862 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2864 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2865 lpfc_sli_handle_fast_ring_event(phba,
2866 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
2868 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2869 lpfc_poll_rearm_timer(phba);
2874 * lpfc_queuecommand - scsi_host_template queuecommand entry point
2875 * @cmnd: Pointer to scsi_cmnd data structure.
2876 * @done: Pointer to done routine.
2878 * Driver registers this routine to scsi midlayer to submit a @cmd to process.
2879 * This routine prepares an IOCB from scsi command and provides to firmware.
2880 * The @done callback is invoked after driver finished processing the command.
2884 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
2887 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2889 struct Scsi_Host *shost = cmnd->device->host;
2890 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2891 struct lpfc_hba *phba = vport->phba;
2892 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2893 struct lpfc_nodelist *ndlp;
2894 struct lpfc_scsi_buf *lpfc_cmd;
2895 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2898 err = fc_remote_port_chkready(rport);
2901 goto out_fail_command;
2903 ndlp = rdata->pnode;
2905 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2906 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2908 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2909 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
2910 " op:%02x str=%s without registering for"
2911 " BlockGuard - Rejecting command\n",
2912 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2913 dif_op_str[scsi_get_prot_op(cmnd)]);
2914 goto out_fail_command;
2918 * Catch race where our node has transitioned, but the
2919 * transport is still transitioning.
2921 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
2922 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2923 goto out_fail_command;
2925 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
2928 lpfc_cmd = lpfc_get_scsi_buf(phba);
2929 if (lpfc_cmd == NULL) {
2930 lpfc_rampdown_queue_depth(phba);
2932 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2933 "0707 driver's buffer pool is empty, "
2939 * Store the midlayer's command structure for the completion phase
2940 * and complete the command initialization.
2942 lpfc_cmd->pCmd = cmnd;
2943 lpfc_cmd->rdata = rdata;
2944 lpfc_cmd->timeout = 0;
2945 lpfc_cmd->start_time = jiffies;
2946 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
2947 cmnd->scsi_done = done;
2949 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2950 if (vport->phba->cfg_enable_bg) {
2951 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2952 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2954 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2955 dif_op_str[scsi_get_prot_op(cmnd)]);
2956 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2957 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2958 "%02x %02x %02x %02x %02x\n",
2959 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2960 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2961 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2963 if (cmnd->cmnd[0] == READ_10)
2964 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2965 "9035 BLKGRD: READ @ sector %llu, "
2967 (unsigned long long)scsi_get_lba(cmnd),
2968 blk_rq_sectors(cmnd->request));
2969 else if (cmnd->cmnd[0] == WRITE_10)
2970 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2971 "9036 BLKGRD: WRITE @ sector %llu, "
2972 "count %u cmd=%p\n",
2973 (unsigned long long)scsi_get_lba(cmnd),
2974 blk_rq_sectors(cmnd->request),
2978 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2980 if (vport->phba->cfg_enable_bg) {
2981 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2982 "9038 BLKGRD: rcvd unprotected cmd:"
2983 "%02x op:%02x str=%s\n",
2984 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2985 dif_op_str[scsi_get_prot_op(cmnd)]);
2986 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2987 "9039 BLKGRD: CDB: %02x %02x %02x "
2988 "%02x %02x %02x %02x %02x %02x %02x\n",
2989 cmnd->cmnd[0], cmnd->cmnd[1],
2990 cmnd->cmnd[2], cmnd->cmnd[3],
2991 cmnd->cmnd[4], cmnd->cmnd[5],
2992 cmnd->cmnd[6], cmnd->cmnd[7],
2993 cmnd->cmnd[8], cmnd->cmnd[9]);
2994 if (cmnd->cmnd[0] == READ_10)
2995 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2996 "9040 dbg: READ @ sector %llu, "
2998 (unsigned long long)scsi_get_lba(cmnd),
2999 blk_rq_sectors(cmnd->request));
3000 else if (cmnd->cmnd[0] == WRITE_10)
3001 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
3002 "9041 dbg: WRITE @ sector %llu, "
3003 "count %u cmd=%p\n",
3004 (unsigned long long)scsi_get_lba(cmnd),
3005 blk_rq_sectors(cmnd->request), cmnd);
3007 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
3008 "9042 dbg: parser not implemented\n");
3010 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3014 goto out_host_busy_free_buf;
3016 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
3018 atomic_inc(&ndlp->cmd_pending);
3019 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
3020 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
3022 atomic_dec(&ndlp->cmd_pending);
3023 goto out_host_busy_free_buf;
3025 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3026 spin_unlock(shost->host_lock);
3027 lpfc_sli_handle_fast_ring_event(phba,
3028 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3030 spin_lock(shost->host_lock);
3031 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3032 lpfc_poll_rearm_timer(phba);
3037 out_host_busy_free_buf:
3038 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
3039 lpfc_release_scsi_buf(phba, lpfc_cmd);
3041 return SCSI_MLQUEUE_HOST_BUSY;
3049 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
3050 * @cmnd: Pointer to scsi_cmnd data structure.
3052 * This routine aborts @cmnd pending in the base driver.
3059 lpfc_abort_handler(struct scsi_cmnd *cmnd)
3061 struct Scsi_Host *shost = cmnd->device->host;
3062 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3063 struct lpfc_hba *phba = vport->phba;
3064 struct lpfc_iocbq *iocb;
3065 struct lpfc_iocbq *abtsiocb;
3066 struct lpfc_scsi_buf *lpfc_cmd;
3069 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
3071 ret = fc_block_scsi_eh(cmnd);
3074 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
3078 * If pCmd field of the corresponding lpfc_scsi_buf structure
3079 * points to a different SCSI command, then the driver has
3080 * already completed this command, but the midlayer did not
3081 * see the completion before the eh fired. Just return SUCCESS.
3084 iocb = &lpfc_cmd->cur_iocbq;
3085 if (lpfc_cmd->pCmd != cmnd)
3088 BUG_ON(iocb->context1 != lpfc_cmd);
3090 abtsiocb = lpfc_sli_get_iocbq(phba);
3091 if (abtsiocb == NULL) {
3097 * The scsi command cannot be in the txq, and it is in flight because
3098 * pCmd is still pointing at the SCSI command we have to abort. There
3099 * is no need to search the txcmplq. Just send an abort to the FW.
3103 icmd = &abtsiocb->iocb;
3104 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
3105 icmd->un.acxri.abortContextTag = cmd->ulpContext;
3106 if (phba->sli_rev == LPFC_SLI_REV4)
3107 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
3109 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
3112 icmd->ulpClass = cmd->ulpClass;
3114 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3115 abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
3116 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
3118 if (lpfc_is_link_up(phba))
3119 icmd->ulpCommand = CMD_ABORT_XRI_CN;
3121 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
3123 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
3124 abtsiocb->vport = vport;
3125 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
3127 lpfc_sli_release_iocbq(phba, abtsiocb);
3132 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3133 lpfc_sli_handle_fast_ring_event(phba,
3134 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3136 lpfc_cmd->waitq = &waitq;
3137 /* Wait for abort to complete */
3138 wait_event_timeout(waitq,
3139 (lpfc_cmd->pCmd != cmnd),
3140 (2*vport->cfg_devloss_tmo*HZ));
3142 spin_lock_irq(shost->host_lock);
3143 lpfc_cmd->waitq = NULL;
3144 spin_unlock_irq(shost->host_lock);
3146 if (lpfc_cmd->pCmd == cmnd) {
3148 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3149 "0748 abort handler timed out waiting "
3150 "for abort to complete: ret %#x, ID %d, "
3151 "LUN %d, snum %#lx\n",
3152 ret, cmnd->device->id, cmnd->device->lun,
3153 cmnd->serial_number);
3157 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3158 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
3159 "LUN %d snum %#lx\n", ret, cmnd->device->id,
3160 cmnd->device->lun, cmnd->serial_number);
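/*
 * Sketch of the completion-wait idiom above (example_wait_abort_done()
 * is a hypothetical helper): the eh thread parks on the on-stack
 * waitqueue until the completion path clears ->pCmd and wakes it,
 * bounded by twice the devloss timeout. wait_event_timeout() returns 0
 * if the timeout elapsed before the condition became true.
 */
static inline long example_wait_abort_done(wait_queue_head_t *wq,
					   struct lpfc_scsi_buf *buf,
					   struct scsi_cmnd *sc,
					   unsigned long tmo)
{
	return wait_event_timeout(*wq, buf->pCmd != sc, tmo);
}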
3165 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
3167 switch (task_mgmt_cmd) {
3168 case FCP_ABORT_TASK_SET:
3169 return "ABORT_TASK_SET";
3170 case FCP_CLEAR_TASK_SET:
3171 return "FCP_CLEAR_TASK_SET";
3173 return "FCP_BUS_RESET";
3175 return "FCP_LUN_RESET";
3176 case FCP_TARGET_RESET:
3177 return "FCP_TARGET_RESET";
3179 return "FCP_CLEAR_ACA";
3180 case FCP_TERMINATE_TASK:
3181 return "FCP_TERMINATE_TASK";
3188 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
3189 * @vport: The virtual port for which this call is being executed.
3190 * @rdata: Pointer to remote port local data
3191 * @tgt_id: Target ID of remote device.
3192 * @lun_id: Lun number for the TMF
3193 * @task_mgmt_cmd: type of TMF to send
3195 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to the remote port.
3203 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3204 unsigned tgt_id, unsigned int lun_id,
3205 uint8_t task_mgmt_cmd)
3207 struct lpfc_hba *phba = vport->phba;
3208 struct lpfc_scsi_buf *lpfc_cmd;
3209 struct lpfc_iocbq *iocbq;
3210 struct lpfc_iocbq *iocbqrsp;
3214 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
3217 lpfc_cmd = lpfc_get_scsi_buf(phba);
3218 if (lpfc_cmd == NULL)
3220 lpfc_cmd->timeout = 60;
3221 lpfc_cmd->rdata = rdata;
3223 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
3226 lpfc_release_scsi_buf(phba, lpfc_cmd);
3230 iocbq = &lpfc_cmd->cur_iocbq;
3231 iocbqrsp = lpfc_sli_get_iocbq(phba);
3232 if (iocbqrsp == NULL) {
3233 lpfc_release_scsi_buf(phba, lpfc_cmd);
3237 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3238 "0702 Issue %s to TGT %d LUN %d "
3239 "rpi x%x nlp_flag x%x\n",
3240 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
3241 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
3243 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
3244 iocbq, iocbqrsp, lpfc_cmd->timeout);
3245 if (status != IOCB_SUCCESS) {
3246 if (status == IOCB_TIMEDOUT) {
3247 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
3248 ret = TIMEOUT_ERROR;
3251 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3252 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3253 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
3254 lpfc_taskmgmt_name(task_mgmt_cmd),
3255 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
3256 iocbqrsp->iocb.un.ulpWord[4]);
3257 } else if (status == IOCB_BUSY)
3262 lpfc_sli_release_iocbq(phba, iocbqrsp);
3264 if (ret != TIMEOUT_ERROR)
3265 lpfc_release_scsi_buf(phba, lpfc_cmd);
3271 * lpfc_chk_tgt_mapped - Wait for the scsi target to become mapped
3272 * @vport: The virtual port to check on
3273 * @cmnd: Pointer to scsi_cmnd data structure.
3275 * This routine delays until the scsi target (aka rport) for the
3276 * command exists (is present and logged in) or we declare it non-existent.
3283 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
3285 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3286 struct lpfc_nodelist *pnode;
3287 unsigned long later;
3290 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3291 "0797 Tgt Map rport failure: rdata x%p\n", rdata);
3294 pnode = rdata->pnode;
3296 * If target is not in a MAPPED state, delay until
3297 * target is rediscovered or devloss timeout expires.
3299 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3300 while (time_after(later, jiffies)) {
3301 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3303 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
3305 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
3306 rdata = cmnd->device->hostdata;
3309 pnode = rdata->pnode;
3311 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
3312 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
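/*
 * Sketch of the bounded-poll idiom above (example_wait_ready() and the
 * ready() callback are hypothetical): compute a jiffies deadline, then
 * re-test the condition on a fixed cadence until it holds or the time
 * budget is exhausted.
 */
static int example_wait_ready(int (*ready)(void *), void *arg,
			      unsigned int tmo_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(tmo_ms);

	while (time_after(deadline, jiffies)) {
		if (ready(arg))
			return 0;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	return -ETIMEDOUT;
}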
3318 * lpfc_reset_flush_io_context - Flush orphaned I/O contexts after a reset
3319 * @vport: The virtual port (scsi_host) for the flush context
3320 * @tgt_id: If aborting by Target context - specifies the target id
3321 * @lun_id: If aborting by Lun context - specifies the lun id
3322 * @context: specifies the context level to flush at.
3324 * After a reset condition via TMF, we need to flush orphaned i/o
3325 * contexts from the adapter. This routine aborts any contexts
3326 * outstanding, then waits for their completions. The wait is
3327 * bounded by devloss_tmo though.
3334 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
3335 uint64_t lun_id, lpfc_ctx_cmd context)
3337 struct lpfc_hba *phba = vport->phba;
3338 unsigned long later;
3341 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
3343 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
3344 tgt_id, lun_id, context);
3345 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3346 while (time_after(later, jiffies) && cnt) {
3347 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
3348 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
3351 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3352 "0724 I/O flush failure for context %s : cnt x%x\n",
3353 ((context == LPFC_CTX_LUN) ? "LUN" :
3354 ((context == LPFC_CTX_TGT) ? "TGT" :
3355 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
3363 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
3364 * @cmnd: Pointer to scsi_cmnd data structure.
3366 * This routine does a device reset by sending a LUN_RESET task management
3374 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
3376 struct Scsi_Host *shost = cmnd->device->host;
3377 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3378 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3379 struct lpfc_nodelist *pnode;
3380 unsigned tgt_id = cmnd->device->id;
3381 unsigned int lun_id = cmnd->device->lun;
3382 struct lpfc_scsi_event_header scsi_event;
3386 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3387 "0798 Device Reset rport failure: rdata x%p\n", rdata);
3390 pnode = rdata->pnode;
3391 status = fc_block_scsi_eh(cmnd);
3395 status = lpfc_chk_tgt_mapped(vport, cmnd);
3396 if (status == FAILED) {
3397 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3398 "0721 Device Reset rport failure: rdata x%p\n", rdata);
3402 scsi_event.event_type = FC_REG_SCSI_EVENT;
3403 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
3404 scsi_event.lun = lun_id;
3405 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3406 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3408 fc_host_post_vendor_event(shost, fc_get_event_number(),
3409 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3411 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3414 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3415 "0713 SCSI layer issued Device Reset (%d, %d) "
3416 "return x%x\n", tgt_id, lun_id, status);
3419 * We have to clean up the i/o, as it may be orphaned by the TMF,
3420 * or, if the TMF failed, it may be in an indeterminate state.
3422 * We will report success if all of the i/o aborts successfully.
3424 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3430 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
3431 * @cmnd: Pointer to scsi_cmnd data structure.
3433 * This routine does a target reset by sending a TARGET_RESET task management
3441 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
3443 struct Scsi_Host *shost = cmnd->device->host;
3444 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3445 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3446 struct lpfc_nodelist *pnode;
3447 unsigned tgt_id = cmnd->device->id;
3448 unsigned int lun_id = cmnd->device->lun;
3449 struct lpfc_scsi_event_header scsi_event;
3453 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3454 "0799 Target Reset rport failure: rdata x%p\n", rdata);
3457 pnode = rdata->pnode;
3458 status = fc_block_scsi_eh(cmnd);
3462 status = lpfc_chk_tgt_mapped(vport, cmnd);
3463 if (status == FAILED) {
3464 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3465 "0722 Target Reset rport failure: rdata x%p\n", rdata);
3469 scsi_event.event_type = FC_REG_SCSI_EVENT;
3470 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
3472 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3473 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3475 fc_host_post_vendor_event(shost, fc_get_event_number(),
3476 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3478 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3481 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3482 "0723 SCSI layer issued Target Reset (%d, %d) "
3483 "return x%x\n", tgt_id, lun_id, status);
3486 * We have to clean up the i/o, as it may be orphaned by the TMF,
3487 * or, if the TMF failed, it may be in an indeterminate state.
3489 * We will report success if all of the i/o aborts successfully.
3491 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3497 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
3498 * @cmnd: Pointer to scsi_cmnd data structure.
3500 * This routine does target reset to all targets on @cmnd->device->host.
3501 * This emulates Parallel SCSI Bus Reset Semantics.
3508 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3510 struct Scsi_Host *shost = cmnd->device->host;
3511 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3512 struct lpfc_nodelist *ndlp = NULL;
3513 struct lpfc_scsi_event_header scsi_event;
3515 int ret = SUCCESS, status, i;
3517 scsi_event.event_type = FC_REG_SCSI_EVENT;
3518 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
3520 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
3521 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
3523 fc_host_post_vendor_event(shost, fc_get_event_number(),
3524 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3526 ret = fc_block_scsi_eh(cmnd);
3531 * Since the driver manages a single bus device, reset all
3532 * targets known to the driver. Should any target reset
3533 * fail, this routine returns failure to the midlayer.
3535 for (i = 0; i < LPFC_MAX_TARGET; i++) {
3536 /* Search for mapped node by target ID */
3538 spin_lock_irq(shost->host_lock);
3539 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3540 if (!NLP_CHK_NODE_ACT(ndlp))
3542 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
3543 ndlp->nlp_sid == i &&
3549 spin_unlock_irq(shost->host_lock);
3553 status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
3554 i, 0, FCP_TARGET_RESET);
3556 if (status != SUCCESS) {
3557 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3558 "0700 Bus Reset on target %d failed\n",
3564 * We have to clean up the i/o, as it may be orphaned by the TMFs
3565 * above, or, if any of the TMFs failed, it may be in an
3566 * indeterminate state.
3567 * We will report success if all of the i/o aborts successfully.
3570 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
3571 if (status != SUCCESS)
3574 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3575 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
3580 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
3581 * @sdev: Pointer to scsi_device.
3583 * This routine populates cmds_per_lun + 2 scsi_bufs into this host's
3584 * globally available list of scsi buffers. It also makes sure no more scsi
3585 * buffers are allocated than the HBA limit conveyed to the midlayer. This
3586 * list of scsi buffers exists for the lifetime of the driver.
3593 lpfc_slave_alloc(struct scsi_device *sdev)
3595 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3596 struct lpfc_hba *phba = vport->phba;
3597 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3599 uint32_t num_to_alloc = 0;
3600 int num_allocated = 0;
3603 if (!rport || fc_remote_port_chkready(rport))
3606 sdev->hostdata = rport->dd_data;
3607 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
3610 * Populate the cmds_per_lun count scsi_bufs into this host's globally
3611 * available list of scsi buffers. Don't allocate more than the
3612 * HBA limit conveyed to the midlayer via the host structure. The
3613 * formula accounts for the lun_queue_depth + error handlers + 1
3614 * extra. This list of scsi bufs exists for the lifetime of the driver.
3616 total = phba->total_scsi_bufs;
3617 num_to_alloc = vport->cfg_lun_queue_depth + 2;
3619 /* If the allocated buffers are enough, do nothing */
3620 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
3623 /* Allow some exchanges to be available always to complete discovery */
3624 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
3625 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3626 "0704 At limitation of %d preallocated "
3627 "command buffers\n", total);
3629 /* Allow some exchanges to be available always to complete discovery */
3630 } else if (total + num_to_alloc >
3631 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
3632 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3633 "0705 Allocation request of %d "
3634 "command buffers will exceed max of %d. "
3635 "Reducing allocation request to %d.\n",
3636 num_to_alloc, phba->cfg_hba_queue_depth,
3637 (phba->cfg_hba_queue_depth - total));
3638 num_to_alloc = phba->cfg_hba_queue_depth - total;
3640 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
3641 if (num_to_alloc != num_allocated) {
3642 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3643 "0708 Allocation request of %d "
3644 "command buffers did not succeed. "
3645 "Allocated %d buffers.\n",
3646 num_to_alloc, num_allocated);
3648 if (num_allocated > 0)
3649 phba->total_scsi_bufs += num_allocated;
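/*
 * Worked example with hypothetical numbers: with cfg_lun_queue_depth of
 * 30, each new device requests 30 + 2 = 32 buffers. If the HBA queue
 * depth cap, less the LPFC_DISC_IOCB_BUFF_COUNT discovery reserve,
 * leaves only 20 slots, the request is trimmed to 20; any further
 * shortfall from lpfc_new_scsi_buf() is only logged, not treated as
 * fatal.
 */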
3654 * lpfc_slave_configure - scsi_host_template slave_configure entry point
3655 * @sdev: Pointer to scsi_device.
3657 * This routine configures the following items:
3658 * - Tag command queuing support for @sdev if supported.
3659 * - Dev loss time out value of fc_rport.
3660 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
3666 lpfc_slave_configure(struct scsi_device *sdev)
3668 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3669 struct lpfc_hba *phba = vport->phba;
3670 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
3672 if (sdev->tagged_supported)
3673 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
3675 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
3678 * Initialize the fc transport attributes for the target
3679 * containing this scsi device. Also note that the driver's
3680 * target pointer is stored in the starget_data for the
3681 * driver's sysfs entry point functions.
3683 rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3685 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3686 lpfc_sli_handle_fast_ring_event(phba,
3687 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3688 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3689 lpfc_poll_rearm_timer(phba);
3696 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
3697 * @sdev: Pointer to scsi_device.
3699 * This routine sets the @sdev hostdata field to null.
3702 lpfc_slave_destroy(struct scsi_device *sdev)
3704 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3705 struct lpfc_hba *phba = vport->phba;
3706 atomic_dec(&phba->sdev_cnt);
3707 sdev->hostdata = NULL;
3712 struct scsi_host_template lpfc_template = {
3713 .module = THIS_MODULE,
3714 .name = LPFC_DRIVER_NAME,
3716 .queuecommand = lpfc_queuecommand,
3717 .eh_abort_handler = lpfc_abort_handler,
3718 .eh_device_reset_handler = lpfc_device_reset_handler,
3719 .eh_target_reset_handler = lpfc_target_reset_handler,
3720 .eh_bus_reset_handler = lpfc_bus_reset_handler,
3721 .slave_alloc = lpfc_slave_alloc,
3722 .slave_configure = lpfc_slave_configure,
3723 .slave_destroy = lpfc_slave_destroy,
3724 .scan_finished = lpfc_scan_finished,
3726 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
3727 .cmd_per_lun = LPFC_CMD_PER_LUN,
3728 .use_clustering = ENABLE_CLUSTERING,
3729 .shost_attrs = lpfc_hba_attrs,
3730 .max_sectors = 0xFFFF,
3731 .vendor_id = LPFC_NL_VENDOR_ID,
3732 .change_queue_depth = lpfc_change_queue_depth,
};
3735 struct scsi_host_template lpfc_vport_template = {
3736 .module = THIS_MODULE,
3737 .name = LPFC_DRIVER_NAME,
3739 .queuecommand = lpfc_queuecommand,
3740 .eh_abort_handler = lpfc_abort_handler,
3741 .eh_device_reset_handler = lpfc_device_reset_handler,
3742 .eh_target_reset_handler = lpfc_target_reset_handler,
3743 .eh_bus_reset_handler = lpfc_bus_reset_handler,
3744 .slave_alloc = lpfc_slave_alloc,
3745 .slave_configure = lpfc_slave_configure,
3746 .slave_destroy = lpfc_slave_destroy,
3747 .scan_finished = lpfc_scan_finished,
3749 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
3750 .cmd_per_lun = LPFC_CMD_PER_LUN,
3751 .use_clustering = ENABLE_CLUSTERING,
3752 .shost_attrs = lpfc_vport_attrs,
3753 .max_sectors = 0xFFFF,
3754 .change_queue_depth = lpfc_change_queue_depth,
};
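/*
 * Usage sketch (example_alloc_host() and the privsize are hypothetical):
 * a scsi_host_template such as lpfc_template is bound to a Scsi_Host at
 * probe time with scsi_host_alloc() and published to the midlayer with
 * scsi_add_host().
 */
static struct Scsi_Host *example_alloc_host(struct device *dev)
{
	struct Scsi_Host *sh = scsi_host_alloc(&lpfc_template, sizeof(void *));

	if (sh && scsi_add_host(sh, dev)) {
		scsi_host_put(sh);	/* undo the allocation on failure */
		sh = NULL;
	}
	return sh;
}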