 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 * See LICENSE.qla2xxx for copyright and licensing details.
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_tcq.h>
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
static void qla25xx_set_que(srb_t *, struct rsp_que **);
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * Returns the proper CF_* direction based on CDB.
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 * Returns the number of IOCB entries needed to store @dsds.
qla2x00_calc_iocbs_32(uint16_t dsds)
	iocbs += (dsds - 3) / 7;
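	/*
	 * Worked example (added note, not in the original source): the
	 * Command Type 2 IOCB holds the first 3 DSDs and each Continuation
	 * Type 0 IOCB holds 7 more, so dsds = 17 needs
	 * 1 + ceil((17 - 3) / 7) = 1 + 2 = 3 IOCB entries.
	 */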
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 * Returns the number of IOCB entries needed to store @dsds.
qla2x00_calc_iocbs_64(uint16_t dsds)
	iocbs += (dsds - 2) / 5;
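	/*
	 * Worked example (added note, not in the original source): the
	 * Command Type 3 IOCB holds the first 2 DSDs and each Continuation
	 * Type 1 IOCB holds 5 more, so dsds = 12 needs
	 * 1 + ceil((12 - 2) / 5) = 1 + 2 = 3 IOCB entries.
	 */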
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * Returns a pointer to the Continuation Type 0 IOCB packet.
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	if (req->ring_index == req->length) {
		req->ring_ptr = req->ring;

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * Returns a pointer to the continuation type 1 IOCB packet.
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	if (req->ring_index == req->length) {
		req->ring_ptr = req->ring;

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
	uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
		    "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);

	/* We always use DIF bundling for best performance */

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;

	return scsi_prot_sg_count(sp->cmd);
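/*
 * Usage note (added commentary): qla24xx_dif_start_scsi() calls this helper
 * to pick the firmware protection opcode, and uses the returned
 * scsi_prot_sg_count() value to size the protection DSD lists.
 */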
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			 * Seven DSDs are available in the Continuation
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
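		/*
		 * Added note: a 32-bit DSD here is two little-endian dwords,
		 * {dma_address, length}, so each scatter/gather element
		 * consumes 8 bytes of IOCB payload.
		 */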
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			 * Five DSDs are available in the Continuation
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
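		/*
		 * Added note: a 64-bit DSD is three little-endian dwords,
		 * {address low, address high, length}; LSD()/MSD() split the
		 * dma_addr_t, so each element consumes 12 bytes.
		 */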
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 * Returns non-zero if a failure occurred, else zero.
qla2x00_start_scsi(srb_t *sp)
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	cmd_entry_t *cmd_pkt;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;

	/* Setup device pointers. */
	vha = sp->fcport->vha;
	reg = &ha->iobase->isp;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
			return (QLA_FUNCTION_FAILED);

		vha->marker_needed = 0;

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		if (handle == MAX_OUTSTANDING_COMMANDS)
		if (!req->outstanding_cmds[handle])
	if (index == MAX_OUTSTANDING_COMMANDS)

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
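	/*
	 * Added note: req->cnt caches the free request-ring slots computed
	 * from the chip's ring-out pointer; requiring req_cnt + 2 keeps a
	 * small cushion so the in pointer never catches the out pointer.
	 */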
	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
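	/*
	 * Added note: the first 8 bytes (the 4-byte entry header plus the
	 * handle just written) are preserved; only the remainder of the
	 * request entry is cleared.
	 */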
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
	case HEAD_OF_QUEUE_TAG:
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_HEAD_TAG);
	case ORDERED_QUEUE_TAG:
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_ORDERED_TAG);
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Adjust ring index. */
	if (req->ring_index == req->length) {
		req->ring_ptr = req->ring;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);

 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @type: marker modifier
 * Can be called from both normal and interrupt context.
 * Returns non-zero if a failure occurred, else zero.
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");
		return (QLA_FUNCTION_FAILED);

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);

qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * Note: The caller must hold the hardware lock before calling this routine.
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);

	/* Adjust ring index. */
	if (req->ring_index == req->length) {
		req->ring_ptr = req->ring;

	/* Set chip new ring index. */
	if (IS_QLA82XX(ha)) {
		uint32_t dbval = 0x04 | (ha->portnum << 5);

		/* write, read and verify logic */
		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
		    (unsigned long __iomem *)ha->nxdb_wr_ptr,
			while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
				WRT_REG_DWORD((unsigned long __iomem *)
				    ha->nxdb_wr_ptr, dbval);
	} else if (ha->mqenable) {
		/* Set chip new ring index. */
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
 * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 * Returns the number of IOCB entries needed to store @dsds.
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
	iocbs += (dsds - 1) / 5;
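	/*
	 * Worked example (added note, not in the original source): the
	 * command IOCB holds the first DSD and each Continuation Type 1
	 * IOCB holds 5 more, so dsds = 11 needs
	 * 1 + ceil((11 - 1) / 5) = 1 + 2 = 3 IOCB entries.
	 */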
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);
	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);

	vha = sp->fcport->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);

	/* One DSD is available in the Command Type 7 IOCB */
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			 * Five DSDs are available in the Continuation
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
struct fw_dif_context {
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
	struct scsi_cmnd *cmd = sp->cmd;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
		    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =

	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;

	ql_dbg(ql_dbg_io, vha, 0x3009,
	    "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
	    "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
	    pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
	    scsi_get_prot_type(cmd), cmd);
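/*
 * Added background note: a T10 DIF protection interval carries an 8-byte
 * tuple of {16-bit guard CRC, 16-bit application tag, 32-bit reference
 * tag}; the masks above select which tuple bytes the firmware validates
 * or replaces for each DIF type.
 */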
struct qla2_sgx {
	dma_addr_t	dma_addr;	/* OUT */
	uint32_t	dma_len;	/* OUT */

	uint32_t	tot_bytes;	/* IN */
	struct scatterlist *cur_sg;	/* IN */

	/* for bookkeeping, bzero on initial invocation */
	uint32_t	bytes_consumed;

	uint32_t	tot_partial;
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)

	cumulative_partial = sgx->tot_partial;
	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sgx->bytes_consumed = 0;
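/*
 * Usage sketch (added commentary, not part of the original source): callers
 * zero a struct qla2_sgx, set tot_bytes and cur_sg, then pump the helper in
 * a loop; each call yields one DMA chunk in sgx.dma_addr/sgx.dma_len that
 * never crosses a protection-interval (blk_sz) boundary. The helper name
 * below is hypothetical and mirrors the counting loop used later in
 * qla24xx_dif_start_scsi().
 */
static inline uint32_t
qla2xxx_count_block_dsds_sketch(struct scsi_cmnd *cmd, uint32_t blk_sz)
{
	struct qla2_sgx sgx;
	int partial;
	uint32_t dsds = 0;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(cmd);
	sgx.cur_sg = scsi_sglist(cmd);

	/* One DSD is needed per chunk the walker hands back. */
	while (qla24xx_get_one_block_sg(blk_sz, &sgx, &partial))
		dsds++;
	return dsds;
}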
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds)
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = sp->cmd;

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(sp->cmd);
	sgx.cur_sg = scsi_sglist(sp->cmd);

	sg_prot = scsi_prot_sglist(sp->cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;
890 used_dsds -= avail_dsds;
892 /* allocate tracking DS */
893 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
897 /* allocate new list */
898 dsd_ptr->dsd_addr = next_dsd =
899 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
900 &dsd_ptr->dsd_list_dma);
904 * Need to cleanup only this dsd_ptr, rest
905 * will be done by sp_free_dma()
911 list_add_tail(&dsd_ptr->list,
912 &((struct crc_context *)sp->ctx)->dsd_list);
914 sp->flags |= SRB_CRC_CTX_DSD_VALID;
916 /* add new list to cmd iocb or last list */
917 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
918 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
919 *cur_dsd++ = dsd_list_len;
920 cur_dsd = (uint32_t *)next_dsd;
922 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
923 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
924 *cur_dsd++ = cpu_to_le32(sle_dma_len);
928 /* Got a full protection interval */
929 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
932 tot_prot_dma_len += sle_dma_len;
933 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
934 tot_prot_dma_len = 0;
935 sg_prot = sg_next(sg_prot);
938 partial = 1; /* So as to not re-enter this block */
942 /* Null termination */
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
	scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		sle_dma = sg_dma_address(sg);
		ql_dbg(ql_dbg_io, vha, 0x300a,
		    "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
		    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x300b,
			    "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
	/* Null termination */

qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		sle_dma = sg_dma_address(sg);
		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			ql_dbg(ql_dbg_io, vha, 0x3027,
			    "%s(): %p, sg_entry %d - addr=0x%x 0x%x, len=%d.\n",
			    __func__, cur_dsd, i,
			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x3028,
			    "%s(): Protection Data buffer = %p.\n", __func__,
	/* Null termination */

 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
	uint32_t *cur_dsd, *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint8_t bundling = 1;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);

	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);

	if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
	    GFP_ATOMIC, &crc_ctx_dma);
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
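	/*
	 * Added note: the FCP_CMND payload is a 12-byte fixed header
	 * (8-byte LUN plus control fields), then the CDB, then the 4-byte
	 * big-endian FCP_DL transfer length patched in via *fcp_dl below,
	 * hence fcp_cmnd_len = 12 + cdb_len + 4.
	 */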
	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	 * Update tagged queuing modifier if using command tag queuing
	if (scsi_populate_tag_msg(cmd, tag)) {
	case HEAD_OF_QUEUE_TAG:
		fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
	case ORDERED_QUEUE_TAG:
		fcp_cmnd->task_attribute = TSK_ORDERED;
		fcp_cmnd->task_attribute = 0;

	fcp_cmnd->task_attribute = 0;
	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;
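	/*
	 * Worked example (added note): each protection interval carries an
	 * 8-byte DIF tuple, so a 4 KB transfer with 512-byte sectors has
	 * 8 intervals and dif_bytes = 8 * 8 = 64.
	 */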
	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */

	cur_dsd = (uint32_t *)&crc_ctx_pkt->u.nobundling.data_address;

	 * Configure Bundling if we need to fetch interleaving
	 * protection PCI accesses
	fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
	crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
	crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
	cur_dsd = (uint32_t *)&crc_ctx_pkt->u.bundling.data_address;
	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);
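	/*
	 * Added note: unlike the little-endian IOCB fields above, FCP_DL is
	 * a wire-format (big-endian) field inside the FCP_CMND payload,
	 * hence htonl() rather than cpu_to_le32().
	 */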
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);

	/* Walks data segments */
	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
		    __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *)&crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
			goto crc_queuing_error;

	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;

 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 * Returns non-zero if a failure occurred, else zero.
qla24xx_start_scsi(srb_t *sp)
	unsigned long flags;
	struct cmd_type_7 *cmd_pkt;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);

	/* So we know we haven't pci_map'ed anything yet */

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		if (handle == MAX_OUTSTANDING_COMMANDS)
		if (!req->outstanding_cmds[handle])
	if (index == MAX_OUTSTANDING_COMMANDS) {

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))

	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
	case HEAD_OF_QUEUE_TAG:
		cmd_pkt->task = TSK_HEAD_OF_QUEUE;
	case ORDERED_QUEUE_TAG:
		cmd_pkt->task = TSK_ORDERED;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;

	/* Adjust ring index. */
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 * Returns non-zero if a failure occurred, else zero.
qla24xx_dif_start_scsi(srb_t *sp)
	unsigned long flags;
	uint16_t req_cnt = 0;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection commands or CDBs longer than 16 bytes here */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);

	/* So we know we haven't pci_map'ed anything yet */

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		if (handle == MAX_OUTSTANDING_COMMANDS)
		if (!req->outstanding_cmds[handle])

	if (index == MAX_OUTSTANDING_COMMANDS)

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))

		sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);

			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))

	/* number of required data segments */

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))

		sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;

	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
			req->cnt = req->length -
			    (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2))

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = __constant_cpu_to_le16(0);

	/* Adjust ring index. */
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;

	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;

static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
		*rsp = ha->rsp_q_map[0];
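/*
 * Added note: with CPU affinity enabled, the submitting CPU n is steered to
 * response queue n + 1 (queue 0 stays the default/fallback), so completions
 * tend to land on the CPU that issued the I/O.
 */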
/* Generic Control-SRB manipulation functions. */
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	uint16_t cnt, req_cnt;

		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		if (handle == MAX_OUTSTANDING_COMMANDS)
		if (!req->outstanding_cmds[handle])

	if (index == MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_QLA82XX(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
			req->cnt = req->length -
			    (req->ring_index - cnt);

		if (req->cnt < req_cnt)

	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;
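/*
 * Added note: on the failure paths above this function returns NULL, so
 * callers such as __qla2x00_marker() must check the result before using it.
 */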
qla2x00_start_iocbs(srb_t *sp)
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(sp);
		/* Adjust ring index. */
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;

		/* Set chip new ring index. */
			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
			RD_REG_DWORD(&ioreg->hccr);
		} else if (IS_QLA82XX(ha)) {
			qla82xx_start_iocbs(sp);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));

qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *lio = ctx->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;

qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *lio = ctx->u.iocb_cmd;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;

qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
	/* Implicit: mbx->mb10 = 0. */
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->fcport->vp_idx;

qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);

qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *iocb = ctx->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,

qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	    (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
	uint16_t avail_dsds;
	struct scatterlist *sg;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
	int loop_iterartion = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = __constant_cpu_to_le16(0);
	ct_iocb->control_flags = __constant_cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			 * Five DSDs are available in the Cont.
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			cont_iocb_prsnt = 1;

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

	ct_iocb->entry_count = entry_count;
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
	uint16_t avail_dsds;
	struct scatterlist *sg;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
	int loop_iterartion = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			 * Five DSDs are available in the Cont.
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			cont_iocb_prsnt = 1;

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

	ct_iocb->entry_count = entry_count;

qla2x00_start_sp(srb_t *sp)
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_ctx *ctx = sp->ctx;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");

	switch (ctx->type) {
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		qla24xx_tm_iocb(sp, pkt);

	qla2x00_start_iocbs(sp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);