/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block (SRB) for the command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
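	/*
	 * The Command Type 2 IOCB holds 3 DSDs and each Continuation
	 * Type 0 IOCB holds 7 more; e.g. dsds = 12 needs the command
	 * IOCB + (12 - 3) / 7 = 1 continuation + 1 more for the 2 DSDs
	 * left over, i.e. 3 entries total.
	 */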
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
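	/*
	 * The Command Type 3 IOCB holds 2 DSDs and each Continuation
	 * Type 1 IOCB holds 5 more; e.g. dsds = 12 needs 1 + (10 / 5)
	 * = 3 entries, with no remainder.
	 */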
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
		    "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
		return 0;
	}

	/* We always use DIF bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(sp->cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char		tag[2];

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
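	/* Handle 0 serves as the "no handle" value (the wrap below skips
	 * slot 0), so the scan starts just past the last handle issued
	 * and walks the table circularly looking for a free slot. */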
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
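	/* Require two spare entries beyond req_cnt so the ring-in pointer
	 * never quite catches the ring-out pointer; a completely full ring
	 * would be indistinguishable from an empty one. */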
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
	    "IOCB data:\n");
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (IS_QLA82XX(ha)) {
		uint32_t dbval = 0x04 | (ha->portnum << 5);

		/* write, read and verify logic */
		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
		if (ql2xdbwr)
			qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
		else {
			WRT_REG_DWORD(
				(unsigned long __iomem *)ha->nxdb_wr_ptr,
				dbval);
			wmb();
			while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
				WRT_REG_DWORD((unsigned long __iomem *)
					ha->nxdb_wr_ptr, dbval);
				wmb();
			}
		}
	} else if (ha->mqenable) {
		/* Set chip new ring index. */
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
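	/*
	 * The Command Type 7 IOCB holds a single DSD and each
	 * Continuation Type 1 IOCB holds 5 more.
	 */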
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = sp->cmd;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
								0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}

	ql_dbg(ql_dbg_io, vha, 0x3009,
	    "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
	    "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
	    pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
	    scsi_get_prot_type(cmd), cmd);
}

struct qla2_sgx {
	dma_addr_t		dma_addr;	/* OUT */
	uint32_t		dma_len;	/* OUT */

	uint32_t		tot_bytes;	/* IN */
	struct scatterlist	*cur_sg;	/* IN */

	/* for bookkeeping, bzero on initial invocation */
	uint32_t		bytes_consumed;
	uint32_t		num_bytes;
	uint32_t		tot_partial;

	/* for debugging */
	uint32_t		num_sg;
	srb_t			*sp;
};

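/*
 * Walk the data scatterlist one protection interval (blk_sz bytes) at a
 * time.  Each call emits at most one DMA run in sgx->dma_addr/dma_len and
 * sets *partial when the current SG element ended before a full block was
 * consumed, so the caller keeps accumulating rather than emitting a DIF
 * tuple.  Returns 0 once all tot_bytes have been consumed.
 */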
static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}

static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;

	uint32_t	prot_int;
	uint32_t	partial;
	struct qla2_sgx sgx;
	dma_addr_t	sle_dma;
	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = sp->cmd;

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(sp->cmd);
	sgx.cur_sg = scsi_sglist(sp->cmd);
	sgx.sp = sp;

	sg_prot = scsi_prot_sglist(sp->cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
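			/*
			 * Each DSD entry is 12 bytes (a 64-bit address
			 * plus a 32-bit length); the extra slot holds
			 * the chain pointer to the next DSD list.
			 */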
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
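			/*
			 * Append the matching 8-byte DIF tuple from the
			 * protection scatterlist as its own DSD.
			 */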
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);

	uint8_t		*cp;

	scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		ql_dbg(ql_dbg_io, vha, 0x300a,
		    "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
		    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
		    sp->cmd);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x300b,
			    "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
							uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	uint8_t		*cp;

	cmd = sp->cmd;
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
						QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			ql_dbg(ql_dbg_io, vha, 0x3027,
			    "%s(): %p, sg_entry %d - "
			    "addr=0x%x 0x%x, len=%d.\n",
			    __func__, cur_dsd, i,
			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x3028,
			    "%s(): Protection Data buffer = %p.\n", __func__,
			    cp);
		}
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *							Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments carrying protection data
 * @fw_prot_opts: Protection options passed to the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	struct scatterlist	*cur_seg;
	int			sgc;
	uint32_t		total_bytes = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;
	char			tag[2];

	cmd = sp->cmd;

	sgc = 0;
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

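	/*
	 * Bundling (separate interleaved fetches of data and protection
	 * DSDs) only matters when both streams cross the host bus; for
	 * the pure insert/strip operations below, the HBA generates or
	 * discards the DIF tuples itself, so a single stream suffices.
	 */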
	if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
	    GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
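	/*
	 * FCP_CMND layout: an 8-byte LUN plus 4 bytes of task
	 * attribute/control fields, then the CDB, then the trailing
	 * 4-byte FCP_DL; hence 12 + CDB length + 4 below.
	 */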
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
		    fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
		    break;
		case ORDERED_QUEUE_TAG:
		    fcp_cmnd->task_attribute = TSK_ORDERED;
		    break;
		default:
		    fcp_cmnd->task_attribute = 0;
		    break;
		}
	} else {
		fcp_cmnd->task_attribute = 0;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
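	/* Eight bytes of DIF (2-byte guard, 2-byte app tag, 4-byte ref
	 * tag) accompany each block. */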
	dif_bytes = (data_bytes / blk_size) * 8;

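	/*
	 * total_bytes is what crosses the Fibre Channel link (FCP_DL);
	 * data_bytes is what is DMA'd to or from host memory.  The two
	 * differ by dif_bytes whenever the HBA inserts or strips the
	 * protection data on one side only.
	 */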
	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	    total_bytes = data_bytes;
	    data_bytes += dif_bytes;
	    break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
	    total_bytes = data_bytes + dif_bytes;
	    break;
	default:
	    BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
							tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	/* Walks data segments */
	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	char		tag[2];

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
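	/* MAKE_HANDLE() folds the request queue id into the upper bits of
	 * the handle so the completion path can locate the owning queue. */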
1430         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1431
1432         /* Zero out remaining portion of packet. */
1433         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1434         clr_ptr = (uint32_t *)cmd_pkt + 2;
1435         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1436         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1437
1438         /* Set NPORT-ID and LUN number*/
1439         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1440         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1441         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1442         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1443         cmd_pkt->vp_index = sp->fcport->vp_idx;
1444
1445         int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1446         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1447
1448         /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1449         if (scsi_populate_tag_msg(cmd, tag)) {
1450                 switch (tag[0]) {
1451                 case HEAD_OF_QUEUE_TAG:
1452                         cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1453                         break;
1454                 case ORDERED_QUEUE_TAG:
1455                         cmd_pkt->task = TSK_ORDERED;
1456                         break;
1457                 }
1458         }
1459
1460         /* Load SCSI command packet. */
1461         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1462         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1463
1464         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1465
1466         /* Build IOCB segments */
1467         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1468
1469         /* Set total data segment count. */
1470         cmd_pkt->entry_count = (uint8_t)req_cnt;
1471         /* Specify response queue number where completion should happen */
1472         cmd_pkt->entry_status = (uint8_t) rsp->id;
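             /*
              * On submission the entry_status byte is not otherwise used,
              * so the firmware interface repurposes it to carry the id of
              * the response queue that should receive the completion.
              */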
1473         wmb();
1474         /* Adjust ring index. */
1475         req->ring_index++;
1476         if (req->ring_index == req->length) {
1477                 req->ring_index = 0;
1478                 req->ring_ptr = req->ring;
1479         } else
1480                 req->ring_ptr++;
1481
1482         sp->flags |= SRB_DMA_VALID;
1483
1484         /* Set chip new ring index. */
1485         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1486         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1487
1488         /* Manage unprocessed RIO/ZIO commands in response queue. */
1489         if (vha->flags.process_response_queue &&
1490                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1491                 qla24xx_process_response_queue(vha, rsp);
1492
1493         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1494         return QLA_SUCCESS;
1495
1496 queuing_error:
1497         if (tot_dsds)
1498                 scsi_dma_unmap(cmd);
1499
1500         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1501
1502         return QLA_FUNCTION_FAILED;
1503 }
1504
1505
1506 /**
1507  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1508  * @sp: command to send to the ISP
1509  *
1510  * Returns non-zero if a failure occurred, else zero.
1511  */
1512 int
1513 qla24xx_dif_start_scsi(srb_t *sp)
1514 {
1515         int                     nseg;
1516         unsigned long           flags;
1517         uint32_t                *clr_ptr;
1518         uint32_t                index;
1519         uint32_t                handle;
1520         uint16_t                cnt;
1521         uint16_t                req_cnt = 0;
1522         uint16_t                tot_dsds;
1523         uint16_t                tot_prot_dsds;
1524         uint16_t                fw_prot_opts = 0;
1525         struct req_que          *req = NULL;
1526         struct rsp_que          *rsp = NULL;
1527         struct scsi_cmnd        *cmd = sp->cmd;
1528         struct scsi_qla_host    *vha = sp->fcport->vha;
1529         struct qla_hw_data      *ha = vha->hw;
1530         struct cmd_type_crc_2   *cmd_pkt;
1531         uint32_t                status = 0;
1532
1533 #define QDSS_GOT_Q_SPACE        BIT_0
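             /*
              * Records that ring space and an outstanding-command slot have
              * been claimed, so the error path knows to release them.
              */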
1534
1535         /* Only process protected I/O or CDBs longer than 16 bytes here */
1536         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1537                 if (cmd->cmd_len <= 16)
1538                         return qla24xx_start_scsi(sp);
1539         }
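             /*
              * Unprotected commands with CDBs of 16 bytes or less take the
              * standard command path above; only DIF/DIX-protected I/O and
              * extended CDBs need the CRC-2 IOCB built below.
              */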
1540
1541         /* Setup device pointers. */
1542
1543         qla25xx_set_que(sp, &rsp);
1544         req = vha->req;
1545
1546         /* So we know we haven't DMA-mapped anything yet */
1547         tot_dsds = 0;
1548
1549         /* Send marker if required */
1550         if (vha->marker_needed != 0) {
1551                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1552                     QLA_SUCCESS)
1553                         return QLA_FUNCTION_FAILED;
1554                 vha->marker_needed = 0;
1555         }
1556
1557         /* Acquire ring specific lock */
1558         spin_lock_irqsave(&ha->hardware_lock, flags);
1559
1560         /* Check for room in outstanding command list. */
1561         handle = req->current_outstanding_cmd;
1562         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1563                 handle++;
1564                 if (handle == MAX_OUTSTANDING_COMMANDS)
1565                         handle = 1;
1566                 if (!req->outstanding_cmds[handle])
1567                         break;
1568         }
1569
1570         if (index == MAX_OUTSTANDING_COMMANDS)
1571                 goto queuing_error;
1572
1573         /* Compute number of required data segments */
1574         /* Map the sg table so we have an accurate count of sg entries needed */
1575         if (scsi_sg_count(cmd)) {
1576                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1577                     scsi_sg_count(cmd), cmd->sc_data_direction);
1578                 if (unlikely(!nseg))
1579                         goto queuing_error;
1580                 else
1581                         sp->flags |= SRB_DMA_VALID;
1582
1583                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1584                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1585                         struct qla2_sgx sgx;
1586                         uint32_t        partial;
1587
1588                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1589                         sgx.tot_bytes = scsi_bufflen(cmd);
1590                         sgx.cur_sg = scsi_sglist(cmd);
1591                         sgx.sp = sp;
1592
1593                         nseg = 0;
1594                         while (qla24xx_get_one_block_sg(
1595                             cmd->device->sector_size, &sgx, &partial))
1596                                 nseg++;
1597                 }
1598         } else
1599                 nseg = 0;
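             /*
              * For INSERT/STRIP operations the HBA generates or discards the
              * protection data itself, so the transfer is carved into
              * logical-block-sized pieces and nseg is recounted accordingly
              * rather than taken from the raw scatter/gather entries.
              */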
1600
1601         /* number of required data segments */
1602         tot_dsds = nseg;
1603
1604         /* Compute number of required protection segments */
1605         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1606                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1607                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1608                 if (unlikely(!nseg))
1609                         goto queuing_error;
1610                 else
1611                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1612
1613                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1614                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1615                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1616                 }
1617         } else {
1618                 nseg = 0;
1619         }
1620
1621         req_cnt = 1;
1622         /* Total Data and protection sg segment(s) */
1623         tot_prot_dsds = nseg;
1624         tot_dsds += nseg;
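             /*
              * A single ring entry suffices here: the CRC-2 IOCB references
              * its data and protection segments through externally allocated
              * DSD lists, so no continuation entries are consumed regardless
              * of tot_dsds.
              */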
1625         if (req->cnt < (req_cnt + 2)) {
1626                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1627
1628                 if (req->ring_index < cnt)
1629                         req->cnt = cnt - req->ring_index;
1630                 else
1631                         req->cnt = req->length -
1632                                 (req->ring_index - cnt);
1633         }
1634
1635         if (req->cnt < (req_cnt + 2))
1636                 goto queuing_error;
1637
1638         status |= QDSS_GOT_Q_SPACE;
1639
1640         /* Build header part of command packet (excluding the OPCODE). */
1641         req->current_outstanding_cmd = handle;
1642         req->outstanding_cmds[handle] = sp;
1643         sp->handle = handle;
1644         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1645         req->cnt -= req_cnt;
1646
1647         /* Fill-in common area */
1648         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1649         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1650
1651         clr_ptr = (uint32_t *)cmd_pkt + 2;
1652         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1653
1654         /* Set NPORT-ID and LUN number*/
1655         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1656         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1657         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1658         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1659
1660         int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1661         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1662
1663         /* Total Data and protection segment(s) */
1664         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1665
1666         /* Build IOCB segments and adjust for data protection segments */
1667         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1668             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1669                 QLA_SUCCESS)
1670                 goto queuing_error;
1671
1672         cmd_pkt->entry_count = (uint8_t)req_cnt;
1673         /* Specify response queue number where completion should happen */
1674         cmd_pkt->entry_status = (uint8_t) rsp->id;
1675         cmd_pkt->timeout = __constant_cpu_to_le16(0);
1676         wmb();
1677
1678         /* Adjust ring index. */
1679         req->ring_index++;
1680         if (req->ring_index == req->length) {
1681                 req->ring_index = 0;
1682                 req->ring_ptr = req->ring;
1683         } else
1684                 req->ring_ptr++;
1685
1686         /* Set chip new ring index. */
1687         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1688         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1689
1690         /* Manage unprocessed RIO/ZIO commands in response queue. */
1691         if (vha->flags.process_response_queue &&
1692             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1693                 qla24xx_process_response_queue(vha, rsp);
1694
1695         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1696
1697         return QLA_SUCCESS;
1698
1699 queuing_error:
1700         if (status & QDSS_GOT_Q_SPACE) {
1701                 req->outstanding_cmds[handle] = NULL;
1702                 req->cnt += req_cnt;
1703         }
1704         /* Cleanup will be performed by the caller (queuecommand) */
1705
1706         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1707         return QLA_FUNCTION_FAILED;
1708 }
1709
1710
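             /*
              * Response queue 0 is the default; with CPU affinity enabled,
              * queues 1..max_rsp_queues-1 act as per-CPU completion queues
              * and the request is steered to the queue matching the
              * submitting CPU.
              */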
1711 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1712 {
1713         struct scsi_cmnd *cmd = sp->cmd;
1714         struct qla_hw_data *ha = sp->fcport->vha->hw;
1715         int affinity = cmd->request->cpu;
1716
1717         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1718                 affinity < ha->max_rsp_queues - 1)
1719                 *rsp = ha->rsp_q_map[affinity + 1];
1720         else
1721                 *rsp = ha->rsp_q_map[0];
1722 }
1723
1724 /* Generic Control-SRB manipulation functions. */
1725 void *
1726 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1727 {
1728         struct qla_hw_data *ha = vha->hw;
1729         struct req_que *req = ha->req_q_map[0];
1730         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1731         uint32_t index, handle;
1732         request_t *pkt;
1733         uint16_t cnt, req_cnt;
1734
1735         pkt = NULL;
1736         req_cnt = 1;
1737         handle = 0;
1738
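             /*
              * A NULL sp means the caller only needs raw ring space (a
              * marker IOCB, for instance) and no completion handle, so the
              * outstanding-command bookkeeping is skipped.
              */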
1739         if (!sp)
1740                 goto skip_cmd_array;
1741
1742         /* Check for room in outstanding command list. */
1743         handle = req->current_outstanding_cmd;
1744         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1745                 handle++;
1746                 if (handle == MAX_OUTSTANDING_COMMANDS)
1747                         handle = 1;
1748                 if (!req->outstanding_cmds[handle])
1749                         break;
1750         }
1751         if (index == MAX_OUTSTANDING_COMMANDS) {
1752                 ql_log(ql_log_warn, vha, 0x700b,
1753                     "No room on outstanding cmd array.\n");
1754                 goto queuing_error;
1755         }
1756
1757         /* Prep command array. */
1758         req->current_outstanding_cmd = handle;
1759         req->outstanding_cmds[handle] = sp;
1760         sp->handle = handle;
1761
1762 skip_cmd_array:
1763         /* Check for room on request queue. */
1764         if (req->cnt < req_cnt) {
1765                 if (ha->mqenable)
1766                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1767                 else if (IS_QLA82XX(ha))
1768                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1769                 else if (IS_FWI2_CAPABLE(ha))
1770                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1771                 else
1772                         cnt = qla2x00_debounce_register(
1773                             ISP_REQ_Q_OUT(ha, &reg->isp));
1774
1775                 if (req->ring_index < cnt)
1776                         req->cnt = cnt - req->ring_index;
1777                 else
1778                         req->cnt = req->length -
1779                             (req->ring_index - cnt);
1780         }
1781         if (req->cnt < req_cnt)
1782                 goto queuing_error;
1783
1784         /* Prep packet */
1785         req->cnt -= req_cnt;
1786         pkt = req->ring_ptr;
1787         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1788         pkt->entry_count = req_cnt;
1789         pkt->handle = handle;
1790
1791 queuing_error:
1792         return pkt;
1793 }
1794
1795 static void
1796 qla2x00_start_iocbs(srb_t *sp)
1797 {
1798         struct qla_hw_data *ha = sp->fcport->vha->hw;
1799         struct req_que *req = ha->req_q_map[0];
1800         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1801         struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1802
1803         if (IS_QLA82XX(ha)) {
1804                 qla82xx_start_iocbs(sp);
1805         } else {
1806                 /* Adjust ring index. */
1807                 req->ring_index++;
1808                 if (req->ring_index == req->length) {
1809                         req->ring_index = 0;
1810                         req->ring_ptr = req->ring;
1811                 } else
1812                         req->ring_ptr++;
1813
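                     /*
                      * The register read following each doorbell write below
                      * flushes posted PCI writes, ensuring the new producer
                      * index reaches the chip before the driver proceeds.
                      */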
1814                 /* Set chip new ring index. */
1815                 if (ha->mqenable) {
1816                         WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
1817                         RD_REG_DWORD(&ioreg->hccr);
1820                 } else if (IS_FWI2_CAPABLE(ha)) {
1821                         WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
1822                         RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
1823                 } else {
1824                         WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
1825                                 req->ring_index);
1826                         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
1827                 }
1828         }
1829 }
1830
1831 static void
1832 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1833 {
1834         struct srb_ctx *ctx = sp->ctx;
1835         struct srb_iocb *lio = ctx->u.iocb_cmd;
1836
1837         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1838         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1839         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1840                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1841         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1842                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1843         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1844         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1845         logio->port_id[1] = sp->fcport->d_id.b.area;
1846         logio->port_id[2] = sp->fcport->d_id.b.domain;
1847         logio->vp_index = sp->fcport->vp_idx;
1848 }
1849
1850 static void
1851 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1852 {
1853         struct qla_hw_data *ha = sp->fcport->vha->hw;
1854         struct srb_ctx *ctx = sp->ctx;
1855         struct srb_iocb *lio = ctx->u.iocb_cmd;
1856         uint16_t opts;
1857
1858         mbx->entry_type = MBX_IOCB_TYPE;
1859         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1860         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1861         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1862         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1863         if (HAS_EXTENDED_IDS(ha)) {
1864                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1865                 mbx->mb10 = cpu_to_le16(opts);
1866         } else {
1867                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1868         }
1869         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1870         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1871             sp->fcport->d_id.b.al_pa);
1872         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1873 }
1874
1875 static void
1876 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1877 {
1878         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1879         logio->control_flags =
1880             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1881         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1882         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1883         logio->port_id[1] = sp->fcport->d_id.b.area;
1884         logio->port_id[2] = sp->fcport->d_id.b.domain;
1885         logio->vp_index = sp->fcport->vp_idx;
1886 }
1887
1888 static void
1889 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1890 {
1891         struct qla_hw_data *ha = sp->fcport->vha->hw;
1892
1893         mbx->entry_type = MBX_IOCB_TYPE;
1894         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1895         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1896         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1897             cpu_to_le16(sp->fcport->loop_id):
1898             cpu_to_le16(sp->fcport->loop_id << 8);
1899         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1900         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1901             sp->fcport->d_id.b.al_pa);
1902         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1903         /* Implicit: mbx->mbx10 = 0. */
1904 }
1905
1906 static void
1907 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1908 {
1909         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1910         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1911         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1912         logio->vp_index = sp->fcport->vp_idx;
1913 }
1914
1915 static void
1916 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1917 {
1918         struct qla_hw_data *ha = sp->fcport->vha->hw;
1919
1920         mbx->entry_type = MBX_IOCB_TYPE;
1921         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1922         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1923         if (HAS_EXTENDED_IDS(ha)) {
1924                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1925                 mbx->mb10 = cpu_to_le16(BIT_0);
1926         } else {
1927                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1928         }
1929         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1930         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1931         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1932         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1933         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1934 }
1935
1936 static void
1937 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1938 {
1939         uint32_t flags;
1940         unsigned int lun;
1941         struct fc_port *fcport = sp->fcport;
1942         scsi_qla_host_t *vha = fcport->vha;
1943         struct qla_hw_data *ha = vha->hw;
1944         struct srb_ctx *ctx = sp->ctx;
1945         struct srb_iocb *iocb = ctx->u.iocb_cmd;
1946         struct req_que *req = vha->req;
1947
1948         flags = iocb->u.tmf.flags;
1949         lun = iocb->u.tmf.lun;
1950
1951         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1952         tsk->entry_count = 1;
1953         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1954         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
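             /*
              * Per Fibre Channel error-recovery convention the task
              * management timeout is twice R_A_TOV; r_a_tov is presumably
              * kept in 100 ms units here, hence the divide by 10 to reach
              * seconds.
              */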
1955         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1956         tsk->control_flags = cpu_to_le32(flags);
1957         tsk->port_id[0] = fcport->d_id.b.al_pa;
1958         tsk->port_id[1] = fcport->d_id.b.area;
1959         tsk->port_id[2] = fcport->d_id.b.domain;
1960         tsk->vp_index = fcport->vp_idx;
1961
1962         if (flags == TCF_LUN_RESET) {
1963                 int_to_scsilun(lun, &tsk->lun);
1964                 host_to_fcp_swap((uint8_t *)&tsk->lun,
1965                         sizeof(tsk->lun));
1966         }
1967 }
1968
1969 static void
1970 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1971 {
1972         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
1973
1974         els_iocb->entry_type = ELS_IOCB_TYPE;
1975         els_iocb->entry_count = 1;
1976         els_iocb->sys_define = 0;
1977         els_iocb->entry_status = 0;
1978         els_iocb->handle = sp->handle;
1979         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1980         els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1981         els_iocb->vp_index = sp->fcport->vp_idx;
1982         els_iocb->sof_type = EST_SOFI3;
1983         els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1984
1985         els_iocb->opcode =
1986             (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
1987             bsg_job->request->rqst_data.r_els.els_code :
1988             bsg_job->request->rqst_data.h_els.command_code;
1989         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
1990         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
1991         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
1992         els_iocb->control_flags = 0;
1993         els_iocb->rx_byte_count =
1994             cpu_to_le32(bsg_job->reply_payload.payload_len);
1995         els_iocb->tx_byte_count =
1996             cpu_to_le32(bsg_job->request_payload.payload_len);
1997
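             /*
              * Each DMA address is carried as two little-endian 32-bit
              * halves (LSD/MSD), forming a 64-bit data segment descriptor.
              */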
1998         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
1999             (bsg_job->request_payload.sg_list)));
2000         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2001             (bsg_job->request_payload.sg_list)));
2002         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2003             (bsg_job->request_payload.sg_list));
2004
2005         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2006             (bsg_job->reply_payload.sg_list)));
2007         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2008             (bsg_job->reply_payload.sg_list)));
2009         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2010             (bsg_job->reply_payload.sg_list));
2011 }
2012
2013 static void
2014 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2015 {
2016         uint16_t        avail_dsds;
2017         uint32_t        *cur_dsd;
2018         struct scatterlist *sg;
2019         int index;
2020         uint16_t tot_dsds;
2021         scsi_qla_host_t *vha = sp->fcport->vha;
2022         struct qla_hw_data *ha = vha->hw;
2023         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
2024         int loop_iteration = 0;
2025         int cont_iocb_prsnt = 0;
2026         int entry_count = 1;
2027
2028         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2029         ct_iocb->entry_type = CT_IOCB_TYPE;
2030         ct_iocb->entry_status = 0;
2031         ct_iocb->handle1 = sp->handle;
2032         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2033         ct_iocb->status = __constant_cpu_to_le16(0);
2034         ct_iocb->control_flags = __constant_cpu_to_le16(0);
2035         ct_iocb->timeout = 0;
2036         ct_iocb->cmd_dsd_count =
2037             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2038         ct_iocb->total_dsd_count =
2039             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2040         ct_iocb->req_bytecount =
2041             cpu_to_le32(bsg_job->request_payload.payload_len);
2042         ct_iocb->rsp_bytecount =
2043             cpu_to_le32(bsg_job->reply_payload.payload_len);
2044
2045         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2046             (bsg_job->request_payload.sg_list)));
2047         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2048             (bsg_job->request_payload.sg_list)));
2049         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2050
2051         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2052             (bsg_job->reply_payload.sg_list)));
2053         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2054             (bsg_job->reply_payload.sg_list)));
2055         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2056
2057         avail_dsds = 1;
2058         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2059         index = 0;
2060         tot_dsds = bsg_job->reply_payload.sg_cnt;
2061
2062         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2063                 dma_addr_t       sle_dma;
2064                 cont_a64_entry_t *cont_pkt;
2065
2066                 /* Allocate additional continuation packets? */
2067                 if (avail_dsds == 0) {
2068                         /*
2069                          * Five DSDs are available in the Cont.
2070                          * Type 1 IOCB.
2071                          */
2072                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2073                             vha->hw->req_q_map[0]);
2074                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2075                         avail_dsds = 5;
2076                         cont_iocb_prsnt = 1;
2077                         entry_count++;
2078                 }
2079
2080                 sle_dma = sg_dma_address(sg);
2081                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2082                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2083                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2084                 loop_iteration++;
2085                 avail_dsds--;
2086         }
2087         ct_iocb->entry_count = entry_count;
2088 }
2089
2090 static void
2091 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2092 {
2093         uint16_t        avail_dsds;
2094         uint32_t        *cur_dsd;
2095         struct scatterlist *sg;
2096         int index;
2097         uint16_t tot_dsds;
2098         scsi_qla_host_t *vha = sp->fcport->vha;
2099         struct qla_hw_data *ha = vha->hw;
2100         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
2101         int loop_iteration = 0;
2102         int cont_iocb_prsnt = 0;
2103         int entry_count = 1;
2104
2105         ct_iocb->entry_type = CT_IOCB_TYPE;
2106         ct_iocb->entry_status = 0;
2107         ct_iocb->sys_define = 0;
2108         ct_iocb->handle = sp->handle;
2109
2110         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2111         ct_iocb->vp_index = sp->fcport->vp_idx;
2112         ct_iocb->comp_status = __constant_cpu_to_le16(0);
2113
2114         ct_iocb->cmd_dsd_count =
2115             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2116         ct_iocb->timeout = 0;
2117         ct_iocb->rsp_dsd_count =
2118             __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2119         ct_iocb->rsp_byte_count =
2120             cpu_to_le32(bsg_job->reply_payload.payload_len);
2121         ct_iocb->cmd_byte_count =
2122             cpu_to_le32(bsg_job->request_payload.payload_len);
2123         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2124             (bsg_job->request_payload.sg_list)));
2125         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2126            (bsg_job->request_payload.sg_list)));
2127         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2128             (bsg_job->request_payload.sg_list));
2129
2130         avail_dsds = 1;
2131         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2132         index = 0;
2133         tot_dsds = bsg_job->reply_payload.sg_cnt;
2134
2135         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2136                 dma_addr_t       sle_dma;
2137                 cont_a64_entry_t *cont_pkt;
2138
2139                 /* Allocate additional continuation packets? */
2140                 if (avail_dsds == 0) {
2141                         /*
2142                          * Five DSDs are available in the Cont.
2143                          * Type 1 IOCB.
2144                          */
2145                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2146                             ha->req_q_map[0]);
2147                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2148                         avail_dsds = 5;
2149                         cont_iocb_prsnt = 1;
2150                         entry_count++;
2151                 }
2152
2153                 sle_dma = sg_dma_address(sg);
2154                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2155                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2156                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2157                 loop_iteration++;
2158                 avail_dsds--;
2159         }
2160         ct_iocb->entry_count = entry_count;
2161 }
2162
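             /*
              * qla2x00_start_sp() dispatches a control SRB: the hardware
              * lock is held across both IOCB allocation and the final
              * doorbell write so the ring manipulation stays atomic with
              * respect to other submitters.
              */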
2163 int
2164 qla2x00_start_sp(srb_t *sp)
2165 {
2166         int rval;
2167         struct qla_hw_data *ha = sp->fcport->vha->hw;
2168         void *pkt;
2169         struct srb_ctx *ctx = sp->ctx;
2170         unsigned long flags;
2171
2172         rval = QLA_FUNCTION_FAILED;
2173         spin_lock_irqsave(&ha->hardware_lock, flags);
2174         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2175         if (!pkt) {
2176                 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2177                     "qla2x00_alloc_iocbs failed.\n");
2178                 goto done;
2179         }
2180
2181         rval = QLA_SUCCESS;
2182         switch (ctx->type) {
2183         case SRB_LOGIN_CMD:
2184                 IS_FWI2_CAPABLE(ha) ?
2185                     qla24xx_login_iocb(sp, pkt) :
2186                     qla2x00_login_iocb(sp, pkt);
2187                 break;
2188         case SRB_LOGOUT_CMD:
2189                 IS_FWI2_CAPABLE(ha) ?
2190                     qla24xx_logout_iocb(sp, pkt) :
2191                     qla2x00_logout_iocb(sp, pkt);
2192                 break;
2193         case SRB_ELS_CMD_RPT:
2194         case SRB_ELS_CMD_HST:
2195                 qla24xx_els_iocb(sp, pkt);
2196                 break;
2197         case SRB_CT_CMD:
2198                 IS_FWI2_CAPABLE(ha) ?
2199                 qla24xx_ct_iocb(sp, pkt) :
2200                 qla2x00_ct_iocb(sp, pkt);
2201                 break;
2202         case SRB_ADISC_CMD:
2203                 IS_FWI2_CAPABLE(ha) ?
2204                     qla24xx_adisc_iocb(sp, pkt) :
2205                     qla2x00_adisc_iocb(sp, pkt);
2206                 break;
2207         case SRB_TM_CMD:
2208                 qla24xx_tm_iocb(sp, pkt);
2209                 break;
2210         default:
2211                 break;
2212         }
2213
2214         wmb();
2215         qla2x00_start_iocbs(sp);
2216 done:
2217         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2218         return rval;
2219 }