drivers/scsi/qla2xxx/qla_iocb.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/blkdev.h>
10 #include <linux/delay.h>
11
12 #include <scsi/scsi_tcq.h>
13
14 static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
15
16 static void qla25xx_set_que(srb_t *, struct rsp_que **);
17 /**
18  * qla2x00_get_cmd_direction() - Determine control_flags data direction.
19  * @sp: SRB command to process
20  *
21  * Returns the proper CF_* direction based on the command's data direction.
22  */
23 static inline uint16_t
24 qla2x00_get_cmd_direction(srb_t *sp)
25 {
26         uint16_t cflags;
27
28         cflags = 0;
29
30         /* Set transfer direction */
31         if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
32                 cflags = CF_WRITE;
33                 sp->fcport->vha->hw->qla_stats.output_bytes +=
34                     scsi_bufflen(sp->cmd);
35         } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
36                 cflags = CF_READ;
37                 sp->fcport->vha->hw->qla_stats.input_bytes +=
38                     scsi_bufflen(sp->cmd);
39         }
40         return (cflags);
41 }
42
43 /**
44  * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
45  * Continuation Type 0 IOCBs to allocate.
46  *
47  * @dsds: number of data segment descriptors needed
48  *
49  * Returns the number of IOCB entries needed to store @dsds.
50  */
51 uint16_t
52 qla2x00_calc_iocbs_32(uint16_t dsds)
53 {
54         uint16_t iocbs;
55
56         iocbs = 1;
57         if (dsds > 3) {
58                 iocbs += (dsds - 3) / 7;
59                 if ((dsds - 3) % 7)
60                         iocbs++;
61         }
62         return (iocbs);
63 }
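/*
 * A worked example of the arithmetic above (a sketch, not driver code):
 * with 3 DSDs in the Command Type 2 IOCB and 7 per Continuation Type 0
 * IOCB, dsds = 12 gives 3 in the command IOCB and 9 left over;
 * 9 / 7 = 1 full continuation plus a partial one (9 % 7 = 2), so
 * iocbs = 1 + 1 + 1 = 3.
 */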
64
65 /**
66  * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
67  * Continuation Type 1 IOCBs to allocate.
68  *
69  * @dsds: number of data segment descriptors needed
70  *
71  * Returns the number of IOCB entries needed to store @dsds.
72  */
73 uint16_t
74 qla2x00_calc_iocbs_64(uint16_t dsds)
75 {
76         uint16_t iocbs;
77
78         iocbs = 1;
79         if (dsds > 2) {
80                 iocbs += (dsds - 2) / 5;
81                 if ((dsds - 2) % 5)
82                         iocbs++;
83         }
84         return (iocbs);
85 }
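/*
 * The same example for the 64-bit layout: with 2 DSDs in the Command
 * Type 3 IOCB and 5 per Continuation Type 1 IOCB, dsds = 12 gives
 * 2 in the command IOCB and 10 left over; 10 / 5 = 2 continuations
 * with no remainder, so iocbs = 1 + 2 = 3.
 */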
86
87 /**
88  * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
89  * @vha: HA context
90  *
91  * Returns a pointer to the Continuation Type 0 IOCB packet.
92  */
93 static inline cont_entry_t *
94 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
95 {
96         cont_entry_t *cont_pkt;
97         struct req_que *req = vha->req;
98         /* Adjust ring index. */
99         req->ring_index++;
100         if (req->ring_index == req->length) {
101                 req->ring_index = 0;
102                 req->ring_ptr = req->ring;
103         } else {
104                 req->ring_ptr++;
105         }
106
107         cont_pkt = (cont_entry_t *)req->ring_ptr;
108
109         /* Load packet defaults. */
110         *((uint32_t *)(&cont_pkt->entry_type)) =
111             __constant_cpu_to_le32(CONTINUE_TYPE);
112
113         return (cont_pkt);
114 }
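/*
 * Note that neither this helper nor its 64-bit counterpart below checks
 * for free space on the request ring: callers are expected to have
 * reserved room for the whole command up front (see the req->cnt
 * accounting in qla2x00_start_scsi()), so advancing ring_index here
 * without a check is safe.
 */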
115
116 /**
117  * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
118  * @vha: HA context
119  *
120  * Returns a pointer to the Continuation Type 1 IOCB packet.
121  */
122 static inline cont_a64_entry_t *
123 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
124 {
125         cont_a64_entry_t *cont_pkt;
126
127         struct req_que *req = vha->req;
128         /* Adjust ring index. */
129         req->ring_index++;
130         if (req->ring_index == req->length) {
131                 req->ring_index = 0;
132                 req->ring_ptr = req->ring;
133         } else {
134                 req->ring_ptr++;
135         }
136
137         cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
138
139         /* Load packet defaults. */
140         *((uint32_t *)(&cont_pkt->entry_type)) =
141             __constant_cpu_to_le32(CONTINUE_A64_TYPE);
142
143         return (cont_pkt);
144 }
145
146 static inline int
147 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
148 {
149         uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);
150
151         /* We only support T10 DIF right now */
152         if (guard != SHOST_DIX_GUARD_CRC) {
153                 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
154                     "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
155                 return 0;
156         }
157
158         /* We always use DIF Bundling for best performance */
159         *fw_prot_opts = 0;
160
161         /* Translate the SCSI protection operation to a protection opcode */
162         switch (scsi_get_prot_op(sp->cmd)) {
163         case SCSI_PROT_READ_STRIP:
164                 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
165                 break;
166         case SCSI_PROT_WRITE_INSERT:
167                 *fw_prot_opts |= PO_MODE_DIF_INSERT;
168                 break;
169         case SCSI_PROT_READ_INSERT:
170                 *fw_prot_opts |= PO_MODE_DIF_INSERT;
171                 break;
172         case SCSI_PROT_WRITE_STRIP:
173                 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
174                 break;
175         case SCSI_PROT_READ_PASS:
176                 *fw_prot_opts |= PO_MODE_DIF_PASS;
177                 break;
178         case SCSI_PROT_WRITE_PASS:
179                 *fw_prot_opts |= PO_MODE_DIF_PASS;
180                 break;
181         default:        /* Normal Request */
182                 *fw_prot_opts |= PO_MODE_DIF_PASS;
183                 break;
184         }
185
186         return scsi_prot_sg_count(sp->cmd);
187 }
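/*
 * Usage sketch for the helper above (illustrative only): the return
 * value is the number of protection data segments to hand to the
 * firmware, and 0 means "skip DIF processing" -- either the guard type
 * is unsupported or the command carries no protection SG entries.
 *
 *      uint16_t fw_prot_opts = 0;
 *      int nr_prot_dsds = qla24xx_configure_prot_mode(sp, &fw_prot_opts);
 */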
188
189 /**
190  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
191  * capable IOCB types.
192  *
193  * @sp: SRB command to process
194  * @cmd_pkt: Command type 2 IOCB
195  * @tot_dsds: Total number of segments to transfer
196  */
197 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
198     uint16_t tot_dsds)
199 {
200         uint16_t        avail_dsds;
201         uint32_t        *cur_dsd;
202         scsi_qla_host_t *vha;
203         struct scsi_cmnd *cmd;
204         struct scatterlist *sg;
205         int i;
206
207         cmd = sp->cmd;
208
209         /* Update entry type to indicate Command Type 2 IOCB */
210         *((uint32_t *)(&cmd_pkt->entry_type)) =
211             __constant_cpu_to_le32(COMMAND_TYPE);
212
213         /* No data transfer */
214         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
215                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
216                 return;
217         }
218
219         vha = sp->fcport->vha;
220         cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
221
222         /* Three DSDs are available in the Command Type 2 IOCB */
223         avail_dsds = 3;
224         cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
225
226         /* Load data segments */
227         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
228                 cont_entry_t *cont_pkt;
229
230                 /* Allocate additional continuation packets? */
231                 if (avail_dsds == 0) {
232                         /*
233                          * Seven DSDs are available in the Continuation
234                          * Type 0 IOCB.
235                          */
236                         cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
237                         cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
238                         avail_dsds = 7;
239                 }
240
241                 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
242                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
243                 avail_dsds--;
244         }
245 }
246
247 /**
248  * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
249  * capable IOCB types.
250  *
251  * @sp: SRB command to process
252  * @cmd_pkt: Command type 3 IOCB
253  * @tot_dsds: Total number of segments to transfer
254  */
255 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
256     uint16_t tot_dsds)
257 {
258         uint16_t        avail_dsds;
259         uint32_t        *cur_dsd;
260         scsi_qla_host_t *vha;
261         struct scsi_cmnd *cmd;
262         struct scatterlist *sg;
263         int i;
264
265         cmd = sp->cmd;
266
267         /* Update entry type to indicate Command Type 3 IOCB */
268         *((uint32_t *)(&cmd_pkt->entry_type)) =
269             __constant_cpu_to_le32(COMMAND_A64_TYPE);
270
271         /* No data transfer */
272         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
273                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
274                 return;
275         }
276
277         vha = sp->fcport->vha;
278         cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
279
280         /* Two DSDs are available in the Command Type 3 IOCB */
281         avail_dsds = 2;
282         cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
283
284         /* Load data segments */
285         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
286                 dma_addr_t      sle_dma;
287                 cont_a64_entry_t *cont_pkt;
288
289                 /* Allocate additional continuation packets? */
290                 if (avail_dsds == 0) {
291                         /*
292                          * Five DSDs are available in the Continuation
293                          * Type 1 IOCB.
294                          */
295                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
296                         cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
297                         avail_dsds = 5;
298                 }
299
300                 sle_dma = sg_dma_address(sg);
301                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
302                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
303                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
304                 avail_dsds--;
305         }
306 }
307
308 /**
309  * qla2x00_start_scsi() - Send a SCSI command to the ISP
310  * @sp: command to send to the ISP
311  *
312  * Returns non-zero if a failure occurred, else zero.
313  */
314 int
315 qla2x00_start_scsi(srb_t *sp)
316 {
317         int             ret, nseg;
318         unsigned long   flags;
319         scsi_qla_host_t *vha;
320         struct scsi_cmnd *cmd;
321         uint32_t        *clr_ptr;
322         uint32_t        index;
323         uint32_t        handle;
324         cmd_entry_t     *cmd_pkt;
325         uint16_t        cnt;
326         uint16_t        req_cnt;
327         uint16_t        tot_dsds;
328         struct device_reg_2xxx __iomem *reg;
329         struct qla_hw_data *ha;
330         struct req_que *req;
331         struct rsp_que *rsp;
332         char            tag[2];
333
334         /* Setup device pointers. */
335         ret = 0;
336         vha = sp->fcport->vha;
337         ha = vha->hw;
338         reg = &ha->iobase->isp;
339         cmd = sp->cmd;
340         req = ha->req_q_map[0];
341         rsp = ha->rsp_q_map[0];
342         /* So we know we haven't pci_map'ed anything yet */
343         tot_dsds = 0;
344
345         /* Send marker if required */
346         if (vha->marker_needed != 0) {
347                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
348                     QLA_SUCCESS) {
349                         return (QLA_FUNCTION_FAILED);
350                 }
351                 vha->marker_needed = 0;
352         }
353
354         /* Acquire ring specific lock */
355         spin_lock_irqsave(&ha->hardware_lock, flags);
356
357         /* Check for room in outstanding command list. */
358         handle = req->current_outstanding_cmd;
359         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
360                 handle++;
361                 if (handle == MAX_OUTSTANDING_COMMANDS)
362                         handle = 1;
363                 if (!req->outstanding_cmds[handle])
364                         break;
365         }
366         if (index == MAX_OUTSTANDING_COMMANDS)
367                 goto queuing_error;
368
369         /* Map the sg table so we have an accurate count of sg entries needed */
370         if (scsi_sg_count(cmd)) {
371                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
372                     scsi_sg_count(cmd), cmd->sc_data_direction);
373                 if (unlikely(!nseg))
374                         goto queuing_error;
375         } else
376                 nseg = 0;
377
378         tot_dsds = nseg;
379
380         /* Calculate the number of request entries needed. */
381         req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
382         if (req->cnt < (req_cnt + 2)) {
383                 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
384                 if (req->ring_index < cnt)
385                         req->cnt = cnt - req->ring_index;
386                 else
387                         req->cnt = req->length -
388                             (req->ring_index - cnt);
389         }
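        /*
         * A worked example of the free-slot computation above, with
         * assumed numbers: req->length = 128, req->ring_index = 100 and
         * a chip out pointer cnt = 20.  Since ring_index >= cnt the free
         * entries wrap past the end of the ring:
         *
         *      req->cnt = 128 - (100 - 20) = 48
         *
         * The "+ 2" slack below keeps the ring from ever filling
         * completely, so a full ring cannot be mistaken for an empty one.
         */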
390         if (req->cnt < (req_cnt + 2))
391                 goto queuing_error;
392
393         /* Build command packet */
394         req->current_outstanding_cmd = handle;
395         req->outstanding_cmds[handle] = sp;
396         sp->handle = handle;
397         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
398         req->cnt -= req_cnt;
399
400         cmd_pkt = (cmd_entry_t *)req->ring_ptr;
401         cmd_pkt->handle = handle;
402         /* Zero out remaining portion of packet. */
403         clr_ptr = (uint32_t *)cmd_pkt + 2;
404         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
405         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
406
407         /* Set target ID and LUN number*/
408         SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
409         cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
410
411         /* Update tagged queuing modifier */
412         if (scsi_populate_tag_msg(cmd, tag)) {
413                 switch (tag[0]) {
414                 case HEAD_OF_QUEUE_TAG:
415                         cmd_pkt->control_flags =
416                             __constant_cpu_to_le16(CF_HEAD_TAG);
417                         break;
418                 case ORDERED_QUEUE_TAG:
419                         cmd_pkt->control_flags =
420                             __constant_cpu_to_le16(CF_ORDERED_TAG);
421                         break;
422                 default:
423                         cmd_pkt->control_flags =
424                             __constant_cpu_to_le16(CF_SIMPLE_TAG);
425                         break;
426                 }
427         }
428
429         /* Load SCSI command packet. */
430         memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
431         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
432
433         /* Build IOCB segments */
434         ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
435
436         /* Set total data segment count. */
437         cmd_pkt->entry_count = (uint8_t)req_cnt;
438         wmb();
439
440         /* Adjust ring index. */
441         req->ring_index++;
442         if (req->ring_index == req->length) {
443                 req->ring_index = 0;
444                 req->ring_ptr = req->ring;
445         } else
446                 req->ring_ptr++;
447
448         sp->flags |= SRB_DMA_VALID;
449
450         /* Set chip new ring index. */
451         WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
452         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */
453
454         /* Manage unprocessed RIO/ZIO commands in response queue. */
455         if (vha->flags.process_response_queue &&
456             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
457                 qla2x00_process_response_queue(rsp);
458
459         spin_unlock_irqrestore(&ha->hardware_lock, flags);
460         return (QLA_SUCCESS);
461
462 queuing_error:
463         if (tot_dsds)
464                 scsi_dma_unmap(cmd);
465
466         spin_unlock_irqrestore(&ha->hardware_lock, flags);
467
468         return (QLA_FUNCTION_FAILED);
469 }
470
471 /**
472  * qla2x00_marker() - Send a marker IOCB to the firmware.
473  * @vha: HA context
474  * @loop_id: loop ID
475  * @lun: LUN
476  * @type: marker modifier
477  *
478  * Can be called from both normal and interrupt context.
479  *
480  * Returns non-zero if a failure occurred, else zero.
481  */
482 static int
483 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
484                         struct rsp_que *rsp, uint16_t loop_id,
485                         uint16_t lun, uint8_t type)
486 {
487         mrk_entry_t *mrk;
488         struct mrk_entry_24xx *mrk24;
489         struct qla_hw_data *ha = vha->hw;
490         scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
491
492         mrk24 = NULL;
493         mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
494         if (mrk == NULL) {
495                 ql_log(ql_log_warn, base_vha, 0x3026,
496                     "Failed to allocate Marker IOCB.\n");
497
498                 return (QLA_FUNCTION_FAILED);
499         }
500
501         mrk->entry_type = MARKER_TYPE;
502         mrk->modifier = type;
503         if (type != MK_SYNC_ALL) {
504                 if (IS_FWI2_CAPABLE(ha)) {
505                         mrk24 = (struct mrk_entry_24xx *) mrk;
506                         mrk24->nport_handle = cpu_to_le16(loop_id);
507                         mrk24->lun[1] = LSB(lun);
508                         mrk24->lun[2] = MSB(lun);
509                         host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
510                         mrk24->vp_index = vha->vp_idx;
511                         mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
512                 } else {
513                         SET_TARGET_ID(ha, mrk->target, loop_id);
514                         mrk->lun = cpu_to_le16(lun);
515                 }
516         }
517         wmb();
518
519         qla2x00_isp_cmd(vha, req);
520
521         return (QLA_SUCCESS);
522 }
523
524 int
525 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
526                 struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
527                 uint8_t type)
528 {
529         int ret;
530         unsigned long flags = 0;
531
532         spin_lock_irqsave(&vha->hw->hardware_lock, flags);
533         ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
534         spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
535
536         return (ret);
537 }
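/*
 * The two functions above follow the usual locked/unlocked split:
 * __qla2x00_marker() must be called with hw->hardware_lock already held
 * (for callers that are already inside a ring-building critical
 * section), while qla2x00_marker() is the standalone variant that takes
 * and releases the lock around the call.
 */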
538
539 /**
540  * qla2x00_isp_cmd() - Modify the request ring pointer.
541  * @vha: HA context
542  *
543  * Note: The caller must hold the hardware lock before calling this routine.
544  */
545 static void
546 qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
547 {
548         struct qla_hw_data *ha = vha->hw;
549         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
550         struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
551
552         ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
553             "IOCB data:\n");
554         ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
555             (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
556
557         /* Adjust ring index. */
558         req->ring_index++;
559         if (req->ring_index == req->length) {
560                 req->ring_index = 0;
561                 req->ring_ptr = req->ring;
562         } else
563                 req->ring_ptr++;
564
565         /* Set chip new ring index. */
566         if (IS_QLA82XX(ha)) {
567                 uint32_t dbval = 0x04 | (ha->portnum << 5);
568
569                 /* write, read and verify logic */
570                 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
571                 if (ql2xdbwr)
572                         qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
573                 else {
574                         WRT_REG_DWORD(
575                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
576                                 dbval);
577                         wmb();
578                         while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
579                                 WRT_REG_DWORD((unsigned long __iomem *)
580                                         ha->nxdb_wr_ptr, dbval);
581                                 wmb();
582                         }
583                 }
584         } else if (ha->mqenable) {
585                 /* Set chip new ring index. */
586                 WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
587                 RD_REG_DWORD(&ioreg->hccr);
588         } else {
589                 if (IS_FWI2_CAPABLE(ha)) {
590                         WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
591                         RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
592                 } else {
593                         WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
594                                 req->ring_index);
595                         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
596                 }
597         }
598
599 }
600
601 /**
602  * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
603  * Continuation Type 1 IOCBs to allocate.
604  *
605  * @dsds: number of data segment descriptors needed
606  *
607  * Returns the number of IOCB entries needed to store @dsds.
608  */
609 inline uint16_t
610 qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
611 {
612         uint16_t iocbs;
613
614         iocbs = 1;
615         if (dsds > 1) {
616                 iocbs += (dsds - 1) / 5;
617                 if ((dsds - 1) % 5)
618                         iocbs++;
619         }
620         return iocbs;
621 }
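/*
 * Worked example for the Command Type 7 layout: with 1 DSD in the
 * command IOCB and 5 per Continuation Type 1 IOCB, dsds = 11 gives
 * 1 + (10 / 5) = 3 IOCBs, with no extra entry since 10 % 5 = 0.
 */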
622
623 /**
624  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
625  * IOCB types.
626  *
627  * @sp: SRB command to process
628  * @cmd_pkt: Command type 7 IOCB
629  * @tot_dsds: Total number of segments to transfer
630  */
631 inline void
632 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
633     uint16_t tot_dsds)
634 {
635         uint16_t        avail_dsds;
636         uint32_t        *cur_dsd;
637         scsi_qla_host_t *vha;
638         struct scsi_cmnd *cmd;
639         struct scatterlist *sg;
640         int i;
641         struct req_que *req;
642
643         cmd = sp->cmd;
644
645         /* Update entry type to indicate Command Type 7 IOCB */
646         *((uint32_t *)(&cmd_pkt->entry_type)) =
647             __constant_cpu_to_le32(COMMAND_TYPE_7);
648
649         /* No data transfer */
650         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
651                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
652                 return;
653         }
654
655         vha = sp->fcport->vha;
656         req = vha->req;
657
658         /* Set transfer direction */
659         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
660                 cmd_pkt->task_mgmt_flags =
661                     __constant_cpu_to_le16(TMF_WRITE_DATA);
662                 sp->fcport->vha->hw->qla_stats.output_bytes +=
663                     scsi_bufflen(sp->cmd);
664         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
665                 cmd_pkt->task_mgmt_flags =
666                     __constant_cpu_to_le16(TMF_READ_DATA);
667                 sp->fcport->vha->hw->qla_stats.input_bytes +=
668                     scsi_bufflen(sp->cmd);
669         }
670
671         /* One DSD is available in the Command Type 7 IOCB */
672         avail_dsds = 1;
673         cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
674
675         /* Load data segments */
676
677         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
678                 dma_addr_t      sle_dma;
679                 cont_a64_entry_t *cont_pkt;
680
681                 /* Allocate additional continuation packets? */
682                 if (avail_dsds == 0) {
683                         /*
684                          * Five DSDs are available in the Continuation
685                          * Type 1 IOCB.
686                          */
687                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
688                         cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
689                         avail_dsds = 5;
690                 }
691
692                 sle_dma = sg_dma_address(sg);
693                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
694                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
695                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
696                 avail_dsds--;
697         }
698 }
699
700 struct fw_dif_context {
701         uint32_t ref_tag;
702         uint16_t app_tag;
703         uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask*/
704         uint8_t app_tag_mask[2];        /* Validation/Replacement Mask*/
705 };
706
707 /*
708  * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
709  *
710  */
711 static inline void
712 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
713     unsigned int protcnt)
714 {
715         struct scsi_cmnd *cmd = sp->cmd;
716         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
717
718         switch (scsi_get_prot_type(cmd)) {
719         case SCSI_PROT_DIF_TYPE0:
720                 /*
721                  * No check for ql2xenablehba_err_chk, as it would be an
722                  * I/O error if hba tag generation is not done.
723                  */
724                 pkt->ref_tag = cpu_to_le32((uint32_t)
725                     (0xffffffff & scsi_get_lba(cmd)));
726
727                 if (!qla2x00_hba_err_chk_enabled(sp))
728                         break;
729
730                 pkt->ref_tag_mask[0] = 0xff;
731                 pkt->ref_tag_mask[1] = 0xff;
732                 pkt->ref_tag_mask[2] = 0xff;
733                 pkt->ref_tag_mask[3] = 0xff;
734                 break;
735
736         /*
737          * For TYPE 2 protection: a 16 bit GUARD tag plus a 32 bit REF tag
738          * that must match the LBA in the CDB plus the block offset (N).
739          */
740         case SCSI_PROT_DIF_TYPE2:
741                 pkt->app_tag = __constant_cpu_to_le16(0);
742                 pkt->app_tag_mask[0] = 0x0;
743                 pkt->app_tag_mask[1] = 0x0;
744
745                 pkt->ref_tag = cpu_to_le32((uint32_t)
746                     (0xffffffff & scsi_get_lba(cmd)));
747
748                 if (!qla2x00_hba_err_chk_enabled(sp))
749                         break;
750
751                 /* enable ALL bytes of the ref tag */
752                 pkt->ref_tag_mask[0] = 0xff;
753                 pkt->ref_tag_mask[1] = 0xff;
754                 pkt->ref_tag_mask[2] = 0xff;
755                 pkt->ref_tag_mask[3] = 0xff;
756                 break;
757
758         /* For Type 3 protection: 16 bit GUARD only */
759         case SCSI_PROT_DIF_TYPE3:
760                 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
761                         pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
762                                                                 0x00;
763                 break;
764
765         /*
766  * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
767          * 16 bit app tag.
768          */
769         case SCSI_PROT_DIF_TYPE1:
770                 pkt->ref_tag = cpu_to_le32((uint32_t)
771                     (0xffffffff & scsi_get_lba(cmd)));
772                 pkt->app_tag = __constant_cpu_to_le16(0);
773                 pkt->app_tag_mask[0] = 0x0;
774                 pkt->app_tag_mask[1] = 0x0;
775
776                 if (!qla2x00_hba_err_chk_enabled(sp))
777                         break;
778
779                 /* enable ALL bytes of the ref tag */
780                 pkt->ref_tag_mask[0] = 0xff;
781                 pkt->ref_tag_mask[1] = 0xff;
782                 pkt->ref_tag_mask[2] = 0xff;
783                 pkt->ref_tag_mask[3] = 0xff;
784                 break;
785         }
786
787         ql_dbg(ql_dbg_io, vha, 0x3009,
788             "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
789             "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
790             pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
791             scsi_get_prot_type(cmd), cmd);
792 }
793
794 struct qla2_sgx {
795         dma_addr_t              dma_addr;       /* OUT */
796         uint32_t                dma_len;        /* OUT */
797
798         uint32_t                tot_bytes;      /* IN */
799         struct scatterlist      *cur_sg;        /* IN */
800
801         /* for bookkeeping; zero this struct on the initial invocation */
802         uint32_t                bytes_consumed;
803         uint32_t                num_bytes;
804         uint32_t                tot_partial;
805
806         /* for debugging */
807         uint32_t                num_sg;
808         srb_t                   *sp;
809 };
810
811 static int
812 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
813         uint32_t *partial)
814 {
815         struct scatterlist *sg;
816         uint32_t cumulative_partial, sg_len;
817         dma_addr_t sg_dma_addr;
818
819         if (sgx->num_bytes == sgx->tot_bytes)
820                 return 0;
821
822         sg = sgx->cur_sg;
823         cumulative_partial = sgx->tot_partial;
824
825         sg_dma_addr = sg_dma_address(sg);
826         sg_len = sg_dma_len(sg);
827
828         sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
829
830         if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
831                 sgx->dma_len = (blk_sz - cumulative_partial);
832                 sgx->tot_partial = 0;
833                 sgx->num_bytes += blk_sz;
834                 *partial = 0;
835         } else {
836                 sgx->dma_len = sg_len - sgx->bytes_consumed;
837                 sgx->tot_partial += sgx->dma_len;
838                 *partial = 1;
839         }
840
841         sgx->bytes_consumed += sgx->dma_len;
842
843         if (sg_len == sgx->bytes_consumed) {
844                 sg = sg_next(sg);
845                 sgx->num_sg++;
846                 sgx->cur_sg = sg;
847                 sgx->bytes_consumed = 0;
848         }
849
850         return 1;
851 }
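/*
 * A worked example of the block walker above, with assumed numbers:
 * blk_sz = 512 and two DMA-mapped data segments of 700 and 324 bytes
 * (1024 bytes = 2 blocks total).
 *
 *      call 1: 700 bytes left in sg >= 512  -> dma_len = 512, partial = 0
 *      call 2: 188 bytes left in sg < 512   -> dma_len = 188, partial = 1
 *      call 3: 188 + 324 >= 512             -> dma_len = 324, partial = 0
 *      call 4: num_bytes == tot_bytes       -> returns 0, walk complete
 *
 * Each partial = 0 return marks the end of one protection interval,
 * which is when the caller emits the matching 8-byte DIF tuple.
 */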
852
853 static int
854 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
855         uint32_t *dsd, uint16_t tot_dsds)
856 {
857         void *next_dsd;
858         uint8_t avail_dsds = 0;
859         uint32_t dsd_list_len;
860         struct dsd_dma *dsd_ptr;
861         struct scatterlist *sg_prot;
862         uint32_t *cur_dsd = dsd;
863         uint16_t        used_dsds = tot_dsds;
864
865         uint32_t        prot_int;
866         uint32_t        partial;
867         struct qla2_sgx sgx;
868         dma_addr_t      sle_dma;
869         uint32_t        sle_dma_len, tot_prot_dma_len = 0;
870         struct scsi_cmnd *cmd = sp->cmd;
871
872         prot_int = cmd->device->sector_size;
873
874         memset(&sgx, 0, sizeof(struct qla2_sgx));
875         sgx.tot_bytes = scsi_bufflen(sp->cmd);
876         sgx.cur_sg = scsi_sglist(sp->cmd);
877         sgx.sp = sp;
878
879         sg_prot = scsi_prot_sglist(sp->cmd);
880
881         while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
882
883                 sle_dma = sgx.dma_addr;
884                 sle_dma_len = sgx.dma_len;
885 alloc_and_fill:
886                 /* Allocate additional continuation packets? */
887                 if (avail_dsds == 0) {
888                         avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
889                                         QLA_DSDS_PER_IOCB : used_dsds;
890                         dsd_list_len = (avail_dsds + 1) * 12;
891                         used_dsds -= avail_dsds;
892
893                         /* allocate tracking DS */
894                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
895                         if (!dsd_ptr)
896                                 return 1;
897
898                         /* allocate new list */
899                         dsd_ptr->dsd_addr = next_dsd =
900                             dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
901                                 &dsd_ptr->dsd_list_dma);
902
903                         if (!next_dsd) {
904                                 /*
905                                  * Need to clean up only this dsd_ptr; the
906                                  * rest will be done by sp_free_dma().
907                                  */
908                                 kfree(dsd_ptr);
909                                 return 1;
910                         }
911
912                         list_add_tail(&dsd_ptr->list,
913                             &((struct crc_context *)sp->ctx)->dsd_list);
914
915                         sp->flags |= SRB_CRC_CTX_DSD_VALID;
916
917                         /* add new list to cmd iocb or last list */
918                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
919                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
920                         *cur_dsd++ = cpu_to_le32(dsd_list_len);
921                         cur_dsd = (uint32_t *)next_dsd;
922                 }
923                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
924                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
925                 *cur_dsd++ = cpu_to_le32(sle_dma_len);
926                 avail_dsds--;
927
928                 if (partial == 0) {
929                         /* Got a full protection interval */
930                         sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
931                         sle_dma_len = 8;
932
933                         tot_prot_dma_len += sle_dma_len;
934                         if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
935                                 tot_prot_dma_len = 0;
936                                 sg_prot = sg_next(sg_prot);
937                         }
938
939                         partial = 1; /* So as to not re-enter this block */
940                         goto alloc_and_fill;
941                 }
942         }
943         /* Null termination */
944         *cur_dsd++ = 0;
945         *cur_dsd++ = 0;
946         *cur_dsd++ = 0;
947         return 0;
948 }
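/*
 * The DSD chaining scheme shared by the three sg walkers here: each
 * firmware-visible list entry is three 32-bit words (address low,
 * address high, length), i.e. 12 bytes, which is where the
 * "(avail_dsds + 1) * 12" list length comes from -- the extra slot is
 * consumed either by the triple that chains to the next list or by the
 * null terminator on the last one.
 */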
949 static int
950 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
951         uint16_t tot_dsds)
952 {
953         void *next_dsd;
954         uint8_t avail_dsds = 0;
955         uint32_t dsd_list_len;
956         struct dsd_dma *dsd_ptr;
957         struct scatterlist *sg;
958         uint32_t *cur_dsd = dsd;
959         int     i;
960         uint16_t        used_dsds = tot_dsds;
961         scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
962
963         uint8_t         *cp;
964
965         scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
966                 dma_addr_t      sle_dma;
967
968                 /* Allocate additional continuation packets? */
969                 if (avail_dsds == 0) {
970                         avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
971                                         QLA_DSDS_PER_IOCB : used_dsds;
972                         dsd_list_len = (avail_dsds + 1) * 12;
973                         used_dsds -= avail_dsds;
974
975                         /* allocate tracking DS */
976                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
977                         if (!dsd_ptr)
978                                 return 1;
979
980                         /* allocate new list */
981                         dsd_ptr->dsd_addr = next_dsd =
982                             dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
983                                 &dsd_ptr->dsd_list_dma);
984
985                         if (!next_dsd) {
986                                 /*
987                                  * Need to clean up only this dsd_ptr; the
988                                  * rest will be done by sp_free_dma().
989                                  */
990                                 kfree(dsd_ptr);
991                                 return 1;
992                         }
993
994                         list_add_tail(&dsd_ptr->list,
995                             &((struct crc_context *)sp->ctx)->dsd_list);
996
997                         sp->flags |= SRB_CRC_CTX_DSD_VALID;
998
999                         /* add new list to cmd iocb or last list */
1000                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1001                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1002                         *cur_dsd++ = cpu_to_le32(dsd_list_len);
1003                         cur_dsd = (uint32_t *)next_dsd;
1004                 }
1005                 sle_dma = sg_dma_address(sg);
1006                 ql_dbg(ql_dbg_io, vha, 0x300a,
1007                     "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
1008                     i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
1009                     sp->cmd);
1010                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1011                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1012                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1013                 avail_dsds--;
1014
1015                 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
1016                         cp = page_address(sg_page(sg)) + sg->offset;
1017                         ql_dbg(ql_dbg_io, vha, 0x300b,
1018                             "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
1019                 }
1020         }
1021         /* Null termination */
1022         *cur_dsd++ = 0;
1023         *cur_dsd++ = 0;
1024         *cur_dsd++ = 0;
1025         return 0;
1026 }
1027
1028 static int
1029 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1030                                                         uint32_t *dsd,
1031         uint16_t tot_dsds)
1032 {
1033         void *next_dsd;
1034         uint8_t avail_dsds = 0;
1035         uint32_t dsd_list_len;
1036         struct dsd_dma *dsd_ptr;
1037         struct scatterlist *sg;
1038         int     i;
1039         struct scsi_cmnd *cmd;
1040         uint32_t *cur_dsd = dsd;
1041         uint16_t        used_dsds = tot_dsds;
1042         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1043         uint8_t         *cp;
1044
1045
1046         cmd = sp->cmd;
1047         scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
1048                 dma_addr_t      sle_dma;
1049
1050                 /* Allocate additional continuation packets? */
1051                 if (avail_dsds == 0) {
1052                         avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1053                                                 QLA_DSDS_PER_IOCB : used_dsds;
1054                         dsd_list_len = (avail_dsds + 1) * 12;
1055                         used_dsds -= avail_dsds;
1056
1057                         /* allocate tracking DS */
1058                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1059                         if (!dsd_ptr)
1060                                 return 1;
1061
1062                         /* allocate new list */
1063                         dsd_ptr->dsd_addr = next_dsd =
1064                             dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1065                                 &dsd_ptr->dsd_list_dma);
1066
1067                         if (!next_dsd) {
1068                                 /*
1069                                  * Need to clean up only this dsd_ptr; the
1070                                  * rest will be done by sp_free_dma().
1071                                  */
1072                                 kfree(dsd_ptr);
1073                                 return 1;
1074                         }
1075
1076                         list_add_tail(&dsd_ptr->list,
1077                             &((struct crc_context *)sp->ctx)->dsd_list);
1078
1079                         sp->flags |= SRB_CRC_CTX_DSD_VALID;
1080
1081                         /* add new list to cmd iocb or last list */
1082                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1083                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1084                         *cur_dsd++ = cpu_to_le32(dsd_list_len);
1085                         cur_dsd = (uint32_t *)next_dsd;
1086                 }
1087                 sle_dma = sg_dma_address(sg);
1088                 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
1089                         ql_dbg(ql_dbg_io, vha, 0x3027,
1090                             "%s(): %p, sg_entry %d - "
1091             "addr=0x%x 0x%x, len=%d.\n",
1092                             __func__, cur_dsd, i,
1093                             LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
1094                 }
1095                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1096                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1097                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1098
1099                 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
1100                         cp = page_address(sg_page(sg)) + sg->offset;
1101                         ql_dbg(ql_dbg_io, vha, 0x3028,
1102                             "%s(): Protection Data buffer = %p.\n", __func__,
1103                             cp);
1104                 }
1105                 avail_dsds--;
1106         }
1107         /* Null termination */
1108         *cur_dsd++ = 0;
1109         *cur_dsd++ = 0;
1110         *cur_dsd++ = 0;
1111         return 0;
1112 }
1113
1114 /**
1115  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1116  *                                    Type CRC_2 IOCB types.
1117  *
1118  * @sp: SRB command to process
1119  * @cmd_pkt: Command type CRC_2 IOCB
1120  * @tot_dsds: Total number of segments to transfer
1121  */
1122 static inline int
1123 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1124     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1125 {
1126         uint32_t                *cur_dsd, *fcp_dl;
1127         scsi_qla_host_t         *vha;
1128         struct scsi_cmnd        *cmd;
1129         struct scatterlist      *cur_seg;
1130         int                     sgc;
1131         uint32_t                total_bytes = 0;
1132         uint32_t                data_bytes;
1133         uint32_t                dif_bytes;
1134         uint8_t                 bundling = 1;
1135         uint16_t                blk_size;
1136         uint8_t                 *clr_ptr;
1137         struct crc_context      *crc_ctx_pkt = NULL;
1138         struct qla_hw_data      *ha;
1139         uint8_t                 additional_fcpcdb_len;
1140         uint16_t                fcp_cmnd_len;
1141         struct fcp_cmnd         *fcp_cmnd;
1142         dma_addr_t              crc_ctx_dma;
1143         char                    tag[2];
1144
1145         cmd = sp->cmd;
1146
1147         sgc = 0;
1148         /* Update entry type to indicate Command Type CRC_2 IOCB */
1149         *((uint32_t *)(&cmd_pkt->entry_type)) =
1150             __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
1151
1152         vha = sp->fcport->vha;
1153         ha = vha->hw;
1154
1155         /* No data transfer */
1156         data_bytes = scsi_bufflen(cmd);
1157         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1158                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1159                 return QLA_SUCCESS;
1160         }
1161
1162         cmd_pkt->vp_index = sp->fcport->vp_idx;
1163
1164         /* Set transfer direction */
1165         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1166                 cmd_pkt->control_flags =
1167                     __constant_cpu_to_le16(CF_WRITE_DATA);
1168         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1169                 cmd_pkt->control_flags =
1170                     __constant_cpu_to_le16(CF_READ_DATA);
1171         }
1172
1173         if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
1174             (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
1175             (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
1176             (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
1177                 bundling = 0;
1178
1179         /* Allocate CRC context from global pool */
1180         crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
1181             GFP_ATOMIC, &crc_ctx_dma);
1182
1183         if (!crc_ctx_pkt)
1184                 goto crc_queuing_error;
1185
1186         /* Zero out CTX area. */
1187         clr_ptr = (uint8_t *)crc_ctx_pkt;
1188         memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1189
1190         crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1191
1192         sp->flags |= SRB_CRC_CTX_DMA_VALID;
1193
1194         /* Set handle */
1195         crc_ctx_pkt->handle = cmd_pkt->handle;
1196
1197         INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1198
1199         qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1200             &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1201
1202         cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1203         cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1204         cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1205
1206         /* Determine SCSI command length -- align to 4 byte boundary */
1207         if (cmd->cmd_len > 16) {
1208                 additional_fcpcdb_len = cmd->cmd_len - 16;
1209                 if ((cmd->cmd_len % 4) != 0) {
1210                         /* SCSI cmd > 16 bytes must be multiple of 4 */
1211                         goto crc_queuing_error;
1212                 }
1213                 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1214         } else {
1215                 additional_fcpcdb_len = 0;
1216                 fcp_cmnd_len = 12 + 16 + 4;
1217         }
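        /*
         * The fcp_cmnd_len arithmetic above follows the FCP_CMND IU
         * layout: an 8-byte LUN plus 4 bytes of task attribute/flag
         * fields (12 bytes), the CDB (the full 16-byte field is used
         * for short CDBs), then the 4-byte FCP_DL data length filled in
         * later through the fcp_dl pointer.  A plain 16-byte CDB thus
         * gives 12 + 16 + 4 = 32 bytes.
         */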
1218
1219         fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1220
1221         fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1222         if (cmd->sc_data_direction == DMA_TO_DEVICE)
1223                 fcp_cmnd->additional_cdb_len |= 1;
1224         else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1225                 fcp_cmnd->additional_cdb_len |= 2;
1226
1227         int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
1228         memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1229         cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1230         cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1231             LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1232         cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1233             MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1234         fcp_cmnd->task_management = 0;
1235
1236         /*
1237          * Update tagged queuing modifier if using command tag queuing
1238          */
1239         if (scsi_populate_tag_msg(cmd, tag)) {
1240                 switch (tag[0]) {
1241                 case HEAD_OF_QUEUE_TAG:
1242                     fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
1243                     break;
1244                 case ORDERED_QUEUE_TAG:
1245                     fcp_cmnd->task_attribute = TSK_ORDERED;
1246                     break;
1247                 default:
1248                     fcp_cmnd->task_attribute = 0;
1249                     break;
1250                 }
1251         } else {
1252                 fcp_cmnd->task_attribute = 0;
1253         }
1254
1255         cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1256
1257         /* Compute DIF length and adjust data length to include protection */
1258         dif_bytes = 0;
1259         blk_size = cmd->device->sector_size;
1260         dif_bytes = (data_bytes / blk_size) * 8;
1261
1262         switch (scsi_get_prot_op(sp->cmd)) {
1263         case SCSI_PROT_READ_INSERT:
1264         case SCSI_PROT_WRITE_STRIP:
1265             total_bytes = data_bytes;
1266             data_bytes += dif_bytes;
1267             break;
1268
1269         case SCSI_PROT_READ_STRIP:
1270         case SCSI_PROT_WRITE_INSERT:
1271         case SCSI_PROT_READ_PASS:
1272         case SCSI_PROT_WRITE_PASS:
1273             total_bytes = data_bytes + dif_bytes;
1274             break;
1275         default:
1276             BUG();
1277         }
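        /*
         * Worked example of the byte accounting above, with assumed
         * numbers: data_bytes = 4096 and blk_size = 512 give
         * dif_bytes = (4096 / 512) * 8 = 64.  For the pass and
         * strip-on-read/insert-on-write cases the fibre channel count is
         * total_bytes = 4096 + 64 = 4160; for READ_INSERT/WRITE_STRIP the
         * wire carries only the 4096 data bytes while data_bytes, which
         * feeds the CRC context byte_count below, is bumped to 4160.
         */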
1278
1279         if (!qla2x00_hba_err_chk_enabled(sp))
1280                 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1281
1282         if (!bundling) {
1283                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1284         } else {
1285                 /*
1286                  * Configure bundling if the data and protection
1287                  * segments must be fetched with interleaving PCI accesses.
1288                  */
1289                 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1290                 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1291                 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1292                                                         tot_prot_dsds);
1293                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1294         }
1295
1296         /* Finish the common fields of CRC pkt */
1297         crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1298         crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1299         crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1300         crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
1301         /* Fibre channel byte count */
1302         cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1303         fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1304             additional_fcpcdb_len);
1305         *fcp_dl = htonl(total_bytes);
1306
1307         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1308                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1309                 return QLA_SUCCESS;
1310         }
1311         /* Walks data segments */
1312
1313         cmd_pkt->control_flags |=
1314             __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1315
1316         if (!bundling && tot_prot_dsds) {
1317                 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1318                     cur_dsd, tot_dsds))
1319                         goto crc_queuing_error;
1320         } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1321             (tot_dsds - tot_prot_dsds)))
1322                 goto crc_queuing_error;
1323
1324         if (bundling && tot_prot_dsds) {
1325                 /* Walks dif segments */
1326                 cur_seg = scsi_prot_sglist(cmd);
1327                 cmd_pkt->control_flags |=
1328                         __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1329                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1330                 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1331                     tot_prot_dsds))
1332                         goto crc_queuing_error;
1333         }
1334         return QLA_SUCCESS;
1335
1336 crc_queuing_error:
1337         /* Cleanup will be performed by the caller */
1338
1339         return QLA_FUNCTION_FAILED;
1340 }
1341
1342 /**
1343  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1344  * @sp: command to send to the ISP
1345  *
1346  * Returns non-zero if a failure occurred, else zero.
1347  */
1348 int
1349 qla24xx_start_scsi(srb_t *sp)
1350 {
1351         int             ret, nseg;
1352         unsigned long   flags;
1353         uint32_t        *clr_ptr;
1354         uint32_t        index;
1355         uint32_t        handle;
1356         struct cmd_type_7 *cmd_pkt;
1357         uint16_t        cnt;
1358         uint16_t        req_cnt;
1359         uint16_t        tot_dsds;
1360         struct req_que *req = NULL;
1361         struct rsp_que *rsp = NULL;
1362         struct scsi_cmnd *cmd = sp->cmd;
1363         struct scsi_qla_host *vha = sp->fcport->vha;
1364         struct qla_hw_data *ha = vha->hw;
1365         char            tag[2];
1366
1367         /* Setup device pointers. */
1368         ret = 0;
1369
1370         qla25xx_set_que(sp, &rsp);
1371         req = vha->req;
1372
1373         /* So we know we haven't pci_map'ed anything yet */
1374         tot_dsds = 0;
1375
1376         /* Send marker if required */
1377         if (vha->marker_needed != 0) {
1378                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1379                     QLA_SUCCESS)
1380                         return QLA_FUNCTION_FAILED;
1381                 vha->marker_needed = 0;
1382         }
1383
1384         /* Acquire ring specific lock */
1385         spin_lock_irqsave(&ha->hardware_lock, flags);
1386
1387         /* Check for room in outstanding command list. */
1388         handle = req->current_outstanding_cmd;
1389         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1390                 handle++;
1391                 if (handle == MAX_OUTSTANDING_COMMANDS)
1392                         handle = 1;
1393                 if (!req->outstanding_cmds[handle])
1394                         break;
1395         }
1396         if (index == MAX_OUTSTANDING_COMMANDS) {
1397                 goto queuing_error;
1398         }
1399
1400         /* Map the sg table so we have an accurate count of sg entries needed */
1401         if (scsi_sg_count(cmd)) {
1402                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1403                     scsi_sg_count(cmd), cmd->sc_data_direction);
1404                 if (unlikely(!nseg))
1405                         goto queuing_error;
1406         } else
1407                 nseg = 0;
1408
1409         tot_dsds = nseg;
1410         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1411         if (req->cnt < (req_cnt + 2)) {
1412                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1413
1414                 if (req->ring_index < cnt)
1415                         req->cnt = cnt - req->ring_index;
1416                 else
1417                         req->cnt = req->length -
1418                                 (req->ring_index - cnt);
1419         }
1420         if (req->cnt < (req_cnt + 2))
1421                 goto queuing_error;
1422
1423         /* Build command packet. */
1424         req->current_outstanding_cmd = handle;
1425         req->outstanding_cmds[handle] = sp;
1426         sp->handle = handle;
1427         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1428         req->cnt -= req_cnt;
1429
1430         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1431         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1432
1433         /* Zero out remaining portion of packet. */
1434         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1435         clr_ptr = (uint32_t *)cmd_pkt + 2;
1436         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1437         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1438
1439         /* Set NPORT-ID and LUN number*/
1440         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1441         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1442         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1443         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1444         cmd_pkt->vp_index = sp->fcport->vp_idx;
1445
1446         int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1447         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1448
1449         /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1450         if (scsi_populate_tag_msg(cmd, tag)) {
1451                 switch (tag[0]) {
1452                 case HEAD_OF_QUEUE_TAG:
1453                         cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1454                         break;
1455                 case ORDERED_QUEUE_TAG:
1456                         cmd_pkt->task = TSK_ORDERED;
1457                         break;
1458                 }
1459         }
1460
1461         /* Load SCSI command packet. */
1462         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1463         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1464
1465         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1466
1467         /* Build IOCB segments */
1468         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1469
1470         /* Set total data segment count. */
1471         cmd_pkt->entry_count = (uint8_t)req_cnt;
1472         /* Specify response queue number where completion should happen */
1473         cmd_pkt->entry_status = (uint8_t) rsp->id;
1474         wmb();
1475         /* Adjust ring index. */
1476         req->ring_index++;
1477         if (req->ring_index == req->length) {
1478                 req->ring_index = 0;
1479                 req->ring_ptr = req->ring;
1480         } else
1481                 req->ring_ptr++;
1482
1483         sp->flags |= SRB_DMA_VALID;
1484
1485         /* Set chip new ring index. */
1486         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1487         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1488
1489         /* Manage unprocessed RIO/ZIO commands in response queue. */
1490         if (vha->flags.process_response_queue &&
1491                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1492                 qla24xx_process_response_queue(vha, rsp);
1493
1494         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1495         return QLA_SUCCESS;
1496
1497 queuing_error:
1498         if (tot_dsds)
1499                 scsi_dma_unmap(cmd);
1500
1501         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1502
1503         return QLA_FUNCTION_FAILED;
1504 }
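/*
 * Caller sketch (illustrative; the label and cleanup are assumptions,
 * not taken from this file): the queuecommand path reaches this routine
 * through the isp_ops method table, roughly:
 *
 *      rval = ha->isp_ops->start_scsi(sp);
 *      if (rval != QLA_SUCCESS)
 *              goto qc24_host_busy_free_sp;
 *
 * On failure the caller releases the srb and asks the midlayer to
 * retry, e.g. by returning SCSI_MLQUEUE_HOST_BUSY.
 */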
1505
1506
1507 /**
1508  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1509  * @sp: command to send to the ISP
1510  *
1511  * Returns non-zero if a failure occurred, else zero.
1512  */
1513 int
1514 qla24xx_dif_start_scsi(srb_t *sp)
1515 {
1516         int                     nseg;
1517         unsigned long           flags;
1518         uint32_t                *clr_ptr;
1519         uint32_t                index;
1520         uint32_t                handle;
1521         uint16_t                cnt;
1522         uint16_t                req_cnt = 0;
1523         uint16_t                tot_dsds;
1524         uint16_t                tot_prot_dsds;
1525         uint16_t                fw_prot_opts = 0;
1526         struct req_que          *req = NULL;
1527         struct rsp_que          *rsp = NULL;
1528         struct scsi_cmnd        *cmd = sp->cmd;
1529         struct scsi_qla_host    *vha = sp->fcport->vha;
1530         struct qla_hw_data      *ha = vha->hw;
1531         struct cmd_type_crc_2   *cmd_pkt;
1532         uint32_t                status = 0;
1533
1534 #define QDSS_GOT_Q_SPACE        BIT_0
1535
1536         /* Only handle protected I/O or CDBs longer than 16 bytes here. */
1537         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1538                 if (cmd->cmd_len <= 16)
1539                         return qla24xx_start_scsi(sp);
1540         }
1541
1542         /* Setup device pointers. */
1543
1544         qla25xx_set_que(sp, &rsp);
1545         req = vha->req;
1546
1547         /* So we know we haven't pci_map'ed anything yet */
1548         tot_dsds = 0;
1549
1550         /* Send marker if required */
1551         if (vha->marker_needed != 0) {
1552                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1553                     QLA_SUCCESS)
1554                         return QLA_FUNCTION_FAILED;
1555                 vha->marker_needed = 0;
1556         }
1557
1558         /* Acquire ring specific lock */
1559         spin_lock_irqsave(&ha->hardware_lock, flags);
1560
1561         /* Check for room in outstanding command list. */
1562         handle = req->current_outstanding_cmd;
1563         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1564                 handle++;
1565                 if (handle == MAX_OUTSTANDING_COMMANDS)
1566                         handle = 1;
1567                 if (!req->outstanding_cmds[handle])
1568                         break;
1569         }
1570
1571         if (index == MAX_OUTSTANDING_COMMANDS)
1572                 goto queuing_error;
1573
1574         /* Compute number of required data segments */
1575         /* Map the sg table so we have an accurate count of sg entries needed */
1576         if (scsi_sg_count(cmd)) {
1577                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1578                     scsi_sg_count(cmd), cmd->sc_data_direction);
1579                 if (unlikely(!nseg))
1580                         goto queuing_error;
1581                 else
1582                         sp->flags |= SRB_DMA_VALID;
1583
1584                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1585                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1586                         struct qla2_sgx sgx;
1587                         uint32_t        partial;
1588
1589                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1590                         sgx.tot_bytes = scsi_bufflen(cmd);
1591                         sgx.cur_sg = scsi_sglist(cmd);
1592                         sgx.sp = sp;
1593
1594                         nseg = 0;
1595                         while (qla24xx_get_one_block_sg(
1596                             cmd->device->sector_size, &sgx, &partial))
1597                                 nseg++;
1598                 }
1599         } else
1600                 nseg = 0;
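        /*
         * Worked example of the recount above (sector size assumed to
         * be 512): for SCSI_PROT_READ_INSERT/WRITE_STRIP the firmware
         * must handle the data stream one logical block at a time, so
         * a 4 KB buffer is re-counted as 4096 / 512 = 8 data segments
         * regardless of how few scatterlist entries actually cover it.
         */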
1601
1602         /* number of required data segments */
1603         tot_dsds = nseg;
1604
1605         /* Compute number of required protection segments */
1606         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1607                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1608                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1609                 if (unlikely(!nseg))
1610                         goto queuing_error;
1611                 else
1612                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1613
1614                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1615                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1616                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1617                 }
1618         } else {
1619                 nseg = 0;
1620         }
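        /*
         * Example for the protection side (values assumed): with a 4 KB
         * transfer and 512-byte logical blocks, 4096 / 512 = 8
         * protection segments are budgeted -- one 8-byte T10 DIF tuple
         * per block that the firmware inserts or strips on behalf of
         * the host.
         */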
1621
1622         req_cnt = 1;
1623         /* Total Data and protection sg segment(s) */
1624         tot_prot_dsds = nseg;
1625         tot_dsds += nseg;
1626         if (req->cnt < (req_cnt + 2)) {
1627                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1628
1629                 if (req->ring_index < cnt)
1630                         req->cnt = cnt - req->ring_index;
1631                 else
1632                         req->cnt = req->length -
1633                                 (req->ring_index - cnt);
1634         }
1635
1636         if (req->cnt < (req_cnt + 2))
1637                 goto queuing_error;
1638
1639         status |= QDSS_GOT_Q_SPACE;
1640
1641         /* Build header part of command packet (excluding the OPCODE). */
1642         req->current_outstanding_cmd = handle;
1643         req->outstanding_cmds[handle] = sp;
1644         sp->handle = handle;
1645         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1646         req->cnt -= req_cnt;
1647
1648         /* Fill-in common area */
1649         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1650         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1651
1652         clr_ptr = (uint32_t *)cmd_pkt + 2;
1653         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1654
1655         /* Set NPORT-ID and LUN number*/
1656         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1657         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1658         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1659         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1660
1661         int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1662         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1663
1664         /* Total Data and protection segment(s) */
1665         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1666
1667         /* Build IOCB segments and adjust for data protection segments */
1668         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1669             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1670                 QLA_SUCCESS)
1671                 goto queuing_error;
1672
1673         cmd_pkt->entry_count = (uint8_t)req_cnt;
1674         /* Specify response queue number where completion should happen */
1675         cmd_pkt->entry_status = (uint8_t) rsp->id;
1676         cmd_pkt->timeout = __constant_cpu_to_le16(0);
1677         wmb();
1678
1679         /* Adjust ring index. */
1680         req->ring_index++;
1681         if (req->ring_index == req->length) {
1682                 req->ring_index = 0;
1683                 req->ring_ptr = req->ring;
1684         } else
1685                 req->ring_ptr++;
1686
1687         /* Set chip new ring index. */
1688         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1689         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1690
1691         /* Manage unprocessed RIO/ZIO commands in response queue. */
1692         if (vha->flags.process_response_queue &&
1693             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1694                 qla24xx_process_response_queue(vha, rsp);
1695
1696         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1697
1698         return QLA_SUCCESS;
1699
1700 queuing_error:
1701         if (status & QDSS_GOT_Q_SPACE) {
1702                 req->outstanding_cmds[handle] = NULL;
1703                 req->cnt += req_cnt;
1704         }
1705         /* Cleanup will be performed by the caller (queuecommand) */
1706
1707         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1708         return QLA_FUNCTION_FAILED;
1709 }
1710
1711
1712 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1713 {
1714         struct scsi_cmnd *cmd = sp->cmd;
1715         struct qla_hw_data *ha = sp->fcport->vha->hw;
1716         int affinity = cmd->request->cpu;
1717
1718         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1719                 affinity < ha->max_rsp_queues - 1)
1720                 *rsp = ha->rsp_q_map[affinity + 1];
1721         else
1722                 *rsp = ha->rsp_q_map[0];
1723 }
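/*
 * Example mapping (values assumed): with cpu_affinity_enabled and
 * max_rsp_queues == 4, a request whose block-layer completion CPU is
 * 0, 1 or 2 is steered to rsp_q_map[1..3] respectively; any other
 * affinity value (or affinity support disabled) falls back to the
 * default response queue, rsp_q_map[0].
 */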
1724
1725 /* Generic Control-SRB manipulation functions. */
1726 void *
1727 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1728 {
1729         struct qla_hw_data *ha = vha->hw;
1730         struct req_que *req = ha->req_q_map[0];
1731         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1732         uint32_t index, handle;
1733         request_t *pkt;
1734         uint16_t cnt, req_cnt;
1735
1736         pkt = NULL;
1737         req_cnt = 1;
1738         handle = 0;
1739
1740         if (!sp)
1741                 goto skip_cmd_array;
1742
1743         /* Check for room in outstanding command list. */
1744         handle = req->current_outstanding_cmd;
1745         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1746                 handle++;
1747                 if (handle == MAX_OUTSTANDING_COMMANDS)
1748                         handle = 1;
1749                 if (!req->outstanding_cmds[handle])
1750                         break;
1751         }
1752         if (index == MAX_OUTSTANDING_COMMANDS) {
1753                 ql_log(ql_log_warn, vha, 0x700b,
1754                     "No room on oustanding cmd array.\n");
1755                 goto queuing_error;
1756         }
1757
1758         /* Prep command array. */
1759         req->current_outstanding_cmd = handle;
1760         req->outstanding_cmds[handle] = sp;
1761         sp->handle = handle;
1762
1763 skip_cmd_array:
1764         /* Check for room on request queue. */
1765         if (req->cnt < req_cnt) {
1766                 if (ha->mqenable)
1767                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1768                 else if (IS_QLA82XX(ha))
1769                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1770                 else if (IS_FWI2_CAPABLE(ha))
1771                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1772                 else
1773                         cnt = qla2x00_debounce_register(
1774                             ISP_REQ_Q_OUT(ha, &reg->isp));
1775
1776                 if (req->ring_index < cnt)
1777                         req->cnt = cnt - req->ring_index;
1778                 else
1779                         req->cnt = req->length -
1780                             (req->ring_index - cnt);
1781         }
1782         if (req->cnt < req_cnt)
1783                 goto queuing_error;
1784
1785         /* Prep packet */
1786         req->cnt -= req_cnt;
1787         pkt = req->ring_ptr;
1788         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1789         pkt->entry_count = req_cnt;
1790         pkt->handle = handle;
1791
1792 queuing_error:
1793         return pkt;
1794 }
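/*
 * Typical call sequence (mirrors qla2x00_start_sp() below); the caller
 * must hold ha->hardware_lock across allocation and ring submission:
 *
 *      spin_lock_irqsave(&ha->hardware_lock, flags);
 *      pkt = qla2x00_alloc_iocbs(vha, sp);
 *      if (pkt) {
 *              ... fill in the IOCB ...
 *              wmb();
 *              qla2x00_start_iocbs(sp);
 *      }
 *      spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */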
1795
1796 static void
1797 qla2x00_start_iocbs(srb_t *sp)
1798 {
1799         struct qla_hw_data *ha = sp->fcport->vha->hw;
1800         struct req_que *req = ha->req_q_map[0];
1801         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1802         struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1803
1804         if (IS_QLA82XX(ha)) {
1805                 qla82xx_start_iocbs(sp);
1806         } else {
1807                 /* Adjust ring index. */
1808                 req->ring_index++;
1809                 if (req->ring_index == req->length) {
1810                         req->ring_index = 0;
1811                         req->ring_ptr = req->ring;
1812                 } else
1813                         req->ring_ptr++;
1814
1815                 /* Set chip new ring index. */
1816                 if (ha->mqenable) {
1817                         WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
1818                         RD_REG_DWORD(&ioreg->hccr);
1821                 } else if (IS_FWI2_CAPABLE(ha)) {
1822                         WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
1823                         RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
1824                 } else {
1825                         WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
1826                                 req->ring_index);
1827                         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
1828                 }
1829         }
1830 }
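/*
 * Note on the register read-backs above: each doorbell write is
 * followed by a read of a register on the same device, the usual PCI
 * write-posting flush that forces the posted write out of any
 * intermediate bridges before the driver proceeds.
 */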
1831
1832 static void
1833 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1834 {
1835         struct srb_ctx *ctx = sp->ctx;
1836         struct srb_iocb *lio = ctx->u.iocb_cmd;
1837
1838         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1839         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1840         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1841                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1842         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1843                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1844         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1845         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1846         logio->port_id[1] = sp->fcport->d_id.b.area;
1847         logio->port_id[2] = sp->fcport->d_id.b.domain;
1848         logio->vp_index = sp->fcport->vp_idx;
1849 }
1850
1851 static void
1852 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1853 {
1854         struct qla_hw_data *ha = sp->fcport->vha->hw;
1855         struct srb_ctx *ctx = sp->ctx;
1856         struct srb_iocb *lio = ctx->u.iocb_cmd;
1857         uint16_t opts;
1858
1859         mbx->entry_type = MBX_IOCB_TYPE;
1860         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1861         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1862         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1863         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1864         if (HAS_EXTENDED_IDS(ha)) {
1865                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1866                 mbx->mb10 = cpu_to_le16(opts);
1867         } else {
1868                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1869         }
1870         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1871         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1872             sp->fcport->d_id.b.al_pa);
1873         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1874 }
1875
1876 static void
1877 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1878 {
1879         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1880         logio->control_flags =
1881             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1882         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1883         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1884         logio->port_id[1] = sp->fcport->d_id.b.area;
1885         logio->port_id[2] = sp->fcport->d_id.b.domain;
1886         logio->vp_index = sp->fcport->vp_idx;
1887 }
1888
1889 static void
1890 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1891 {
1892         struct qla_hw_data *ha = sp->fcport->vha->hw;
1893
1894         mbx->entry_type = MBX_IOCB_TYPE;
1895         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1896         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1897         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1898             cpu_to_le16(sp->fcport->loop_id) :
1899             cpu_to_le16(sp->fcport->loop_id << 8);
1900         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1901         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1902             sp->fcport->d_id.b.al_pa);
1903         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1904         /* Implicit: mbx->mb10 = 0. */
1905 }
1906
1907 static void
1908 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1909 {
1910         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1911         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1912         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1913         logio->vp_index = sp->fcport->vp_idx;
1914 }
1915
1916 static void
1917 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1918 {
1919         struct qla_hw_data *ha = sp->fcport->vha->hw;
1920
1921         mbx->entry_type = MBX_IOCB_TYPE;
1922         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1923         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1924         if (HAS_EXTENDED_IDS(ha)) {
1925                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1926                 mbx->mb10 = cpu_to_le16(BIT_0);
1927         } else {
1928                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1929         }
1930         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1931         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1932         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1933         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1934         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1935 }
1936
1937 static void
1938 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1939 {
1940         uint32_t flags;
1941         unsigned int lun;
1942         struct fc_port *fcport = sp->fcport;
1943         scsi_qla_host_t *vha = fcport->vha;
1944         struct qla_hw_data *ha = vha->hw;
1945         struct srb_ctx *ctx = sp->ctx;
1946         struct srb_iocb *iocb = ctx->u.iocb_cmd;
1947         struct req_que *req = vha->req;
1948
1949         flags = iocb->u.tmf.flags;
1950         lun = iocb->u.tmf.lun;
1951
1952         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1953         tsk->entry_count = 1;
1954         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1955         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
1956         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1957         tsk->control_flags = cpu_to_le32(flags);
1958         tsk->port_id[0] = fcport->d_id.b.al_pa;
1959         tsk->port_id[1] = fcport->d_id.b.area;
1960         tsk->port_id[2] = fcport->d_id.b.domain;
1961         tsk->vp_index = fcport->vp_idx;
1962
1963         if (flags == TCF_LUN_RESET) {
1964                 int_to_scsilun(lun, &tsk->lun);
1965                 host_to_fcp_swap((uint8_t *)&tsk->lun,
1966                         sizeof(tsk->lun));
1967         }
1968 }
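/*
 * Timeout example (assuming ha->r_a_tov is kept in 100 ms units, as it
 * appears to be elsewhere in this driver): the FC convention of waiting
 * 2 * R_A_TOV means a 10 s R_A_TOV gives r_a_tov == 100 and
 * tsk->timeout == 100 / 10 * 2 == 20 seconds.
 */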
1969
1970 static void
1971 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1972 {
1973         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
1974
1975         els_iocb->entry_type = ELS_IOCB_TYPE;
1976         els_iocb->entry_count = 1;
1977         els_iocb->sys_define = 0;
1978         els_iocb->entry_status = 0;
1979         els_iocb->handle = sp->handle;
1980         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1981         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
1982         els_iocb->vp_index = sp->fcport->vp_idx;
1983         els_iocb->sof_type = EST_SOFI3;
1984         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1985
1986         els_iocb->opcode =
1987             (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
1988             bsg_job->request->rqst_data.r_els.els_code :
1989             bsg_job->request->rqst_data.h_els.command_code;
1990         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
1991         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
1992         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
1993         els_iocb->control_flags = 0;
1994         els_iocb->rx_byte_count =
1995             cpu_to_le32(bsg_job->reply_payload.payload_len);
1996         els_iocb->tx_byte_count =
1997             cpu_to_le32(bsg_job->request_payload.payload_len);
1998
1999         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2000             (bsg_job->request_payload.sg_list)));
2001         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2002             (bsg_job->request_payload.sg_list)));
2003         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2004             (bsg_job->request_payload.sg_list));
2005
2006         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2007             (bsg_job->reply_payload.sg_list)));
2008         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2009             (bsg_job->reply_payload.sg_list)));
2010         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2011             (bsg_job->reply_payload.sg_list));
2012 }
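/*
 * Address-split example (value assumed): a 64-bit bus address such as
 * 0x0000001234567000 is carried as two little-endian 32-bit words,
 * tx_address[0] = LSD -> 0x34567000 and tx_address[1] = MSD ->
 * 0x00000012 -- the layout every DSD in this file uses.
 */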
2013
2014 static void
2015 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2016 {
2017         uint16_t        avail_dsds;
2018         uint32_t        *cur_dsd;
2019         struct scatterlist *sg;
2020         int index;
2021         uint16_t tot_dsds;
2022         scsi_qla_host_t *vha = sp->fcport->vha;
2023         struct qla_hw_data *ha = vha->hw;
2024         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
2025         int loop_iteration = 0;
2026         int cont_iocb_prsnt = 0;
2027         int entry_count = 1;
2028
2029         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2030         ct_iocb->entry_type = CT_IOCB_TYPE;
2031         ct_iocb->entry_status = 0;
2032         ct_iocb->handle1 = sp->handle;
2033         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2034         ct_iocb->status = __constant_cpu_to_le16(0);
2035         ct_iocb->control_flags = __constant_cpu_to_le16(0);
2036         ct_iocb->timeout = 0;
2037         ct_iocb->cmd_dsd_count =
2038             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2039         ct_iocb->total_dsd_count =
2040             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2041         ct_iocb->req_bytecount =
2042             cpu_to_le32(bsg_job->request_payload.payload_len);
2043         ct_iocb->rsp_bytecount =
2044             cpu_to_le32(bsg_job->reply_payload.payload_len);
2045
2046         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2047             (bsg_job->request_payload.sg_list)));
2048         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2049             (bsg_job->request_payload.sg_list)));
2050         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2051
2052         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2053             (bsg_job->reply_payload.sg_list)));
2054         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2055             (bsg_job->reply_payload.sg_list)));
2056         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2057
2058         avail_dsds = 1;
2059         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2060         index = 0;
2061         tot_dsds = bsg_job->reply_payload.sg_cnt;
2062
2063         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2064                 dma_addr_t       sle_dma;
2065                 cont_a64_entry_t *cont_pkt;
2066
2067                 /* Allocate additional continuation packets? */
2068                 if (avail_dsds == 0) {
2069                         /*
2070                          * Five DSDs are available in the Cont.
2071                          * Type 1 IOCB.
2072                          */
2073                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
2074                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2075                         avail_dsds = 5;
2076                         cont_iocb_prsnt = 1;
2077                         entry_count++;
2078                 }
2079
2080                 sle_dma = sg_dma_address(sg);
2081                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2082                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2083                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2084                 loop_iteration++;
2085                 avail_dsds--;
2086         }
2087         ct_iocb->entry_count = entry_count;
2088 }
2089
2090 static void
2091 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2092 {
2093         uint16_t        avail_dsds;
2094         uint32_t        *cur_dsd;
2095         struct scatterlist *sg;
2096         int index;
2097         uint16_t tot_dsds;
2098         scsi_qla_host_t *vha = sp->fcport->vha;
2099         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
2100         int loop_iteration = 0;
2101         int cont_iocb_prsnt = 0;
2102         int entry_count = 1;
2103
2104         ct_iocb->entry_type = CT_IOCB_TYPE;
2105         ct_iocb->entry_status = 0;
2106         ct_iocb->sys_define = 0;
2107         ct_iocb->handle = sp->handle;
2108
2109         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2110         ct_iocb->vp_index = sp->fcport->vp_idx;
2111         ct_iocb->comp_status = __constant_cpu_to_le16(0);
2112
2113         ct_iocb->cmd_dsd_count =
2114             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2115         ct_iocb->timeout = 0;
2116         ct_iocb->rsp_dsd_count =
2117             cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2118         ct_iocb->rsp_byte_count =
2119             cpu_to_le32(bsg_job->reply_payload.payload_len);
2120         ct_iocb->cmd_byte_count =
2121             cpu_to_le32(bsg_job->request_payload.payload_len);
2122         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2123             (bsg_job->request_payload.sg_list)));
2124         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2125            (bsg_job->request_payload.sg_list)));
2126         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2127             (bsg_job->request_payload.sg_list));
2128
2129         avail_dsds = 1;
2130         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2131         index = 0;
2132         tot_dsds = bsg_job->reply_payload.sg_cnt;
2133
2134         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2135                 dma_addr_t       sle_dma;
2136                 cont_a64_entry_t *cont_pkt;
2137
2138                 /* Allocate additional continuation packets? */
2139                 if (avail_dsds == 0) {
2140                         /*
2141                          * Five DSDs are available in the Cont.
2142                          * Type 1 IOCB.
2143                          */
2144                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
2145                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2146                         avail_dsds = 5;
2147                         cont_iocb_prsnt = 1;
2148                         entry_count++;
2149                 }
2150
2151                 sle_dma = sg_dma_address(sg);
2152                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2153                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2154                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2155                 loop_iteration++;
2156                 avail_dsds--;
2157         }
2158         ct_iocb->entry_count = entry_count;
2159 }
2160
2161 int
2162 qla2x00_start_sp(srb_t *sp)
2163 {
2164         int rval;
2165         struct qla_hw_data *ha = sp->fcport->vha->hw;
2166         void *pkt;
2167         struct srb_ctx *ctx = sp->ctx;
2168         unsigned long flags;
2169
2170         rval = QLA_FUNCTION_FAILED;
2171         spin_lock_irqsave(&ha->hardware_lock, flags);
2172         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2173         if (!pkt) {
2174                 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2175                     "qla2x00_alloc_iocbs failed.\n");
2176                 goto done;
2177         }
2178
2179         rval = QLA_SUCCESS;
2180         switch (ctx->type) {
2181         case SRB_LOGIN_CMD:
2182                 IS_FWI2_CAPABLE(ha) ?
2183                     qla24xx_login_iocb(sp, pkt) :
2184                     qla2x00_login_iocb(sp, pkt);
2185                 break;
2186         case SRB_LOGOUT_CMD:
2187                 IS_FWI2_CAPABLE(ha) ?
2188                     qla24xx_logout_iocb(sp, pkt) :
2189                     qla2x00_logout_iocb(sp, pkt);
2190                 break;
2191         case SRB_ELS_CMD_RPT:
2192         case SRB_ELS_CMD_HST:
2193                 qla24xx_els_iocb(sp, pkt);
2194                 break;
2195         case SRB_CT_CMD:
2196                 IS_FWI2_CAPABLE(ha) ?
2197                     qla24xx_ct_iocb(sp, pkt) :
2198                     qla2x00_ct_iocb(sp, pkt);
2199                 break;
2200         case SRB_ADISC_CMD:
2201                 IS_FWI2_CAPABLE(ha) ?
2202                     qla24xx_adisc_iocb(sp, pkt) :
2203                     qla2x00_adisc_iocb(sp, pkt);
2204                 break;
2205         case SRB_TM_CMD:
2206                 qla24xx_tm_iocb(sp, pkt);
2207                 break;
2208         default:
2209                 break;
2210         }
2211
2212         wmb();
2213         qla2x00_start_iocbs(sp);
2214 done:
2215         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2216         return rval;
2217 }
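/*
 * Usage sketch (illustrative; the helper names are assumptions): the
 * async login/logout/ADISC/TM paths allocate an srb, set ctx->type to
 * the matching SRB_*_CMD value, fill ctx->u.iocb_cmd, and then simply:
 *
 *      rval = qla2x00_start_sp(sp);
 *
 * Locking, IOCB allocation and ring submission are all handled here,
 * so those callers only need to check the return value.
 */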