/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;

        cflags = 0;

        /* Set transfer direction */
        if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
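
/*
 * Illustrative worked example (comment only, not driver code): the
 * command IOCB carries the first few DSDs and each continuation IOCB
 * carries the rest. For the 64-bit variant (2 DSDs in the command,
 * 5 per continuation), dsds = 12 needs
 *
 *      iocbs = 1 + (12 - 2) / 5 = 3            (10 % 5 == 0, exact fit)
 *
 * while dsds = 13 leaves a remainder ((13 - 2) % 5 == 1) and rounds up
 * to 4. The 32-bit variant follows the same pattern with 3 and 7.
 */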

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
        cont_a64_entry_t *cont_pkt;

        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}
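
/*
 * Illustrative example (comment only, not driver code): the request
 * ring is a fixed-size circular array, so with req->length == 128,
 * advancing from ring_index 126 gives 127 and bumps ring_ptr, while
 * advancing from 127 wraps ring_index to 0 and resets ring_ptr back to
 * req->ring. The same wrap logic recurs in the start_scsi routines and
 * in qla2x00_isp_cmd() below.
 */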

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

        /* We only support T10 DIF right now */
        if (guard != SHOST_DIX_GUARD_CRC) {
                ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
                    "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
                return 0;
        }

        /* We always use DIF Bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(sp->cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        case SCSI_PROT_WRITE_PASS:
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(sp->cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
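
/*
 * Illustrative example (comment only, not driver code): a 64-bit DSD
 * is written as two little-endian 32-bit address words, low dword
 * first, followed by the length. For sle_dma == 0x0000001234abcd00
 * and a 4096-byte segment, the three words stored above would be
 *
 *      LSD(sle_dma)   = 0x34abcd00     (low 32 bits)
 *      MSD(sle_dma)   = 0x00000012     (high 32 bits)
 *      sg_dma_len(sg) = 0x00001000
 */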

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;
        char            tag[2];

        /* Setup device pointers. */
        ret = 0;
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = sp->cmd;
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

        /* Update tagged queuing modifier */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_HEAD_TAG);
                        break;
                case ORDERED_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_ORDERED_TAG);
                        break;
                default:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_SIMPLE_TAG);
                        break;
                }
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}
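
/*
 * Illustrative example (comment only, not driver code) of the
 * free-slot computation in qla2x00_start_scsi(): req->cnt caches the
 * number of free ring entries and is refreshed from the chip's out
 * pointer only when it looks too small. With req->length == 128,
 * ring_index == 100 and the chip's out index cnt == 20, the producer
 * has wrapped past the consumer, so
 *
 *      req->cnt = req->length - (ring_index - cnt) = 128 - 80 = 48;
 *
 * had ring_index been 10 (no wrap), req->cnt = 20 - 10 = 10. The
 * "req_cnt + 2" test keeps the ring from filling completely, so the
 * in and out pointers never alias on a full ring.
 */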

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint16_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk24 = NULL;
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_isp_cmd(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

        ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
            "IOCB data:\n");
        ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
            (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        /* Set chip new ring index. */
        if (IS_QLA82XX(ha)) {
                uint32_t dbval = 0x04 | (ha->portnum << 5);

                /* write, read and verify logic */
                dbval = dbval | (req->id << 8) | (req->ring_index << 16);
                if (ql2xdbwr)
                        qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
                else {
                        WRT_REG_DWORD(
                                (unsigned long __iomem *)ha->nxdb_wr_ptr,
                                dbval);
                        wmb();
                        while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
                                WRT_REG_DWORD((unsigned long __iomem *)
                                        ha->nxdb_wr_ptr, dbval);
                                wmb();
                        }
                }
        } else if (ha->mqenable) {
                /* Set chip new ring index. */
                WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
                RD_REG_DWORD(&ioreg->hccr);
        } else {
                if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return iocbs;
}
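
/*
 * Illustrative example (comment only, not driver code): a Command
 * Type 7 IOCB embeds a single DSD, so dsds == 11 costs
 * 1 + (11 - 1) / 5 = 3 entries (the command IOCB plus two Continuation
 * Type 1 IOCBs), while dsds == 12 leaves a remainder and rounds up
 * to 4.
 */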

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
        struct req_que *req;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = vha->req;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command
 * and fill in the firmware DIF context.
 */
static inline void
qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct sd_dif_tuple *spt;
        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
        unsigned char op = scsi_get_prot_op(cmd);

        switch (scsi_get_prot_type(cmd)) {
        /* For TYPE 0 protection: no checking */
        case SCSI_PROT_DIF_TYPE0:
                pkt->ref_tag_mask[0] = 0x00;
                pkt->ref_tag_mask[1] = 0x00;
                pkt->ref_tag_mask[2] = 0x00;
                pkt->ref_tag_mask[3] = 0x00;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                if (!ql2xenablehba_err_chk)
                        break;

                if (scsi_prot_sg_count(cmd)) {
                        spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
                            scsi_prot_sglist(cmd)[0].offset;
                        pkt->app_tag = swab32(spt->app_tag);
                        pkt->app_tag_mask[0] = 0xff;
                        pkt->app_tag_mask[1] = 0xff;
                }

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                if (!ql2xenablehba_err_chk)
                        break;

                if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
                    op == SCSI_PROT_WRITE_PASS)) {
                        spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
                            scsi_prot_sglist(cmd)[0].offset;
                        ql_dbg(ql_dbg_io, vha, 0x3008,
                            "LBA from user %p, lba = 0x%x for cmd=%p.\n",
                            spt, (int)spt->ref_tag, cmd);
                        pkt->ref_tag = swab32(spt->ref_tag);
                        pkt->app_tag_mask[0] = 0x0;
                        pkt->app_tag_mask[1] = 0x0;
                } else {
                        pkt->ref_tag = cpu_to_le32((uint32_t)
                            (0xffffffff & scsi_get_lba(cmd)));
                        pkt->app_tag = __constant_cpu_to_le16(0);
                        pkt->app_tag_mask[0] = 0x0;
                        pkt->app_tag_mask[1] = 0x0;
                }
                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }

        ql_dbg(ql_dbg_io, vha, 0x3009,
            "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
            "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
            pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
            scsi_get_prot_type(cmd), cmd);
}
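
/*
 * Illustrative example (comment only, not driver code): for a Type 1
 * protected write to LBA 0x12345678 with ql2xenablehba_err_chk set and
 * no user-supplied tags, the context filled in above carries
 *
 *      ref_tag      = 0x12345678       (low 32 bits of the LBA)
 *      app_tag      = 0                (mask bytes 0x00: not checked)
 *      ref_tag_mask = ff ff ff ff      (all ref-tag bytes validated)
 *
 * so the firmware checks each sector's 8-byte DIF tuple against a
 * reference tag that increments from the starting LBA.
 */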


static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);

        uint8_t         *cp;

        scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
                ql_dbg(ql_dbg_io, vha, 0x300a,
                    "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
                    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
                    sp->cmd);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;

                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        cp = page_address(sg_page(sg)) + sg->offset;
                        ql_dbg(ql_dbg_io, vha, 0x300b,
                            "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}
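
/*
 * Illustrative sizing example (comment only, not driver code): each
 * DSD is 12 bytes (address low/high plus length), and one extra slot
 * per list is reserved for the chain entry or the null terminator,
 * hence dsd_list_len = (avail_dsds + 1) * 12. Assuming
 * QLA_DSDS_PER_IOCB == 37, a command with tot_dsds == 40 gets a first
 * list of (37 + 1) * 12 = 456 bytes chained to a second of
 * (3 + 1) * 12 = 48 bytes, which is then terminated with three zero
 * words.
 */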

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
                                                        uint32_t *dsd,
        uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
        uint8_t         *cp;


        cmd = sp->cmd;
        scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        ql_dbg(ql_dbg_io, vha, 0x3027,
                            "%s(): %p, sg_entry %d - "
                            "addr=0x%x 0x%x, len=%d.\n",
                            __func__, cur_dsd, i,
                            LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        cp = page_address(sg_page(sg)) + sg->offset;
                        ql_dbg(ql_dbg_io, vha, 0x3028,
                            "%s(): Protection Data buffer = %p.\n", __func__,
                            cp);
                }
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                                      Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options to pass to the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        struct scatterlist      *cur_seg;
        int                     sgc;
        uint32_t                total_bytes;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;
        char                    tag[2];

        cmd = sp->cmd;

        sgc = 0;
        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->fcport->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
        }

        tot_prot_dsds = scsi_prot_sg_count(cmd);
        if (!tot_prot_dsds)
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
            GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
        host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;

        /*
         * Update tagged queuing modifier if using command tag queuing
         */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                    fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
                    break;
                case ORDERED_QUEUE_TAG:
                    fcp_cmnd->task_attribute = TSK_ORDERED;
                    break;
                default:
                    fcp_cmnd->task_attribute = 0;
                    break;
                }
        } else {
                fcp_cmnd->task_attribute = 0;
        }

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute dif len and adjust data len to include protection */
        total_bytes = data_bytes;
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
                dif_bytes = (data_bytes / blk_size) * 8;
                total_bytes += dif_bytes;
        }

        if (!ql2xenablehba_err_chk)
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure Bundling if we need to fetch interleaving
                 * protection PCI accesses
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }
        /* Walks data segments */

        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
        if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
            (tot_dsds - tot_prot_dsds)))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walks dif segments */
                cur_seg = scsi_prot_sglist(cmd);
                cmd_pkt->control_flags |=
                        __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                    tot_prot_dsds))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}
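
/*
 * Illustrative example (comment only, not driver code) of the byte
 * accounting above: a protected 64 KiB write on a 512-byte-sector
 * device spans 128 sectors, each carrying an 8-byte DIF tuple, so
 *
 *      dif_bytes   = (65536 / 512) * 8 = 1024
 *      total_bytes = 65536 + 1024      = 66560
 *
 * cmd_pkt->byte_count and the FCP_DL field advertise total_bytes (data
 * plus protection) while crc_ctx_pkt->byte_count keeps the raw data
 * length for the firmware's bundling logic.
 */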

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        struct cmd_type_7 *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct scsi_cmnd *cmd = sp->cmd;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        char            tag[2];

        /* Setup device pointers. */
        ret = 0;

        qla25xx_set_que(sp, &rsp);
        req = vha->req;

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                                (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vp_idx;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

        /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        cmd_pkt->task = TSK_HEAD_OF_QUEUE;
                        break;
                case ORDERED_QUEUE_TAG:
                        cmd_pkt->task = TSK_ORDERED;
                        break;
                }
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify response queue number where completion should happen */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
        wmb();
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);
        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
                rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(vha, rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_FUNCTION_FAILED;
}
1339
1340
1341 /**
1342  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1343  * @sp: command to send to the ISP
1344  *
1345  * Returns non-zero if a failure occurred, else zero.
1346  */
1347 int
1348 qla24xx_dif_start_scsi(srb_t *sp)
1349 {
1350         int                     nseg;
1351         unsigned long           flags;
1352         uint32_t                *clr_ptr;
1353         uint32_t                index;
1354         uint32_t                handle;
1355         uint16_t                cnt;
1356         uint16_t                req_cnt = 0;
1357         uint16_t                tot_dsds;
1358         uint16_t                tot_prot_dsds;
1359         uint16_t                fw_prot_opts = 0;
1360         struct req_que          *req = NULL;
1361         struct rsp_que          *rsp = NULL;
1362         struct scsi_cmnd        *cmd = sp->cmd;
1363         struct scsi_qla_host    *vha = sp->fcport->vha;
1364         struct qla_hw_data      *ha = vha->hw;
1365         struct cmd_type_crc_2   *cmd_pkt;
1366         uint32_t                status = 0;
1367
1368 #define QDSS_GOT_Q_SPACE        BIT_0
1369
1370         /* Only process protection or >16 cdb in this routine */
1371         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1372                 if (cmd->cmd_len <= 16)
1373                         return qla24xx_start_scsi(sp);
1374         }
1375
1376         /* Setup device pointers. */
1377
1378         qla25xx_set_que(sp, &rsp);
1379         req = vha->req;
1380
1381         /* So we know we haven't pci_map'ed anything yet */
1382         tot_dsds = 0;
1383
1384         /* Send marker if required */
1385         if (vha->marker_needed != 0) {
1386                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1387                     QLA_SUCCESS)
1388                         return QLA_FUNCTION_FAILED;
1389                 vha->marker_needed = 0;
1390         }
1391
1392         /* Acquire ring specific lock */
1393         spin_lock_irqsave(&ha->hardware_lock, flags);
1394
1395         /* Check for room in outstanding command list. */
1396         handle = req->current_outstanding_cmd;
1397         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1398                 handle++;
1399                 if (handle == MAX_OUTSTANDING_COMMANDS)
1400                         handle = 1;
1401                 if (!req->outstanding_cmds[handle])
1402                         break;
1403         }
1404
1405         if (index == MAX_OUTSTANDING_COMMANDS)
1406                 goto queuing_error;
1407
1408         /* Compute number of required data segments */
1409         /* Map the sg table so we have an accurate count of sg entries needed */
1410         if (scsi_sg_count(cmd)) {
1411                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1412                     scsi_sg_count(cmd), cmd->sc_data_direction);
1413                 if (unlikely(!nseg))
1414                         goto queuing_error;
1415                 else
1416                         sp->flags |= SRB_DMA_VALID;
1417         } else
1418                 nseg = 0;
1419
1420         /* number of required data segments */
1421         tot_dsds = nseg;
1422
1423         /* Compute number of required protection segments */
1424         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1425                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1426                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1427                 if (unlikely(!nseg))
1428                         goto queuing_error;
1429                 else
1430                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1431         } else {
1432                 nseg = 0;
1433         }
1434
1435         req_cnt = 1;
1436         /* Total Data and protection sg segment(s) */
1437         tot_prot_dsds = nseg;
1438         tot_dsds += nseg;
1439         if (req->cnt < (req_cnt + 2)) {
1440                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1441
1442                 if (req->ring_index < cnt)
1443                         req->cnt = cnt - req->ring_index;
1444                 else
1445                         req->cnt = req->length -
1446                                 (req->ring_index - cnt);
1447         }
1448
1449         if (req->cnt < (req_cnt + 2))
1450                 goto queuing_error;
1451
1452         status |= QDSS_GOT_Q_SPACE;
1453
1454         /* Build header part of command packet (excluding the OPCODE). */
1455         req->current_outstanding_cmd = handle;
1456         req->outstanding_cmds[handle] = sp;
1457         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1458         req->cnt -= req_cnt;
1459
1460         /* Fill-in common area */
1461         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1462         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1463
1464         clr_ptr = (uint32_t *)cmd_pkt + 2;
1465         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1466
        /* Set NPORT-ID and LUN number. */
1468         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1469         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1470         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1471         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1472
1473         int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1474         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1475
1476         /* Total Data and protection segment(s) */
1477         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1478
1479         /* Build IOCB segments and adjust for data protection segments */
        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
            QLA_SUCCESS)
                goto queuing_error;
1484
1485         cmd_pkt->entry_count = (uint8_t)req_cnt;
1486         /* Specify response queue number where completion should happen */
1487         cmd_pkt->entry_status = (uint8_t) rsp->id;
1488         cmd_pkt->timeout = __constant_cpu_to_le16(0);
1489         wmb();
1490
1491         /* Adjust ring index. */
1492         req->ring_index++;
1493         if (req->ring_index == req->length) {
1494                 req->ring_index = 0;
1495                 req->ring_ptr = req->ring;
1496         } else
1497                 req->ring_ptr++;
1498
1499         /* Set chip new ring index. */
1500         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1501         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1502
1503         /* Manage unprocessed RIO/ZIO commands in response queue. */
1504         if (vha->flags.process_response_queue &&
1505             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1506                 qla24xx_process_response_queue(vha, rsp);
1507
1508         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1509
1510         return QLA_SUCCESS;
1511
1512 queuing_error:
1513         if (status & QDSS_GOT_Q_SPACE) {
1514                 req->outstanding_cmds[handle] = NULL;
1515                 req->cnt += req_cnt;
1516         }
1517         /* Cleanup will be performed by the caller (queuecommand) */
1518
1519         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1520         return QLA_FUNCTION_FAILED;
1521 }
1522
1523
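/**
 * qla25xx_set_que() - Select the response queue for a command.
 * @sp: SCSI Request Block
 * @rsp: returned response queue on which completion should be processed
 *
 * With CPU affinity enabled, completions are steered to the response
 * queue mapped to the submitting CPU; otherwise the default response
 * queue (index 0) is used.
 */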
1524 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1525 {
1526         struct scsi_cmnd *cmd = sp->cmd;
1527         struct qla_hw_data *ha = sp->fcport->vha->hw;
1528         int affinity = cmd->request->cpu;
1529
1530         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1531                 affinity < ha->max_rsp_queues - 1)
1532                 *rsp = ha->rsp_q_map[affinity + 1];
        else
1534                 *rsp = ha->rsp_q_map[0];
1535 }
1536
1537 /* Generic Control-SRB manipulation functions. */
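/**
 * qla2x00_alloc_iocbs() - Allocate an IOCB slot on the base request queue.
 * @vha: HA context
 * @sp: SRB to bind to the allocated handle, or NULL to skip the
 *      outstanding-command array (e.g. for unsolicited IOCBs)
 *
 * Callers in this file invoke this with ha->hardware_lock held.
 *
 * Returns a zeroed request packet with handle and entry count filled in,
 * or NULL when no command handle or ring space is available.
 */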
1538 void *
1539 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1540 {
1541         struct qla_hw_data *ha = vha->hw;
1542         struct req_que *req = ha->req_q_map[0];
1543         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1544         uint32_t index, handle;
1545         request_t *pkt;
1546         uint16_t cnt, req_cnt;
1547
1548         pkt = NULL;
1549         req_cnt = 1;
1550         handle = 0;
1551
1552         if (!sp)
1553                 goto skip_cmd_array;
1554
1555         /* Check for room in outstanding command list. */
1556         handle = req->current_outstanding_cmd;
1557         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1558                 handle++;
1559                 if (handle == MAX_OUTSTANDING_COMMANDS)
1560                         handle = 1;
1561                 if (!req->outstanding_cmds[handle])
1562                         break;
1563         }
1564         if (index == MAX_OUTSTANDING_COMMANDS) {
1565                 ql_log(ql_log_warn, vha, 0x700b,
                    "No room on outstanding cmd array.\n");
1567                 goto queuing_error;
1568         }
1569
1570         /* Prep command array. */
1571         req->current_outstanding_cmd = handle;
1572         req->outstanding_cmds[handle] = sp;
1573         sp->handle = handle;
1574
1575 skip_cmd_array:
1576         /* Check for room on request queue. */
1577         if (req->cnt < req_cnt) {
1578                 if (ha->mqenable)
1579                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1580                 else if (IS_QLA82XX(ha))
1581                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1582                 else if (IS_FWI2_CAPABLE(ha))
1583                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1584                 else
1585                         cnt = qla2x00_debounce_register(
1586                             ISP_REQ_Q_OUT(ha, &reg->isp));
1587
                if (req->ring_index < cnt)
1589                         req->cnt = cnt - req->ring_index;
1590                 else
1591                         req->cnt = req->length -
1592                             (req->ring_index - cnt);
1593         }
1594         if (req->cnt < req_cnt)
1595                 goto queuing_error;
1596
1597         /* Prep packet */
1598         req->cnt -= req_cnt;
1599         pkt = req->ring_ptr;
1600         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1601         pkt->entry_count = req_cnt;
1602         pkt->handle = handle;
1603
1604 queuing_error:
1605         return pkt;
1606 }
1607
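/**
 * qla2x00_start_iocbs() - Hand newly built IOCBs to the ISP.
 * @sp: SCSI Request Block
 *
 * Advances the request-queue ring index and writes it to the chip's
 * request-queue in-pointer register appropriate for the ISP type.
 */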
1608 static void
1609 qla2x00_start_iocbs(srb_t *sp)
1610 {
1611         struct qla_hw_data *ha = sp->fcport->vha->hw;
1612         struct req_que *req = ha->req_q_map[0];
1613         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1614         struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1615
1616         if (IS_QLA82XX(ha)) {
1617                 qla82xx_start_iocbs(sp);
1618         } else {
1619                 /* Adjust ring index. */
1620                 req->ring_index++;
1621                 if (req->ring_index == req->length) {
1622                         req->ring_index = 0;
1623                         req->ring_ptr = req->ring;
1624                 } else
1625                         req->ring_ptr++;
1626
1627                 /* Set chip new ring index. */
1628                 if (ha->mqenable) {
1629                         WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
1630                         RD_REG_DWORD(&ioreg->hccr);
                } else if (IS_FWI2_CAPABLE(ha)) {
1634                         WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
1635                         RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
1636                 } else {
1637                         WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
1638                                 req->ring_index);
1639                         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
1640                 }
1641         }
1642 }
1643
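/**
 * qla24xx_login_iocb() - Prepare a PLOGI Login IOCB (ISP24xx and later).
 * @sp: SCSI Request Block
 * @logio: Login/Logout Port IOCB to populate
 */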
1644 static void
1645 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1646 {
1647         struct srb_ctx *ctx = sp->ctx;
1648         struct srb_iocb *lio = ctx->u.iocb_cmd;
1649
1650         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1651         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1652         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1653                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1654         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1655                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1656         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1657         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1658         logio->port_id[1] = sp->fcport->d_id.b.area;
1659         logio->port_id[2] = sp->fcport->d_id.b.domain;
1660         logio->vp_index = sp->fcport->vp_idx;
1661 }
1662
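/**
 * qla2x00_login_iocb() - Prepare a Login Fabric Port mailbox IOCB (legacy ISPs).
 * @sp: SCSI Request Block
 * @mbx: mailbox IOCB entry to populate
 */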
1663 static void
1664 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1665 {
1666         struct qla_hw_data *ha = sp->fcport->vha->hw;
1667         struct srb_ctx *ctx = sp->ctx;
1668         struct srb_iocb *lio = ctx->u.iocb_cmd;
1669         uint16_t opts;
1670
1671         mbx->entry_type = MBX_IOCB_TYPE;
1672         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1673         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1674         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1675         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1676         if (HAS_EXTENDED_IDS(ha)) {
1677                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1678                 mbx->mb10 = cpu_to_le16(opts);
1679         } else {
1680                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1681         }
1682         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1683         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1684             sp->fcport->d_id.b.al_pa);
1685         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1686 }
1687
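/**
 * qla24xx_logout_iocb() - Prepare an implicit LOGO IOCB (ISP24xx and later).
 * @sp: SCSI Request Block
 * @logio: Login/Logout Port IOCB to populate
 */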
1688 static void
1689 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1690 {
1691         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1692         logio->control_flags =
1693             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1694         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1695         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1696         logio->port_id[1] = sp->fcport->d_id.b.area;
1697         logio->port_id[2] = sp->fcport->d_id.b.domain;
1698         logio->vp_index = sp->fcport->vp_idx;
1699 }
1700
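/**
 * qla2x00_logout_iocb() - Prepare a Logout Fabric Port mailbox IOCB (legacy ISPs).
 * @sp: SCSI Request Block
 * @mbx: mailbox IOCB entry to populate
 */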
1701 static void
1702 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1703 {
1704         struct qla_hw_data *ha = sp->fcport->vha->hw;
1705
1706         mbx->entry_type = MBX_IOCB_TYPE;
1707         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1708         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1709         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1710             cpu_to_le16(sp->fcport->loop_id):
1711             cpu_to_le16(sp->fcport->loop_id << 8);
1712         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1713         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1714             sp->fcport->d_id.b.al_pa);
1715         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
        /* Implicit: mbx->mb10 = 0. */
1717 }
1718
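/**
 * qla24xx_adisc_iocb() - Prepare an ADISC IOCB (ISP24xx and later).
 * @sp: SCSI Request Block
 * @logio: Login/Logout Port IOCB to populate
 */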
1719 static void
1720 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1721 {
1722         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1723         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1724         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1725         logio->vp_index = sp->fcport->vp_idx;
1726 }
1727
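/**
 * qla2x00_adisc_iocb() - Prepare a Get Port Database mailbox IOCB (legacy ISPs).
 * @sp: SCSI Request Block
 * @mbx: mailbox IOCB entry to populate
 *
 * The port database is retrieved into the adapter's async_pd DMA buffer.
 */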
1728 static void
1729 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1730 {
1731         struct qla_hw_data *ha = sp->fcport->vha->hw;
1732
1733         mbx->entry_type = MBX_IOCB_TYPE;
1734         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1735         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1736         if (HAS_EXTENDED_IDS(ha)) {
1737                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1738                 mbx->mb10 = cpu_to_le16(BIT_0);
1739         } else {
1740                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1741         }
1742         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1743         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1744         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1745         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1746         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1747 }
1748
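/**
 * qla24xx_tm_iocb() - Prepare a Task Management IOCB.
 * @sp: SCSI Request Block
 * @tsk: Task Management IOCB to populate
 *
 * The LUN field is filled in only for a LUN reset; the timeout is
 * derived from the adapter's R_A_TOV.
 */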
1749 static void
1750 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1751 {
1752         uint32_t flags;
1753         unsigned int lun;
1754         struct fc_port *fcport = sp->fcport;
1755         scsi_qla_host_t *vha = fcport->vha;
1756         struct qla_hw_data *ha = vha->hw;
1757         struct srb_ctx *ctx = sp->ctx;
1758         struct srb_iocb *iocb = ctx->u.iocb_cmd;
1759         struct req_que *req = vha->req;
1760
1761         flags = iocb->u.tmf.flags;
1762         lun = iocb->u.tmf.lun;
1763
1764         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1765         tsk->entry_count = 1;
1766         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1767         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
1768         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1769         tsk->control_flags = cpu_to_le32(flags);
1770         tsk->port_id[0] = fcport->d_id.b.al_pa;
1771         tsk->port_id[1] = fcport->d_id.b.area;
1772         tsk->port_id[2] = fcport->d_id.b.domain;
1773         tsk->vp_index = fcport->vp_idx;
1774
1775         if (flags == TCF_LUN_RESET) {
1776                 int_to_scsilun(lun, &tsk->lun);
1777                 host_to_fcp_swap((uint8_t *)&tsk->lun,
1778                         sizeof(tsk->lun));
1779         }
1780 }
1781
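/**
 * qla24xx_els_iocb() - Prepare an ELS pass-through IOCB for a BSG request.
 * @sp: SCSI Request Block
 * @els_iocb: ELS IOCB to populate
 *
 * Transmit and receive data segments are taken from the BSG job's
 * request and reply payload scatter/gather lists.
 */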
1782 static void
1783 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1784 {
1785         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
1786
1787         els_iocb->entry_type = ELS_IOCB_TYPE;
1788         els_iocb->entry_count = 1;
1789         els_iocb->sys_define = 0;
1790         els_iocb->entry_status = 0;
1791         els_iocb->handle = sp->handle;
1792         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        els_iocb->tx_dsd_count =
            cpu_to_le16(bsg_job->request_payload.sg_cnt);
1794         els_iocb->vp_index = sp->fcport->vp_idx;
1795         els_iocb->sof_type = EST_SOFI3;
        els_iocb->rx_dsd_count =
            cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1797
1798         els_iocb->opcode =
1799             (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
1800             bsg_job->request->rqst_data.r_els.els_code :
1801             bsg_job->request->rqst_data.h_els.command_code;
1802         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
1803         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
1804         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
1805         els_iocb->control_flags = 0;
1806         els_iocb->rx_byte_count =
1807             cpu_to_le32(bsg_job->reply_payload.payload_len);
1808         els_iocb->tx_byte_count =
1809             cpu_to_le32(bsg_job->request_payload.payload_len);
1810
1811         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
1812             (bsg_job->request_payload.sg_list)));
1813         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
1814             (bsg_job->request_payload.sg_list)));
1815         els_iocb->tx_len = cpu_to_le32(sg_dma_len
1816             (bsg_job->request_payload.sg_list));
1817
1818         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
1819             (bsg_job->reply_payload.sg_list)));
1820         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
1821             (bsg_job->reply_payload.sg_list)));
1822         els_iocb->rx_len = cpu_to_le32(sg_dma_len
1823             (bsg_job->reply_payload.sg_list));
1824 }
1825
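/**
 * qla2x00_ct_iocb() - Prepare a CT pass-through (MS) IOCB (legacy ISPs).
 * @sp: SCSI Request Block
 * @ct_iocb: MS IOCB to populate
 *
 * Reply-payload segments beyond the first are carried in Continuation
 * Type 1 IOCBs, five DSDs per continuation entry.
 */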
1826 static void
1827 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
1828 {
1829         uint16_t        avail_dsds;
1830         uint32_t        *cur_dsd;
1831         struct scatterlist *sg;
1832         int index;
1833         uint16_t tot_dsds;
1834         scsi_qla_host_t *vha = sp->fcport->vha;
1835         struct qla_hw_data *ha = vha->hw;
1836         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
        int loop_iteration = 0;
1838         int cont_iocb_prsnt = 0;
1839         int entry_count = 1;
1840
1841         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
1842         ct_iocb->entry_type = CT_IOCB_TYPE;
1843         ct_iocb->entry_status = 0;
1844         ct_iocb->handle1 = sp->handle;
1845         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
1846         ct_iocb->status = __constant_cpu_to_le16(0);
1847         ct_iocb->control_flags = __constant_cpu_to_le16(0);
1848         ct_iocb->timeout = 0;
        ct_iocb->cmd_dsd_count =
            cpu_to_le16(bsg_job->request_payload.sg_cnt);
        ct_iocb->total_dsd_count =
            cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
1853         ct_iocb->req_bytecount =
1854             cpu_to_le32(bsg_job->request_payload.payload_len);
1855         ct_iocb->rsp_bytecount =
1856             cpu_to_le32(bsg_job->reply_payload.payload_len);
1857
1858         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
1859             (bsg_job->request_payload.sg_list)));
1860         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
1861             (bsg_job->request_payload.sg_list)));
1862         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
1863
1864         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
1865             (bsg_job->reply_payload.sg_list)));
1866         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
1867             (bsg_job->reply_payload.sg_list)));
1868         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
1869
1870         avail_dsds = 1;
1871         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
1872         index = 0;
1873         tot_dsds = bsg_job->reply_payload.sg_cnt;
1874
1875         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1876                 dma_addr_t       sle_dma;
1877                 cont_a64_entry_t *cont_pkt;
1878
1879                 /* Allocate additional continuation packets? */
1880                 if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Cont.
                         * Type 1 IOCB.
                         */
1885                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1886                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1887                         avail_dsds = 5;
1888                         cont_iocb_prsnt = 1;
1889                         entry_count++;
1890                 }
1891
1892                 sle_dma = sg_dma_address(sg);
1893                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
1894                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
1895                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
                loop_iteration++;
1897                 avail_dsds--;
1898         }
1899         ct_iocb->entry_count = entry_count;
1900 }
1901
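/**
 * qla24xx_ct_iocb() - Prepare a CT pass-through IOCB (ISP24xx and later).
 * @sp: SCSI Request Block
 * @ct_iocb: CT IOCB to populate
 *
 * Reply-payload segments beyond the first are carried in Continuation
 * Type 1 IOCBs, five DSDs per continuation entry.
 */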
1902 static void
1903 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
1904 {
1905         uint16_t        avail_dsds;
1906         uint32_t        *cur_dsd;
1907         struct scatterlist *sg;
1908         int index;
1909         uint16_t tot_dsds;
1910         scsi_qla_host_t *vha = sp->fcport->vha;
1911         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
        int loop_iteration = 0;
1913         int cont_iocb_prsnt = 0;
1914         int entry_count = 1;
1915
1916         ct_iocb->entry_type = CT_IOCB_TYPE;
1917         ct_iocb->entry_status = 0;
1918         ct_iocb->sys_define = 0;
1919         ct_iocb->handle = sp->handle;
1920
1921         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1922         ct_iocb->vp_index = sp->fcport->vp_idx;
1923         ct_iocb->comp_status = __constant_cpu_to_le16(0);
1924
        ct_iocb->cmd_dsd_count =
            cpu_to_le16(bsg_job->request_payload.sg_cnt);
        ct_iocb->timeout = 0;
        ct_iocb->rsp_dsd_count =
            cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1930         ct_iocb->rsp_byte_count =
1931             cpu_to_le32(bsg_job->reply_payload.payload_len);
1932         ct_iocb->cmd_byte_count =
1933             cpu_to_le32(bsg_job->request_payload.payload_len);
1934         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
1935             (bsg_job->request_payload.sg_list)));
1936         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
1937            (bsg_job->request_payload.sg_list)));
1938         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
1939             (bsg_job->request_payload.sg_list));
1940
1941         avail_dsds = 1;
1942         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
1943         index = 0;
1944         tot_dsds = bsg_job->reply_payload.sg_cnt;
1945
1946         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1947                 dma_addr_t       sle_dma;
1948                 cont_a64_entry_t *cont_pkt;
1949
1950                 /* Allocate additional continuation packets? */
1951                 if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Cont.
                         * Type 1 IOCB.
                         */
1956                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1957                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1958                         avail_dsds = 5;
1959                         cont_iocb_prsnt = 1;
1960                         entry_count++;
1961                 }
1962
1963                 sle_dma = sg_dma_address(sg);
1964                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
1965                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
1966                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
                loop_iteration++;
1968                 avail_dsds--;
1969         }
1970         ct_iocb->entry_count = entry_count;
1971 }
1972
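/**
 * qla2x00_start_sp() - Build and issue the IOCB(s) for a control SRB.
 * @sp: SCSI Request Block
 *
 * Allocates ring space under ha->hardware_lock, dispatches to the IOCB
 * builder matching the SRB context type and ISP generation, and then
 * notifies the ISP of the new request-queue entries.
 *
 * Returns QLA_SUCCESS if the IOCB was queued, QLA_FUNCTION_FAILED
 * otherwise.
 */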
1973 int
1974 qla2x00_start_sp(srb_t *sp)
1975 {
1976         int rval;
1977         struct qla_hw_data *ha = sp->fcport->vha->hw;
1978         void *pkt;
1979         struct srb_ctx *ctx = sp->ctx;
1980         unsigned long flags;
1981
1982         rval = QLA_FUNCTION_FAILED;
1983         spin_lock_irqsave(&ha->hardware_lock, flags);
1984         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
1985         if (!pkt) {
1986                 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
1987                     "qla2x00_alloc_iocbs failed.\n");
1988                 goto done;
1989         }
1990
1991         rval = QLA_SUCCESS;
1992         switch (ctx->type) {
1993         case SRB_LOGIN_CMD:
1994                 IS_FWI2_CAPABLE(ha) ?
1995                     qla24xx_login_iocb(sp, pkt) :
1996                     qla2x00_login_iocb(sp, pkt);
1997                 break;
1998         case SRB_LOGOUT_CMD:
1999                 IS_FWI2_CAPABLE(ha) ?
2000                     qla24xx_logout_iocb(sp, pkt) :
2001                     qla2x00_logout_iocb(sp, pkt);
2002                 break;
2003         case SRB_ELS_CMD_RPT:
2004         case SRB_ELS_CMD_HST:
2005                 qla24xx_els_iocb(sp, pkt);
2006                 break;
2007         case SRB_CT_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_ct_iocb(sp, pkt) :
                    qla2x00_ct_iocb(sp, pkt);
2011                 break;
2012         case SRB_ADISC_CMD:
2013                 IS_FWI2_CAPABLE(ha) ?
2014                     qla24xx_adisc_iocb(sp, pkt) :
2015                     qla2x00_adisc_iocb(sp, pkt);
2016                 break;
2017         case SRB_TM_CMD:
2018                 qla24xx_tm_iocb(sp, pkt);
2019                 break;
2020         default:
2021                 break;
2022         }
2023
2024         wmb();
2025         qla2x00_start_iocbs(sp);
2026 done:
2027         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2028         return rval;
2029 }