/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
/**
 * This method returns the sgl element pair for the specified sgl_pair index.
 * @sci_req: This parameter specifies the IO request for which to retrieve
 *    the Scatter-Gather List element pair.
 * @sgl_pair_index: This parameter specifies the index into the SGL element
 *    pair to be retrieved.
 *
 * This method returns a pointer to a struct scu_sgl_element_pair.
 */
static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
	struct scic_sds_request *sci_req,
	u32 sgl_pair_index)
{
	struct scu_task_context *task_context;

	task_context = (struct scu_task_context *)sci_req->task_context_buffer;
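
	/*
	 * The first two element pairs are embedded directly in the task
	 * context (the AB and CD pairs); any additional pairs spill over
	 * into the request's sg_table, so, for example, sgl_pair_index == 3
	 * resolves to sg_table[1].
	 */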
	if (sgl_pair_index == 0) {
		return &task_context->sgl_pair_ab;
	} else if (sgl_pair_index == 1) {
		return &task_context->sgl_pair_cd;
	}

	return &sci_req->sg_table[sgl_pair_index - 2];
}
/**
 * This function will build the SGL list for an IO request.
 * @sds_request: This parameter specifies the IO request for which to build
 *    the Scatter-Gather List.
 */
static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
{
	struct isci_request *isci_request = sci_req_to_ireq(sds_request);
	struct isci_host *isci_host = isci_request->isci_host;
	struct sas_task *task = isci_request_access_task(isci_request);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;
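
		/*
		 * Each scu_sgl_element_pair carries two SGL elements (A and
		 * B); the hardware walks from pair to pair through the
		 * next_pair_{upper,lower} physical address written below.
		 */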
		while (sg) {
			scu_sg = scic_sds_request_get_sgl_element_pair(sds_request, sg_idx);
			SCU_SGL_COPY(scu_sg->A, sg);
			sg = sg_next(sg);
			if (sg) {
				SCU_SGL_COPY(scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				SCU_SGL_ZERO(scu_sg->B);

			if (prev_sg) {
				dma_addr = scic_io_request_get_dma_addr(sds_request, scu_sg);
				prev_sg->next_pair_upper = upper_32_bits(dma_addr);
				prev_sg->next_pair_lower = lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
							       sg_idx);
		dma_addr = dma_map_single(&isci_host->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		isci_request->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}
static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
{
	struct ssp_cmd_iu *cmd_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &sci_req->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
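
	/*
	 * The SSP command IU is a big-endian wire format, so the CDB is
	 * byte swapped one 32-bit word at a time as it is copied in.
	 */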
	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}
static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
{
	struct ssp_task_iu *task_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &sci_req->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(ireq->ttype == tmf_task) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}
/**
 * This method will fill in the SCU Task Context for any type of SSP request.
 * @sds_request: the request for which to construct the task context
 * @task_context: the SCU task context buffer being constructed
 */
static void scu_ssp_request_construct_task_context(
	struct scic_sds_request *sds_request,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	target_device = scic_sds_request_get_device(sds_request);
	target_port = scic_sds_request_get_port(sds_request);
	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sds_request->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = sci_req->io_tag; */
	task_context->task_phase = 0x01;
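
	/*
	 * The post context below is what is eventually written to the SCU
	 * post queue to start the request: the protocol engine group, the
	 * logical port index and (once known) the task context index OR'd
	 * together.
	 */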
	if (sds_request->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							owning_controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 ISCI_TAG_TCI(sds_request->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data.
		 *
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi.
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							owning_controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}
	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}
/**
 * This method will fill in the SCU Task Context for an SSP IO request.
 * @sci_req: the IO request for which to construct the task context
 * @dir: the DMA data direction for the request
 * @len: the total transfer length, in bytes
 */
static void scu_ssp_io_request_construct_task_context(
	struct scic_sds_request *sci_req,
	enum dma_data_direction dir,
	u32 len)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		scic_sds_request_build_sgl(sci_req);
}
/**
 * This method will fill in the SCU Task Context for an SSP Task request.
 * The following important settings are utilized:
 *
 *   -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *      request is issued ahead of other tasks destined for the same
 *      Remote Node.
 *   -# task_type == SCU_TASK_TYPE_RAW_FRAME.  This indicates that the task
 *      IU is transmitted as a raw frame rather than as a normal SSP
 *      request.
 *   -# control_frame == 1.  This ensures that the proper endianness is set
 *      so that the bytes are transmitted in the right order for a task
 *      frame.
 *
 * @sci_req: This parameter specifies the task request object being
 *    constructed.
 */
static void scu_ssp_task_request_construct_task_context(
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}
/**
 * This method will fill in the SCU Task Context for any type of SATA
 *    request.  This is called from the various SATA constructors.
 * @sci_req: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * The general io request construction is complete and the buffer assignment
 * for the command buffer is complete.  TODO: Revisit task context
 * construction to determine what is common for SSP/SMP/STP task context
 * structures.
 */
static void scu_sata_request_construct_task_context(
	struct scic_sds_request *sci_req,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	target_device = scic_sds_request_get_device(sci_req);
	target_port = scic_sds_request_get_port(sci_req);
	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sci_req->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
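
	/*
	 * The length is one u32 short of the full H2D FIS because the first
	 * dword of the FIS travels in the task context itself (see
	 * type.words[0] below); only the remainder is fetched by DMA from
	 * the command buffer.
	 */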
	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;
	if (sci_req->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							owning_controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 ISCI_TAG_TCI(sci_req->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data.
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi.
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							owning_controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}
	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context. We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = scic_io_request_get_dma_addr(sci_req,
						((char *) &sci_req->stp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}
/**
 * scu_stp_raw_request_construct_task_context -
 * @sci_req: This parameter specifies the STP request object for which to
 *    construct a RAW command frame task context.
 * @task_context: This parameter specifies the SCU specific task context
 *    buffer to construct.
 *
 * This method performs the operations common to all SATA/STP requests
 * utilizing the raw frame method.
 */
static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req,
						       struct scu_task_context *task_context)
{
	struct scic_sds_request *sci_req = to_sci_req(stp_req);

	scu_sata_request_construct_task_context(sci_req, task_context);

	task_context->control_frame = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}
static enum sci_status
scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
				   bool copy_rx_frame)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

	scu_stp_raw_request_construct_task_context(stp_req,
						   sci_req->task_context_buffer);

	pio->current_transfer_bytes = 0;
	pio->ending_error = 0;
	pio->ending_status = 0;

	pio->request_current.sgl_offset = 0;
	pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		scic_sds_request_build_sgl(sci_req);
		/* Since the IO request copy of the TC contains the same data
		 * as the actual TC this pointer is valid for either.
		 */
		pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
	} else {
		/* The user does not want the data copied to the SGL buffer
		 * location
		 */
		pio->request_current.sgl_pair = NULL;
	}

	return SCI_SUCCESS;
}
/**
 *
 * @sci_req: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to be
 *    a UDMA request or an NCQ request.
 *    - A value of 0 indicates UDMA.
 *    - A value of 1 indicates NCQ.
 *
 * This method performs request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).
 */
static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
						     u8 optimized_task_type,
						     u32 len,
						     enum dma_data_direction dir)
{
	struct scu_task_context *task_context = sci_req->task_context_buffer;

	/* Build the STP task context structure */
	scu_sata_request_construct_task_context(sci_req, task_context);

	/* Copy over the SGL elements */
	scic_sds_request_build_sgl(sci_req);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task
		 * type values is consistent with the difference between FPDMA
		 * READ and FPDMA WRITE values.  Add the supplied task type
		 * parameter to this difference to set the task type properly
		 * for this DATA OUT (WRITE) case.
		 */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
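
		/*
		 * For example, assuming the hardware keeps the same READ ->
		 * WRITE spacing for the FPDMA task types, a caller passing
		 * SCU_TASK_TYPE_FPDMAQ_READ ends up with the matching FPDMA
		 * queued-write task type here.
		 */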
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type.
		 */
		task_context->task_type = optimized_task_type;
	}
}
static enum sci_status
scic_io_request_construct_sata(struct scic_sds_request *sci_req,
			       u32 len,
			       enum dma_data_direction dir,
			       bool copy)
{
	enum sci_status status = SCI_SUCCESS;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
								   sci_req->task_context_buffer);
			return SCI_SUCCESS;
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"management protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__, task->task_proto);
		return SCI_FAILURE;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
							   sci_req->task_context_buffer);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		scic_sds_stp_optimized_request_construct(sci_req,
							 SCU_TASK_TYPE_FPDMAQ_READ,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		scic_sds_stp_optimized_request_construct(sci_req,
							 SCU_TASK_TYPE_DMA_IN,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* PIO */
	return scic_sds_stp_pio_request_construct(sci_req, copy);
}
static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	sci_req->protocol = SCIC_SSP_PROTOCOL;

	scu_ssp_io_request_construct_task_context(sci_req,
						  task->data_dir,
						  task->total_xfer_len);

	scic_sds_io_request_build_ssp_command_iu(sci_req);

	sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}
enum sci_status scic_task_request_construct_ssp(
	struct scic_sds_request *sci_req)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(sci_req);

	/* Fill in the SSP Task IU */
	scic_sds_task_request_build_ssp_task_iu(sci_req);

	sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}
static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status;
	bool copy = false;
	struct isci_request *isci_request = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(isci_request);

	sci_req->protocol = SCIC_STP_PROTOCOL;

	copy = (task->data_dir == DMA_NONE) ? false : true;

	status = scic_io_request_construct_sata(sci_req,
						task->total_xfer_len,
						task->data_dir,
						copy);

	if (status == SCI_SUCCESS)
		sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);

	return status;
}
enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status = SCI_SUCCESS;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
								   sci_req->task_context_buffer);
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"Protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (status != SCI_SUCCESS)
		return status;
	sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);

	return status;
}
/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @sci_req: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	u32 ret_val = 0;

	if (readl(&scic->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = scic->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct scic_sds_request
		 */
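		/* e.g. for the TCi value 2 (and the 256-byte task context
		 * that the comment above assumes), the read below lands at
		 * BAR1 + 0x200000 + 0x2c + 2 * 256 == BAR1 + 0x20022c.
		 */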
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(sci_req->io_tag)));
	}

	return ret_val;
}
enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	struct scu_task_context *task_context;
	enum sci_base_request_states state;

	if (sci_req->device_sequence !=
	    scic_sds_remote_device_get_sequence(sci_req->target_device))
		return SCI_FAILURE;

	state = sci_req->sm.current_state_id;
	if (state != SCI_REQ_CONSTRUCTED) {
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC IO Request requested to start while in wrong "
			 "state %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}
	/* if necessary, allocate a TCi for the io request object and then,
	 * if necessary, copy the constructed TC data into the actual TC
	 * buffer.  If everything is successful the post context field is
	 * updated with the TCi so the controller can post the request to the
	 * hardware.
	 */
	if (sci_req->io_tag == SCI_CONTROLLER_INVALID_IO_TAG)
		sci_req->io_tag = scic_controller_allocate_io_tag(scic);

	/* Record the IO Tag in the request */
	if (sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
		task_context = sci_req->task_context_buffer;

		task_context->task_index = ISCI_TAG_TCI(sci_req->io_tag);
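
		/* An io_tag packs a sequence number together with the task
		 * context index; ISCI_TAG_TCI() extracts just the TCi
		 * portion for the hardware.
		 */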
		switch (task_context->protocol_type) {
		case SCU_TASK_CONTEXT_PROTOCOL_SMP:
		case SCU_TASK_CONTEXT_PROTOCOL_SSP:
			/* SSP/SMP Frame */
			task_context->type.ssp.tag = sci_req->io_tag;
			task_context->type.ssp.target_port_transfer_tag =
				0xFFFF;
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_STP:
			/* STP/SATA Frame
			 * task_context->type.stp.ncq_tag = sci_req->ncq_tag;
			 */
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_NONE:
			/* TODO: When do we set no protocol type? */
			break;

		default:
			/* This should never happen since we build the IO
			 * requests.
			 */
			break;
		}

		/*
		 * Check to see if we need to copy the task context buffer
		 * or have been building into the task context buffer
		 */
		if (sci_req->was_tag_assigned_by_user == false)
			scic_sds_controller_copy_task_context(scic, sci_req);

		/* Add to the post_context the io tag value */
		sci_req->post_context |= ISCI_TAG_TCI(sci_req->io_tag);

		/* Everything is good go ahead and change state */
		sci_change_state(&sci_req->sm, SCI_REQ_STARTED);

		return SCI_SUCCESS;
	}
	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}
enum sci_status
scic_sds_io_request_terminate(struct scic_sds_request *sci_req)
{
	enum sci_base_request_states state;

	state = sci_req->sm.current_state_id;

	switch (state) {
	case SCI_REQ_CONSTRUCTED:
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_TASK_ABORT,
					    SCI_FAILURE_IO_TERMINATED);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_STARTED:
	case SCI_REQ_TASK_WAIT_TC_COMP:
	case SCI_REQ_SMP_WAIT_RESP:
	case SCI_REQ_SMP_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_D2H:
	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
	case SCI_REQ_STP_NON_DATA_WAIT_D2H:
	case SCI_REQ_STP_PIO_WAIT_H2D:
	case SCI_REQ_STP_PIO_WAIT_FRAME:
	case SCI_REQ_STP_PIO_DATA_IN:
	case SCI_REQ_STP_PIO_DATA_OUT:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
	case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
		sci_change_state(&sci_req->sm, SCI_REQ_ABORTING);
		return SCI_SUCCESS;
	case SCI_REQ_TASK_WAIT_TC_RESP:
		sci_change_state(&sci_req->sm, SCI_REQ_ABORTING);
		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_ABORTING:
		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_COMPLETED:
	default:
		dev_warn(scic_to_dev(sci_req->owning_controller),
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n",
			 __func__,
			 sci_req->sm.current_state_id);
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}
enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req)
{
	enum sci_base_request_states state;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	state = sci_req->sm.current_state_id;
	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
		      "isci: request completion from wrong state (%d)\n", state))
		return SCI_FAILURE_INVALID_STATE;

	if (!sci_req->was_tag_assigned_by_user)
		scic_controller_free_io_tag(scic, sci_req->io_tag);

	if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
		scic_sds_controller_release_frame(scic,
						  sci_req->saved_rx_frame_index);

	/* XXX can we just stop the machine and remove the 'final' state? */
	sci_change_state(&sci_req->sm, SCI_REQ_FINAL);

	return SCI_SUCCESS;
}
enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
						  u32 event_code)
{
	enum sci_base_request_states state;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	state = sci_req->sm.current_state_id;

	if (state != SCI_REQ_STP_PIO_DATA_IN) {
		dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n",
			 __func__, event_code, state);

		return SCI_FAILURE_INVALID_STATE;
	}

	switch (scu_get_event_specifier(event_code)) {
	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
		/* We are waiting for data and the SCU has signaled R_ERR for
		 * the data frame.  Go back to waiting for the D2H Register
		 * FIS.
		 */
		sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		return SCI_SUCCESS;
	default:
		dev_err(scic_to_dev(scic),
			"%s: pio request unexpected event %#x\n",
			__func__, event_code);

		/* TODO Should we fail the PIO request when we get an
		 * unexpected event?
		 */
		return SCI_FAILURE;
	}
}
/*
 * This function copies response data for requests returning response data
 *    instead of sense data.
 * @sci_req: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &sci_req->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}
static enum sci_status
request_started_state_tc_event(struct scic_sds_request *sci_req,
			       u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000
	 * to determine SDMA status
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response status to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &sci_req->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_GOOD,
						    SCI_SUCCESS_IO_DONE_EARLY);
		} else {
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &sci_req->ssp.rsp;
		datapres = resp_iu->datapres;
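
		/* Per the SAS SSP response IU definition, DATAPRES values of
		 * 0x01 (RESPONSE_DATA) and 0x02 (SENSE_DATA) indicate that
		 * the IU carries a valid payload; anything else means no
		 * data is present.
		 */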
		if (datapres == 1 || datapres == 2) {
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);
		} else
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_GOOD,
						    SCI_SUCCESS);
		break;
	/* only STP devices get suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (sci_req->protocol == SCIC_STP_PROTOCOL) {
			scic_sds_request_set_status(sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		} else {
			scic_sds_request_set_status(sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		}
		break;
	/* both STP and SSP devices get suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		scic_sds_request_set_status(sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		break;
	/* neither SSP nor STP gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}
	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
	return SCI_SUCCESS;
}
static enum sci_status
request_aborting_state_tc_event(struct scic_sds_request *sci_req,
				u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT,
					    SCI_FAILURE_IO_TERMINATED);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* Unless we get some strange error, wait for the task abort
		 * to complete.
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}
static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *sci_req,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/* Currently, the decision is to simply allow the task request
		 * to timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses
		 * if we decide to send the task IU again.
		 */
		dev_warn(scic_to_dev(sci_req->owning_controller),
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n", __func__, sci_req,
			 completion_code);

		sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		scic_sds_request_set_status(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
static enum sci_status
smp_request_await_response_tc_event(struct scic_sds_request *sci_req,
				    u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* In the AWAIT RESPONSE state, any TC completion is
		 * unexpected.  But if the TC has success status, we
		 * complete the IO anyway.
		 */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/* These statuses have been seen in a specific LSI
		 * expander, which sometimes is not able to send an SMP
		 * response within 2 ms.  This causes our hardware to break
		 * the connection and set the TC completion with one of
		 * these SMP_XXX_XX_ERR statuses.  For this type of error,
		 * we ask the scic user to retry the request.
		 */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR,
					    SCI_FAILURE_RETRY_REQUIRED);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		scic_sds_request_set_status(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
static enum sci_status
smp_request_await_tc_event(struct scic_sds_request *sci_req,
			   u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion statuses cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		scic_sds_request_set_status(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req,
				     u16 ncq_tag)
{
	/*
	 * @note This could be made to return an error to the user if the user
	 *       attempts to set the NCQ tag in the wrong state.
	 */
	req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
}
/**
 *
 * @stp_req: The STP/PIO request whose SGL position is being advanced.
 *
 * Get the next SGL element from the request:
 *   - Check on which SGL element pair we are working.
 *   - If working on SGL pair element A, advance to element B.
 *   - Otherwise, check to see if there are more SGL element pairs for this
 *     IO request.  If there are, advance to the next pair and return its
 *     element A.
 *
 * Returns the next struct scu_sgl_element *, or NULL when the list is
 * exhausted.
 */
static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
{
	struct scu_sgl_element *current_sgl;
	struct scic_sds_request *sci_req = to_sci_req(stp_req);
	struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;

	if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
		if (pio_sgl->sgl_pair->B.address_lower == 0 &&
		    pio_sgl->sgl_pair->B.address_upper == 0) {
			current_sgl = NULL;
		} else {
			pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
			current_sgl = &pio_sgl->sgl_pair->B;
		}
	} else {
		if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
		    pio_sgl->sgl_pair->next_pair_upper == 0) {
			current_sgl = NULL;
		} else {
			u64 phys_addr;

			phys_addr = pio_sgl->sgl_pair->next_pair_upper;
			phys_addr <<= 32;
			phys_addr |= pio_sgl->sgl_pair->next_pair_lower;

			pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
			pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
			current_sgl = &pio_sgl->sgl_pair->A;
		}
	}

	return current_sgl;
}
static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req,
					u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&sci_req->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
		break;

	default:
		/* All other completion statuses cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		scic_sds_request_set_status(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */

/* transmit DATA_FIS from (current sgl + offset) for input
 * parameter length.  The current sgl and offset are already stored in the IO
 * request.
 */
static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data_frame(
	struct scic_sds_request *sci_req,
	u32 length)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scu_task_context *task_context;
	struct scu_sgl_element *current_sgl;

	/* Recycle the TC and reconstruct it for sending out a DATA FIS
	 * containing the data from current_sgl+offset for the input length.
	 */
	task_context = scic_sds_controller_get_task_context_buffer(scic,
								   sci_req->io_tag);

	if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
		current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
	else
		current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;

	/* update the TC */
	task_context->command_iu_upper = current_sgl->address_upper;
	task_context->command_iu_lower = current_sgl->address_lower;
	task_context->transfer_length_bytes = length;
	task_context->type.stp.fis_type = FIS_DATA;

	/* send the new TC out. */
	return scic_controller_continue_io(sci_req);
}
static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
{
	struct scu_sgl_element *current_sgl;
	u32 sgl_offset;
	u32 remaining_bytes_in_current_sgl = 0;
	enum sci_status status = SCI_SUCCESS;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;

	sgl_offset = stp_req->type.pio.request_current.sgl_offset;

	if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
		current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
		remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
	} else {
		current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
		remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
	}
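
	/*
	 * Each pass through here transmits at most one DATA FIS: either the
	 * rest of the current SGL element or the remainder of the PIO
	 * transfer count from the last PIO Setup FIS, whichever is smaller.
	 */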
	if (stp_req->type.pio.pio_transfer_bytes > 0) {
		if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
			/* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
			status = scic_sds_stp_request_pio_data_out_transmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
			if (status == SCI_SUCCESS) {
				stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;

				/* update the current sgl, sgl_offset and save for future */
				current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
				sgl_offset = 0;
			}
		} else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
			/* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
			status = scic_sds_stp_request_pio_data_out_transmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);

			if (status == SCI_SUCCESS) {
				/* Sgl offset will be adjusted and saved for future */
				sgl_offset += stp_req->type.pio.pio_transfer_bytes;
				current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;

				stp_req->type.pio.pio_transfer_bytes = 0;
			}
		}
	}

	if (status == SCI_SUCCESS) {
		stp_req->type.pio.request_current.sgl_offset = sgl_offset;
	}

	return status;
}
/**
 *
 * @stp_request: The request that is used for the SGL processing.
 * @data_buffer: The buffer of data to be copied.
 * @length: The length of the data transfer.
 *
 * Copy the data from the buffer for the length specified to the IO request
 * SGL specified data region.  Returns an enum sci_status.
 */
static enum sci_status
scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
						  u8 *data_buf, u32 len)
{
	struct scic_sds_request *sci_req;
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	sci_req = to_sci_req(stp_req);
	ireq = sci_req_to_ireq(sci_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);
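
			/* map the page (it may be in highmem) so the
			 * unsolicited-frame payload can be copied by the CPU
			 */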
			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page, KM_IRQ0);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr, KM_IRQ0);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}
/**
 * @sci_req: The PIO DATA IN request that is to receive the data.
 * @data_buffer: The buffer to copy from.
 *
 * Copy the data buffer to the io request data region.  Returns an enum
 * sci_status.
 */
static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
	struct scic_sds_stp_request *sci_req,
	u8 *data_buffer)
{
	enum sci_status status;

	/*
	 * If there is less than 1K remaining in the transfer request
	 * copy just the data for the transfer
	 */
	if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
		status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
			sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);

		if (status == SCI_SUCCESS)
			sci_req->type.pio.pio_transfer_bytes = 0;
	} else {
		/* We are transferring the whole frame so copy */
		status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
			sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);

		if (status == SCI_SUCCESS)
			sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
	}

	return status;
}
static enum sci_status
stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req,
					      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		break;

	default:
		/* All other completion statuses cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		scic_sds_request_set_status(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}
static enum sci_status
pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req,
			      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;
	bool all_frames_transferred = false;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* Transmit data */
		if (stp_req->type.pio.pio_transfer_bytes != 0) {
			status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
			if (status == SCI_SUCCESS) {
				if (stp_req->type.pio.pio_transfer_bytes == 0)
					all_frames_transferred = true;
			}
		} else if (stp_req->type.pio.pio_transfer_bytes == 0) {
			/*
			 * this will happen if all the data is written on the
			 * first pass after the PIO Setup FIS is received
			 */
			all_frames_transferred = true;
		}

		/* all data transferred. */
		if (all_frames_transferred) {
			/*
			 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
			 * and wait for a PIO Setup FIS or a D2H Register FIS.
			 */
			sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		}
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}
static void scic_sds_stp_request_udma_complete_request(
	struct scic_sds_request *request,
	u32 scu_status,
	enum sci_status sci_status)
{
	scic_sds_request_set_status(request, scu_status, sci_status);
	sci_change_state(&request->sm, SCI_REQ_COMPLETED);
}
static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
								       u32 frame_index)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	struct dev_to_host_fis *frame_header;
	enum sci_status status;
	u32 *frame_buffer;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if ((status == SCI_SUCCESS) &&
	    (frame_header->fis_type == FIS_REGD2H)) {
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
						       frame_header,
						       frame_buffer);
	}

	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
static enum sci_status
scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
				  u32 frame_index)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	enum sci_base_request_states state;
	enum sci_status status;
	ssize_t word_cnt;

	state = sci_req->sm.current_state_id;
	switch (state) {
	case SCI_REQ_STARTED: {
		struct ssp_frame_hdr ssp_hdr;
		void *frame_header;

		scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							      frame_index,
							      &frame_header);

		word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
		sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);

		if (ssp_hdr.frame_type == SSP_RESPONSE) {
			struct ssp_response_iu *resp_iu;
			ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
								      frame_index,
								      (void **)&resp_iu);

			sci_swab32_cpy(&sci_req->ssp.rsp, resp_iu, word_cnt);

			resp_iu = &sci_req->ssp.rsp;

			if (resp_iu->datapres == 0x01 ||
			    resp_iu->datapres == 0x02) {
				scic_sds_request_set_status(sci_req,
							    SCU_TASK_DONE_CHECK_RESPONSE,
							    SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
			} else
				scic_sds_request_set_status(sci_req,
							    SCU_TASK_DONE_GOOD,
							    SCI_SUCCESS);
		} else {
			/* not a response frame, why did it get forwarded? */
			dev_err(scic_to_dev(scic),
				"%s: SCIC IO Request 0x%p received unexpected "
				"frame %d type 0x%02x\n", __func__, sci_req,
				frame_index, ssp_hdr.frame_type);
		}

		/*
		 * In any case we are done with this frame buffer, return it to
		 * the controller.
		 */
		scic_sds_controller_release_frame(scic, frame_index);

		return SCI_SUCCESS;
	}
	case SCI_REQ_TASK_WAIT_TC_RESP:
		scic_sds_io_request_copy_response(sci_req);
		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		scic_sds_controller_release_frame(scic, frame_index);
		return SCI_SUCCESS;
	case SCI_REQ_SMP_WAIT_RESP: {
		struct smp_resp *rsp_hdr = &sci_req->smp.rsp;
		void *frame_header;

		scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							      frame_index,
							      &frame_header);

		/* byte swap the header. */
		word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
		sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);

		if (rsp_hdr->frame_type == SMP_RESPONSE) {
			void *smp_resp;

			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
								      frame_index,
								      &smp_resp);

			word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) /
				sizeof(u32);

			sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
				       smp_resp, word_cnt);

			scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
						    SCI_SUCCESS);

			sci_change_state(&sci_req->sm, SCI_REQ_SMP_WAIT_TC_COMP);
		} else {
			/*
			 * This was not a response frame, so why did it get
			 * forwarded?
			 */
			dev_err(scic_to_dev(scic),
				"%s: SCIC SMP Request 0x%p received unexpected "
				"frame %d type 0x%02x\n",
				__func__,
				sci_req,
				frame_index,
				rsp_hdr->frame_type);

			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
						    SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

			sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
		}

		scic_sds_controller_release_frame(scic, frame_index);

		return SCI_SUCCESS;
	}
	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
		return scic_sds_stp_request_udma_general_frame_handler(sci_req,
								       frame_index);

	case SCI_REQ_STP_UDMA_WAIT_D2H:
		/* Use the general frame handler to copy the response data */
		status = scic_sds_stp_request_udma_general_frame_handler(sci_req,
									 frame_index);

		if (status != SCI_SUCCESS)
			return status;

		scic_sds_stp_request_udma_complete_request(sci_req,
							   SCU_TASK_DONE_CHECK_RESPONSE,
							   SCI_FAILURE_IO_RESPONSE_VALID);

		return SCI_SUCCESS;
	case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
		struct dev_to_host_fis *frame_header;
		u32 *frame_buffer;

		status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
								       frame_index,
								       (void **)&frame_header);

		if (status != SCI_SUCCESS) {
			dev_err(scic_to_dev(scic),
				"%s: SCIC IO Request 0x%p could not get frame "
				"header for frame index %d, status %x\n",
				__func__, stp_req, frame_index, status);

			return status;
		}

		switch (frame_header->fis_type) {
		case FIS_REGD2H:
			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
								      frame_index,
								      (void **)&frame_buffer);

			scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
							       frame_header,
							       frame_buffer);

			/* The command has completed with error */
			scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);
			break;

		default:
			dev_warn(scic_to_dev(scic),
				 "%s: IO Request:0x%p Frame Id:%d protocol "
				 "violation occurred\n", __func__, stp_req,
				 frame_index);

			scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
						    SCI_FAILURE_PROTOCOL_VIOLATION);
			break;
		}

		sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);

		/* Frame has been decoded, return it to the controller */
		scic_sds_controller_release_frame(scic, frame_index);

		return status;
	}
	case SCI_REQ_STP_PIO_WAIT_FRAME: {
		struct isci_request *ireq = sci_req_to_ireq(sci_req);
		struct sas_task *task = isci_request_access_task(ireq);
		struct dev_to_host_fis *frame_header;
		u32 *frame_buffer;

		status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
								       frame_index,
								       (void **)&frame_header);

		if (status != SCI_SUCCESS) {
			dev_err(scic_to_dev(scic),
				"%s: SCIC IO Request 0x%p could not get frame "
				"header for frame index %d, status %x\n",
				__func__, stp_req, frame_index, status);
			return status;
		}
		switch (frame_header->fis_type) {
		case FIS_PIO_SETUP:
			/* Get from the frame buffer the PIO Setup Data */
			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
								      frame_index,
								      (void **)&frame_buffer);

			/* Get the data from the PIO Setup.  The SCU hardware
			 * returns the first word in the frame_header and the
			 * rest of the data is in the frame buffer, so we need
			 * to back up one dword.
			 */

			/* transfer_count: first 16 bits in the 4th dword */
			stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;

			/* ending_status: 4th byte in the 3rd dword */
			stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;

			scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
							       frame_header,
							       frame_buffer);

			sci_req->stp.rsp.status = stp_req->type.pio.ending_status;

			/* The next state is dependent on whether the
			 * request was PIO Data-in or Data-out.
			 */
			if (task->data_dir == DMA_FROM_DEVICE) {
				sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_IN);
			} else if (task->data_dir == DMA_TO_DEVICE) {
				/* Transmit data */
				status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
				if (status != SCI_SUCCESS)
					break;
				sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_OUT);
			}
			break;
		case FIS_SETDEVBITS:
			sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
			break;

		case FIS_REGD2H:
			if (frame_header->status & ATA_BUSY) {
				/*
				 * Now why is the drive sending a D2H Register
				 * FIS when it is still busy?  Do nothing since
				 * we are still in the right state.
				 */
				dev_dbg(scic_to_dev(scic),
					"%s: SCIC PIO Request 0x%p received "
					"D2H Register FIS with BSY status "
					"0x%x\n",
					__func__,
					stp_req,
					frame_header->status);
				break;
			}

			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
								      frame_index,
								      (void **)&frame_buffer);

			scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
							       frame_header,
							       frame_buffer);

			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);

			sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
			break;

		default:
			/* FIXME: what do we do here? */
			break;
		}

		/* Frame is decoded, return it to the controller */
		scic_sds_controller_release_frame(scic, frame_index);

		return status;
	}
	case SCI_REQ_STP_PIO_DATA_IN: {
		struct dev_to_host_fis *frame_header;
		struct sata_fis_data *frame_buffer;

		status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
								       frame_index,
								       (void **)&frame_header);

		if (status != SCI_SUCCESS) {
			dev_err(scic_to_dev(scic),
				"%s: SCIC IO Request 0x%p could not get frame "
				"header for frame index %d, status %x\n",
				__func__, stp_req, frame_index, status);
			return status;
		}

		if (frame_header->fis_type != FIS_DATA) {
			dev_err(scic_to_dev(scic),
				"%s: SCIC PIO Request 0x%p received frame %d "
				"with fis type 0x%02x when expecting a data "
				"fis.\n",
				__func__,
				stp_req,
				frame_index,
				frame_header->fis_type);

			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_GOOD,
						    SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);

			sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);

			/* Frame is decoded, return it to the controller */
			scic_sds_controller_release_frame(scic, frame_index);
			return status;
		}
		if (stp_req->type.pio.request_current.sgl_pair == NULL) {
			sci_req->saved_rx_frame_index = frame_index;
			stp_req->type.pio.pio_transfer_bytes = 0;
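
			/* No SGL to copy into (the request was constructed
			 * without copying received frames); the frame is
			 * instead held via saved_rx_frame_index and released
			 * later in scic_sds_request_complete().
			 */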
1956 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1958 (void **)&frame_buffer);
1960 status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
1961 (u8 *)frame_buffer);
1963 /* Frame is decoded return it to the controller */
1964 scic_sds_controller_release_frame(scic, frame_index);
1967 /* Check for the end of the transfer; are there more
1968 * bytes remaining for this data transfer?
1970 if (status != SCI_SUCCESS ||
1971 stp_req->type.pio.pio_transfer_bytes != 0)
1974 if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
1975 scic_sds_request_set_status(sci_req,
1976 SCU_TASK_DONE_CHECK_RESPONSE,
1977 SCI_FAILURE_IO_RESPONSE_VALID);
1979 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1981 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1986 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
1987 struct dev_to_host_fis *frame_header;
1990 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1992 (void **)&frame_header);
1993 if (status != SCI_SUCCESS) {
1994 dev_err(scic_to_dev(scic),
1995 "%s: SCIC IO Request 0x%p could not get frame "
1996 "header for frame index %d, status %x\n",
2004 switch (frame_header->fis_type) {
2006 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2008 (void **)&frame_buffer);
2010 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2014 /* The command has completed with error */
2015 scic_sds_request_set_status(sci_req,
2016 SCU_TASK_DONE_CHECK_RESPONSE,
2017 SCI_FAILURE_IO_RESPONSE_VALID);
2021 dev_warn(scic_to_dev(scic),
2022 "%s: IO Request:0x%p Frame Id:%d protocol "
2023 "violation occurred\n",
2028 scic_sds_request_set_status(sci_req,
2029 SCU_TASK_DONE_UNEXP_FIS,
2030 SCI_FAILURE_PROTOCOL_VIOLATION);
2034 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
2036 /* Frame has been decoded; return it to the controller. */
2037 scic_sds_controller_release_frame(scic, frame_index);
2041 case SCI_REQ_ABORTING:
2043 * TODO: Is it even possible to get an unsolicited frame in the aborting state?
2046 scic_sds_controller_release_frame(scic, frame_index);
2050 dev_warn(scic_to_dev(scic),
2051 "%s: SCIC IO Request given unexpected frame %x while "
2057 scic_sds_controller_release_frame(scic, frame_index);
2058 return SCI_FAILURE_INVALID_STATE;
2062 static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *sci_req,
2063 u32 completion_code)
2065 enum sci_status status = SCI_SUCCESS;
2067 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2068 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2069 scic_sds_stp_request_udma_complete_request(sci_req,
2073 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
2074 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
2075 /* We must check the response buffer to see if the D2H
2076 * Register FIS was received before we got the TC completion.
2079 if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
2080 scic_sds_remote_device_suspend(sci_req->target_device,
2081 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2083 scic_sds_stp_request_udma_complete_request(sci_req,
2084 SCU_TASK_DONE_CHECK_RESPONSE,
2085 SCI_FAILURE_IO_RESPONSE_VALID);
2087 /* If we have an error completion status for the
2088 * TC, then we can expect a D2H register FIS from
2089 * the device, so we must change state to wait for it.
2092 sci_change_state(&sci_req->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
2096 /* TODO: Check to see if any of these completion statuses need to
2097 * wait for the device-to-host register FIS.
2099 /* TODO: We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
2100 * - this occurs only on B0 silicon.
2102 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
2103 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
2104 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
2105 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
2106 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
2107 scic_sds_remote_device_suspend(sci_req->target_device,
2108 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2109 /* Fall through to the default case */
2111 /* All other completion statuses cause the I/O to be complete. */
2112 scic_sds_stp_request_udma_complete_request(sci_req,
2113 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2114 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2121 static enum sci_status
2122 stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req,
2123 u32 completion_code)
2125 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2126 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2127 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2130 sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
2135 * All other completion statuses cause the I/O to be complete.
2136 * If a NAK was received, then it is up to the user to retry the request.
2139 scic_sds_request_set_status(sci_req,
2140 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2141 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2143 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
2150 static enum sci_status
2151 stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct scic_sds_request *sci_req,
2152 u32 completion_code)
2154 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2155 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2156 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2159 sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
2163 /* All other completion statuses cause the I/O to be complete. If
2164 * a NAK was received, then it is up to the user to retry the request.
2167 scic_sds_request_set_status(sci_req,
2168 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2169 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2171 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
2179 scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req,
2180 u32 completion_code)
2182 enum sci_base_request_states state;
2183 struct scic_sds_controller *scic = sci_req->owning_controller;
2185 state = sci_req->sm.current_state_id;
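/* Dispatch the TC completion to the handler for the request's
 * current state.
 */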
2188 case SCI_REQ_STARTED:
2189 return request_started_state_tc_event(sci_req, completion_code);
2191 case SCI_REQ_TASK_WAIT_TC_COMP:
2192 return ssp_task_request_await_tc_event(sci_req,
2195 case SCI_REQ_SMP_WAIT_RESP:
2196 return smp_request_await_response_tc_event(sci_req,
2199 case SCI_REQ_SMP_WAIT_TC_COMP:
2200 return smp_request_await_tc_event(sci_req, completion_code);
2202 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
2203 return stp_request_udma_await_tc_event(sci_req,
2206 case SCI_REQ_STP_NON_DATA_WAIT_H2D:
2207 return stp_request_non_data_await_h2d_tc_event(sci_req,
2210 case SCI_REQ_STP_PIO_WAIT_H2D:
2211 return stp_request_pio_await_h2d_completion_tc_event(sci_req,
2214 case SCI_REQ_STP_PIO_DATA_OUT:
2215 return pio_data_out_tx_done_tc_event(sci_req, completion_code);
2217 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
2218 return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req,
2221 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2222 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req,
2225 case SCI_REQ_ABORTING:
2226 return request_aborting_state_tc_event(sci_req,
2230 dev_warn(scic_to_dev(scic),
2231 "%s: SCIC IO Request given task completion "
2232 "notification %x while in wrong state %d\n",
2236 return SCI_FAILURE_INVALID_STATE;
2241 * isci_request_process_response_iu() - This function sets the status and
2242 * response iu, in the task struct, from the request object for the upper layer driver.
2244 * @task: This parameter is the task struct from the upper layer driver.
2245 * @resp_iu: This parameter points to the response iu of the completed request.
2246 * @dev: This parameter specifies the linux device struct.
2250 static void isci_request_process_response_iu(
2251 struct sas_task *task,
2252 struct ssp_response_iu *resp_iu,
2257 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2258 "resp_iu->response_data_len = %x, "
2259 "resp_iu->sense_data_len = %x\nrepsonse data: ",
2264 resp_iu->response_data_len,
2265 resp_iu->sense_data_len);
2267 task->task_status.stat = resp_iu->status;
2269 /* libsas updates the task status fields based on the response iu. */
2270 sas_ssp_task_response(dev, task, resp_iu);
2274 * isci_request_set_open_reject_status() - This function prepares the I/O
2275 * completion for OPEN_REJECT conditions.
2276 * @request: This parameter is the completed isci_request object.
2277 * @response_ptr: This parameter specifies the service response for the I/O.
2278 * @status_ptr: This parameter specifies the exec status for the I/O.
2279 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2280 * the LLDD with respect to completing this request or forcing an abort
2281 * condition on the I/O.
2282 * @open_rej_reason: This parameter specifies the encoded reason for the
2283 * abandon-class reject.
2287 static void isci_request_set_open_reject_status(
2288 struct isci_request *request,
2289 struct sas_task *task,
2290 enum service_response *response_ptr,
2291 enum exec_status *status_ptr,
2292 enum isci_completion_selection *complete_to_host_ptr,
2293 enum sas_open_rej_reason open_rej_reason)
2295 /* Task in the target is done. */
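/* An abandon-class open reject means the connection was never
 * accepted, so nothing is left outstanding in the target; the
 * encoded reason is passed back through the task status for
 * libsas to act on.
 */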
2296 request->complete_in_target = true;
2297 *response_ptr = SAS_TASK_UNDELIVERED;
2298 *status_ptr = SAS_OPEN_REJECT;
2299 *complete_to_host_ptr = isci_perform_normal_io_completion;
2300 task->task_status.open_rej_reason = open_rej_reason;
2304 * isci_request_handle_controller_specific_errors() - This function decodes
2305 * controller-specific I/O completion error conditions.
2306 * @request: This parameter is the completed isci_request object.
2307 * @response_ptr: This parameter specifies the service response for the I/O.
2308 * @status_ptr: This parameter specifies the exec status for the I/O.
2309 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2310 * the LLDD with respect to completing this request or forcing an abort
2311 * condition on the I/O.
2315 static void isci_request_handle_controller_specific_errors(
2316 struct isci_remote_device *isci_device,
2317 struct isci_request *request,
2318 struct sas_task *task,
2319 enum service_response *response_ptr,
2320 enum exec_status *status_ptr,
2321 enum isci_completion_selection *complete_to_host_ptr)
2323 unsigned int cstatus;
2325 cstatus = request->sci.scu_status;
2327 dev_dbg(&request->isci_host->pdev->dev,
2328 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2329 "- controller status = 0x%x\n",
2330 __func__, request, cstatus);
2332 /* Decode the controller-specific errors; most
2333 * important is to recognize those conditions in which
2334 * the target may still have a task outstanding that must be aborted.
2337 * Note that there are SCU completion codes named in the
2338 * decode below for which SCIC has already done work to
2339 * handle them in a way other than as a controller-specific
2340 * completion code; these are left in the decode below for
2341 * completeness' sake.
2344 case SCU_TASK_DONE_DMASETUP_DIRERR:
2345 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2346 case SCU_TASK_DONE_XFERCNT_ERR:
2347 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2348 if (task->task_proto == SAS_PROTOCOL_SMP) {
2349 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2350 *response_ptr = SAS_TASK_COMPLETE;
2352 /* See if the device has been/is being stopped. Note
2353 * that we ignore the quiesce state, since we are
2354 * concerned about the actual device state.
2356 if ((isci_device->status == isci_stopping) ||
2357 (isci_device->status == isci_stopped))
2358 *status_ptr = SAS_DEVICE_UNKNOWN;
2360 *status_ptr = SAS_ABORTED_TASK;
2362 request->complete_in_target = true;
2364 *complete_to_host_ptr =
2365 isci_perform_normal_io_completion;
2367 /* Task in the target is not done. */
2368 *response_ptr = SAS_TASK_UNDELIVERED;
2370 if ((isci_device->status == isci_stopping) ||
2371 (isci_device->status == isci_stopped))
2372 *status_ptr = SAS_DEVICE_UNKNOWN;
2374 *status_ptr = SAM_STAT_TASK_ABORTED;
2376 request->complete_in_target = false;
2378 *complete_to_host_ptr =
2379 isci_perform_error_io_completion;
2384 case SCU_TASK_DONE_CRC_ERR:
2385 case SCU_TASK_DONE_NAK_CMD_ERR:
2386 case SCU_TASK_DONE_EXCESS_DATA:
2387 case SCU_TASK_DONE_UNEXP_FIS:
2388 /* Also SCU_TASK_DONE_UNEXP_RESP: */
2389 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
2390 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
2391 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
2392 /* These are conditions in which the target
2393 * has completed the task, so that no cleanup is necessary.
2396 *response_ptr = SAS_TASK_COMPLETE;
2398 /* See if the device has been/is being stopped. Note
2399 * that we ignore the quiesce state, since we are
2400 * concerned about the actual device state.
2402 if ((isci_device->status == isci_stopping) ||
2403 (isci_device->status == isci_stopped))
2404 *status_ptr = SAS_DEVICE_UNKNOWN;
2406 *status_ptr = SAS_ABORTED_TASK;
2408 request->complete_in_target = true;
2410 *complete_to_host_ptr = isci_perform_normal_io_completion;
2414 /* Note that the only open reject completion codes seen here will be
2415 * abandon-class codes; all others are automatically retried in the SCU.
2417 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2419 isci_request_set_open_reject_status(
2420 request, task, response_ptr, status_ptr,
2421 complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2424 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2426 /* Note - the return of AB0 will change when
2427 * libsas implements detection of zone violations.
2429 isci_request_set_open_reject_status(
2430 request, task, response_ptr, status_ptr,
2431 complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2434 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2436 isci_request_set_open_reject_status(
2437 request, task, response_ptr, status_ptr,
2438 complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2441 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2443 isci_request_set_open_reject_status(
2444 request, task, response_ptr, status_ptr,
2445 complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2448 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2450 isci_request_set_open_reject_status(
2451 request, task, response_ptr, status_ptr,
2452 complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2455 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2457 isci_request_set_open_reject_status(
2458 request, task, response_ptr, status_ptr,
2459 complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2462 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2464 isci_request_set_open_reject_status(
2465 request, task, response_ptr, status_ptr,
2466 complete_to_host_ptr, SAS_OREJ_STP_NORES);
2469 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2471 isci_request_set_open_reject_status(
2472 request, task, response_ptr, status_ptr,
2473 complete_to_host_ptr, SAS_OREJ_EPROTO);
2476 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2478 isci_request_set_open_reject_status(
2479 request, task, response_ptr, status_ptr,
2480 complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2483 case SCU_TASK_DONE_LL_R_ERR:
2484 /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2485 case SCU_TASK_DONE_LL_PERR:
2486 case SCU_TASK_DONE_LL_SY_TERM:
2487 /* Also SCU_TASK_DONE_NAK_ERR:*/
2488 case SCU_TASK_DONE_LL_LF_TERM:
2489 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2490 case SCU_TASK_DONE_LL_ABORT_ERR:
2491 case SCU_TASK_DONE_SEQ_INV_TYPE:
2492 /* Also SCU_TASK_DONE_UNEXP_XR: */
2493 case SCU_TASK_DONE_XR_IU_LEN_ERR:
2494 case SCU_TASK_DONE_INV_FIS_LEN:
2495 /* Also SCU_TASK_DONE_XR_WD_LEN: */
2496 case SCU_TASK_DONE_SDMA_ERR:
2497 case SCU_TASK_DONE_OFFSET_ERR:
2498 case SCU_TASK_DONE_MAX_PLD_ERR:
2499 case SCU_TASK_DONE_LF_ERR:
2500 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
2501 case SCU_TASK_DONE_SMP_LL_RX_ERR:
2502 case SCU_TASK_DONE_UNEXP_DATA:
2503 case SCU_TASK_DONE_UNEXP_SDBFIS:
2504 case SCU_TASK_DONE_REG_ERR:
2505 case SCU_TASK_DONE_SDB_ERR:
2506 case SCU_TASK_DONE_TASK_ABORT:
2508 /* Task in the target is not done. */
2509 *response_ptr = SAS_TASK_UNDELIVERED;
2510 *status_ptr = SAM_STAT_TASK_ABORTED;
2512 if (task->task_proto == SAS_PROTOCOL_SMP) {
2513 request->complete_in_target = true;
2515 *complete_to_host_ptr = isci_perform_normal_io_completion;
2517 request->complete_in_target = false;
2519 *complete_to_host_ptr = isci_perform_error_io_completion;
2526 * isci_task_save_for_upper_layer_completion() - This function saves the
2527 * request for later completion to the upper layer driver.
2528 * @host: This parameter is a pointer to the host on which the request
2529 * should be queued (either as an error or success).
2530 * @request: This parameter is the completed request.
2531 * @response: This parameter is the response code for the completed task.
2532 * @status: This parameter is the status code for the completed task.
2536 static void isci_task_save_for_upper_layer_completion(
2537 struct isci_host *host,
2538 struct isci_request *request,
2539 enum service_response response,
2540 enum exec_status status,
2541 enum isci_completion_selection task_notification_selection)
2543 struct sas_task *task = isci_request_access_task(request);
2545 task_notification_selection
2546 = isci_task_set_completion_status(task, response, status,
2547 task_notification_selection);
2549 /* Tasks aborted specifically by a call to the lldd_abort_task
2550 * function should not be completed to the host in the regular path.
2552 switch (task_notification_selection) {
2554 case isci_perform_normal_io_completion:
2556 /* Normal notification (task_done) */
2557 dev_dbg(&host->pdev->dev,
2558 "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
2561 task->task_status.resp, response,
2562 task->task_status.stat, status);
2563 /* Add to the completed list. */
2564 list_add(&request->completed_node,
2565 &host->requests_to_complete);
2567 /* Take the request off the device's pending request list. */
2568 list_del_init(&request->dev_node);
2571 case isci_perform_aborted_io_completion:
2572 /* No notification to libsas because this request is
2573 * already in the abort path.
2575 dev_warn(&host->pdev->dev,
2576 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
2579 task->task_status.resp, response,
2580 task->task_status.stat, status);
2582 /* Wake up whatever process was waiting for this
2583 * request to complete.
2585 WARN_ON(request->io_request_completion == NULL);
2587 if (request->io_request_completion != NULL) {
2589 /* Signal whoever is waiting that this
2590 * request is complete.
2592 complete(request->io_request_completion);
2596 case isci_perform_error_io_completion:
2597 /* Use sas_task_abort */
2598 dev_warn(&host->pdev->dev,
2599 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
2602 task->task_status.resp, response,
2603 task->task_status.stat, status);
2604 /* Add to the list of requests to error back to libsas. */
2605 list_add(&request->completed_node,
2606 &host->requests_to_errorback);
2610 dev_warn(&host->pdev->dev,
2611 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
2614 task->task_status.resp, response,
2615 task->task_status.stat, status);
2617 /* Add to the list of requests to error back to libsas. */
2618 list_add(&request->completed_node,
2619 &host->requests_to_errorback);
2624 static void isci_request_io_request_complete(struct isci_host *isci_host,
2625 struct isci_request *request,
2626 enum sci_io_status completion_status)
2628 struct sas_task *task = isci_request_access_task(request);
2629 struct ssp_response_iu *resp_iu;
2631 unsigned long task_flags;
2632 struct isci_remote_device *isci_device = request->isci_device;
2633 enum service_response response = SAS_TASK_UNDELIVERED;
2634 enum exec_status status = SAS_ABORTED_TASK;
2635 enum isci_request_status request_status;
2636 enum isci_completion_selection complete_to_host
2637 = isci_perform_normal_io_completion;
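/* Start from conservative defaults; each disposition below
 * overrides response/status/complete_to_host as appropriate.
 */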
2639 dev_dbg(&isci_host->pdev->dev,
2640 "%s: request = %p, task = %p,\n"
2641 "task->data_dir = %d completion_status = 0x%x\n",
2648 spin_lock(&request->state_lock);
2649 request_status = isci_request_get_state(request);
2651 /* Decode the request status. Note that if the request has been
2652 * aborted by a task management function, we don't care
2653 * what the status is.
2655 switch (request_status) {
2658 /* "aborted" indicates that the request was aborted by a task
2659 * management function, since once a task management request is
2660 * performed by the device, the request only completes because
2661 * of the subsequent driver terminate.
2663 * Aborted also means an external thread is explicitly managing
2664 * this request, so that we do not complete it up the stack.
2666 * The target is still there (since the TMF was successful).
2668 request->complete_in_target = true;
2669 response = SAS_TASK_COMPLETE;
2671 /* See if the device has been/is being stopped. Note
2672 * that we ignore the quiesce state, since we are
2673 * concerned about the actual device state.
2675 if ((isci_device->status == isci_stopping) ||
2676 (isci_device->status == isci_stopped)
2678 status = SAS_DEVICE_UNKNOWN;
2680 status = SAS_ABORTED_TASK;
2682 complete_to_host = isci_perform_aborted_io_completion;
2683 /* This was an aborted request. */
2685 spin_unlock(&request->state_lock);
2689 /* aborting means that the task management function tried and
2690 * failed to abort the request. We need to note the request
2691 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer can handle it appropriately.
2694 * Aborting also means an external thread is explicitly managing
2695 * this request, so that we do not complete it up the stack.
2697 request->complete_in_target = true;
2698 response = SAS_TASK_UNDELIVERED;
2700 if ((isci_device->status == isci_stopping) ||
2701 (isci_device->status == isci_stopped))
2702 /* The device has been/is being stopped. Note that
2703 * we ignore the quiesce state, since we are
2704 * concerned about the actual device state.
2706 status = SAS_DEVICE_UNKNOWN;
2708 status = SAS_PHY_DOWN;
2710 complete_to_host = isci_perform_aborted_io_completion;
2712 /* This was an aborted request. */
2714 spin_unlock(&request->state_lock);
2719 /* This was a terminated request. This happens when
2720 * the I/O is being terminated because of an action on
2721 * the device (reset, tear down, etc.), and the I/O needs
2722 * to be completed up the stack.
2724 request->complete_in_target = true;
2725 response = SAS_TASK_UNDELIVERED;
2727 /* See if the device has been/is being stopped. Note
2728 * that we ignore the quiesce state, since we are
2729 * concerned about the actual device state.
2731 if ((isci_device->status == isci_stopping) ||
2732 (isci_device->status == isci_stopped))
2733 status = SAS_DEVICE_UNKNOWN;
2735 status = SAS_ABORTED_TASK;
2737 complete_to_host = isci_perform_aborted_io_completion;
2739 /* This was a terminated request. */
2741 spin_unlock(&request->state_lock);
2745 /* This was a terminated request that timed out during the
2746 * termination process. There is no task to complete to the upper layer.
2749 complete_to_host = isci_perform_normal_io_completion;
2750 spin_unlock(&request->state_lock);
2755 /* The request is done from an SCU HW perspective. */
2756 request->status = completed;
2758 spin_unlock(&request->state_lock);
2760 /* This is an active request being completed from the core. */
2761 switch (completion_status) {
2763 case SCI_IO_FAILURE_RESPONSE_VALID:
2764 dev_dbg(&isci_host->pdev->dev,
2765 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2770 if (sas_protocol_ata(task->task_proto)) {
2771 resp_buf = &request->sci.stp.rsp;
2772 isci_request_process_stp_response(task,
2774 } else if (task->task_proto == SAS_PROTOCOL_SSP) {
2776 /* crack the iu response buffer. */
2777 resp_iu = &request->sci.ssp.rsp;
2778 isci_request_process_response_iu(task, resp_iu,
2779 &isci_host->pdev->dev);
2781 } else if (task->task_proto == SAS_PROTOCOL_SMP) {
2783 dev_err(&isci_host->pdev->dev,
2784 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2785 "SAS_PROTOCOL_SMP protocol\n",
2789 dev_err(&isci_host->pdev->dev,
2790 "%s: unknown protocol\n", __func__);
2792 /* use the task status set in the task struct by the
2793 * isci_request_process_response_iu call.
2795 request->complete_in_target = true;
2796 response = task->task_status.resp;
2797 status = task->task_status.stat;
2800 case SCI_IO_SUCCESS:
2801 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2803 response = SAS_TASK_COMPLETE;
2804 status = SAM_STAT_GOOD;
2805 request->complete_in_target = true;
2807 if (task->task_proto == SAS_PROTOCOL_SMP) {
2808 void *rsp = &request->sci.smp.rsp;
2810 dev_dbg(&isci_host->pdev->dev,
2811 "%s: SMP protocol completion\n",
2814 sg_copy_from_buffer(
2815 &task->smp_task.smp_resp, 1,
2816 rsp, sizeof(struct smp_resp));
2817 } else if (completion_status
2818 == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2820 /* This was an SSP / STP / SATA transfer.
2821 * There is a possibility that less data than
2822 * the maximum was transferred.
2824 u32 transferred_length = sci_req_tx_bytes(&request->sci);
2826 task->task_status.residual
2827 = task->total_xfer_len - transferred_length;
2829 /* If there were residual bytes, call this an underrun. */
2832 if (task->task_status.residual != 0)
2833 status = SAS_DATA_UNDERRUN;
2835 dev_dbg(&isci_host->pdev->dev,
2836 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2841 dev_dbg(&isci_host->pdev->dev,
2842 "%s: SCI_IO_SUCCESS\n",
2847 case SCI_IO_FAILURE_TERMINATED:
2848 dev_dbg(&isci_host->pdev->dev,
2849 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2854 /* The request was terminated explicitly. No handling
2855 * is needed in the SCSI error handler path.
2857 request->complete_in_target = true;
2858 response = SAS_TASK_UNDELIVERED;
2860 /* See if the device has been/is being stopped. Note
2861 * that we ignore the quiesce state, since we are
2862 * concerned about the actual device state.
2864 if ((isci_device->status == isci_stopping) ||
2865 (isci_device->status == isci_stopped))
2866 status = SAS_DEVICE_UNKNOWN;
2868 status = SAS_ABORTED_TASK;
2870 complete_to_host = isci_perform_normal_io_completion;
2873 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2875 isci_request_handle_controller_specific_errors(
2876 isci_device, request, task, &response, &status,
2881 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2882 /* This is a special case, in that the I/O completion
2883 * is telling us that the device needs a reset.
2884 * In order for the device reset condition to be
2885 * noticed, the I/O has to be handled in the error
2886 * handler. Set the reset flag and cause the
2887 * SCSI error thread to be scheduled.
2889 spin_lock_irqsave(&task->task_state_lock, task_flags);
2890 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2891 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2894 response = SAS_TASK_UNDELIVERED;
2895 status = SAM_STAT_TASK_ABORTED;
2897 complete_to_host = isci_perform_error_io_completion;
2898 request->complete_in_target = false;
2901 case SCI_FAILURE_RETRY_REQUIRED:
2903 /* Fail the I/O so it can be retried. */
2904 response = SAS_TASK_UNDELIVERED;
2905 if ((isci_device->status == isci_stopping) ||
2906 (isci_device->status == isci_stopped))
2907 status = SAS_DEVICE_UNKNOWN;
2909 status = SAS_ABORTED_TASK;
2911 complete_to_host = isci_perform_normal_io_completion;
2912 request->complete_in_target = true;
2917 /* Catch any otherwise unhandled error codes here. */
2918 dev_warn(&isci_host->pdev->dev,
2919 "%s: invalid completion code: 0x%x - "
2920 "isci_request = %p\n",
2921 __func__, completion_status, request);
2923 response = SAS_TASK_UNDELIVERED;
2925 /* See if the device has been/is being stopped. Note
2926 * that we ignore the quiesce state, since we are
2927 * concerned about the actual device state.
2929 if ((isci_device->status == isci_stopping) ||
2930 (isci_device->status == isci_stopped))
2931 status = SAS_DEVICE_UNKNOWN;
2933 status = SAS_ABORTED_TASK;
2935 if (task->task_proto == SAS_PROTOCOL_SMP) {
2936 request->complete_in_target = true;
2937 complete_to_host = isci_perform_normal_io_completion;
2939 request->complete_in_target = false;
2940 complete_to_host = isci_perform_error_io_completion;
2947 isci_request_unmap_sgl(request, isci_host->pdev);
2949 /* Put the completed request on the correct list */
2950 isci_task_save_for_upper_layer_completion(isci_host, request, response,
2951 status, complete_to_host
2954 /* complete the io request to the core. */
2955 scic_controller_complete_io(&isci_host->sci,
2958 /* Set the terminated flag so the request cannot be completed or
2959 * terminated again, and so that any calls into abort
2960 * task recognize the already completed case.
2962 request->terminated = true;
2964 isci_host_can_dequeue(isci_host, 1);
2967 static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm)
2969 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
2970 struct isci_request *ireq = sci_req_to_ireq(sci_req);
2971 struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);
2972 struct sas_task *task;
2974 /* XXX: as hch said, always creating an internal sas_task for tmf
2975 * requests would simplify the driver.
2977 task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
2979 /* all unaccelerated request types (non ssp or ncq) are handled with substates. */
2982 if (!task && dev->dev_type == SAS_END_DEV) {
2983 sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
2985 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
2986 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
2987 sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
2988 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2989 sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
2990 } else if (task && sas_protocol_ata(task->task_proto) &&
2991 !task->ata_task.use_ncq) {
2994 if (task->data_dir == DMA_NONE)
2995 state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
2996 else if (task->ata_task.dma_xfer)
2997 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
2999 state = SCI_REQ_STP_PIO_WAIT_H2D;
3001 sci_change_state(sm, state);
3005 static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm)
3007 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3008 struct scic_sds_controller *scic = sci_req->owning_controller;
3009 struct isci_host *ihost = scic_to_ihost(scic);
3010 struct isci_request *ireq = sci_req_to_ireq(sci_req);
3012 /* Tell the SCI_USER that the IO request is complete */
3013 if (!sci_req->is_task_management_request)
3014 isci_request_io_request_complete(ihost, ireq,
3015 sci_req->sci_status);
3017 isci_task_request_complete(ihost, ireq, sci_req->sci_status);
3020 static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm)
3022 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3024 /* Setting the abort bit in the Task Context is required by the silicon. */
3025 sci_req->task_context_buffer->abort = 1;
3028 static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3030 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3032 scic_sds_remote_device_set_working_request(sci_req->target_device,
3036 static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3038 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3040 scic_sds_remote_device_set_working_request(sci_req->target_device,
3044 static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
3046 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3048 scic_sds_remote_device_set_working_request(sci_req->target_device,
3052 static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
3054 struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3055 struct scu_task_context *task_context;
3056 struct host_to_dev_fis *h2d_fis;
3057 enum sci_status status;
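/* A SATA soft reset is a pair of register H2D FISes: the first
 * asserted SRST, and this second one clears it so that the device
 * runs its diagnostics and responds with a D2H FIS.
 */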
3059 /* Clear the SRST bit */
3060 h2d_fis = &sci_req->stp.cmd;
3061 h2d_fis->control = 0;
3063 /* Clear the TC control bit */
3064 task_context = scic_sds_controller_get_task_context_buffer(
3065 sci_req->owning_controller, sci_req->io_tag);
3066 task_context->control_frame = 0;
3068 status = scic_controller_continue_io(sci_req);
3069 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
3072 static const struct sci_base_state scic_sds_request_state_table[] = {
3073 [SCI_REQ_INIT] = { },
3074 [SCI_REQ_CONSTRUCTED] = { },
3075 [SCI_REQ_STARTED] = {
3076 .enter_state = scic_sds_request_started_state_enter,
3078 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
3079 .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
3081 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
3082 [SCI_REQ_STP_PIO_WAIT_H2D] = {
3083 .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
3085 [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
3086 [SCI_REQ_STP_PIO_DATA_IN] = { },
3087 [SCI_REQ_STP_PIO_DATA_OUT] = { },
3088 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
3089 [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
3090 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
3091 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
3093 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
3094 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
3096 [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
3097 [SCI_REQ_TASK_WAIT_TC_COMP] = { },
3098 [SCI_REQ_TASK_WAIT_TC_RESP] = { },
3099 [SCI_REQ_SMP_WAIT_RESP] = { },
3100 [SCI_REQ_SMP_WAIT_TC_COMP] = { },
3101 [SCI_REQ_COMPLETED] = {
3102 .enter_state = scic_sds_request_completed_state_enter,
3104 [SCI_REQ_ABORTING] = {
3105 .enter_state = scic_sds_request_aborting_state_enter,
3107 [SCI_REQ_FINAL] = { },
3111 scic_sds_general_request_construct(struct scic_sds_controller *scic,
3112 struct scic_sds_remote_device *sci_dev,
3114 struct scic_sds_request *sci_req)
3116 sci_init_sm(&sci_req->sm, scic_sds_request_state_table, SCI_REQ_INIT);
3118 sci_req->io_tag = io_tag;
3119 sci_req->owning_controller = scic;
3120 sci_req->target_device = sci_dev;
3121 sci_req->protocol = SCIC_NO_PROTOCOL;
3122 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3123 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);
3125 sci_req->sci_status = SCI_SUCCESS;
3126 sci_req->scu_status = 0;
3127 sci_req->post_context = 0xFFFFFFFF;
3129 sci_req->is_task_management_request = false;
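/* A caller-supplied tag means the task context lives in the
 * controller's TC table; otherwise use the TC embedded in the
 * request until the core assigns a tag (and thus a TCi) at start
 * time.
 */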
3131 if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
3132 sci_req->was_tag_assigned_by_user = false;
3133 sci_req->task_context_buffer = &sci_req->tc;
3135 sci_req->was_tag_assigned_by_user = true;
3137 sci_req->task_context_buffer =
3138 scic_sds_controller_get_task_context_buffer(scic, io_tag);
3142 static enum sci_status
3143 scic_io_request_construct(struct scic_sds_controller *scic,
3144 struct scic_sds_remote_device *sci_dev,
3145 u16 io_tag, struct scic_sds_request *sci_req)
3147 struct domain_device *dev = sci_dev_to_domain(sci_dev);
3148 enum sci_status status = SCI_SUCCESS;
3150 /* Build the common part of the request */
3151 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
3153 if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3154 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3156 if (dev->dev_type == SAS_END_DEV)
3158 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
3159 memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd));
3160 else if (dev_is_expander(dev))
3161 memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd));
3163 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
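/* Zero only the TC header; the SGL pair area that follows is
 * populated separately when the request payload is built.
 */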
3165 memset(sci_req->task_context_buffer, 0,
3166 offsetof(struct scu_task_context, sgl_pair_ab));
3171 enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
3172 struct scic_sds_remote_device *sci_dev,
3173 u16 io_tag, struct scic_sds_request *sci_req)
3175 struct domain_device *dev = sci_dev_to_domain(sci_dev);
3176 enum sci_status status = SCI_SUCCESS;
3178 /* Build the common part of the request */
3179 scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
3181 if (dev->dev_type == SAS_END_DEV ||
3182 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
3183 sci_req->is_task_management_request = true;
3184 memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
3186 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3191 static enum sci_status isci_request_ssp_request_construct(
3192 struct isci_request *request)
3194 enum sci_status status;
3196 dev_dbg(&request->isci_host->pdev->dev,
3197 "%s: request = %p\n",
3200 status = scic_io_request_construct_basic_ssp(&request->sci);
3204 static enum sci_status isci_request_stp_request_construct(
3205 struct isci_request *request)
3207 struct sas_task *task = isci_request_access_task(request);
3208 enum sci_status status;
3209 struct host_to_dev_fis *register_fis;
3211 dev_dbg(&request->isci_host->pdev->dev,
3212 "%s: request = %p\n",
3216 /* Get the host_to_dev_fis from the core and copy
3217 * the fis from the task into it.
3219 register_fis = isci_sata_task_to_fis_copy(task);
3221 status = scic_io_request_construct_basic_sata(&request->sci);
3223 /* Set the ncq tag in the fis, from the queue
3224 * command in the task.
3226 if (isci_sata_is_task_ncq(task)) {
3228 isci_sata_set_ncq_tag(
3238 * This function will fill in the SCU Task Context for an SMP request. The
3239 * following important settings are utilized: -# task_type ==
3240 * SCU_TASK_TYPE_SMP. This simply indicates that a normal request type
3241 * (i.e. non-raw frame) is being utilized to perform task management. -#
3242 * control_frame == 1. This ensures that the proper endianness is set so
3243 * that the bytes are transmitted in the right order for an SMP request frame.
3244 * @sci_req: This parameter specifies the smp request object being constructed.
3249 scu_smp_request_construct_task_context(struct scic_sds_request *sci_req,
3252 dma_addr_t dma_addr;
3253 struct scic_sds_remote_device *sci_dev;
3254 struct scic_sds_port *sci_port;
3255 struct scu_task_context *task_context;
3256 ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32);
3258 /* byte swap the smp request; SMP frame contents are big-endian on the wire. */
3259 sci_swab32_cpy(&sci_req->smp.cmd, &sci_req->smp.cmd,
3262 task_context = scic_sds_request_get_task_context(sci_req);
3264 sci_dev = scic_sds_request_get_device(sci_req);
3265 sci_port = scic_sds_request_get_port(sci_req);
3268 * Fill in the TC with its required data
3271 task_context->priority = 0;
3272 task_context->initiator_request = 1;
3273 task_context->connection_rate = sci_dev->connection_rate;
3274 task_context->protocol_engine_index =
3275 scic_sds_controller_get_protocol_engine_group(scic);
3276 task_context->logical_port_index = scic_sds_port_get_index(sci_port);
3277 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3278 task_context->abort = 0;
3279 task_context->valid = SCU_TASK_CONTEXT_VALID;
3280 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3283 task_context->remote_node_index = sci_dev->rnc.remote_node_index;
3284 task_context->command_code = 0;
3285 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3288 task_context->link_layer_control = 0;
3289 task_context->do_not_dma_ssp_good_response = 1;
3290 task_context->strict_ordering = 0;
3291 task_context->control_frame = 1;
3292 task_context->timeout_enable = 0;
3293 task_context->block_guard_enable = 0;
3296 task_context->address_modifier = 0;
3299 task_context->ssp_command_iu_length = req_len;
3302 task_context->transfer_length_bytes = 0;
3305 * 18h ~ 30h, protocol specific
3306 * since the command IU has been built by the framework at this point,
3307 * we just copy the first dword from the command IU to this location. */
3308 memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32));
3312 * "For SMP you could program it to zero. We would prefer that way
3313 * so that done code will be consistent." - Venki
3315 task_context->task_phase = 0;
3317 if (sci_req->was_tag_assigned_by_user) {
3319 * Build the task context now since we have already read the data.
3322 sci_req->post_context =
3323 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3324 (scic_sds_controller_get_protocol_engine_group(scic) <<
3325 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3326 (scic_sds_port_get_index(sci_port) <<
3327 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3328 ISCI_TAG_TCI(sci_req->io_tag));
3331 * Build the task context now since we have already read the data.
3333 * I/O tag index is not assigned because we have to wait
3334 * until we get a TCi.
3336 sci_req->post_context =
3337 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3338 (scic_sds_controller_get_protocol_engine_group(scic) <<
3339 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3340 (scic_sds_port_get_index(sci_port) <<
3341 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
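/* In both cases the post context is the dword posted to the SCU
 * to start the task: the request type, protocol engine group, and
 * logical port index, plus the TCi once it is known.
 */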
3345 * Copy the physical address for the command buffer to the SCU Task
3346 * Context; the command buffer should not contain the command header.
3348 dma_addr = scic_io_request_get_dma_addr(sci_req,
3349 ((char *) &sci_req->smp.cmd) +
3352 task_context->command_iu_upper = upper_32_bits(dma_addr);
3353 task_context->command_iu_lower = lower_32_bits(dma_addr);
3355 /* SMP response comes as UF, so no need to set response IU address. */
3356 task_context->response_iu_upper = 0;
3357 task_context->response_iu_lower = 0;
3360 static enum sci_status
3361 scic_io_request_construct_smp(struct scic_sds_request *sci_req)
3363 struct smp_req *smp_req = &sci_req->smp.cmd;
3365 sci_req->protocol = SCIC_SMP_PROTOCOL;
3368 * Look at the SMP request's header fields; for certain SAS 1.x SMP
3369 * functions under SAS 2.0, a zero request length really indicates
3370 * a non-zero default length.
3372 if (smp_req->req_len == 0) {
3373 switch (smp_req->func) {
3375 case SMP_REPORT_PHY_ERR_LOG:
3376 case SMP_REPORT_PHY_SATA:
3377 case SMP_REPORT_ROUTE_INFO:
3378 smp_req->req_len = 2;
3380 case SMP_CONF_ROUTE_INFO:
3381 case SMP_PHY_CONTROL:
3382 case SMP_PHY_TEST_FUNCTION:
3383 smp_req->req_len = 9;
3385 /* Default - zero is a valid default for 2.0. */
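/* The lengths above are the sizes of the respective SMP requests
 * in dwords.
 */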
3389 scu_smp_request_construct_task_context(sci_req, smp_req->req_len);
3391 sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
3397 * isci_smp_request_build() - This function builds the smp request.
3398 * @ireq: This parameter points to the isci_request allocated in the
3399 * request construct function.
3401 * SCI_SUCCESS on successful completion, or specific failure code.
3403 static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3405 enum sci_status status = SCI_FAILURE;
3406 struct sas_task *task = isci_request_access_task(ireq);
3407 struct scic_sds_request *sci_req = &ireq->sci;
3409 dev_dbg(&ireq->isci_host->pdev->dev,
3410 "%s: request = %p\n", __func__, ireq);
3412 dev_dbg(&ireq->isci_host->pdev->dev,
3413 "%s: smp_req len = %d\n",
3415 task->smp_task.smp_req.length);
3417 /* Copy the smp command out of the scatterlist into the request. */
3418 sg_copy_to_buffer(&task->smp_task.smp_req, 1,
3420 sizeof(struct smp_req));
3422 status = scic_io_request_construct_smp(sci_req);
3423 if (status != SCI_SUCCESS)
3424 dev_warn(&ireq->isci_host->pdev->dev,
3425 "%s: failed with status = %d\n",
3433 * isci_io_request_build() - This function builds the io request object.
3434 * @isci_host: This parameter specifies the ISCI host object
3435 * @request: This parameter points to the isci_request object allocated in the
3436 * request construct function.
3437 * @sci_device: This parameter is the handle for the sci core's remote device
3438 * object that is the destination for this request.
3440 * SCI_SUCCESS on successful completion, or specific failure code.
3442 static enum sci_status isci_io_request_build(
3443 struct isci_host *isci_host,
3444 struct isci_request *request,
3445 struct isci_remote_device *isci_device)
3447 enum sci_status status = SCI_SUCCESS;
3448 struct sas_task *task = isci_request_access_task(request);
3449 struct scic_sds_remote_device *sci_device = &isci_device->sci;
3451 dev_dbg(&isci_host->pdev->dev,
3452 "%s: isci_device = 0x%p; request = %p, "
3453 "num_scatter = %d\n",
3459 /* map the sgl addresses, if present.
3460 * libata does the mapping for sata devices
3461 * before we get the request.
3463 if (task->num_scatter &&
3464 !sas_protocol_ata(task->task_proto) &&
3465 !(SAS_PROTOCOL_SMP & task->task_proto)) {
3467 request->num_sg_entries = dma_map_sg(
3468 &isci_host->pdev->dev,
3474 if (request->num_sg_entries == 0)
3475 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3478 /* build the common request object. For now,
3479 * we will let the core allocate the IO tag.
3481 status = scic_io_request_construct(&isci_host->sci, sci_device,
3482 SCI_CONTROLLER_INVALID_IO_TAG,
3485 if (status != SCI_SUCCESS) {
3486 dev_warn(&isci_host->pdev->dev,
3487 "%s: failed request construct\n",
3492 switch (task->task_proto) {
3493 case SAS_PROTOCOL_SMP:
3494 status = isci_smp_request_build(request);
3496 case SAS_PROTOCOL_SSP:
3497 status = isci_request_ssp_request_construct(request);
3499 case SAS_PROTOCOL_SATA:
3500 case SAS_PROTOCOL_STP:
3501 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3502 status = isci_request_stp_request_construct(request);
3505 dev_warn(&isci_host->pdev->dev,
3506 "%s: unknown protocol\n", __func__);
3513 static struct isci_request *isci_request_alloc_core(struct isci_host *ihost,
3514 struct isci_remote_device *idev,
3518 struct isci_request *ireq;
3520 ireq = dma_pool_alloc(ihost->dma_pool, gfp_flags, &handle);
3522 dev_warn(&ihost->pdev->dev,
3523 "%s: dma_pool_alloc returned NULL\n", __func__);
3527 /* initialize the request object. */
3528 spin_lock_init(&ireq->state_lock);
3529 ireq->request_daddr = handle;
3530 ireq->isci_host = ihost;
3531 ireq->isci_device = idev;
3532 ireq->io_request_completion = NULL;
3533 ireq->terminated = false;
3535 ireq->num_sg_entries = 0;
3537 ireq->complete_in_target = false;
3539 INIT_LIST_HEAD(&ireq->completed_node);
3540 INIT_LIST_HEAD(&ireq->dev_node);
3542 isci_request_change_state(ireq, allocated);
3547 static struct isci_request *isci_request_alloc_io(struct isci_host *ihost,
3548 struct sas_task *task,
3549 struct isci_remote_device *idev,
3552 struct isci_request *ireq;
3554 ireq = isci_request_alloc_core(ihost, idev, gfp_flags);
3556 ireq->ttype_ptr.io_task_ptr = task;
3557 ireq->ttype = io_task;
3558 task->lldd_task = ireq;
3563 struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost,
3564 struct isci_tmf *isci_tmf,
3565 struct isci_remote_device *idev,
3568 struct isci_request *ireq;
3570 ireq = isci_request_alloc_core(ihost, idev, gfp_flags);
3572 ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3573 ireq->ttype = tmf_task;
3578 int isci_request_execute(struct isci_host *ihost, struct sas_task *task,
3581 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3582 struct scic_sds_remote_device *sci_dev;
3583 struct isci_remote_device *idev;
3584 struct isci_request *ireq;
3585 unsigned long flags;
3588 idev = task->dev->lldd_dev;
3589 sci_dev = &idev->sci;
3591 /* do common allocation and init of request object. */
3592 ireq = isci_request_alloc_io(ihost, task, idev, gfp_flags);
3596 status = isci_io_request_build(ihost, ireq, idev);
3597 if (status != SCI_SUCCESS) {
3598 dev_warn(&ihost->pdev->dev,
3599 "%s: request_construct failed - status = 0x%x\n",
3605 spin_lock_irqsave(&ihost->scic_lock, flags);
3607 /* send the request, let the core assign the IO TAG. */
3608 status = scic_controller_start_io(&ihost->sci, sci_dev,
3610 SCI_CONTROLLER_INVALID_IO_TAG);
3611 if (status != SCI_SUCCESS &&
3612 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3613 dev_warn(&ihost->pdev->dev,
3614 "%s: failed request start (0x%x)\n",
3616 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3620 /* Either I/O started OK, or the core has signaled that
3621 * the device needs a target reset.
3623 * In either case, hold onto the I/O for later.
3625 * Update its status and add it to the list in the
3626 * remote device object.
3628 list_add(&ireq->dev_node, &idev->reqs_in_process);
3630 if (status == SCI_SUCCESS) {
3631 /* Save the tag for possible task mgmt later. */
3632 ireq->io_tag = ireq->sci.io_tag;
3633 isci_request_change_state(ireq, started);
3635 /* The request did not really start in the
3636 * hardware, so clear the request handle
3637 * here so no terminations will be done.
3639 ireq->terminated = true;
3640 isci_request_change_state(ireq, completed);
3642 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3645 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3646 /* Signal libsas that we need the SCSI error
3647 * handler thread to work on this I/O and that
3648 * we want a device reset.
3650 spin_lock_irqsave(&task->task_state_lock, flags);
3651 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3652 spin_unlock_irqrestore(&task->task_state_lock, flags);
3654 /* Cause this task to be scheduled in the SCSI error handler thread. */
3657 isci_execpath_callback(ihost, task,
3660 /* Change the status, since we are holding
3661 * the I/O until it is managed by the SCSI error handler.
3664 status = SCI_SUCCESS;
3668 if (status != SCI_SUCCESS) {
3669 /* release dma memory on failure. */
3670 isci_request_free(ihost, ireq);