2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 #include "intel_sas.h"
58 #include "intel_sata.h"
59 #include "intel_sat.h"
60 #include "scic_controller.h"
61 #include "scic_io_request.h"
62 #include "scic_sds_controller.h"
63 #include "scu_registers.h"
64 #include "scic_sds_port.h"
65 #include "remote_device.h"
66 #include "scic_sds_request.h"
67 #include "scic_sds_smp_request.h"
68 #include "scic_sds_stp_request.h"
69 #include "scic_sds_unsolicited_frame_control.h"
70 #include "sci_environment.h"
72 #include "scu_completion_codes.h"
73 #include "scu_constants.h"
74 #include "scu_task_context.h"
76 #if !defined(DISABLE_ATAPI)
77 #include "scic_sds_stp_packet_request.h"
81 * ****************************************************************************
82 * * SCIC SDS IO REQUEST CONSTANTS
83 * **************************************************************************** */
88 * We have no timer requirements for IO requests right now
/* Both bounds are zero: IO requests currently allocate no timers at all. */
90 #define SCIC_SDS_IO_REQUEST_MINIMUM_TIMER_COUNT (0)
91 #define SCIC_SDS_IO_REQUEST_MAXIMUM_TIMER_COUNT (0)
94 * ****************************************************************************
95 * * SCIC SDS IO REQUEST MACROS
96 * **************************************************************************** */
/*
 * Request-memory layout helpers. A single allocation holds, in order:
 *   struct scic_sds_request | SSP command IU | SSP response IU |
 *   SCU task context | SGL element pairs
 * Each *_get_*_buffer(memory) macro walks that layout by adding the size
 * of the preceding region to the previous macro's result. The task-request
 * variants substitute a Task IU for the Command IU but follow the same
 * scheme.
 */
99 * scic_ssp_io_request_get_object_size() -
101 * This macro returns the sizeof memory required to store an SSP IO
102 * request. This does not include the size of the SGL or SCU Task Context
105 #define scic_ssp_io_request_get_object_size() \
107 sizeof(struct sci_ssp_command_iu) \
108 + sizeof(struct sci_ssp_response_iu) \
112 * scic_sds_ssp_request_get_command_buffer() -
114 * This macro returns the address of the ssp command buffer in the io request
117 #define scic_sds_ssp_request_get_command_buffer(memory) \
118 ((struct sci_ssp_command_iu *)(\
119 ((char *)(memory)) + sizeof(struct scic_sds_request) \
123 * scic_sds_ssp_request_get_response_buffer() -
125 * This macro returns the address of the ssp response buffer in the io request
128 #define scic_sds_ssp_request_get_response_buffer(memory) \
129 ((struct sci_ssp_response_iu *)(\
130 ((char *)(scic_sds_ssp_request_get_command_buffer(memory))) \
131 + sizeof(struct sci_ssp_command_iu) \
135 * scic_sds_ssp_request_get_task_context_buffer() -
137 * This macro returns the address of the task context buffer in the io request
140 #define scic_sds_ssp_request_get_task_context_buffer(memory) \
141 ((struct scu_task_context *)(\
142 ((char *)(scic_sds_ssp_request_get_response_buffer(memory))) \
143 + sizeof(struct sci_ssp_response_iu) \
147 * scic_sds_ssp_request_get_sgl_element_buffer() -
149 * This macro returns the address of the sgl element pairs in the io request
152 #define scic_sds_ssp_request_get_sgl_element_buffer(memory) \
153 ((struct scu_sgl_element_pair *)(\
154 ((char *)(scic_sds_ssp_request_get_task_context_buffer(memory))) \
155 + sizeof(struct scu_task_context) \
160 * scic_ssp_task_request_get_object_size() -
162 * This macro returns the sizeof memory required to store an SSP Task
163 * request. This does not include the size of the SCU Task Context memory.
165 #define scic_ssp_task_request_get_object_size() \
167 sizeof(struct sci_ssp_task_iu) \
168 + sizeof(struct sci_ssp_response_iu) \
172 * scic_sds_ssp_task_request_get_command_buffer() -
174 * This macro returns the address of the ssp command buffer in the task request
175 * memory. Yes, it is the same as the above macro except for the name.
177 #define scic_sds_ssp_task_request_get_command_buffer(memory) \
178 ((struct sci_ssp_task_iu *)(\
179 ((char *)(memory)) + sizeof(struct scic_sds_request) \
183 * scic_sds_ssp_task_request_get_response_buffer() -
185 * This macro returns the address of the ssp response buffer in the task
188 #define scic_sds_ssp_task_request_get_response_buffer(memory) \
189 ((struct sci_ssp_response_iu *)(\
190 ((char *)(scic_sds_ssp_task_request_get_command_buffer(memory))) \
191 + sizeof(struct sci_ssp_task_iu) \
195 * scic_sds_ssp_task_request_get_task_context_buffer() -
197 * This macro returns the task context buffer for the SSP task request.
199 #define scic_sds_ssp_task_request_get_task_context_buffer(memory) \
200 ((struct scu_task_context *)(\
201 ((char *)(scic_sds_ssp_task_request_get_response_buffer(memory))) \
202 + sizeof(struct sci_ssp_response_iu) \
208 * ****************************************************************************
209 * * SCIC SDS IO REQUEST PRIVATE METHODS
210 * **************************************************************************** */
215 * This method returns the size required to store an SSP IO request object. u32
/*
 * Total allocation size for one SSP IO request: the base request object,
 * the command/response IUs, one SCU task context, plus room for the full
 * set of SGL element pairs (SCU_MAX_SGL_ELEMENT_PAIRS).
 */
217 static u32 scic_sds_ssp_request_get_object_size(void)
219 return sizeof(struct scic_sds_request)
220 + scic_ssp_io_request_get_object_size()
221 + sizeof(struct scu_task_context)
223 + sizeof(struct scu_sgl_element_pair) * SCU_MAX_SGL_ELEMENT_PAIRS;
227 * This method returns the sgl element pair for the specified sgl_pair index.
228 * @sci_req: This parameter specifies the IO request for which to retrieve
229 * the Scatter-Gather List element pair.
230 * @sgl_pair_index: This parameter specifies the index into the SGL element
231 * pair to be retrieved.
233 * This method returns a pointer to a struct scu_sgl_element_pair.
235 static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
236 struct scic_sds_request *sci_req,
239 struct scu_task_context *task_context;
241 task_context = (struct scu_task_context *)sci_req->task_context_buffer;
/* Pairs 0 and 1 are embedded directly in the task context itself. */
243 if (sgl_pair_index == 0) {
244 return &task_context->sgl_pair_ab;
245 } else if (sgl_pair_index == 1) {
246 return &task_context->sgl_pair_cd;
/* All further pairs live in the request's side buffer, offset by 2. */
249 return &sci_req->sgl_element_pair_buffer[sgl_pair_index - 2];
253 * This function will build the SGL list for an IO request.
254 * @sci_req: This parameter specifies the IO request for which to build
255 * the Scatter-Gather List.
258 void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
260 struct isci_request *isci_request =
261 (struct isci_request *)sci_object_get_association(sds_request);
262 struct isci_host *isci_host = isci_request->isci_host;
263 struct sas_task *task = isci_request_access_task(isci_request);
264 struct scatterlist *sg = NULL;
267 struct scu_sgl_element_pair *scu_sg = NULL;
268 struct scu_sgl_element_pair *prev_sg = NULL;
/*
 * Normal path: walk the libsas scatterlist two entries at a time,
 * copying each pair into SCU SGL element pairs and chaining each pair
 * to the next via its DMA address.
 */
270 if (task->num_scatter > 0) {
274 scu_sg = scic_sds_request_get_sgl_element_pair(
278 SCU_SGL_COPY(scu_sg->A, sg);
283 SCU_SGL_COPY(scu_sg->B, sg);
/* Odd element count: zero the unused B half of the last pair. */
286 SCU_SGL_ZERO(scu_sg->B);
290 scic_io_request_get_dma_addr(
/* Link the previous pair to this one (upper/lower 32-bit halves). */
294 prev_sg->next_pair_upper =
295 upper_32_bits(dma_addr);
296 prev_sg->next_pair_lower =
297 lower_32_bits(dma_addr);
303 } else { /* handle when no sg */
/*
 * No scatterlist: map the single flat buffer with dma_map_single()
 * and describe the whole transfer in element A of pair 0. The
 * mapping is remembered in zero_scatter_daddr so it can be unmapped
 * later (unmap site not visible here).
 */
304 scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
307 dma_addr = dma_map_single(&isci_host->pdev->dev,
309 task->total_xfer_len,
312 isci_request->zero_scatter_daddr = dma_addr;
314 scu_sg->A.length = task->total_xfer_len;
315 scu_sg->A.address_upper = upper_32_bits(dma_addr);
316 scu_sg->A.address_lower = lower_32_bits(dma_addr);
/* Terminate the chain after the last pair. */
320 scu_sg->next_pair_upper = 0;
321 scu_sg->next_pair_lower = 0;
326 * This method builds the remainder of the IO request object.
327 * @sci_req: This parameter specifies the request object being constructed.
329 * The scic_sds_general_request_construct() must be called before this call is
332 static void scic_sds_ssp_io_request_assign_buffers(
333 struct scic_sds_request *sci_req)
/* Point the request's buffer members at their slots in the single
 * allocation (see the layout macros at the top of the file). */
335 sci_req->command_buffer =
336 scic_sds_ssp_request_get_command_buffer(sci_req);
337 sci_req->response_buffer =
338 scic_sds_ssp_request_get_response_buffer(sci_req);
339 sci_req->sgl_element_pair_buffer =
340 scic_sds_ssp_request_get_sgl_element_buffer(sci_req);
/* The SGL buffer must be aligned to the element-pair size. */
341 sci_req->sgl_element_pair_buffer =
342 PTR_ALIGN(sci_req->sgl_element_pair_buffer,
343 sizeof(struct scu_sgl_element_pair));
/* Only use the embedded TC buffer when the controller will assign the
 * tag; a user-assigned tag means the TC lives elsewhere. */
345 if (sci_req->was_tag_assigned_by_user == false) {
346 sci_req->task_context_buffer =
347 scic_sds_ssp_request_get_task_context_buffer(sci_req);
348 sci_req->task_context_buffer =
349 PTR_ALIGN(sci_req->task_context_buffer,
355 * This method constructs the SSP Command IU data for this io request object.
356 * @sci_req: This parameter specifies the request object for which the SSP
357 * command information unit is being built.
360 static void scic_sds_io_request_build_ssp_command_iu(
361 struct scic_sds_request *sds_request)
363 struct sci_ssp_command_iu *command_frame;
366 struct isci_request *isci_request =
367 (struct isci_request *)sci_object_get_association(sds_request);
370 (struct sci_ssp_command_iu *)sds_request->command_buffer;
372 command_frame->lun_upper = 0;
373 command_frame->lun_lower =
374 isci_request_ssp_io_request_get_lun(isci_request);
/* Clear the third 32-bit word of the IU header. */
376 ((u32 *)command_frame)[2] = 0;
378 cdb_length = isci_request_ssp_io_request_get_cdb_length(isci_request);
379 cdb_buffer = (u32 *)isci_request_ssp_io_request_get_cdb_address(
/* CDBs longer than the 16-byte fixed field spill into the
 * additional-CDB area; record how many extra bytes there are. */
382 if (cdb_length > 16) {
383 command_frame->additional_cdb_length = cdb_length - 16;
386 /* / @todo Is it ok to leave junk at the end of the cdb buffer? */
/* (cdb_length + 3) / sizeof(u32) rounds the byte count up to whole
 * 32-bit words for the swap-copy. */
387 scic_word_copy_with_swap(
388 (u32 *)(&command_frame->cdb),
390 (cdb_length + 3) / sizeof(u32)
393 command_frame->enable_first_burst = 0;
394 command_frame->task_priority =
395 isci_request_ssp_io_request_get_command_priority(isci_request);
396 command_frame->task_attribute =
397 isci_request_ssp_io_request_get_task_attribute(isci_request);
402 * This method constructs the SSP Task IU data for this io request object.
406 static void scic_sds_task_request_build_ssp_task_iu(
407 struct scic_sds_request *sds_request)
409 struct sci_ssp_task_iu *command_frame;
410 struct isci_request *isci_request =
411 (struct isci_request *)sci_object_get_association(sds_request);
414 (struct sci_ssp_task_iu *)sds_request->command_buffer;
416 command_frame->lun_upper = 0;
417 command_frame->lun_lower = isci_request_ssp_io_request_get_lun(
/* Clear the third 32-bit word of the IU header. */
420 ((u32 *)command_frame)[2] = 0;
/* Task-management function code and the tag of the IO being managed. */
422 command_frame->task_function =
423 isci_task_ssp_request_get_function(isci_request);
424 command_frame->task_tag =
425 isci_task_ssp_request_get_io_tag_to_manage(
431 * This method will fill in the SCU Task Context for any type of SSP request.
/*
 * NOTE(review): the function name carries a historical typo ("reqeust");
 * callers throughout this file use the misspelled name, so it cannot be
 * renamed in a single local edit.
 */
436 static void scu_ssp_reqeust_construct_task_context(
437 struct scic_sds_request *sds_request,
438 struct scu_task_context *task_context)
441 struct scic_sds_controller *controller;
442 struct scic_sds_remote_device *target_device;
443 struct scic_sds_port *target_port;
445 controller = scic_sds_request_get_controller(sds_request);
446 target_device = scic_sds_request_get_device(sds_request);
447 target_port = scic_sds_request_get_port(sds_request);
449 /* Fill in the TC with the its required data */
450 task_context->abort = 0;
451 task_context->priority = 0;
452 task_context->initiator_request = 1;
453 task_context->connection_rate =
454 scic_remote_device_get_connection_rate(target_device);
455 task_context->protocol_engine_index =
456 scic_sds_controller_get_protocol_engine_group(controller);
457 task_context->logical_port_index =
458 scic_sds_port_get_index(target_port);
459 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
460 task_context->valid = SCU_TASK_CONTEXT_VALID;
461 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
463 task_context->remote_node_index =
464 scic_sds_remote_device_get_index(sds_request->target_device);
465 task_context->command_code = 0;
467 task_context->link_layer_control = 0;
468 task_context->do_not_dma_ssp_good_response = 1;
469 task_context->strict_ordering = 0;
470 task_context->control_frame = 0;
471 task_context->timeout_enable = 0;
472 task_context->block_guard_enable = 0;
474 task_context->address_modifier = 0;
476 /* task_context->type.ssp.tag = sci_req->io_tag; */
477 task_context->task_phase = 0x01;
/*
 * Build the POST_TC command word. With a user-assigned tag the tag
 * index can be OR'd in now; otherwise it is OR'd in later, once the
 * controller allocates a tag (see the constructed-state start handler).
 */
479 if (sds_request->was_tag_assigned_by_user) {
481 * Build the task context now since we have already read
484 sds_request->post_context =
485 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
486 (scic_sds_controller_get_protocol_engine_group(
488 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
489 (scic_sds_port_get_index(target_port) <<
490 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
491 scic_sds_io_tag_get_index(sds_request->io_tag));
494 * Build the task context now since we have already read
497 * I/O tag index is not assigned because we have to wait
500 sds_request->post_context =
501 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
502 (scic_sds_controller_get_protocol_engine_group(
503 owning_controller) <<
504 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
505 (scic_sds_port_get_index(target_port) <<
506 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
/* Hand the hardware the DMA addresses of the command/response IUs,
 * split into upper/lower 32-bit halves. */
510 * Copy the physical address for the command buffer to the
513 dma_addr = scic_io_request_get_dma_addr(sds_request,
514 sds_request->command_buffer);
516 task_context->command_iu_upper = upper_32_bits(dma_addr);
517 task_context->command_iu_lower = lower_32_bits(dma_addr);
520 * Copy the physical address for the response buffer to the
523 dma_addr = scic_io_request_get_dma_addr(sds_request,
524 sds_request->response_buffer);
526 task_context->response_iu_upper = upper_32_bits(dma_addr);
527 task_context->response_iu_lower = lower_32_bits(dma_addr);
531 * This method will fill in the SCU Task Context for a SSP IO request.
535 static void scu_ssp_io_request_construct_task_context(
536 struct scic_sds_request *sci_req,
537 enum dma_data_direction dir,
540 struct scu_task_context *task_context;
542 task_context = scic_sds_request_get_task_context(sci_req);
/* Fill in the protocol-independent TC fields first. */
544 scu_ssp_reqeust_construct_task_context(sci_req, task_context);
/* IU length is expressed in 32-bit words, not bytes. */
546 task_context->ssp_command_iu_length = sizeof(struct sci_ssp_command_iu) / sizeof(u32);
547 task_context->type.ssp.frame_type = SCI_SAS_COMMAND_FRAME;
/* Map the DMA direction onto the SCU read/write task type. */
550 case DMA_FROM_DEVICE:
553 task_context->task_type = SCU_TASK_TYPE_IOREAD;
556 task_context->task_type = SCU_TASK_TYPE_IOWRITE;
560 task_context->transfer_length_bytes = len;
/* Only data-moving requests need an SGL. */
562 if (task_context->transfer_length_bytes > 0)
563 scic_sds_request_build_sgl(sci_req);
568 * This method will fill in the remainder of the io request object for SSP Task
573 static void scic_sds_ssp_task_request_assign_buffers(
574 struct scic_sds_request *sci_req)
576 /* Assign all of the buffer pointers */
577 sci_req->command_buffer =
578 scic_sds_ssp_task_request_get_command_buffer(sci_req);
579 sci_req->response_buffer =
580 scic_sds_ssp_task_request_get_response_buffer(sci_req);
/* Task-management requests move no data, so no SGL buffer. */
581 sci_req->sgl_element_pair_buffer = NULL;
/* Embedded TC buffer is only used when the controller assigns the tag. */
583 if (sci_req->was_tag_assigned_by_user == false) {
584 sci_req->task_context_buffer =
585 scic_sds_ssp_task_request_get_task_context_buffer(sci_req);
586 sci_req->task_context_buffer =
587 PTR_ALIGN(sci_req->task_context_buffer, SMP_CACHE_BYTES);
592 * This method will fill in the SCU Task Context for a SSP Task request. The
593 * following important settings are utilized: -# priority ==
594 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
595 * ahead of other task destined for the same Remote Node. -# task_type ==
596 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
597 * (i.e. non-raw frame) is being utilized to perform task management. -#
598 * control_frame == 1. This ensures that the proper endianness is set so
599 * that the bytes are transmitted in the right order for a task frame.
600 * @sci_req: This parameter specifies the task request object being
604 static void scu_ssp_task_request_construct_task_context(
605 struct scic_sds_request *sci_req)
607 struct scu_task_context *task_context;
609 task_context = scic_sds_request_get_task_context(sci_req);
/* Common SSP TC setup first, then override the TM-specific fields. */
611 scu_ssp_reqeust_construct_task_context(sci_req, task_context);
613 task_context->control_frame = 1;
614 task_context->priority = SCU_TASK_PRIORITY_HIGH;
615 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
616 task_context->transfer_length_bytes = 0;
617 task_context->type.ssp.frame_type = SCI_SAS_TASK_FRAME;
/* IU length is in 32-bit words. */
618 task_context->ssp_command_iu_length = sizeof(struct sci_ssp_task_iu) / sizeof(u32);
623 * This method constructs the SSP Command IU data for this ssp passthrough
624 * command request object.
625 * @sci_req: This parameter specifies the request object for which the SSP
626 * command information unit is being built.
628 * enum sci_status, returns invalid parameter is cdb > 16
633 * This method constructs the SATA request object.
/*
 * Dispatch on the SAT protocol derived from the ATA command and hand
 * construction off to the matching STP request builder. Unsupported
 * protocols are logged and reported as SCI_FAILURE.
 */
642 static enum sci_status scic_io_request_construct_sata(struct scic_sds_request *sci_req,
644 enum dma_data_direction dir,
647 enum sci_status status = SCI_SUCCESS;
650 case SAT_PROTOCOL_PIO_DATA_IN:
651 case SAT_PROTOCOL_PIO_DATA_OUT:
652 status = scic_sds_stp_pio_request_construct(sci_req, proto, copy);
655 case SAT_PROTOCOL_UDMA_DATA_IN:
656 case SAT_PROTOCOL_UDMA_DATA_OUT:
657 status = scic_sds_stp_udma_request_construct(sci_req, len, dir);
660 case SAT_PROTOCOL_ATA_HARD_RESET:
661 case SAT_PROTOCOL_SOFT_RESET:
662 status = scic_sds_stp_soft_reset_request_construct(sci_req);
665 case SAT_PROTOCOL_NON_DATA:
666 status = scic_sds_stp_non_data_request_construct(sci_req);
669 case SAT_PROTOCOL_FPDMA:
670 status = scic_sds_stp_ncq_request_construct(sci_req, len, dir);
/* ATAPI (packet) protocols are only built when ATAPI support is on. */
673 #if !defined(DISABLE_ATAPI)
674 case SAT_PROTOCOL_PACKET_NON_DATA:
675 case SAT_PROTOCOL_PACKET_DMA_DATA_IN:
676 case SAT_PROTOCOL_PACKET_DMA_DATA_OUT:
677 case SAT_PROTOCOL_PACKET_PIO_DATA_IN:
678 case SAT_PROTOCOL_PACKET_PIO_DATA_OUT:
679 status = scic_sds_stp_packet_request_construct(sci_req);
/* Everything below is unsupported: log it and fail the construction. */
683 case SAT_PROTOCOL_DMA_QUEUED:
684 case SAT_PROTOCOL_DMA:
685 case SAT_PROTOCOL_DEVICE_DIAGNOSTIC:
686 case SAT_PROTOCOL_DEVICE_RESET:
687 case SAT_PROTOCOL_RETURN_RESPONSE_INFO:
689 dev_err(scic_to_dev(sci_req->owning_controller),
690 "%s: SCIC IO Request 0x%p received un-handled "
692 __func__, sci_req, proto);
694 status = SCI_FAILURE;
701 u32 scic_io_request_get_object_size(void)
703 u32 ssp_request_size;
704 u32 stp_request_size;
705 u32 smp_request_size;
707 ssp_request_size = scic_sds_ssp_request_get_object_size();
708 stp_request_size = scic_sds_stp_request_get_object_size();
709 smp_request_size = scic_sds_smp_request_get_object_size();
711 return max(ssp_request_size, max(stp_request_size, smp_request_size));
/*
 * Construct a basic SSP IO request: build the SCU task context from the
 * libsas task's direction/length, build the SSP command IU, then move the
 * request state machine to CONSTRUCTED.
 */
714 enum sci_status scic_io_request_construct_basic_ssp(
715 struct scic_sds_request *sci_req)
717 struct isci_request *isci_request =
718 (struct isci_request *)sci_object_get_association(sci_req);
720 sci_req->protocol = SCIC_SSP_PROTOCOL;
722 scu_ssp_io_request_construct_task_context(
724 isci_request_io_request_get_data_direction(isci_request),
725 isci_request_io_request_get_transfer_length(isci_request));
727 scic_sds_io_request_build_ssp_command_iu(sci_req);
729 sci_base_state_machine_change_state(&sci_req->state_machine,
730 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
/*
 * Construct an SSP task-management request: fill the SCU task context,
 * build the SSP Task IU, then move the state machine to CONSTRUCTED.
 */
736 enum sci_status scic_task_request_construct_ssp(
737 struct scic_sds_request *sci_req)
739 /* Construct the SSP Task SCU Task Context */
740 scu_ssp_task_request_construct_task_context(sci_req);
742 /* Fill in the SSP Task IU */
743 scic_sds_task_request_build_ssp_task_iu(sci_req);
745 sci_base_state_machine_change_state(&sci_req->state_machine,
746 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
/*
 * Construct a basic SATA/STP IO request. Gathers length, direction and
 * SAT protocol from the associated libsas task, delegates to
 * scic_io_request_construct_sata(), and advances the state machine to
 * CONSTRUCTED only on success.
 */
752 enum sci_status scic_io_request_construct_basic_sata(
753 struct scic_sds_request *sci_req)
755 enum sci_status status;
756 struct scic_sds_stp_request *stp_req;
759 enum dma_data_direction dir;
761 struct isci_request *isci_request =
762 (struct isci_request *)sci_object_get_association(sci_req);
763 struct sas_task *task = isci_request_access_task(isci_request);
765 stp_req = container_of(sci_req, typeof(*stp_req), parent);
767 sci_req->protocol = SCIC_STP_PROTOCOL;
769 len = isci_request_io_request_get_transfer_length(isci_request);
770 dir = isci_request_io_request_get_data_direction(isci_request);
771 proto = isci_sata_get_sat_protocol(isci_request);
/* copy == true whenever the task actually moves data. */
772 copy = (task->data_dir == DMA_NONE) ? false : true;
774 status = scic_io_request_construct_sata(sci_req, proto, len, dir, copy);
776 if (status == SCI_SUCCESS)
777 sci_base_state_machine_change_state(&sci_req->state_machine,
778 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
/*
 * Construct a SATA task-management request. Only the reset protocols are
 * supported; anything else is logged and fails. On success the state
 * machine advances to CONSTRUCTED.
 */
784 enum sci_status scic_task_request_construct_sata(
785 struct scic_sds_request *sci_req)
787 enum sci_status status;
789 struct isci_request *isci_request =
790 (struct isci_request *)sci_object_get_association(sci_req);
792 sat_protocol = isci_sata_get_sat_protocol(isci_request);
794 switch (sat_protocol) {
795 case SAT_PROTOCOL_ATA_HARD_RESET:
796 case SAT_PROTOCOL_SOFT_RESET:
797 status = scic_sds_stp_soft_reset_request_construct(sci_req);
801 dev_err(scic_to_dev(sci_req->owning_controller),
802 "%s: SCIC IO Request 0x%p received un-handled SAT "
808 status = SCI_FAILURE;
812 if (status == SCI_SUCCESS)
813 sci_base_state_machine_change_state(&sci_req->state_machine,
814 SCI_BASE_REQUEST_STATE_CONSTRUCTED);
820 u16 scic_io_request_get_io_tag(
821 struct scic_sds_request *sci_req)
823 return sci_req->io_tag;
827 u32 scic_request_get_controller_status(
828 struct scic_sds_request *sci_req)
830 return sci_req->scu_status;
834 void *scic_io_request_get_command_iu_address(
835 struct scic_sds_request *sci_req)
837 return sci_req->command_buffer;
841 void *scic_io_request_get_response_iu_address(
842 struct scic_sds_request *sci_req)
844 return sci_req->response_buffer;
/* Base offset of the task-context SRAM window within the SCU BAR. */
848 #define SCU_TASK_CONTEXT_SRAM 0x200000
/*
 * Read back how many bytes the hardware actually transferred for a
 * request by peeking at the live task context in SCU SRAM. Only valid
 * when the SMU address_modifier register reads 0 (per the check below).
 */
849 u32 scic_io_request_get_number_of_bytes_transferred(
850 struct scic_sds_request *scic_sds_request)
852 struct scic_sds_controller *scic = scic_sds_request->owning_controller;
855 if (readl(&scic->smu_registers->address_modifier) == 0) {
856 void __iomem *scu_reg_base = scic->scu_registers;
858 * get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
859 * BAR1 is the scu_registers
860 * 0x20002C = 0x200000 + 0x2c
861 * = start of task context SRAM + offset of (type.ssp.data_offset)
862 * TCi is the io_tag of struct scic_sds_request */
863 ret_val = readl(scu_reg_base +
864 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
865 ((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(scic_sds_request->io_tag)));
873 * ****************************************************************************
874 * * SCIC SDS Interface Implementation
875 * **************************************************************************** */
/*
 * Start a request: reject it if the target device's sequence number has
 * moved on since the request was built (stale request), otherwise
 * delegate to the current state's start handler. A missing handler means
 * the request is in a state that cannot start.
 */
878 scic_sds_request_start(struct scic_sds_request *request)
880 if (request->device_sequence !=
881 scic_sds_remote_device_get_sequence(request->target_device))
884 if (request->state_handlers->start_handler)
885 return request->state_handlers->start_handler(request);
887 dev_warn(scic_to_dev(request->owning_controller),
888 "%s: SCIC IO Request requested to start while in wrong "
891 sci_base_state_machine_get_state(&request->state_machine));
893 return SCI_FAILURE_INVALID_STATE;
/*
 * Terminate a request by delegating to the current state's abort handler;
 * warn and fail when the state provides none.
 */
897 scic_sds_io_request_terminate(struct scic_sds_request *request)
899 if (request->state_handlers->abort_handler)
900 return request->state_handlers->abort_handler(request);
902 dev_warn(scic_to_dev(request->owning_controller),
903 "%s: SCIC IO Request requested to abort while in wrong "
906 sci_base_state_machine_get_state(&request->state_machine));
908 return SCI_FAILURE_INVALID_STATE;
/*
 * Complete a request by delegating to the current state's complete
 * handler; warn and fail when the state provides none.
 */
912 scic_sds_io_request_complete(struct scic_sds_request *request)
914 if (request->state_handlers->complete_handler)
915 return request->state_handlers->complete_handler(request);
917 dev_warn(scic_to_dev(request->owning_controller),
918 "%s: SCIC IO Request requested to complete while in wrong "
921 sci_base_state_machine_get_state(&request->state_machine));
923 return SCI_FAILURE_INVALID_STATE;
/*
 * Route a hardware event notification to the current state's event
 * handler; warn and fail when the state provides none.
 */
926 enum sci_status scic_sds_io_request_event_handler(
927 struct scic_sds_request *request,
930 if (request->state_handlers->event_handler)
931 return request->state_handlers->event_handler(request, event_code);
933 dev_warn(scic_to_dev(request->owning_controller),
934 "%s: SCIC IO Request given event code notification %x while "
935 "in wrong state %d\n",
938 sci_base_state_machine_get_state(&request->state_machine));
940 return SCI_FAILURE_INVALID_STATE;
/*
 * Route a task-context completion. The STARTED state without an active
 * substate machine is special-cased to the shared started-state handler;
 * otherwise the current state's tc_completion handler is used.
 */
944 scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code)
946 if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED &&
947 request->has_started_substate_machine == false)
948 return scic_sds_request_started_state_tc_completion_handler(request, completion_code);
949 else if (request->state_handlers->tc_completion_handler)
950 return request->state_handlers->tc_completion_handler(request, completion_code);
952 dev_warn(scic_to_dev(request->owning_controller),
953 "%s: SCIC IO Request given task completion notification %x "
954 "while in wrong state %d\n",
957 sci_base_state_machine_get_state(&request->state_machine));
959 return SCI_FAILURE_INVALID_STATE;
966 * @sci_req: The SCIC_SDS_IO_REQUEST_T object for which the start
967 * operation is to be executed.
968 * @frame_index: The frame index returned by the hardware for the request
971 * This method invokes the core state frame handler for the
972 * SCIC_SDS_IO_REQUEST_T object. enum sci_status
974 enum sci_status scic_sds_io_request_frame_handler(
975 struct scic_sds_request *request,
978 if (request->state_handlers->frame_handler)
979 return request->state_handlers->frame_handler(request, frame_index);
981 dev_warn(scic_to_dev(request->owning_controller),
982 "%s: SCIC IO Request given unexpected frame %x while in "
986 sci_base_state_machine_get_state(&request->state_machine));
/* No handler consumed it: release the frame back to the controller so
 * the unsolicited-frame buffer is not leaked. */
988 scic_sds_controller_release_frame(request->owning_controller, frame_index);
989 return SCI_FAILURE_INVALID_STATE;
994 * @sci_req: The SCIC_SDS_IO_REQUEST_T object for which the task start
995 * operation is to be executed.
997 * This method invokes the core state task complete handler for the
998 * SCIC_SDS_IO_REQUEST_T object. enum sci_status
1002 * ****************************************************************************
1003 * * SCIC SDS PROTECTED METHODS
1004 * **************************************************************************** */
1007 * This method copies response data for requests returning response data
1008 * instead of sense data.
1009 * @sci_req: This parameter specifies the request object for which to copy
1010 * the response data.
1013 void scic_sds_io_request_copy_response(struct scic_sds_request *sds_request)
1015 void *response_buffer;
1016 u32 user_response_length;
1017 u32 core_response_length;
1018 struct sci_ssp_response_iu *ssp_response;
1019 struct isci_request *isci_request =
1020 (struct isci_request *)sci_object_get_association(sds_request);
1023 (struct sci_ssp_response_iu *)sds_request->response_buffer;
1026 isci_task_ssp_request_get_response_data_address(
1029 user_response_length =
1030 isci_task_ssp_request_get_response_data_length(
1033 core_response_length = sci_ssp_get_response_data_length(
1034 ssp_response->response_data_length);
/* Copy no more than both the caller's buffer and the IU actually hold. */
1036 user_response_length = min(user_response_length, core_response_length);
1038 memcpy(response_buffer, ssp_response->data, user_response_length);
1042 * *****************************************************************************
1043 * * CONSTRUCTED STATE HANDLERS
1044 * ***************************************************************************** */
1047 * This method implements the action taken when a constructed
1048 * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request.
1049 * This method will, if necessary, allocate a TCi for the io request object and
1050 * then will, if necessary, copy the constructed TC data into the actual TC
1051 * buffer. If everything is successful the post context field is updated with
1052 * the TCi so the controller can post the request to the hardware. enum sci_status
1053 * SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES
1055 static enum sci_status scic_sds_request_constructed_state_start_handler(
1056 struct scic_sds_request *request)
1058 struct scu_task_context *task_context;
/* Allocate a tag now if the caller did not supply one at construction. */
1060 if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
1062 scic_controller_allocate_io_tag(request->owning_controller);
1065 /* Record the IO Tag in the request */
1066 if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
1067 task_context = request->task_context_buffer;
1069 task_context->task_index = scic_sds_io_tag_get_index(request->io_tag);
/* Protocol-specific tag plumbing inside the task context. */
1071 switch (task_context->protocol_type) {
1072 case SCU_TASK_CONTEXT_PROTOCOL_SMP:
1073 case SCU_TASK_CONTEXT_PROTOCOL_SSP:
1075 task_context->type.ssp.tag = request->io_tag;
1076 task_context->type.ssp.target_port_transfer_tag = 0xFFFF;
1079 case SCU_TASK_CONTEXT_PROTOCOL_STP:
1082 * task_context->type.stp.ncq_tag = request->ncq_tag; */
1085 case SCU_TASK_CONTEXT_PROTOCOL_NONE:
1086 /* / @todo When do we set no protocol type? */
1090 /* This should never happen since we build the IO requests */
1095 * Check to see if we need to copy the task context buffer
1096 * or have been building into the task context buffer */
1097 if (request->was_tag_assigned_by_user == false) {
1098 scic_sds_controller_copy_task_context(
1099 request->owning_controller, request);
1102 /* Add to the post_context the io tag value */
1103 request->post_context |= scic_sds_io_tag_get_index(request->io_tag);
1105 /* Everything is good go ahead and change state */
1106 sci_base_state_machine_change_state(&request->state_machine,
1107 SCI_BASE_REQUEST_STATE_STARTED);
/* No tag could be obtained: the start attempt fails. */
1112 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
1116 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1117 * object receives a scic_sds_request_terminate() request. Since the request
1118 * has not yet been posted to the hardware the request transitions to the
1119 * completed state. enum sci_status SCI_SUCCESS
1121 static enum sci_status scic_sds_request_constructed_state_abort_handler(
1122 struct scic_sds_request *request)
1125 * This request has been terminated by the user make sure that the correct
1126 * status code is returned */
1127 scic_sds_request_set_status(request,
1128 SCU_TASK_DONE_TASK_ABORT,
1129 SCI_FAILURE_IO_TERMINATED);
/* Never reached the hardware, so go straight to COMPLETED. */
1131 sci_base_state_machine_change_state(&request->state_machine,
1132 SCI_BASE_REQUEST_STATE_COMPLETED);
1137 * *****************************************************************************
1138 * * STARTED STATE HANDLERS
1139 * ***************************************************************************** */
1142 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1143 * object receives a scic_sds_request_terminate() request. Since the request
1144 * has been posted to the hardware the io request state is changed to the
1145 * aborting state. enum sci_status SCI_SUCCESS
1147 enum sci_status scic_sds_request_started_state_abort_handler(
1148 struct scic_sds_request *request)
1150 if (request->has_started_substate_machine)
1151 sci_base_state_machine_stop(&request->started_substate_machine);
1153 sci_base_state_machine_change_state(&request->state_machine,
1154 SCI_BASE_REQUEST_STATE_ABORTING);
1159 * scic_sds_request_started_state_tc_completion_handler() - This method process
1160 * TC (task context) completions for normal IO request (i.e. Task/Abort
1161 * Completions of type 0). This method will update the
1162 * SCIC_SDS_IO_REQUEST_T::status field.
1163 * @sci_req: This parameter specifies the request for which a completion
1165 * @completion_code: This parameter specifies the completion code received from
1169 enum sci_status scic_sds_request_started_state_tc_completion_handler(
1170 struct scic_sds_request *sci_req,
1171 u32 completion_code)
1174 struct sci_ssp_response_iu *response_buffer;
1177 * @todo Any SDMA return code of other than 0 is bad
1178 * decode 0x003C0000 to determine SDMA status
1180 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1181 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1182 scic_sds_request_set_status(
1183 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1187 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP):
1190 * There are times when the SCU hardware will return an early response
1191 * because the io request specified more data than is returned by the
1192 * target device (mode pages, inquiry data, etc.). We must check the
1193 * response stats to see if this is truly a failed request or a good
1194 * request that just got completed early. */
1195 struct sci_ssp_response_iu *response = (struct sci_ssp_response_iu *)
1196 sci_req->response_buffer;
1197 scic_word_copy_with_swap(
1198 sci_req->response_buffer,
1199 sci_req->response_buffer,
1200 sizeof(struct sci_ssp_response_iu) / sizeof(u32)
1203 if (response->status == 0) {
1204 scic_sds_request_set_status(
1205 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS_IO_DONE_EARLY
1208 scic_sds_request_set_status(
1210 SCU_TASK_DONE_CHECK_RESPONSE,
1211 SCI_FAILURE_IO_RESPONSE_VALID
1217 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE):
1218 scic_word_copy_with_swap(
1219 sci_req->response_buffer,
1220 sci_req->response_buffer,
1221 sizeof(struct sci_ssp_response_iu) / sizeof(u32)
1224 scic_sds_request_set_status(
1226 SCU_TASK_DONE_CHECK_RESPONSE,
1227 SCI_FAILURE_IO_RESPONSE_VALID
1231 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
1233 * / @todo With TASK_DONE_RESP_LEN_ERR is the response frame guaranteed
1234 * / to be received before this completion status is posted? */
1236 (struct sci_ssp_response_iu *)sci_req->response_buffer;
1238 response_buffer->data_present & SCI_SSP_RESPONSE_IU_DATA_PRESENT_MASK;
1240 if ((data_present == 0x01) || (data_present == 0x02)) {
1241 scic_sds_request_set_status(
1243 SCU_TASK_DONE_CHECK_RESPONSE,
1244 SCI_FAILURE_IO_RESPONSE_VALID
1247 scic_sds_request_set_status(
1248 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1253 /* only stp device gets suspended. */
1254 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
1255 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
1256 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
1257 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
1258 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
1259 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
1260 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1261 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
1262 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
1263 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1264 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
1265 if (sci_req->protocol == SCIC_STP_PROTOCOL) {
1266 scic_sds_request_set_status(
1268 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT,
1269 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED
1272 scic_sds_request_set_status(
1274 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT,
1275 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1280 /* both stp/ssp device gets suspended */
1281 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
1282 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
1283 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
1284 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
1285 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
1286 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
1287 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
1288 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
1289 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
1290 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
1291 scic_sds_request_set_status(
1293 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT,
1294 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED
1298 /* neither ssp nor stp gets suspended. */
1299 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
1300 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
1301 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
1302 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
1303 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
1304 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
1305 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1306 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1307 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1308 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1309 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
1310 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
1311 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
1312 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
1313 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
1315 scic_sds_request_set_status(
1317 SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT,
1318 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1324 * @todo This is probably wrong for ACK/NAK timeout conditions
1327 /* In all cases we will treat this as the completion of the IO request. */
1328 sci_base_state_machine_change_state(&sci_req->state_machine,
1329 SCI_BASE_REQUEST_STATE_COMPLETED);
1334 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1335 * object receives a scic_sds_request_frame_handler() request. This method
1336 * first determines the frame type received. If this is a response frame then
1337 * the response data is copied to the io request response buffer for processing
1338 * at completion time. If the frame type is not a response buffer an error is
1339 * logged. enum sci_status SCI_SUCCESS SCI_FAILURE_INVALID_PARAMETER_VALUE
1341 static enum sci_status scic_sds_request_started_state_frame_handler(
1342 struct scic_sds_request *sci_req,
1345 enum sci_status status;
1346 struct sci_ssp_frame_header *frame_header;
1348 /* / @todo If this is a response frame we must record that we received it */
1349 status = scic_sds_unsolicited_frame_control_get_header(
1350 &(scic_sds_request_get_controller(sci_req)->uf_control),
1352 (void **)&frame_header
1355 if (frame_header->frame_type == SCI_SAS_RESPONSE_FRAME) {
1356 struct sci_ssp_response_iu *response_buffer;
1358 status = scic_sds_unsolicited_frame_control_get_buffer(
1359 &(scic_sds_request_get_controller(sci_req)->uf_control),
1361 (void **)&response_buffer
1364 scic_word_copy_with_swap(
1365 sci_req->response_buffer,
1366 (u32 *)response_buffer,
1367 sizeof(struct sci_ssp_response_iu)
1370 response_buffer = (struct sci_ssp_response_iu *)sci_req->response_buffer;
1372 if ((response_buffer->data_present == 0x01) ||
1373 (response_buffer->data_present == 0x02)) {
1374 scic_sds_request_set_status(
1376 SCU_TASK_DONE_CHECK_RESPONSE,
1377 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1380 scic_sds_request_set_status(
1381 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1384 /* This was not a response frame why did it get forwarded? */
1385 dev_err(scic_to_dev(sci_req->owning_controller),
1386 "%s: SCIC IO Request 0x%p received unexpected "
1387 "frame %d type 0x%02x\n",
1391 frame_header->frame_type);
1394 * In any case we are done with this frame buffer return it to the
1396 scic_sds_controller_release_frame(
1397 sci_req->owning_controller, frame_index
1404 * *****************************************************************************
1405 * * COMPLETED STATE HANDLERS
1406 * ***************************************************************************** */
1410 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1411 * object receives a scic_sds_request_complete() request. This method frees up
1412 * any io request resources that have been allocated and transitions the
1413 * request to its final state. Consider stopping the state machine instead of
1414 * transitioning to the final state? enum sci_status SCI_SUCCESS
1416 static enum sci_status scic_sds_request_completed_state_complete_handler(
1417 struct scic_sds_request *request)
1419 if (request->was_tag_assigned_by_user != true) {
1420 scic_controller_free_io_tag(
1421 request->owning_controller, request->io_tag);
1424 if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) {
1425 scic_sds_controller_release_frame(
1426 request->owning_controller, request->saved_rx_frame_index);
1429 sci_base_state_machine_change_state(&request->state_machine,
1430 SCI_BASE_REQUEST_STATE_FINAL);
1435 * *****************************************************************************
1436 * * ABORTING STATE HANDLERS
1437 * ***************************************************************************** */
1440 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1441 * object receives a scic_sds_request_terminate() request. This method is the
1442 * io request aborting state abort handlers. On receipt of a multiple
1443 * terminate requests the io request will transition to the completed state.
1444 * This should not happen in normal operation. enum sci_status SCI_SUCCESS
1446 static enum sci_status scic_sds_request_aborting_state_abort_handler(
1447 struct scic_sds_request *request)
1449 sci_base_state_machine_change_state(&request->state_machine,
1450 SCI_BASE_REQUEST_STATE_COMPLETED);
1455 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1456 * object receives a scic_sds_request_task_completion() request. This method
1457 * decodes the completion type waiting for the abort task complete
1458 * notification. When the abort task complete is received the io request
1459 * transitions to the completed state. enum sci_status SCI_SUCCESS
1461 static enum sci_status scic_sds_request_aborting_state_tc_completion_handler(
1462 struct scic_sds_request *sci_req,
1463 u32 completion_code)
1465 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1466 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
1467 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
1468 scic_sds_request_set_status(
1469 sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED
1472 sci_base_state_machine_change_state(&sci_req->state_machine,
1473 SCI_BASE_REQUEST_STATE_COMPLETED);
1478 * Unless we get some strange error wait for the task abort to complete
1479 * TODO: Should there be a state change for this completion? */
1487 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1488 * object receives a scic_sds_request_frame_handler() request. This method
1489 * discards the unsolicited frame since we are waiting for the abort task
1490 * completion. enum sci_status SCI_SUCCESS
1492 static enum sci_status scic_sds_request_aborting_state_frame_handler(
1493 struct scic_sds_request *sci_req,
1496 /* TODO: Is it even possible to get an unsolicited frame in the aborting state? */
1498 scic_sds_controller_release_frame(
1499 sci_req->owning_controller, frame_index);
1504 static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
1505 [SCI_BASE_REQUEST_STATE_INITIAL] = {
1507 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
1508 .start_handler = scic_sds_request_constructed_state_start_handler,
1509 .abort_handler = scic_sds_request_constructed_state_abort_handler,
1511 [SCI_BASE_REQUEST_STATE_STARTED] = {
1512 .abort_handler = scic_sds_request_started_state_abort_handler,
1513 .tc_completion_handler = scic_sds_request_started_state_tc_completion_handler,
1514 .frame_handler = scic_sds_request_started_state_frame_handler,
1516 [SCI_BASE_REQUEST_STATE_COMPLETED] = {
1517 .complete_handler = scic_sds_request_completed_state_complete_handler,
1519 [SCI_BASE_REQUEST_STATE_ABORTING] = {
1520 .abort_handler = scic_sds_request_aborting_state_abort_handler,
1521 .tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler,
1522 .frame_handler = scic_sds_request_aborting_state_frame_handler,
1524 [SCI_BASE_REQUEST_STATE_FINAL] = {
1529 * scic_sds_request_initial_state_enter() -
1530 * @object: This parameter specifies the base object for which the state
1531 * transition is occurring.
1533 * This method implements the actions taken when entering the
1534 * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial
1535 * base request is constructed. Entry into the initial state sets all handlers
1536 * for the io request object to their default handlers. none
1538 static void scic_sds_request_initial_state_enter(
1539 struct sci_base_object *object)
1541 struct scic_sds_request *sci_req = (struct scic_sds_request *)object;
1545 scic_sds_request_state_handler_table,
1546 SCI_BASE_REQUEST_STATE_INITIAL
1551 * scic_sds_request_constructed_state_enter() -
1552 * @object: The io request object that is to enter the constructed state.
1554 * This method implements the actions taken when entering the
1555 * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers
1556 * for the the constructed state. none
1558 static void scic_sds_request_constructed_state_enter(
1559 struct sci_base_object *object)
1561 struct scic_sds_request *sci_req = (struct scic_sds_request *)object;
1565 scic_sds_request_state_handler_table,
1566 SCI_BASE_REQUEST_STATE_CONSTRUCTED
1571 * scic_sds_request_started_state_enter() -
1572 * @object: This parameter specifies the base object for which the state
1573 * transition is occuring. This is cast into a SCIC_SDS_IO_REQUEST object.
1575 * This method implements the actions taken when entering the
1576 * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a
1577 * SCSI Task request we must enter the started substate machine. none
1579 static void scic_sds_request_started_state_enter(
1580 struct sci_base_object *object)
1582 struct scic_sds_request *sci_req = (struct scic_sds_request *)object;
1586 scic_sds_request_state_handler_table,
1587 SCI_BASE_REQUEST_STATE_STARTED
1591 * Most of the request state machines have a started substate machine so
1592 * start its execution on the entry to the started state. */
1593 if (sci_req->has_started_substate_machine == true)
1594 sci_base_state_machine_start(&sci_req->started_substate_machine);
1598 * scic_sds_request_started_state_exit() -
1599 * @object: This parameter specifies the base object for which the state
1600 * transition is occuring. This object is cast into a SCIC_SDS_IO_REQUEST
1603 * This method implements the actions taken when exiting the
1604 * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be
1605 * to stop the started substate machine. none
1607 static void scic_sds_request_started_state_exit(
1608 struct sci_base_object *object)
1610 struct scic_sds_request *sci_req = (struct scic_sds_request *)object;
1612 if (sci_req->has_started_substate_machine == true)
1613 sci_base_state_machine_stop(&sci_req->started_substate_machine);
1617 * scic_sds_request_completed_state_enter() -
1618 * @object: This parameter specifies the base object for which the state
1619 * transition is occuring. This object is cast into a SCIC_SDS_IO_REQUEST
1622 * This method implements the actions taken when entering the
1623 * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the
1624 * SCIC_SDS_IO_REQUEST has completed. The method will decode the request
1625 * completion status and convert it to an enum sci_status to return in the
1626 * completion callback function. none
1628 static void scic_sds_request_completed_state_enter(
1629 struct sci_base_object *object)
1631 struct scic_sds_request *sci_req = (struct scic_sds_request *)object;
1632 struct scic_sds_controller *scic =
1633 scic_sds_request_get_controller(sci_req);
1634 struct isci_host *ihost = sci_object_get_association(scic);
1635 struct isci_request *ireq = sci_object_get_association(sci_req);
1637 SET_STATE_HANDLER(sci_req,
1638 scic_sds_request_state_handler_table,
1639 SCI_BASE_REQUEST_STATE_COMPLETED);
1641 /* Tell the SCI_USER that the IO request is complete */
1642 if (sci_req->is_task_management_request == false)
1643 isci_request_io_request_complete(ihost,
1645 sci_req->sci_status);
1647 isci_task_request_complete(ihost, ireq, sci_req->sci_status);
1651 * scic_sds_request_aborting_state_enter() -
1652 * @object: This parameter specifies the base object for which the state
1653 * transition is occuring. This object is cast into a SCIC_SDS_IO_REQUEST
1656 * This method implements the actions taken when entering the
1657 * SCI_BASE_REQUEST_STATE_ABORTING state. none
1659 static void scic_sds_request_aborting_state_enter(
1660 struct sci_base_object *object)
1662 struct scic_sds_request *sci_req = (struct scic_sds_request *)object;
1664 /* Setting the abort bit in the Task Context is required by the silicon. */
1665 sci_req->task_context_buffer->abort = 1;
1669 scic_sds_request_state_handler_table,
1670 SCI_BASE_REQUEST_STATE_ABORTING
1675 * scic_sds_request_final_state_enter() -
1676 * @object: This parameter specifies the base object for which the state
1677 * transition is occuring. This is cast into a SCIC_SDS_IO_REQUEST object.
1679 * This method implements the actions taken when entering the
1680 * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the
1681 * state handlers in place. none
1683 static void scic_sds_request_final_state_enter(
1684 struct sci_base_object *object)
1686 struct scic_sds_request *sci_req = (struct scic_sds_request *)object;
1690 scic_sds_request_state_handler_table,
1691 SCI_BASE_REQUEST_STATE_FINAL
1695 static const struct sci_base_state scic_sds_request_state_table[] = {
1696 [SCI_BASE_REQUEST_STATE_INITIAL] = {
1697 .enter_state = scic_sds_request_initial_state_enter,
1699 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
1700 .enter_state = scic_sds_request_constructed_state_enter,
1702 [SCI_BASE_REQUEST_STATE_STARTED] = {
1703 .enter_state = scic_sds_request_started_state_enter,
1704 .exit_state = scic_sds_request_started_state_exit
1706 [SCI_BASE_REQUEST_STATE_COMPLETED] = {
1707 .enter_state = scic_sds_request_completed_state_enter,
1709 [SCI_BASE_REQUEST_STATE_ABORTING] = {
1710 .enter_state = scic_sds_request_aborting_state_enter,
1712 [SCI_BASE_REQUEST_STATE_FINAL] = {
1713 .enter_state = scic_sds_request_final_state_enter,
1717 static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
1718 struct scic_sds_remote_device *sci_dev,
1720 void *user_io_request_object,
1721 struct scic_sds_request *sci_req)
1723 sci_req->parent.private = NULL;
1724 sci_base_state_machine_construct(&sci_req->state_machine, &sci_req->parent,
1725 scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL);
1726 sci_base_state_machine_start(&sci_req->state_machine);
1728 sci_req->io_tag = io_tag;
1729 sci_req->user_request = user_io_request_object;
1730 sci_req->owning_controller = scic;
1731 sci_req->target_device = sci_dev;
1732 sci_req->has_started_substate_machine = false;
1733 sci_req->protocol = SCIC_NO_PROTOCOL;
1734 sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
1735 sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);
1737 sci_req->sci_status = SCI_SUCCESS;
1738 sci_req->scu_status = 0;
1739 sci_req->post_context = 0xFFFFFFFF;
1741 sci_req->is_task_management_request = false;
1743 if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
1744 sci_req->was_tag_assigned_by_user = false;
1745 sci_req->task_context_buffer = NULL;
1747 sci_req->was_tag_assigned_by_user = true;
1749 sci_req->task_context_buffer =
1750 scic_sds_controller_get_task_context_buffer(scic, io_tag);
1754 enum sci_status scic_io_request_construct(struct scic_sds_controller *scic,
1755 struct scic_sds_remote_device *sci_dev,
1757 void *user_io_request_object,
1758 struct scic_sds_request *sci_req,
1759 struct scic_sds_request **new_scic_io_request_handle)
1761 struct domain_device *dev = sci_dev_to_domain(sci_dev);
1762 enum sci_status status = SCI_SUCCESS;
1764 /* Build the common part of the request */
1765 scic_sds_general_request_construct(scic, sci_dev, io_tag,
1766 user_io_request_object, sci_req);
1768 if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
1769 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
1771 if (dev->dev_type == SAS_END_DEV) {
1772 scic_sds_ssp_io_request_assign_buffers(sci_req);
1773 } else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
1774 scic_sds_stp_request_assign_buffers(sci_req);
1775 memset(sci_req->command_buffer, 0, sizeof(struct sata_fis_reg_h2d));
1776 } else if (dev_is_expander(dev)) {
1777 scic_sds_smp_request_assign_buffers(sci_req);
1778 memset(sci_req->command_buffer, 0, sizeof(struct smp_request));
1780 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
1782 if (status == SCI_SUCCESS) {
1783 memset(sci_req->task_context_buffer, 0,
1784 SCI_FIELD_OFFSET(struct scu_task_context, sgl_pair_ab));
1785 *new_scic_io_request_handle = sci_req;
1791 enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
1792 struct scic_sds_remote_device *sci_dev,
1794 void *user_io_request_object,
1795 struct scic_sds_request *sci_req,
1796 struct scic_sds_request **new_sci_req)
1798 struct domain_device *dev = sci_dev_to_domain(sci_dev);
1799 enum sci_status status = SCI_SUCCESS;
1801 /* Build the common part of the request */
1802 scic_sds_general_request_construct(scic, sci_dev, io_tag,
1803 user_io_request_object,
1806 if (dev->dev_type == SAS_END_DEV) {
1807 scic_sds_ssp_task_request_assign_buffers(sci_req);
1809 sci_req->has_started_substate_machine = true;
1811 /* Construct the started sub-state machine. */
1812 sci_base_state_machine_construct(
1813 &sci_req->started_substate_machine,
1815 scic_sds_io_request_started_task_mgmt_substate_table,
1816 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION
1818 } else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
1819 scic_sds_stp_request_assign_buffers(sci_req);
1821 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
1823 if (status == SCI_SUCCESS) {
1824 sci_req->is_task_management_request = true;
1825 memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
1826 *new_sci_req = sci_req;