2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 #include "sci_base_state.h"
59 #include "sci_base_state_machine.h"
60 #include "scic_io_request.h"
61 #include "scic_sds_controller.h"
62 #include "remote_device.h"
63 #include "scic_sds_request.h"
64 #include "scic_sds_stp_pio_request.h"
65 #include "scic_sds_stp_request.h"
66 #include "scic_sds_unsolicited_frame_control.h"
67 #include "sci_environment.h"
69 #include "scu_completion_codes.h"
70 #include "scu_event_codes.h"
71 #include "scu_task_context.h"
/**
 * scic_sds_stp_request_get_h2d_reg_buffer() -
 *
 * This macro returns the address of the stp h2d reg fis buffer in the io
 * request memory.  The H2D register FIS lives immediately after the
 * scic_sds_stp_request object itself (see
 * scic_sds_stp_request_get_object_size()).
 */
#define scic_sds_stp_request_get_h2d_reg_buffer(memory)	\
	((struct host_to_dev_fis *)(\
	((char *)(memory)) + sizeof(struct scic_sds_stp_request) \
	))

/**
 * scic_sds_stp_request_get_response_buffer() -
 *
 * This macro returns the address of the ssp response iu buffer in the io
 * request memory.  The D2H (response) FIS follows directly after the H2D
 * register FIS.
 */
#define scic_sds_stp_request_get_response_buffer(memory) \
	((struct dev_to_host_fis *)(\
	((char *)(scic_sds_stp_request_get_h2d_reg_buffer(memory))) \
	+ sizeof(struct host_to_dev_fis) \
	))
99 * This method return the memory space required for STP PIO requests. u32
101 u32 scic_sds_stp_request_get_object_size(void)
103 return sizeof(struct scic_sds_stp_request)
104 + sizeof(struct host_to_dev_fis)
105 + sizeof(struct dev_to_host_fis);
108 void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req)
110 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
112 sci_req->command_buffer = scic_sds_stp_request_get_h2d_reg_buffer(stp_req);
113 sci_req->response_buffer = scic_sds_stp_request_get_response_buffer(stp_req);
115 if (sci_req->was_tag_assigned_by_user == false)
116 sci_req->task_context_buffer = &sci_req->tc;
120 * This method is will fill in the SCU Task Context for any type of SATA
121 * request. This is called from the various SATA constructors.
122 * @sci_req: The general IO request object which is to be used in
123 * constructing the SCU task context.
124 * @task_context: The buffer pointer for the SCU task context which is being
127 * The general io request construction is complete. The buffer assignment for
128 * the command buffer is complete. none Revisit task context construction to
129 * determine what is common for SSP/SMP/STP task context structures.
131 static void scu_sata_reqeust_construct_task_context(
132 struct scic_sds_request *sds_request,
133 struct scu_task_context *task_context)
136 struct scic_sds_controller *controller;
137 struct scic_sds_remote_device *target_device;
138 struct scic_sds_port *target_port;
140 controller = scic_sds_request_get_controller(sds_request);
141 target_device = scic_sds_request_get_device(sds_request);
142 target_port = scic_sds_request_get_port(sds_request);
144 /* Fill in the TC with the its required data */
145 task_context->abort = 0;
146 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
147 task_context->initiator_request = 1;
148 task_context->connection_rate = target_device->connection_rate;
149 task_context->protocol_engine_index =
150 scic_sds_controller_get_protocol_engine_group(controller);
151 task_context->logical_port_index =
152 scic_sds_port_get_index(target_port);
153 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
154 task_context->valid = SCU_TASK_CONTEXT_VALID;
155 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
157 task_context->remote_node_index =
158 scic_sds_remote_device_get_index(sds_request->target_device);
159 task_context->command_code = 0;
161 task_context->link_layer_control = 0;
162 task_context->do_not_dma_ssp_good_response = 1;
163 task_context->strict_ordering = 0;
164 task_context->control_frame = 0;
165 task_context->timeout_enable = 0;
166 task_context->block_guard_enable = 0;
168 task_context->address_modifier = 0;
169 task_context->task_phase = 0x01;
171 task_context->ssp_command_iu_length =
172 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
174 /* Set the first word of the H2D REG FIS */
175 task_context->type.words[0] = *(u32 *)sds_request->command_buffer;
177 if (sds_request->was_tag_assigned_by_user) {
179 * Build the task context now since we have already read
182 sds_request->post_context =
183 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
184 (scic_sds_controller_get_protocol_engine_group(
186 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
187 (scic_sds_port_get_index(target_port) <<
188 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
189 scic_sds_io_tag_get_index(sds_request->io_tag));
192 * Build the task context now since we have already read
194 * I/O tag index is not assigned because we have to wait
195 * until we get a TCi.
197 sds_request->post_context =
198 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
199 (scic_sds_controller_get_protocol_engine_group(
201 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
202 (scic_sds_port_get_index(target_port) <<
203 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
207 * Copy the physical address for the command buffer to the SCU Task
208 * Context. We must offset the command buffer by 4 bytes because the
209 * first 4 bytes are transfered in the body of the TC.
212 scic_io_request_get_dma_addr(sds_request,
213 (char *)sds_request->
217 task_context->command_iu_upper = upper_32_bits(dma_addr);
218 task_context->command_iu_lower = lower_32_bits(dma_addr);
220 /* SATA Requests do not have a response buffer */
221 task_context->response_iu_upper = 0;
222 task_context->response_iu_lower = 0;
229 * This method will perform any general sata request construction. What part of
230 * SATA IO request construction is general? none
232 static void scic_sds_stp_non_ncq_request_construct(
233 struct scic_sds_request *sci_req)
235 sci_req->has_started_substate_machine = true;
240 * @sci_req: This parameter specifies the request to be constructed as an
242 * @optimized_task_type: This parameter specifies whether the request is to be
243 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
244 * value of 1 indicates NCQ.
246 * This method will perform request construction common to all types of STP
247 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
248 * returns an indication as to whether the construction was successful.
250 static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
251 u8 optimized_task_type,
253 enum dma_data_direction dir)
255 struct scu_task_context *task_context = sci_req->task_context_buffer;
257 /* Build the STP task context structure */
258 scu_sata_reqeust_construct_task_context(sci_req, task_context);
260 /* Copy over the SGL elements */
261 scic_sds_request_build_sgl(sci_req);
263 /* Copy over the number of bytes to be transfered */
264 task_context->transfer_length_bytes = len;
266 if (dir == DMA_TO_DEVICE) {
268 * The difference between the DMA IN and DMA OUT request task type
269 * values are consistent with the difference between FPDMA READ
270 * and FPDMA WRITE values. Add the supplied task type parameter
271 * to this difference to set the task type properly for this
272 * DATA OUT (WRITE) case. */
273 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
274 - SCU_TASK_TYPE_DMA_IN);
277 * For the DATA IN (READ) case, simply save the supplied
278 * optimized task type. */
279 task_context->task_type = optimized_task_type;
285 * @sci_req: This parameter specifies the request to be constructed.
287 * This method will construct the STP UDMA request and its associated TC data.
288 * This method returns an indication as to whether the construction was
289 * successful. SCI_SUCCESS Currently this method always returns this value.
291 enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req,
293 enum dma_data_direction dir)
295 scic_sds_stp_optimized_request_construct(sci_req,
296 SCU_TASK_TYPE_FPDMAQ_READ,
302 * scu_stp_raw_request_construct_task_context -
303 * @sci_req: This parameter specifies the STP request object for which to
304 * construct a RAW command frame task context.
305 * @task_context: This parameter specifies the SCU specific task context buffer
308 * This method performs the operations common to all SATA/STP requests
309 * utilizing the raw frame method. none
311 static void scu_stp_raw_request_construct_task_context(
312 struct scic_sds_stp_request *stp_req,
313 struct scu_task_context *task_context)
315 struct scic_sds_request *sci_req = to_sci_req(stp_req);
317 scu_sata_reqeust_construct_task_context(sci_req, task_context);
319 task_context->control_frame = 0;
320 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
321 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
322 task_context->type.stp.fis_type = FIS_REGH2D;
323 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
326 void scic_stp_io_request_set_ncq_tag(
327 struct scic_sds_request *req,
331 * @note This could be made to return an error to the user if the user
332 * attempts to set the NCQ tag in the wrong state.
334 req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
338 void *scic_stp_io_request_get_h2d_reg_address(
339 struct scic_sds_request *req)
341 return req->command_buffer;
345 void *scic_stp_io_request_get_d2h_reg_address(struct scic_sds_request *sci_req)
347 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
349 return &stp_req->d2h_reg_fis;
356 * Get the next SGL element from the request. - Check on which SGL element pair
357 * we are working - if working on SLG pair element A - advance to element B -
358 * else - check to see if there are more SGL element pairs for this IO request
359 * - if there are more SGL element pairs - advance to the next pair and return
360 * element A struct scu_sgl_element*
362 static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
364 struct scu_sgl_element *current_sgl;
365 struct scic_sds_request *sci_req = to_sci_req(stp_req);
366 struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
368 if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
369 if (pio_sgl->sgl_pair->B.address_lower == 0 &&
370 pio_sgl->sgl_pair->B.address_upper == 0) {
373 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
374 current_sgl = &pio_sgl->sgl_pair->B;
377 if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
378 pio_sgl->sgl_pair->next_pair_upper == 0) {
383 phys_addr = pio_sgl->sgl_pair->next_pair_upper;
385 phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
387 pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
388 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
389 current_sgl = &pio_sgl->sgl_pair->A;
401 * This method processes a TC completion. The expected TC completion is for
402 * the transmission of the H2D register FIS containing the SATA/STP non-data
403 * request. This method always successfully processes the TC completion.
404 * SCI_SUCCESS This value is always returned.
406 static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
407 struct scic_sds_request *sci_req,
410 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
411 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
412 scic_sds_request_set_status(
413 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
416 sci_base_state_machine_change_state(
417 &sci_req->started_substate_machine,
418 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
424 * All other completion status cause the IO to be complete. If a NAK
425 * was received, then it is up to the user to retry the request. */
426 scic_sds_request_set_status(
428 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
429 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
432 sci_base_state_machine_change_state(
433 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
442 * @request: This parameter specifies the request for which a frame has been
444 * @frame_index: This parameter specifies the index of the frame that has been
447 * This method processes frames received from the target while waiting for a
448 * device to host register FIS. If a non-register FIS is received during this
449 * time, it is treated as a protocol violation from an IO perspective. Indicate
450 * if the received frame was processed successfully.
452 static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler(
453 struct scic_sds_request *sci_req,
456 enum sci_status status;
457 struct dev_to_host_fis *frame_header;
459 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
460 struct scic_sds_controller *scic = sci_req->owning_controller;
462 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
464 (void **)&frame_header);
466 if (status != SCI_SUCCESS) {
467 dev_err(scic_to_dev(sci_req->owning_controller),
468 "%s: SCIC IO Request 0x%p could not get frame header "
469 "for frame index %d, status %x\n",
470 __func__, stp_req, frame_index, status);
475 switch (frame_header->fis_type) {
477 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
479 (void **)&frame_buffer);
481 scic_sds_controller_copy_sata_response(&stp_req->d2h_reg_fis,
485 /* The command has completed with error */
486 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
487 SCI_FAILURE_IO_RESPONSE_VALID);
491 dev_warn(scic_to_dev(scic),
492 "%s: IO Request:0x%p Frame Id:%d protocol "
493 "violation occurred\n", __func__, stp_req,
496 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
497 SCI_FAILURE_PROTOCOL_VIOLATION);
501 sci_base_state_machine_change_state(&sci_req->state_machine,
502 SCI_BASE_REQUEST_STATE_COMPLETED);
504 /* Frame has been decoded return it to the controller */
505 scic_sds_controller_release_frame(scic, frame_index);
510 /* --------------------------------------------------------------------------- */
512 static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = {
513 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
514 .abort_handler = scic_sds_request_started_state_abort_handler,
515 .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
517 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
518 .abort_handler = scic_sds_request_started_state_abort_handler,
519 .frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler,
523 static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
526 struct scic_sds_request *sci_req = object;
530 scic_sds_stp_request_started_non_data_substate_handler_table,
531 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
534 scic_sds_remote_device_set_working_request(
535 sci_req->target_device, sci_req
539 static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
541 struct scic_sds_request *sci_req = object;
545 scic_sds_stp_request_started_non_data_substate_handler_table,
546 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
550 /* --------------------------------------------------------------------------- */
552 static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = {
553 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
554 .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
556 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
557 .enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
561 enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req)
563 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
565 scic_sds_stp_non_ncq_request_construct(sci_req);
567 /* Build the STP task context structure */
568 scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
570 sci_base_state_machine_construct(&sci_req->started_substate_machine,
572 scic_sds_stp_request_started_non_data_substate_table,
573 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE);
578 #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
580 /* transmit DATA_FIS from (current sgl + offset) for input
581 * parameter length. current sgl and offset is alreay stored in the IO request
583 static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
584 struct scic_sds_request *sci_req,
587 struct scic_sds_controller *scic = sci_req->owning_controller;
588 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
589 struct scu_task_context *task_context;
590 struct scu_sgl_element *current_sgl;
592 /* Recycle the TC and reconstruct it for sending out DATA FIS containing
593 * for the data from current_sgl+offset for the input length
595 task_context = scic_sds_controller_get_task_context_buffer(scic,
598 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
599 current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
601 current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
604 task_context->command_iu_upper = current_sgl->address_upper;
605 task_context->command_iu_lower = current_sgl->address_lower;
606 task_context->transfer_length_bytes = length;
607 task_context->type.stp.fis_type = FIS_DATA;
609 /* send the new TC out. */
610 return scic_controller_continue_io(sci_req);
613 static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
616 struct scu_sgl_element *current_sgl;
618 u32 remaining_bytes_in_current_sgl = 0;
619 enum sci_status status = SCI_SUCCESS;
620 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
622 sgl_offset = stp_req->type.pio.request_current.sgl_offset;
624 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
625 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
626 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
628 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
629 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
633 if (stp_req->type.pio.pio_transfer_bytes > 0) {
634 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
635 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
636 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
637 if (status == SCI_SUCCESS) {
638 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
640 /* update the current sgl, sgl_offset and save for future */
641 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
644 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
645 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
646 scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
648 if (status == SCI_SUCCESS) {
649 /* Sgl offset will be adjusted and saved for future */
650 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
651 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
652 stp_req->type.pio.pio_transfer_bytes = 0;
657 if (status == SCI_SUCCESS) {
658 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
666 * @stp_request: The request that is used for the SGL processing.
667 * @data_buffer: The buffer of data to be copied.
668 * @length: The length of the data transfer.
670 * Copy the data from the buffer for the length specified to the IO reqeust SGL
671 * specified data region. enum sci_status
673 static enum sci_status
674 scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
675 u8 *data_buf, u32 len)
677 struct scic_sds_request *sci_req;
678 struct isci_request *ireq;
681 struct sas_task *task;
682 struct scatterlist *sg;
686 sci_req = to_sci_req(stp_req);
687 ireq = scic_sds_request_get_user_request(sci_req);
688 task = isci_request_access_task(ireq);
691 if (task->num_scatter > 0) {
694 while (total_len > 0) {
695 struct page *page = sg_page(sg);
697 copy_len = min_t(int, total_len, sg_dma_len(sg));
698 kaddr = kmap_atomic(page, KM_IRQ0);
699 memcpy(kaddr + sg->offset, src_addr, copy_len);
700 kunmap_atomic(kaddr, KM_IRQ0);
701 total_len -= copy_len;
702 src_addr += copy_len;
706 BUG_ON(task->total_xfer_len < total_len);
707 memcpy(task->scatter, src_addr, total_len);
715 * @sci_req: The PIO DATA IN request that is to receive the data.
716 * @data_buffer: The buffer to copy from.
718 * Copy the data buffer to the io request data region. enum sci_status
720 static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
721 struct scic_sds_stp_request *sci_req,
724 enum sci_status status;
727 * If there is less than 1K remaining in the transfer request
728 * copy just the data for the transfer */
729 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
730 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
731 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
733 if (status == SCI_SUCCESS)
734 sci_req->type.pio.pio_transfer_bytes = 0;
736 /* We are transfering the whole frame so copy */
737 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
738 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
740 if (status == SCI_SUCCESS)
741 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
754 static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
755 struct scic_sds_request *sci_req,
758 enum sci_status status = SCI_SUCCESS;
760 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
761 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
762 scic_sds_request_set_status(
763 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
766 sci_base_state_machine_change_state(
767 &sci_req->started_substate_machine,
768 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
774 * All other completion status cause the IO to be complete. If a NAK
775 * was received, then it is up to the user to retry the request. */
776 scic_sds_request_set_status(
778 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
779 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
782 sci_base_state_machine_change_state(
783 &sci_req->state_machine,
784 SCI_BASE_REQUEST_STATE_COMPLETED
792 static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req,
795 struct scic_sds_controller *scic = sci_req->owning_controller;
796 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
797 struct isci_request *ireq = sci_req->ireq;
798 struct sas_task *task = isci_request_access_task(ireq);
799 struct dev_to_host_fis *frame_header;
800 enum sci_status status;
803 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
805 (void **)&frame_header);
807 if (status != SCI_SUCCESS) {
808 dev_err(scic_to_dev(scic),
809 "%s: SCIC IO Request 0x%p could not get frame header "
810 "for frame index %d, status %x\n",
811 __func__, stp_req, frame_index, status);
815 switch (frame_header->fis_type) {
817 /* Get from the frame buffer the PIO Setup Data */
818 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
820 (void **)&frame_buffer);
822 /* Get the data from the PIO Setup The SCU Hardware returns
823 * first word in the frame_header and the rest of the data is in
824 * the frame buffer so we need to back up one dword
827 /* transfer_count: first 16bits in the 4th dword */
828 stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
830 /* ending_status: 4th byte in the 3rd dword */
831 stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
833 scic_sds_controller_copy_sata_response(&stp_req->d2h_reg_fis,
837 stp_req->d2h_reg_fis.status = stp_req->type.pio.ending_status;
839 /* The next state is dependent on whether the
840 * request was PIO Data-in or Data out
842 if (task->data_dir == DMA_FROM_DEVICE) {
843 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
844 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
845 } else if (task->data_dir == DMA_TO_DEVICE) {
847 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
848 if (status != SCI_SUCCESS)
850 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
851 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
855 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
856 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
859 if (frame_header->status & ATA_BUSY) {
860 /* Now why is the drive sending a D2H Register FIS when
861 * it is still busy? Do nothing since we are still in
864 dev_dbg(scic_to_dev(scic),
865 "%s: SCIC PIO Request 0x%p received "
866 "D2H Register FIS with BSY status "
867 "0x%x\n", __func__, stp_req,
868 frame_header->status);
872 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
874 (void **)&frame_buffer);
876 scic_sds_controller_copy_sata_response(&stp_req->d2h_reg_fis,
880 scic_sds_request_set_status(sci_req,
881 SCU_TASK_DONE_CHECK_RESPONSE,
882 SCI_FAILURE_IO_RESPONSE_VALID);
884 sci_base_state_machine_change_state(&sci_req->state_machine,
885 SCI_BASE_REQUEST_STATE_COMPLETED);
888 /* FIXME: what do we do here? */
892 /* Frame is decoded return it to the controller */
893 scic_sds_controller_release_frame(scic, frame_index);
898 static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req,
901 enum sci_status status;
902 struct dev_to_host_fis *frame_header;
903 struct sata_fis_data *frame_buffer;
904 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
905 struct scic_sds_controller *scic = sci_req->owning_controller;
907 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
909 (void **)&frame_header);
911 if (status != SCI_SUCCESS) {
912 dev_err(scic_to_dev(scic),
913 "%s: SCIC IO Request 0x%p could not get frame header "
914 "for frame index %d, status %x\n",
915 __func__, stp_req, frame_index, status);
919 if (frame_header->fis_type == FIS_DATA) {
920 if (stp_req->type.pio.request_current.sgl_pair == NULL) {
921 sci_req->saved_rx_frame_index = frame_index;
922 stp_req->type.pio.pio_transfer_bytes = 0;
924 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
926 (void **)&frame_buffer);
928 status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
931 /* Frame is decoded return it to the controller */
932 scic_sds_controller_release_frame(scic, frame_index);
935 /* Check for the end of the transfer, are there more
936 * bytes remaining for this data transfer
938 if (status != SCI_SUCCESS ||
939 stp_req->type.pio.pio_transfer_bytes != 0)
942 if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
943 scic_sds_request_set_status(sci_req,
944 SCU_TASK_DONE_CHECK_RESPONSE,
945 SCI_FAILURE_IO_RESPONSE_VALID);
947 sci_base_state_machine_change_state(&sci_req->state_machine,
948 SCI_BASE_REQUEST_STATE_COMPLETED);
950 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
951 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
954 dev_err(scic_to_dev(scic),
955 "%s: SCIC PIO Request 0x%p received frame %d "
956 "with fis type 0x%02x when expecting a data "
957 "fis.\n", __func__, stp_req, frame_index,
958 frame_header->fis_type);
960 scic_sds_request_set_status(sci_req,
962 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
964 sci_base_state_machine_change_state(&sci_req->state_machine,
965 SCI_BASE_REQUEST_STATE_COMPLETED);
967 /* Frame is decoded return it to the controller */
968 scic_sds_controller_release_frame(scic, frame_index);
982 static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(
984 struct scic_sds_request *sci_req,
987 enum sci_status status = SCI_SUCCESS;
988 bool all_frames_transferred = false;
989 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
991 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
992 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
994 if (stp_req->type.pio.pio_transfer_bytes != 0) {
995 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
996 if (status == SCI_SUCCESS) {
997 if (stp_req->type.pio.pio_transfer_bytes == 0)
998 all_frames_transferred = true;
1000 } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
1002 * this will happen if the all data is written at the
1003 * first time after the pio setup fis is received
1005 all_frames_transferred = true;
1008 /* all data transferred. */
1009 if (all_frames_transferred) {
1011 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE
1012 * and wait for PIO_SETUP fis / or D2H REg fis. */
1013 sci_base_state_machine_change_state(
1014 &sci_req->started_substate_machine,
1015 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1022 * All other completion status cause the IO to be complete. If a NAK
1023 * was received, then it is up to the user to retry the request. */
1024 scic_sds_request_set_status(
1026 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1027 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1030 sci_base_state_machine_change_state(
1031 &sci_req->state_machine,
1032 SCI_BASE_REQUEST_STATE_COMPLETED
1042 * @request: This is the request which is receiving the event.
1043 * @event_code: This is the event code that the request on which the request is
1044 * expected to take action.
1046 * This method will handle any link layer events while waiting for the data
1047 * frame. enum sci_status SCI_SUCCESS SCI_FAILURE
1049 static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
1050 struct scic_sds_request *request,
1053 enum sci_status status;
1055 switch (scu_get_event_specifier(event_code)) {
1056 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
1058 * We are waiting for data and the SCU has R_ERR the data frame.
1059 * Go back to waiting for the D2H Register FIS */
1060 sci_base_state_machine_change_state(
1061 &request->started_substate_machine,
1062 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1065 status = SCI_SUCCESS;
1069 dev_err(scic_to_dev(request->owning_controller),
1070 "%s: SCIC PIO Request 0x%p received unexpected "
1072 __func__, request, event_code);
1074 /* / @todo Should we fail the PIO request when we get an unexpected event? */
1075 status = SCI_FAILURE;
1082 /* --------------------------------------------------------------------------- */
1084 static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = {
1085 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
1086 .abort_handler = scic_sds_request_started_state_abort_handler,
1087 .tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
1089 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
1090 .abort_handler = scic_sds_request_started_state_abort_handler,
1091 .frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler
1093 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
1094 .abort_handler = scic_sds_request_started_state_abort_handler,
1095 .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler,
1096 .frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler
1098 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
1099 .abort_handler = scic_sds_request_started_state_abort_handler,
1100 .tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
1104 static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
1107 struct scic_sds_request *sci_req = object;
1111 scic_sds_stp_request_started_pio_substate_handler_table,
1112 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
1115 scic_sds_remote_device_set_working_request(
1116 sci_req->target_device, sci_req);
1119 static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
1121 struct scic_sds_request *sci_req = object;
1125 scic_sds_stp_request_started_pio_substate_handler_table,
1126 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1130 static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
1133 struct scic_sds_request *sci_req = object;
1137 scic_sds_stp_request_started_pio_substate_handler_table,
1138 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
1142 static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
1145 struct scic_sds_request *sci_req = object;
1149 scic_sds_stp_request_started_pio_substate_handler_table,
1150 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
1154 /* --------------------------------------------------------------------------- */
1156 static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = {
1157 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
1158 .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
1160 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
1161 .enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
1163 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
1164 .enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
1166 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
1167 .enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
1172 scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
1175 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1176 struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;
1178 scic_sds_stp_non_ncq_request_construct(sci_req);
1180 scu_stp_raw_request_construct_task_context(stp_req,
1181 sci_req->task_context_buffer);
1183 pio->current_transfer_bytes = 0;
1184 pio->ending_error = 0;
1185 pio->ending_status = 0;
1187 pio->request_current.sgl_offset = 0;
1188 pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;
1190 if (copy_rx_frame) {
1191 scic_sds_request_build_sgl(sci_req);
1192 /* Since the IO request copy of the TC contains the same data as
1193 * the actual TC this pointer is vaild for either.
1195 pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
1197 /* The user does not want the data copied to the SGL buffer location */
1198 pio->request_current.sgl_pair = NULL;
1201 sci_base_state_machine_construct(&sci_req->started_substate_machine,
1203 scic_sds_stp_request_started_pio_substate_table,
1204 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE);
1209 static void scic_sds_stp_request_udma_complete_request(
1210 struct scic_sds_request *request,
1212 enum sci_status sci_status)
1214 scic_sds_request_set_status(request, scu_status, sci_status);
1215 sci_base_state_machine_change_state(&request->state_machine,
1216 SCI_BASE_REQUEST_STATE_COMPLETED);
1219 static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
1222 struct scic_sds_controller *scic = sci_req->owning_controller;
1223 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1224 struct dev_to_host_fis *frame_header;
1225 enum sci_status status;
1228 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1230 (void **)&frame_header);
1232 if ((status == SCI_SUCCESS) &&
1233 (frame_header->fis_type == FIS_REGD2H)) {
1234 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1236 (void **)&frame_buffer);
1238 scic_sds_controller_copy_sata_response(&stp_req->d2h_reg_fis,
1243 scic_sds_controller_release_frame(scic, frame_index);
1248 static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
1249 struct scic_sds_request *sci_req,
1250 u32 completion_code)
1252 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1253 enum sci_status status = SCI_SUCCESS;
1255 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1256 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1257 scic_sds_stp_request_udma_complete_request(sci_req,
1261 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
1262 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1264 * We must check ther response buffer to see if the D2H Register FIS was
1265 * received before we got the TC completion. */
1266 if (stp_req->d2h_reg_fis.fis_type == FIS_REGD2H) {
1267 scic_sds_remote_device_suspend(sci_req->target_device,
1268 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1270 scic_sds_stp_request_udma_complete_request(sci_req,
1271 SCU_TASK_DONE_CHECK_RESPONSE,
1272 SCI_FAILURE_IO_RESPONSE_VALID);
1275 * If we have an error completion status for the TC then we can expect a
1276 * D2H register FIS from the device so we must change state to wait for it */
1277 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
1278 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
1283 * / @todo Check to see if any of these completion status need to wait for
1284 * / the device to host register fis. */
1285 /* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
1286 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
1287 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1288 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
1289 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
1290 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
1291 scic_sds_remote_device_suspend(sci_req->target_device,
1292 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1293 /* Fall through to the default case */
1295 /* All other completion status cause the IO to be complete. */
1296 scic_sds_stp_request_udma_complete_request(sci_req,
1297 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1298 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1305 static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
1306 struct scic_sds_request *sci_req,
1309 enum sci_status status;
1311 /* Use the general frame handler to copy the resposne data */
1312 status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
1314 if (status != SCI_SUCCESS)
1317 scic_sds_stp_request_udma_complete_request(sci_req,
1318 SCU_TASK_DONE_CHECK_RESPONSE,
1319 SCI_FAILURE_IO_RESPONSE_VALID);
1324 /* --------------------------------------------------------------------------- */
1326 static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = {
1327 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
1328 .abort_handler = scic_sds_request_started_state_abort_handler,
1329 .tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
1330 .frame_handler = scic_sds_stp_request_udma_general_frame_handler,
1332 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
1333 .abort_handler = scic_sds_request_started_state_abort_handler,
1334 .frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler,
1338 static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
1341 struct scic_sds_request *sci_req = object;
1345 scic_sds_stp_request_started_udma_substate_handler_table,
1346 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
1353 * This state is entered when there is an TC completion failure. The hardware
1354 * received an unexpected condition while processing the IO request and now
1355 * will UF the D2H register FIS to complete the IO.
1357 static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
1360 struct scic_sds_request *sci_req = object;
1364 scic_sds_stp_request_started_udma_substate_handler_table,
1365 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
1369 /* --------------------------------------------------------------------------- */
1371 static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = {
1372 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
1373 .enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
1375 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
1376 .enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
1380 enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
1382 enum dma_data_direction dir)
1384 scic_sds_stp_non_ncq_request_construct(sci_req);
1386 scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN,
1389 sci_base_state_machine_construct(
1390 &sci_req->started_substate_machine,
1392 scic_sds_stp_request_started_udma_substate_table,
1393 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
1404 * This method processes a TC completion. The expected TC completion is for
1405 * the transmission of the H2D register FIS containing the SATA/STP non-data
1406 * request. This method always successfully processes the TC completion.
1407 * SCI_SUCCESS This value is always returned.
1409 static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
1410 struct scic_sds_request *sci_req,
1411 u32 completion_code)
1413 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1414 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1415 scic_sds_request_set_status(
1416 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1419 sci_base_state_machine_change_state(
1420 &sci_req->started_substate_machine,
1421 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
1427 * All other completion status cause the IO to be complete. If a NAK
1428 * was received, then it is up to the user to retry the request. */
1429 scic_sds_request_set_status(
1431 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1432 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1435 sci_base_state_machine_change_state(
1436 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
1448 * This method processes a TC completion. The expected TC completion is for
1449 * the transmission of the H2D register FIS containing the SATA/STP non-data
1450 * request. This method always successfully processes the TC completion.
1451 * SCI_SUCCESS This value is always returned.
1453 static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
1454 struct scic_sds_request *sci_req,
1455 u32 completion_code)
1457 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1458 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1459 scic_sds_request_set_status(
1460 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1463 sci_base_state_machine_change_state(
1464 &sci_req->started_substate_machine,
1465 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
1471 * All other completion status cause the IO to be complete. If a NAK
1472 * was received, then it is up to the user to retry the request. */
1473 scic_sds_request_set_status(
1475 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1476 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1479 sci_base_state_machine_change_state(&sci_req->state_machine,
1480 SCI_BASE_REQUEST_STATE_COMPLETED);
1489 * @request: This parameter specifies the request for which a frame has been
1491 * @frame_index: This parameter specifies the index of the frame that has been
1494 * This method processes frames received from the target while waiting for a
1495 * device to host register FIS. If a non-register FIS is received during this
1496 * time, it is treated as a protocol violation from an IO perspective. Indicate
1497 * if the received frame was processed successfully.
1499 static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
1500 struct scic_sds_request *sci_req,
1503 enum sci_status status;
1504 struct dev_to_host_fis *frame_header;
1506 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1507 struct scic_sds_controller *scic = sci_req->owning_controller;
1509 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1511 (void **)&frame_header);
1512 if (status != SCI_SUCCESS) {
1513 dev_err(scic_to_dev(scic),
1514 "%s: SCIC IO Request 0x%p could not get frame header "
1515 "for frame index %d, status %x\n",
1516 __func__, stp_req, frame_index, status);
1520 switch (frame_header->fis_type) {
1522 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1524 (void **)&frame_buffer);
1526 scic_sds_controller_copy_sata_response(&stp_req->d2h_reg_fis,
1530 /* The command has completed with error */
1531 scic_sds_request_set_status(sci_req,
1532 SCU_TASK_DONE_CHECK_RESPONSE,
1533 SCI_FAILURE_IO_RESPONSE_VALID);
1537 dev_warn(scic_to_dev(scic),
1538 "%s: IO Request:0x%p Frame Id:%d protocol "
1539 "violation occurred\n", __func__, stp_req,
1542 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
1543 SCI_FAILURE_PROTOCOL_VIOLATION);
1547 sci_base_state_machine_change_state(&sci_req->state_machine,
1548 SCI_BASE_REQUEST_STATE_COMPLETED);
1550 /* Frame has been decoded return it to the controller */
1551 scic_sds_controller_release_frame(scic, frame_index);
1556 /* --------------------------------------------------------------------------- */
1558 static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = {
1559 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
1560 .abort_handler = scic_sds_request_started_state_abort_handler,
1561 .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
1563 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
1564 .abort_handler = scic_sds_request_started_state_abort_handler,
1565 .tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
1567 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
1568 .abort_handler = scic_sds_request_started_state_abort_handler,
1569 .frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler,
1573 static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
1576 struct scic_sds_request *sci_req = object;
1580 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1581 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
1584 scic_sds_remote_device_set_working_request(
1585 sci_req->target_device, sci_req
1589 static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
1592 struct scic_sds_request *sci_req = object;
1593 struct scu_task_context *task_context;
1594 struct host_to_dev_fis *h2d_fis;
1595 enum sci_status status;
1597 /* Clear the SRST bit */
1598 h2d_fis = scic_stp_io_request_get_h2d_reg_address(sci_req);
1599 h2d_fis->control = 0;
1601 /* Clear the TC control bit */
1602 task_context = scic_sds_controller_get_task_context_buffer(
1603 sci_req->owning_controller, sci_req->io_tag);
1604 task_context->control_frame = 0;
1606 status = scic_controller_continue_io(sci_req);
1607 if (status == SCI_SUCCESS) {
1610 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1611 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
1616 static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
1619 struct scic_sds_request *sci_req = object;
1623 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1624 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
1628 static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = {
1629 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
1630 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
1632 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
1633 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
1635 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
1636 .enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
1640 enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req)
1642 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1644 scic_sds_stp_non_ncq_request_construct(sci_req);
1646 /* Build the STP task context structure */
1647 scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
1649 sci_base_state_machine_construct(&sci_req->started_substate_machine,
1651 scic_sds_stp_request_started_soft_reset_substate_table,
1652 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);