/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "sata.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

/**
 * This method returns the sgl element pair for the specified sgl_pair index.
 * @sci_req: This parameter specifies the IO request for which to retrieve
 *    the Scatter-Gather List element pair.
 * @sgl_pair_index: This parameter specifies the index of the SGL element
 *    pair to be retrieved.
 *
 * This method returns a pointer to a struct scu_sgl_element_pair.
 */
static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
        struct scic_sds_request *sci_req,
        u32 sgl_pair_index
        ) {
        struct scu_task_context *task_context;

        task_context = (struct scu_task_context *)sci_req->task_context_buffer;

        if (sgl_pair_index == 0) {
                return &task_context->sgl_pair_ab;
        } else if (sgl_pair_index == 1) {
                return &task_context->sgl_pair_cd;
        }

        return &sci_req->sg_table[sgl_pair_index - 2];
}

/**
 * This function will build the SGL list for an IO request.
 * @sds_request: This parameter specifies the IO request for which to build
 *    the Scatter-Gather List.
 *
 */
static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
{
        struct isci_request *isci_request = sci_req_to_ireq(sds_request);
        struct isci_host *isci_host = isci_request->isci_host;
        struct sas_task *task = isci_request_access_task(isci_request);
        struct scatterlist *sg = NULL;
        dma_addr_t dma_addr;
        u32 sg_idx = 0;
        struct scu_sgl_element_pair *scu_sg   = NULL;
        struct scu_sgl_element_pair *prev_sg  = NULL;

        if (task->num_scatter > 0) {
                sg = task->scatter;

                while (sg) {
                        scu_sg = scic_sds_request_get_sgl_element_pair(
                                        sds_request,
                                        sg_idx);

                        SCU_SGL_COPY(scu_sg->A, sg);

                        sg = sg_next(sg);

                        if (sg) {
                                SCU_SGL_COPY(scu_sg->B, sg);
                                sg = sg_next(sg);
                        } else
                                SCU_SGL_ZERO(scu_sg->B);

                        if (prev_sg) {
                                dma_addr =
                                        scic_io_request_get_dma_addr(
                                                        sds_request,
                                                        scu_sg);

                                prev_sg->next_pair_upper =
                                        upper_32_bits(dma_addr);
                                prev_sg->next_pair_lower =
                                        lower_32_bits(dma_addr);
                        }

                        prev_sg = scu_sg;
                        sg_idx++;
                }
        } else {        /* handle when no sg */
                scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
                                                               sg_idx);

                dma_addr = dma_map_single(&isci_host->pdev->dev,
                                          task->scatter,
                                          task->total_xfer_len,
                                          task->data_dir);

                isci_request->zero_scatter_daddr = dma_addr;

                scu_sg->A.length = task->total_xfer_len;
                scu_sg->A.address_upper = upper_32_bits(dma_addr);
                scu_sg->A.address_lower = lower_32_bits(dma_addr);
        }

        if (scu_sg) {
                scu_sg->next_pair_upper = 0;
                scu_sg->next_pair_lower = 0;
        }
}
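
/*
 * Note on the resulting layout: SGL pairs 0 and 1 live in the hardware
 * task context itself (sgl_pair_ab / sgl_pair_cd), while any further
 * pairs spill into the request's sg_table.  Each pair's
 * next_pair_{upper,lower} fields hold the DMA address of its successor,
 * and the final pair's link is zeroed above.
 */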

static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
{
        struct ssp_cmd_iu *cmd_iu;
        struct isci_request *ireq = sci_req_to_ireq(sci_req);
        struct sas_task *task = isci_request_access_task(ireq);

        cmd_iu = &sci_req->ssp.cmd;

        memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
        cmd_iu->add_cdb_len = 0;
        cmd_iu->_r_a = 0;
        cmd_iu->_r_b = 0;
        cmd_iu->en_fburst = 0; /* unsupported */
        cmd_iu->task_prio = task->ssp_task.task_prio;
        cmd_iu->task_attr = task->ssp_task.task_attr;
        cmd_iu->_r_c = 0;

        sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
                       sizeof(task->ssp_task.cdb) / sizeof(u32));
}

static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
{
        struct ssp_task_iu *task_iu;
        struct isci_request *ireq = sci_req_to_ireq(sci_req);
        struct sas_task *task = isci_request_access_task(ireq);
        struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

        task_iu = &sci_req->ssp.tmf;

        memset(task_iu, 0, sizeof(struct ssp_task_iu));

        memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

        task_iu->task_func = isci_tmf->tmf_code;
        task_iu->task_tag =
                (ireq->ttype == tmf_task) ?
                isci_tmf->io_tag :
                SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * This method will fill in the SCU Task Context for any type of SSP request.
 * @sds_request: This parameter specifies the SSP request whose task context
 *    is being constructed.
 * @task_context: This parameter specifies the SCU task context buffer to be
 *    filled in.
 *
 */
static void scu_ssp_request_construct_task_context(
        struct scic_sds_request *sds_request,
        struct scu_task_context *task_context)
{
        dma_addr_t dma_addr;
        struct scic_sds_remote_device *target_device;
        struct scic_sds_port *target_port;

        target_device = scic_sds_request_get_device(sds_request);
        target_port = scic_sds_request_get_port(sds_request);

        /* Fill in the TC with its required data */
        task_context->abort = 0;
        task_context->priority = 0;
        task_context->initiator_request = 1;
        task_context->connection_rate = target_device->connection_rate;
        task_context->protocol_engine_index =
                scic_sds_controller_get_protocol_engine_group(controller);
        task_context->logical_port_index =
                scic_sds_port_get_index(target_port);
        task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
        task_context->valid = SCU_TASK_CONTEXT_VALID;
        task_context->context_type = SCU_TASK_CONTEXT_TYPE;

        task_context->remote_node_index =
                scic_sds_remote_device_get_index(sds_request->target_device);
        task_context->command_code = 0;

        task_context->link_layer_control = 0;
        task_context->do_not_dma_ssp_good_response = 1;
        task_context->strict_ordering = 0;
        task_context->control_frame = 0;
        task_context->timeout_enable = 0;
        task_context->block_guard_enable = 0;

        task_context->address_modifier = 0;

        /* task_context->type.ssp.tag = sci_req->io_tag; */
        task_context->task_phase = 0x01;

        if (sds_request->was_tag_assigned_by_user) {
                /*
                 * Build the task context now since we have already read
                 * the data
                 */
                sds_request->post_context =
                        (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
                         (scic_sds_controller_get_protocol_engine_group(
                                                        controller) <<
                          SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
                         (scic_sds_port_get_index(target_port) <<
                          SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
                          ISCI_TAG_TCI(sds_request->io_tag));
        } else {
                /*
                 * Build the task context now since we have already read
                 * the data
                 *
                 * I/O tag index is not assigned because we have to wait
                 * until we get a TCi
                 */
                sds_request->post_context =
                        (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
                         (scic_sds_controller_get_protocol_engine_group(
                                                        owning_controller) <<
                          SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
                         (scic_sds_port_get_index(target_port) <<
                          SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
        }

        /*
         * Copy the physical address for the command buffer to the
         * SCU Task Context
         */
        dma_addr = scic_io_request_get_dma_addr(sds_request,
                                                &sds_request->ssp.cmd);

        task_context->command_iu_upper = upper_32_bits(dma_addr);
        task_context->command_iu_lower = lower_32_bits(dma_addr);

        /*
         * Copy the physical address for the response buffer to the
         * SCU Task Context
         */
        dma_addr = scic_io_request_get_dma_addr(sds_request,
                                                &sds_request->ssp.rsp);

        task_context->response_iu_upper = upper_32_bits(dma_addr);
        task_context->response_iu_lower = lower_32_bits(dma_addr);
}

/**
 * This method will fill in the SCU Task Context for an SSP IO request.
 * @sci_req: This parameter specifies the SSP IO request whose task context
 *    is being constructed.
 * @dir: This parameter specifies the DMA data direction for the request.
 * @len: This parameter specifies the transfer length, in bytes.
 *
 */
static void scu_ssp_io_request_construct_task_context(
        struct scic_sds_request *sci_req,
        enum dma_data_direction dir,
        u32 len)
{
        struct scu_task_context *task_context;

        task_context = scic_sds_request_get_task_context(sci_req);

        scu_ssp_request_construct_task_context(sci_req, task_context);

        task_context->ssp_command_iu_length =
                sizeof(struct ssp_cmd_iu) / sizeof(u32);
        task_context->type.ssp.frame_type = SSP_COMMAND;

        switch (dir) {
        case DMA_FROM_DEVICE:
        case DMA_NONE:
        default:
                task_context->task_type = SCU_TASK_TYPE_IOREAD;
                break;
        case DMA_TO_DEVICE:
                task_context->task_type = SCU_TASK_TYPE_IOWRITE;
                break;
        }

        task_context->transfer_length_bytes = len;

        if (task_context->transfer_length_bytes > 0)
                scic_sds_request_build_sgl(sci_req);
}

/**
 * This method will fill in the SCU Task Context for an SSP Task request.
 *    The following important settings are utilized:
 *    - priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *      request is issued ahead of other tasks destined for the same
 *      Remote Node.
 *    - task_type == SCU_TASK_TYPE_IOREAD.  This simply indicates that a
 *      normal request type (i.e. non-raw frame) is being utilized to
 *      perform task management.
 *    - control_frame == 1.  This ensures that the proper endianness is
 *      set so that the bytes are transmitted in the right order for a
 *      task frame.
 * @sci_req: This parameter specifies the task request object being
 *    constructed.
 *
 */
static void scu_ssp_task_request_construct_task_context(
        struct scic_sds_request *sci_req)
{
        struct scu_task_context *task_context;

        task_context = scic_sds_request_get_task_context(sci_req);

        scu_ssp_request_construct_task_context(sci_req, task_context);

        task_context->control_frame                = 1;
        task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
        task_context->task_type                    = SCU_TASK_TYPE_RAW_FRAME;
        task_context->transfer_length_bytes        = 0;
        task_context->type.ssp.frame_type          = SSP_TASK;
        task_context->ssp_command_iu_length =
                sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * This method will fill in the SCU Task Context for any type of SATA
 *    request.  This is called from the various SATA constructors.
 * @sci_req: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * The general io request construction is complete. The buffer assignment
 * for the command buffer is complete. Revisit task context construction
 * to determine what is common for SSP/SMP/STP task context structures.
 */
static void scu_sata_request_construct_task_context(
        struct scic_sds_request *sci_req,
        struct scu_task_context *task_context)
{
        dma_addr_t dma_addr;
        struct scic_sds_remote_device *target_device;
        struct scic_sds_port *target_port;

        target_device = scic_sds_request_get_device(sci_req);
        target_port = scic_sds_request_get_port(sci_req);

        /* Fill in the TC with its required data */
        task_context->abort = 0;
        task_context->priority = SCU_TASK_PRIORITY_NORMAL;
        task_context->initiator_request = 1;
        task_context->connection_rate = target_device->connection_rate;
        task_context->protocol_engine_index =
                scic_sds_controller_get_protocol_engine_group(controller);
        task_context->logical_port_index =
                scic_sds_port_get_index(target_port);
        task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
        task_context->valid = SCU_TASK_CONTEXT_VALID;
        task_context->context_type = SCU_TASK_CONTEXT_TYPE;

        task_context->remote_node_index =
                scic_sds_remote_device_get_index(sci_req->target_device);
        task_context->command_code = 0;

        task_context->link_layer_control = 0;
        task_context->do_not_dma_ssp_good_response = 1;
        task_context->strict_ordering = 0;
        task_context->control_frame = 0;
        task_context->timeout_enable = 0;
        task_context->block_guard_enable = 0;

        task_context->address_modifier = 0;
        task_context->task_phase = 0x01;

        task_context->ssp_command_iu_length =
                (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

        /* Set the first word of the H2D REG FIS */
        task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;

        if (sci_req->was_tag_assigned_by_user) {
                /*
                 * Build the task context now since we have already read
                 * the data
                 */
                sci_req->post_context =
                        (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
                         (scic_sds_controller_get_protocol_engine_group(
                                                        controller) <<
                          SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
                         (scic_sds_port_get_index(target_port) <<
                          SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
                          ISCI_TAG_TCI(sci_req->io_tag));
        } else {
                /*
                 * Build the task context now since we have already read
                 * the data.
                 * I/O tag index is not assigned because we have to wait
                 * until we get a TCi.
                 */
                sci_req->post_context =
                        (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
                         (scic_sds_controller_get_protocol_engine_group(
                                                        controller) <<
                          SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
                         (scic_sds_port_get_index(target_port) <<
                          SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
        }

        /*
         * Copy the physical address for the command buffer to the SCU Task
         * Context. We must offset the command buffer by 4 bytes because the
         * first 4 bytes are transferred in the body of the TC.
         */
        dma_addr = scic_io_request_get_dma_addr(sci_req,
                                                ((char *) &sci_req->stp.cmd) +
                                                sizeof(u32));

        task_context->command_iu_upper = upper_32_bits(dma_addr);
        task_context->command_iu_lower = lower_32_bits(dma_addr);

        /* SATA Requests do not have a response buffer */
        task_context->response_iu_upper = 0;
        task_context->response_iu_lower = 0;
}

/**
 * scu_stp_raw_request_construct_task_context -
 * @stp_req: This parameter specifies the STP request object for which to
 *    construct a RAW command frame task context.
 * @task_context: This parameter specifies the SCU specific task context buffer
 *    to construct.
 *
 * This method performs the operations common to all SATA/STP requests
 * utilizing the raw frame method.
 */
static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req,
                                                       struct scu_task_context *task_context)
{
        struct scic_sds_request *sci_req = to_sci_req(stp_req);

        scu_sata_request_construct_task_context(sci_req, task_context);

        task_context->control_frame         = 0;
        task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
        task_context->task_type             = SCU_TASK_TYPE_SATA_RAW_FRAME;
        task_context->type.stp.fis_type     = FIS_REGH2D;
        task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status
scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
                                   bool copy_rx_frame)
{
        struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
        struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

        scu_stp_raw_request_construct_task_context(stp_req,
                                                   sci_req->task_context_buffer);

        pio->current_transfer_bytes = 0;
        pio->ending_error = 0;
        pio->ending_status = 0;

        pio->request_current.sgl_offset = 0;
        pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;

        if (copy_rx_frame) {
                scic_sds_request_build_sgl(sci_req);
                /* Since the IO request copy of the TC contains the same data as
                 * the actual TC this pointer is valid for either.
                 */
                pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
        } else {
                /* The user does not want the data copied to the SGL buffer location */
                pio->request_current.sgl_pair = NULL;
        }

        return SCI_SUCCESS;
}

/**
 *
 * @sci_req: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to be
 *    a UDMA request or an NCQ request.  Callers pass the DMA-in (read)
 *    variant of the SCU task type (SCU_TASK_TYPE_DMA_IN for UDMA,
 *    SCU_TASK_TYPE_FPDMAQ_READ for NCQ); the write variant is derived
 *    below when the direction is DMA_TO_DEVICE.
 * @len: This parameter specifies the transfer length, in bytes.
 * @dir: This parameter specifies the DMA data direction.
 *
 * This method will perform request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).
 */
static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
                                                     u8 optimized_task_type,
                                                     u32 len,
                                                     enum dma_data_direction dir)
{
        struct scu_task_context *task_context = sci_req->task_context_buffer;

        /* Build the STP task context structure */
        scu_sata_request_construct_task_context(sci_req, task_context);

        /* Copy over the SGL elements */
        scic_sds_request_build_sgl(sci_req);

        /* Copy over the number of bytes to be transferred */
        task_context->transfer_length_bytes = len;

        if (dir == DMA_TO_DEVICE) {
                /*
                 * The difference between the DMA IN and DMA OUT request task type
                 * values is consistent with the difference between FPDMA READ
                 * and FPDMA WRITE values.  Add the supplied task type parameter
                 * to this difference to set the task type properly for this
                 * DATA OUT (WRITE) case. */
                task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
                                                                 - SCU_TASK_TYPE_DMA_IN);
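                /*
                 * Worked example (illustrative, relying on the enum
                 * spacing described above): an optimized_task_type of
                 * SCU_TASK_TYPE_FPDMAQ_READ is translated here into the
                 * corresponding FPDMA WRITE task type.
                 */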
        } else {
                /*
                 * For the DATA IN (READ) case, simply save the supplied
                 * optimized task type. */
                task_context->task_type = optimized_task_type;
        }
}

static enum sci_status
scic_io_request_construct_sata(struct scic_sds_request *sci_req,
                               u32 len,
                               enum dma_data_direction dir,
                               bool copy)
{
        struct isci_request *ireq = sci_req_to_ireq(sci_req);
        struct sas_task *task = isci_request_access_task(ireq);

        /* check for management protocols */
        if (ireq->ttype == tmf_task) {
                struct isci_tmf *tmf = isci_request_access_tmf(ireq);

                if (tmf->tmf_code == isci_tmf_sata_srst_high ||
                    tmf->tmf_code == isci_tmf_sata_srst_low) {
                        scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
                                                                   sci_req->task_context_buffer);
                        return SCI_SUCCESS;
                } else {
                        dev_err(scic_to_dev(sci_req->owning_controller),
                                "%s: Request 0x%p received un-handled SAT "
                                "management protocol 0x%x.\n",
                                __func__, sci_req, tmf->tmf_code);

                        return SCI_FAILURE;
                }
        }

        if (!sas_protocol_ata(task->task_proto)) {
                dev_err(scic_to_dev(sci_req->owning_controller),
                        "%s: Non-ATA protocol in SATA path: 0x%x\n",
                        __func__,
                        task->task_proto);
                return SCI_FAILURE;
        }

        /* non data */
        if (task->data_dir == DMA_NONE) {
                scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
                                                           sci_req->task_context_buffer);
                return SCI_SUCCESS;
        }

        /* NCQ */
        if (task->ata_task.use_ncq) {
                scic_sds_stp_optimized_request_construct(sci_req,
                                                         SCU_TASK_TYPE_FPDMAQ_READ,
                                                         len, dir);
                return SCI_SUCCESS;
        }

        /* DMA */
        if (task->ata_task.dma_xfer) {
                scic_sds_stp_optimized_request_construct(sci_req,
                                                         SCU_TASK_TYPE_DMA_IN,
                                                         len, dir);
                return SCI_SUCCESS;
        }

        /* PIO */
        return scic_sds_stp_pio_request_construct(sci_req, copy);
}

static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req)
{
        struct isci_request *ireq = sci_req_to_ireq(sci_req);
        struct sas_task *task = isci_request_access_task(ireq);

        sci_req->protocol = SCIC_SSP_PROTOCOL;

        scu_ssp_io_request_construct_task_context(sci_req,
                                                  task->data_dir,
                                                  task->total_xfer_len);

        scic_sds_io_request_build_ssp_command_iu(sci_req);

        sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);

        return SCI_SUCCESS;
}

enum sci_status scic_task_request_construct_ssp(
        struct scic_sds_request *sci_req)
{
        /* Construct the SSP Task SCU Task Context */
        scu_ssp_task_request_construct_task_context(sci_req);

        /* Fill in the SSP Task IU */
        scic_sds_task_request_build_ssp_task_iu(sci_req);

        sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);

        return SCI_SUCCESS;
}

static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
{
        enum sci_status status;
        bool copy;
        struct isci_request *isci_request = sci_req_to_ireq(sci_req);
        struct sas_task *task = isci_request_access_task(isci_request);

        sci_req->protocol = SCIC_STP_PROTOCOL;

        copy = task->data_dir != DMA_NONE;

        status = scic_io_request_construct_sata(sci_req,
                                                task->total_xfer_len,
                                                task->data_dir,
                                                copy);

        if (status == SCI_SUCCESS)
                sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);

        return status;
}

enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
{
        struct isci_request *ireq = sci_req_to_ireq(sci_req);

        /* check for management protocols */
        if (ireq->ttype == tmf_task) {
                struct isci_tmf *tmf = isci_request_access_tmf(ireq);

                if (tmf->tmf_code == isci_tmf_sata_srst_high ||
                    tmf->tmf_code == isci_tmf_sata_srst_low) {
                        scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
                                                                   sci_req->task_context_buffer);
                } else {
                        dev_err(scic_to_dev(sci_req->owning_controller),
                                "%s: Request 0x%p received un-handled SAT "
                                "Protocol 0x%x.\n",
                                __func__, sci_req, tmf->tmf_code);

                        return SCI_FAILURE;
                }
        }

        sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);

        return SCI_SUCCESS;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @sci_req: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
{
        struct scic_sds_controller *scic = sci_req->owning_controller;
        u32 ret_val = 0;

        if (readl(&scic->smu_registers->address_modifier) == 0) {
                void __iomem *scu_reg_base = scic->scu_registers;

                /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
                 *   BAR1 is the scu_registers
                 *   0x20002C = 0x200000 + 0x2c
                 *            = start of task context SRAM + offset of (type.ssp.data_offset)
                 *   TCi is the io_tag of struct scic_sds_request
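                 *
                 *   e.g. (applying the formula above) TCi == 3 reads from
                 *   BAR1 + 0x20002C + 3 * 256 = BAR1 + 0x20032C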
                 */
                ret_val = readl(scu_reg_base +
                                (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
                                ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(sci_req->io_tag)));
        }

        return ret_val;
}

enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req)
{
        struct scic_sds_controller *scic = sci_req->owning_controller;
        struct scu_task_context *task_context;
        enum sci_base_request_states state;

        state = sci_req->sm.current_state_id;
        if (state != SCI_REQ_CONSTRUCTED) {
                dev_warn(scic_to_dev(scic),
                        "%s: SCIC IO Request requested to start while in wrong "
                         "state %d\n", __func__, state);
                return SCI_FAILURE_INVALID_STATE;
        }

        /* If necessary, allocate a TCi for the io request object and then,
         * if necessary, copy the constructed TC data into the actual TC buffer.
         * If everything is successful, the post context field is updated with
         * the TCi so the controller can post the request to the hardware.
         */
        if (sci_req->io_tag == SCI_CONTROLLER_INVALID_IO_TAG)
                sci_req->io_tag = scic_controller_allocate_io_tag(scic);

        /* Record the IO Tag in the request */
        if (sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
                task_context = sci_req->task_context_buffer;

                task_context->task_index = ISCI_TAG_TCI(sci_req->io_tag);

                switch (task_context->protocol_type) {
                case SCU_TASK_CONTEXT_PROTOCOL_SMP:
                case SCU_TASK_CONTEXT_PROTOCOL_SSP:
                        /* SSP/SMP Frame */
                        task_context->type.ssp.tag = sci_req->io_tag;
                        task_context->type.ssp.target_port_transfer_tag =
                                0xFFFF;
                        break;

                case SCU_TASK_CONTEXT_PROTOCOL_STP:
                        /* STP/SATA Frame
                         * task_context->type.stp.ncq_tag = sci_req->ncq_tag;
                         */
                        break;

                case SCU_TASK_CONTEXT_PROTOCOL_NONE:
                        /* TODO: When do we set no protocol type? */
                        break;

                default:
                        /* This should never happen since we build the IO
                         * requests */
                        break;
                }

                /*
                 * Check to see if we need to copy the task context buffer
                 * or have been building into the task context buffer
                 */
                if (!sci_req->was_tag_assigned_by_user)
                        scic_sds_controller_copy_task_context(scic, sci_req);

                /* Add the io tag value to the post_context */
                sci_req->post_context |= ISCI_TAG_TCI(sci_req->io_tag);

                /* Everything is good, go ahead and change state */
                sci_change_state(&sci_req->sm, SCI_REQ_STARTED);

                return SCI_SUCCESS;
        }

        return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

enum sci_status
scic_sds_io_request_terminate(struct scic_sds_request *sci_req)
{
        enum sci_base_request_states state;

        state = sci_req->sm.current_state_id;

        switch (state) {
        case SCI_REQ_CONSTRUCTED:
                scic_sds_request_set_status(sci_req,
                        SCU_TASK_DONE_TASK_ABORT,
                        SCI_FAILURE_IO_TERMINATED);

                sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_STARTED:
        case SCI_REQ_TASK_WAIT_TC_COMP:
        case SCI_REQ_SMP_WAIT_RESP:
        case SCI_REQ_SMP_WAIT_TC_COMP:
        case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
        case SCI_REQ_STP_UDMA_WAIT_D2H:
        case SCI_REQ_STP_NON_DATA_WAIT_H2D:
        case SCI_REQ_STP_NON_DATA_WAIT_D2H:
        case SCI_REQ_STP_PIO_WAIT_H2D:
        case SCI_REQ_STP_PIO_WAIT_FRAME:
        case SCI_REQ_STP_PIO_DATA_IN:
        case SCI_REQ_STP_PIO_DATA_OUT:
        case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
        case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
        case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
                sci_change_state(&sci_req->sm, SCI_REQ_ABORTING);
                return SCI_SUCCESS;
        case SCI_REQ_TASK_WAIT_TC_RESP:
                sci_change_state(&sci_req->sm, SCI_REQ_ABORTING);
                sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_ABORTING:
                sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_COMPLETED:
        default:
                dev_warn(scic_to_dev(sci_req->owning_controller),
                         "%s: SCIC IO Request requested to abort while in wrong "
                         "state %d\n",
                         __func__,
                         sci_req->sm.current_state_id);
                break;
        }

        return SCI_FAILURE_INVALID_STATE;
}

enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req)
{
        enum sci_base_request_states state;
        struct scic_sds_controller *scic = sci_req->owning_controller;

        state = sci_req->sm.current_state_id;
        if (WARN_ONCE(state != SCI_REQ_COMPLETED,
                      "isci: request completion from wrong state (%d)\n", state))
                return SCI_FAILURE_INVALID_STATE;

        if (!sci_req->was_tag_assigned_by_user)
                scic_controller_free_io_tag(scic, sci_req->io_tag);

        if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
                scic_sds_controller_release_frame(scic,
                                                  sci_req->saved_rx_frame_index);

        /* XXX can we just stop the machine and remove the 'final' state? */
        sci_change_state(&sci_req->sm, SCI_REQ_FINAL);
        return SCI_SUCCESS;
}

enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
                                                  u32 event_code)
{
        enum sci_base_request_states state;
        struct scic_sds_controller *scic = sci_req->owning_controller;

        state = sci_req->sm.current_state_id;

        if (state != SCI_REQ_STP_PIO_DATA_IN) {
                dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n",
                         __func__, event_code, state);

                return SCI_FAILURE_INVALID_STATE;
        }

        switch (scu_get_event_specifier(event_code)) {
        case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
                /* We are waiting for data and the SCU has returned R_ERR
                 * for the data frame.  Go back to waiting for the D2H
                 * Register FIS.
                 */
                sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
                return SCI_SUCCESS;
        default:
                dev_err(scic_to_dev(scic),
                        "%s: pio request unexpected event %#x\n",
                        __func__, event_code);

                /* TODO Should we fail the PIO request when we get an
                 * unexpected event?
                 */
                return SCI_FAILURE;
        }
}

/*
 * This function copies response data for requests returning response data
 *    instead of sense data.
 * @sci_req: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
{
        void *resp_buf;
        u32 len;
        struct ssp_response_iu *ssp_response;
        struct isci_request *ireq = sci_req_to_ireq(sci_req);
        struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

        ssp_response = &sci_req->ssp.rsp;

        resp_buf = &isci_tmf->resp.resp_iu;

        len = min_t(u32,
                    SSP_RESP_IU_MAX_SIZE,
                    be32_to_cpu(ssp_response->response_data_len));

        memcpy(resp_buf, ssp_response->resp_data, len);
}

static enum sci_status
request_started_state_tc_event(struct scic_sds_request *sci_req,
                               u32 completion_code)
{
        struct ssp_response_iu *resp_iu;
        u8 datapres;

        /* TODO: Any SDMA return code other than 0 is bad.  Decode 0x003C0000
         * to determine the SDMA status.
         */
        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
                scic_sds_request_set_status(sci_req,
                                            SCU_TASK_DONE_GOOD,
                                            SCI_SUCCESS);
                break;
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
                /* There are times when the SCU hardware will return an early
                 * response because the io request specified more data than is
                 * returned by the target device (mode pages, inquiry data,
                 * etc.).  We must check the response stats to see if this is
                 * truly a failed request or a good request that just got
                 * completed early.
                 */
                struct ssp_response_iu *resp = &sci_req->ssp.rsp;
                ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

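                /* The SSP response IU arrives big-endian on the wire;
                 * swap it in place so the status field below is read in
                 * CPU byte order.
                 */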
                sci_swab32_cpy(&sci_req->ssp.rsp,
                               &sci_req->ssp.rsp,
                               word_cnt);

                if (resp->status == 0) {
                        scic_sds_request_set_status(sci_req,
                                                    SCU_TASK_DONE_GOOD,
                                                    SCI_SUCCESS_IO_DONE_EARLY);
                } else {
                        scic_sds_request_set_status(sci_req,
                                                    SCU_TASK_DONE_CHECK_RESPONSE,
                                                    SCI_FAILURE_IO_RESPONSE_VALID);
                }
                break;
        }
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
                ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

                sci_swab32_cpy(&sci_req->ssp.rsp,
                               &sci_req->ssp.rsp,
                               word_cnt);

                scic_sds_request_set_status(sci_req,
                                            SCU_TASK_DONE_CHECK_RESPONSE,
                                            SCI_FAILURE_IO_RESPONSE_VALID);
                break;
        }

        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
                /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
                 * guaranteed to be received before this completion status is
                 * posted?
                 */
                resp_iu = &sci_req->ssp.rsp;
                datapres = resp_iu->datapres;

                if (datapres == 1 || datapres == 2) {
                        scic_sds_request_set_status(sci_req,
                                                    SCU_TASK_DONE_CHECK_RESPONSE,
                                                    SCI_FAILURE_IO_RESPONSE_VALID);
                } else
                        scic_sds_request_set_status(sci_req,
                                                    SCU_TASK_DONE_GOOD,
                                                    SCI_SUCCESS);
                break;
        /* only stp device gets suspended. */
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
                if (sci_req->protocol == SCIC_STP_PROTOCOL) {
                        scic_sds_request_set_status(sci_req,
                                SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                                SCU_COMPLETION_TL_STATUS_SHIFT,
                                SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
                } else {
                        scic_sds_request_set_status(sci_req,
                                SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                                SCU_COMPLETION_TL_STATUS_SHIFT,
                                SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
                }
                break;

        /* both stp/ssp device gets suspended */
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
                scic_sds_request_set_status(sci_req,
                                            SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                                            SCU_COMPLETION_TL_STATUS_SHIFT,
                                            SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
                break;

        /* neither ssp nor stp gets suspended. */
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
        default:
                scic_sds_request_set_status(
                        sci_req,
                        SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                        SCU_COMPLETION_TL_STATUS_SHIFT,
                        SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
                break;
        }

        /*
         * TODO: This is probably wrong for ACK/NAK timeout conditions
         */

        /* In all cases we will treat this as the completion of the IO req. */
        sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
        return SCI_SUCCESS;
}

static enum sci_status
request_aborting_state_tc_event(struct scic_sds_request *sci_req,
                                u32 completion_code)
{
        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
        case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
                scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT,
                                            SCI_FAILURE_IO_TERMINATED);

                sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
                break;

        default:
                /* Unless we get some strange error, wait for the task abort
                 * to complete.
                 * TODO: Should there be a state change for this completion?
                 */
                break;
        }

        return SCI_SUCCESS;
}

static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *sci_req,
                                                       u32 completion_code)
{
        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
                scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
                                            SCI_SUCCESS);

                sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP);
                break;
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
                /* Currently, the decision is to simply allow the task request
                 * to timeout if the task IU wasn't received successfully.
                 * There is a potential for receiving multiple task responses if
                 * we decide to send the task IU again.
                 */
                dev_warn(scic_to_dev(sci_req->owning_controller),
                         "%s: TaskRequest:0x%p CompletionCode:%x - "
                         "ACK/NAK timeout\n", __func__, sci_req,
                         completion_code);

                sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP);
                break;
        default:
                /*
                 * All other completion status cause the IO to be complete.
                 * If a NAK was received, then it is up to the user to retry
                 * the request.
                 */
                scic_sds_request_set_status(sci_req,
                        SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
                        SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

                sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
                break;
        }

        return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_response_tc_event(struct scic_sds_request *sci_req,
                                    u32 completion_code)
{
        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
                /* In the AWAIT RESPONSE state, any TC completion is
                 * unexpected, but if the TC has success status, we
                 * complete the IO anyway.
                 */
                scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
                                            SCI_SUCCESS);

                sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
                break;

        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
                /* These statuses have been seen on a specific LSI
                 * expander, which sometimes is unable to send an smp
                 * response within 2 ms.  This causes our hardware to
                 * break the connection and set the TC completion with
                 * one of these SMP_XXX_XX_ERR statuses.  For this type
                 * of error, we ask the scic user to retry the request.
                 */
                scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR,
                                            SCI_FAILURE_RETRY_REQUIRED);

                sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
                break;

        default:
                /* All other completion status cause the IO to be complete.  If a NAK
                 * was received, then it is up to the user to retry the request
                 */
                scic_sds_request_set_status(sci_req,
                                            SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
                                            SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

                sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
                break;
        }

        return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_tc_event(struct scic_sds_request *sci_req,
                           u32 completion_code)
{
        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
                scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
                                            SCI_SUCCESS);

                sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
                break;
        default:
                /* All other completion status cause the IO to be
                 * complete.  If a NAK was received, then it is up to
                 * the user to retry the request.
                 */
                scic_sds_request_set_status(sci_req,
                                            SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
                                            SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

                sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
                break;
        }

        return SCI_SUCCESS;
}

1240 void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req,
1241                                      u16 ncq_tag)
1242 {
1243         /**
1244          * @note This could be made to return an error to the user if the user
1245          *       attempts to set the NCQ tag in the wrong state.
1246          */
1247         req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
1248 }
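
     /*
      * Illustrative usage (editorial sketch; the tag value is hypothetical):
      * the NCQ tag assigned by the upper layers is programmed into the task
      * context before the request is posted to the hardware, e.g.:
      *
      *         scic_stp_io_request_set_ncq_tag(req, 5);
      */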
1249
1250 /**
1251  * This method returns the next SGL element from the request.
1252  * @stp_req: This parameter specifies the STP request whose
1253  *    scatter-gather list is being walked.
1254  *
1255  * Check on which SGL element pair we are working: if working on SGL
1256  * pair element A, advance to element B; otherwise, if there are more
1257  * SGL element pairs for this IO request, advance to the next pair and
1258  * return its element A.  Returns NULL when the list is exhausted.
1259  */
1260 static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
1261 {
1262         struct scu_sgl_element *current_sgl;
1263         struct scic_sds_request *sci_req = to_sci_req(stp_req);
1264         struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
1265
1266         if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1267                 if (pio_sgl->sgl_pair->B.address_lower == 0 &&
1268                     pio_sgl->sgl_pair->B.address_upper == 0) {
1269                         current_sgl = NULL;
1270                 } else {
1271                         pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
1272                         current_sgl = &pio_sgl->sgl_pair->B;
1273                 }
1274         } else {
1275                 if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
1276                     pio_sgl->sgl_pair->next_pair_upper == 0) {
1277                         current_sgl = NULL;
1278                 } else {
1279                         u64 phys_addr;
1280
1281                         phys_addr = pio_sgl->sgl_pair->next_pair_upper;
1282                         phys_addr <<= 32;
1283                         phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
1284
1285                         pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
1286                         pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
1287                         current_sgl = &pio_sgl->sgl_pair->A;
1288                 }
1289         }
1290
1291         return current_sgl;
1292 }
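
     /*
      * Editorial note (not driver code): the walk above visits each pair's
      * elements in A, B order and follows the 64-bit physical next-pair
      * pointer, so a request with two chained pairs yields A0, B0, A1, B1,
      * then NULL once element B is empty or no next pair is linked.
      */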
1293
1294 static enum sci_status
1295 stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req,
1296                                         u32 completion_code)
1297 {
1298         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1299         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1300                 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1301                                             SCI_SUCCESS);
1302
1303                 sci_change_state(&sci_req->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
1304                 break;
1305
1306         default:
1307                 /* All other completion statuses cause the IO to be
1308                  * complete.  If a NAK was received, then it is up to
1309                  * the user to retry the request.
1310                  */
1311                 scic_sds_request_set_status(sci_req,
1312                                             SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1313                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1314
1315                 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1316                 break;
1317         }
1318
1319         return SCI_SUCCESS;
1320 }
1321
1322 #define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */
1323
1324 /* Transmit a DATA FIS from (current sgl + offset) for the input
1325  * parameter length. The current sgl and offset are already stored in the IO request.
1326  */
1327 static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data_frame(
1328         struct scic_sds_request *sci_req,
1329         u32 length)
1330 {
1331         struct scic_sds_controller *scic = sci_req->owning_controller;
1332         struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1333         struct scu_task_context *task_context;
1334         struct scu_sgl_element *current_sgl;
1335
1336         /* Recycle the TC and reconstruct it to send out a DATA FIS
1337          * containing the data from current_sgl+offset for the input length
1338          */
1339         task_context = scic_sds_controller_get_task_context_buffer(scic,
1340                                                                    sci_req->io_tag);
1341
1342         if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
1343                 current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
1344         else
1345                 current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
1346
1347         /* update the TC */
1348         task_context->command_iu_upper = current_sgl->address_upper;
1349         task_context->command_iu_lower = current_sgl->address_lower;
1350         task_context->transfer_length_bytes = length;
1351         task_context->type.stp.fis_type = FIS_DATA;
1352
1353         /* send the new TC out. */
1354         return scic_controller_continue_io(sci_req);
1355 }
1356
1357 static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
1358 {
1360         struct scu_sgl_element *current_sgl;
1361         u32 sgl_offset;
1362         u32 remaining_bytes_in_current_sgl = 0;
1363         enum sci_status status = SCI_SUCCESS;
1364         struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1365
1366         sgl_offset = stp_req->type.pio.request_current.sgl_offset;
1367
1368         if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1369                 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
1370                 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
1371         } else {
1372                 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
1373                 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
1374         }
1375
1377         if (stp_req->type.pio.pio_transfer_bytes > 0) {
1378                 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
1379                         /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
1380                         status = scic_sds_stp_request_pio_data_out_transmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
1381                         if (status == SCI_SUCCESS) {
1382                                 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
1383
1384                                 /* update the current sgl, sgl_offset and save for future */
1385                                 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
1386                                 sgl_offset = 0;
1387                         }
1388                 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
1389                         /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
1390                         status = scic_sds_stp_request_pio_data_out_transmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
1391
1392                         if (status == SCI_SUCCESS) {
1393                                 /* Sgl offset will be adjusted and saved for future */
1394                                 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
1395                                 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
1396                                 stp_req->type.pio.pio_transfer_bytes = 0;
1397                         }
1398                 }
1399         }
1400
1401         if (status == SCI_SUCCESS) {
1402                 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
1403         }
1404
1405         return status;
1406 }
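
     /*
      * Worked example (editorial; the sizes are hypothetical): with 3072
      * bytes left to send (pio_transfer_bytes) and 1024 bytes remaining in
      * the current SGL element, the routine above sends a 1024-byte DATA
      * FIS and advances to the next SGL element; with 2048 bytes left and
      * a 4096-byte element it sends all 2048 bytes and advances sgl_offset
      * within the same element instead.
      */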
1407
1408 /**
1409  * This method copies data from a buffer into the IO request SGL.
1410  * @stp_req: The request that is used for the SGL processing.
1411  * @data_buf: The buffer of data to be copied.
1412  * @len: The length of the data transfer.
1413  *
1414  * Copy the data from the buffer for the specified length to the data
1415  * region described by the IO request SGL. Returns an enum sci_status.
1416  */
1417 static enum sci_status
1418 scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
1419                                                   u8 *data_buf, u32 len)
1420 {
1421         struct scic_sds_request *sci_req;
1422         struct isci_request *ireq;
1423         u8 *src_addr;
1424         int copy_len;
1425         struct sas_task *task;
1426         struct scatterlist *sg;
1427         void *kaddr;
1428         int total_len = len;
1429
1430         sci_req = to_sci_req(stp_req);
1431         ireq = sci_req_to_ireq(sci_req);
1432         task = isci_request_access_task(ireq);
1433         src_addr = data_buf;
1434
1435         if (task->num_scatter > 0) {
1436                 sg = task->scatter;
1437
1438                 while (total_len > 0) {
1439                         struct page *page = sg_page(sg);
1440
1441                         copy_len = min_t(int, total_len, sg_dma_len(sg));
1442                         kaddr = kmap_atomic(page, KM_IRQ0);
1443                         memcpy(kaddr + sg->offset, src_addr, copy_len);
1444                         kunmap_atomic(kaddr, KM_IRQ0);
1445                         total_len -= copy_len;
1446                         src_addr += copy_len;
1447                         sg = sg_next(sg);
1448                 }
1449         } else {
1450                 BUG_ON(task->total_xfer_len < total_len);
1451                 memcpy(task->scatter, src_addr, total_len);
1452         }
1453
1454         return SCI_SUCCESS;
1455 }
1456
1457 /**
1458  * This method copies a received data buffer into the IO request data region.
1459  * @sci_req: The PIO DATA IN request that is to receive the data.
1460  * @data_buffer: The buffer to copy from.
1461  *
1462  * Returns an enum sci_status.
1463  */
1464 static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
1465         struct scic_sds_stp_request *sci_req,
1466         u8 *data_buffer)
1467 {
1468         enum sci_status status;
1469
1470         /* If there is less than 1K remaining in the transfer request,
1471          * copy just the data for the transfer.
1472          */
1473         if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
1474                 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1475                         sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
1476
1477                 if (status == SCI_SUCCESS)
1478                         sci_req->type.pio.pio_transfer_bytes = 0;
1479         } else {
1480                 /* We are transferring the whole frame so copy */
1481                 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1482                         sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1483
1484                 if (status == SCI_SUCCESS)
1485                         sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
1486         }
1487
1488         return status;
1489 }
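
     /*
      * Worked example (editorial; the sizes are hypothetical): a 2560-byte
      * PIO data-in transfer arrives as two full 1024-byte frames followed
      * by one 512-byte partial frame, so pio_transfer_bytes steps
      * 2560 -> 1536 -> 512 -> 0, with only the final call taking the
      * partial-copy branch above.
      */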
1490
1491 static enum sci_status
1492 stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req,
1493                                               u32 completion_code)
1494 {
1495         enum sci_status status = SCI_SUCCESS;
1496
1497         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1498         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1499                 scic_sds_request_set_status(sci_req,
1500                                             SCU_TASK_DONE_GOOD,
1501                                             SCI_SUCCESS);
1502
1503                 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1504                 break;
1505
1506         default:
1507                 /* All other completion statuses cause the IO to be
1508                  * complete.  If a NAK was received, then it is up to
1509                  * the user to retry the request.
1510                  */
1511                 scic_sds_request_set_status(sci_req,
1512                                             SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1513                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1514
1515                 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1516                 break;
1517         }
1518
1519         return status;
1520 }
1521
1522 static enum sci_status
1523 pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req,
1524                               u32 completion_code)
1525 {
1526         enum sci_status status = SCI_SUCCESS;
1527         bool all_frames_transferred = false;
1528         struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1529
1530         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1531         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1532                 /* Transmit data */
1533                 if (stp_req->type.pio.pio_transfer_bytes != 0) {
1534                         status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
1535                         if (status == SCI_SUCCESS) {
1536                                 if (stp_req->type.pio.pio_transfer_bytes == 0)
1537                                         all_frames_transferred = true;
1538                         }
1539                 } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
1540                         /*
1541                          * this will happen if all of the data is written on
1542                          * the first pass after the PIO Setup FIS is received
1543                          */
1544                         all_frames_transferred = true;
1545                 }
1546
1547                 /* all data transferred. */
1548                 if (all_frames_transferred) {
1549                         /*
1550                          * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME and
1551                          * wait for a PIO_SETUP FIS or a D2H Register FIS. */
1552                         sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1553                 }
1554                 break;
1555
1556         default:
1557                 /*
1558                  * All other completion statuses cause the IO to be complete.
1559                  * If a NAK was received, then it is up to the user to retry
1560                  * the request.
1561                  */
1562                 scic_sds_request_set_status(
1563                         sci_req,
1564                         SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1565                         SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1566
1567                 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1568                 break;
1569         }
1570
1571         return status;
1572 }
1573
1574 static void scic_sds_stp_request_udma_complete_request(
1575         struct scic_sds_request *request,
1576         u32 scu_status,
1577         enum sci_status sci_status)
1578 {
1579         scic_sds_request_set_status(request, scu_status, sci_status);
1580         sci_change_state(&request->sm, SCI_REQ_COMPLETED);
1581 }
1582
1583 static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
1584                                                                        u32 frame_index)
1585 {
1586         struct scic_sds_controller *scic = sci_req->owning_controller;
1587         struct dev_to_host_fis *frame_header;
1588         enum sci_status status;
1589         u32 *frame_buffer;
1590
1591         status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1592                                                                frame_index,
1593                                                                (void **)&frame_header);
1594
1595         if ((status == SCI_SUCCESS) &&
1596             (frame_header->fis_type == FIS_REGD2H)) {
1597                 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1598                                                               frame_index,
1599                                                               (void **)&frame_buffer);
1600
1601                 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1602                                                        frame_header,
1603                                                        frame_buffer);
1604         }
1605
1606         scic_sds_controller_release_frame(scic, frame_index);
1607
1608         return status;
1609 }
1610
1611 enum sci_status
1612 scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1613                                   u32 frame_index)
1614 {
1615         struct scic_sds_controller *scic = sci_req->owning_controller;
1616         struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1617         enum sci_base_request_states state;
1618         enum sci_status status;
1619         ssize_t word_cnt;
1620
1621         state = sci_req->sm.current_state_id;
1622         switch (state)  {
1623         case SCI_REQ_STARTED: {
1624                 struct ssp_frame_hdr ssp_hdr;
1625                 void *frame_header;
1626
1627                 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1628                                                               frame_index,
1629                                                               &frame_header);
1630
1631                 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1632                 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1633
1634                 if (ssp_hdr.frame_type == SSP_RESPONSE) {
1635                         struct ssp_response_iu *resp_iu;
1636                         ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1637
1638                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1639                                                                       frame_index,
1640                                                                       (void **)&resp_iu);
1641
1642                         sci_swab32_cpy(&sci_req->ssp.rsp, resp_iu, word_cnt);
1643
1644                         resp_iu = &sci_req->ssp.rsp;
1645
1646                         if (resp_iu->datapres == 0x01 ||
1647                             resp_iu->datapres == 0x02) {
1648                                 scic_sds_request_set_status(sci_req,
1649                                                             SCU_TASK_DONE_CHECK_RESPONSE,
1650                                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1651                         } else
1652                                 scic_sds_request_set_status(sci_req,
1653                                                             SCU_TASK_DONE_GOOD,
1654                                                             SCI_SUCCESS);
1655                 } else {
1656                         /* not a response frame, why did it get forwarded? */
1657                         dev_err(scic_to_dev(scic),
1658                                 "%s: SCIC IO Request 0x%p received unexpected "
1659                                 "frame %d type 0x%02x\n", __func__, sci_req,
1660                                 frame_index, ssp_hdr.frame_type);
1661                 }
1662
1663                 /*
1664                  * In any case we are done with this frame buffer; return it to
1665                  * the controller.
1666                  */
1667                 scic_sds_controller_release_frame(scic, frame_index);
1668
1669                 return SCI_SUCCESS;
1670         }
1671
1672         case SCI_REQ_TASK_WAIT_TC_RESP:
1673                 scic_sds_io_request_copy_response(sci_req);
1674                 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1675                 scic_sds_controller_release_frame(scic, frame_index);
1676                 return SCI_SUCCESS;
1677
1678         case SCI_REQ_SMP_WAIT_RESP: {
1679                 struct smp_resp *rsp_hdr = &sci_req->smp.rsp;
1680                 void *frame_header;
1681
1682                 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1683                                                               frame_index,
1684                                                               &frame_header);
1685
1686                 /* byte swap the header. */
1687                 word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
1688                 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
1689
1690                 if (rsp_hdr->frame_type == SMP_RESPONSE) {
1691                         void *smp_resp;
1692
1693                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1694                                                                       frame_index,
1695                                                                       &smp_resp);
1696
1697                         word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
1698                                 sizeof(u32);
1699
1700                         sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1701                                        smp_resp, word_cnt);
1702
1703                         scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1704                                                     SCI_SUCCESS);
1705
1706                         sci_change_state(&sci_req->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1707                 } else {
1708                         /*
1709                          * This was not a response frame why did it get
1710                          * forwarded?
1711                          */
1712                         dev_err(scic_to_dev(scic),
1713                                 "%s: SCIC SMP Request 0x%p received unexpected "
1714                                 "frame %d type 0x%02x\n",
1715                                 __func__,
1716                                 sci_req,
1717                                 frame_index,
1718                                 rsp_hdr->frame_type);
1719
1720                         scic_sds_request_set_status(sci_req,
1721                                                     SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1722                                                     SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1723
1724                         sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1725                 }
1726
1727                 scic_sds_controller_release_frame(scic, frame_index);
1728
1729                 return SCI_SUCCESS;
1730         }
1731
1732         case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1733                 return scic_sds_stp_request_udma_general_frame_handler(sci_req,
1734                                                                        frame_index);
1735
1736         case SCI_REQ_STP_UDMA_WAIT_D2H:
1737                 /* Use the general frame handler to copy the response data */
1738                 status = scic_sds_stp_request_udma_general_frame_handler(sci_req,
1739                                                                          frame_index);
1740
1741                 if (status != SCI_SUCCESS)
1742                         return status;
1743
1744                 scic_sds_stp_request_udma_complete_request(sci_req,
1745                                                            SCU_TASK_DONE_CHECK_RESPONSE,
1746                                                            SCI_FAILURE_IO_RESPONSE_VALID);
1747
1748                 return SCI_SUCCESS;
1749
1750         case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
1751                 struct dev_to_host_fis *frame_header;
1752                 u32 *frame_buffer;
1753
1754                 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1755                                                                        frame_index,
1756                                                                        (void **)&frame_header);
1757
1758                 if (status != SCI_SUCCESS) {
1759                         dev_err(scic_to_dev(scic),
1760                                 "%s: SCIC IO Request 0x%p could not get frame "
1761                                 "header for frame index %d, status %x\n",
1762                                 __func__,
1763                                 stp_req,
1764                                 frame_index,
1765                                 status);
1766
1767                         return status;
1768                 }
1769
1770                 switch (frame_header->fis_type) {
1771                 case FIS_REGD2H:
1772                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1773                                                                       frame_index,
1774                                                                       (void **)&frame_buffer);
1775
1776                         scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1777                                                                frame_header,
1778                                                                frame_buffer);
1779
1780                         /* The command has completed with an error */
1781                         scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
1782                                                     SCI_FAILURE_IO_RESPONSE_VALID);
1783                         break;
1784
1785                 default:
1786                         dev_warn(scic_to_dev(scic),
1787                                  "%s: IO Request:0x%p Frame Id:%d protocol "
1788                                   "violation occurred\n", __func__, stp_req,
1789                                   frame_index);
1790
1791                         scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
1792                                                     SCI_FAILURE_PROTOCOL_VIOLATION);
1793                         break;
1794                 }
1795
1796                 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1797
1798                 /* Frame has been decoded; return it to the controller */
1799                 scic_sds_controller_release_frame(scic, frame_index);
1800
1801                 return status;
1802         }
1803
1804         case SCI_REQ_STP_PIO_WAIT_FRAME: {
1805                 struct isci_request *ireq = sci_req_to_ireq(sci_req);
1806                 struct sas_task *task = isci_request_access_task(ireq);
1807                 struct dev_to_host_fis *frame_header;
1808                 u32 *frame_buffer;
1809
1810                 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1811                                                                        frame_index,
1812                                                                        (void **)&frame_header);
1813
1814                 if (status != SCI_SUCCESS) {
1815                         dev_err(scic_to_dev(scic),
1816                                 "%s: SCIC IO Request 0x%p could not get frame "
1817                                 "header for frame index %d, status %x\n",
1818                                 __func__, stp_req, frame_index, status);
1819                         return status;
1820                 }
1821
1822                 switch (frame_header->fis_type) {
1823                 case FIS_PIO_SETUP:
1824                         /* Get from the frame buffer the PIO Setup Data */
1825                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1826                                                                       frame_index,
1827                                                                       (void **)&frame_buffer);
1828
1829                         /* Get the data from the PIO Setup.  The SCU
1830                          * hardware returns the first dword in the
1831                          * frame_header and the rest of the data is in the
1832                          * frame buffer, so we need to back up one dword.
1833                          */
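
                             /* Editorial note, per the SATA PIO Setup FIS
                              * layout (with dword 0 already delivered in
                              * frame_header): frame_buffer[2] carries
                              * E_Status in its top byte and frame_buffer[3]
                              * carries the transfer count in its low 16
                              * bits, as decoded below.
                              */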
1834
1835                         /* transfer_count: first 16 bits in the 4th dword */
1836                         stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
1837
1838                         /* ending_status: 4th byte in the 3rd dword */
1839                         stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
1840
1841                         scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1842                                                                frame_header,
1843                                                                frame_buffer);
1844
1845                         sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
1846
1847                         /* The next state is dependent on whether the
1848                          * request was PIO Data-in or Data out
1849                          */
1850                         if (task->data_dir == DMA_FROM_DEVICE) {
1851                                 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_IN);
1852                         } else if (task->data_dir == DMA_TO_DEVICE) {
1853                                 /* Transmit data */
1854                                 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
1855                                 if (status != SCI_SUCCESS)
1856                                         break;
1857                                 sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_OUT);
1858                         }
1859                         break;
1860
1861                 case FIS_SETDEVBITS:
1862                         sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1863                         break;
1864
1865                 case FIS_REGD2H:
1866                         if (frame_header->status & ATA_BUSY) {
1867                                 /*
1868                                  * Now why is the drive sending a D2H Register
1869                                  * FIS when it is still busy?  Do nothing since
1870                                  * we are still in the right state.
1871                                  */
1872                                 dev_dbg(scic_to_dev(scic),
1873                                         "%s: SCIC PIO Request 0x%p received "
1874                                         "D2H Register FIS with BSY status "
1875                                         "0x%x\n",
1876                                         __func__,
1877                                         stp_req,
1878                                         frame_header->status);
1879                                 break;
1880                         }
1881
1882                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1883                                                                       frame_index,
1884                                                                       (void **)&frame_buffer);
1885
1886                         scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1887                                                                frame_header,
1888                                                                frame_buffer);
1889
1890                         scic_sds_request_set_status(sci_req,
1891                                                     SCU_TASK_DONE_CHECK_RESPONSE,
1892                                                     SCI_FAILURE_IO_RESPONSE_VALID);
1893
1894                         sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1895                         break;
1896
1897                 default:
1898                         /* FIXME: what do we do here? */
1899                         break;
1900                 }
1901
1902                 /* Frame is decoded; return it to the controller */
1903                 scic_sds_controller_release_frame(scic, frame_index);
1904
1905                 return status;
1906         }
1907
1908         case SCI_REQ_STP_PIO_DATA_IN: {
1909                 struct dev_to_host_fis *frame_header;
1910                 struct sata_fis_data *frame_buffer;
1911
1912                 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1913                                                                        frame_index,
1914                                                                        (void **)&frame_header);
1915
1916                 if (status != SCI_SUCCESS) {
1917                         dev_err(scic_to_dev(scic),
1918                                 "%s: SCIC IO Request 0x%p could not get frame "
1919                                 "header for frame index %d, status %x\n",
1920                                 __func__,
1921                                 stp_req,
1922                                 frame_index,
1923                                 status);
1924                         return status;
1925                 }
1926
1927                 if (frame_header->fis_type != FIS_DATA) {
1928                         dev_err(scic_to_dev(scic),
1929                                 "%s: SCIC PIO Request 0x%p received frame %d "
1930                                 "with fis type 0x%02x when expecting a data "
1931                                 "fis.\n",
1932                                 __func__,
1933                                 stp_req,
1934                                 frame_index,
1935                                 frame_header->fis_type);
1936
1937                         scic_sds_request_set_status(sci_req,
1938                                                     SCU_TASK_DONE_GOOD,
1939                                                     SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
1940
1941                         sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1942
1943                         /* Frame is decoded; return it to the controller */
1944                         scic_sds_controller_release_frame(scic, frame_index);
1945                         return status;
1946                 }
1947
1948                 if (stp_req->type.pio.request_current.sgl_pair == NULL) {
1949                         sci_req->saved_rx_frame_index = frame_index;
1950                         stp_req->type.pio.pio_transfer_bytes = 0;
1951                 } else {
1952                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1953                                                                       frame_index,
1954                                                                       (void **)&frame_buffer);
1955
1956                         status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
1957                                                                             (u8 *)frame_buffer);
1958
1959                         /* Frame is decoded; return it to the controller */
1960                         scic_sds_controller_release_frame(scic, frame_index);
1961                 }
1962
1963                 /* Check for the end of the transfer: are there more
1964                  * bytes remaining for this data transfer?
1965                  */
1966                 if (status != SCI_SUCCESS ||
1967                     stp_req->type.pio.pio_transfer_bytes != 0)
1968                         return status;
1969
1970                 if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
1971                         scic_sds_request_set_status(sci_req,
1972                                                     SCU_TASK_DONE_CHECK_RESPONSE,
1973                                                     SCI_FAILURE_IO_RESPONSE_VALID);
1974
1975                         sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
1976                 } else {
1977                         sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1978                 }
1979                 return status;
1980         }
1981
1982         case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
1983                 struct dev_to_host_fis *frame_header;
1984                 u32 *frame_buffer;
1985
1986                 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1987                                                                        frame_index,
1988                                                                        (void **)&frame_header);
1989                 if (status != SCI_SUCCESS) {
1990                         dev_err(scic_to_dev(scic),
1991                                 "%s: SCIC IO Request 0x%p could not get frame "
1992                                 "header for frame index %d, status %x\n",
1993                                 __func__,
1994                                 stp_req,
1995                                 frame_index,
1996                                 status);
1997                         return status;
1998                 }
1999
2000                 switch (frame_header->fis_type) {
2001                 case FIS_REGD2H:
2002                         scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2003                                                                       frame_index,
2004                                                                       (void **)&frame_buffer);
2005
2006                         scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2007                                                                frame_header,
2008                                                                frame_buffer);
2009
2010                         /* The command has completed with an error */
2011                         scic_sds_request_set_status(sci_req,
2012                                                     SCU_TASK_DONE_CHECK_RESPONSE,
2013                                                     SCI_FAILURE_IO_RESPONSE_VALID);
2014                         break;
2015
2016                 default:
2017                         dev_warn(scic_to_dev(scic),
2018                                  "%s: IO Request:0x%p Frame Id:%d protocol "
2019                                  "violation occurred\n",
2020                                  __func__,
2021                                  stp_req,
2022                                  frame_index);
2023
2024                         scic_sds_request_set_status(sci_req,
2025                                                     SCU_TASK_DONE_UNEXP_FIS,
2026                                                     SCI_FAILURE_PROTOCOL_VIOLATION);
2027                         break;
2028                 }
2029
2030                 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
2031
2032                 /* Frame has been decoded; return it to the controller */
2033                 scic_sds_controller_release_frame(scic, frame_index);
2034
2035                 return status;
2036         }
2037         case SCI_REQ_ABORTING:
2038                 /*
2039                  * TODO: Is it even possible to get an unsolicited frame in the
2040                  * aborting state?
2041                  */
2042                 scic_sds_controller_release_frame(scic, frame_index);
2043                 return SCI_SUCCESS;
2044
2045         default:
2046                 dev_warn(scic_to_dev(scic),
2047                          "%s: SCIC IO Request given unexpected frame %x while "
2048                          "in state %d\n",
2049                          __func__,
2050                          frame_index,
2051                          state);
2052
2053                 scic_sds_controller_release_frame(scic, frame_index);
2054                 return SCI_FAILURE_INVALID_STATE;
2055         }
2056 }
2057
2058 static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *sci_req,
2059                                                        u32 completion_code)
2060 {
2061         enum sci_status status = SCI_SUCCESS;
2062
2063         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2064         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2065                 scic_sds_stp_request_udma_complete_request(sci_req,
2066                                                            SCU_TASK_DONE_GOOD,
2067                                                            SCI_SUCCESS);
2068                 break;
2069         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
2070         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
2071                 /* We must check the response buffer to see if the D2H
2072                  * Register FIS was received before we got the TC
2073                  * completion.
2074                  */
2075                 if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
2076                         scic_sds_remote_device_suspend(sci_req->target_device,
2077                                 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2078
2079                         scic_sds_stp_request_udma_complete_request(sci_req,
2080                                                                    SCU_TASK_DONE_CHECK_RESPONSE,
2081                                                                    SCI_FAILURE_IO_RESPONSE_VALID);
2082                 } else {
2083                         /* If we have an error completion status for the
2084                          * TC, then we can expect a D2H register FIS from
2085                          * the device, so we must change state to wait
2086                          * for it.
2087                          */
2088                         sci_change_state(&sci_req->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
2089                 }
2090                 break;
2091
2092         /* TODO Check to see if any of these completion status need to
2093          * wait for the device to host register fis.
2094          */
2095         /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
2096          * - this comes only for B0
2097          */
2098         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
2099         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
2100         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
2101         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
2102         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
2103                 scic_sds_remote_device_suspend(sci_req->target_device,
2104                         SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2105         /* Fall through to the default case */
2106         default:
2107                 /* All other completion statuses cause the IO to be complete. */
2108                 scic_sds_stp_request_udma_complete_request(sci_req,
2109                                         SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2110                                         SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2111                 break;
2112         }
2113
2114         return status;
2115 }
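
     /*
      * Editorial note: the UNEXP_FIS/REG_ERR handling above closes a race,
      * since the D2H Register FIS and the TC completion can arrive in
      * either order; the request is completed immediately only if the
      * response buffer already holds a FIS_REGD2H, otherwise the state
      * machine waits in SCI_REQ_STP_UDMA_WAIT_D2H for the FIS to arrive.
      */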
2116
2117 static enum sci_status
2118 stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req,
2119                                                    u32 completion_code)
2120 {
2121         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2122         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2123                 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2124                                             SCI_SUCCESS);
2125
2126                 sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
2127                 break;
2128
2129         default:
2130                 /*
2131                  * All other completion statuses cause the IO to be complete.
2132                  * If a NAK was received, then it is up to the user to retry
2133                  * the request.
2134                  */
2135                 scic_sds_request_set_status(sci_req,
2136                                             SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2137                                             SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2138
2139                 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
2140                 break;
2141         }
2142
2143         return SCI_SUCCESS;
2144 }
2145
2146 static enum sci_status
2147 stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct scic_sds_request *sci_req,
2148                                                      u32 completion_code)
2149 {
2150         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2151         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2152                 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2153                                             SCI_SUCCESS);
2154
2155                 sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
2156                 break;
2157
2158         default:
2159                 /* All other completion statuses cause the IO to be complete.  If
2160                  * a NAK was received, then it is up to the user to retry the
2161                  * request.
2162                  */
2163                 scic_sds_request_set_status(sci_req,
2164                         SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2165                         SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2166
2167                 sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
2168                 break;
2169         }
2170
2171         return SCI_SUCCESS;
2172 }
2173
2174 enum sci_status
2175 scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req,
2176                                   u32 completion_code)
2177 {
2178         enum sci_base_request_states state;
2179         struct scic_sds_controller *scic = sci_req->owning_controller;
2180
2181         state = sci_req->sm.current_state_id;
2182
2183         switch (state) {
2184         case SCI_REQ_STARTED:
2185                 return request_started_state_tc_event(sci_req, completion_code);
2186
2187         case SCI_REQ_TASK_WAIT_TC_COMP:
2188                 return ssp_task_request_await_tc_event(sci_req,
2189                                                        completion_code);
2190
2191         case SCI_REQ_SMP_WAIT_RESP:
2192                 return smp_request_await_response_tc_event(sci_req,
2193                                                            completion_code);
2194
2195         case SCI_REQ_SMP_WAIT_TC_COMP:
2196                 return smp_request_await_tc_event(sci_req, completion_code);
2197
2198         case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
2199                 return stp_request_udma_await_tc_event(sci_req,
2200                                                        completion_code);
2201
2202         case SCI_REQ_STP_NON_DATA_WAIT_H2D:
2203                 return stp_request_non_data_await_h2d_tc_event(sci_req,
2204                                                                completion_code);
2205
2206         case SCI_REQ_STP_PIO_WAIT_H2D:
2207                 return stp_request_pio_await_h2d_completion_tc_event(sci_req,
2208                                                                      completion_code);
2209
2210         case SCI_REQ_STP_PIO_DATA_OUT:
2211                 return pio_data_out_tx_done_tc_event(sci_req, completion_code);
2212
2213         case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
2214                 return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req,
2215                                                                           completion_code);
2216
2217         case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2218                 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req,
2219                                                                             completion_code);
2220
2221         case SCI_REQ_ABORTING:
2222                 return request_aborting_state_tc_event(sci_req,
2223                                                        completion_code);
2224
2225         default:
2226                 dev_warn(scic_to_dev(scic),
2227                          "%s: SCIC IO Request given task completion "
2228                          "notification %x while in wrong state %d\n",
2229                          __func__,
2230                          completion_code,
2231                          state);
2232                 return SCI_FAILURE_INVALID_STATE;
2233         }
2234 }
2235
2236 /**
2237  * isci_request_process_response_iu() - This function sets the status and
2238  *    response iu, in the task struct, from the request object for the upper
2239  *    layer driver.
2240  * @task: This parameter is the task struct from the upper layer driver.
2241  * @resp_iu: This parameter points to the response iu of the completed request.
2242  * @dev: This parameter specifies the linux device struct.
2243  *
2244  * none.
2245  */
2246 static void isci_request_process_response_iu(
2247         struct sas_task *task,
2248         struct ssp_response_iu *resp_iu,
2249         struct device *dev)
2250 {
2251         dev_dbg(dev,
2252                 "%s: resp_iu = %p "
2253                 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2254                 "resp_iu->response_data_len = %x, "
2255                 "resp_iu->sense_data_len = %x\nresponse data: ",
2256                 __func__,
2257                 resp_iu,
2258                 resp_iu->status,
2259                 resp_iu->datapres,
2260                 resp_iu->response_data_len,
2261                 resp_iu->sense_data_len);
2262
2263         task->task_status.stat = resp_iu->status;
2264
2265         /* libsas updates the task status fields based on the response iu. */
2266         sas_ssp_task_response(dev, task, resp_iu);
2267 }
2268
2269 /**
2270  * isci_request_set_open_reject_status() - This function prepares the I/O
2271  *    completion for OPEN_REJECT conditions.
2272  * @request: This parameter is the completed isci_request object.
2273  * @response_ptr: This parameter specifies the service response for the I/O.
2274  * @status_ptr: This parameter specifies the exec status for the I/O.
2275  * @complete_to_host_ptr: This parameter specifies the action to be taken by
2276  *    the LLDD with respect to completing this request or forcing an abort
2277  *    condition on the I/O.
2278  * @open_rej_reason: This parameter specifies the encoded reason for the
2279  *    abandon-class reject.
2280  *
2281  * none.
2282  */
2283 static void isci_request_set_open_reject_status(
2284         struct isci_request *request,
2285         struct sas_task *task,
2286         enum service_response *response_ptr,
2287         enum exec_status *status_ptr,
2288         enum isci_completion_selection *complete_to_host_ptr,
2289         enum sas_open_rej_reason open_rej_reason)
2290 {
2291         /* Task in the target is done. */
2292         request->complete_in_target       = true;
2293         *response_ptr                     = SAS_TASK_UNDELIVERED;
2294         *status_ptr                       = SAS_OPEN_REJECT;
2295         *complete_to_host_ptr             = isci_perform_normal_io_completion;
2296         task->task_status.open_rej_reason = open_rej_reason;
2297 }
2298
2299 /**
2300  * isci_request_handle_controller_specific_errors() - This function decodes
2301  *    controller-specific I/O completion error conditions.
2302  * @request: This parameter is the completed isci_request object.
2303  * @response_ptr: This parameter specifies the service response for the I/O.
2304  * @status_ptr: This parameter specifies the exec status for the I/O.
2305  * @complete_to_host_ptr: This parameter specifies the action to be taken by
2306  *    the LLDD with respect to completing this request or forcing an abort
2307  *    condition on the I/O.
2308  *
2309  * none.
2310  */
2311 static void isci_request_handle_controller_specific_errors(
2312         struct isci_remote_device *idev,
2313         struct isci_request *request,
2314         struct sas_task *task,
2315         enum service_response *response_ptr,
2316         enum exec_status *status_ptr,
2317         enum isci_completion_selection *complete_to_host_ptr)
2318 {
2319         unsigned int cstatus;
2320
2321         cstatus = request->sci.scu_status;
2322
2323         dev_dbg(&request->isci_host->pdev->dev,
2324                 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2325                 "- controller status = 0x%x\n",
2326                 __func__, request, cstatus);
2327
2328         /* Decode the controller-specific errors; most
2329          * important is to recognize those conditions in which
2330          * the target may still have a task outstanding that
2331          * must be aborted.
2332          *
2333          * Note that there are SCU completion codes being
2334          * named in the decode below for which SCIC has already
2335          * done work to handle them in a way other than as
2336          * a controller-specific completion code; these are left
2337          * in the decode below for completeness' sake.
2338          */
2339         switch (cstatus) {
2340         case SCU_TASK_DONE_DMASETUP_DIRERR:
2341         /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2342         case SCU_TASK_DONE_XFERCNT_ERR:
2343                 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2344                 if (task->task_proto == SAS_PROTOCOL_SMP) {
2345                         /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2346                         *response_ptr = SAS_TASK_COMPLETE;
2347
2348                         /* See if the device has been/is being stopped. Note
2349                          * that we ignore the quiesce state, since we are
2350                          * concerned about the actual device state.
2351                          */
2352                         if (!idev)
2353                                 *status_ptr = SAS_DEVICE_UNKNOWN;
2354                         else
2355                                 *status_ptr = SAS_ABORTED_TASK;
2356
2357                         request->complete_in_target = true;
2358
2359                         *complete_to_host_ptr =
2360                                 isci_perform_normal_io_completion;
2361                 } else {
2362                         /* Task in the target is not done. */
2363                         *response_ptr = SAS_TASK_UNDELIVERED;
2364
2365                         if (!idev)
2366                                 *status_ptr = SAS_DEVICE_UNKNOWN;
2367                         else
2368                                 *status_ptr = SAM_STAT_TASK_ABORTED;
2369
2370                         request->complete_in_target = false;
2371
2372                         *complete_to_host_ptr =
2373                                 isci_perform_error_io_completion;
2374                 }
2375
2376                 break;
2377
2378         case SCU_TASK_DONE_CRC_ERR:
2379         case SCU_TASK_DONE_NAK_CMD_ERR:
2380         case SCU_TASK_DONE_EXCESS_DATA:
2381         case SCU_TASK_DONE_UNEXP_FIS:
2382         /* Also SCU_TASK_DONE_UNEXP_RESP: */
2383         case SCU_TASK_DONE_VIIT_ENTRY_NV:       /* TODO - conditions? */
2384         case SCU_TASK_DONE_IIT_ENTRY_NV:        /* TODO - conditions? */
2385         case SCU_TASK_DONE_RNCNV_OUTBOUND:      /* TODO - conditions? */
2386                 /* These are conditions in which the target
2387                  * has completed the task, so that no cleanup
2388                  * is necessary.
2389                  */
2390                 *response_ptr = SAS_TASK_COMPLETE;
2391
2392                 /* See if the device has been/is being stopped. Note
2393                  * that we ignore the quiesce state, since we are
2394                  * concerned about the actual device state.
2395                  */
2396                 if (!idev)
2397                         *status_ptr = SAS_DEVICE_UNKNOWN;
2398                 else
2399                         *status_ptr = SAS_ABORTED_TASK;
2400
2401                 request->complete_in_target = true;
2402
2403                 *complete_to_host_ptr = isci_perform_normal_io_completion;
2404                 break;
2405
2407         /* Note that the only open reject completion codes seen here will be
2408          * abandon-class codes; all others are automatically retried in the SCU.
2409          */
2410         case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2411
2412                 isci_request_set_open_reject_status(
2413                         request, task, response_ptr, status_ptr,
2414                         complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2415                 break;
2416
2417         case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2418
2419                 /* Note - the return of AB0 will change when
2420                  * libsas implements detection of zone violations.
2421                  */
2422                 isci_request_set_open_reject_status(
2423                         request, task, response_ptr, status_ptr,
2424                         complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2425                 break;
2426
2427         case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2428
2429                 isci_request_set_open_reject_status(
2430                         request, task, response_ptr, status_ptr,
2431                         complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2432                 break;
2433
2434         case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2435
2436                 isci_request_set_open_reject_status(
2437                         request, task, response_ptr, status_ptr,
2438                         complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2439                 break;
2440
2441         case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2442
2443                 isci_request_set_open_reject_status(
2444                         request, task, response_ptr, status_ptr,
2445                         complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2446                 break;
2447
2448         case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2449
2450                 isci_request_set_open_reject_status(
2451                         request, task, response_ptr, status_ptr,
2452                         complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2453                 break;
2454
2455         case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2456
2457                 isci_request_set_open_reject_status(
2458                         request, task, response_ptr, status_ptr,
2459                         complete_to_host_ptr, SAS_OREJ_STP_NORES);
2460                 break;
2461
2462         case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2463
2464                 isci_request_set_open_reject_status(
2465                         request, task, response_ptr, status_ptr,
2466                         complete_to_host_ptr, SAS_OREJ_EPROTO);
2467                 break;
2468
2469         case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2470
2471                 isci_request_set_open_reject_status(
2472                         request, task, response_ptr, status_ptr,
2473                         complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2474                 break;
2475
2476         case SCU_TASK_DONE_LL_R_ERR:
2477         /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2478         case SCU_TASK_DONE_LL_PERR:
2479         case SCU_TASK_DONE_LL_SY_TERM:
2480         /* Also SCU_TASK_DONE_NAK_ERR:*/
2481         case SCU_TASK_DONE_LL_LF_TERM:
2482         /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2483         case SCU_TASK_DONE_LL_ABORT_ERR:
2484         case SCU_TASK_DONE_SEQ_INV_TYPE:
2485         /* Also SCU_TASK_DONE_UNEXP_XR: */
2486         case SCU_TASK_DONE_XR_IU_LEN_ERR:
2487         case SCU_TASK_DONE_INV_FIS_LEN:
2488         /* Also SCU_TASK_DONE_XR_WD_LEN: */
2489         case SCU_TASK_DONE_SDMA_ERR:
2490         case SCU_TASK_DONE_OFFSET_ERR:
2491         case SCU_TASK_DONE_MAX_PLD_ERR:
2492         case SCU_TASK_DONE_LF_ERR:
2493         case SCU_TASK_DONE_SMP_RESP_TO_ERR:  /* Escalate to dev reset? */
2494         case SCU_TASK_DONE_SMP_LL_RX_ERR:
2495         case SCU_TASK_DONE_UNEXP_DATA:
2496         case SCU_TASK_DONE_UNEXP_SDBFIS:
2497         case SCU_TASK_DONE_REG_ERR:
2498         case SCU_TASK_DONE_SDB_ERR:
2499         case SCU_TASK_DONE_TASK_ABORT:
2500         default:
2501                 /* Task in the target is not done. */
2502                 *response_ptr = SAS_TASK_UNDELIVERED;
2503                 *status_ptr = SAM_STAT_TASK_ABORTED;
2504
2505                 if (task->task_proto == SAS_PROTOCOL_SMP) {
2506                         request->complete_in_target = true;
2507
2508                         *complete_to_host_ptr = isci_perform_normal_io_completion;
2509                 } else {
2510                         request->complete_in_target = false;
2511
2512                         *complete_to_host_ptr = isci_perform_error_io_completion;
2513                 }
2514                 break;
2515         }
2516 }
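
/*
 * A minimal usage sketch of the decode above, mirroring the call site in
 * isci_request_io_request_complete() below (variable names illustrative):
 *
 *	enum service_response response;
 *	enum exec_status status;
 *	enum isci_completion_selection complete_to_host;
 *
 *	isci_request_handle_controller_specific_errors(idev, request, task,
 *						       &response, &status,
 *						       &complete_to_host);
 *
 * The three out-parameters then select how the request is queued back to
 * libsas; see isci_task_save_for_upper_layer_completion() below.
 */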
2517
2518 /**
2519  * isci_task_save_for_upper_layer_completion() - This function saves the
2520  *    request for later completion to the upper layer driver.
2521  * @host: This parameter is a pointer to the host on which the request
2522  *    should be queued (either as an error or success).
2523  * @request: This parameter is the completed request.
2524  * @response: This parameter is the response code for the completed task.
2525  * @status: This parameter is the status code for the completed task.
2526  * @task_notification_selection: This parameter specifies how the request
2527  *    is to be completed to the upper layer (normal, aborted, or error path).
2528  */
2529 static void isci_task_save_for_upper_layer_completion(
2530         struct isci_host *host,
2531         struct isci_request *request,
2532         enum service_response response,
2533         enum exec_status status,
2534         enum isci_completion_selection task_notification_selection)
2535 {
2536         struct sas_task *task = isci_request_access_task(request);
2537
2538         task_notification_selection
2539                 = isci_task_set_completion_status(task, response, status,
2540                                                   task_notification_selection);
2541
2542         /* Tasks aborted specifically by a call to the lldd_abort_task
2543          * function should not be completed to the host in the regular path.
2544          */
2545         switch (task_notification_selection) {
2546
2547         case isci_perform_normal_io_completion:
2548
2549                 /* Normal notification (task_done) */
2550                 dev_dbg(&host->pdev->dev,
2551                         "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
2552                         __func__,
2553                         task,
2554                         task->task_status.resp, response,
2555                         task->task_status.stat, status);
2556                 /* Add to the completed list. */
2557                 list_add(&request->completed_node,
2558                          &host->requests_to_complete);
2559
2560                 /* Take the request off the device's pending request list. */
2561                 list_del_init(&request->dev_node);
2562                 break;
2563
2564         case isci_perform_aborted_io_completion:
2565                 /* No notification to libsas because this request is
2566                  * already in the abort path.
2567                  */
2568                 dev_warn(&host->pdev->dev,
2569                          "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
2570                          __func__,
2571                          task,
2572                          task->task_status.resp, response,
2573                          task->task_status.stat, status);
2574
2575                 /* Wake up whatever process was waiting for this
2576                  * request to complete.
2577                  */
2578                 WARN_ON(request->io_request_completion == NULL);
2579
2580                 if (request->io_request_completion != NULL) {
2581
2582                         /* Signal whoever is waiting that this
2583                          * request is complete.
2584                          */
2585                         complete(request->io_request_completion);
2586                 }
2587                 break;
2588
2589         case isci_perform_error_io_completion:
2590                 /* Use sas_task_abort */
2591                 dev_warn(&host->pdev->dev,
2592                          "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
2593                          __func__,
2594                          task,
2595                          task->task_status.resp, response,
2596                          task->task_status.stat, status);
2597                 /* Add to the aborted list. */
2598                 list_add(&request->completed_node,
2599                          &host->requests_to_errorback);
2600                 break;
2601
2602         default:
2603                 dev_warn(&host->pdev->dev,
2604                          "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
2605                          __func__,
2606                          task,
2607                          task->task_status.resp, response,
2608                          task->task_status.stat, status);
2609
2610                 /* Add to the error to libsas list. */
2611                 list_add(&request->completed_node,
2612                          &host->requests_to_errorback);
2613                 break;
2614         }
2615 }
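
/*
 * Note: the lists populated above are drained later in the host completion
 * path (presumably isci_host_completion_routine() in host.c for this driver
 * version): entries on requests_to_complete are reported to libsas via
 * task->task_done(), while entries on requests_to_errorback are pushed into
 * the SCSI error handler via sas_task_abort().
 */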
2616
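/**
 * isci_request_io_request_complete() - This function is called when an I/O
 *    request completes in the core.  It decodes the isci request state and
 *    the core completion status, unmaps any DMA mappings made for the
 *    request, queues the request for completion to the upper layer, and
 *    finally completes the I/O back to the core.
 * @isci_host: This parameter specifies the host on which the request ran.
 * @request: This parameter is the completed request.
 * @completion_status: This parameter is the completion status from the core.
 */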
2617 static void isci_request_io_request_complete(struct isci_host *isci_host,
2618                                              struct isci_request *request,
2619                                              enum sci_io_status completion_status)
2620 {
2621         struct sas_task *task = isci_request_access_task(request);
2622         struct ssp_response_iu *resp_iu;
2623         void *resp_buf;
2624         unsigned long task_flags;
2625         struct isci_remote_device *idev = isci_lookup_device(task->dev);
2626         enum service_response response       = SAS_TASK_UNDELIVERED;
2627         enum exec_status status         = SAS_ABORTED_TASK;
2628         enum isci_request_status request_status;
2629         enum isci_completion_selection complete_to_host
2630                 = isci_perform_normal_io_completion;
2631
2632         dev_dbg(&isci_host->pdev->dev,
2633                 "%s: request = %p, task = %p,\n"
2634                 "task->data_dir = %d completion_status = 0x%x\n",
2635                 __func__,
2636                 request,
2637                 task,
2638                 task->data_dir,
2639                 completion_status);
2640
2641         spin_lock(&request->state_lock);
2642         request_status = isci_request_get_state(request);
2643
2644         /* Decode the request status.  Note that if the request has been
2645          * aborted by a task management function, we don't care
2646          * what the status is.
2647          */
2648         switch (request_status) {
2649
2650         case aborted:
2651                 /* "aborted" indicates that the request was aborted by a task
2652                  * management function, since once a task management request is
2653                  * performed by the device, the request only completes because
2654                  * of the subsequent driver terminate.
2655                  *
2656                  * Aborted also means an external thread is explicitly managing
2657                  * this request, so that we do not complete it up the stack.
2658                  *
2659                  * The target is still there (since the TMF was successful).
2660                  */
2661                 request->complete_in_target = true;
2662                 response = SAS_TASK_COMPLETE;
2663
2664                 /* See if the device has been/is being stopped. Note
2665                  * that we ignore the quiesce state, since we are
2666                  * concerned about the actual device state.
2667                  */
2668                 if (!idev)
2669                         status = SAS_DEVICE_UNKNOWN;
2670                 else
2671                         status = SAS_ABORTED_TASK;
2672
2673                 complete_to_host = isci_perform_aborted_io_completion;
2674                 /* This was an aborted request. */
2675
2676                 spin_unlock(&request->state_lock);
2677                 break;
2678
2679         case aborting:
2680                 /* aborting means that the task management function tried and
2681                  * failed to abort the request. We need to note the request
2682                  * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2683                  * target as down.
2684                  *
2685                  * Aborting also means an external thread is explicitly managing
2686                  * this request, so that we do not complete it up the stack.
2687                  */
2688                 request->complete_in_target = true;
2689                 response = SAS_TASK_UNDELIVERED;
2690
2691                 if (!idev)
2692                         /* The device has been /is being stopped. Note that
2693                          * we ignore the quiesce state, since we are
2694                          * concerned about the actual device state.
2695                          */
2696                         status = SAS_DEVICE_UNKNOWN;
2697                 else
2698                         status = SAS_PHY_DOWN;
2699
2700                 complete_to_host = isci_perform_aborted_io_completion;
2701
2702                 /* This was an aborted request. */
2703
2704                 spin_unlock(&request->state_lock);
2705                 break;
2706
2707         case terminating:
2708
2709                 /* This was a terminated request.  This happens when
2710                  * the I/O is being terminated because of an action on
2711                  * the device (reset, tear down, etc.), and the I/O needs
2712                  * to be completed up the stack.
2713                  */
2714                 request->complete_in_target = true;
2715                 response = SAS_TASK_UNDELIVERED;
2716
2717                 /* See if the device has been/is being stopped. Note
2718                  * that we ignore the quiesce state, since we are
2719                  * concerned about the actual device state.
2720                  */
2721                 if (!idev)
2722                         status = SAS_DEVICE_UNKNOWN;
2723                 else
2724                         status = SAS_ABORTED_TASK;
2725
2726                 complete_to_host = isci_perform_aborted_io_completion;
2727
2728                 /* This was a terminated request. */
2729
2730                 spin_unlock(&request->state_lock);
2731                 break;
2732
2733         case dead:
2734                 /* This was a terminated request that timed out during the
2735                  * termination process.  There is no task to complete to
2736                  * libsas.
2737                  */
2738                 complete_to_host = isci_perform_normal_io_completion;
2739                 spin_unlock(&request->state_lock);
2740                 break;
2741
2742         default:
2743
2744                 /* The request is done from an SCU HW perspective. */
2745                 request->status = completed;
2746
2747                 spin_unlock(&request->state_lock);
2748
2749                 /* This is an active request being completed from the core. */
2750                 switch (completion_status) {
2751
2752                 case SCI_IO_FAILURE_RESPONSE_VALID:
2753                         dev_dbg(&isci_host->pdev->dev,
2754                                 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2755                                 __func__,
2756                                 request,
2757                                 task);
2758
2759                         if (sas_protocol_ata(task->task_proto)) {
2760                                 resp_buf = &request->sci.stp.rsp;
2761                                 isci_request_process_stp_response(task,
2762                                                                   resp_buf);
2763                         } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2764
2765                                 /* crack the iu response buffer. */
2766                                 resp_iu = &request->sci.ssp.rsp;
2767                                 isci_request_process_response_iu(task, resp_iu,
2768                                                                  &isci_host->pdev->dev);
2769
2770                         } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2771
2772                                 dev_err(&isci_host->pdev->dev,
2773                                         "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2774                                         "SAS_PROTOCOL_SMP protocol\n",
2775                                         __func__);
2776
2777                         } else
2778                                 dev_err(&isci_host->pdev->dev,
2779                                         "%s: unknown protocol\n", __func__);
2780
2781                         /* use the task status set in the task struct by the
2782                          * isci_request_process_response_iu call.
2783                          */
2784                         request->complete_in_target = true;
2785                         response = task->task_status.resp;
2786                         status = task->task_status.stat;
2787                         break;
2788
2789                 case SCI_IO_SUCCESS:
2790                 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2791
2792                         response = SAS_TASK_COMPLETE;
2793                         status   = SAM_STAT_GOOD;
2794                         request->complete_in_target = true;
2795
2796                         if (task->task_proto == SAS_PROTOCOL_SMP) {
2797                                 void *rsp = &request->sci.smp.rsp;
2798
2799                                 dev_dbg(&isci_host->pdev->dev,
2800                                         "%s: SMP protocol completion\n",
2801                                         __func__);
2802
2803                                 sg_copy_from_buffer(
2804                                         &task->smp_task.smp_resp, 1,
2805                                         rsp, sizeof(struct smp_resp));
2806                         } else if (completion_status
2807                                    == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2808
2809                                 /* This was an SSP / STP / SATA transfer.
2810                                  * There is a possibility that less data than
2811                                  * the maximum was transferred.
2812                                  */
2813                                 u32 transferred_length = sci_req_tx_bytes(&request->sci);
2814
2815                                 task->task_status.residual
2816                                         = task->total_xfer_len - transferred_length;
2817
2818                                 /* If there were residual bytes, call this an
2819                                  * underrun.
2820                                  */
2821                                 if (task->task_status.residual != 0)
2822                                         status = SAS_DATA_UNDERRUN;
2823
2824                                 dev_dbg(&isci_host->pdev->dev,
2825                                         "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2826                                         __func__,
2827                                         status);
2828
2829                         } else
2830                                 dev_dbg(&isci_host->pdev->dev,
2831                                         "%s: SCI_IO_SUCCESS\n",
2832                                         __func__);
2833
2834                         break;
2835
2836                 case SCI_IO_FAILURE_TERMINATED:
2837                         dev_dbg(&isci_host->pdev->dev,
2838                                 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2839                                 __func__,
2840                                 request,
2841                                 task);
2842
2843                         /* The request was terminated explicitly.  No handling
2844                          * is needed in the SCSI error handler path.
2845                          */
2846                         request->complete_in_target = true;
2847                         response = SAS_TASK_UNDELIVERED;
2848
2849                         /* See if the device has been/is being stopped. Note
2850                          * that we ignore the quiesce state, since we are
2851                          * concerned about the actual device state.
2852                          */
2853                         if (!idev)
2854                                 status = SAS_DEVICE_UNKNOWN;
2855                         else
2856                                 status = SAS_ABORTED_TASK;
2857
2858                         complete_to_host = isci_perform_normal_io_completion;
2859                         break;
2860
2861                 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2862
2863                         isci_request_handle_controller_specific_errors(
2864                                 idev, request, task, &response, &status,
2865                                 &complete_to_host);
2866
2867                         break;
2868
2869                 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2870                         /* This is a special case, in that the I/O completion
2871                          * is telling us that the device needs a reset.
2872                          * In order for the device reset condition to be
2873                          * noticed, the I/O has to be handled in the error
2874                          * handler.  Set the reset flag and cause the
2875                          * SCSI error thread to be scheduled.
2876                          */
2877                         spin_lock_irqsave(&task->task_state_lock, task_flags);
2878                         task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2879                         spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2880
2881                         /* Fail the I/O. */
2882                         response = SAS_TASK_UNDELIVERED;
2883                         status = SAM_STAT_TASK_ABORTED;
2884
2885                         complete_to_host = isci_perform_error_io_completion;
2886                         request->complete_in_target = false;
2887                         break;
2888
2889                 case SCI_FAILURE_RETRY_REQUIRED:
2890
2891                         /* Fail the I/O so it can be retried. */
2892                         response = SAS_TASK_UNDELIVERED;
2893                         if (!idev)
2894                                 status = SAS_DEVICE_UNKNOWN;
2895                         else
2896                                 status = SAS_ABORTED_TASK;
2897
2898                         complete_to_host = isci_perform_normal_io_completion;
2899                         request->complete_in_target = true;
2900                         break;
2901
2902
2903                 default:
2904                         /* Catch any otherwise unhandled error codes here. */
2905                         dev_warn(&isci_host->pdev->dev,
2906                                  "%s: invalid completion code: 0x%x - "
2907                                  "isci_request = %p\n",
2908                                  __func__, completion_status, request);
2909
2910                         response = SAS_TASK_UNDELIVERED;
2911
2912                         /* See if the device has been/is being stopped. Note
2913                          * that we ignore the quiesce state, since we are
2914                          * concerned about the actual device state.
2915                          */
2916                         if (!idev)
2917                                 status = SAS_DEVICE_UNKNOWN;
2918                         else
2919                                 status = SAS_ABORTED_TASK;
2920
2921                         if (SAS_PROTOCOL_SMP == task->task_proto) {
2922                                 request->complete_in_target = true;
2923                                 complete_to_host = isci_perform_normal_io_completion;
2924                         } else {
2925                                 request->complete_in_target = false;
2926                                 complete_to_host = isci_perform_error_io_completion;
2927                         }
2928                         break;
2929                 }
2930                 break;
2931         }
2932
2933         switch (task->task_proto) {
2934         case SAS_PROTOCOL_SSP:
2935                 if (task->data_dir == DMA_NONE)
2936                         break;
2937                 if (task->num_scatter == 0)
2938                         /* 0 indicates a single dma address */
2939                         dma_unmap_single(&isci_host->pdev->dev,
2940                                          request->zero_scatter_daddr,
2941                                          task->total_xfer_len, task->data_dir);
2942                 else  /* unmap the sgl dma addresses */
2943                         dma_unmap_sg(&isci_host->pdev->dev, task->scatter,
2944                                      request->num_sg_entries, task->data_dir);
2945                 break;
2946         case SAS_PROTOCOL_SMP: {
2947                 struct scatterlist *sg = &task->smp_task.smp_req;
2948                 struct smp_req *smp_req;
2949                 void *kaddr;
2950
2951                 dma_unmap_sg(&isci_host->pdev->dev, sg, 1, DMA_TO_DEVICE);
2952
2953                 /* need to swab it back in case the command buffer is re-used */
2954                 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
2955                 smp_req = kaddr + sg->offset;
2956                 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
2957                 kunmap_atomic(kaddr, KM_IRQ0);
2958                 break;
2959         }
2960         default:
2961                 break;
2962         }
2963
2964         /* Put the completed request on the correct list */
2965         isci_task_save_for_upper_layer_completion(isci_host, request,
2966                                                   response, status,
2967                                                   complete_to_host);
2968
2969         /* complete the io request to the core. */
2970         scic_controller_complete_io(&isci_host->sci,
2971                                     request->sci.target_device,
2972                                     &request->sci);
2973         isci_put_device(idev);
2974
2975         /* set terminated handle so it cannot be completed or
2976          * terminated again, and to cause any calls into abort
2977          * task to recognize the already completed case.
2978          */
2979         request->terminated = true;
2980
2981         isci_host_can_dequeue(isci_host, 1);
2982 }
2983
2984 static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm)
2985 {
2986         struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
2987         struct isci_request *ireq = sci_req_to_ireq(sci_req);
2988         struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);
2989         struct sas_task *task;
2990
2991         /* XXX as hch said always creating an internal sas_task for tmf
2992          * requests would simplify the driver
2993          */
2994         task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
2995
2996         /* all unaccelerated request types (non-SSP or non-NCQ) are handled
2997          * with substates
2998          */
2999         if (!task && dev->dev_type == SAS_END_DEV) {
3000                 sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
3001         } else if (!task &&
3002                    (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
3003                     isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
3004                 sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
3005         } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
3006                 sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
3007         } else if (task && sas_protocol_ata(task->task_proto) &&
3008                    !task->ata_task.use_ncq) {
3009                 u32 state;
3010
3011                 if (task->data_dir == DMA_NONE)
3012                         state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
3013                 else if (task->ata_task.dma_xfer)
3014                         state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
3015                 else /* PIO */
3016                         state = SCI_REQ_STP_PIO_WAIT_H2D;
3017
3018                 sci_change_state(sm, state);
3019         }
3020 }
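
/*
 * For reference, the started-state dispatch above reduces to the following
 * (derived directly from the code; requests matching none of the arms, i.e.
 * accelerated SSP and NCQ I/O, simply remain in SCI_REQ_STARTED):
 *
 *	TMF to a SAS_END_DEV            -> SCI_REQ_TASK_WAIT_TC_COMP
 *	SATA soft-reset TMF             -> SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED
 *	SMP task                        -> SCI_REQ_SMP_WAIT_RESP
 *	non-NCQ ATA task, no data       -> SCI_REQ_STP_NON_DATA_WAIT_H2D
 *	non-NCQ ATA task, DMA transfer  -> SCI_REQ_STP_UDMA_WAIT_TC_COMP
 *	non-NCQ ATA task, PIO transfer  -> SCI_REQ_STP_PIO_WAIT_H2D
 */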
3021
3022 static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm)
3023 {
3024         struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3025         struct scic_sds_controller *scic = sci_req->owning_controller;
3026         struct isci_host *ihost = scic_to_ihost(scic);
3027         struct isci_request *ireq = sci_req_to_ireq(sci_req);
3028
3029         /* Tell the SCI_USER that the IO request is complete */
3030         if (!sci_req->is_task_management_request)
3031                 isci_request_io_request_complete(ihost, ireq,
3032                                                  sci_req->sci_status);
3033         else
3034                 isci_task_request_complete(ihost, ireq, sci_req->sci_status);
3035 }
3036
3037 static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm)
3038 {
3039         struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3040
3041         /* Setting the abort bit in the Task Context is required by the silicon. */
3042         sci_req->task_context_buffer->abort = 1;
3043 }
3044
3045 static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3046 {
3047         struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3048
3049         scic_sds_remote_device_set_working_request(sci_req->target_device,
3050                                                    sci_req);
3051 }
3052
3053 static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3054 {
3055         struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3056
3057         scic_sds_remote_device_set_working_request(sci_req->target_device,
3058                                                    sci_req);
3059 }
3060
3061 static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
3062 {
3063         struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3064
3065         scic_sds_remote_device_set_working_request(sci_req->target_device,
3066                                                    sci_req);
3067 }
3068
3069 static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
3070 {
3071         struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
3072         struct scu_task_context *task_context;
3073         struct host_to_dev_fis *h2d_fis;
3074         enum sci_status status;
3075
3076         /* Clear the SRST bit */
3077         h2d_fis = &sci_req->stp.cmd;
3078         h2d_fis->control = 0;
3079
3080         /* Clear the TC control bit */
3081         task_context = scic_sds_controller_get_task_context_buffer(
3082                 sci_req->owning_controller, sci_req->io_tag);
3083         task_context->control_frame = 0;
3084
3085         status = scic_controller_continue_io(sci_req);
3086         WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
3087 }
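
/*
 * Note: per the SATA soft-reset sequence, the first H2D FIS is sent with
 * SRST asserted; the enter function above de-asserts SRST (and clears the
 * task context control-frame bit) for the follow-up diagnostic H2D FIS
 * before continuing the I/O.
 */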
3088
3089 static const struct sci_base_state scic_sds_request_state_table[] = {
3090         [SCI_REQ_INIT] = { },
3091         [SCI_REQ_CONSTRUCTED] = { },
3092         [SCI_REQ_STARTED] = {
3093                 .enter_state = scic_sds_request_started_state_enter,
3094         },
3095         [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
3096                 .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
3097         },
3098         [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
3099         [SCI_REQ_STP_PIO_WAIT_H2D] = {
3100                 .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
3101         },
3102         [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
3103         [SCI_REQ_STP_PIO_DATA_IN] = { },
3104         [SCI_REQ_STP_PIO_DATA_OUT] = { },
3105         [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
3106         [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
3107         [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
3108                 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
3109         },
3110         [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
3111                 .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
3112         },
3113         [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
3114         [SCI_REQ_TASK_WAIT_TC_COMP] = { },
3115         [SCI_REQ_TASK_WAIT_TC_RESP] = { },
3116         [SCI_REQ_SMP_WAIT_RESP] = { },
3117         [SCI_REQ_SMP_WAIT_TC_COMP] = { },
3118         [SCI_REQ_COMPLETED] = {
3119                 .enter_state = scic_sds_request_completed_state_enter,
3120         },
3121         [SCI_REQ_ABORTING] = {
3122                 .enter_state = scic_sds_request_aborting_state_enter,
3123         },
3124         [SCI_REQ_FINAL] = { },
3125 };
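
/*
 * The .enter_state hooks in the table above are invoked by the request
 * state machine (sci_init_sm()/sci_change_state()) whenever a request
 * transitions into the corresponding state; entries left empty simply
 * have no entry action.
 */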
3126
3127 static void
3128 scic_sds_general_request_construct(struct scic_sds_controller *scic,
3129                                    struct scic_sds_remote_device *sci_dev,
3130                                    u16 io_tag,
3131                                    struct scic_sds_request *sci_req)
3132 {
3133         sci_init_sm(&sci_req->sm, scic_sds_request_state_table, SCI_REQ_INIT);
3134
3135         sci_req->io_tag = io_tag;
3136         sci_req->owning_controller = scic;
3137         sci_req->target_device = sci_dev;
3138         sci_req->protocol = SCIC_NO_PROTOCOL;
3139         sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3140
3141         sci_req->sci_status   = SCI_SUCCESS;
3142         sci_req->scu_status   = 0;
3143         sci_req->post_context = 0xFFFFFFFF;
3144
3145         sci_req->is_task_management_request = false;
3146
3147         if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
3148                 sci_req->was_tag_assigned_by_user = false;
3149                 sci_req->task_context_buffer = &sci_req->tc;
3150         } else {
3151                 sci_req->was_tag_assigned_by_user = true;
3152
3153                 sci_req->task_context_buffer =
3154                         scic_sds_controller_get_task_context_buffer(scic, io_tag);
3155         }
3156 }
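
/*
 * Both scic_io_request_construct() and scic_task_request_construct() below
 * build on this common construction before applying protocol-specific or
 * task-management-specific initialization.
 */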
3157
3158 static enum sci_status
3159 scic_io_request_construct(struct scic_sds_controller *scic,
3160                           struct scic_sds_remote_device *sci_dev,
3161                           u16 io_tag, struct scic_sds_request *sci_req)
3162 {
3163         struct domain_device *dev = sci_dev_to_domain(sci_dev);
3164         enum sci_status status = SCI_SUCCESS;
3165
3166         /* Build the common part of the request */
3167         scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
3168
3169         if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3170                 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3171
3172         if (dev->dev_type == SAS_END_DEV)
3173                 /* pass */;
3174         else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
3175                 memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd));
3176         else if (dev_is_expander(dev))
3177                 /* pass */;
3178         else
3179                 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3180
3181         memset(sci_req->task_context_buffer, 0,
3182                offsetof(struct scu_task_context, sgl_pair_ab));
3183
3184         return status;
3185 }
3186
3187 enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
3188                                             struct scic_sds_remote_device *sci_dev,
3189                                             u16 io_tag, struct scic_sds_request *sci_req)
3190 {
3191         struct domain_device *dev = sci_dev_to_domain(sci_dev);
3192         enum sci_status status = SCI_SUCCESS;
3193
3194         /* Build the common part of the request */
3195         scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
3196
3197         if (dev->dev_type == SAS_END_DEV ||
3198             dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
3199                 sci_req->is_task_management_request = true;
3200                 memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
3201         } else
3202                 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3203
3204         return status;
3205 }
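
/*
 * Note the asymmetry with scic_io_request_construct() above: task
 * management requests zero the entire task context, while normal I/O
 * requests zero it only up to the SGL pair fields, which are filled in
 * later during protocol-specific construction.
 */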
3206
3207 static enum sci_status isci_request_ssp_request_construct(
3208         struct isci_request *request)
3209 {
3210         enum sci_status status;
3211
3212         dev_dbg(&request->isci_host->pdev->dev,
3213                 "%s: request = %p\n",
3214                 __func__,
3215                 request);
3216         status = scic_io_request_construct_basic_ssp(&request->sci);
3217         return status;
3218 }
3219
3220 static enum sci_status isci_request_stp_request_construct(
3221         struct isci_request *request)
3222 {
3223         struct sas_task *task = isci_request_access_task(request);
3224         enum sci_status status;
3225         struct host_to_dev_fis *register_fis;
3226
3227         dev_dbg(&request->isci_host->pdev->dev,
3228                 "%s: request = %p\n",
3229                 __func__,
3230                 request);
3231
3232         /* Get the host_to_dev_fis from the core and copy
3233          * the fis from the task into it.
3234          */
3235         register_fis = isci_sata_task_to_fis_copy(task);
3236
3237         status = scic_io_request_construct_basic_sata(&request->sci);
3238
3239         /* Set the NCQ tag in the FIS, from the queued
3240          * command in the task.
3241          */
3242         if (isci_sata_is_task_ncq(task))
3243                 isci_sata_set_ncq_tag(register_fis, task);
3249
3250         return status;
3251 }
3252
3253 static enum sci_status
3254 scic_io_request_construct_smp(struct device *dev,
3255                               struct scic_sds_request *sci_req,
3256                               struct sas_task *task)
3257 {
3258         struct scatterlist *sg = &task->smp_task.smp_req;
3259         struct scic_sds_remote_device *sci_dev;
3260         struct scu_task_context *task_context;
3261         struct scic_sds_port *sci_port;
3262         struct smp_req *smp_req;
3263         void *kaddr;
3264         u8 req_len;
3265         u32 cmd;
3266
3267         kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
3268         smp_req = kaddr + sg->offset;
3269         /*
3270          * Look at the SMP request's header fields; for certain SAS 1.x SMP
3271          * functions under SAS 2.0, a zero request length really indicates
3272          * a non-zero default length.
3273          */
3274         if (smp_req->req_len == 0) {
3275                 switch (smp_req->func) {
3276                 case SMP_DISCOVER:
3277                 case SMP_REPORT_PHY_ERR_LOG:
3278                 case SMP_REPORT_PHY_SATA:
3279                 case SMP_REPORT_ROUTE_INFO:
3280                         smp_req->req_len = 2;
3281                         break;
3282                 case SMP_CONF_ROUTE_INFO:
3283                 case SMP_PHY_CONTROL:
3284                 case SMP_PHY_TEST_FUNCTION:
3285                         smp_req->req_len = 9;
3286                         break;
3287                         /* Default - zero is a valid default for 2.0. */
3288                 }
3289         }
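        /* e.g. a SMP_DISCOVER request submitted with a zero request length
         * is sent as though req_len were 2, per the defaults above.
         */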
3290         req_len = smp_req->req_len;
3291         sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3292         cmd = *(u32 *) smp_req;
3293         kunmap_atomic(kaddr, KM_IRQ0);
3294
3295         if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3296                 return SCI_FAILURE;
3297
3298         sci_req->protocol = SCIC_SMP_PROTOCOL;
3299
3300         /* The request data was byte swapped above, before the DMA mapping. */
3301
3302         task_context = scic_sds_request_get_task_context(sci_req);
3303
3304         sci_dev = scic_sds_request_get_device(sci_req);
3305         sci_port = scic_sds_request_get_port(sci_req);
3306
3307         /*
3308          * Fill in the TC with its required data
3309          * 00h
3310          */
3311         task_context->priority = 0;
3312         task_context->initiator_request = 1;
3313         task_context->connection_rate = sci_dev->connection_rate;
3314         task_context->protocol_engine_index =
3315                 scic_sds_controller_get_protocol_engine_group(scic);
3316         task_context->logical_port_index = scic_sds_port_get_index(sci_port);
3317         task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3318         task_context->abort = 0;
3319         task_context->valid = SCU_TASK_CONTEXT_VALID;
3320         task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3321
3322         /* 04h */
3323         task_context->remote_node_index = sci_dev->rnc.remote_node_index;
3324         task_context->command_code = 0;
3325         task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3326
3327         /* 08h */
3328         task_context->link_layer_control = 0;
3329         task_context->do_not_dma_ssp_good_response = 1;
3330         task_context->strict_ordering = 0;
3331         task_context->control_frame = 1;
3332         task_context->timeout_enable = 0;
3333         task_context->block_guard_enable = 0;
3334
3335         /* 0ch */
3336         task_context->address_modifier = 0;
3337
3338         /* 10h */
3339         task_context->ssp_command_iu_length = req_len;
3340
3341         /* 14h */
3342         task_context->transfer_length_bytes = 0;
3343
3344         /*
3345          * 18h ~ 30h, protocol specific
3346          * since the command IU has been built by the framework at this point,
3347          * we just copy the first DWord from the command IU to this location. */
3348         memcpy(&task_context->type.smp, &cmd, sizeof(u32));
3349
3350         /*
3351          * 40h
3352          * "For SMP you could program it to zero. We would prefer that way
3353          * so that done code will be consistent." - Venki
3354          */
3355         task_context->task_phase = 0;
3356
3357         if (sci_req->was_tag_assigned_by_user) {
3358                 /*
3359                  * Build the task context now since we have already read
3360                  * the data
3361                  */
3362                 sci_req->post_context =
3363                         (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3364                          (scic_sds_controller_get_protocol_engine_group(scic) <<
3365                           SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3366                          (scic_sds_port_get_index(sci_port) <<
3367                           SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3368                           ISCI_TAG_TCI(sci_req->io_tag));
3369         } else {
3370                 /*
3371                  * Build the task context now since we have already read
3372                  * the data.
3373                  * I/O tag index is not assigned because we have to wait
3374                  * until we get a TCi.
3375                  */
3376                 sci_req->post_context =
3377                         (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3378                          (scic_sds_controller_get_protocol_engine_group(scic) <<
3379                           SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3380                          (scic_sds_port_get_index(sci_port) <<
3381                           SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
3382         }
3383
3384         /*
3385          * Copy the physical address of the command buffer to the SCU Task
3386          * Context; the command buffer should not contain the command header.
3387          */
3388         task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
3389         task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
3390
3391         /* SMP response comes as UF, so no need to set response IU address. */
3392         task_context->response_iu_upper = 0;
3393         task_context->response_iu_lower = 0;
3394
3395         sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
3396
3397         return SCI_SUCCESS;
3398 }
3399
3400 /**
3401  * isci_smp_request_build() - This function builds the smp request.
3402  * @ireq: This parameter points to the isci_request allocated in the
3403  *    request construct function.
3404  *
3405  * SCI_SUCCESS on successful completion, or specific failure code.
3406  */
3407 static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3408 {
3409         struct sas_task *task = isci_request_access_task(ireq);
3410         struct device *dev = &ireq->isci_host->pdev->dev;
3411         struct scic_sds_request *sci_req = &ireq->sci;
3412         enum sci_status status;
3413
3414         status = scic_io_request_construct_smp(dev, sci_req, task);
3415         if (status != SCI_SUCCESS)
3416                 dev_warn(&ireq->isci_host->pdev->dev,
3417                          "%s: failed with status = %d\n",
3418                          __func__,
3419                          status);
3420
3421         return status;
3422 }
3423
3424 /**
3425  * isci_io_request_build() - This function builds the io request object.
3426  * @isci_host: This parameter specifies the ISCI host object
3427  * @request: This parameter points to the isci_request object allocated in the
3428  *    request construct function.
3429  * @sci_device: This parameter is the handle for the sci core's remote device
3430  *    object that is the destination for this request.
3431  *
3432  * SCI_SUCCESS on successful completion, or specific failure code.
3433  */
3434 static enum sci_status isci_io_request_build(
3435         struct isci_host *isci_host,
3436         struct isci_request *request,
3437         struct isci_remote_device *isci_device)
3438 {
3439         enum sci_status status = SCI_SUCCESS;
3440         struct sas_task *task = isci_request_access_task(request);
3441         struct scic_sds_remote_device *sci_device = &isci_device->sci;
3442
3443         dev_dbg(&isci_host->pdev->dev,
3444                 "%s: isci_device = 0x%p; request = %p, "
3445                 "num_scatter = %d\n",
3446                 __func__,
3447                 isci_device,
3448                 request,
3449                 task->num_scatter);
3450
3451         /* map the sgl addresses, if present.
3452          * libata does the mapping for sata devices
3453          * before we get the request.
3454          */
3455         if (task->num_scatter &&
3456             !sas_protocol_ata(task->task_proto) &&
3457             !(SAS_PROTOCOL_SMP & task->task_proto)) {
3458
3459                 request->num_sg_entries = dma_map_sg(
3460                         &isci_host->pdev->dev,
3461                         task->scatter,
3462                         task->num_scatter,
3463                         task->data_dir
3464                         );
3465
3466                 if (request->num_sg_entries == 0)
3467                         return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3468         }
3469
3470         /* build the common request object. For now,
3471          * we will let the core allocate the IO tag.
3472          */
3473         status = scic_io_request_construct(&isci_host->sci, sci_device,
3474                                            SCI_CONTROLLER_INVALID_IO_TAG,
3475                                            &request->sci);
3476
3477         if (status != SCI_SUCCESS) {
3478                 dev_warn(&isci_host->pdev->dev,
3479                          "%s: failed request construct\n",
3480                          __func__);
3481                 return SCI_FAILURE;
3482         }
3483
3484         switch (task->task_proto) {
3485         case SAS_PROTOCOL_SMP:
3486                 status = isci_smp_request_build(request);
3487                 break;
3488         case SAS_PROTOCOL_SSP:
3489                 status = isci_request_ssp_request_construct(request);
3490                 break;
3491         case SAS_PROTOCOL_SATA:
3492         case SAS_PROTOCOL_STP:
3493         case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3494                 status = isci_request_stp_request_construct(request);
3495                 break;
3496         default:
3497                 dev_warn(&isci_host->pdev->dev,
3498                          "%s: unknown protocol\n", __func__);
3499                 return SCI_FAILURE;
3500         }
3501
3502         return status;
3503 }
3504
3505 static struct isci_request *isci_request_alloc_core(struct isci_host *ihost,
3506                                                     gfp_t gfp_flags)
3507 {
3508         dma_addr_t handle;
3509         struct isci_request *ireq;
3510
3511         ireq = dma_pool_alloc(ihost->dma_pool, gfp_flags, &handle);
3512         if (!ireq) {
3513                 dev_warn(&ihost->pdev->dev,
3514                          "%s: dma_pool_alloc returned NULL\n", __func__);
3515                 return NULL;
3516         }
3517
3518         /* initialize the request object.       */
3519         spin_lock_init(&ireq->state_lock);
3520         ireq->request_daddr = handle;
3521         ireq->isci_host = ihost;
3522         ireq->io_request_completion = NULL;
3523         ireq->terminated = false;
3524
3525         ireq->num_sg_entries = 0;
3526
3527         ireq->complete_in_target = false;
3528
3529         INIT_LIST_HEAD(&ireq->completed_node);
3530         INIT_LIST_HEAD(&ireq->dev_node);
3531
3532         isci_request_change_state(ireq, allocated);
3533
3534         return ireq;
3535 }
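
/*
 * The two wrappers below specialize the core allocation for the two request
 * flavors this driver issues: external I/O carried by a sas_task, and
 * internal task management functions (TMFs).
 */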
3536
3537 static struct isci_request *isci_request_alloc_io(struct isci_host *ihost,
3538                                                   struct sas_task *task,
3539                                                   gfp_t gfp_flags)
3540 {
3541         struct isci_request *ireq;
3542
3543         ireq = isci_request_alloc_core(ihost, gfp_flags);
3544         if (ireq) {
3545                 ireq->ttype_ptr.io_task_ptr = task;
3546                 ireq->ttype = io_task;
3547                 task->lldd_task = ireq;
3548         }
3549         return ireq;
3550 }
3551
3552 struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost,
3553                                             struct isci_tmf *isci_tmf,
3554                                             gfp_t gfp_flags)
3555 {
3556         struct isci_request *ireq;
3557
3558         ireq = isci_request_alloc_core(ihost, gfp_flags);
3559         if (ireq) {
3560                 ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3561                 ireq->ttype = tmf_task;
3562         }
3563         return ireq;
3564 }
3565
3566 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
3567                          struct sas_task *task, gfp_t gfp_flags)
3568 {
3569         enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3570         struct isci_request *ireq;
3571         unsigned long flags;
3572         int ret = 0;
3573
3574         /* do common allocation and init of request object. */
3575         ireq = isci_request_alloc_io(ihost, task, gfp_flags);
3576         if (!ireq)
3577                 goto out;
3578
3579         status = isci_io_request_build(ihost, ireq, idev);
3580         if (status != SCI_SUCCESS) {
3581                 dev_warn(&ihost->pdev->dev,
3582                          "%s: request_construct failed - status = 0x%x\n",
3583                          __func__,
3584                          status);
3585                 goto out;
3586         }
3587
3588         spin_lock_irqsave(&ihost->scic_lock, flags);
3589
3590         if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
3591
3592                 if (isci_task_is_ncq_recovery(task)) {
3593
3594                         /* The device is in an NCQ recovery state.  Issue the
3595                          * request on the task side.  Note that it will
3596                          * complete on the I/O request side because the
3597                          * request was built that way (ie.
3598                          * request was built that way (i.e.
3599                          */
3600                         status = scic_controller_start_task(&ihost->sci,
3601                                                             &idev->sci,
3602                                                             &ireq->sci,
3603                                                             SCI_CONTROLLER_INVALID_IO_TAG);
3604                 } else {
3605                         status = SCI_FAILURE;
3606                 }
3607         } else {
3608
3609                 /* send the request, let the core assign the IO TAG.    */
3610                 status = scic_controller_start_io(&ihost->sci, &idev->sci,
3611                                                   &ireq->sci,
3612                                                   SCI_CONTROLLER_INVALID_IO_TAG);
3613         }
3614         if (status != SCI_SUCCESS &&
3615             status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3616                 dev_warn(&ihost->pdev->dev,
3617                          "%s: failed request start (0x%x)\n",
3618                          __func__, status);
3619                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3620                 goto out;
3621         }
3622
3623         /* Either I/O started OK, or the core has signaled that
3624          * the device needs a target reset.
3625          *
3626          * In either case, hold onto the I/O for later.
3627          *
3628          * Update its status and add it to the list in the
3629          * remote device object.
3630          */
3631         list_add(&ireq->dev_node, &idev->reqs_in_process);
3632
3633         if (status == SCI_SUCCESS) {
3634                 /* Save the tag for possible task mgmt later. */
3635                 ireq->io_tag = ireq->sci.io_tag;
3636                 isci_request_change_state(ireq, started);
3637         } else {
3638                 /* The request did not really start in the
3639                  * hardware, so clear the request handle
3640                  * here so no terminations will be done.
3641                  */
3642                 ireq->terminated = true;
3643                 isci_request_change_state(ireq, completed);
3644         }
3645         spin_unlock_irqrestore(&ihost->scic_lock, flags);
3646
3647         if (status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3648                 /* Signal libsas that we need the SCSI error
3649                  * handler thread to work on this I/O and that
3650                  * we want a device reset.
3651                  */
3652                 spin_lock_irqsave(&task->task_state_lock, flags);
3653                 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3654                 spin_unlock_irqrestore(&task->task_state_lock, flags);
3655
3656                 /* Cause this task to be scheduled in the SCSI error
3657                  * handler thread.
3658                  */
3659                 isci_execpath_callback(ihost, task, sas_task_abort);
3660
3661                 /* Change the status, since we are holding
3662                  * the I/O until it is managed by the SCSI
3663                  * error handler.
3664                  */
3665                 status = SCI_SUCCESS;
3666         }
3669
3670  out:
3671         if (status != SCI_SUCCESS) {
3672                 /* release dma memory on failure. */
3673                 isci_request_free(ihost, ireq);
3674                 ireq = NULL;
3675                 ret = SCI_FAILURE;
3676         }
3677
3678         return ret;
3679 }
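
/*
 * A hedged usage sketch: this entry point is driven from the libsas
 * execution path (the lldd_execute_task handler in task.c for this driver).
 * The caller looks up the isci_remote_device for task->dev, calls
 * isci_request_execute(ihost, idev, task, gfp_flags), and treats a nonzero
 * return as SCI_FAILURE (the task was never started).  On success the
 * request has been posted to the hardware and will complete through
 * isci_request_io_request_complete() above.
 */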