drivers/scsi/isci/request.c
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

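/*
 * to_sgl_element_pair() - look up an SGL element pair by index.
 *
 * The first two pairs are embedded in the task context itself
 * (sgl_pair_ab and sgl_pair_cd); any further pairs live in the
 * per-request sg_table, offset by those two embedded entries.
 */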
static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
                                                        int idx)
{
        if (idx == 0)
                return &ireq->tc->sgl_pair_ab;
        else if (idx == 1)
                return &ireq->tc->sgl_pair_cd;
        else if (idx < 0)
                return NULL;
        else
                return &ireq->sg_table[idx - 2];
}

static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
                                          struct isci_request *ireq, u32 idx)
{
        u32 offset;

        if (idx == 0) {
                offset = (void *) &ireq->tc->sgl_pair_ab -
                         (void *) &ihost->task_context_table[0];
                return ihost->task_context_dma + offset;
        } else if (idx == 1) {
                offset = (void *) &ireq->tc->sgl_pair_cd -
                         (void *) &ihost->task_context_table[0];
                return ihost->task_context_dma + offset;
        }

        return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
}

static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
{
        e->length = sg_dma_len(sg);
        e->address_upper = upper_32_bits(sg_dma_address(sg));
        e->address_lower = lower_32_bits(sg_dma_address(sg));
        e->address_modifier = 0;
}

static void sci_request_build_sgl(struct isci_request *ireq)
{
        struct isci_host *ihost = ireq->isci_host;
        struct sas_task *task = isci_request_access_task(ireq);
        struct scatterlist *sg = NULL;
        dma_addr_t dma_addr;
        u32 sg_idx = 0;
        struct scu_sgl_element_pair *scu_sg   = NULL;
        struct scu_sgl_element_pair *prev_sg  = NULL;

        if (task->num_scatter > 0) {
                sg = task->scatter;

                while (sg) {
                        scu_sg = to_sgl_element_pair(ireq, sg_idx);
                        init_sgl_element(&scu_sg->A, sg);
                        sg = sg_next(sg);
                        if (sg) {
                                init_sgl_element(&scu_sg->B, sg);
                                sg = sg_next(sg);
                        } else
                                memset(&scu_sg->B, 0, sizeof(scu_sg->B));

                        if (prev_sg) {
                                dma_addr = to_sgl_element_pair_dma(ihost,
                                                                   ireq,
                                                                   sg_idx);

                                prev_sg->next_pair_upper =
                                        upper_32_bits(dma_addr);
                                prev_sg->next_pair_lower =
                                        lower_32_bits(dma_addr);
                        }

                        prev_sg = scu_sg;
                        sg_idx++;
                }
        } else {        /* handle the case of no scatterlist */
                scu_sg = to_sgl_element_pair(ireq, sg_idx);

                dma_addr = dma_map_single(&ihost->pdev->dev,
                                          task->scatter,
                                          task->total_xfer_len,
                                          task->data_dir);

                ireq->zero_scatter_daddr = dma_addr;

                scu_sg->A.length = task->total_xfer_len;
                scu_sg->A.address_upper = upper_32_bits(dma_addr);
                scu_sg->A.address_lower = lower_32_bits(dma_addr);
        }

        if (scu_sg) {
                scu_sg->next_pair_upper = 0;
                scu_sg->next_pair_lower = 0;
        }
}
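
/*
 * Illustrative sketch (not part of the driver): for a five-element
 * scatterlist, sci_request_build_sgl() above produces three linked
 * SGL element pairs:
 *
 *   pair 0 (tc->sgl_pair_ab): A = sg[0], B = sg[1], next -> pair 1
 *   pair 1 (tc->sgl_pair_cd): A = sg[2], B = sg[3], next -> pair 2
 *   pair 2 (sg_table[0]):     A = sg[4], B zeroed,  next = 0
 */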

static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
{
        struct ssp_cmd_iu *cmd_iu;
        struct sas_task *task = isci_request_access_task(ireq);

        cmd_iu = &ireq->ssp.cmd;

        memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
        cmd_iu->add_cdb_len = 0;
        cmd_iu->_r_a = 0;
        cmd_iu->_r_b = 0;
        cmd_iu->en_fburst = 0; /* unsupported */
        cmd_iu->task_prio = task->ssp_task.task_prio;
        cmd_iu->task_attr = task->ssp_task.task_attr;
        cmd_iu->_r_c = 0;

        sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
                       sizeof(task->ssp_task.cdb) / sizeof(u32));
}

static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
{
        struct ssp_task_iu *task_iu;
        struct sas_task *task = isci_request_access_task(ireq);
        struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

        task_iu = &ireq->ssp.tmf;

        memset(task_iu, 0, sizeof(struct ssp_task_iu));

        memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

        task_iu->task_func = isci_tmf->tmf_code;
        task_iu->task_tag =
                (ireq->ttype == tmf_task) ?
                isci_tmf->io_tag :
                SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * scu_ssp_reqeust_construct_task_context() - fill in the SCU Task Context
 *    fields that are common to all types of SSP request.
 * @ireq: the request whose task context is being constructed.
 * @task_context: the buffer for the SCU task context being constructed.
 */
static void scu_ssp_reqeust_construct_task_context(
        struct isci_request *ireq,
        struct scu_task_context *task_context)
{
        dma_addr_t dma_addr;
        struct isci_remote_device *idev;
        struct isci_port *iport;

        idev = ireq->target_device;
        iport = idev->owning_port;

        /* Fill in the TC with its required data */
        task_context->abort = 0;
        task_context->priority = 0;
        task_context->initiator_request = 1;
        task_context->connection_rate = idev->connection_rate;
        task_context->protocol_engine_index = ISCI_PEG;
        task_context->logical_port_index = iport->physical_port_index;
        task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
        task_context->valid = SCU_TASK_CONTEXT_VALID;
        task_context->context_type = SCU_TASK_CONTEXT_TYPE;

        task_context->remote_node_index = idev->rnc.remote_node_index;
        task_context->command_code = 0;

        task_context->link_layer_control = 0;
        task_context->do_not_dma_ssp_good_response = 1;
        task_context->strict_ordering = 0;
        task_context->control_frame = 0;
        task_context->timeout_enable = 0;
        task_context->block_guard_enable = 0;

        task_context->address_modifier = 0;

        /* task_context->type.ssp.tag = ireq->io_tag; */
        task_context->task_phase = 0x01;

        ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
                              (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
                              (iport->physical_port_index <<
                               SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
                              ISCI_TAG_TCI(ireq->io_tag));

        /*
         * Copy the physical address for the command buffer to the
         * SCU Task Context
         */
        dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);

        task_context->command_iu_upper = upper_32_bits(dma_addr);
        task_context->command_iu_lower = lower_32_bits(dma_addr);

        /*
         * Copy the physical address for the response buffer to the
         * SCU Task Context
         */
        dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);

        task_context->response_iu_upper = upper_32_bits(dma_addr);
        task_context->response_iu_lower = lower_32_bits(dma_addr);
}

/**
 * scu_ssp_io_request_construct_task_context() - fill in the SCU Task
 *    Context for an SSP IO request.
 * @ireq: the request whose task context is being constructed.
 * @dir: the DMA data direction for the request.
 * @len: the transfer length of the request in bytes.
 */
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
                                                      enum dma_data_direction dir,
                                                      u32 len)
{
        struct scu_task_context *task_context = ireq->tc;

        scu_ssp_reqeust_construct_task_context(ireq, task_context);

        task_context->ssp_command_iu_length =
                sizeof(struct ssp_cmd_iu) / sizeof(u32);
        task_context->type.ssp.frame_type = SSP_COMMAND;

        switch (dir) {
        case DMA_FROM_DEVICE:
        case DMA_NONE:
        default:
                task_context->task_type = SCU_TASK_TYPE_IOREAD;
                break;
        case DMA_TO_DEVICE:
                task_context->task_type = SCU_TASK_TYPE_IOWRITE;
                break;
        }

        task_context->transfer_length_bytes = len;

        if (task_context->transfer_length_bytes > 0)
                sci_request_build_sgl(ireq);
}

/**
 * scu_ssp_task_request_construct_task_context() - fill in the SCU Task
 *    Context for an SSP Task request.  The following important settings are
 *    utilized: -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the
 *    task request is issued ahead of other tasks destined for the same
 *    Remote Node. -# task_type == SCU_TASK_TYPE_RAW_FRAME.  This indicates
 *    that a raw task frame is being utilized to perform task management. -#
 *    control_frame == 1.  This ensures that the proper endianness is set so
 *    that the bytes are transmitted in the right order for a task frame.
 * @ireq: This parameter specifies the task request object being
 *    constructed.
 */
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
        struct scu_task_context *task_context = ireq->tc;

        scu_ssp_reqeust_construct_task_context(ireq, task_context);

        task_context->control_frame                = 1;
        task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
        task_context->task_type                    = SCU_TASK_TYPE_RAW_FRAME;
        task_context->transfer_length_bytes        = 0;
        task_context->type.ssp.frame_type          = SSP_TASK;
        task_context->ssp_command_iu_length =
                sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * scu_sata_reqeust_construct_task_context() - fill in the SCU Task Context
 *    fields common to all types of SATA request.  This is called from the
 *    various SATA constructors.
 * @ireq: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * The general io request construction is complete, as is the buffer
 * assignment for the command buffer.  TODO: revisit task context
 * construction to determine what is common for SSP/SMP/STP task context
 * structures.
 */
static void scu_sata_reqeust_construct_task_context(
        struct isci_request *ireq,
        struct scu_task_context *task_context)
{
        dma_addr_t dma_addr;
        struct isci_remote_device *idev;
        struct isci_port *iport;

        idev = ireq->target_device;
        iport = idev->owning_port;

        /* Fill in the TC with its required data */
        task_context->abort = 0;
        task_context->priority = SCU_TASK_PRIORITY_NORMAL;
        task_context->initiator_request = 1;
        task_context->connection_rate = idev->connection_rate;
        task_context->protocol_engine_index = ISCI_PEG;
        task_context->logical_port_index = iport->physical_port_index;
        task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
        task_context->valid = SCU_TASK_CONTEXT_VALID;
        task_context->context_type = SCU_TASK_CONTEXT_TYPE;

        task_context->remote_node_index = idev->rnc.remote_node_index;
        task_context->command_code = 0;

        task_context->link_layer_control = 0;
        task_context->do_not_dma_ssp_good_response = 1;
        task_context->strict_ordering = 0;
        task_context->control_frame = 0;
        task_context->timeout_enable = 0;
        task_context->block_guard_enable = 0;

        task_context->address_modifier = 0;
        task_context->task_phase = 0x01;

        task_context->ssp_command_iu_length =
                (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

        /* Set the first word of the H2D REG FIS */
        task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;

        ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
                              (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
                              (iport->physical_port_index <<
                               SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
                              ISCI_TAG_TCI(ireq->io_tag));
        /*
         * Copy the physical address for the command buffer to the SCU Task
         * Context. We must offset the command buffer by 4 bytes because the
         * first 4 bytes are transferred in the body of the TC.
         */
        dma_addr = sci_io_request_get_dma_addr(ireq,
                                                ((char *) &ireq->stp.cmd) +
                                                sizeof(u32));

        task_context->command_iu_upper = upper_32_bits(dma_addr);
        task_context->command_iu_lower = lower_32_bits(dma_addr);

        /* SATA Requests do not have a response buffer */
        task_context->response_iu_upper = 0;
        task_context->response_iu_lower = 0;
}

static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
{
        struct scu_task_context *task_context = ireq->tc;

        scu_sata_reqeust_construct_task_context(ireq, task_context);

        task_context->control_frame         = 0;
        task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
        task_context->task_type             = SCU_TASK_TYPE_SATA_RAW_FRAME;
        task_context->type.stp.fis_type     = FIS_REGH2D;
        task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
                                                          bool copy_rx_frame)
{
        struct isci_stp_request *stp_req = &ireq->stp.req;

        scu_stp_raw_request_construct_task_context(ireq);

        stp_req->status = 0;
        stp_req->sgl.offset = 0;
        stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;

        if (copy_rx_frame) {
                sci_request_build_sgl(ireq);
                stp_req->sgl.index = 0;
        } else {
                /* The user does not want the data copied to the SGL buffer location */
                stp_req->sgl.index = -1;
        }

        return SCI_SUCCESS;
}

/**
 * sci_stp_optimized_request_construct() - construct an optimized STP request.
 * @ireq: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to be
 *    a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
 *    value of 1 indicates NCQ.
 * @len: the transfer length in bytes.
 * @dir: the DMA data direction for the transfer.
 *
 * This method performs request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).
 */
static void sci_stp_optimized_request_construct(struct isci_request *ireq,
                                                     u8 optimized_task_type,
                                                     u32 len,
                                                     enum dma_data_direction dir)
{
        struct scu_task_context *task_context = ireq->tc;

        /* Build the STP task context structure */
        scu_sata_reqeust_construct_task_context(ireq, task_context);

        /* Copy over the SGL elements */
        sci_request_build_sgl(ireq);

        /* Copy over the number of bytes to be transferred */
        task_context->transfer_length_bytes = len;

        if (dir == DMA_TO_DEVICE) {
                /*
                 * The difference between the DMA IN and DMA OUT request task type
                 * values is consistent with the difference between FPDMA READ
                 * and FPDMA WRITE values.  Add the supplied task type parameter
                 * to this difference to set the task type properly for this
                 * DATA OUT (WRITE) case. */
                task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
                                                                 - SCU_TASK_TYPE_DMA_IN);
        } else {
                /*
                 * For the DATA IN (READ) case, simply save the supplied
                 * optimized task type. */
                task_context->task_type = optimized_task_type;
        }
}
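
/*
 * Sketch of the task-type arithmetic above for the NCQ case (illustrative
 * only; the caller passes SCU_TASK_TYPE_FPDMAQ_READ):
 *
 *   DMA_TO_DEVICE:   FPDMAQ_READ + (SCU_TASK_TYPE_DMA_OUT -
 *                    SCU_TASK_TYPE_DMA_IN), i.e. the FPDMA write task type
 *   DMA_FROM_DEVICE: FPDMAQ_READ, used as supplied
 *
 * The read-to-write distance is the same for the DMA and FPDMA task-type
 * encodings, so a single addition covers both the UDMA and NCQ cases.
 */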
static enum sci_status
sci_io_request_construct_sata(struct isci_request *ireq,
                               u32 len,
                               enum dma_data_direction dir,
                               bool copy)
{
        struct sas_task *task = isci_request_access_task(ireq);

        /* check for management protocols */
        if (ireq->ttype == tmf_task) {
                struct isci_tmf *tmf = isci_request_access_tmf(ireq);

                if (tmf->tmf_code == isci_tmf_sata_srst_high ||
                    tmf->tmf_code == isci_tmf_sata_srst_low) {
                        scu_stp_raw_request_construct_task_context(ireq);
                        return SCI_SUCCESS;
                } else {
                        dev_err(&ireq->owning_controller->pdev->dev,
                                "%s: Request 0x%p received un-handled SAT "
                                "management protocol 0x%x.\n",
                                __func__, ireq, tmf->tmf_code);

                        return SCI_FAILURE;
                }
        }

        if (!sas_protocol_ata(task->task_proto)) {
                dev_err(&ireq->owning_controller->pdev->dev,
                        "%s: Non-ATA protocol in SATA path: 0x%x\n",
                        __func__,
                        task->task_proto);
                return SCI_FAILURE;
        }

        /* non data */
        if (task->data_dir == DMA_NONE) {
                scu_stp_raw_request_construct_task_context(ireq);
                return SCI_SUCCESS;
        }

        /* NCQ */
        if (task->ata_task.use_ncq) {
                sci_stp_optimized_request_construct(ireq,
                                                         SCU_TASK_TYPE_FPDMAQ_READ,
                                                         len, dir);
                return SCI_SUCCESS;
        }

        /* DMA */
        if (task->ata_task.dma_xfer) {
                sci_stp_optimized_request_construct(ireq,
                                                         SCU_TASK_TYPE_DMA_IN,
                                                         len, dir);
                return SCI_SUCCESS;
        } else /* PIO */
                return sci_stp_pio_request_construct(ireq, copy);
}
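
/*
 * Dispatch summary for sci_io_request_construct_sata() above (descriptive
 * only): SATA SRST TMFs and DMA_NONE commands are built as raw H2D
 * register frames; NCQ commands take the optimized path with
 * SCU_TASK_TYPE_FPDMAQ_READ; other DMA transfers take the optimized path
 * with SCU_TASK_TYPE_DMA_IN; everything else falls through to PIO
 * construction.
 */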

static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
{
        struct sas_task *task = isci_request_access_task(ireq);

        ireq->protocol = SCIC_SSP_PROTOCOL;

        scu_ssp_io_request_construct_task_context(ireq,
                                                  task->data_dir,
                                                  task->total_xfer_len);

        sci_io_request_build_ssp_command_iu(ireq);

        sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

        return SCI_SUCCESS;
}

enum sci_status sci_task_request_construct_ssp(
        struct isci_request *ireq)
{
        /* Construct the SSP Task SCU Task Context */
        scu_ssp_task_request_construct_task_context(ireq);

        /* Fill in the SSP Task IU */
        sci_task_request_build_ssp_task_iu(ireq);

        sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

        return SCI_SUCCESS;
}

static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
{
        enum sci_status status;
        bool copy = false;
        struct sas_task *task = isci_request_access_task(ireq);

        ireq->protocol = SCIC_STP_PROTOCOL;

        copy = (task->data_dir == DMA_NONE) ? false : true;

        status = sci_io_request_construct_sata(ireq,
                                                task->total_xfer_len,
                                                task->data_dir,
                                                copy);

        if (status == SCI_SUCCESS)
                sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

        return status;
}

enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
{
        enum sci_status status = SCI_SUCCESS;

        /* check for management protocols */
        if (ireq->ttype == tmf_task) {
                struct isci_tmf *tmf = isci_request_access_tmf(ireq);

                if (tmf->tmf_code == isci_tmf_sata_srst_high ||
                    tmf->tmf_code == isci_tmf_sata_srst_low) {
                        scu_stp_raw_request_construct_task_context(ireq);
                } else {
                        dev_err(&ireq->owning_controller->pdev->dev,
                                "%s: Request 0x%p received un-handled SAT "
                                "Protocol 0x%x.\n",
                                __func__, ireq, tmf->tmf_code);

                        return SCI_FAILURE;
                }
        }

        if (status != SCI_SUCCESS)
                return status;
        sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

        return status;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @ireq: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
        struct isci_host *ihost = ireq->owning_controller;
        u32 ret_val = 0;

        if (readl(&ihost->smu_registers->address_modifier) == 0) {
                void __iomem *scu_reg_base = ihost->scu_registers;

                /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
                 *   BAR1 is the scu_registers
                 *   0x20002C = 0x200000 + 0x2c
                 *            = start of task context SRAM + offset of (type.ssp.data_offset)
                 *   TCi is decoded from the io_tag of the request
                 */
                ret_val = readl(scu_reg_base +
                                (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
                                ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
        }

        return ret_val;
}
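
/*
 * Worked example for the SRAM read above (a sketch; assumes an io_tag
 * whose TCi decodes to 2 and the 256-byte task context size implied by
 * the formula in the comment):
 *
 *   scu_reg_base
 *     + 0x200000      task context SRAM base
 *     + 0x2c          offsetof(struct scu_task_context, type.ssp.data_offset)
 *     + 256 * 2       sizeof(struct scu_task_context) * TCi
 *   = scu_reg_base + 0x20022c
 *
 * which matches the BAR1 + 20002Ch + (256 * TCi) formula in the comment.
 */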

enum sci_status sci_request_start(struct isci_request *ireq)
{
        enum sci_base_request_states state;
        struct scu_task_context *tc = ireq->tc;
        struct isci_host *ihost = ireq->owning_controller;

        state = ireq->sm.current_state_id;
        if (state != SCI_REQ_CONSTRUCTED) {
                dev_warn(&ihost->pdev->dev,
                         "%s: SCIC IO Request requested to start while in wrong "
                         "state %d\n", __func__, state);
                return SCI_FAILURE_INVALID_STATE;
        }

        tc->task_index = ISCI_TAG_TCI(ireq->io_tag);

        switch (tc->protocol_type) {
        case SCU_TASK_CONTEXT_PROTOCOL_SMP:
        case SCU_TASK_CONTEXT_PROTOCOL_SSP:
                /* SSP/SMP Frame */
                tc->type.ssp.tag = ireq->io_tag;
                tc->type.ssp.target_port_transfer_tag = 0xFFFF;
                break;

        case SCU_TASK_CONTEXT_PROTOCOL_STP:
                /* STP/SATA Frame
                 * tc->type.stp.ncq_tag = ireq->ncq_tag;
                 */
                break;

        case SCU_TASK_CONTEXT_PROTOCOL_NONE:
                /* TODO: When do we set no protocol type? */
                break;

        default:
                /* This should never happen since we build the IO
                 * requests */
                break;
        }

        /* Add to the post_context the io tag value */
        ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);

        /* Everything is good; go ahead and change state */
        sci_change_state(&ireq->sm, SCI_REQ_STARTED);

        return SCI_SUCCESS;
}

enum sci_status
sci_io_request_terminate(struct isci_request *ireq)
{
        enum sci_base_request_states state;

        state = ireq->sm.current_state_id;

        switch (state) {
        case SCI_REQ_CONSTRUCTED:
                ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
                ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_STARTED:
        case SCI_REQ_TASK_WAIT_TC_COMP:
        case SCI_REQ_SMP_WAIT_RESP:
        case SCI_REQ_SMP_WAIT_TC_COMP:
        case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
        case SCI_REQ_STP_UDMA_WAIT_D2H:
        case SCI_REQ_STP_NON_DATA_WAIT_H2D:
        case SCI_REQ_STP_NON_DATA_WAIT_D2H:
        case SCI_REQ_STP_PIO_WAIT_H2D:
        case SCI_REQ_STP_PIO_WAIT_FRAME:
        case SCI_REQ_STP_PIO_DATA_IN:
        case SCI_REQ_STP_PIO_DATA_OUT:
        case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
        case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
        case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
                sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
                return SCI_SUCCESS;
        case SCI_REQ_TASK_WAIT_TC_RESP:
                sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_ABORTING:
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                return SCI_SUCCESS;
        case SCI_REQ_COMPLETED:
        default:
                dev_warn(&ireq->owning_controller->pdev->dev,
                         "%s: SCIC IO Request requested to abort while in wrong "
                         "state %d\n",
                         __func__,
                         ireq->sm.current_state_id);
                break;
        }

        return SCI_FAILURE_INVALID_STATE;
}

enum sci_status sci_request_complete(struct isci_request *ireq)
{
        enum sci_base_request_states state;
        struct isci_host *ihost = ireq->owning_controller;

        state = ireq->sm.current_state_id;
        if (WARN_ONCE(state != SCI_REQ_COMPLETED,
                      "isci: request completion from wrong state (%d)\n", state))
                return SCI_FAILURE_INVALID_STATE;

        if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
                sci_controller_release_frame(ihost,
                                                  ireq->saved_rx_frame_index);

        /* XXX can we just stop the machine and remove the 'final' state? */
        sci_change_state(&ireq->sm, SCI_REQ_FINAL);
        return SCI_SUCCESS;
}

enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
                                                  u32 event_code)
{
        enum sci_base_request_states state;
        struct isci_host *ihost = ireq->owning_controller;

        state = ireq->sm.current_state_id;

        if (state != SCI_REQ_STP_PIO_DATA_IN) {
                dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
                         __func__, event_code, state);

                return SCI_FAILURE_INVALID_STATE;
        }

        switch (scu_get_event_specifier(event_code)) {
        case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
                /* We are waiting for data and the SCU has R_ERR'd the data
                 * frame.  Go back to waiting for the D2H Register FIS
                 */
                sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
                return SCI_SUCCESS;
        default:
                dev_err(&ihost->pdev->dev,
                        "%s: pio request unexpected event %#x\n",
                        __func__, event_code);

                /* TODO Should we fail the PIO request when we get an
                 * unexpected event?
                 */
                return SCI_FAILURE;
        }
}

/*
 * sci_io_request_copy_response() - copy response data for requests
 *    returning response data instead of sense data.
 * @ireq: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void sci_io_request_copy_response(struct isci_request *ireq)
{
        void *resp_buf;
        u32 len;
        struct ssp_response_iu *ssp_response;
        struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

        ssp_response = &ireq->ssp.rsp;

        resp_buf = &isci_tmf->resp.resp_iu;

        len = min_t(u32,
                    SSP_RESP_IU_MAX_SIZE,
                    be32_to_cpu(ssp_response->response_data_len));

        memcpy(resp_buf, ssp_response->resp_data, len);
}

static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
                               u32 completion_code)
{
        struct ssp_response_iu *resp_iu;
        u8 datapres;

        /* TODO: Any SDMA return code other than 0 is bad; decode bits
         * 0x003C0000 to determine the SDMA status.
         */
        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
                ireq->scu_status = SCU_TASK_DONE_GOOD;
                ireq->sci_status = SCI_SUCCESS;
                break;
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
                /* There are times when the SCU hardware will return an early
                 * response because the io request specified more data than is
                 * returned by the target device (mode pages, inquiry data,
                 * etc.).  We must check the response status to see if this is
                 * truly a failed request or a good request that just got
                 * completed early.
                 */
                struct ssp_response_iu *resp = &ireq->ssp.rsp;
                ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

                sci_swab32_cpy(&ireq->ssp.rsp,
                               &ireq->ssp.rsp,
                               word_cnt);

                if (resp->status == 0) {
                        ireq->scu_status = SCU_TASK_DONE_GOOD;
                        ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
                } else {
                        ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
                        ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
                }
                break;
        }
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
                ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

                sci_swab32_cpy(&ireq->ssp.rsp,
                               &ireq->ssp.rsp,
                               word_cnt);

                ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
                ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
                break;
        }

        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
                /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
                 * guaranteed to be received before this completion status is
                 * posted?
                 */
                resp_iu = &ireq->ssp.rsp;
                datapres = resp_iu->datapres;

                if (datapres == 1 || datapres == 2) {
                        ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
                        ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
                } else {
                        ireq->scu_status = SCU_TASK_DONE_GOOD;
                        ireq->sci_status = SCI_SUCCESS;
                }
                break;
        /* only stp device gets suspended. */
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
                if (ireq->protocol == SCIC_STP_PROTOCOL) {
                        ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                                           SCU_COMPLETION_TL_STATUS_SHIFT;
                        ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
                } else {
                        ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                                           SCU_COMPLETION_TL_STATUS_SHIFT;
                        ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
                }
                break;

        /* both stp/ssp device gets suspended */
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
                ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                                   SCU_COMPLETION_TL_STATUS_SHIFT;
                ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
                break;

        /* neither ssp nor stp gets suspended. */
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
        default:
                ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                                   SCU_COMPLETION_TL_STATUS_SHIFT;
                ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
                break;
        }

        /*
         * TODO: This is probably wrong for ACK/NAK timeout conditions
         */

        /* In all cases we will treat this as the completion of the IO req. */
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
        return SCI_SUCCESS;
}

static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
                                u32 completion_code)
{
        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
        case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
                ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
                ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                break;

        default:
                /* Unless we get some strange error, wait for the task abort
                 * to complete.
                 * TODO: Should there be a state change for this completion?
                 */
                break;
        }

        return SCI_SUCCESS;
}

static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
                                                       u32 completion_code)
{
        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
                ireq->scu_status = SCU_TASK_DONE_GOOD;
                ireq->sci_status = SCI_SUCCESS;
                sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
                break;
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
                /* Currently, the decision is to simply allow the task request
                 * to timeout if the task IU wasn't received successfully.
                 * There is a potential for receiving multiple task responses if
                 * we decide to send the task IU again.
                 */
                dev_warn(&ireq->owning_controller->pdev->dev,
                         "%s: TaskRequest:0x%p CompletionCode:%x - "
                         "ACK/NAK timeout\n", __func__, ireq,
                         completion_code);

                sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
                break;
        default:
                /*
                 * All other completion status cause the IO to be complete.
                 * If a NAK was received, then it is up to the user to retry
                 * the request.
                 */
                ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
                ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                break;
        }

        return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_response_tc_event(struct isci_request *ireq,
                                    u32 completion_code)
{
        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
                /* In the AWAIT RESPONSE state, any TC completion is
                 * unexpected, but if the TC has success status, we
                 * complete the IO anyway.
                 */
                ireq->scu_status = SCU_TASK_DONE_GOOD;
                ireq->sci_status = SCI_SUCCESS;
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                break;
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
                /* These statuses have been seen in a specific LSI
                 * expander, which sometimes is not able to send an smp
                 * response within 2 ms.  This causes our hardware to
                 * break the connection and set the TC completion with
                 * one of these SMP_XXX_XX_ERR statuses.  For these types
                 * of error, we ask the ihost user to retry the request.
                 */
                ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
                ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                break;
        default:
                /* All other completion status cause the IO to be complete.  If a NAK
                 * was received, then it is up to the user to retry the request
                 */
                ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
                ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                break;
        }

        return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_tc_event(struct isci_request *ireq,
                           u32 completion_code)
{
        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
                ireq->scu_status = SCU_TASK_DONE_GOOD;
                ireq->sci_status = SCI_SUCCESS;
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                break;
        default:
                /* All other completion status cause the IO to be
                 * complete.  If a NAK was received, then it is up to
                 * the user to retry the request.
                 */
                ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
                ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                break;
        }

        return SCI_SUCCESS;
}

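/*
 * pio_sgl_next() - advance the PIO SGL cursor to the next element.
 *
 * Walks element A, then element B of the current pair, then follows the
 * next_pair link to element A of the following pair.  A zeroed B address
 * or a zeroed next-pair pointer terminates the walk with NULL.
 */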
static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
{
        struct scu_sgl_element *sgl;
        struct scu_sgl_element_pair *sgl_pair;
        struct isci_request *ireq = to_ireq(stp_req);
        struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;

        sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
        if (!sgl_pair)
                sgl = NULL;
        else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
                if (sgl_pair->B.address_lower == 0 &&
                    sgl_pair->B.address_upper == 0) {
                        sgl = NULL;
                } else {
                        pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
                        sgl = &sgl_pair->B;
                }
        } else {
                if (sgl_pair->next_pair_lower == 0 &&
                    sgl_pair->next_pair_upper == 0) {
                        sgl = NULL;
                } else {
                        pio_sgl->index++;
                        pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
                        sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
                        sgl = &sgl_pair->A;
                }
        }

        return sgl;
}

static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
                                        u32 completion_code)
{
        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
                ireq->scu_status = SCU_TASK_DONE_GOOD;
                ireq->sci_status = SCI_SUCCESS;
                sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
                break;

        default:
                /* All other completion status cause the IO to be
                 * complete.  If a NAK was received, then it is up to
                 * the user to retry the request.
                 */
                ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
                ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                break;
        }

        return SCI_SUCCESS;
}

#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */

/* transmit a DATA_FIS from (current sgl + offset) for the input
 * parameter length.  The current sgl and offset are already stored in
 * the IO request.
 */
static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
        struct isci_request *ireq,
        u32 length)
{
        struct isci_stp_request *stp_req = &ireq->stp.req;
        struct scu_task_context *task_context = ireq->tc;
        struct scu_sgl_element_pair *sgl_pair;
        struct scu_sgl_element *current_sgl;

        /* Recycle the TC and reconstruct it to send out a DATA FIS
         * containing the data from current_sgl + offset for the input
         * length.
         */
        sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
        if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
                current_sgl = &sgl_pair->A;
        else
                current_sgl = &sgl_pair->B;

        /* update the TC */
        task_context->command_iu_upper = current_sgl->address_upper;
        task_context->command_iu_lower = current_sgl->address_lower;
        task_context->transfer_length_bytes = length;
        task_context->type.stp.fis_type = FIS_DATA;

        /* send the new TC out. */
        return sci_controller_continue_io(ireq);
}

static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
{
        struct isci_stp_request *stp_req = &ireq->stp.req;
        struct scu_sgl_element_pair *sgl_pair;
        struct scu_sgl_element *sgl;
        enum sci_status status;
        u32 offset;
        u32 len = 0;

        offset = stp_req->sgl.offset;
        sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
        if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
                return SCI_FAILURE;

        if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
                sgl = &sgl_pair->A;
                len = sgl_pair->A.length - offset;
        } else {
                sgl = &sgl_pair->B;
                len = sgl_pair->B.length - offset;
        }

        if (stp_req->pio_len == 0)
                return SCI_SUCCESS;

        if (stp_req->pio_len >= len) {
                status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
                if (status != SCI_SUCCESS)
                        return status;
                stp_req->pio_len -= len;

                /* update the current sgl, offset and save for future */
                sgl = pio_sgl_next(stp_req);
                offset = 0;
        } else {
                /* Partial frame: capture the transmit status since it is
                 * the value returned to the caller below.
                 */
                status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);

                /* Sgl offset will be adjusted and saved for future */
                offset += stp_req->pio_len;
                sgl->address_lower += stp_req->pio_len;
                stp_req->pio_len = 0;
        }

        stp_req->sgl.offset = offset;

        return status;
}

/**
 * sci_stp_request_pio_data_in_copy_data_buffer() - copy data into the
 *    request's SGL-described data region.
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * Copy the data from the buffer, for the length specified, to the IO
 * request's SGL-specified data region.  Returns an enum sci_status.
 */
static enum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
                                                  u8 *data_buf, u32 len)
{
        struct isci_request *ireq;
        u8 *src_addr;
        int copy_len;
        struct sas_task *task;
        struct scatterlist *sg;
        void *kaddr;
        int total_len = len;

        ireq = to_ireq(stp_req);
        task = isci_request_access_task(ireq);
        src_addr = data_buf;

        if (task->num_scatter > 0) {
                sg = task->scatter;

                while (total_len > 0) {
                        struct page *page = sg_page(sg);

                        copy_len = min_t(int, total_len, sg_dma_len(sg));
                        kaddr = kmap_atomic(page, KM_IRQ0);
                        memcpy(kaddr + sg->offset, src_addr, copy_len);
                        kunmap_atomic(kaddr, KM_IRQ0);
                        total_len -= copy_len;
                        src_addr += copy_len;
                        sg = sg_next(sg);
                }
        } else {
                BUG_ON(task->total_xfer_len < total_len);
                memcpy(task->scatter, src_addr, total_len);
        }

        return SCI_SUCCESS;
}

/**
 * sci_stp_request_pio_data_in_copy_data() - copy a received data buffer
 *    into the request.
 * @stp_req: The PIO DATA IN request that is to receive the data.
 * @data_buffer: The buffer to copy from.
 *
 * Copy the data buffer to the io request data region.  Returns an enum
 * sci_status.
 */
static enum sci_status sci_stp_request_pio_data_in_copy_data(
        struct isci_stp_request *stp_req,
        u8 *data_buffer)
1290 {
1291         enum sci_status status;
1292
1293         /*
1294          * If there is less than 1K remaining in the transfer request
1295          * copy just the data for the transfer */
1296         if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
1297                 status = sci_stp_request_pio_data_in_copy_data_buffer(
1298                         stp_req, data_buffer, stp_req->pio_len);
1299
1300                 if (status == SCI_SUCCESS)
1301                         stp_req->pio_len = 0;
1302         } else {
1303                 /* We are transferring the whole frame, so copy it */
1304                 status = sci_stp_request_pio_data_in_copy_data_buffer(
1305                         stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1306
1307                 if (status == SCI_SUCCESS)
1308                         stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
1309         }
1310
1311         return status;
1312 }
1313
1314 static enum sci_status
1315 stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
1316                                               u32 completion_code)
1317 {
1318         enum sci_status status = SCI_SUCCESS;
1319
1320         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1321         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1322                 ireq->scu_status = SCU_TASK_DONE_GOOD;
1323                 ireq->sci_status = SCI_SUCCESS;
1324                 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1325                 break;
1326
1327         default:
1328                 /* All other completion statuses cause the IO to be
1329                  * complete.  If a NAK was received, then it is up to
1330                  * the user to retry the request.
1331                  */
1332                 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1333                 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1334                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1335                 break;
1336         }
1337
1338         return status;
1339 }
1340
1341 static enum sci_status
1342 pio_data_out_tx_done_tc_event(struct isci_request *ireq,
1343                               u32 completion_code)
1344 {
1345         enum sci_status status = SCI_SUCCESS;
1346         bool all_frames_transferred = false;
1347         struct isci_stp_request *stp_req = &ireq->stp.req;
1348
1349         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1350         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1351                 /* Transmit data */
1352                 if (stp_req->pio_len != 0) {
1353                         status = sci_stp_request_pio_data_out_transmit_data(ireq);
1354                         if (status == SCI_SUCCESS) {
1355                                 if (stp_req->pio_len == 0)
1356                                         all_frames_transferred = true;
1357                         }
1358                 } else if (stp_req->pio_len == 0) {
1359                         /*
1360                          * this will happen if all of the data was written
1361                          * the first time, after the pio setup fis was received
1362                          */
1363                         all_frames_transferred  = true;
1364                 }
1365
1366                 /* all data transferred. */
1367                 if (all_frames_transferred) {
1368                         /*
1369                          * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
1370                          * and wait for a PIO_SETUP or D2H Register fis. */
1371                         sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1372                 }
1373                 break;
1374
1375         default:
1376                 /*
1377                  * All other completion statuses cause the IO to be complete.
1378                  * If a NAK was received, then it is up to the user to retry
1379                  * the request.
1380                  */
1381                 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1382                 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1383                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1384                 break;
1385         }
1386
1387         return status;
1388 }
1389
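/* Copy a D2H register fis received as an unsolicited frame into the
 * request's response area, then hand the frame back to the controller.
 */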
1390 static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
1391                                                                        u32 frame_index)
1392 {
1393         struct isci_host *ihost = ireq->owning_controller;
1394         struct dev_to_host_fis *frame_header;
1395         enum sci_status status;
1396         u32 *frame_buffer;
1397
1398         status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1399                                                                frame_index,
1400                                                                (void **)&frame_header);
1401
1402         if ((status == SCI_SUCCESS) &&
1403             (frame_header->fis_type == FIS_REGD2H)) {
1404                 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1405                                                               frame_index,
1406                                                               (void **)&frame_buffer);
1407
1408                 sci_controller_copy_sata_response(&ireq->stp.rsp,
1409                                                        frame_header,
1410                                                        frame_buffer);
1411         }
1412
1413         sci_controller_release_frame(ihost, frame_index);
1414
1415         return status;
1416 }
1417
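/*
 * Unsolicited frame dispatch: decode the frame according to the request's
 * current state; every path either releases the frame back to the
 * controller or saves its index for a later copy.
 */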
1418 enum sci_status
1419 sci_io_request_frame_handler(struct isci_request *ireq,
1420                                   u32 frame_index)
1421 {
1422         struct isci_host *ihost = ireq->owning_controller;
1423         struct isci_stp_request *stp_req = &ireq->stp.req;
1424         enum sci_base_request_states state;
1425         enum sci_status status;
1426         ssize_t word_cnt;
1427
1428         state = ireq->sm.current_state_id;
1429         switch (state)  {
1430         case SCI_REQ_STARTED: {
1431                 struct ssp_frame_hdr ssp_hdr;
1432                 void *frame_header;
1433
1434                 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1435                                                               frame_index,
1436                                                               &frame_header);
1437
1438                 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1439                 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1440
1441                 if (ssp_hdr.frame_type == SSP_RESPONSE) {
1442                         struct ssp_response_iu *resp_iu;
1443                         ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1444
1445                         sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1446                                                                       frame_index,
1447                                                                       (void **)&resp_iu);
1448
1449                         sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
1450
1451                         resp_iu = &ireq->ssp.rsp;
1452
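                        /* datapres 0x01 (response data) or 0x02 (sense data)
                         * means the target returned extra status that must be
                         * examined, so flag the completion for a response check.
                         */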
1453                         if (resp_iu->datapres == 0x01 ||
1454                             resp_iu->datapres == 0x02) {
1455                                 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1456                                 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1457                         } else {
1458                                 ireq->scu_status = SCU_TASK_DONE_GOOD;
1459                                 ireq->sci_status = SCI_SUCCESS;
1460                         }
1461                 } else {
1462                         /* not a response frame, why did it get forwarded? */
1463                         dev_err(&ihost->pdev->dev,
1464                                 "%s: SCIC IO Request 0x%p received unexpected "
1465                                 "frame %d type 0x%02x\n", __func__, ireq,
1466                                 frame_index, ssp_hdr.frame_type);
1467                 }
1468
1469                 /*
1470                  * In any case we are done with this frame buffer; return it
1471                  * to the controller.
1472                  */
1473                 sci_controller_release_frame(ihost, frame_index);
1474
1475                 return SCI_SUCCESS;
1476         }
1477
1478         case SCI_REQ_TASK_WAIT_TC_RESP:
1479                 sci_io_request_copy_response(ireq);
1480                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1481                 sci_controller_release_frame(ihost, frame_index);
1482                 return SCI_SUCCESS;
1483
1484         case SCI_REQ_SMP_WAIT_RESP: {
1485                 struct smp_resp *rsp_hdr = &ireq->smp.rsp;
1486                 void *frame_header;
1487
1488                 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1489                                                               frame_index,
1490                                                               &frame_header);
1491
1492                 /* byte swap the header. */
1493                 word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
1494                 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
1495
1496                 if (rsp_hdr->frame_type == SMP_RESPONSE) {
1497                         void *smp_resp;
1498
1499                         sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1500                                                                       frame_index,
1501                                                                       &smp_resp);
1502
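                        /* Byte swap the remainder of the response (the part
                         * past the header) into the local copy as well.
                         */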
1503                         word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
1504                                 sizeof(u32);
1505
1506                         sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1507                                        smp_resp, word_cnt);
1508
1509                         ireq->scu_status = SCU_TASK_DONE_GOOD;
1510                         ireq->sci_status = SCI_SUCCESS;
1511                         sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1512                 } else {
1513                         /*
1514                          * This was not a response frame; why did it get
1515                          * forwarded?
1516                          */
1517                         dev_err(&ihost->pdev->dev,
1518                                 "%s: SCIC SMP Request 0x%p received unexpected "
1519                                 "frame %d type 0x%02x\n",
1520                                 __func__,
1521                                 ireq,
1522                                 frame_index,
1523                                 rsp_hdr->frame_type);
1524
1525                         ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
1526                         ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1527                         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1528                 }
1529
1530                 sci_controller_release_frame(ihost, frame_index);
1531
1532                 return SCI_SUCCESS;
1533         }
1534
1535         case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1536                 return sci_stp_request_udma_general_frame_handler(ireq,
1537                                                                        frame_index);
1538
1539         case SCI_REQ_STP_UDMA_WAIT_D2H:
1540                 /* Use the general frame handler to copy the response data */
1541                 status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
1542
1543                 if (status != SCI_SUCCESS)
1544                         return status;
1545
1546                 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1547                 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1548                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1549                 return SCI_SUCCESS;
1550
1551         case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
1552                 struct dev_to_host_fis *frame_header;
1553                 u32 *frame_buffer;
1554
1555                 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1556                                                                        frame_index,
1557                                                                        (void **)&frame_header);
1558
1559                 if (status != SCI_SUCCESS) {
1560                         dev_err(&ihost->pdev->dev,
1561                                 "%s: SCIC IO Request 0x%p could not get frame "
1562                                 "header for frame index %d, status %x\n",
1563                                 __func__,
1564                                 stp_req,
1565                                 frame_index,
1566                                 status);
1567
1568                         return status;
1569                 }
1570
1571                 switch (frame_header->fis_type) {
1572                 case FIS_REGD2H:
1573                         sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1574                                                                       frame_index,
1575                                                                       (void **)&frame_buffer);
1576
1577                         sci_controller_copy_sata_response(&ireq->stp.rsp,
1578                                                                frame_header,
1579                                                                frame_buffer);
1580
1581                         /* The command has completed with error */
1582                         ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1583                         ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1584                         break;
1585
1586                 default:
1587                         dev_warn(&ihost->pdev->dev,
1588                                  "%s: IO Request:0x%p Frame Id:%d protocol "
1589                                   "violation occurred\n", __func__, stp_req,
1590                                   frame_index);
1591
1592                         ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1593                         ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1594                         break;
1595                 }
1596
1597                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1598
1599                 /* Frame has been decoded; return it to the controller */
1600                 sci_controller_release_frame(ihost, frame_index);
1601
1602                 return status;
1603         }
1604
1605         case SCI_REQ_STP_PIO_WAIT_FRAME: {
1606                 struct sas_task *task = isci_request_access_task(ireq);
1607                 struct dev_to_host_fis *frame_header;
1608                 u32 *frame_buffer;
1609
1610                 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1611                                                                        frame_index,
1612                                                                        (void **)&frame_header);
1613
1614                 if (status != SCI_SUCCESS) {
1615                         dev_err(&ihost->pdev->dev,
1616                                 "%s: SCIC IO Request 0x%p could not get frame "
1617                                 "header for frame index %d, status %x\n",
1618                                 __func__, stp_req, frame_index, status);
1619                         return status;
1620                 }
1621
1622                 switch (frame_header->fis_type) {
1623                 case FIS_PIO_SETUP:
1624                         /* Get the PIO Setup Data from the frame buffer */
1625                         sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1626                                                                       frame_index,
1627                                                                       (void **)&frame_buffer);
1628
1629                         /* Get the data from the PIO Setup.  The SCU hardware
1630                          * returns the first word in the frame_header and the
1631                          * rest of the data in the frame buffer, so we need to
1632                          * back up one dword.
1633                          */
1634
1635                         /* transfer_count: first 16bits in the 4th dword */
1636                         stp_req->pio_len = frame_buffer[3] & 0xffff;
1637
1638                         /* status: 4th byte in the 3rd dword */
1639                         stp_req->status = (frame_buffer[2] >> 24) & 0xff;
1640
1641                         sci_controller_copy_sata_response(&ireq->stp.rsp,
1642                                                                frame_header,
1643                                                                frame_buffer);
1644
1645                         ireq->stp.rsp.status = stp_req->status;
1646
1647                         /* The next state is dependent on whether the
1648                          * request was PIO Data-in or Data out
1649                          */
1650                         if (task->data_dir == DMA_FROM_DEVICE) {
1651                                 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
1652                         } else if (task->data_dir == DMA_TO_DEVICE) {
1653                                 /* Transmit data */
1654                                 status = sci_stp_request_pio_data_out_transmit_data(ireq);
1655                                 if (status != SCI_SUCCESS)
1656                                         break;
1657                                 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
1658                         }
1659                         break;
1660
1661                 case FIS_SETDEVBITS:
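                        /* A Set Device Bits fis does not end the PIO sequence;
                         * re-enter the wait-frame state and keep waiting.
                         */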
1662                         sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1663                         break;
1664
1665                 case FIS_REGD2H:
1666                         if (frame_header->status & ATA_BUSY) {
1667                                 /*
1668                                  * Now why is the drive sending a D2H Register
1669                                  * FIS when it is still busy?  Do nothing since
1670                                  * we are still in the right state.
1671                                  */
1672                                 dev_dbg(&ihost->pdev->dev,
1673                                         "%s: SCIC PIO Request 0x%p received "
1674                                         "D2H Register FIS with BSY status "
1675                                         "0x%x\n",
1676                                         __func__,
1677                                         stp_req,
1678                                         frame_header->status);
1679                                 break;
1680                         }
1681
1682                         sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1683                                                                       frame_index,
1684                                                                       (void **)&frame_buffer);
1685
1686                         sci_controller_copy_sata_response(&ireq->stp.rsp,
1687                                                                frame_header,
1688                                                                frame_buffer);
1689
1690                         ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1691                         ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1692                         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1693                         break;
1694
1695                 default:
1696                         /* FIXME: what do we do here? */
1697                         break;
1698                 }
1699
1700                 /* Frame is decoded; return it to the controller */
1701                 sci_controller_release_frame(ihost, frame_index);
1702
1703                 return status;
1704         }
1705
1706         case SCI_REQ_STP_PIO_DATA_IN: {
1707                 struct dev_to_host_fis *frame_header;
1708                 struct sata_fis_data *frame_buffer;
1709
1710                 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1711                                                                        frame_index,
1712                                                                        (void **)&frame_header);
1713
1714                 if (status != SCI_SUCCESS) {
1715                         dev_err(&ihost->pdev->dev,
1716                                 "%s: SCIC IO Request 0x%p could not get frame "
1717                                 "header for frame index %d, status %x\n",
1718                                 __func__,
1719                                 stp_req,
1720                                 frame_index,
1721                                 status);
1722                         return status;
1723                 }
1724
1725                 if (frame_header->fis_type != FIS_DATA) {
1726                         dev_err(&ihost->pdev->dev,
1727                                 "%s: SCIC PIO Request 0x%p received frame %d "
1728                                 "with fis type 0x%02x when expecting a data "
1729                                 "fis.\n",
1730                                 __func__,
1731                                 stp_req,
1732                                 frame_index,
1733                                 frame_header->fis_type);
1734
1735                         ireq->scu_status = SCU_TASK_DONE_GOOD;
1736                         ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
1737                         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1738
1739                         /* Frame is decoded; return it to the controller */
1740                         sci_controller_release_frame(ihost, frame_index);
1741                         return status;
1742                 }
1743
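                /* A negative sgl index means the payload is not processed
                 * through the sgl (presumably an unaligned request); save the
                 * frame index so the data can be copied out of the frame when
                 * the request completes.
                 */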
1744                 if (stp_req->sgl.index < 0) {
1745                         ireq->saved_rx_frame_index = frame_index;
1746                         stp_req->pio_len = 0;
1747                 } else {
1748                         sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1749                                                                       frame_index,
1750                                                                       (void **)&frame_buffer);
1751
1752                         status = sci_stp_request_pio_data_in_copy_data(stp_req,
1753                                                                             (u8 *)frame_buffer);
1754
1755                         /* Frame is decoded; return it to the controller */
1756                         sci_controller_release_frame(ihost, frame_index);
1757                 }
1758
1759                 /* Check for the end of the transfer; are there more
1760                  * bytes remaining for this data transfer?
1761                  */
1762                 if (status != SCI_SUCCESS || stp_req->pio_len != 0)
1763                         return status;
1764
1765                 if ((stp_req->status & ATA_BUSY) == 0) {
1766                         ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1767                         ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1768                         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1769                 } else {
1770                         sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1771                 }
1772                 return status;
1773         }
1774
1775         case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
1776                 struct dev_to_host_fis *frame_header;
1777                 u32 *frame_buffer;
1778
1779                 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1780                                                                        frame_index,
1781                                                                        (void **)&frame_header);
1782                 if (status != SCI_SUCCESS) {
1783                         dev_err(&ihost->pdev->dev,
1784                                 "%s: SCIC IO Request 0x%p could not get frame "
1785                                 "header for frame index %d, status %x\n",
1786                                 __func__,
1787                                 stp_req,
1788                                 frame_index,
1789                                 status);
1790                         return status;
1791                 }
1792
1793                 switch (frame_header->fis_type) {
1794                 case FIS_REGD2H:
1795                         sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1796                                                                       frame_index,
1797                                                                       (void **)&frame_buffer);
1798
1799                         sci_controller_copy_sata_response(&ireq->stp.rsp,
1800                                                                frame_header,
1801                                                                frame_buffer);
1802
1803                         /* The command has completed with error */
1804                         ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1805                         ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1806                         break;
1807
1808                 default:
1809                         dev_warn(&ihost->pdev->dev,
1810                                  "%s: IO Request:0x%p Frame Id:%d protocol "
1811                                  "violation occurred\n",
1812                                  __func__,
1813                                  stp_req,
1814                                  frame_index);
1815
1816                         ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1817                         ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1818                         break;
1819                 }
1820
1821                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1822
1823                 /* Frame has been decoded; return it to the controller */
1824                 sci_controller_release_frame(ihost, frame_index);
1825
1826                 return status;
1827         }
1828         case SCI_REQ_ABORTING:
1829                 /*
1830                  * TODO: Is it even possible to get an unsolicited frame in the
1831                  * aborting state?
1832                  */
1833                 sci_controller_release_frame(ihost, frame_index);
1834                 return SCI_SUCCESS;
1835
1836         default:
1837                 dev_warn(&ihost->pdev->dev,
1838                          "%s: SCIC IO Request given unexpected frame %x while "
1839                          "in state %d\n",
1840                          __func__,
1841                          frame_index,
1842                          state);
1843
1844                 sci_controller_release_frame(ihost, frame_index);
1845                 return SCI_FAILURE_INVALID_STATE;
1846         }
1847 }
1848
1849 static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
1850                                                        u32 completion_code)
1851 {
1852         enum sci_status status = SCI_SUCCESS;
1853
1854         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1855         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1856                 ireq->scu_status = SCU_TASK_DONE_GOOD;
1857                 ireq->sci_status = SCI_SUCCESS;
1858                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1859                 break;
1860         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
1861         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1862                 /* We must check the response buffer to see if the D2H
1863                  * Register FIS was received before we got the TC
1864                  * completion.
1865                  */
1866                 if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
1867                         sci_remote_device_suspend(ireq->target_device,
1868                                 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1869
1870                         ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1871                         ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1872                         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1873                 } else {
1874                         /* If we have an error completion status for the
1875                          * TC then we can expect a D2H register FIS from
1876                          * the device so we must change state to wait
1877                          * for it
1878                          */
1879                         sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
1880                 }
1881                 break;
1882
1883         /* TODO Check to see if any of these completion status need to
1884          * wait for the device to host register fis.
1885          */
1886         /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
1887          * - this comes only for B0
1888          */
1889         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
1890         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1891         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
1892         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
1893         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
1894                 sci_remote_device_suspend(ireq->target_device,
1895                         SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1896         /* Fall through to the default case */
1897         default:
1898                 /* All other completion statuses cause the IO to be complete. */
1899                 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1900                 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1901                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1902                 break;
1903         }
1904
1905         return status;
1906 }
1907
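/* SATA soft reset is a two-fis sequence: an H2D register fis with SRST
 * asserted, then a second (diagnostic) H2D fis with SRST cleared, after
 * which the device replies with a D2H register fis.
 */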
1908 static enum sci_status
1909 stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
1910                                                    u32 completion_code)
1911 {
1912         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1913         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1914                 ireq->scu_status = SCU_TASK_DONE_GOOD;
1915                 ireq->sci_status = SCI_SUCCESS;
1916                 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
1917                 break;
1918
1919         default:
1920                 /*
1921                  * All other completion statuses cause the IO to be complete.
1922                  * If a NAK was received, then it is up to the user to retry
1923                  * the request.
1924                  */
1925                 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1926                 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1927                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1928                 break;
1929         }
1930
1931         return SCI_SUCCESS;
1932 }
1933
1934 static enum sci_status
1935 stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
1936                                                      u32 completion_code)
1937 {
1938         switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1939         case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1940                 ireq->scu_status = SCU_TASK_DONE_GOOD;
1941                 ireq->sci_status = SCI_SUCCESS;
1942                 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
1943                 break;
1944
1945         default:
1946                 /* All other completion statuses cause the IO to be complete.  If
1947                  * a NAK was received, then it is up to the user to retry the
1948                  * request.
1949                  */
1950                 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1951                 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1952                 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1953                 break;
1954         }
1955
1956         return SCI_SUCCESS;
1957 }
1958
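/*
 * Task context completion dispatch: route the completion code to the
 * handler that matches the request's current state.
 */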
1959 enum sci_status
1960 sci_io_request_tc_completion(struct isci_request *ireq,
1961                                   u32 completion_code)
1962 {
1963         enum sci_base_request_states state;
1964         struct isci_host *ihost = ireq->owning_controller;
1965
1966         state = ireq->sm.current_state_id;
1967
1968         switch (state) {
1969         case SCI_REQ_STARTED:
1970                 return request_started_state_tc_event(ireq, completion_code);
1971
1972         case SCI_REQ_TASK_WAIT_TC_COMP:
1973                 return ssp_task_request_await_tc_event(ireq,
1974                                                        completion_code);
1975
1976         case SCI_REQ_SMP_WAIT_RESP:
1977                 return smp_request_await_response_tc_event(ireq,
1978                                                            completion_code);
1979
1980         case SCI_REQ_SMP_WAIT_TC_COMP:
1981                 return smp_request_await_tc_event(ireq, completion_code);
1982
1983         case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1984                 return stp_request_udma_await_tc_event(ireq,
1985                                                        completion_code);
1986
1987         case SCI_REQ_STP_NON_DATA_WAIT_H2D:
1988                 return stp_request_non_data_await_h2d_tc_event(ireq,
1989                                                                completion_code);
1990
1991         case SCI_REQ_STP_PIO_WAIT_H2D:
1992                 return stp_request_pio_await_h2d_completion_tc_event(ireq,
1993                                                                      completion_code);
1994
1995         case SCI_REQ_STP_PIO_DATA_OUT:
1996                 return pio_data_out_tx_done_tc_event(ireq, completion_code);
1997
1998         case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
1999                 return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
2000                                                                           completion_code);
2001
2002         case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2003                 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
2004                                                                             completion_code);
2005
2006         case SCI_REQ_ABORTING:
2007                 return request_aborting_state_tc_event(ireq,
2008                                                        completion_code);
2009
2010         default:
2011                 dev_warn(&ihost->pdev->dev,
2012                          "%s: SCIC IO Request given task completion "
2013                          "notification %x while in wrong state %d\n",
2014                          __func__,
2015                          completion_code,
2016                          state);
2017                 return SCI_FAILURE_INVALID_STATE;
2018         }
2019 }
2020
2021 /**
2022  * isci_request_process_response_iu() - This function sets the status and
2023  *    response iu, in the task struct, from the request object for the upper
2024  *    layer driver.
2025  * @task: This parameter is the task struct from the upper layer driver.
2026  * @resp_iu: This parameter points to the response iu of the completed request.
2027  * @dev: This parameter specifies the linux device struct.
2028  *
2029  * none.
2030  */
2031 static void isci_request_process_response_iu(
2032         struct sas_task *task,
2033         struct ssp_response_iu *resp_iu,
2034         struct device *dev)
2035 {
2036         dev_dbg(dev,
2037                 "%s: resp_iu = %p "
2038                 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2039                 "resp_iu->response_data_len = %x, "
2040                 "resp_iu->sense_data_len = %x\nresponse data: ",
2041                 __func__,
2042                 resp_iu,
2043                 resp_iu->status,
2044                 resp_iu->datapres,
2045                 resp_iu->response_data_len,
2046                 resp_iu->sense_data_len);
2047
2048         task->task_status.stat = resp_iu->status;
2049
2050         /* libsas updates the task status fields based on the response iu. */
2051         sas_ssp_task_response(dev, task, resp_iu);
2052 }
2053
2054 /**
2055  * isci_request_set_open_reject_status() - This function prepares the I/O
2056  *    completion for OPEN_REJECT conditions.
2057  * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task reported to the upper layer.
2058  * @response_ptr: This parameter specifies the service response for the I/O.
2059  * @status_ptr: This parameter specifies the exec status for the I/O.
2060  * @complete_to_host_ptr: This parameter specifies the action to be taken by
2061  *    the LLDD with respect to completing this request or forcing an abort
2062  *    condition on the I/O.
2063  * @open_rej_reason: This parameter specifies the encoded reason for the
2064  *    abandon-class reject.
2065  *
2066  * none.
2067  */
2068 static void isci_request_set_open_reject_status(
2069         struct isci_request *request,
2070         struct sas_task *task,
2071         enum service_response *response_ptr,
2072         enum exec_status *status_ptr,
2073         enum isci_completion_selection *complete_to_host_ptr,
2074         enum sas_open_rej_reason open_rej_reason)
2075 {
2076         /* Task in the target is done. */
2077         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2078         *response_ptr                     = SAS_TASK_UNDELIVERED;
2079         *status_ptr                       = SAS_OPEN_REJECT;
2080         *complete_to_host_ptr             = isci_perform_normal_io_completion;
2081         task->task_status.open_rej_reason = open_rej_reason;
2082 }
2083
2084 /**
2085  * isci_request_handle_controller_specific_errors() - This function decodes
2086  *    controller-specific I/O completion error conditions.
2087  * @request: This parameter is the completed isci_request object.
 * @idev: This parameter is the remote device for the completed request.
 * @task: This parameter is the sas_task reported to the upper layer.
2088  * @response_ptr: This parameter specifies the service response for the I/O.
2089  * @status_ptr: This parameter specifies the exec status for the I/O.
2090  * @complete_to_host_ptr: This parameter specifies the action to be taken by
2091  *    the LLDD with respect to completing this request or forcing an abort
2092  *    condition on the I/O.
2093  *
2094  * none.
2095  */
2096 static void isci_request_handle_controller_specific_errors(
2097         struct isci_remote_device *idev,
2098         struct isci_request *request,
2099         struct sas_task *task,
2100         enum service_response *response_ptr,
2101         enum exec_status *status_ptr,
2102         enum isci_completion_selection *complete_to_host_ptr)
2103 {
2104         unsigned int cstatus;
2105
2106         cstatus = request->scu_status;
2107
2108         dev_dbg(&request->isci_host->pdev->dev,
2109                 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2110                 "- controller status = 0x%x\n",
2111                 __func__, request, cstatus);
2112
2113         /* Decode the controller-specific errors; most
2114          * important is to recognize those conditions in which
2115          * the target may still have a task outstanding that
2116          * must be aborted.
2117          *
2118          * Note that there are SCU completion codes being
2119          * named in the decode below for which SCIC has already
2120          * done work to handle them in a way other than as
2121          * a controller-specific completion code; these are left
2122          * in the decode below for completeness sake.
2123          * in the decode below for completeness' sake.
2124         switch (cstatus) {
2125         case SCU_TASK_DONE_DMASETUP_DIRERR:
2126         /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2127         case SCU_TASK_DONE_XFERCNT_ERR:
2128                 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2129                 if (task->task_proto == SAS_PROTOCOL_SMP) {
2130                         /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2131                         *response_ptr = SAS_TASK_COMPLETE;
2132
2133                         /* See if the device has been/is being stopped. Note
2134                          * that we ignore the quiesce state, since we are
2135                          * concerned about the actual device state.
2136                          */
2137                         if (!idev)
2138                                 *status_ptr = SAS_DEVICE_UNKNOWN;
2139                         else
2140                                 *status_ptr = SAS_ABORTED_TASK;
2141
2142                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2143
2144                         *complete_to_host_ptr =
2145                                 isci_perform_normal_io_completion;
2146                 } else {
2147                         /* Task in the target is not done. */
2148                         *response_ptr = SAS_TASK_UNDELIVERED;
2149
2150                         if (!idev)
2151                                 *status_ptr = SAS_DEVICE_UNKNOWN;
2152                         else
2153                                 *status_ptr = SAM_STAT_TASK_ABORTED;
2154
2155                         clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2156
2157                         *complete_to_host_ptr =
2158                                 isci_perform_error_io_completion;
2159                 }
2160
2161                 break;
2162
2163         case SCU_TASK_DONE_CRC_ERR:
2164         case SCU_TASK_DONE_NAK_CMD_ERR:
2165         case SCU_TASK_DONE_EXCESS_DATA:
2166         case SCU_TASK_DONE_UNEXP_FIS:
2167         /* Also SCU_TASK_DONE_UNEXP_RESP: */
2168         case SCU_TASK_DONE_VIIT_ENTRY_NV:       /* TODO - conditions? */
2169         case SCU_TASK_DONE_IIT_ENTRY_NV:        /* TODO - conditions? */
2170         case SCU_TASK_DONE_RNCNV_OUTBOUND:      /* TODO - conditions? */
2171                 /* These are conditions in which the target
2172                  * has completed the task, so that no cleanup
2173                  * is necessary.
2174                  */
2175                 *response_ptr = SAS_TASK_COMPLETE;
2176
2177                 /* See if the device has been/is being stopped. Note
2178                  * that we ignore the quiesce state, since we are
2179                  * concerned about the actual device state.
2180                  */
2181                 if (!idev)
2182                         *status_ptr = SAS_DEVICE_UNKNOWN;
2183                 else
2184                         *status_ptr = SAS_ABORTED_TASK;
2185
2186                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2187
2188                 *complete_to_host_ptr = isci_perform_normal_io_completion;
2189                 break;
2190
2191
2192         /* Note that the only open reject completion codes seen here will be
2193          * abandon-class codes; all others are automatically retried in the SCU.
2194          */
2195         case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2196
2197                 isci_request_set_open_reject_status(
2198                         request, task, response_ptr, status_ptr,
2199                         complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2200                 break;
2201
2202         case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2203
2204                 /* Note - the return of AB0 will change when
2205                  * libsas implements detection of zone violations.
2206                  */
2207                 isci_request_set_open_reject_status(
2208                         request, task, response_ptr, status_ptr,
2209                         complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2210                 break;
2211
2212         case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2213
2214                 isci_request_set_open_reject_status(
2215                         request, task, response_ptr, status_ptr,
2216                         complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2217                 break;
2218
2219         case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2220
2221                 isci_request_set_open_reject_status(
2222                         request, task, response_ptr, status_ptr,
2223                         complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2224                 break;
2225
2226         case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2227
2228                 isci_request_set_open_reject_status(
2229                         request, task, response_ptr, status_ptr,
2230                         complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2231                 break;
2232
2233         case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2234
2235                 isci_request_set_open_reject_status(
2236                         request, task, response_ptr, status_ptr,
2237                         complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2238                 break;
2239
2240         case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2241
2242                 isci_request_set_open_reject_status(
2243                         request, task, response_ptr, status_ptr,
2244                         complete_to_host_ptr, SAS_OREJ_STP_NORES);
2245                 break;
2246
2247         case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2248
2249                 isci_request_set_open_reject_status(
2250                         request, task, response_ptr, status_ptr,
2251                         complete_to_host_ptr, SAS_OREJ_EPROTO);
2252                 break;
2253
2254         case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2255
2256                 isci_request_set_open_reject_status(
2257                         request, task, response_ptr, status_ptr,
2258                         complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2259                 break;
2260
2261         case SCU_TASK_DONE_LL_R_ERR:
2262         /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2263         case SCU_TASK_DONE_LL_PERR:
2264         case SCU_TASK_DONE_LL_SY_TERM:
2265         /* Also SCU_TASK_DONE_NAK_ERR:*/
2266         case SCU_TASK_DONE_LL_LF_TERM:
2267         /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2268         case SCU_TASK_DONE_LL_ABORT_ERR:
2269         case SCU_TASK_DONE_SEQ_INV_TYPE:
2270         /* Also SCU_TASK_DONE_UNEXP_XR: */
2271         case SCU_TASK_DONE_XR_IU_LEN_ERR:
2272         case SCU_TASK_DONE_INV_FIS_LEN:
2273         /* Also SCU_TASK_DONE_XR_WD_LEN: */
2274         case SCU_TASK_DONE_SDMA_ERR:
2275         case SCU_TASK_DONE_OFFSET_ERR:
2276         case SCU_TASK_DONE_MAX_PLD_ERR:
2277         case SCU_TASK_DONE_LF_ERR:
2278         case SCU_TASK_DONE_SMP_RESP_TO_ERR:  /* Escalate to dev reset? */
2279         case SCU_TASK_DONE_SMP_LL_RX_ERR:
2280         case SCU_TASK_DONE_UNEXP_DATA:
2281         case SCU_TASK_DONE_UNEXP_SDBFIS:
2282         case SCU_TASK_DONE_REG_ERR:
2283         case SCU_TASK_DONE_SDB_ERR:
2284         case SCU_TASK_DONE_TASK_ABORT:
2285         default:
2286                 /* Task in the target is not done. */
2287                 *response_ptr = SAS_TASK_UNDELIVERED;
2288                 *status_ptr = SAM_STAT_TASK_ABORTED;
2289
2290                 if (task->task_proto == SAS_PROTOCOL_SMP) {
2291                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2292
2293                         *complete_to_host_ptr = isci_perform_normal_io_completion;
2294                 } else {
2295                         clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2296
2297                         *complete_to_host_ptr = isci_perform_error_io_completion;
2298                 }
2299                 break;
2300         }
2301 }
2302
2303 /**
2304  * isci_task_save_for_upper_layer_completion() - This function saves the
2305  *    request for later completion to the upper layer driver.
2306  * @host: This parameter is a pointer to the host on which the request
2307  *    should be queued (either as an error or success).
2308  * @request: This parameter is the completed request.
2309  * @response: This parameter is the response code for the completed task.
2310  * @status: This parameter is the status code for the completed task.
 * @task_notification_selection: This parameter specifies how the completion
 *    is to be reported to libsas.
2311  *
2312  * none.
2313  */
2314 static void isci_task_save_for_upper_layer_completion(
2315         struct isci_host *host,
2316         struct isci_request *request,
2317         enum service_response response,
2318         enum exec_status status,
2319         enum isci_completion_selection task_notification_selection)
2320 {
2321         struct sas_task *task = isci_request_access_task(request);
2322
2323         task_notification_selection
2324                 = isci_task_set_completion_status(task, response, status,
2325                                                   task_notification_selection);
2326
2327         /* Tasks aborted specifically by a call to the lldd_abort_task
2328          * function should not be completed to the host in the regular path.
2329          */
2330         switch (task_notification_selection) {
2331
2332         case isci_perform_normal_io_completion:
2333
2334                 /* Normal notification (task_done) */
2335                 dev_dbg(&host->pdev->dev,
2336                         "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
2337                         __func__,
2338                         task,
2339                         task->task_status.resp, response,
2340                         task->task_status.stat, status);
2341                 /* Add to the completed list. */
2342                 list_add(&request->completed_node,
2343                          &host->requests_to_complete);
2344
2345                 /* Take the request off the device's pending request list. */
2346                 list_del_init(&request->dev_node);
2347                 break;
2348
2349         case isci_perform_aborted_io_completion:
2350                 /* No notification to libsas because this request is
2351                  * already in the abort path.
2352                  */
2353                 dev_dbg(&host->pdev->dev,
2354                          "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
2355                          __func__,
2356                          task,
2357                          task->task_status.resp, response,
2358                          task->task_status.stat, status);
2359
2360                 /* Wake up whatever process was waiting for this
2361                  * request to complete.
2362                  */
2363                 WARN_ON(request->io_request_completion == NULL);
2364
2365                 if (request->io_request_completion != NULL) {
2366
2367                         /* Signal whoever is waiting that this
2368                          * request is complete.
2369                          */
2370                         complete(request->io_request_completion);
2371                 }
2372                 break;
2373
2374         case isci_perform_error_io_completion:
2375                 /* Use sas_task_abort */
2376                 dev_dbg(&host->pdev->dev,
2377                          "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
2378                          __func__,
2379                          task,
2380                          task->task_status.resp, response,
2381                          task->task_status.stat, status);
2382                 /* Add to the aborted list. */
2383                 list_add(&request->completed_node,
2384                          &host->requests_to_errorback);
2385                 break;
2386
2387         default:
2388                 dev_dbg(&host->pdev->dev,
2389                          "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
2390                          __func__,
2391                          task,
2392                          task->task_status.resp, response,
2393                          task->task_status.stat, status);
2394
2395                 /* Add to the error to libsas list. */
2396                 list_add(&request->completed_node,
2397                          &host->requests_to_errorback);
2398                 break;
2399         }
2400 }
2401
2402 static void isci_request_process_stp_response(struct sas_task *task,
2403                                               void *response_buffer)
2404 {
2405         struct dev_to_host_fis *d2h_reg_fis = response_buffer;
2406         struct task_status_struct *ts = &task->task_status;
2407         struct ata_task_resp *resp = (void *)&ts->buf[0];
2408
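        /* The raw response area appears to hold a 16-byte header (with the
         * frame length at byte offset 6) followed by the D2H register fis;
         * extract both for libsas/libata to decode.
         */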
2409         resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6));
2410         memcpy(&resp->ending_fis[0], response_buffer + 16, 24);
2411         ts->buf_valid_size = sizeof(*resp);
2412
2413         /*
2414          * If the device fault bit is set in the status register, then
2415          * report a protocol response so the ending fis is decoded.
2416          */
2417         if (d2h_reg_fis->status & ATA_DF)
2418                 ts->stat = SAS_PROTO_RESPONSE;
2419         else
2420                 ts->stat = SAM_STAT_GOOD;
2421
2422         ts->resp = SAS_TASK_COMPLETE;
2423 }
2424
2425 static void isci_request_io_request_complete(struct isci_host *ihost,
2426                                              struct isci_request *request,
2427                                              enum sci_io_status completion_status)
2428 {
2429         struct sas_task *task = isci_request_access_task(request);
2430         struct ssp_response_iu *resp_iu;
2431         void *resp_buf;
2432         unsigned long task_flags;
2433         struct isci_remote_device *idev = isci_lookup_device(task->dev);
2434         enum service_response response       = SAS_TASK_UNDELIVERED;
2435         enum exec_status status         = SAS_ABORTED_TASK;
2436         enum isci_request_status request_status;
2437         enum isci_completion_selection complete_to_host
2438                 = isci_perform_normal_io_completion;
2439
2440         dev_dbg(&ihost->pdev->dev,
2441                 "%s: request = %p, task = %p,\n"
2442                 "task->data_dir = %d completion_status = 0x%x\n",
2443                 __func__,
2444                 request,
2445                 task,
2446                 task->data_dir,
2447                 completion_status);
2448
2449         spin_lock(&request->state_lock);
2450         request_status = request->status;
2451
2452         /* Decode the request status.  Note that if the request has been
2453          * aborted by a task management function, we don't care
2454          * what the status is.
2455          */
2456         switch (request_status) {
2457
2458         case aborted:
2459                 /* "aborted" indicates that the request was aborted by a task
2460                  * management function, since once a task management request is
2461                  * perfomed by the device, the request only completes because
2462                  * performed by the device, the request only completes because
2463                  *
2464                  * Aborted also means an external thread is explicitly managing
2465                  * this request, so that we do not complete it up the stack.
2466                  *
2467                  * The target is still there (since the TMF was successful).
2468                  */
2469                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2470                 response = SAS_TASK_COMPLETE;
2471
2472                 /* See if the device has been/is being stopped. Note
2473                  * that we ignore the quiesce state, since we are
2474                  * concerned about the actual device state.
2475                  */
2476                 if (!idev)
2477                         status = SAS_DEVICE_UNKNOWN;
2478                 else
2479                         status = SAS_ABORTED_TASK;
2480
2481                 complete_to_host = isci_perform_aborted_io_completion;
2482                 /* This was an aborted request. */
2483
2484                 spin_unlock(&request->state_lock);
2485                 break;
2486
2487         case aborting:
2488                 /* aborting means that the task management function tried and
2489                  * failed to abort the request. We need to note the request
2490                  * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2491                  * target as down.
2492                  *
2493                  * Aborting also means an external thread is explicitly managing
2494                  * this request, so that we do not complete it up the stack.
2495                  */
2496                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2497                 response = SAS_TASK_UNDELIVERED;
2498
2499                 if (!idev)
2500                         /* The device has been/is being stopped. Note that
2501                          * we ignore the quiesce state, since we are
2502                          * concerned about the actual device state.
2503                          */
2504                         status = SAS_DEVICE_UNKNOWN;
2505                 else
2506                         status = SAS_PHY_DOWN;
2507
2508                 complete_to_host = isci_perform_aborted_io_completion;
2509
2510                 /* This was an aborted request. */
2511
2512                 spin_unlock(&request->state_lock);
2513                 break;
2514
2515         case terminating:
2516
2517                 /* This was a terminated request.  This happens when
2518                  * the I/O is being terminated because of an action on
2519                  * the device (reset, tear down, etc.), and the I/O needs
2520                  * to be completed up the stack.
2521                  */
2522                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2523                 response = SAS_TASK_UNDELIVERED;
2524
2525                 /* See if the device has been/is being stopped. Note
2526                  * that we ignore the quiesce state, since we are
2527                  * concerned about the actual device state.
2528                  */
2529                 if (!idev)
2530                         status = SAS_DEVICE_UNKNOWN;
2531                 else
2532                         status = SAS_ABORTED_TASK;
2533
2534                 complete_to_host = isci_perform_aborted_io_completion;
2535
2536                 /* This was a terminated request. */
2537
2538                 spin_unlock(&request->state_lock);
2539                 break;
2540
2541         case dead:
2542                 /* This was a terminated request that timed out during the
2543                  * termination process.  There is no task to complete to
2544                  * libsas.
2545                  */
2546                 complete_to_host = isci_perform_normal_io_completion;
2547                 spin_unlock(&request->state_lock);
2548                 break;
2549
2550         default:
2551
2552                 /* The request is done from an SCU HW perspective. */
2553                 request->status = completed;
2554
2555                 spin_unlock(&request->state_lock);
2556
2557                 /* This is an active request being completed from the core. */
2558                 switch (completion_status) {
2559
2560                 case SCI_IO_FAILURE_RESPONSE_VALID:
2561                         dev_dbg(&ihost->pdev->dev,
2562                                 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2563                                 __func__,
2564                                 request,
2565                                 task);
2566
2567                         if (sas_protocol_ata(task->task_proto)) {
2568                                 resp_buf = &request->stp.rsp;
2569                                 isci_request_process_stp_response(task,
2570                                                                   resp_buf);
2571                         } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2572
2573                                 /* crack the iu response buffer. */
2574                                 resp_iu = &request->ssp.rsp;
2575                                 isci_request_process_response_iu(task, resp_iu,
2576                                                                  &ihost->pdev->dev);
2577
2578                         } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2579
2580                                 dev_err(&ihost->pdev->dev,
2581                                         "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2582                                         "SAS_PROTOCOL_SMP protocol\n",
2583                                         __func__);
2584
2585                         } else
2586                                 dev_err(&ihost->pdev->dev,
2587                                         "%s: unknown protocol\n", __func__);
2588
2589                         /* use the task status set in the task struct by the
2590                          * isci_request_process_response_iu call.
2591                          */
2592                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2593                         response = task->task_status.resp;
2594                         status = task->task_status.stat;
2595                         break;
2596
2597                 case SCI_IO_SUCCESS:
2598                 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2599
2600                         response = SAS_TASK_COMPLETE;
2601                         status   = SAM_STAT_GOOD;
2602                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2603
2604                         if (task->task_proto == SAS_PROTOCOL_SMP) {
2605                                 void *rsp = &request->smp.rsp;
2606
2607                                 dev_dbg(&ihost->pdev->dev,
2608                                         "%s: SMP protocol completion\n",
2609                                         __func__);
2610
2611                                 sg_copy_from_buffer(
2612                                         &task->smp_task.smp_resp, 1,
2613                                         rsp, sizeof(struct smp_resp));
2614                         } else if (completion_status
2615                                    == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2616
2617                                 /* This was an SSP / STP / SATA transfer.
2618                                  * There is a possibility that less data than
2619                                  * the maximum was transferred.
2620                                  */
2621                                 u32 transferred_length = sci_req_tx_bytes(request);
2622
2623                                 task->task_status.residual
2624                                         = task->total_xfer_len - transferred_length;
2625
2626                                 /* If there were residual bytes, call this an
2627                                  * underrun.
2628                                  */
2629                                 if (task->task_status.residual != 0)
2630                                         status = SAS_DATA_UNDERRUN;
2631
2632                                 dev_dbg(&ihost->pdev->dev,
2633                                         "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2634                                         __func__,
2635                                         status);
2636
2637                         } else
2638                                 dev_dbg(&ihost->pdev->dev,
2639                                         "%s: SCI_IO_SUCCESS\n",
2640                                         __func__);
2641
2642                         break;
2643
2644                 case SCI_IO_FAILURE_TERMINATED:
2645                         dev_dbg(&ihost->pdev->dev,
2646                                 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2647                                 __func__,
2648                                 request,
2649                                 task);
2650
2651                         /* The request was terminated explicitly.  No handling
2652                          * is needed in the SCSI error handler path.
2653                          */
2654                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2655                         response = SAS_TASK_UNDELIVERED;
2656
2657                         /* See if the device has been/is being stopped. Note
2658                          * that we ignore the quiesce state, since we are
2659                          * concerned about the actual device state.
2660                          */
2661                         if (!idev)
2662                                 status = SAS_DEVICE_UNKNOWN;
2663                         else
2664                                 status = SAS_ABORTED_TASK;
2665
2666                         complete_to_host = isci_perform_normal_io_completion;
2667                         break;
2668
2669                 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2670
2671                         isci_request_handle_controller_specific_errors(
2672                                 idev, request, task, &response, &status,
2673                                 &complete_to_host);
2674
2675                         break;
2676
2677                 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2678                         /* This is a special case, in that the I/O completion
2679                          * is telling us that the device needs a reset.
2680                          * In order for the device reset condition to be
2681                          * noticed, the I/O has to be handled in the error
2682                          * handler.  Set the reset flag and cause the
2683                          * SCSI error thread to be scheduled.
2684                          */
2685                         spin_lock_irqsave(&task->task_state_lock, task_flags);
2686                         task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2687                         spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2688
2689                         /* Fail the I/O. */
2690                         response = SAS_TASK_UNDELIVERED;
2691                         status = SAM_STAT_TASK_ABORTED;
2692
2693                         complete_to_host = isci_perform_error_io_completion;
2694                         clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2695                         break;
2696
2697                 case SCI_FAILURE_RETRY_REQUIRED:
2698
2699                         /* Fail the I/O so it can be retried. */
2700                         response = SAS_TASK_UNDELIVERED;
2701                         if (!idev)
2702                                 status = SAS_DEVICE_UNKNOWN;
2703                         else
2704                                 status = SAS_ABORTED_TASK;
2705
2706                         complete_to_host = isci_perform_normal_io_completion;
2707                         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2708                         break;
2709
2711                 default:
2712                         /* Catch any otherwise unhandled error codes here. */
2713                         dev_dbg(&ihost->pdev->dev,
2714                                  "%s: invalid completion code: 0x%x - "
2715                                  "isci_request = %p\n",
2716                                  __func__, completion_status, request);
2717
2718                         response = SAS_TASK_UNDELIVERED;
2719
2720                         /* See if the device has been/is being stopped. Note
2721                          * that we ignore the quiesce state, since we are
2722                          * concerned about the actual device state.
2723                          */
2724                         if (!idev)
2725                                 status = SAS_DEVICE_UNKNOWN;
2726                         else
2727                                 status = SAS_ABORTED_TASK;
2728
2729                         if (SAS_PROTOCOL_SMP == task->task_proto) {
2730                                 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2731                                 complete_to_host = isci_perform_normal_io_completion;
2732                         } else {
2733                                 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2734                                 complete_to_host = isci_perform_error_io_completion;
2735                         }
2736                         break;
2737                 }
2738                 break;
2739         }
2740
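        /* Undo the DMA mappings the driver created for this request; ATA
         * payloads are mapped and unmapped by libata, so only SSP and SMP
         * need handling here.
         */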
2741         switch (task->task_proto) {
2742         case SAS_PROTOCOL_SSP:
2743                 if (task->data_dir == DMA_NONE)
2744                         break;
2745                 if (task->num_scatter == 0)
2746                         /* 0 indicates a single dma address */
2747                         dma_unmap_single(&ihost->pdev->dev,
2748                                          request->zero_scatter_daddr,
2749                                          task->total_xfer_len, task->data_dir);
2750                 else  /* unmap the sgl dma addresses */
2751                         dma_unmap_sg(&ihost->pdev->dev, task->scatter,
2752                                      request->num_sg_entries, task->data_dir);
2753                 break;
2754         case SAS_PROTOCOL_SMP: {
2755                 struct scatterlist *sg = &task->smp_task.smp_req;
2756                 struct smp_req *smp_req;
2757                 void *kaddr;
2758
2759                 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
2760
2761                 /* need to swab it back in case the command buffer is re-used */
2762                 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
2763                 smp_req = kaddr + sg->offset;
2764                 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
2765                 kunmap_atomic(kaddr, KM_IRQ0);
2766                 break;
2767         }
2768         default:
2769                 break;
2770         }
2771
2772         /* Put the completed request on the correct list */
2773         isci_task_save_for_upper_layer_completion(ihost, request, response,
2774                                                   status, complete_to_host);
2776
2777         /* complete the io request to the core. */
2778         sci_controller_complete_io(ihost, request->target_device, request);
2779         isci_put_device(idev);
2780
2781         /* Set the terminated flag so the request cannot be completed or
2782          * terminated again, and so any calls into abort task recognize
2783          * the already completed case.
2784          */
2785         set_bit(IREQ_TERMINATED, &request->flags);
2786 }
2787
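/* Entry handler for SCI_REQ_STARTED: request types that are not fully
 * accelerated by the hardware (task management, SMP, and non-NCQ SATA/STP)
 * are routed to protocol-specific wait sub-states; accelerated SSP and NCQ
 * I/O remains in the started state.
 */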
2788 static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
2789 {
2790         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2791         struct domain_device *dev = ireq->target_device->domain_dev;
2792         struct sas_task *task;
2793
2794         /* XXX as hch said always creating an internal sas_task for tmf
2795          * requests would simplify the driver
2796          */
2797         task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
2798
2799         /* all unaccelerated request types (non ssp or ncq) handled with
2800          * substates
2801          */
2802         if (!task && dev->dev_type == SAS_END_DEV) {
2803                 sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
2804         } else if (!task &&
2805                    (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
2806                     isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
2807                 sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
2808         } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2809                 sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
2810         } else if (task && sas_protocol_ata(task->task_proto) &&
2811                    !task->ata_task.use_ncq) {
2812                 u32 state;
2813
2814                 if (task->data_dir == DMA_NONE)
2815                         state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
2816                 else if (task->ata_task.dma_xfer)
2817                         state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
2818                 else /* PIO */
2819                         state = SCI_REQ_STP_PIO_WAIT_H2D;
2820
2821                 sci_change_state(sm, state);
2822         }
2823 }
2824
2825 static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
2826 {
2827         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2828         struct isci_host *ihost = ireq->owning_controller;
2829
2830         /* Tell the SCI_USER that the IO request is complete */
2831         if (!test_bit(IREQ_TMF, &ireq->flags))
2832                 isci_request_io_request_complete(ihost, ireq,
2833                                                  ireq->sci_status);
2834         else
2835                 isci_task_request_complete(ihost, ireq, ireq->sci_status);
2836 }
2837
2838 static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
2839 {
2840         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2841
2842         /* Setting the abort bit in the Task Context is required by the silicon. */
2843         ireq->tc->abort = 1;
2844 }
2845
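/* Each STP "started" sub-state entered below records this request as the
 * remote device's working_request, so later frame and completion handlers
 * for the device can locate the request in flight.
 */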
2846 static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2847 {
2848         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2849
2850         ireq->target_device->working_request = ireq;
2851 }
2852
2853 static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2854 {
2855         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2856
2857         ireq->target_device->working_request = ireq;
2858 }
2859
2860 static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
2861 {
2862         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2863
2864         ireq->target_device->working_request = ireq;
2865 }
2866
2867 static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
2868 {
2869         struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2870         struct scu_task_context *tc = ireq->tc;
2871         struct host_to_dev_fis *h2d_fis;
2872         enum sci_status status;
2873
2874         /* Clear the SRST bit */
2875         h2d_fis = &ireq->stp.cmd;
2876         h2d_fis->control = 0;
2877
2878         /* Clear the TC control bit */
2879         tc->control_frame = 0;
2880
2881         status = sci_controller_continue_io(ireq);
2882         WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
2883 }
2884
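/* Entry actions for each request state; states that require no work on
 * entry have empty entries.
 */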
2885 static const struct sci_base_state sci_request_state_table[] = {
2886         [SCI_REQ_INIT] = { },
2887         [SCI_REQ_CONSTRUCTED] = { },
2888         [SCI_REQ_STARTED] = {
2889                 .enter_state = sci_request_started_state_enter,
2890         },
2891         [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
2892                 .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
2893         },
2894         [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
2895         [SCI_REQ_STP_PIO_WAIT_H2D] = {
2896                 .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
2897         },
2898         [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
2899         [SCI_REQ_STP_PIO_DATA_IN] = { },
2900         [SCI_REQ_STP_PIO_DATA_OUT] = { },
2901         [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
2902         [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
2903         [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
2904                 .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
2905         },
2906         [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
2907                 .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
2908         },
2909         [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
2910         [SCI_REQ_TASK_WAIT_TC_COMP] = { },
2911         [SCI_REQ_TASK_WAIT_TC_RESP] = { },
2912         [SCI_REQ_SMP_WAIT_RESP] = { },
2913         [SCI_REQ_SMP_WAIT_TC_COMP] = { },
2914         [SCI_REQ_COMPLETED] = {
2915                 .enter_state = sci_request_completed_state_enter,
2916         },
2917         [SCI_REQ_ABORTING] = {
2918                 .enter_state = sci_request_aborting_state_enter,
2919         },
2920         [SCI_REQ_FINAL] = { },
2921 };
2922
2923 static void
2924 sci_general_request_construct(struct isci_host *ihost,
2925                                    struct isci_remote_device *idev,
2926                                    struct isci_request *ireq)
2927 {
2928         sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
2929
2930         ireq->target_device = idev;
2931         ireq->protocol = SCIC_NO_PROTOCOL;
2932         ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
2933
2934         ireq->sci_status   = SCI_SUCCESS;
2935         ireq->scu_status   = 0;
2936         ireq->post_context = 0xFFFFFFFF;
2937 }
2938
2939 static enum sci_status
2940 sci_io_request_construct(struct isci_host *ihost,
2941                           struct isci_remote_device *idev,
2942                           struct isci_request *ireq)
2943 {
2944         struct domain_device *dev = idev->domain_dev;
2945         enum sci_status status = SCI_SUCCESS;
2946
2947         /* Build the common part of the request */
2948         sci_general_request_construct(ihost, idev, ireq);
2949
2950         if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
2951                 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
2952
2953         if (dev->dev_type == SAS_END_DEV)
2954                 /* pass */;
2955         else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
2956                 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
2957         else if (dev_is_expander(dev))
2958                 /* pass */;
2959         else
2960                 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
2961
2962         memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
2963
2964         return status;
2965 }
2966
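/**
 * sci_task_request_construct() - construct a task management request.
 * @ihost: host that owns the request.
 * @idev: remote device targeted by the task management function.
 * @io_tag: tag for this request.
 * @ireq: request object to construct.
 *
 * Only end devices and SATA/STP devices are supported here; any other
 * device type fails with SCI_FAILURE_UNSUPPORTED_PROTOCOL.
 */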
2967 enum sci_status sci_task_request_construct(struct isci_host *ihost,
2968                                             struct isci_remote_device *idev,
2969                                             u16 io_tag, struct isci_request *ireq)
2970 {
2971         struct domain_device *dev = idev->domain_dev;
2972         enum sci_status status = SCI_SUCCESS;
2973
2974         /* Build the common part of the request */
2975         sci_general_request_construct(ihost, idev, ireq);
2976
2977         if (dev->dev_type == SAS_END_DEV ||
2978             dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
2979                 set_bit(IREQ_TMF, &ireq->flags);
2980                 memset(ireq->tc, 0, sizeof(struct scu_task_context));
2981         } else
2982                 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
2983
2984         return status;
2985 }
2986
2987 static enum sci_status isci_request_ssp_request_construct(
2988         struct isci_request *request)
2989 {
2990         enum sci_status status;
2991
2992         dev_dbg(&request->isci_host->pdev->dev,
2993                 "%s: request = %p\n",
2994                 __func__,
2995                 request);
2996         status = sci_io_request_construct_basic_ssp(request);
2997         return status;
2998 }
2999
3000 static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
3001 {
3002         struct sas_task *task = isci_request_access_task(ireq);
3003         struct host_to_dev_fis *fis = &ireq->stp.cmd;
3004         struct ata_queued_cmd *qc = task->uldd_task;
3005         enum sci_status status;
3006
3007         dev_dbg(&ireq->isci_host->pdev->dev,
3008                 "%s: ireq = %p\n",
3009                 __func__,
3010                 ireq);
3011
3012         memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
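        /* In byte 1 of the H2D FIS, bit 7 is the C bit (set for a command
         * register update as opposed to a device control update) and bits
         * 3:0 carry the port multiplier port, which is cleared just below.
         */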
3013         if (!task->ata_task.device_control_reg_update)
3014                 fis->flags |= 0x80;
3015         fis->flags &= 0xF0;
3016
3017         status = sci_io_request_construct_basic_sata(ireq);
3018
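        /* For NCQ (FPDMA) reads and writes the queue tag is carried in
         * bits 7:3 of the FIS sector count field; mirror the tag into the
         * task context as well.
         */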
3019         if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
3020                    qc->tf.command == ATA_CMD_FPDMA_READ)) {
3021                 fis->sector_count = qc->tag << 3;
3022                 ireq->tc->type.stp.ncq_tag = qc->tag;
3023         }
3024
3025         return status;
3026 }
3027
3028 static enum sci_status
3029 sci_io_request_construct_smp(struct device *dev,
3030                               struct isci_request *ireq,
3031                               struct sas_task *task)
3032 {
3033         struct scatterlist *sg = &task->smp_task.smp_req;
3034         struct isci_remote_device *idev;
3035         struct scu_task_context *task_context;
3036         struct isci_port *iport;
3037         struct smp_req *smp_req;
3038         void *kaddr;
3039         u8 req_len;
3040         u32 cmd;
3041
3042         kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
3043         smp_req = kaddr + sg->offset;
3044         /*
3045          * Look at the SMP request's header fields; for certain SAS 1.x SMP
3046          * functions under SAS 2.0, a zero request length really indicates
3047          * a non-zero default length.
3048          */
3049         if (smp_req->req_len == 0) {
3050                 switch (smp_req->func) {
3051                 case SMP_DISCOVER:
3052                 case SMP_REPORT_PHY_ERR_LOG:
3053                 case SMP_REPORT_PHY_SATA:
3054                 case SMP_REPORT_ROUTE_INFO:
3055                         smp_req->req_len = 2;
3056                         break;
3057                 case SMP_CONF_ROUTE_INFO:
3058                 case SMP_PHY_CONTROL:
3059                 case SMP_PHY_TEST_FUNCTION:
3060                         smp_req->req_len = 9;
3061                         break;
3062                         /* Default - zero is a valid default for 2.0. */
3063                 }
3064         }
3065         req_len = smp_req->req_len;
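        /* Swab the request into the dword ordering the SCU consumes, and
         * capture the first dword (the SMP frame header) for the task
         * context before dropping the kernel mapping.
         */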
3066         sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3067         cmd = *(u32 *) smp_req;
3068         kunmap_atomic(kaddr, KM_IRQ0);
3069
3070         if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3071                 return SCI_FAILURE;
3072
3073         ireq->protocol = SCIC_SMP_PROTOCOL;
3074
3075         /* The request dwords were byte swapped above, before the DMA mapping. */
3076
3077         task_context = ireq->tc;
3078
3079         idev = ireq->target_device;
3080         iport = idev->owning_port;
3081
3082         /*
3083          * Fill in the TC with its required data
3084          * 00h
3085          */
3086         task_context->priority = 0;
3087         task_context->initiator_request = 1;
3088         task_context->connection_rate = idev->connection_rate;
3089         task_context->protocol_engine_index = ISCI_PEG;
3090         task_context->logical_port_index = iport->physical_port_index;
3091         task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3092         task_context->abort = 0;
3093         task_context->valid = SCU_TASK_CONTEXT_VALID;
3094         task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3095
3096         /* 04h */
3097         task_context->remote_node_index = idev->rnc.remote_node_index;
3098         task_context->command_code = 0;
3099         task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3100
3101         /* 08h */
3102         task_context->link_layer_control = 0;
3103         task_context->do_not_dma_ssp_good_response = 1;
3104         task_context->strict_ordering = 0;
3105         task_context->control_frame = 1;
3106         task_context->timeout_enable = 0;
3107         task_context->block_guard_enable = 0;
3108
3109         /* 0ch */
3110         task_context->address_modifier = 0;
3111
3112         /* 10h */
3113         task_context->ssp_command_iu_length = req_len;
3114
3115         /* 14h */
3116         task_context->transfer_length_bytes = 0;
3117
3118         /*
3119          * 18h ~ 30h, protocol specific
3120          * since the command IU has been built by the framework at this point,
3121          * we just copy the first DWord from the command IU to this location. */
3122         memcpy(&task_context->type.smp, &cmd, sizeof(u32));
3123
3124         /*
3125          * 40h
3126          * "For SMP you could program it to zero. We would prefer that way
3127          * so that done code will be consistent." - Venki
3128          */
3129         task_context->task_phase = 0;
3130
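        /* Compose the post context used to post this task context to the
         * hardware: the request type, the protocol engine group, the
         * logical port, and the task context index from the I/O tag.
         */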
3131         ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3132                               (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3133                                (iport->physical_port_index <<
3134                                 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3135                               ISCI_TAG_TCI(ireq->io_tag));
3136         /*
3137          * Copy the physical address of the command buffer to the SCU Task
3138          * Context; the command buffer should not contain the command header.
3139          */
3140         task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
3141         task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
3142
3143         /* SMP response comes as UF, so no need to set response IU address. */
3144         task_context->response_iu_upper = 0;
3145         task_context->response_iu_lower = 0;
3146
3147         sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
3148
3149         return SCI_SUCCESS;
3150 }
3151
3152 /*
3153  * isci_smp_request_build() - This function builds the SMP request.
3154  * @ireq: This parameter points to the isci_request allocated in the
3155  *    request construct function.
3156  *
3157  * SCI_SUCCESS on successful completion, or a specific failure code.
3158  */
3159 static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3160 {
3161         struct sas_task *task = isci_request_access_task(ireq);
3162         struct device *dev = &ireq->isci_host->pdev->dev;
3163         enum sci_status status = SCI_FAILURE;
3164
3165         status = sci_io_request_construct_smp(dev, ireq, task);
3166         if (status != SCI_SUCCESS)
3167                 dev_dbg(&ireq->isci_host->pdev->dev,
3168                          "%s: failed with status = %d\n",
3169                          __func__,
3170                          status);
3171
3172         return status;
3173 }
3174
3175 /**
3176  * isci_io_request_build() - This function builds the io request object.
3177  * @ihost: This parameter specifies the ISCI host object
3178  * @request: This parameter points to the isci_request object allocated in the
3179  *    request construct function.
3180  * @idev: This parameter is the handle for the sci core's remote device
3181  *    object that is the destination for this request.
3182  *
3183  * SCI_SUCCESS on successful completion, or a specific failure code.
3184  */
3185 static enum sci_status isci_io_request_build(struct isci_host *ihost,
3186                                              struct isci_request *request,
3187                                              struct isci_remote_device *idev)
3188 {
3189         enum sci_status status = SCI_SUCCESS;
3190         struct sas_task *task = isci_request_access_task(request);
3191
3192         dev_dbg(&ihost->pdev->dev,
3193                 "%s: idev = 0x%p; request = %p, "
3194                 "num_scatter = %d\n",
3195                 __func__,
3196                 idev,
3197                 request,
3198                 task->num_scatter);
3199
3200         /* map the sgl addresses, if present.
3201          * libata does the mapping for sata devices
3202          * before we get the request.
3203          */
3204         if (task->num_scatter &&
3205             !sas_protocol_ata(task->task_proto) &&
3206             !(SAS_PROTOCOL_SMP & task->task_proto)) {
3207
3208                 request->num_sg_entries = dma_map_sg(
3209                         &ihost->pdev->dev,
3210                         task->scatter,
3211                         task->num_scatter,
3212                         task->data_dir
3213                         );
3214
3215                 if (request->num_sg_entries == 0)
3216                         return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3217         }
3218
3219         status = sci_io_request_construct(ihost, idev, request);
3220
3221         if (status != SCI_SUCCESS) {
3222                 dev_dbg(&ihost->pdev->dev,
3223                          "%s: failed request construct\n",
3224                          __func__);
3225                 return SCI_FAILURE;
3226         }
3227
3228         switch (task->task_proto) {
3229         case SAS_PROTOCOL_SMP:
3230                 status = isci_smp_request_build(request);
3231                 break;
3232         case SAS_PROTOCOL_SSP:
3233                 status = isci_request_ssp_request_construct(request);
3234                 break;
3235         case SAS_PROTOCOL_SATA:
3236         case SAS_PROTOCOL_STP:
3237         case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3238                 status = isci_request_stp_request_construct(request);
3239                 break;
3240         default:
3241                 dev_dbg(&ihost->pdev->dev,
3242                          "%s: unknown protocol\n", __func__);
3243                 return SCI_FAILURE;
3244         }
3245
3246         return status;
3247 }
3248
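/* Reset the preallocated request object selected by @tag for a new I/O;
 * ISCI_TAG_TCI() extracts the task context index that indexes the host's
 * request array.
 */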
3249 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
3250 {
3251         struct isci_request *ireq;
3252
3253         ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
3254         ireq->io_tag = tag;
3255         ireq->io_request_completion = NULL;
3256         ireq->flags = 0;
3257         ireq->num_sg_entries = 0;
3258         INIT_LIST_HEAD(&ireq->completed_node);
3259         INIT_LIST_HEAD(&ireq->dev_node);
3260         isci_request_change_state(ireq, allocated);
3261
3262         return ireq;
3263 }
3264
3265 static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
3266                                                      struct sas_task *task,
3267                                                      u16 tag)
3268 {
3269         struct isci_request *ireq;
3270
3271         ireq = isci_request_from_tag(ihost, tag);
3272         ireq->ttype_ptr.io_task_ptr = task;
3273         ireq->ttype = io_task;
3274         task->lldd_task = ireq;
3275
3276         return ireq;
3277 }
3278
3279 struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
3280                                                struct isci_tmf *isci_tmf,
3281                                                u16 tag)
3282 {
3283         struct isci_request *ireq;
3284
3285         ireq = isci_request_from_tag(ihost, tag);
3286         ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3287         ireq->ttype = tmf_task;
3288
3289         return ireq;
3290 }
3291
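/**
 * isci_request_execute() - build and start an I/O request.
 * @ihost: host to submit the request to.
 * @idev: remote device that is the target of the request.
 * @task: libsas task backing the request.
 * @tag: tag to assign to the request.
 *
 * Returns 0 when the request has been started (or is being held for the
 * SCSI error handler after a reset-required indication); otherwise returns
 * the core failure status from the build or start step.
 */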
3292 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
3293                          struct sas_task *task, u16 tag)
3294 {
3295         enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3296         struct isci_request *ireq;
3297         unsigned long flags;
3298         int ret = 0;
3299
3300         /* do common allocation and init of request object. */
3301         ireq = isci_io_request_from_tag(ihost, task, tag);
3302
3303         status = isci_io_request_build(ihost, ireq, idev);
3304         if (status != SCI_SUCCESS) {
3305                 dev_dbg(&ihost->pdev->dev,
3306                          "%s: request_construct failed - status = 0x%x\n",
3307                          __func__,
3308                          status);
3309                 return status;
3310         }
3311
3312         spin_lock_irqsave(&ihost->scic_lock, flags);
3313
3314         if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
3315
3316                 if (isci_task_is_ncq_recovery(task)) {
3317
3318                         /* The device is in an NCQ recovery state.  Issue the
3319                          * request on the task side.  Note that it will
3320                          * complete on the I/O request side because the
3321                          * request was built that way (i.e. the
3322                          * IREQ_TMF flag is not set in ireq->flags).
3323                          */
3324                         status = sci_controller_start_task(ihost,
3325                                                             idev,
3326                                                             ireq);
3327                 } else {
3328                         status = SCI_FAILURE;
3329                 }
3330         } else {
3331                 /* Send the request; the I/O tag was assigned before the build. */
3332                 status = sci_controller_start_io(ihost, idev,
3333                                                   ireq);
3334         }
3335
3336         if (status != SCI_SUCCESS &&
3337             status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3338                 dev_dbg(&ihost->pdev->dev,
3339                          "%s: failed request start (0x%x)\n",
3340                          __func__, status);
3341                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3342                 return status;
3343         }
3344
3345         /* Either I/O started OK, or the core has signaled that
3346          * the device needs a target reset.
3347          *
3348          * In either case, hold onto the I/O for later.
3349          *
3350          * Update its status and add it to the list in the
3351          * remote device object.
3352          */
3353         list_add(&ireq->dev_node, &idev->reqs_in_process);
3354
3355         if (status == SCI_SUCCESS) {
3356                 isci_request_change_state(ireq, started);
3357         } else {
3358                 /* The request did not really start in the
3359                  * hardware, so clear the request handle
3360                  * here so no terminations will be done.
3361                  */
3362                 set_bit(IREQ_TERMINATED, &ireq->flags);
3363                 isci_request_change_state(ireq, completed);
3364         }
3365         spin_unlock_irqrestore(&ihost->scic_lock, flags);
3366
3367         if (status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3369                 /* Signal libsas that we need the SCSI error
3370                  * handler thread to work on this I/O and that
3371                  * we want a device reset.
3372                  */
3373                 spin_lock_irqsave(&task->task_state_lock, flags);
3374                 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3375                 spin_unlock_irqrestore(&task->task_state_lock, flags);
3376
3377                 /* Cause this task to be scheduled in the SCSI error
3378                  * handler thread.
3379                  */
3380                 isci_execpath_callback(ihost, task,
3381                                        sas_task_abort);
3382
3383                 /* Change the status, since we are holding
3384                  * the I/O until it is managed by the SCSI
3385                  * error handler.
3386                  */
3387                 status = SCI_SUCCESS;
3388         }
3389
3390         return ret;
3391 }