isci: Fix a race condition in the SSP task management path
[pandora-kernel.git] / drivers / scsi / isci / task.c
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55
56 #include <linux/completion.h>
57 #include <linux/irqflags.h>
58 #include "sas.h"
59 #include <scsi/libsas.h>
60 #include "remote_device.h"
61 #include "remote_node_context.h"
62 #include "isci.h"
63 #include "request.h"
64 #include "task.h"
65 #include "host.h"
66
67 /**
68 * isci_task_refuse() - complete the request to the upper layer driver in
69 *     the case where an I/O needs to be completed back in the submit path.
70 * @ihost: host on which the the request was queued
71 * @task: request to complete
72 * @response: response code for the completed task.
73 * @status: status code for the completed task.
74 *
75 */
76 static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
77                              enum service_response response,
78                              enum exec_status status)
79
80 {
81         enum isci_completion_selection disposition;
82
83         disposition = isci_perform_normal_io_completion;
84         disposition = isci_task_set_completion_status(task, response, status,
85                                                       disposition);
86
87         /* Tasks aborted specifically by a call to the lldd_abort_task
88          * function should not be completed to the host in the regular path.
89          */
90         switch (disposition) {
91         case isci_perform_normal_io_completion:
92                 /* Normal notification (task_done) */
93                 dev_dbg(&ihost->pdev->dev,
94                         "%s: Normal - task = %p, response=%d, "
95                         "status=%d\n",
96                         __func__, task, response, status);
97
98                 task->lldd_task = NULL;
99
100                 isci_execpath_callback(ihost, task, task->task_done);
101                 break;
102
103         case isci_perform_aborted_io_completion:
104                 /*
105                  * No notification because this request is already in the
106                  * abort path.
107                  */
108                 dev_dbg(&ihost->pdev->dev,
109                         "%s: Aborted - task = %p, response=%d, "
110                         "status=%d\n",
111                         __func__, task, response, status);
112                 break;
113
114         case isci_perform_error_io_completion:
115                 /* Use sas_task_abort */
116                 dev_dbg(&ihost->pdev->dev,
117                         "%s: Error - task = %p, response=%d, "
118                         "status=%d\n",
119                         __func__, task, response, status);
120
121                 isci_execpath_callback(ihost, task, sas_task_abort);
122                 break;
123
124         default:
125                 dev_dbg(&ihost->pdev->dev,
126                         "%s: isci task notification default case!",
127                         __func__);
128                 sas_task_abort(task);
129                 break;
130         }
131 }
132
/* Iterate over @num sas_tasks chained through task->list, advancing @task
 * to the next list entry on each pass.  NOTE: @task is stepped past the
 * last element when the loop exits, and @num is consumed (decremented to
 * zero) by the iteration.
 */
#define for_each_sas_task(num, task) \
	for (; num > 0; num--,\
	     task = list_entry(task->list.next, struct sas_task, list))
136
137
138 static inline int isci_device_io_ready(struct isci_remote_device *idev,
139                                        struct sas_task *task)
140 {
141         return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
142                       (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
143                        isci_task_is_ncq_recovery(task))
144                     : 0;
145 }
/**
 * isci_task_execute_task() - This function is one of the SAS Domain Template
 *    functions. This function is called by libsas to send a task down to
 *    hardware.
 * @task: This parameter specifies the SAS task to send.
 * @num: This parameter specifies the number of tasks to queue.
 * @gfp_flags: This parameter specifies the context of this call.
 *
 * status, zero indicates success.
 */
int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
{
	struct isci_host *ihost = dev_to_ihost(task->dev);
	struct isci_remote_device *idev;
	unsigned long flags;
	bool io_ready;
	u16 tag;

	dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);

	for_each_sas_task(num, task) {
		/* Default failure; only a successful isci_request_execute()
		 * below changes this, and it also gates tag cleanup at the
		 * bottom of the loop.
		 */
		enum sci_status status = SCI_FAILURE;

		/* Look up the device, sample its readiness, and reserve a
		 * task context tag - all under scic_lock so the three stay
		 * mutually consistent.
		 */
		spin_lock_irqsave(&ihost->scic_lock, flags);
		idev = isci_lookup_device(task->dev);
		io_ready = isci_device_io_ready(idev, task);
		tag = isci_alloc_tag(ihost);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		dev_dbg(&ihost->pdev->dev,
			"task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
			task, num, task->dev, idev, idev ? idev->flags : 0,
			task->uldd_task);

		if (!idev) {
			/* No remote device: the I/O cannot be delivered. */
			isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
					 SAS_DEVICE_UNKNOWN);
		} else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
			/* Indicate QUEUE_FULL so that the scsi midlayer
			 * retries.
			 */
			isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
					 SAS_QUEUE_FULL);
		} else {
			/* There is a device and it's ready for I/O. */
			spin_lock_irqsave(&task->task_state_lock, flags);

			if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
				/* The I/O was aborted. */
				spin_unlock_irqrestore(&task->task_state_lock,
						       flags);

				isci_task_refuse(ihost, task,
						 SAS_TASK_UNDELIVERED,
						 SAM_STAT_TASK_ABORTED);
			} else {
				task->task_state_flags |= SAS_TASK_AT_INITIATOR;
				spin_unlock_irqrestore(&task->task_state_lock, flags);

				/* build and send the request. */
				status = isci_request_execute(ihost, idev, task, tag);

				if (status != SCI_SUCCESS) {

					spin_lock_irqsave(&task->task_state_lock, flags);
					/* Did not really start this command. */
					task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
					spin_unlock_irqrestore(&task->task_state_lock, flags);

					if (test_bit(IDEV_GONE, &idev->flags)) {

						/* Indicate that the device
						 * is gone.
						 */
						isci_task_refuse(ihost, task,
							SAS_TASK_UNDELIVERED,
							SAS_DEVICE_UNKNOWN);
					} else {
						/* Indicate QUEUE_FULL so that
						 * the scsi midlayer retries.
						 * If the request failed for
						 * remote device reasons, it
						 * gets returned as
						 * SAS_TASK_UNDELIVERED next
						 * time through.
						 */
						isci_task_refuse(ihost, task,
							SAS_TASK_COMPLETE,
							SAS_QUEUE_FULL);
					}
				}
			}
		}
		if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
			spin_lock_irqsave(&ihost->scic_lock, flags);
			/* command never hit the device, so just free
			 * the tci and skip the sequence increment
			 */
			isci_tci_free(ihost, ISCI_TAG_TCI(tag));
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
		}
		/* Drop the reference taken by isci_lookup_device(). */
		isci_put_device(idev);
	}
	return 0;
}
251
/* Build the protocol-specific (SATA/STP) portion of a task-management
 * request.  Only soft-reset (SRST high/low) TMFs are supported; for those,
 * a register host-to-device FIS is built and handed to the core.
 *
 * Returns SCI_FAILURE if @ireq is not a TMF or the TMF code is
 * unsupported, otherwise the status of the core construction.
 */
static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
{
	struct isci_tmf *isci_tmf;
	enum sci_status status;

	/* Only task-management requests carry a TMF payload. */
	if (!test_bit(IREQ_TMF, &ireq->flags))
		return SCI_FAILURE;

	isci_tmf = isci_request_access_tmf(ireq);

	switch (isci_tmf->tmf_code) {

	case isci_tmf_sata_srst_high:
	case isci_tmf_sata_srst_low: {
		struct host_to_dev_fis *fis = &ireq->stp.cmd;

		memset(fis, 0, sizeof(*fis));

		/* 0x27 is the register host-to-device FIS type. */
		fis->fis_type  =  0x27;
		/* NOTE(review): flags is already zero after the memset, so
		 * these two mask operations are no-ops; presumably kept to
		 * document which flag bits must be clear - confirm.
		 */
		fis->flags     &= ~0x80;
		fis->flags     &= 0xF0;
		/* SRST high asserts the soft-reset bit; SRST low clears it. */
		if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
			fis->control |= ATA_SRST;
		else
			fis->control &= ~ATA_SRST;
		break;
	}
	/* other management commands go here... */
	default:
		return SCI_FAILURE;
	}

	/* core builds the protocol specific request
	 *  based on the h2d fis.
	 */
	status = sci_task_request_construct_sata(ireq);

	return status;
}
291
/* Allocate and construct a task-management request for @idev using the
 * pre-allocated @tag.
 *
 * Returns the constructed request, or NULL when allocation or any of the
 * core/protocol construction steps fail.  NOTE(review): on a construction
 * failure the request obtained from isci_tmf_request_from_tag() is not
 * explicitly released here - presumably reclaimed with the tag by the
 * caller's error path; confirm.
 */
static struct isci_request *isci_task_request_build(struct isci_host *ihost,
						    struct isci_remote_device *idev,
						    u16 tag, struct isci_tmf *isci_tmf)
{
	enum sci_status status = SCI_FAILURE;
	struct isci_request *ireq = NULL;
	struct domain_device *dev;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_tmf = %p\n", __func__, isci_tmf);

	dev = idev->domain_dev;

	/* do common allocation and init of request object. */
	ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
	if (!ireq)
		return NULL;

	/* let the core do it's construct. */
	status = sci_task_request_construct(ihost, idev, tag,
					     ireq);

	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: sci_task_request_construct failed - "
			 "status = 0x%x\n",
			 __func__,
			 status);
		return NULL;
	}

	/* XXX convert to get this from task->tproto like other drivers */
	if (dev->dev_type == SAS_END_DEV) {
		/* SAS end device: construct an SSP task frame. */
		isci_tmf->proto = SAS_PROTOCOL_SSP;
		status = sci_task_request_construct_ssp(ireq);
		if (status != SCI_SUCCESS)
			return NULL;
	}

	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
		/* SATA/STP device: build the H2D-FIS-based request. */
		isci_tmf->proto = SAS_PROTOCOL_SATA;
		status = isci_sata_management_task_request_build(ireq);

		if (status != SCI_SUCCESS)
			return NULL;
	}
	return ireq;
}
340
341 /**
342 * isci_request_mark_zombie() - This function must be called with scic_lock held.
343 */
344 static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
345 {
346         struct completion *tmf_completion = NULL;
347         struct completion *req_completion;
348
349         /* Set the request state to "dead". */
350         ireq->status = dead;
351
352         req_completion = ireq->io_request_completion;
353         ireq->io_request_completion = NULL;
354
355         if (test_bit(IREQ_TMF, &ireq->flags)) {
356                 /* Break links with the TMF request. */
357                 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
358
359                 /* In the case where a task request is dying,
360                  * the thread waiting on the complete will sit and
361                  * timeout unless we wake it now.  Since the TMF
362                  * has a default error status, complete it here
363                  * to wake the waiting thread.
364                  */
365                 if (tmf) {
366                         tmf_completion = tmf->complete;
367                         tmf->complete = NULL;
368                 }
369                 ireq->ttype_ptr.tmf_task_ptr = NULL;
370                 dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
371                         __func__, tmf->tmf_code, tmf->io_tag);
372         } else {
373                 /* Break links with the sas_task - the callback is done
374                  * elsewhere.
375                  */
376                 struct sas_task *task = isci_request_access_task(ireq);
377
378                 if (task)
379                         task->lldd_task = NULL;
380
381                 ireq->ttype_ptr.io_task_ptr = NULL;
382         }
383
384         dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",
385                  ireq->io_tag);
386
387         /* Don't force waiting threads to timeout. */
388         if (req_completion)
389                 complete(req_completion);
390
391         if (tmf_completion != NULL)
392                 complete(tmf_completion);
393 }
394
/* Build, start, and synchronously wait for a task-management request on
 * @idev, with @timeout_ms bounding the wait.
 *
 * On timeout the TMF is terminated; if even the termination fails to
 * complete in time, the request is marked zombie so a late completion
 * dies silently.  Must be called from a context that can sleep.
 *
 * Returns TMF_RESP_FUNC_COMPLETE on success (or when a valid response
 * frame was received), TMF_RESP_FUNC_FAILED otherwise.
 */
static int isci_task_execute_tmf(struct isci_host *ihost,
				 struct isci_remote_device *idev,
				 struct isci_tmf *tmf, unsigned long timeout_ms)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	enum sci_task_status status = SCI_TASK_FAILURE;
	struct isci_request *ireq;
	int ret = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	unsigned long timeleft;
	u16 tag;

	/* Reserve a task context tag up front; all later error paths
	 * release it via err_tci.
	 */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	tag = isci_alloc_tag(ihost);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
		return ret;

	/* sanity check, return TMF_RESP_FUNC_FAILED
	 * if the device is not there and ready.
	 */
	if (!idev ||
	    (!test_bit(IDEV_IO_READY, &idev->flags) &&
	     !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p not ready (%#lx)\n",
			__func__,
			idev, idev ? idev->flags : 0);
		goto err_tci;
	} else
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p\n",
			__func__, idev);

	/* Assign the pointer to the TMF's completion kernel wait structure. */
	tmf->complete = &completion;
	/* Default to timeout; the completion path overwrites this status. */
	tmf->status = SCI_FAILURE_TIMEOUT;

	ireq = isci_task_request_build(ihost, idev, tag, tmf);
	if (!ireq)
		goto err_tci;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* start the TMF io. */
	status = sci_controller_start_task(ihost, idev, ireq);

	if (status != SCI_TASK_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			 "%s: start_io failed - status = 0x%x, request = %p\n",
			 __func__,
			 status,
			 ireq);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		goto err_tci;
	}

	/* Notify the caller's state machine that the TMF is in flight. */
	if (tmf->cb_state_func != NULL)
		tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);

	isci_request_change_state(ireq, started);

	/* add the request to the remote device request list. */
	list_add(&ireq->dev_node, &idev->reqs_in_process);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait for the TMF to complete, or a timeout. */
	timeleft = wait_for_completion_timeout(&completion,
					       msecs_to_jiffies(timeout_ms));

	if (timeleft == 0) {
		/* The TMF did not complete - this could be because
		 * of an unplug.  Terminate the TMF request now.
		 */
		spin_lock_irqsave(&ihost->scic_lock, flags);

		if (tmf->cb_state_func != NULL)
			tmf->cb_state_func(isci_tmf_timed_out, tmf,
					   tmf->cb_data);

		sci_controller_terminate_request(ihost, idev, ireq);

		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		/* Give the termination itself a bounded time to finish. */
		timeleft = wait_for_completion_timeout(
			&completion,
			msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

		if (!timeleft) {
			/* Strange condition - the termination of the TMF
			 * request timed-out.
			 */
			spin_lock_irqsave(&ihost->scic_lock, flags);

			/* If the TMF status has not changed, kill it. */
			if (tmf->status == SCI_FAILURE_TIMEOUT)
				isci_request_mark_zombie(ihost, ireq);

			spin_unlock_irqrestore(&ihost->scic_lock, flags);
		}
	}

	isci_print_tmf(tmf);

	if (tmf->status == SCI_SUCCESS)
		ret =  TMF_RESP_FUNC_COMPLETE;
	else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
		/* A response frame was received; treat the TMF as
		 * complete and let the caller inspect the response data.
		 */
		dev_dbg(&ihost->pdev->dev,
			"%s: tmf.status == "
			"SCI_FAILURE_IO_RESPONSE_VALID\n",
			__func__);
		ret =  TMF_RESP_FUNC_COMPLETE;
	}
	/* Else - leave the default "failed" status alone. */

	dev_dbg(&ihost->pdev->dev,
		"%s: completed request = %p\n",
		__func__,
		ireq);

	return ret;

 err_tci:
	/* The request never started; return the reserved tag. */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	isci_tci_free(ihost, ISCI_TAG_TCI(tag));
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return ret;
}
526
527 static void isci_task_build_tmf(struct isci_tmf *tmf,
528                                 enum isci_tmf_function_codes code,
529                                 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
530                                                     struct isci_tmf *,
531                                                     void *),
532                                 void *cb_data)
533 {
534         memset(tmf, 0, sizeof(*tmf));
535
536         tmf->tmf_code      = code;
537         tmf->cb_state_func = tmf_sent_cb;
538         tmf->cb_data       = cb_data;
539 }
540
/* Build an abort-task TMF for @old_request.  The request being aborted
 * doubles as the callback data, and its I/O tag identifies which task the
 * TMF targets.
 */
static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
					   enum isci_tmf_function_codes code,
					   void (*tmf_sent_cb)(enum isci_tmf_cb_state,
							       struct isci_tmf *,
							       void *),
					   struct isci_request *old_request)
{
	isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
	tmf->io_tag = old_request->io_tag;
}
551
552 /**
553  * isci_task_validate_request_to_abort() - This function checks the given I/O
554  *    against the "started" state.  If the request is still "started", it's
555  *    state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
556  *    BEFORE CALLING THIS FUNCTION.
557  * @isci_request: This parameter specifies the request object to control.
558  * @isci_host: This parameter specifies the ISCI host object
559  * @isci_device: This is the device to which the request is pending.
560  * @aborted_io_completion: This is a completion structure that will be added to
561  *    the request in case it is changed to aborting; this completion is
562  *    triggered when the request is fully completed.
563  *
564  * Either "started" on successful change of the task status to "aborted", or
565  * "unallocated" if the task cannot be controlled.
566  */
567 static enum isci_request_status isci_task_validate_request_to_abort(
568         struct isci_request *isci_request,
569         struct isci_host *isci_host,
570         struct isci_remote_device *isci_device,
571         struct completion *aborted_io_completion)
572 {
573         enum isci_request_status old_state = unallocated;
574
575         /* Only abort the task if it's in the
576          *  device's request_in_process list
577          */
578         if (isci_request && !list_empty(&isci_request->dev_node)) {
579                 old_state = isci_request_change_started_to_aborted(
580                         isci_request, aborted_io_completion);
581
582         }
583
584         return old_state;
585 }
586
587 static int isci_request_is_dealloc_managed(enum isci_request_status stat)
588 {
589         switch (stat) {
590         case aborted:
591         case aborting:
592         case terminating:
593         case completed:
594         case dead:
595                 return true;
596         default:
597                 return false;
598         }
599 }
600
/**
 * isci_terminate_request_core() - This function will terminate the given
 *    request, and wait for it to complete.  This function must only be called
 *    from a thread that can wait.  Note that the request is terminated and
 *    completed (back to the host, if started there).
 * @ihost: This SCU.
 * @idev: The target.
 * @isci_request: The I/O request to be terminated.
 */
static void isci_terminate_request_core(struct isci_host *ihost,
					struct isci_remote_device *idev,
					struct isci_request *isci_request)
{
	enum sci_status status      = SCI_SUCCESS;
	bool was_terminated         = false;
	bool needs_cleanup_handling = false;
	unsigned long     flags;
	unsigned long     termination_completed = 1;
	struct completion *io_request_completion;

	dev_dbg(&ihost->pdev->dev,
		"%s: device = %p; request = %p\n",
		__func__, idev, isci_request);

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* Capture the completion pointer under the lock; the request's own
	 * field may be cleared concurrently on other paths.
	 */
	io_request_completion = isci_request->io_request_completion;

	/* Note that we are not going to control
	 * the target to abort the request.
	 */
	set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);

	/* Make sure the request wasn't just sitting around signalling
	 * device condition (if the request handle is NULL, then the
	 * request completed but needed additional handling here).
	 */
	if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
		was_terminated = true;
		needs_cleanup_handling = true;
		status = sci_controller_terminate_request(ihost,
							   idev,
							   isci_request);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/*
	 * The only time the request to terminate will
	 * fail is when the io request is completed and
	 * being aborted.
	 */
	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: sci_controller_terminate_request"
			" returned = 0x%x\n",
			__func__, status);

		isci_request->io_request_completion = NULL;

	} else {
		if (was_terminated) {
			dev_dbg(&ihost->pdev->dev,
				"%s: before completion wait (%p/%p)\n",
				__func__, isci_request, io_request_completion);

			/* Wait here for the request to complete. */
			termination_completed
				= wait_for_completion_timeout(
				   io_request_completion,
				   msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

			if (!termination_completed) {

				/* The request to terminate has timed out.  */
				spin_lock_irqsave(&ihost->scic_lock, flags);

				/* Check for state changes. */
				if (!test_bit(IREQ_TERMINATED,
					      &isci_request->flags)) {

					/* The best we can do is to have the
					 * request die a silent death if it
					 * ever really completes.
					 */
					isci_request_mark_zombie(ihost,
								 isci_request);
					needs_cleanup_handling = true;
				} else
					termination_completed = 1;

				spin_unlock_irqrestore(&ihost->scic_lock,
						       flags);

				if (!termination_completed) {

					dev_dbg(&ihost->pdev->dev,
						"%s: *** Timeout waiting for "
						"termination(%p/%p)\n",
						__func__, io_request_completion,
						isci_request);

					/* The request can no longer be referenced
					 * safely since it may go away if the
					 * termination every really does complete.
					 */
					isci_request = NULL;
				}
			}
			if (termination_completed)
				dev_dbg(&ihost->pdev->dev,
					"%s: after completion wait (%p/%p)\n",
					__func__, isci_request, io_request_completion);
		}

		if (termination_completed) {

			isci_request->io_request_completion = NULL;

			/* Peek at the status of the request.  This will tell
			 * us if there was special handling on the request such that it
			 * needs to be detached and freed here.
			 */
			spin_lock_irqsave(&isci_request->state_lock, flags);

			needs_cleanup_handling
				= isci_request_is_dealloc_managed(
					isci_request->status);

			spin_unlock_irqrestore(&isci_request->state_lock, flags);

		}
		if (needs_cleanup_handling) {

			dev_dbg(&ihost->pdev->dev,
				"%s: cleanup isci_device=%p, request=%p\n",
				__func__, idev, isci_request);

			/* Release the tag and unlink the request; the NULL
			 * check guards the zombie/timeout path above, which
			 * dropped our reference to the request.
			 */
			if (isci_request != NULL) {
				spin_lock_irqsave(&ihost->scic_lock, flags);
				isci_free_tag(ihost, isci_request->io_tag);
				isci_request_change_state(isci_request, unallocated);
				list_del_init(&isci_request->dev_node);
				spin_unlock_irqrestore(&ihost->scic_lock, flags);
			}
		}
	}
}
749
750 /**
751  * isci_terminate_pending_requests() - This function will change the all of the
752  *    requests on the given device's state to "aborting", will terminate the
753  *    requests, and wait for them to complete.  This function must only be
754  *    called from a thread that can wait.  Note that the requests are all
755  *    terminated and completed (back to the host, if started there).
756  * @isci_host: This parameter specifies SCU.
757  * @idev: This parameter specifies the target.
758  *
759  */
760 void isci_terminate_pending_requests(struct isci_host *ihost,
761                                      struct isci_remote_device *idev)
762 {
763         struct completion request_completion;
764         enum isci_request_status old_state;
765         unsigned long flags;
766         LIST_HEAD(list);
767
768         spin_lock_irqsave(&ihost->scic_lock, flags);
769         list_splice_init(&idev->reqs_in_process, &list);
770
771         /* assumes that isci_terminate_request_core deletes from the list */
772         while (!list_empty(&list)) {
773                 struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
774
775                 /* Change state to "terminating" if it is currently
776                  * "started".
777                  */
778                 old_state = isci_request_change_started_to_newstate(ireq,
779                                                                     &request_completion,
780                                                                     terminating);
781                 switch (old_state) {
782                 case started:
783                 case completed:
784                 case aborting:
785                         break;
786                 default:
787                         /* termination in progress, or otherwise dispositioned.
788                          * We know the request was on 'list' so should be safe
789                          * to move it back to reqs_in_process
790                          */
791                         list_move(&ireq->dev_node, &idev->reqs_in_process);
792                         ireq = NULL;
793                         break;
794                 }
795
796                 if (!ireq)
797                         continue;
798                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
799
800                 init_completion(&request_completion);
801
802                 dev_dbg(&ihost->pdev->dev,
803                          "%s: idev=%p request=%p; task=%p old_state=%d\n",
804                          __func__, idev, ireq,
805                         (!test_bit(IREQ_TMF, &ireq->flags)
806                                 ? isci_request_access_task(ireq)
807                                 : NULL),
808                         old_state);
809
810                 /* If the old_state is started:
811                  * This request was not already being aborted. If it had been,
812                  * then the aborting I/O (ie. the TMF request) would not be in
813                  * the aborting state, and thus would be terminated here.  Note
814                  * that since the TMF completion's call to the kernel function
815                  * "complete()" does not happen until the pending I/O request
816                  * terminate fully completes, we do not have to implement a
817                  * special wait here for already aborting requests - the
818                  * termination of the TMF request will force the request
819                  * to finish it's already started terminate.
820                  *
821                  * If old_state == completed:
822                  * This request completed from the SCU hardware perspective
823                  * and now just needs cleaning up in terms of freeing the
824                  * request and potentially calling up to libsas.
825                  *
826                  * If old_state == aborting:
827                  * This request has already gone through a TMF timeout, but may
828                  * not have been terminated; needs cleaning up at least.
829                  */
830                 isci_terminate_request_core(ihost, idev, ireq);
831                 spin_lock_irqsave(&ihost->scic_lock, flags);
832         }
833         spin_unlock_irqrestore(&ihost->scic_lock, flags);
834 }
835
836 /**
837  * isci_task_send_lu_reset_sas() - This function is called by of the SAS Domain
838  *    Template functions.
839  * @lun: This parameter specifies the lun to be reset.
840  *
841  * status, zero indicates success.
842  */
843 static int isci_task_send_lu_reset_sas(
844         struct isci_host *isci_host,
845         struct isci_remote_device *isci_device,
846         u8 *lun)
847 {
848         struct isci_tmf tmf;
849         int ret = TMF_RESP_FUNC_FAILED;
850
851         dev_dbg(&isci_host->pdev->dev,
852                 "%s: isci_host = %p, isci_device = %p\n",
853                 __func__, isci_host, isci_device);
854         /* Send the LUN reset to the target.  By the time the call returns,
855          * the TMF has fully exected in the target (in which case the return
856          * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or
857          * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
858          */
859         isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
860
861         #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
862         ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
863
864         if (ret == TMF_RESP_FUNC_COMPLETE)
865                 dev_dbg(&isci_host->pdev->dev,
866                         "%s: %p: TMF_LU_RESET passed\n",
867                         __func__, isci_device);
868         else
869                 dev_dbg(&isci_host->pdev->dev,
870                         "%s: %p: TMF_LU_RESET failed (%x)\n",
871                         __func__, isci_device, ret);
872
873         return ret;
874 }
875
876 static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
877                                  struct isci_remote_device *idev, u8 *lun)
878 {
879         int ret = TMF_RESP_FUNC_FAILED;
880         struct isci_tmf tmf;
881
882         /* Send the soft reset to the target */
883         #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
884         isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
885
886         ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
887
888         if (ret != TMF_RESP_FUNC_COMPLETE) {
889                 dev_dbg(&ihost->pdev->dev,
890                          "%s: Assert SRST failed (%p) = %x",
891                          __func__, idev, ret);
892
893                 /* Return the failure so that the LUN reset is escalated
894                  * to a target reset.
895                  */
896         }
897         return ret;
898 }
899
900 /**
901  * isci_task_lu_reset() - This function is one of the SAS Domain Template
902  *    functions. This is one of the Task Management functoins called by libsas,
903  *    to reset the given lun. Note the assumption that while this call is
904  *    executing, no I/O will be sent by the host to the device.
905  * @lun: This parameter specifies the lun to be reset.
906  *
907  * status, zero indicates success.
908  */
909 int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
910 {
911         struct isci_host *isci_host = dev_to_ihost(domain_device);
912         struct isci_remote_device *isci_device;
913         unsigned long flags;
914         int ret;
915
916         spin_lock_irqsave(&isci_host->scic_lock, flags);
917         isci_device = isci_lookup_device(domain_device);
918         spin_unlock_irqrestore(&isci_host->scic_lock, flags);
919
920         dev_dbg(&isci_host->pdev->dev,
921                 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
922                  __func__, domain_device, isci_host, isci_device);
923
924         if (!isci_device) {
925                 /* If the device is gone, stop the escalations. */
926                 dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__);
927
928                 ret = TMF_RESP_FUNC_COMPLETE;
929                 goto out;
930         }
931         set_bit(IDEV_EH, &isci_device->flags);
932
933         /* Send the task management part of the reset. */
934         if (sas_protocol_ata(domain_device->tproto)) {
935                 ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
936         } else
937                 ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
938
939         /* If the LUN reset worked, all the I/O can now be terminated. */
940         if (ret == TMF_RESP_FUNC_COMPLETE)
941                 /* Terminate all I/O now. */
942                 isci_terminate_pending_requests(isci_host,
943                                                 isci_device);
944
945  out:
946         isci_put_device(isci_device);
947         return ret;
948 }
949
950
/*       int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
/* libsas lldd_clear_nexus_port hook: not implemented, always reports
 * failure so libsas escalates to the next recovery step.
 */
int isci_task_clear_nexus_port(struct asd_sas_port *port)
{
	return TMF_RESP_FUNC_FAILED;
}
956
957
958
/* libsas lldd_clear_nexus_ha hook: not implemented, always reports
 * failure so libsas escalates to the next recovery step.
 */
int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
{
	return TMF_RESP_FUNC_FAILED;
}
963
964 /* Task Management Functions. Must be called from process context.       */
965
966 /**
967  * isci_abort_task_process_cb() - This is a helper function for the abort task
968  *    TMF command.  It manages the request state with respect to the successful
969  *    transmission / completion of the abort task request.
970  * @cb_state: This parameter specifies when this function was called - after
971  *    the TMF request has been started and after it has timed-out.
972  * @tmf: This parameter specifies the TMF in progress.
973  *
974  *
975  */
976 static void isci_abort_task_process_cb(
977         enum isci_tmf_cb_state cb_state,
978         struct isci_tmf *tmf,
979         void *cb_data)
980 {
981         struct isci_request *old_request;
982
983         old_request = (struct isci_request *)cb_data;
984
985         dev_dbg(&old_request->isci_host->pdev->dev,
986                 "%s: tmf=%p, old_request=%p\n",
987                 __func__, tmf, old_request);
988
989         switch (cb_state) {
990
991         case isci_tmf_started:
992                 /* The TMF has been started.  Nothing to do here, since the
993                  * request state was already set to "aborted" by the abort
994                  * task function.
995                  */
996                 if ((old_request->status != aborted)
997                         && (old_request->status != completed))
998                         dev_dbg(&old_request->isci_host->pdev->dev,
999                                 "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
1000                                 __func__, old_request->status, tmf, old_request);
1001                 break;
1002
1003         case isci_tmf_timed_out:
1004
1005                 /* Set the task's state to "aborting", since the abort task
1006                  * function thread set it to "aborted" (above) in anticipation
1007                  * of the task management request working correctly.  Since the
1008                  * timeout has now fired, the TMF request failed.  We set the
1009                  * state such that the request completion will indicate the
1010                  * device is no longer present.
1011                  */
1012                 isci_request_change_state(old_request, aborting);
1013                 break;
1014
1015         default:
1016                 dev_dbg(&old_request->isci_host->pdev->dev,
1017                         "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
1018                         __func__, cb_state, tmf, old_request);
1019                 break;
1020         }
1021 }
1022
1023 /**
1024  * isci_task_abort_task() - This function is one of the SAS Domain Template
1025  *    functions. This function is called by libsas to abort a specified task.
1026  * @task: This parameter specifies the SAS task to abort.
1027  *
1028  * status, zero indicates success.
1029  */
1030 int isci_task_abort_task(struct sas_task *task)
1031 {
1032         struct isci_host *isci_host = dev_to_ihost(task->dev);
1033         DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
1034         struct isci_request       *old_request = NULL;
1035         enum isci_request_status  old_state;
1036         struct isci_remote_device *isci_device = NULL;
1037         struct isci_tmf           tmf;
1038         int                       ret = TMF_RESP_FUNC_FAILED;
1039         unsigned long             flags;
1040         int                       perform_termination = 0;
1041         int                       target_done_already = 0;
1042
1043         /* Get the isci_request reference from the task.  Note that
1044          * this check does not depend on the pending request list
1045          * in the device, because tasks driving resets may land here
1046          * after completion in the core.
1047          */
1048         spin_lock_irqsave(&isci_host->scic_lock, flags);
1049         spin_lock(&task->task_state_lock);
1050
1051         old_request = task->lldd_task;
1052
1053         /* If task is already done, the request isn't valid */
1054         if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
1055             (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
1056             old_request) {
1057                 isci_device = isci_lookup_device(task->dev);
1058                 target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
1059                                                &old_request->flags);
1060         }
1061         spin_unlock(&task->task_state_lock);
1062         spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1063
1064         dev_dbg(&isci_host->pdev->dev,
1065                 "%s: dev = %p, task = %p, old_request == %p\n",
1066                 __func__, isci_device, task, old_request);
1067
1068         if (isci_device)
1069                 set_bit(IDEV_EH, &isci_device->flags);
1070
1071         /* Device reset conditions signalled in task_state_flags are the
1072          * responsbility of libsas to observe at the start of the error
1073          * handler thread.
1074          */
1075         if (!isci_device || !old_request) {
1076                 /* The request has already completed and there
1077                 * is nothing to do here other than to set the task
1078                 * done bit, and indicate that the task abort function
1079                 * was sucessful.
1080                 */
1081                 spin_lock_irqsave(&task->task_state_lock, flags);
1082                 task->task_state_flags |= SAS_TASK_STATE_DONE;
1083                 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
1084                                             SAS_TASK_STATE_PENDING);
1085                 spin_unlock_irqrestore(&task->task_state_lock, flags);
1086
1087                 ret = TMF_RESP_FUNC_COMPLETE;
1088
1089                 dev_dbg(&isci_host->pdev->dev,
1090                         "%s: abort task not needed for %p\n",
1091                         __func__, task);
1092                 goto out;
1093         }
1094
1095         spin_lock_irqsave(&isci_host->scic_lock, flags);
1096
1097         /* Check the request status and change to "aborted" if currently
1098          * "starting"; if true then set the I/O kernel completion
1099          * struct that will be triggered when the request completes.
1100          */
1101         old_state = isci_task_validate_request_to_abort(
1102                                 old_request, isci_host, isci_device,
1103                                 &aborted_io_completion);
1104         if ((old_state != started) &&
1105             (old_state != completed) &&
1106             (old_state != aborting)) {
1107
1108                 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1109
1110                 /* The request was already being handled by someone else (because
1111                 * they got to set the state away from started).
1112                 */
1113                 dev_dbg(&isci_host->pdev->dev,
1114                         "%s:  device = %p; old_request %p already being aborted\n",
1115                         __func__,
1116                         isci_device, old_request);
1117                 ret = TMF_RESP_FUNC_COMPLETE;
1118                 goto out;
1119         }
1120         if (task->task_proto == SAS_PROTOCOL_SMP ||
1121             sas_protocol_ata(task->task_proto) ||
1122             target_done_already) {
1123
1124                 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1125
1126                 dev_dbg(&isci_host->pdev->dev,
1127                         "%s: %s request"
1128                         " or complete_in_target (%d), thus no TMF\n",
1129                         __func__,
1130                         ((task->task_proto == SAS_PROTOCOL_SMP)
1131                                 ? "SMP"
1132                                 : (sas_protocol_ata(task->task_proto)
1133                                         ? "SATA/STP"
1134                                         : "<other>")
1135                          ),
1136                         test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
1137
1138                 if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
1139                         spin_lock_irqsave(&task->task_state_lock, flags);
1140                         task->task_state_flags |= SAS_TASK_STATE_DONE;
1141                         task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
1142                                                     SAS_TASK_STATE_PENDING);
1143                         spin_unlock_irqrestore(&task->task_state_lock, flags);
1144                         ret = TMF_RESP_FUNC_COMPLETE;
1145                 } else {
1146                         spin_lock_irqsave(&task->task_state_lock, flags);
1147                         task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
1148                                                     SAS_TASK_STATE_PENDING);
1149                         spin_unlock_irqrestore(&task->task_state_lock, flags);
1150                 }
1151
1152                 /* STP and SMP devices are not sent a TMF, but the
1153                  * outstanding I/O request is terminated below.  This is
1154                  * because SATA/STP and SMP discovery path timeouts directly
1155                  * call the abort task interface for cleanup.
1156                  */
1157                 perform_termination = 1;
1158
1159         } else {
1160                 /* Fill in the tmf stucture */
1161                 isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
1162                                                isci_abort_task_process_cb,
1163                                                old_request);
1164
1165                 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1166
1167                 #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
1168                 ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
1169                                             ISCI_ABORT_TASK_TIMEOUT_MS);
1170
1171                 if (ret == TMF_RESP_FUNC_COMPLETE)
1172                         perform_termination = 1;
1173                 else
1174                         dev_dbg(&isci_host->pdev->dev,
1175                                 "%s: isci_task_send_tmf failed\n", __func__);
1176         }
1177         if (perform_termination) {
1178                 set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
1179
1180                 /* Clean up the request on our side, and wait for the aborted
1181                  * I/O to complete.
1182                  */
1183                 isci_terminate_request_core(isci_host, isci_device,
1184                                             old_request);
1185         }
1186
1187         /* Make sure we do not leave a reference to aborted_io_completion */
1188         old_request->io_request_completion = NULL;
1189  out:
1190         isci_put_device(isci_device);
1191         return ret;
1192 }
1193
1194 /**
1195  * isci_task_abort_task_set() - This function is one of the SAS Domain Template
1196  *    functions. This is one of the Task Management functoins called by libsas,
1197  *    to abort all task for the given lun.
1198  * @d_device: This parameter specifies the domain device associated with this
1199  *    request.
1200  * @lun: This parameter specifies the lun associated with this request.
1201  *
1202  * status, zero indicates success.
1203  */
1204 int isci_task_abort_task_set(
1205         struct domain_device *d_device,
1206         u8 *lun)
1207 {
1208         return TMF_RESP_FUNC_FAILED;
1209 }
1210
1211
1212 /**
1213  * isci_task_clear_aca() - This function is one of the SAS Domain Template
1214  *    functions. This is one of the Task Management functoins called by libsas.
1215  * @d_device: This parameter specifies the domain device associated with this
1216  *    request.
1217  * @lun: This parameter specifies the lun        associated with this request.
1218  *
1219  * status, zero indicates success.
1220  */
1221 int isci_task_clear_aca(
1222         struct domain_device *d_device,
1223         u8 *lun)
1224 {
1225         return TMF_RESP_FUNC_FAILED;
1226 }
1227
1228
1229
1230 /**
1231  * isci_task_clear_task_set() - This function is one of the SAS Domain Template
1232  *    functions. This is one of the Task Management functoins called by libsas.
1233  * @d_device: This parameter specifies the domain device associated with this
1234  *    request.
1235  * @lun: This parameter specifies the lun        associated with this request.
1236  *
1237  * status, zero indicates success.
1238  */
1239 int isci_task_clear_task_set(
1240         struct domain_device *d_device,
1241         u8 *lun)
1242 {
1243         return TMF_RESP_FUNC_FAILED;
1244 }
1245
1246
1247 /**
1248  * isci_task_query_task() - This function is implemented to cause libsas to
1249  *    correctly escalate the failed abort to a LUN or target reset (this is
1250  *    because sas_scsi_find_task libsas function does not correctly interpret
1251  *    all return codes from the abort task call).  When TMF_RESP_FUNC_SUCC is
1252  *    returned, libsas turns this into a LUN reset; when FUNC_FAILED is
1253  *    returned, libsas will turn this into a target reset
1254  * @task: This parameter specifies the sas task being queried.
1255  * @lun: This parameter specifies the lun associated with this request.
1256  *
1257  * status, zero indicates success.
1258  */
1259 int isci_task_query_task(
1260         struct sas_task *task)
1261 {
1262         /* See if there is a pending device reset for this device. */
1263         if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1264                 return TMF_RESP_FUNC_FAILED;
1265         else
1266                 return TMF_RESP_FUNC_SUCC;
1267 }
1268
1269 /*
1270  * isci_task_request_complete() - This function is called by the sci core when
1271  *    an task request completes.
1272  * @ihost: This parameter specifies the ISCI host object
1273  * @ireq: This parameter is the completed isci_request object.
1274  * @completion_status: This parameter specifies the completion status from the
1275  *    sci core.
1276  *
1277  * none.
1278  */
1279 void
1280 isci_task_request_complete(struct isci_host *ihost,
1281                            struct isci_request *ireq,
1282                            enum sci_task_status completion_status)
1283 {
1284         struct isci_tmf *tmf = isci_request_access_tmf(ireq);
1285         struct completion *tmf_complete = NULL;
1286         struct completion *request_complete = ireq->io_request_completion;
1287
1288         dev_dbg(&ihost->pdev->dev,
1289                 "%s: request = %p, status=%d\n",
1290                 __func__, ireq, completion_status);
1291
1292         isci_request_change_state(ireq, completed);
1293
1294         set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
1295
1296         if (tmf) {
1297                 tmf->status = completion_status;
1298
1299                 if (tmf->proto == SAS_PROTOCOL_SSP) {
1300                         memcpy(&tmf->resp.resp_iu,
1301                                &ireq->ssp.rsp,
1302                                SSP_RESP_IU_MAX_SIZE);
1303                 } else if (tmf->proto == SAS_PROTOCOL_SATA) {
1304                         memcpy(&tmf->resp.d2h_fis,
1305                                &ireq->stp.rsp,
1306                                sizeof(struct dev_to_host_fis));
1307                 }
1308                 /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
1309                 tmf_complete = tmf->complete;
1310         }
1311         sci_controller_complete_io(ihost, ireq->target_device, ireq);
1312         /* set the 'terminated' flag handle to make sure it cannot be terminated
1313          *  or completed again.
1314          */
1315         set_bit(IREQ_TERMINATED, &ireq->flags);
1316
1317         /* As soon as something is in the terminate path, deallocation is
1318          * managed there.  Note that the final non-managed state of a task
1319          * request is "completed".
1320          */
1321         if ((ireq->status == completed) ||
1322             !isci_request_is_dealloc_managed(ireq->status)) {
1323                 isci_request_change_state(ireq, unallocated);
1324                 isci_free_tag(ihost, ireq->io_tag);
1325                 list_del_init(&ireq->dev_node);
1326         }
1327
1328         /* "request_complete" is set if the task was being terminated. */
1329         if (request_complete)
1330                 complete(request_complete);
1331
1332         /* The task management part completes last. */
1333         if (tmf_complete)
1334                 complete(tmf_complete);
1335 }
1336
1337 static int isci_reset_device(struct isci_host *ihost,
1338                              struct isci_remote_device *idev)
1339 {
1340         struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
1341         enum sci_status status;
1342         unsigned long flags;
1343         int rc;
1344
1345         dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1346
1347         spin_lock_irqsave(&ihost->scic_lock, flags);
1348         status = sci_remote_device_reset(idev);
1349         if (status != SCI_SUCCESS) {
1350                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1351
1352                 dev_dbg(&ihost->pdev->dev,
1353                          "%s: sci_remote_device_reset(%p) returned %d!\n",
1354                          __func__, idev, status);
1355
1356                 return TMF_RESP_FUNC_FAILED;
1357         }
1358         spin_unlock_irqrestore(&ihost->scic_lock, flags);
1359
1360         rc = sas_phy_reset(phy, true);
1361
1362         /* Terminate in-progress I/O now. */
1363         isci_remote_device_nuke_requests(ihost, idev);
1364
1365         /* Since all pending TCs have been cleaned, resume the RNC. */
1366         spin_lock_irqsave(&ihost->scic_lock, flags);
1367         status = sci_remote_device_reset_complete(idev);
1368         spin_unlock_irqrestore(&ihost->scic_lock, flags);
1369
1370         if (status != SCI_SUCCESS) {
1371                 dev_dbg(&ihost->pdev->dev,
1372                          "%s: sci_remote_device_reset_complete(%p) "
1373                          "returned %d!\n", __func__, idev, status);
1374         }
1375
1376         dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
1377
1378         return rc;
1379 }
1380
1381 int isci_task_I_T_nexus_reset(struct domain_device *dev)
1382 {
1383         struct isci_host *ihost = dev_to_ihost(dev);
1384         struct isci_remote_device *idev;
1385         unsigned long flags;
1386         int ret;
1387
1388         spin_lock_irqsave(&ihost->scic_lock, flags);
1389         idev = isci_lookup_device(dev);
1390         spin_unlock_irqrestore(&ihost->scic_lock, flags);
1391
1392         if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
1393                 ret = TMF_RESP_FUNC_COMPLETE;
1394                 goto out;
1395         }
1396
1397         ret = isci_reset_device(ihost, idev);
1398  out:
1399         isci_put_device(idev);
1400         return ret;
1401 }
1402
1403 int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1404 {
1405         struct domain_device *dev = sdev_to_domain_dev(cmd->device);
1406         struct isci_host *ihost = dev_to_ihost(dev);
1407         struct isci_remote_device *idev;
1408         unsigned long flags;
1409         int ret;
1410
1411         spin_lock_irqsave(&ihost->scic_lock, flags);
1412         idev = isci_lookup_device(dev);
1413         spin_unlock_irqrestore(&ihost->scic_lock, flags);
1414
1415         if (!idev) {
1416                 ret = TMF_RESP_FUNC_COMPLETE;
1417                 goto out;
1418         }
1419
1420         ret = isci_reset_device(ihost, idev);
1421  out:
1422         isci_put_device(idev);
1423         return ret;
1424 }