2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 #include <linux/completion.h>
57 #include <linux/irqflags.h>
59 #include <scsi/libsas.h>
60 #include "remote_device.h"
61 #include "remote_node_context.h"
/**
 * isci_task_refuse() - complete the request to the upper layer driver in
 *    the case where an I/O needs to be completed back in the submit path.
 * @ihost: host on which the request was queued
 * @task: request to complete
 * @response: response code for the completed task.
 * @status: status code for the completed task.
 */
static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
			     enum service_response response,
			     enum exec_status status)
	/* Selects how (or whether) the completion is reported to libsas. */
	enum isci_completion_selection disposition;

	disposition = isci_perform_normal_io_completion;
	disposition = isci_task_set_completion_status(task, response, status,

	/* Tasks aborted specifically by a call to the lldd_abort_task
	 * function should not be completed to the host in the regular path.
	 */
	switch (disposition) {
	case isci_perform_normal_io_completion:
		/* Normal notification (task_done) */
		dev_dbg(&ihost->pdev->dev,
			"%s: Normal - task = %p, response=%d, "
			__func__, task, response, status);

		/* Drop the lldd back-pointer before handing the task back. */
		task->lldd_task = NULL;
		task->task_done(task);

	case isci_perform_aborted_io_completion:
		/*
		 * No notification because this request is already in the
		 * abort path.
		 */
		dev_dbg(&ihost->pdev->dev,
			"%s: Aborted - task = %p, response=%d, "
			__func__, task, response, status);

	case isci_perform_error_io_completion:
		/* Use sas_task_abort */
		dev_dbg(&ihost->pdev->dev,
			"%s: Error - task = %p, response=%d, "
			__func__, task, response, status);
		sas_task_abort(task);

		/* default case: unexpected disposition — abort via libsas. */
		dev_dbg(&ihost->pdev->dev,
			"%s: isci task notification default case!",
		sas_task_abort(task);
/* Walk @num sas_tasks starting at @task by following the sas_task list
 * linkage.  NOTE(review): decrements @num and advances @task as loop side
 * effects; the caller must supply at least @num linked tasks.
 */
#define for_each_sas_task(num, task) \
	for (; num > 0; num--,\
	     task = list_entry(task->list.next, struct sas_task, list))
136 static inline int isci_device_io_ready(struct isci_remote_device *idev,
137 struct sas_task *task)
139 return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
140 (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
141 isci_task_is_ncq_recovery(task))
/**
 * isci_task_execute_task() - This function is one of the SAS Domain Template
 *    functions. This function is called by libsas to send a task down to
 *    the hardware.
 * @task: This parameter specifies the SAS task to send.
 * @num: This parameter specifies the number of tasks to queue.
 * @gfp_flags: This parameter specifies the context of this call.
 *
 * Return: status, zero indicates success.
 */
int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
	struct isci_host *ihost = dev_to_ihost(task->dev);
	struct isci_remote_device *idev;

	dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);

	for_each_sas_task(num, task) {
		enum sci_status status = SCI_FAILURE;

		/* Device lookup, readiness check and tag allocation must be
		 * atomic with respect to controller state changes.
		 */
		spin_lock_irqsave(&ihost->scic_lock, flags);
		idev = isci_lookup_device(task->dev);
		io_ready = isci_device_io_ready(idev, task);
		tag = isci_alloc_tag(ihost);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		dev_dbg(&ihost->pdev->dev,
			"task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
			task, num, task->dev, idev, idev ? idev->flags : 0,

			/* No usable device: refuse as undelivered. */
			isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
		} else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
			/* Indicate QUEUE_FULL so that the scsi midlayer
			 * retries.
			 */
			isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
			/* There is a device and it's ready for I/O. */
			spin_lock_irqsave(&task->task_state_lock, flags);

			if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
				/* The I/O was aborted. */
				spin_unlock_irqrestore(&task->task_state_lock,

				isci_task_refuse(ihost, task,
						 SAS_TASK_UNDELIVERED,
						 SAM_STAT_TASK_ABORTED);
				task->task_state_flags |= SAS_TASK_AT_INITIATOR;
				spin_unlock_irqrestore(&task->task_state_lock, flags);

				/* build and send the request. */
				status = isci_request_execute(ihost, idev, task, tag);

				if (status != SCI_SUCCESS) {
					spin_lock_irqsave(&task->task_state_lock, flags);
					/* Did not really start this command. */
					task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
					spin_unlock_irqrestore(&task->task_state_lock, flags);

					if (test_bit(IDEV_GONE, &idev->flags)) {
						/* Indicate that the device
						 * is gone.
						 */
						isci_task_refuse(ihost, task,
								 SAS_TASK_UNDELIVERED,
						/* Indicate QUEUE_FULL so that
						 * the scsi midlayer retries.
						 * If the request failed for
						 * remote device reasons, it
						 * gets returned as
						 * SAS_TASK_UNDELIVERED next
						 * time through.
						 */
						isci_task_refuse(ihost, task,

		if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
			spin_lock_irqsave(&ihost->scic_lock, flags);
			/* command never hit the device, so just free
			 * the tci and skip the sequence increment
			 */
			isci_tci_free(ihost, ISCI_TAG_TCI(tag));
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
		isci_put_device(idev);
/* isci_sata_management_task_request_build() - build the host-to-device FIS
 * for a SATA management TMF (soft-reset assert/deassert) carried by @ireq,
 * then hand construction to the core.
 */
static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
	struct isci_tmf *isci_tmf;
	enum sci_status status;

	/* Only TMF-type requests are handled here. */
	if (!test_bit(IREQ_TMF, &ireq->flags))

	isci_tmf = isci_request_access_tmf(ireq);

	switch (isci_tmf->tmf_code) {

	case isci_tmf_sata_srst_high:
	case isci_tmf_sata_srst_low: {
		struct host_to_dev_fis *fis = &ireq->stp.cmd;

		memset(fis, 0, sizeof(*fis));

		fis->fis_type = 0x27;	/* Register - Host to Device FIS */

		/* Assert SRST for the "high" phase, deassert for "low". */
		if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
			fis->control |= ATA_SRST;
			fis->control &= ~ATA_SRST;

	/* other management commands go here... */

	/* core builds the protocol specific request
	 * based on the h2d fis.
	 */
	status = sci_task_request_construct_sata(ireq);
/* isci_task_request_build() - allocate a TMF request object for @idev and
 * run the protocol-specific construction (SSP vs SATA/STP).
 * NOTE(review): ireq starts NULL and the elided error paths appear to
 * return it — confirm against the full source.
 */
static struct isci_request *isci_task_request_build(struct isci_host *ihost,
						    struct isci_remote_device *idev,
						    u16 tag, struct isci_tmf *isci_tmf)
	enum sci_status status = SCI_FAILURE;
	struct isci_request *ireq = NULL;
	struct domain_device *dev;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_tmf = %p\n", __func__, isci_tmf);

	dev = idev->domain_dev;

	/* do common allocation and init of request object. */
	ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);

	/* let the core do its construct. */
	status = sci_task_request_construct(ihost, idev, tag,

	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: sci_task_request_construct failed - "

	/* XXX convert to get this from task->tproto like other drivers */
	if (dev->dev_type == SAS_END_DEV) {
		isci_tmf->proto = SAS_PROTOCOL_SSP;
		status = sci_task_request_construct_ssp(ireq);
		if (status != SCI_SUCCESS)

	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
		isci_tmf->proto = SAS_PROTOCOL_SATA;
		status = isci_sata_management_task_request_build(ireq);

		if (status != SCI_SUCCESS)
/**
 * isci_request_mark_zombie() - This function must be called with scic_lock held.
 *
 * Mark @ireq dead: detach its completion pointers and break its links to
 * the owning TMF or sas_task, then signal any waiting threads so they do
 * not block until timeout.
 */
static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
	struct completion *tmf_completion = NULL;
	struct completion *req_completion;

	/* Set the request state to "dead". */

	/* Detach the I/O completion so nothing else signals it later. */
	req_completion = ireq->io_request_completion;
	ireq->io_request_completion = NULL;

	if (test_bit(IREQ_TMF, &ireq->flags)) {
		/* Break links with the TMF request. */
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		/* In the case where a task request is dying,
		 * the thread waiting on the complete will sit and
		 * timeout unless we wake it now. Since the TMF
		 * has a default error status, complete it here
		 * to wake the waiting thread.
		 */
		tmf_completion = tmf->complete;
		tmf->complete = NULL;

		ireq->ttype_ptr.tmf_task_ptr = NULL;
		dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
			__func__, tmf->tmf_code, tmf->io_tag);

		/* Break links with the sas_task - the callback is done
		 * elsewhere.
		 */
		struct sas_task *task = isci_request_access_task(ireq);

		task->lldd_task = NULL;

		ireq->ttype_ptr.io_task_ptr = NULL;

	dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",

	/* Don't force waiting threads to timeout. */
	complete(req_completion);

	if (tmf_completion != NULL)
		complete(tmf_completion);
/**
 * isci_task_execute_tmf() - build, start, and wait for a task management
 *    function (TMF) request on @idev, bounded by @timeout_ms.
 * @ihost: host on which to execute the TMF.
 * @idev: target remote device.
 * @tmf: the task management function descriptor to send.
 * @timeout_ms: how long to wait for completion before terminating the TMF.
 *
 * Return: TMF_RESP_FUNC_COMPLETE on success, TMF_RESP_FUNC_FAILED otherwise.
 */
static int isci_task_execute_tmf(struct isci_host *ihost,
				 struct isci_remote_device *idev,
				 struct isci_tmf *tmf, unsigned long timeout_ms)
	DECLARE_COMPLETION_ONSTACK(completion);
	enum sci_task_status status = SCI_TASK_FAILURE;
	struct isci_request *ireq;
	int ret = TMF_RESP_FUNC_FAILED;
	unsigned long timeleft;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	tag = isci_alloc_tag(ihost);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)

	/* sanity check, return TMF_RESP_FUNC_FAILED
	 * if the device is not there and ready.
	 */
	    (!test_bit(IDEV_IO_READY, &idev->flags) &&
	     !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p not ready (%#lx)\n",
			idev, idev ? idev->flags : 0);

	dev_dbg(&ihost->pdev->dev,

	/* Assign the pointer to the TMF's completion kernel wait structure. */
	tmf->complete = &completion;
	tmf->status = SCI_FAILURE_TIMEOUT;

	ireq = isci_task_request_build(ihost, idev, tag, tmf);

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* start the TMF io. */
	status = sci_controller_start_task(ihost, idev, ireq);

	if (status != SCI_TASK_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: start_io failed - status = 0x%x, request = %p\n",

		spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Notify interested parties that the TMF is now in flight. */
	if (tmf->cb_state_func != NULL)
		tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);

	isci_request_change_state(ireq, started);

	/* add the request to the remote device request list. */
	list_add(&ireq->dev_node, &idev->reqs_in_process);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait for the TMF to complete, or a timeout. */
	timeleft = wait_for_completion_timeout(&completion,
					       msecs_to_jiffies(timeout_ms));

	/* The TMF did not complete - this could be because
	 * of an unplug. Terminate the TMF request now.
	 */
	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmf->cb_state_func != NULL)
		tmf->cb_state_func(isci_tmf_timed_out, tmf,

	sci_controller_terminate_request(ihost, idev, ireq);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait (bounded) for the termination itself to finish. */
	timeleft = wait_for_completion_timeout(
		msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

	/* Strange condition - the termination of the TMF
	 * request timed out as well.
	 */
	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* If the TMF status has not changed, kill it. */
	if (tmf->status == SCI_FAILURE_TIMEOUT)
		isci_request_mark_zombie(ihost, ireq);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	isci_print_tmf(ihost, tmf);

	if (tmf->status == SCI_SUCCESS)
		ret = TMF_RESP_FUNC_COMPLETE;
	else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
		dev_dbg(&ihost->pdev->dev,
			"SCI_FAILURE_IO_RESPONSE_VALID\n",
		ret = TMF_RESP_FUNC_COMPLETE;
	/* Else - leave the default "failed" status alone. */

	dev_dbg(&ihost->pdev->dev,
		"%s: completed request = %p\n",

	spin_lock_irqsave(&ihost->scic_lock, flags);
	isci_tci_free(ihost, ISCI_TAG_TCI(tag));
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
525 static void isci_task_build_tmf(struct isci_tmf *tmf,
526 enum isci_tmf_function_codes code,
527 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
532 memset(tmf, 0, sizeof(*tmf));
534 tmf->tmf_code = code;
535 tmf->cb_state_func = tmf_sent_cb;
536 tmf->cb_data = cb_data;
539 static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
540 enum isci_tmf_function_codes code,
541 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
544 struct isci_request *old_request)
546 isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
547 tmf->io_tag = old_request->io_tag;
551 * isci_task_validate_request_to_abort() - This function checks the given I/O
552 * against the "started" state. If the request is still "started", it's
553 * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
554 * BEFORE CALLING THIS FUNCTION.
555 * @isci_request: This parameter specifies the request object to control.
556 * @isci_host: This parameter specifies the ISCI host object
557 * @isci_device: This is the device to which the request is pending.
558 * @aborted_io_completion: This is a completion structure that will be added to
559 * the request in case it is changed to aborting; this completion is
560 * triggered when the request is fully completed.
562 * Either "started" on successful change of the task status to "aborted", or
563 * "unallocated" if the task cannot be controlled.
565 static enum isci_request_status isci_task_validate_request_to_abort(
566 struct isci_request *isci_request,
567 struct isci_host *isci_host,
568 struct isci_remote_device *isci_device,
569 struct completion *aborted_io_completion)
571 enum isci_request_status old_state = unallocated;
573 /* Only abort the task if it's in the
574 * device's request_in_process list
576 if (isci_request && !list_empty(&isci_request->dev_node)) {
577 old_state = isci_request_change_started_to_aborted(
578 isci_request, aborted_io_completion);
/* isci_request_is_dealloc_managed() - predicate used by the terminate path
 * to decide whether a request in terminal state @stat must be detached and
 * freed by the caller (see isci_terminate_request_core()).
 * NOTE(review): the function body is not visible in this chunk; behavior is
 * inferred from the caller — confirm against the full source.
 */
static int isci_request_is_dealloc_managed(enum isci_request_status stat)
/**
 * isci_terminate_request_core() - This function will terminate the given
 *    request, and wait for it to complete. This function must only be called
 *    from a thread that can wait. Note that the request is terminated and
 *    completed (back to the host, if started there).
 * @ihost: This parameter specifies the ISCI host object.
 * @idev: the remote device owning the request.
 * @isci_request: The I/O request to be terminated.
 */
static void isci_terminate_request_core(struct isci_host *ihost,
					struct isci_remote_device *idev,
					struct isci_request *isci_request)
	enum sci_status status = SCI_SUCCESS;
	bool was_terminated = false;
	bool needs_cleanup_handling = false;
	unsigned long termination_completed = 1;
	struct completion *io_request_completion;

	dev_dbg(&ihost->pdev->dev,
		"%s: device = %p; request = %p\n",
		__func__, idev, isci_request);

	spin_lock_irqsave(&ihost->scic_lock, flags);

	io_request_completion = isci_request->io_request_completion;

	/* Note that we are not going to control
	 * the target to abort the request.
	 */
	set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);

	/* Make sure the request wasn't just sitting around signalling
	 * device condition (if the request handle is NULL, then the
	 * request completed but needed additional handling here).
	 */
	if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
		was_terminated = true;
		needs_cleanup_handling = true;
		status = sci_controller_terminate_request(ihost,

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/*
	 * The only time the request to terminate will
	 * fail is when the io request is completed and
	 * being aborted.
	 */
	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: sci_controller_terminate_request"
			" returned = 0x%x\n",

		isci_request->io_request_completion = NULL;

	if (was_terminated) {
		dev_dbg(&ihost->pdev->dev,
			"%s: before completion wait (%p/%p)\n",
			__func__, isci_request, io_request_completion);

		/* Wait here for the request to complete. */
		termination_completed
			= wait_for_completion_timeout(
				io_request_completion,
				msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));

		if (!termination_completed) {

			/* The request to terminate has timed out. */
			spin_lock_irqsave(&ihost->scic_lock, flags);

			/* Check for state changes. */
			if (!test_bit(IREQ_TERMINATED,
				      &isci_request->flags)) {

				/* The best we can do is to have the
				 * request die a silent death if it
				 * ever really completes.
				 */
				isci_request_mark_zombie(ihost,
				needs_cleanup_handling = true;

				termination_completed = 1;

			spin_unlock_irqrestore(&ihost->scic_lock,

			if (!termination_completed) {

				dev_dbg(&ihost->pdev->dev,
					"%s: *** Timeout waiting for "
					"termination(%p/%p)\n",
					__func__, io_request_completion,

				/* The request can no longer be referenced
				 * safely since it may go away if the
				 * termination ever really does complete.
				 */

		if (termination_completed)
			dev_dbg(&ihost->pdev->dev,
				"%s: after completion wait (%p/%p)\n",
				__func__, isci_request, io_request_completion);

	if (termination_completed) {

		isci_request->io_request_completion = NULL;

		/* Peek at the status of the request. This will tell
		 * us if there was special handling on the request such that it
		 * needs to be detached and freed here.
		 */
		spin_lock_irqsave(&isci_request->state_lock, flags);

		needs_cleanup_handling
			= isci_request_is_dealloc_managed(
				isci_request->status);

		spin_unlock_irqrestore(&isci_request->state_lock, flags);

	if (needs_cleanup_handling) {

		dev_dbg(&ihost->pdev->dev,
			"%s: cleanup isci_device=%p, request=%p\n",
			__func__, idev, isci_request);

		if (isci_request != NULL) {
			/* Free the tag and detach the request from the device. */
			spin_lock_irqsave(&ihost->scic_lock, flags);
			isci_free_tag(ihost, isci_request->io_tag);
			isci_request_change_state(isci_request, unallocated);
			list_del_init(&isci_request->dev_node);
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
/**
 * isci_terminate_pending_requests() - This function will change all of the
 *    requests on the given device's state to "aborting", will terminate the
 *    requests, and wait for them to complete. This function must only be
 *    called from a thread that can wait. Note that the requests are all
 *    terminated and completed (back to the host, if started there).
 * @ihost: This parameter specifies SCU.
 * @idev: This parameter specifies the target.
 */
void isci_terminate_pending_requests(struct isci_host *ihost,
				     struct isci_remote_device *idev)
	struct completion request_completion;
	enum isci_request_status old_state;

	/* Move the device's pending requests onto a private list. */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	list_splice_init(&idev->reqs_in_process, &list);

	/* assumes that isci_terminate_request_core deletes from the list */
	while (!list_empty(&list)) {
		struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);

		/* Change state to "terminating" if it is currently
		 * "started".
		 */
		old_state = isci_request_change_started_to_newstate(ireq,

			/* termination in progress, or otherwise dispositioned.
			 * We know the request was on 'list' so should be safe
			 * to move it back to reqs_in_process
			 */
			list_move(&ireq->dev_node, &idev->reqs_in_process);

		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		init_completion(&request_completion);

		dev_dbg(&ihost->pdev->dev,
			"%s: idev=%p request=%p; task=%p old_state=%d\n",
			__func__, idev, ireq,
			(!test_bit(IREQ_TMF, &ireq->flags)
			 ? isci_request_access_task(ireq)

		/* If the old_state is started:
		 * This request was not already being aborted. If it had been,
		 * then the aborting I/O (ie. the TMF request) would not be in
		 * the aborting state, and thus would be terminated here. Note
		 * that since the TMF completion's call to the kernel function
		 * "complete()" does not happen until the pending I/O request
		 * terminate fully completes, we do not have to implement a
		 * special wait here for already aborting requests - the
		 * termination of the TMF request will force the request
		 * to finish its already started terminate.
		 *
		 * If old_state == completed:
		 * This request completed from the SCU hardware perspective
		 * and now just needs cleaning up in terms of freeing the
		 * request and potentially calling up to libsas.
		 *
		 * If old_state == aborting:
		 * This request has already gone through a TMF timeout, but may
		 * not have been terminated; needs cleaning up at least.
		 */
		isci_terminate_request_core(ihost, idev, ireq);

		spin_lock_irqsave(&ihost->scic_lock, flags);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
/**
 * isci_task_send_lu_reset_sas() - This function is called by the SAS Domain
 *    Template functions.
 * @isci_host: This parameter specifies the ISCI host object.
 * @isci_device: This is the device to which the LUN reset is sent.
 * @lun: This parameter specifies the lun to be reset.
 *
 * Return: status, zero indicates success.
 */
static int isci_task_send_lu_reset_sas(
	struct isci_host *isci_host,
	struct isci_remote_device *isci_device,
	int ret = TMF_RESP_FUNC_FAILED;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_host = %p, isci_device = %p\n",
		__func__, isci_host, isci_device);
	/* Send the LUN reset to the target. By the time the call returns,
	 * the TMF has fully executed in the target (in which case the return
	 * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or
	 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
	 */
	isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);

#define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
	ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);

	if (ret == TMF_RESP_FUNC_COMPLETE)
		dev_dbg(&isci_host->pdev->dev,
			"%s: %p: TMF_LU_RESET passed\n",
			__func__, isci_device);

		dev_dbg(&isci_host->pdev->dev,
			"%s: %p: TMF_LU_RESET failed (%x)\n",
			__func__, isci_device, ret);
/* isci_task_send_lu_reset_sata() - SATA analogue of a LUN reset: issue a
 * soft-reset (SRST assert) TMF to @idev.
 */
static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
					struct isci_remote_device *idev, u8 *lun)
	int ret = TMF_RESP_FUNC_FAILED;

	/* Send the soft reset to the target */
#define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
	isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);

	ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);

	if (ret != TMF_RESP_FUNC_COMPLETE) {
		dev_dbg(&ihost->pdev->dev,
			"%s: Assert SRST failed (%p) = %x",
			__func__, idev, ret);

		/* Return the failure so that the LUN reset is escalated
		 * to a full device reset.
		 */
/**
 * isci_task_lu_reset() - This function is one of the SAS Domain Template
 *    functions. This is one of the Task Management functions called by libsas,
 *    to reset the given lun. Note the assumption that while this call is
 *    executing, no I/O will be sent by the host to the device.
 * @domain_device: the libsas device whose lun is to be reset.
 * @lun: This parameter specifies the lun to be reset.
 *
 * Return: status, zero indicates success.
 */
int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
	struct isci_host *isci_host = dev_to_ihost(domain_device);
	struct isci_remote_device *isci_device;

	spin_lock_irqsave(&isci_host->scic_lock, flags);
	isci_device = isci_lookup_device(domain_device);
	spin_unlock_irqrestore(&isci_host->scic_lock, flags);

	dev_dbg(&isci_host->pdev->dev,
		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
		__func__, domain_device, isci_host, isci_device);

		/* If the device is gone, stop the escalations. */
		dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__);

		ret = TMF_RESP_FUNC_COMPLETE;

	/* Flag the device as being error-handled. */
	set_bit(IDEV_EH, &isci_device->flags);

	/* Send the task management part of the reset. */
	if (sas_protocol_ata(domain_device->tproto)) {
		ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
		ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);

	/* If the LUN reset worked, all the I/O can now be terminated. */
	if (ret == TMF_RESP_FUNC_COMPLETE)
		/* Terminate all I/O now. */
		isci_terminate_pending_requests(isci_host,

	isci_put_device(isci_device);
949 /* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
950 int isci_task_clear_nexus_port(struct asd_sas_port *port)
952 return TMF_RESP_FUNC_FAILED;
957 int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
959 return TMF_RESP_FUNC_FAILED;
/* Task Management Functions. Must be called from process context. */

/**
 * isci_abort_task_process_cb() - This is a helper function for the abort task
 *    TMF command. It manages the request state with respect to the successful
 *    transmission / completion of the abort task request.
 * @cb_state: This parameter specifies when this function was called - after
 *    the TMF request has been started and after it has timed-out.
 * @tmf: This parameter specifies the TMF in progress.
 * @cb_data: opaque callback context — the isci_request being aborted.
 */
static void isci_abort_task_process_cb(
	enum isci_tmf_cb_state cb_state,
	struct isci_tmf *tmf,
	struct isci_request *old_request;

	old_request = (struct isci_request *)cb_data;

	dev_dbg(&old_request->isci_host->pdev->dev,
		"%s: tmf=%p, old_request=%p\n",
		__func__, tmf, old_request);

	case isci_tmf_started:
		/* The TMF has been started. Nothing to do here, since the
		 * request state was already set to "aborted" by the abort
		 * task function.
		 */
		if ((old_request->status != aborted)
		    && (old_request->status != completed))
			dev_dbg(&old_request->isci_host->pdev->dev,
				"%s: Bad request status (%d): tmf=%p, old_request=%p\n",
				__func__, old_request->status, tmf, old_request);

	case isci_tmf_timed_out:

		/* Set the task's state to "aborting", since the abort task
		 * function thread set it to "aborted" (above) in anticipation
		 * of the task management request working correctly. Since the
		 * timeout has now fired, the TMF request failed. We set the
		 * state such that the request completion will indicate the
		 * device is no longer present.
		 */
		isci_request_change_state(old_request, aborting);

		/* default case: unexpected callback state. */
		dev_dbg(&old_request->isci_host->pdev->dev,
			"%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
			__func__, cb_state, tmf, old_request);
/**
 * isci_task_abort_task() - This function is one of the SAS Domain Template
 *    functions. This function is called by libsas to abort a specified task.
 * @task: This parameter specifies the SAS task to abort.
 *
 * Return: status, zero indicates success.
 */
int isci_task_abort_task(struct sas_task *task)
	struct isci_host *isci_host = dev_to_ihost(task->dev);
	DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
	struct isci_request *old_request = NULL;
	enum isci_request_status old_state;
	struct isci_remote_device *isci_device = NULL;
	struct isci_tmf tmf;
	int ret = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	int perform_termination = 0;

	/* Get the isci_request reference from the task. Note that
	 * this check does not depend on the pending request list
	 * in the device, because tasks driving resets may land here
	 * after completion in the core.
	 */
	spin_lock_irqsave(&isci_host->scic_lock, flags);
	spin_lock(&task->task_state_lock);

	old_request = task->lldd_task;

	/* If task is already done, the request isn't valid */
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
		isci_device = isci_lookup_device(task->dev);

	spin_unlock(&task->task_state_lock);
	spin_unlock_irqrestore(&isci_host->scic_lock, flags);

	dev_dbg(&isci_host->pdev->dev,
		"%s: dev = %p, task = %p, old_request == %p\n",
		__func__, isci_device, task, old_request);

		set_bit(IDEV_EH, &isci_device->flags);

	/* Device reset conditions signalled in task_state_flags are the
	 * responsibility of libsas to observe at the start of the error
	 * handler thread.
	 */
	if (!isci_device || !old_request) {
		/* The request has already completed and there
		 * is nothing to do here other than to set the task
		 * done bit, and indicate that the task abort function
		 * succeeded.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
					    SAS_TASK_STATE_PENDING);
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		ret = TMF_RESP_FUNC_COMPLETE;

		dev_dbg(&isci_host->pdev->dev,
			"%s: abort task not needed for %p\n",

	spin_lock_irqsave(&isci_host->scic_lock, flags);

	/* Check the request status and change to "aborted" if currently
	 * "starting"; if true then set the I/O kernel completion
	 * struct that will be triggered when the request completes.
	 */
	old_state = isci_task_validate_request_to_abort(
		old_request, isci_host, isci_device,
		&aborted_io_completion);
	if ((old_state != started) &&
	    (old_state != completed) &&
	    (old_state != aborting)) {

		spin_unlock_irqrestore(&isci_host->scic_lock, flags);

		/* The request was already being handled by someone else (because
		 * they got to set the state away from started).
		 */
		dev_dbg(&isci_host->pdev->dev,
			"%s: device = %p; old_request %p already being aborted\n",
			isci_device, old_request);
		ret = TMF_RESP_FUNC_COMPLETE;

	if (task->task_proto == SAS_PROTOCOL_SMP ||
	    sas_protocol_ata(task->task_proto) ||
	    test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {

		spin_unlock_irqrestore(&isci_host->scic_lock, flags);

		dev_dbg(&isci_host->pdev->dev,
			" or complete_in_target (%d), thus no TMF\n",
			((task->task_proto == SAS_PROTOCOL_SMP)
			 : (sas_protocol_ata(task->task_proto)
			test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));

		if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
			/* Already done in the target: report DONE to libsas. */
			spin_lock_irqsave(&task->task_state_lock, flags);
			task->task_state_flags |= SAS_TASK_STATE_DONE;
			task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
						    SAS_TASK_STATE_PENDING);
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			ret = TMF_RESP_FUNC_COMPLETE;

			spin_lock_irqsave(&task->task_state_lock, flags);
			task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
						    SAS_TASK_STATE_PENDING);
			spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* STP and SMP devices are not sent a TMF, but the
		 * outstanding I/O request is terminated below. This is
		 * because SATA/STP and SMP discovery path timeouts directly
		 * call the abort task interface for cleanup.
		 */
		perform_termination = 1;

		/* Fill in the tmf structure */
		isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
					       isci_abort_task_process_cb,

		spin_unlock_irqrestore(&isci_host->scic_lock, flags);

#define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
		ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
					    ISCI_ABORT_TASK_TIMEOUT_MS);

		if (ret == TMF_RESP_FUNC_COMPLETE)
			perform_termination = 1;

			dev_dbg(&isci_host->pdev->dev,
				"%s: isci_task_send_tmf failed\n", __func__);

	if (perform_termination) {
		set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);

		/* Clean up the request on our side, and wait for the aborted
		 * I/O to complete.
		 */
		isci_terminate_request_core(isci_host, isci_device,

	/* Make sure we do not leave a reference to aborted_io_completion */
	old_request->io_request_completion = NULL;

	isci_put_device(isci_device);
1190 * isci_task_abort_task_set() - This function is one of the SAS Domain Template
1191 * functions. This is one of the Task Management functoins called by libsas,
1192 * to abort all task for the given lun.
1193 * @d_device: This parameter specifies the domain device associated with this
1195 * @lun: This parameter specifies the lun associated with this request.
1197 * status, zero indicates success.
1199 int isci_task_abort_task_set(
1200 struct domain_device *d_device,
1203 return TMF_RESP_FUNC_FAILED;
1208 * isci_task_clear_aca() - This function is one of the SAS Domain Template
1209 * functions. This is one of the Task Management functoins called by libsas.
1210 * @d_device: This parameter specifies the domain device associated with this
1212 * @lun: This parameter specifies the lun associated with this request.
1214 * status, zero indicates success.
1216 int isci_task_clear_aca(
1217 struct domain_device *d_device,
1220 return TMF_RESP_FUNC_FAILED;
1226 * isci_task_clear_task_set() - This function is one of the SAS Domain Template
1227 * functions. This is one of the Task Management functoins called by libsas.
1228 * @d_device: This parameter specifies the domain device associated with this
1230 * @lun: This parameter specifies the lun associated with this request.
1232 * status, zero indicates success.
1234 int isci_task_clear_task_set(
1235 struct domain_device *d_device,
1238 return TMF_RESP_FUNC_FAILED;
1243 * isci_task_query_task() - This function is implemented to cause libsas to
1244 * correctly escalate the failed abort to a LUN or target reset (this is
1245 * because sas_scsi_find_task libsas function does not correctly interpret
1246 * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is
1247 * returned, libsas turns this into a LUN reset; when FUNC_FAILED is
1248 * returned, libsas will turn this into a target reset
1249 * @task: This parameter specifies the sas task being queried.
1250 * @lun: This parameter specifies the lun associated with this request.
1252 * status, zero indicates success.
1254 int isci_task_query_task(
1255 struct sas_task *task)
1257 /* See if there is a pending device reset for this device. */
1258 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1259 return TMF_RESP_FUNC_FAILED;
1261 return TMF_RESP_FUNC_SUCC;
1265 * isci_task_request_complete() - This function is called by the sci core when
1266 * an task request completes.
1267 * @ihost: This parameter specifies the ISCI host object
1268 * @ireq: This parameter is the completed isci_request object.
1269 * @completion_status: This parameter specifies the completion status from the
1275 isci_task_request_complete(struct isci_host *ihost,
1276 struct isci_request *ireq,
1277 enum sci_task_status completion_status)
1279 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
1280 struct completion *tmf_complete = NULL;
1281 struct completion *request_complete = ireq->io_request_completion;
1283 dev_dbg(&ihost->pdev->dev,
1284 "%s: request = %p, status=%d\n",
1285 __func__, ireq, completion_status);
1287 isci_request_change_state(ireq, completed);
1289 set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
1292 tmf->status = completion_status;
1294 if (tmf->proto == SAS_PROTOCOL_SSP) {
1295 memcpy(&tmf->resp.resp_iu,
1297 SSP_RESP_IU_MAX_SIZE);
1298 } else if (tmf->proto == SAS_PROTOCOL_SATA) {
1299 memcpy(&tmf->resp.d2h_fis,
1301 sizeof(struct dev_to_host_fis));
1303 /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
1304 tmf_complete = tmf->complete;
1306 sci_controller_complete_io(ihost, ireq->target_device, ireq);
1307 /* set the 'terminated' flag handle to make sure it cannot be terminated
1308 * or completed again.
1310 set_bit(IREQ_TERMINATED, &ireq->flags);
1312 /* As soon as something is in the terminate path, deallocation is
1313 * managed there. Note that the final non-managed state of a task
1314 * request is "completed".
1316 if ((ireq->status == completed) ||
1317 !isci_request_is_dealloc_managed(ireq->status)) {
1318 isci_request_change_state(ireq, unallocated);
1319 isci_free_tag(ihost, ireq->io_tag);
1320 list_del_init(&ireq->dev_node);
1323 /* "request_complete" is set if the task was being terminated. */
1324 if (request_complete)
1325 complete(request_complete);
1327 /* The task management part completes last. */
1329 complete(tmf_complete);
1332 static int isci_reset_device(struct isci_host *ihost,
1333 struct isci_remote_device *idev)
1335 struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
1336 enum sci_status status;
1337 unsigned long flags;
1340 dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1342 spin_lock_irqsave(&ihost->scic_lock, flags);
1343 status = sci_remote_device_reset(idev);
1344 if (status != SCI_SUCCESS) {
1345 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1347 dev_dbg(&ihost->pdev->dev,
1348 "%s: sci_remote_device_reset(%p) returned %d!\n",
1349 __func__, idev, status);
1351 return TMF_RESP_FUNC_FAILED;
1353 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1355 rc = sas_phy_reset(phy, true);
1357 /* Terminate in-progress I/O now. */
1358 isci_remote_device_nuke_requests(ihost, idev);
1360 /* Since all pending TCs have been cleaned, resume the RNC. */
1361 spin_lock_irqsave(&ihost->scic_lock, flags);
1362 status = sci_remote_device_reset_complete(idev);
1363 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1365 if (status != SCI_SUCCESS) {
1366 dev_dbg(&ihost->pdev->dev,
1367 "%s: sci_remote_device_reset_complete(%p) "
1368 "returned %d!\n", __func__, idev, status);
1371 dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
1376 int isci_task_I_T_nexus_reset(struct domain_device *dev)
1378 struct isci_host *ihost = dev_to_ihost(dev);
1379 struct isci_remote_device *idev;
1380 unsigned long flags;
1383 spin_lock_irqsave(&ihost->scic_lock, flags);
1384 idev = isci_lookup_device(dev);
1385 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1387 if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
1388 ret = TMF_RESP_FUNC_COMPLETE;
1392 ret = isci_reset_device(ihost, idev);
1394 isci_put_device(idev);
1398 int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1400 struct domain_device *dev = sdev_to_domain_dev(cmd->device);
1401 struct isci_host *ihost = dev_to_ihost(dev);
1402 struct isci_remote_device *idev;
1403 unsigned long flags;
1406 spin_lock_irqsave(&ihost->scic_lock, flags);
1407 idev = isci_lookup_device(dev);
1408 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1411 ret = TMF_RESP_FUNC_COMPLETE;
1415 ret = isci_reset_device(ihost, idev);
1417 isci_put_device(idev);