/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "host.h"
#include "isci.h"
#include "remote_device.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "scu_task_context.h"

/**
 * sci_remote_node_context_is_ready()
 * @sci_rnc: The remote node context to check.
 *
 * Return: true if the remote node context is in the READY state, false
 * otherwise.
 */
bool sci_remote_node_context_is_ready(
        struct sci_remote_node_context *sci_rnc)
{
        return sci_rnc->sm.current_state_id == SCI_RNC_READY;
}

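/*
 * Look up the remote node context buffer for @id.  Returns NULL when
 * the index is beyond the controller's remote node entries or when no
 * device currently occupies that slot in the device table.
 */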
static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
{
        if (id < ihost->remote_node_entries &&
            ihost->device_table[id])
                return &ihost->remote_node_context_table[id];

        return NULL;
}

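/*
 * Fill out the remote node context buffer in context RAM format: SAS
 * address, port width, nexus-loss and connection timers, and the open
 * address frame parameters.  The buffer is left marked invalid here;
 * it only becomes valid when posted to the hardware (see
 * sci_remote_node_context_validate_context_buffer()).
 */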
static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
{
        struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
        struct domain_device *dev = idev->domain_dev;
        int rni = sci_rnc->remote_node_index;
        union scu_remote_node_context *rnc;
        struct isci_host *ihost;
        __le64 sas_addr;

        ihost = idev->owning_port->owning_controller;
        rnc = sci_rnc_by_id(ihost, rni);

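        /* A device may consume more than one contiguous context entry
         * (e.g. SATA/STP), so clear every entry the device owns, not
         * just the first.
         */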
        memset(rnc, 0, sizeof(union scu_remote_node_context)
                * sci_remote_device_node_count(idev));

        rnc->ssp.remote_node_index = rni;
        rnc->ssp.remote_node_port_width = idev->device_port_width;
        rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;

        /* sas address is __be64, context ram format is __le64 */
        sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
        rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
        rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);

        rnc->ssp.nexus_loss_timer_enable = true;
        rnc->ssp.check_bit               = false;
        rnc->ssp.is_valid                = false;
        rnc->ssp.is_remote_node_context  = true;
        rnc->ssp.function_number         = 0;

        rnc->ssp.arbitration_wait_time = 0;

        if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
                rnc->ssp.connection_occupancy_timeout =
                        ihost->user_parameters.stp_max_occupancy_timeout;
                rnc->ssp.connection_inactivity_timeout =
                        ihost->user_parameters.stp_inactivity_timeout;
        } else {
                rnc->ssp.connection_occupancy_timeout  =
                        ihost->user_parameters.ssp_max_occupancy_timeout;
                rnc->ssp.connection_inactivity_timeout =
                        ihost->user_parameters.ssp_inactivity_timeout;
        }

        rnc->ssp.initial_arbitration_wait_time = 0;

        /* Open Address Frame Parameters */
        rnc->ssp.oaf_connection_rate = idev->connection_rate;
        rnc->ssp.oaf_features = 0;
        rnc->ssp.oaf_source_zone_group = 0;
        rnc->ssp.oaf_more_compatibility_features = 0;
}

/**
 * sci_remote_node_context_setup_to_resume()
 * @sci_rnc: The remote node context to set up.
 * @callback: The function to invoke once the RNC reaches the READY state.
 * @callback_parameter: The cookie passed to @callback.
 *
 * Set up the remote node context object so that it will transition to
 * its READY state.  If the remote node context is already set up to
 * transition to its FINAL state, this function does nothing.
 */
static void sci_remote_node_context_setup_to_resume(
        struct sci_remote_node_context *sci_rnc,
        scics_sds_remote_node_context_callback callback,
        void *callback_parameter)
{
        if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
                sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
                sci_rnc->user_callback     = callback;
                sci_rnc->user_cookie       = callback_parameter;
        }
}

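/*
 * Unconditionally latch the FINAL destination state so that any
 * in-flight transition terminates with the context destroyed.
 */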
static void sci_remote_node_context_setup_to_destroy(
        struct sci_remote_node_context *sci_rnc,
        scics_sds_remote_node_context_callback callback,
        void *callback_parameter)
{
        sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
        sci_rnc->user_callback     = callback;
        sci_rnc->user_cookie       = callback_parameter;
}

/**
 * sci_remote_node_context_notify_user()
 * @rnc: The remote node context whose user callback should fire.
 *
 * Invoke the user callback, if one is registered, and then reset the
 * callback.
 */
static void sci_remote_node_context_notify_user(
        struct sci_remote_node_context *rnc)
{
        if (rnc->user_callback != NULL) {
                (*rnc->user_callback)(rnc->user_cookie);

                rnc->user_callback = NULL;
                rnc->user_cookie = NULL;
        }
}

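/*
 * If a resume was requested while the RNC was mid-transition, restart
 * the resume now that an intermediate state has been reached.
 */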
static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
{
        if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
                sci_remote_node_context_resume(rnc, rnc->user_callback,
                                               rnc->user_cookie);
}

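/*
 * Mark the context buffer valid and post it to the SCU.  Expander
 * attached SATA/STP devices are posted with the 96-byte command;
 * everything else uses the 32-byte command, and direct attached
 * devices additionally have the port's transport layer programmed
 * with the remote node index.
 */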
static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
        union scu_remote_node_context *rnc_buffer;
        struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
        struct domain_device *dev = idev->domain_dev;
        struct isci_host *ihost = idev->owning_port->owning_controller;

        rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

        rnc_buffer->ssp.is_valid = true;

        if (!idev->is_direct_attached &&
            (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
                sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
        } else {
                sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);

                if (idev->is_direct_attached)
                        sci_port_setup_transports(idev->owning_port,
                                                  sci_rnc->remote_node_index);
        }
}

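/* Clear the valid bit and ask the SCU to invalidate the context. */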
static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
        union scu_remote_node_context *rnc_buffer;
        struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
        struct isci_host *ihost = idev->owning_port->owning_controller;

        rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

        rnc_buffer->ssp.is_valid = false;

        sci_remote_device_post_request(idev,
                                       SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
}

static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
{
        struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

        /* Check to see if we have gotten back to the initial state because
         * someone requested to destroy the remote node context object.
         */
        if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
                rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
                sci_remote_node_context_notify_user(rnc);
        }
}

static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
{
        struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);

        sci_remote_node_context_validate_context_buffer(sci_rnc);
}

static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
{
        struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

        sci_remote_node_context_invalidate_context_buffer(rnc);
}

static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
{
        struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
        struct isci_remote_device *idev;
        struct domain_device *dev;

        idev = rnc_to_dev(rnc);
        dev = idev->domain_dev;

        /*
         * For direct attached SATA devices we need to clear the TLCR
         * NCQ to TCi tag mapping on the phy, and in cases where we
         * resume because of a target reset we also need to update
         * the STPTLDARNI register with the RNi of the device.
         */
        if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
            idev->is_direct_attached)
                sci_port_setup_transports(idev->owning_port,
                                          rnc->remote_node_index);

        sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
}

static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
        struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

        rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;

        if (rnc->user_callback)
                sci_remote_node_context_notify_user(rnc);
}

static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
{
        struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

        sci_remote_node_context_continue_state_transitions(rnc);
}

static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
{
        struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

        sci_remote_node_context_continue_state_transitions(rnc);
}

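/*
 * SCI_RNC_AWAIT_SUSPENSION has no enter handler; the transition out of
 * it is driven by the suspension events handled in
 * sci_remote_node_context_event_handler().
 */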
static const struct sci_base_state sci_remote_node_context_state_table[] = {
        [SCI_RNC_INITIAL] = {
                .enter_state = sci_remote_node_context_initial_state_enter,
        },
        [SCI_RNC_POSTING] = {
                .enter_state = sci_remote_node_context_posting_state_enter,
        },
        [SCI_RNC_INVALIDATING] = {
                .enter_state = sci_remote_node_context_invalidating_state_enter,
        },
        [SCI_RNC_RESUMING] = {
                .enter_state = sci_remote_node_context_resuming_state_enter,
        },
        [SCI_RNC_READY] = {
                .enter_state = sci_remote_node_context_ready_state_enter,
        },
        [SCI_RNC_TX_SUSPENDED] = {
                .enter_state = sci_remote_node_context_tx_suspended_state_enter,
        },
        [SCI_RNC_TX_RX_SUSPENDED] = {
                .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
        },
        [SCI_RNC_AWAIT_SUSPENSION] = { },
};

void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
                                       u16 remote_node_index)
{
        memset(rnc, 0, sizeof(struct sci_remote_node_context));

        rnc->remote_node_index = remote_node_index;
        rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;

        sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
}

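/*
 * Drive the RNC state machine from SCU completion events.  Event codes
 * that are not meaningful in the current state fall through to the
 * warning at 'out' and are reported as SCI_FAILURE.
 */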
enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
                                                      u32 event_code)
{
        enum scis_sds_remote_node_context_states state;

        state = sci_rnc->sm.current_state_id;
        switch (state) {
        case SCI_RNC_POSTING:
                switch (scu_get_event_code(event_code)) {
                case SCU_EVENT_POST_RNC_COMPLETE:
                        sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
                        break;
                default:
                        goto out;
                }
                break;
        case SCI_RNC_INVALIDATING:
                if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
                        if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
                                state = SCI_RNC_INITIAL;
                        else
                                state = SCI_RNC_POSTING;
                        sci_change_state(&sci_rnc->sm, state);
                } else {
                        switch (scu_get_event_type(event_code)) {
                        case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
                        case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
                                /* We really don't care if the hardware is going to suspend
                                 * the device since it's being invalidated anyway */
                                dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
                                        "%s: SCIC Remote Node Context 0x%p was "
                                        "suspended by hardware while being "
                                        "invalidated.\n", __func__, sci_rnc);
                                break;
                        default:
                                goto out;
                        }
                }
                break;
        case SCI_RNC_RESUMING:
                if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
                        sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
                } else {
                        switch (scu_get_event_type(event_code)) {
                        case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
                        case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
                                /* We really don't care if the hardware is going to suspend
                                 * the device since it's being resumed anyway */
                                dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
                                        "%s: SCIC Remote Node Context 0x%p was "
                                        "suspended by hardware while being resumed.\n",
                                        __func__, sci_rnc);
                                break;
                        default:
                                goto out;
                        }
                }
                break;
        case SCI_RNC_READY:
                switch (scu_get_event_type(event_code)) {
                case SCU_EVENT_TL_RNC_SUSPEND_TX:
                        sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
                        sci_rnc->suspension_code = scu_get_event_specifier(event_code);
                        break;
                case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
                        sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
                        sci_rnc->suspension_code = scu_get_event_specifier(event_code);
                        break;
                default:
                        goto out;
                }
                break;
        case SCI_RNC_AWAIT_SUSPENSION:
                switch (scu_get_event_type(event_code)) {
                case SCU_EVENT_TL_RNC_SUSPEND_TX:
                        sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
                        sci_rnc->suspension_code = scu_get_event_specifier(event_code);
                        break;
                case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
                        sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
                        sci_rnc->suspension_code = scu_get_event_specifier(event_code);
                        break;
                default:
                        goto out;
                }
                break;
        default:
                dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
                         "%s: invalid state %d\n", __func__, state);
                return SCI_FAILURE_INVALID_STATE;
        }
        return SCI_SUCCESS;

 out:
        dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
                 "%s: code: %#x state: %d\n", __func__, event_code, state);
        return SCI_FAILURE;
}

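/*
 * Request destruction of the remote node context.  From any active
 * state the context is invalidated first; in the INITIAL state the
 * context is already effectively destructed, so the request succeeds
 * immediately.
 */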
enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
                                                 scics_sds_remote_node_context_callback cb_fn,
                                                 void *cb_p)
{
        enum scis_sds_remote_node_context_states state;

        state = sci_rnc->sm.current_state_id;
        switch (state) {
        case SCI_RNC_INVALIDATING:
                sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
                return SCI_SUCCESS;
        case SCI_RNC_POSTING:
        case SCI_RNC_RESUMING:
        case SCI_RNC_READY:
        case SCI_RNC_TX_SUSPENDED:
        case SCI_RNC_TX_RX_SUSPENDED:
        case SCI_RNC_AWAIT_SUSPENSION:
                sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
                sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
                return SCI_SUCCESS;
        case SCI_RNC_INITIAL:
                dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
                         "%s: invalid state %d\n", __func__, state);
                /* We have decided that the destruct request on the remote node
                 * context can not fail since it is either in the initial/destroyed
                 * state or can be destroyed.
                 */
                return SCI_SUCCESS;
        default:
                dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
                         "%s: invalid state %d\n", __func__, state);
                return SCI_FAILURE_INVALID_STATE;
        }
}

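/*
 * Suspension is only legal from the READY state.  A software requested
 * suspension is posted to the hardware explicitly; a hardware
 * initiated suspension is simply awaited.
 */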
enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
                                                u32 suspend_type,
                                                scics_sds_remote_node_context_callback cb_fn,
                                                void *cb_p)
{
        enum scis_sds_remote_node_context_states state;

        state = sci_rnc->sm.current_state_id;
        if (state != SCI_RNC_READY) {
                dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
                         "%s: invalid state %d\n", __func__, state);
                return SCI_FAILURE_INVALID_STATE;
        }

        sci_rnc->user_callback   = cb_fn;
        sci_rnc->user_cookie     = cb_p;
        sci_rnc->suspension_code = suspend_type;

        if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
                sci_remote_device_post_request(rnc_to_dev(sci_rnc),
                                               SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
        }

        sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
        return SCI_SUCCESS;
}

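/*
 * Request that the RNC transition (back) to the READY state.  The path
 * taken depends on the current state: an unposted context is
 * constructed and posted first, while a suspended SATA/STP context
 * behind an expander must be invalidated and reposted rather than
 * simply resumed.
 */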
enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
                                               scics_sds_remote_node_context_callback cb_fn,
                                               void *cb_p)
{
        enum scis_sds_remote_node_context_states state;

        state = sci_rnc->sm.current_state_id;
        switch (state) {
        case SCI_RNC_INITIAL:
                if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
                        return SCI_FAILURE_INVALID_STATE;

                sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
                sci_remote_node_context_construct_buffer(sci_rnc);
                sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
                return SCI_SUCCESS;
        case SCI_RNC_POSTING:
        case SCI_RNC_INVALIDATING:
        case SCI_RNC_RESUMING:
                if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
                        return SCI_FAILURE_INVALID_STATE;

                sci_rnc->user_callback = cb_fn;
                sci_rnc->user_cookie   = cb_p;
                return SCI_SUCCESS;
        case SCI_RNC_TX_SUSPENDED: {
                struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
                struct domain_device *dev = idev->domain_dev;

                sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);

                /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
                if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) {
                        sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
                } else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
                        if (idev->is_direct_attached) {
                                /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */
                                sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
                        } else {
                                sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
                        }
                } else {
                        return SCI_FAILURE;
                }
                return SCI_SUCCESS;
        }
        case SCI_RNC_TX_RX_SUSPENDED:
                sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
                sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
                return SCI_SUCCESS;
        case SCI_RNC_AWAIT_SUSPENSION:
                sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
                return SCI_SUCCESS;
        default:
                dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
                         "%s: invalid state %d\n", __func__, state);
                return SCI_FAILURE_INVALID_STATE;
        }
}

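/*
 * I/O may only be started against a READY context.  A suspended
 * context requires a remote device reset before it will accept new
 * I/O; any other state means the context is still being brought up.
 */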
enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
                                                 struct isci_request *ireq)
{
        enum scis_sds_remote_node_context_states state;

        state = sci_rnc->sm.current_state_id;

        switch (state) {
        case SCI_RNC_READY:
                return SCI_SUCCESS;
        case SCI_RNC_TX_SUSPENDED:
        case SCI_RNC_TX_RX_SUSPENDED:
        case SCI_RNC_AWAIT_SUSPENSION:
                dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
                         "%s: invalid state %d\n", __func__, state);
                return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
        default:
                break;
        }
        dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
                "%s: requested to start IO while still resuming, %d\n",
                __func__, state);
        return SCI_FAILURE_INVALID_STATE;
}

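/*
 * Task management requests are accepted in more states than normal
 * I/O: a suspended context is resumed in place so that the task frame
 * can be delivered.
 */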
enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
                                                   struct isci_request *ireq)
{
        enum scis_sds_remote_node_context_states state;

        state = sci_rnc->sm.current_state_id;
        switch (state) {
        case SCI_RNC_RESUMING:
        case SCI_RNC_READY:
        case SCI_RNC_AWAIT_SUSPENSION:
                return SCI_SUCCESS;
        case SCI_RNC_TX_SUSPENDED:
        case SCI_RNC_TX_RX_SUSPENDED:
                sci_remote_node_context_resume(sci_rnc, NULL, NULL);
                return SCI_SUCCESS;
        default:
                dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
                         "%s: invalid state %d\n", __func__, state);
                return SCI_FAILURE_INVALID_STATE;
        }
}