/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/device.h>
#include <scsi/sas.h>

#include "host.h"
#include "isci.h"
#include "port.h"
#include "probe_roms.h"
#include "remote_device.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "registers.h"
#include "scu_remote_node_context.h"
#include "scu_task_context.h"
#include "scu_unsolicited_frame.h"

#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200

/**
 * smu_dcc_get_max_ports() -
 *
 * This macro returns the maximum number of logical ports supported by the
 * hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mask and shift the value
 * appropriately.
 */
#define smu_dcc_get_max_ports(dcc_value) \
	((((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
	  >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1)

/**
 * smu_dcc_get_max_task_context() -
 *
 * This macro returns the maximum number of task contexts supported by the
 * hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mask and shift the value
 * appropriately.
 */
#define smu_dcc_get_max_task_context(dcc_value) \
	((((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
	  >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1)

/**
 * smu_dcc_get_max_remote_node_context() -
 *
 * This macro returns the maximum number of remote node contexts supported by
 * the hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mask and shift the value
 * appropriately.
 */
#define smu_dcc_get_max_remote_node_context(dcc_value) \
	((((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
	  >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1)
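
/*
 * Illustrative usage sketch (not part of the driver proper): decoding a
 * single read of the device context capacity register with the three
 * macros above.  The register name 'device_context_capacity' is an
 * assumption here, not verified against the register map in registers.h.
 *
 *	u32 dcc = readl(&scic->smu_registers->device_context_capacity);
 *
 *	u32 max_ports = smu_dcc_get_max_ports(dcc);
 *	u32 max_tcs   = smu_dcc_get_max_task_context(dcc);
 *	u32 max_rncs  = smu_dcc_get_max_remote_node_context(dcc);
 */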

#define SCIC_SDS_CONTROLLER_MIN_TIMER_COUNT  3
#define SCIC_SDS_CONTROLLER_MAX_TIMER_COUNT  3

/**
 *
 * The number of milliseconds to wait for a phy to start.
 */
#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100

/**
 *
 * The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power. Ultimately, this will
 * be specified by OEM parameter.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500

/**
 * NORMALIZE_PUT_POINTER() -
 *
 * This macro will normalize the completion queue put pointer so its value can
 * be used as an array index.
 */
#define NORMALIZE_PUT_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)

/**
 * NORMALIZE_EVENT_POINTER() -
 *
 * This macro will normalize the completion queue event entry so its value can
 * be used as an index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
	(((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
	 >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT)

/**
 * INCREMENT_COMPLETION_QUEUE_GET() -
 *
 * This macro will increment the controller's completion queue index value and
 * possibly toggle the cycle bit if the completion queue index wraps back to 0.
 */
#define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \
	INCREMENT_QUEUE_GET( \
		(index), \
		(cycle), \
		(controller)->completion_queue_entries, \
		SMU_CQGR_CYCLE_BIT)

/**
 * INCREMENT_EVENT_QUEUE_GET() -
 *
 * This macro will increment the controller's event queue index value and
 * possibly toggle the event cycle bit if the event queue index wraps back to 0.
 */
#define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \
	INCREMENT_QUEUE_GET( \
		(index), \
		(cycle), \
		(controller)->completion_event_entries, \
		SMU_CQGR_EVENT_CYCLE_BIT)
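
/*
 * INCREMENT_QUEUE_GET() itself is defined elsewhere in the driver; the
 * sketch below is a hedged reconstruction of its assumed behavior, based
 * only on how the two wrappers above invoke it: advance the index by one
 * and, when the index wraps back to 0, toggle the caller's cycle bit so
 * the driver stays in step with the hardware's cycle-bit convention.
 *
 *	#define INCREMENT_QUEUE_GET(index, cycle, entry_count, bit_toggle) \
 *		do { \
 *			if ((index) + 1 == (entry_count)) { \
 *				(index) = 0; \
 *				(cycle) ^= (bit_toggle); \
 *			} else \
 *				(index) += 1; \
 *		} while (0)
 */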

/**
 * NORMALIZE_GET_POINTER() -
 *
 * This macro will normalize the completion queue get pointer so its value can
 * be used as an index into an array.
 */
#define NORMALIZE_GET_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)

/**
 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
 *
 * This macro will normalize the completion queue cycle pointer so it matches
 * the completion queue cycle bit.
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))

/**
 * COMPLETION_QUEUE_CYCLE_BIT() -
 *
 * This macro will return the cycle bit of the completion queue entry.
 */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)

static bool scic_sds_controller_completion_queue_has_entries(
	struct scic_sds_controller *scic)
{
	u32 get_value = scic->completion_queue_get;
	u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;

	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
	    COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index]))
		return true;

	return false;
}

static bool scic_sds_controller_isr(struct scic_sds_controller *scic)
{
	if (scic_sds_controller_completion_queue_has_entries(scic))
		return true;

	/* If we have a spurious interrupt, it could be that we have already
	 * emptied the completion queue from a previous interrupt.
	 */
	writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);

	/* There is a race in the hardware that could cause us not to be
	 * notified of an interrupt completion if we do not take this step.
	 * We will mask then unmask the interrupts so that if another
	 * interrupt is pending after the interrupt source is cleared we get
	 * the next interrupt message.
	 */
	writel(0xFF000000, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);

	return false;
}

irqreturn_t isci_msix_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (scic_sds_controller_isr(&ihost->sci))
		tasklet_schedule(&ihost->completion_tasklet);

	return IRQ_HANDLED;
}
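
/*
 * A hedged sketch of how this handler gets wired up.  In this driver the
 * request_irq() calls live in the PCI init code outside this file; the
 * names num_msix, msix_entries and DRV_NAME below are assumptions for
 * illustration only.
 *
 *	int err = pci_enable_msix(pdev, msix_entries, num_msix);
 *	if (!err)
 *		err = devm_request_irq(&pdev->dev, msix_entries[0].vector,
 *				       isci_msix_isr, 0, DRV_NAME "-msix",
 *				       ihost);
 */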

static bool scic_sds_controller_error_isr(struct scic_sds_controller *scic)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&scic->smu_registers->interrupt_status);
	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);

	if (interrupt_status != 0) {
		/* There is an error interrupt pending, so let it through and
		 * handle it in the callback.
		 */
		return true;
	}

	/* There is a race in the hardware that could cause us not to be
	 * notified of an interrupt completion if we do not take this step.
	 * We will mask then unmask the error interrupts so if there was
	 * another interrupt pending we will be notified.
	 * Could we write the value of (SMU_ISR_QUEUE_ERROR |
	 * SMU_ISR_QUEUE_SUSPEND)?
	 */
	writel(0xff, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);

	return false;
}

static void scic_sds_controller_task_completion(struct scic_sds_controller *scic,
						u32 completion_entry)
{
	u32 index;
	struct scic_sds_request *io_request;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);
	io_request = scic->io_request_table[index];

	/* Make sure that we really want to process this IO request */
	if (io_request != NULL &&
	    io_request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
	    scic_sds_io_tag_get_sequence(io_request->io_tag) ==
	    scic->io_request_sequence[index]) {
		/* This is a valid IO request, so pass it along to the IO
		 * request handler.
		 */
		scic_sds_io_request_tc_completion(io_request, completion_entry);
	}
}

static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic,
						u32 completion_entry)
{
	u32 index;
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_command_request_type(completion_entry)) {
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
		io_request = scic->io_request_table[index];
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion type SDMA %x for io request "
			 "%p\n",
			 __func__, completion_entry, io_request);
		/* @todo For a post TC operation we need to fail the IO
		 * request.
		 */
		break;

	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
		device = scic->device_table[index];
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion type SDMA %x for remote "
			 "device %p\n",
			 __func__, completion_entry, device);
		/* @todo For a port RNC operation we need to fail the
		 * device.
		 */
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion unknown SDMA completion "
			 "type %x\n",
			 __func__, completion_entry);
		break;
	}
}

static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *scic,
						  u32 completion_entry)
{
	u32 index;
	u32 frame_index;
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scu_unsolicited_frame_header *frame_header;
	struct scic_sds_phy *phy;
	struct scic_sds_remote_device *device;

	enum sci_status result = SCI_FAILURE;

	frame_index = SCU_GET_FRAME_INDEX(completion_entry);

	frame_header = scic->uf_control.buffers.array[frame_index].header;
	scic->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;

	if (SCU_GET_FRAME_ERROR(completion_entry)) {
		/* @todo If the IAF frame or SIGNATURE FIS frame has an error
		 * will this cause a problem? We expect the phy initialization
		 * will fail if there is an error in the frame.
		 */
		scic_sds_controller_release_frame(scic, frame_index);
		return;
	}

	if (frame_header->is_address_frame) {
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &ihost->phys[index].sci;
		result = scic_sds_phy_frame_handler(phy, frame_index);
	} else {
		index = SCU_GET_COMPLETION_INDEX(completion_entry);

		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			/* This is a signature fis or a frame from a direct
			 * attached SATA device that has not yet been created.
			 * In either case forward the frame to the PE and let
			 * it take care of the frame data.
			 */
			index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
			phy = &ihost->phys[index].sci;
			result = scic_sds_phy_frame_handler(phy, frame_index);
		} else {
			if (index < scic->remote_node_entries)
				device = scic->device_table[index];
			else
				device = NULL;

			if (device != NULL)
				result = scic_sds_remote_device_frame_handler(device, frame_index);
			else
				scic_sds_controller_release_frame(scic, frame_index);
		}
	}

	if (result != SCI_SUCCESS) {
		/* @todo Is there any reason to report some additional error
		 * message when we get this failure notification?
		 */
	}
}

static void scic_sds_controller_event_completion(struct scic_sds_controller *scic,
						 u32 completion_entry)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;
	struct scic_sds_phy *phy;
	u32 index;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_event_type(completion_entry)) {
	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
		/* @todo The driver did something wrong and we need to fix the
		 * condition.
		 */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received SMU command error "
			"0x%x\n",
			__func__, scic, completion_entry);
		break;

	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
	case SCU_EVENT_TYPE_SMU_ERROR:
	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/* @todo This is a hardware failure and it's likely that we
		 * want to reset the controller.
		 */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received fatal controller "
			"event 0x%x\n",
			__func__, scic, completion_entry);
		break;

	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		io_request = scic->io_request_table[index];
		scic_sds_io_request_event_handler(io_request, completion_entry);
		break;

	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		switch (scu_get_event_specifier(completion_entry)) {
		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
			io_request = scic->io_request_table[index];
			if (io_request != NULL)
				scic_sds_io_request_event_handler(io_request, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesn't exist.\n",
					 __func__, scic, completion_entry);
			break;

		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
			device = scic->device_table[index];
			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesn't exist.\n",
					 __func__, scic, completion_entry);
			break;
		}
		break;

	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
		/* Direct the broadcast change event to the phy first and then
		 * let the phy redirect the broadcast change to the port
		 * object.
		 */
	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
		/* Direct the error counter event to the phy object since that
		 * is where we get the event notification. This is a type 4
		 * event.
		 */
	case SCU_EVENT_TYPE_OSSP_EVENT:
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &ihost->phys[index].sci;
		scic_sds_phy_event_handler(phy, completion_entry);
		break;

	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
		if (index < scic->remote_node_entries) {
			device = scic->device_table[index];

			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
		} else
			dev_err(scic_to_dev(scic),
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%x that doesn't "
				"exist.\n",
				__func__, scic, completion_entry, index);
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC Controller received unknown event code %x\n",
			 __func__, completion_entry);
		break;
	}
}

static void scic_sds_controller_process_completions(struct scic_sds_controller *scic)
{
	u32 completion_count = 0;
	u32 completion_entry;
	u32 get_index;
	u32 get_cycle;
	u32 event_index;
	u32 event_cycle;

	dev_dbg(scic_to_dev(scic),
		"%s: completion queue beginning get:0x%08x\n",
		__func__,
		scic->completion_queue_get);

	/* Get the component parts of the completion queue */
	get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get);
	get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get;

	event_index = NORMALIZE_EVENT_POINTER(scic->completion_queue_get);
	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get;

	while (NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle) ==
	       COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index])) {
		completion_count++;

		completion_entry = scic->completion_queue[get_index];
		INCREMENT_COMPLETION_QUEUE_GET(scic, get_index, get_cycle);

		dev_dbg(scic_to_dev(scic),
			"%s: completion queue entry:0x%08x\n",
			__func__,
			completion_entry);

		switch (SCU_GET_COMPLETION_TYPE(completion_entry)) {
		case SCU_COMPLETION_TYPE_TASK:
			scic_sds_controller_task_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_SDMA:
			scic_sds_controller_sdma_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_UFI:
			scic_sds_controller_unsolicited_frame(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_EVENT:
			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
			scic_sds_controller_event_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_NOTIFY:
			/* Presently we do the same thing with a notify event
			 * that we do with the other event codes.
			 */
			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
			scic_sds_controller_event_completion(scic, completion_entry);
			break;

		default:
			dev_warn(scic_to_dev(scic),
				 "%s: SCIC Controller received unknown "
				 "completion type %x\n",
				 __func__,
				 completion_entry);
			break;
		}
	}

	/* Update the get register if we completed one or more entries */
	if (completion_count > 0) {
		scic->completion_queue_get =
			SMU_CQGR_GEN_BIT(ENABLE) |
			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
			event_cycle |
			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index) |
			get_cycle |
			SMU_CQGR_GEN_VAL(POINTER, get_index);

		writel(scic->completion_queue_get,
		       &scic->smu_registers->completion_queue_get);
	}

	dev_dbg(scic_to_dev(scic),
		"%s: completion queue ending get:0x%08x\n",
		__func__,
		scic->completion_queue_get);
}

static void scic_sds_controller_error_handler(struct scic_sds_controller *scic)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&scic->smu_registers->interrupt_status);

	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
	    scic_sds_controller_completion_queue_has_entries(scic)) {

		scic_sds_controller_process_completions(scic);
		writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status);
	} else {
		dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__,
			interrupt_status);

		sci_base_state_machine_change_state(&scic->state_machine,
						    SCI_BASE_CONTROLLER_STATE_FAILED);

		return;
	}

	/* If we don't process any completions I am not sure that we want to
	 * do this.  We are in the middle of a hardware fault and should
	 * probably be reset.
	 */
	writel(0, &scic->smu_registers->interrupt_mask);
}

irqreturn_t isci_intx_isr(int vec, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct isci_host *ihost = data;
	struct scic_sds_controller *scic = &ihost->sci;

	if (scic_sds_controller_isr(scic)) {
		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
		tasklet_schedule(&ihost->completion_tasklet);
		ret = IRQ_HANDLED;
	} else if (scic_sds_controller_error_isr(scic)) {
		spin_lock(&ihost->scic_lock);
		scic_sds_controller_error_handler(scic);
		spin_unlock(&ihost->scic_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

irqreturn_t isci_error_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (scic_sds_controller_error_isr(&ihost->sci))
		scic_sds_controller_error_handler(&ihost->sci);

	return IRQ_HANDLED;
}

/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @isci_host: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 */
static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	if (completion_status != SCI_SUCCESS)
		dev_info(&ihost->pdev->dev,
			 "controller start timed out, continuing...\n");
	isci_host_change_state(ihost, isci_ready);
	clear_bit(IHOST_START_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;

	if (test_bit(IHOST_START_PENDING, &ihost->flags))
		return 0;

	/* todo: use sas_flush_discovery once it is upstream */
	scsi_flush_work(shost);

	scsi_flush_work(shost);

	dev_dbg(&ihost->pdev->dev,
		"%s: ihost->status = %d, time = %ld\n",
		__func__, isci_host_get_state(ihost), time);

	return 1;
}

/**
 * scic_controller_get_suggested_start_timeout() - This method returns the
 *    suggested scic_controller_start() timeout amount.  The user is free to
 *    use any timeout value, but this method provides the suggested minimum
 *    start timeout value.  The returned value is based upon empirical
 *    information determined as a result of interoperability testing.
 * @controller: the handle to the controller object for which to return the
 *    suggested start timeout.
 *
 * This method returns the number of milliseconds for the suggested start
 * operation timeout.
 */
static u32 scic_controller_get_suggested_start_timeout(
	struct scic_sds_controller *sc)
{
	/* Validate the user supplied parameters. */
	if (sc == NULL)
		return 0;

	/*
	 * The suggested minimum timeout value for a controller start operation:
	 *
	 *     Signature FIS Timeout
	 *   + Phy Start Timeout
	 *   + Number of Phy Spin Up Intervals
	 *   ---------------------------------
	 *   Number of milliseconds for the controller start operation.
	 *
	 * NOTE: The number of phy spin up intervals will be equivalent
	 *       to the number of phys divided by the number of phys allowed
	 *       per interval - 1 (once OEM parameters are supported).
	 *       Currently we assume only 1 phy per interval.
	 */
	return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
		+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
		+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
}
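
/*
 * Worked example (SCI_MAX_PHYS is 4 on this hardware): the suggested
 * timeout evaluates to
 *
 *	SCIC_SDS_SIGNATURE_FIS_TIMEOUT + 100 + (3 * 500)
 *
 * i.e. the signature FIS timeout plus 1600 ms of phy start and staggered
 * spin-up budget.
 */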

static void scic_controller_enable_interrupts(
	struct scic_sds_controller *scic)
{
	BUG_ON(scic->smu_registers == NULL);
	writel(0, &scic->smu_registers->interrupt_mask);
}

void scic_controller_disable_interrupts(
	struct scic_sds_controller *scic)
{
	BUG_ON(scic->smu_registers == NULL);
	writel(0xffffffff, &scic->smu_registers->interrupt_mask);
}

static void scic_sds_controller_enable_port_task_scheduler(
	struct scic_sds_controller *scic)
{
	u32 port_task_scheduler_value;

	port_task_scheduler_value =
		readl(&scic->scu_registers->peg0.ptsg.control);
	port_task_scheduler_value |=
		(SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
		 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
	writel(port_task_scheduler_value,
	       &scic->scu_registers->peg0.ptsg.control);
}

static void scic_sds_controller_assign_task_entries(struct scic_sds_controller *scic)
{
	u32 task_assignment;

	/*
	 * Assign all the TCs to function 0
	 * TODO: Do we actually need to read this register to write it back?
	 */
	task_assignment =
		readl(&scic->smu_registers->task_context_assignment[0]);

	task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
		(SMU_TCA_GEN_VAL(ENDING, scic->task_context_entries - 1)) |
		(SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));

	writel(task_assignment,
	       &scic->smu_registers->task_context_assignment[0]);
}

static void scic_sds_controller_initialize_completion_queue(struct scic_sds_controller *scic)
{
	u32 index;
	u32 completion_queue_control_value;
	u32 completion_queue_get_value;
	u32 completion_queue_put_value;

	scic->completion_queue_get = 0;

	completion_queue_control_value =
		(SMU_CQC_QUEUE_LIMIT_SET(scic->completion_queue_entries - 1) |
		 SMU_CQC_EVENT_LIMIT_SET(scic->completion_event_entries - 1));

	writel(completion_queue_control_value,
	       &scic->smu_registers->completion_queue_control);

	/* Set the completion queue get pointer and enable the queue */
	completion_queue_get_value =
		((SMU_CQGR_GEN_VAL(POINTER, 0)) |
		 (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0)) |
		 (SMU_CQGR_GEN_BIT(ENABLE)) |
		 (SMU_CQGR_GEN_BIT(EVENT_ENABLE)));

	writel(completion_queue_get_value,
	       &scic->smu_registers->completion_queue_get);

	/* Set the completion queue put pointer */
	completion_queue_put_value =
		((SMU_CQPR_GEN_VAL(POINTER, 0)) |
		 (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0)));

	writel(completion_queue_put_value,
	       &scic->smu_registers->completion_queue_put);

	/* Initialize the cycle bit of the completion queue entries */
	for (index = 0; index < scic->completion_queue_entries; index++) {
		/*
		 * If get.cycle_bit != completion_queue.cycle_bit
		 * it's not a valid completion queue entry,
		 * so at system start all entries are invalid.
		 */
		scic->completion_queue[index] = 0x80000000;
	}
}
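
/*
 * Explanatory note on the 0x80000000 priming above: the get pointer is
 * initialized with its cycle bit clear, while every queue entry starts
 * with bit 31 set.  The validity test used throughout this file,
 *
 *	NORMALIZE_GET_POINTER_CYCLE_BIT(get) ==
 *		COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[idx])
 *
 * therefore fails for every primed entry until the hardware overwrites it
 * with a real completion carrying the matching cycle bit; on each wrap of
 * the queue both sides toggle, keeping stale entries invalid.
 */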

static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_sds_controller *scic)
{
	u32 frame_queue_control_value;
	u32 frame_queue_get_value;
	u32 frame_queue_put_value;

	/* Write the queue size */
	frame_queue_control_value =
		SCU_UFQC_GEN_VAL(QUEUE_SIZE,
				 scic->uf_control.address_table.count);

	writel(frame_queue_control_value,
	       &scic->scu_registers->sdma.unsolicited_frame_queue_control);

	/* Setup the get pointer for the unsolicited frame queue */
	frame_queue_get_value =
		(SCU_UFQGP_GEN_VAL(POINTER, 0) |
		 SCU_UFQGP_GEN_BIT(ENABLE_BIT));

	writel(frame_queue_get_value,
	       &scic->scu_registers->sdma.unsolicited_frame_get_pointer);

	/* Setup the put pointer for the unsolicited frame queue */
	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
	writel(frame_queue_put_value,
	       &scic->scu_registers->sdma.unsolicited_frame_put_pointer);
}

/**
 * This method will attempt to transition into the ready state for the
 *    controller and indicate that the controller start operation has completed
 *    if all criteria are met.
 * @scic: This parameter indicates the controller object for which
 *    to transition to ready.
 * @status: This parameter indicates the status value to be passed into the
 *    call to scic_cb_controller_start_complete().
 */
static void scic_sds_controller_transition_to_ready(
	struct scic_sds_controller *scic,
	enum sci_status status)
{
	struct isci_host *ihost = scic_to_ihost(scic);

	if (scic->state_machine.current_state_id ==
	    SCI_BASE_CONTROLLER_STATE_STARTING) {
		/*
		 * We move into the ready state, because some of the phys/ports
		 * may be up and operational.
		 */
		sci_base_state_machine_change_state(&scic->state_machine,
						    SCI_BASE_CONTROLLER_STATE_READY);

		isci_host_start_complete(ihost, status);
	}
}

static void scic_sds_controller_phy_timer_stop(struct scic_sds_controller *scic)
{
	isci_timer_stop(scic->phy_startup_timer);

	scic->phy_startup_timer_pending = false;
}

static void scic_sds_controller_phy_timer_start(struct scic_sds_controller *scic)
{
	isci_timer_start(scic->phy_startup_timer,
			 SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);

	scic->phy_startup_timer_pending = true;
}

static bool is_phy_starting(struct scic_sds_phy *sci_phy)
{
	enum scic_sds_phy_states state;

	state = sci_phy->state_machine.current_state_id;
	switch (state) {
	case SCI_BASE_PHY_STATE_STARTING:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL:
		return true;
	default:
		return false;
	}
}

/**
 * scic_sds_controller_start_next_phy - start phy
 * @scic: controller
 *
 * If all the phys have been started, then attempt to transition the
 * controller to the READY state and inform the user
 * (scic_cb_controller_start_complete()).
 */
static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
	struct scic_sds_phy *sci_phy;
	enum sci_status status;

	status = SCI_SUCCESS;

	if (scic->phy_startup_timer_pending)
		return status;

	if (scic->next_phy_to_start >= SCI_MAX_PHYS) {
		bool is_controller_start_complete = true;
		u32 state;
		u8 index;

		for (index = 0; index < SCI_MAX_PHYS; index++) {
			sci_phy = &ihost->phys[index].sci;
			state = sci_phy->state_machine.current_state_id;

			if (!phy_get_non_dummy_port(sci_phy))
				continue;

			/* The controller start operation is complete iff:
			 * - all links have been given an opportunity to start
			 * - have no indication of a connected device
			 * - have an indication of a connected device and it has
			 *   finished the link training process.
			 */
			if ((sci_phy->is_in_link_training == false &&
			     state == SCI_BASE_PHY_STATE_INITIAL) ||
			    (sci_phy->is_in_link_training == false &&
			     state == SCI_BASE_PHY_STATE_STOPPED) ||
			    (sci_phy->is_in_link_training == true &&
			     is_phy_starting(sci_phy))) {
				is_controller_start_complete = false;
				break;
			}
		}

		/*
		 * The controller has successfully finished the start process.
		 * Inform the SCI Core user and transition to the READY state.
		 */
		if (is_controller_start_complete == true) {
			scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS);
			scic_sds_controller_phy_timer_stop(scic);
		}
	} else {
		sci_phy = &ihost->phys[scic->next_phy_to_start].sci;

		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
			if (phy_get_non_dummy_port(sci_phy) == NULL) {
				scic->next_phy_to_start++;

				/* Caution: recursion ahead.
				 *
				 * The PHY was never added to a PORT in MPC
				 * mode, so start the next phy in sequence.
				 * This phy will never go link up and will not
				 * draw power; the OEM parameters either
				 * configured the phy incorrectly for the PORT
				 * or it was never assigned to a PORT.
				 */
				return scic_sds_controller_start_next_phy(scic);
			}
		}

		status = scic_sds_phy_start(sci_phy);

		if (status == SCI_SUCCESS) {
			scic_sds_controller_phy_timer_start(scic);
		} else {
			dev_warn(scic_to_dev(scic),
				 "%s: Controller start operation failed "
				 "to start phy %d because of status "
				 "%d.\n",
				 __func__,
				 ihost->phys[scic->next_phy_to_start].sci.phy_index,
				 status);
		}

		scic->next_phy_to_start++;
	}

	return status;
}

static void scic_sds_controller_phy_startup_timeout_handler(void *_scic)
{
	struct scic_sds_controller *scic = _scic;
	enum sci_status status;

	scic->phy_startup_timer_pending = false;
	status = SCI_FAILURE;
	while (status != SCI_SUCCESS)
		status = scic_sds_controller_start_next_phy(scic);
}

static enum sci_status scic_controller_start(struct scic_sds_controller *scic,
					     u32 timeout)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	enum sci_status result;
	u16 index;

	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller start operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Build the TCi free pool */
	sci_pool_initialize(scic->tci_pool);
	for (index = 0; index < scic->task_context_entries; index++)
		sci_pool_put(scic->tci_pool, index);

	/* Build the RNi free pool */
	scic_sds_remote_node_table_initialize(
		&scic->available_remote_nodes,
		scic->remote_node_entries);

	/*
	 * Before anything else lets make sure we will not be
	 * interrupted by the hardware.
	 */
	scic_controller_disable_interrupts(scic);

	/* Enable the port task scheduler */
	scic_sds_controller_enable_port_task_scheduler(scic);

	/* Assign all the task entries to scic physical function */
	scic_sds_controller_assign_task_entries(scic);

	/* Now initialize the completion queue */
	scic_sds_controller_initialize_completion_queue(scic);

	/* Initialize the unsolicited frame queue for use */
	scic_sds_controller_initialize_unsolicited_frame_queue(scic);

	/* Start all of the ports on this controller */
	for (index = 0; index < scic->logical_port_entries; index++) {
		struct scic_sds_port *sci_port = &ihost->ports[index].sci;

		result = scic_sds_port_start(sci_port);
		if (result)
			return result;
	}

	scic_sds_controller_start_next_phy(scic);

	sci_mod_timer(&scic->timer, timeout);

	sci_base_state_machine_change_state(&scic->state_machine,
					    SCI_BASE_CONTROLLER_STATE_STARTING);

	return SCI_SUCCESS;
}

void isci_host_scan_start(struct Scsi_Host *shost)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
	unsigned long tmo = scic_controller_get_suggested_start_timeout(&ihost->sci);

	set_bit(IHOST_START_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	scic_controller_start(&ihost->sci, tmo);
	scic_controller_enable_interrupts(&ihost->sci);
	spin_unlock_irq(&ihost->scic_lock);
}

static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	isci_host_change_state(ihost, isci_stopped);
	scic_controller_disable_interrupts(&ihost->sci);
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

static void scic_sds_controller_completion_handler(struct scic_sds_controller *scic)
{
	/* Empty out the completion queue */
	if (scic_sds_controller_completion_queue_has_entries(scic))
		scic_sds_controller_process_completions(scic);

	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);
}

/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler. It's
 *    scheduled as a tasklet from the interrupt service routine when
 *    interrupts are in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 */
static void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *isci_host = (struct isci_host *)data;
	struct list_head completed_request_list;
	struct list_head errored_request_list;
	struct list_head *current_position;
	struct list_head *next_position;
	struct isci_request *request;
	struct isci_request *next_request;
	struct sas_task *task;

	INIT_LIST_HEAD(&completed_request_list);
	INIT_LIST_HEAD(&errored_request_list);

	spin_lock_irq(&isci_host->scic_lock);

	scic_sds_controller_completion_handler(&isci_host->sci);

	/* Take the lists of completed I/Os from the host. */
	list_splice_init(&isci_host->requests_to_complete,
			 &completed_request_list);

	/* Take the list of errored I/Os from the host. */
	list_splice_init(&isci_host->requests_to_errorback,
			 &errored_request_list);

	spin_unlock_irq(&isci_host->scic_lock);

	/* Process any completions in the lists. */
	list_for_each_safe(current_position, next_position,
			   &completed_request_list) {

		request = list_entry(current_position, struct isci_request,
				     completed_node);
		task = isci_request_access_task(request);

		/* Normal notification (task_done) */
		dev_dbg(&isci_host->pdev->dev,
			"%s: Normal - request/task = %p/%p\n",
			__func__, request, task);

		/* Return the task to libsas */
		if (task != NULL) {

			task->lldd_task = NULL;
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

				/* If the task is already in the abort path,
				 * the task_done callback cannot be called.
				 */
				task->task_done(task);
			}
		}
		/* Free the request object. */
		isci_request_free(isci_host, request);
	}
	list_for_each_entry_safe(request, next_request, &errored_request_list,
				 completed_node) {

		task = isci_request_access_task(request);

		/* Use sas_task_abort */
		dev_warn(&isci_host->pdev->dev,
			 "%s: Error - request/task = %p/%p\n",
			 __func__, request, task);

		if (task != NULL) {

			/* Put the task into the abort path if it's not there
			 * already.
			 */
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
				sas_task_abort(task);

		} else {
			/* This is a case where the request has completed with a
			 * status such that it needed further target servicing,
			 * but the sas_task reference has already been removed
			 * from the request.  Since it was errored, it was not
			 * being aborted, so there is nothing to do except free
			 * it.
			 */

			spin_lock_irq(&isci_host->scic_lock);
			/* Remove the request from the remote device's list
			 * of pending requests.
			 */
			list_del_init(&request->dev_node);
			spin_unlock_irq(&isci_host->scic_lock);

			/* Free the request object. */
			isci_request_free(isci_host, request);
		}
	}
}

/**
 * scic_controller_stop() - This method will stop an individual controller
 *    object.  This method will invoke the associated user callback upon
 *    completion.  The completion callback is called when the following
 *    conditions are met: -# the method return status is SCI_SUCCESS. -# the
 *    controller has been quiesced. This method will ensure that all IO
 *    requests are quiesced, phys are stopped, and all additional operation by
 *    the hardware is halted.
 * @controller: the handle to the controller object to stop.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    stop operation should complete.
 *
 * The controller must be in the STARTED or STOPPED state. Indicate if the
 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
 * controller is not either in the STARTED or STOPPED states.
 */
static enum sci_status scic_controller_stop(struct scic_sds_controller *scic,
					    u32 timeout)
{
	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_READY) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller stop operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_mod_timer(&scic->timer, timeout);
	sci_base_state_machine_change_state(&scic->state_machine,
					    SCI_BASE_CONTROLLER_STATE_STOPPING);
	return SCI_SUCCESS;
}

/**
 * scic_controller_reset() - This method will reset the supplied core
 *    controller regardless of the state of said controller.  This operation is
 *    considered destructive.  In other words, all current operations are wiped
 *    out.  No IO completions for outstanding devices occur.  Outstanding IO
 *    requests are not aborted or completed at the actual remote device.
 * @controller: the handle to the controller object to reset.
 *
 * Indicate if the controller reset method succeeded or failed in some way.
 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
 * the controller reset operation is unable to complete.
 */
static enum sci_status scic_controller_reset(struct scic_sds_controller *scic)
{
	switch (scic->state_machine.current_state_id) {
	case SCI_BASE_CONTROLLER_STATE_RESET:
	case SCI_BASE_CONTROLLER_STATE_READY:
	case SCI_BASE_CONTROLLER_STATE_STOPPED:
	case SCI_BASE_CONTROLLER_STATE_FAILED:
		/*
		 * The reset operation is not a graceful cleanup, just
		 * perform the state transition.
		 */
		sci_base_state_machine_change_state(&scic->state_machine,
						    SCI_BASE_CONTROLLER_STATE_RESETTING);
		return SCI_SUCCESS;
	default:
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller reset operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}
}

void isci_host_deinit(struct isci_host *ihost)
{
	int i;

	isci_host_change_state(ihost, isci_stopping);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		struct isci_port *iport = &ihost->ports[i];
		struct isci_remote_device *idev, *d;

		list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
			isci_remote_device_change_state(idev, isci_stopping);
			isci_remote_device_stop(ihost, idev);
		}
	}

	set_bit(IHOST_STOP_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	scic_controller_stop(&ihost->sci, SCIC_CONTROLLER_STOP_TIMEOUT);
	spin_unlock_irq(&ihost->scic_lock);

	wait_for_stop(ihost);
	scic_controller_reset(&ihost->sci);

	/* Cancel any/all outstanding port timers */
	for (i = 0; i < ihost->sci.logical_port_entries; i++) {
		struct scic_sds_port *sci_port = &ihost->ports[i].sci;
		del_timer_sync(&sci_port->timer.timer);
	}

	/* Cancel any/all outstanding phy timers */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct scic_sds_phy *sci_phy = &ihost->phys[i].sci;
		del_timer_sync(&sci_phy->sata_timer.timer);
	}

	del_timer_sync(&ihost->sci.port_agent.timer.timer);

	del_timer_sync(&ihost->sci.power_control.timer.timer);

	del_timer_sync(&ihost->sci.timer.timer);

	isci_timer_list_destroy(ihost);
}

static void __iomem *scu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

static void __iomem *smu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}
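
/*
 * Worked example of the BAR math above (a sketch, not driver code): the
 * PCI device exposes multiple controllers through one register BAR, so a
 * controller's window sits its id times the per-controller size past the
 * mapped base.  For id == 1:
 *
 *	void __iomem *smu0 = pcim_iomap_table(pdev)[SCI_SMU_BAR * 2];
 *	void __iomem *smu1 = smu0 + SCI_SMU_BAR_SIZE;
 */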

static void isci_user_parameters_get(
	struct isci_host *isci_host,
	union scic_user_parameters *scic_user_params)
{
	struct scic_sds_user_parameters *u = &scic_user_params->sds1;
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct sci_phy_user_params *u_phy = &u->phys[i];

		u_phy->max_speed_generation = phy_gen;

		/* we are not exporting these for now */
		u_phy->align_insertion_frequency = 0x7f;
		u_phy->in_connection_align_insertion_frequency = 0xff;
		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
	}

	u->stp_inactivity_timeout = stp_inactive_to;
	u->ssp_inactivity_timeout = ssp_inactive_to;
	u->stp_max_occupancy_timeout = stp_max_occ_to;
	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
	u->no_outbound_task_timeout = no_outbound_task_to;
	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
}
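
/*
 * The bare identifiers consumed above (phy_gen, stp_inactive_to,
 * ssp_inactive_to, max_concurr_spinup, ...) are module parameters declared
 * elsewhere in the driver.  A hedged sketch of such a declaration (the
 * default value and description string here are assumptions):
 *
 *	static unsigned char phy_gen = 3;
 *	module_param(phy_gen, byte, 0);
 *	MODULE_PARM_DESC(phy_gen,
 *		"PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
 */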

static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	sci_base_state_machine_change_state(&scic->state_machine,
					    SCI_BASE_CONTROLLER_STATE_RESET);
}

static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	sci_del_timer(&scic->timer);
}

#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
#define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
#define INTERRUPT_COALESCE_NUMBER_MAX                        256
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28
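
/*
 * Worked example of the encode bounds above: encode 7 is the base range,
 * roughly 853 ns to 1280 ns (the BASE_RANGE defines), and each subsequent
 * encode doubles both bounds.  Encode 8 therefore covers about 1.7 us to
 * 2.6 us, and encode 28 (the maximum) about 1.8 s to 2.7 s, which is where
 * the 2700000 us INTERRUPT_COALESCE_TIMEOUT_MAX_US limit comes from:
 *
 *	853 ns * 2^21 ~= 1.79 s,  1280 ns * 2^21 ~= 2.68 s
 */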

/**
 * scic_controller_set_interrupt_coalescence() - This method allows the user to
 *    configure the interrupt coalescence.
 * @controller: This parameter represents the handle to the controller object
 *    for which its interrupt coalesce register is overridden.
 * @coalesce_number: Used to control the number of entries in the Completion
 *    Queue before an interrupt is generated. If the number of entries exceed
 *    this number, an interrupt will be generated. The valid range of the input
 *    is [0, 256]. A setting of 0 results in coalescing being disabled.
 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
 *    input is [0, 2700000]. A setting of 0 is allowed and results in no
 *    interrupt coalescing timeout.
 *
 * Indicate if the user successfully set the interrupt coalesce parameters.
 * SCI_SUCCESS The user successfully updated the interrupt coalescence.
 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
 */
static enum sci_status scic_controller_set_interrupt_coalescence(
	struct scic_sds_controller *scic_controller,
	u32 coalesce_number,
	u32 coalesce_timeout)
{
	u8 timeout_encode = 0;
	u32 min = 0;
	u32 max = 0;

	/* Check if the input parameters fall in the range. */
	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	/*
	 * Defined encoding for interrupt coalescing timeout:
	 *   Value   Min   Max   Units
	 *   -----   ---   ---   -----
	 *   (full encoding table elided in this excerpt)
	 *   Others Undefined
	 */

	/*
	 * Use the table above to decide the encode of interrupt coalescing
	 * timeout value for register writing.
	 */
	if (coalesce_timeout == 0)
		timeout_encode = 0;
	else {
		/* make the timeout value in unit of (10 ns). */
		coalesce_timeout = coalesce_timeout * 100;
		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

		/* get the encode of timeout for register writing. */
		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
		     timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
		     timeout_encode++) {
			if (min <= coalesce_timeout && max > coalesce_timeout)
				break;
			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
					break;
				else {
					timeout_encode++;
					break;
				}
			} else {
				max = max * 2;
				min = min * 2;
			}
		}

		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
			/* the value is out of range. */
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
	}

	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
	       &scic_controller->smu_registers->interrupt_coalesce_control);

	scic_controller->interrupt_coalesce_number = (u16)coalesce_number;
	scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100;

	return SCI_SUCCESS;
}

static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	/* set the default interrupt coalescence number and timeout value. */
	scic_controller_set_interrupt_coalescence(scic, 0x10, 250);
}

static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	/* disable interrupt coalescence. */
	scic_controller_set_interrupt_coalescence(scic, 0, 0);
}

static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status status;
	enum sci_status phy_status;
	struct isci_host *ihost = scic_to_ihost(scic);

	status = SCI_SUCCESS;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		phy_status = scic_sds_phy_stop(&ihost->phys[index].sci);

		if (phy_status != SCI_SUCCESS &&
		    phy_status != SCI_FAILURE_INVALID_STATE) {
			status = SCI_FAILURE;

			dev_warn(scic_to_dev(scic),
				 "%s: Controller stop operation failed to stop "
				 "phy %d because of status %d.\n",
				 __func__,
				 ihost->phys[index].sci.phy_index, phy_status);
		}
	}

	return status;
}

static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status port_status;
	enum sci_status status = SCI_SUCCESS;
	struct isci_host *ihost = scic_to_ihost(scic);

	for (index = 0; index < scic->logical_port_entries; index++) {
		struct scic_sds_port *sci_port = &ihost->ports[index].sci;

		port_status = scic_sds_port_stop(sci_port);

		if ((port_status != SCI_SUCCESS) &&
		    (port_status != SCI_FAILURE_INVALID_STATE)) {
			status = SCI_FAILURE;

			dev_warn(scic_to_dev(scic),
				 "%s: Controller stop operation failed to "
				 "stop port %d because of status %d.\n",
				 __func__,
				 sci_port->logical_port_index,
				 port_status);
		}
	}

	return status;
}

static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status status;
	enum sci_status device_status;

	status = SCI_SUCCESS;

	for (index = 0; index < scic->remote_node_entries; index++) {
		if (scic->device_table[index] != NULL) {
			/* @todo What timeout value do we want to provide to
			 * this request?
			 */
			device_status = scic_remote_device_stop(scic->device_table[index], 0);

			if ((device_status != SCI_SUCCESS) &&
			    (device_status != SCI_FAILURE_INVALID_STATE)) {
				dev_warn(scic_to_dev(scic),
					 "%s: Controller stop operation failed "
					 "to stop device 0x%p because of "
					 "status %d.\n",
					 __func__,
					 scic->device_table[index], device_status);
			}
		}
	}

	return status;
}

static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	/* Stop all of the components for this controller */
	scic_sds_controller_stop_phys(scic);
	scic_sds_controller_stop_ports(scic);
	scic_sds_controller_stop_devices(scic);
}

static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	sci_del_timer(&scic->timer);
}

/**
 * scic_sds_controller_reset_hardware() -
 *
 * This method will reset the controller hardware.
 */
static void scic_sds_controller_reset_hardware(struct scic_sds_controller *scic)
{
	/* Disable interrupts so we don't take any spurious interrupts */
	scic_controller_disable_interrupts(scic);

	/* Reset the SCU */
	writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control);

	/* Delay for 1ms before clearing the CQP and UFQPR. */
	udelay(1000);

	/* The write to the CQGR clears the CQP */
	writel(0x00000000, &scic->smu_registers->completion_queue_get);

	/* The write to the UFQGP clears the UFQPR */
	writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
}

static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	scic_sds_controller_reset_hardware(scic);
	sci_base_state_machine_change_state(&scic->state_machine,
					    SCI_BASE_CONTROLLER_STATE_RESET);
}

static const struct sci_base_state scic_sds_controller_state_table[] = {
	[SCI_BASE_CONTROLLER_STATE_INITIAL] = {
		.enter_state = scic_sds_controller_initial_state_enter,
	},
	[SCI_BASE_CONTROLLER_STATE_RESET] = {},
	[SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {},
	[SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {},
	[SCI_BASE_CONTROLLER_STATE_STARTING] = {
		.exit_state = scic_sds_controller_starting_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_READY] = {
		.enter_state = scic_sds_controller_ready_state_enter,
		.exit_state = scic_sds_controller_ready_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_RESETTING] = {
		.enter_state = scic_sds_controller_resetting_state_enter,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPING] = {
		.enter_state = scic_sds_controller_stopping_state_enter,
		.exit_state = scic_sds_controller_stopping_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPED] = {},
	[SCI_BASE_CONTROLLER_STATE_FAILED] = {}
};

static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic)
{
	/* these defaults are overridden by the platform / firmware */
	struct isci_host *ihost = scic_to_ihost(scic);
	u16 index;

	/* Default to APC mode. */
	scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;

	/* Default to 1 concurrent device spin up. */
	scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1;

	/* Default to no SSC operation. */
	scic->oem_parameters.sds1.controller.do_enable_ssc = false;

	/* Initialize all of the port parameter information to narrow ports. */
	for (index = 0; index < SCI_MAX_PORTS; index++) {
		scic->oem_parameters.sds1.ports[index].phy_mask = 0;
	}

	/* Initialize all of the phy parameter information. */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		/* Default to 6G (i.e. Gen 3) for now. */
		scic->user_parameters.sds1.phys[index].max_speed_generation = 3;

		/* the frequencies cannot be 0 */
		scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f;
		scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff;
		scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;

		/*
		 * Previous Vitesse based expanders had an arbitration issue that
		 * is worked around by having the upper 32-bits of SAS address
		 * with a value greater than the Vitesse company identifier.
		 * Hence, usage of 0x5FCFFFFF.
		 */
		scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id;
		scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF;
	}

	scic->user_parameters.sds1.stp_inactivity_timeout = 5;
	scic->user_parameters.sds1.ssp_inactivity_timeout = 5;
	scic->user_parameters.sds1.stp_max_occupancy_timeout = 5;
	scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20;
	scic->user_parameters.sds1.no_outbound_task_timeout = 20;
}

static void controller_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), timer);
	struct isci_host *ihost = scic_to_ihost(scic);
	struct sci_base_state_machine *sm = &scic->state_machine;
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STARTING)
		scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT);
	else if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STOPPING) {
		sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_FAILED);
		isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
	} else	/* @todo Now what do we want to do in this case? */
		dev_err(scic_to_dev(scic),
			"%s: Controller timer fired when controller was not "
			"in a state being timed.\n",
			__func__);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

/**
 * scic_controller_construct() - This method will attempt to construct a
 *    controller object utilizing the supplied parameter information.
 * @c: This parameter specifies the controller to be constructed.
 * @scu_base: mapped base address of the scu registers
 * @smu_base: mapped base address of the smu registers
 *
 * Indicate if the controller was successfully constructed or if it failed in
 * some way. SCI_SUCCESS This value is returned if the controller was
 * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned
 * if the interrupt coalescence timer may cause SAS compliance issues for SMP
 * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE
 * This value is returned if the controller does not support the supplied type.
 * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the
 * controller does not support the supplied initialization data version.
 */
static enum sci_status scic_controller_construct(struct scic_sds_controller *scic,
						 void __iomem *scu_base,
						 void __iomem *smu_base)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	u8 i;

	sci_base_state_machine_construct(&scic->state_machine,
					 scic_sds_controller_state_table,
					 SCI_BASE_CONTROLLER_STATE_INITIAL);

	sci_base_state_machine_start(&scic->state_machine);

	scic->scu_registers = scu_base;
	scic->smu_registers = smu_base;

	scic_sds_port_configuration_agent_construct(&scic->port_agent);

	/* Construct the ports for this controller */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		scic_sds_port_construct(&ihost->ports[i].sci, i, scic);
	scic_sds_port_construct(&ihost->ports[i].sci, SCIC_SDS_DUMMY_PORT, scic);

	/* Construct the phys for this controller */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		/* Add all the PHYs to the dummy port */
		scic_sds_phy_construct(&ihost->phys[i].sci,
				       &ihost->ports[SCI_MAX_PORTS].sci, i);
	}

	scic->invalid_phy_mask = 0;

	sci_init_timer(&scic->timer, controller_timeout);

	/* Set the default maximum values */
	scic->completion_event_entries = SCU_EVENT_COUNT;
	scic->completion_queue_entries = SCU_COMPLETION_QUEUE_COUNT;
	scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
	scic->logical_port_entries = SCI_MAX_PORTS;
	scic->task_context_entries = SCU_IO_REQUEST_COUNT;
	scic->uf_control.buffers.count = SCU_UNSOLICITED_FRAME_COUNT;
	scic->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT;

	/* Initialize the User and OEM parameters to default values. */
	scic_sds_controller_set_default_config_parameters(scic);

	return scic_controller_reset(scic);
}
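
/*
 * Hedged usage sketch (the construct call actually lives in the init/probe
 * path outside this file; error handling here is illustrative only):
 *
 *	enum sci_status status;
 *
 *	status = scic_controller_construct(&ihost->sci, scu_base(ihost),
 *					   smu_base(ihost));
 *	if (status != SCI_SUCCESS)
 *		return -ENODEV;
 */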

int scic_oem_parameters_validate(struct scic_sds_oem_params *oem)
{
	int i;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
			return -EINVAL;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		if (oem->phys[i].sas_address.high == 0 &&
		    oem->phys[i].sas_address.low == 0)
			return -EINVAL;

	if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
		for (i = 0; i < SCI_MAX_PHYS; i++)
			if (oem->ports[i].phy_mask != 0)
				return -EINVAL;
	} else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
		u8 phy_mask = 0;

		for (i = 0; i < SCI_MAX_PHYS; i++)
			phy_mask |= oem->ports[i].phy_mask;

		if (phy_mask == 0)
			return -EINVAL;
	} else
		return -EINVAL;

	if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
		return -EINVAL;

	return 0;
}
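
/*
 * Illustrative example of a configuration this validator accepts in manual
 * (MPC) mode, assuming a four-phy controller: one wide port owning phys
 * 0-1 and a narrow port on phy 2.  Only the phy masks are sketched here; a
 * real OEM block must also supply a non-zero SAS address per phy.
 *
 *	oem->controller.mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
 *	oem->ports[0].phy_mask = 0x3;	// phys 0 and 1
 *	oem->ports[1].phy_mask = 0x4;	// phy 2
 *	oem->ports[2].phy_mask = 0x0;
 *	oem->ports[3].phy_mask = 0x0;
 */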
1908 static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic,
1909 union scic_oem_parameters *scic_parms)
1911 u32 state = scic->state_machine.current_state_id;
1913 if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
1914 state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
1915 state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
1917 if (scic_oem_parameters_validate(&scic_parms->sds1))
1918 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
		scic->oem_parameters.sds1 = scic_parms->sds1;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}
void scic_oem_parameters_get(
	struct scic_sds_controller *scic,
	union scic_oem_parameters *scic_parms)
{
	memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms));
}
static enum sci_status scic_sds_controller_initialize_phy_startup(struct scic_sds_controller *scic)
{
	struct isci_host *ihost = scic_to_ihost(scic);

	scic->phy_startup_timer = isci_timer_create(ihost,
						    scic,
						    scic_sds_controller_phy_startup_timeout_handler);

	if (scic->phy_startup_timer == NULL)
		return SCI_FAILURE_INSUFFICIENT_RESOURCES;

	scic->next_phy_to_start = 0;
	scic->phy_startup_timer_pending = false;

	return SCI_SUCCESS;
}
static void power_control_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), power_control.timer);
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_phy *sci_phy;
	unsigned long flags;
	u8 i;
1961 spin_lock_irqsave(&ihost->scic_lock, flags);
1966 scic->power_control.phys_granted_power = 0;
	if (scic->power_control.phys_waiting == 0) {
		scic->power_control.timer_started = false;
		goto done;
	}
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		if (scic->power_control.phys_waiting == 0)
			break;

		sci_phy = scic->power_control.requesters[i];
		if (sci_phy == NULL)
			continue;

		if (scic->power_control.phys_granted_power >=
		    scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up)
			break;

		scic->power_control.requesters[i] = NULL;
		scic->power_control.phys_waiting--;
		scic->power_control.phys_granted_power++;
		scic_sds_phy_consume_power_handler(sci_phy);
	}
	/*
	 * It doesn't matter if the power list is empty, we need to start the
	 * timer in case another phy becomes ready.
	 */
	sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
	scic->power_control.timer_started = true;

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
/**
 * This method inserts the phy into the stagger spinup control queue.
 * @scic: controller owning the power-control queue
 * @sci_phy: the phy requesting power (spin-up)
 */
void scic_sds_controller_power_control_queue_insert(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy)
{
	BUG_ON(sci_phy == NULL);
2015 if (scic->power_control.phys_granted_power <
2016 scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
2017 scic->power_control.phys_granted_power++;
		scic_sds_phy_consume_power_handler(sci_phy);

		/*
		 * stop and start the power_control timer. When the timer fires,
		 * the no_of_phys_granted_power will be set to 0
		 */
		if (scic->power_control.timer_started)
			sci_del_timer(&scic->power_control.timer);

		sci_mod_timer(&scic->power_control.timer,
			      SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
		scic->power_control.timer_started = true;
	} else {
		/* Add the phy to the waiting list */
		scic->power_control.requesters[sci_phy->phy_index] = sci_phy;
		scic->power_control.phys_waiting++;
	}
}
/**
 * This method removes the phy from the stagger spinup control queue.
 * @scic: controller owning the power-control queue
 * @sci_phy: the phy to remove
 */
void scic_sds_controller_power_control_queue_remove(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy)
{
	BUG_ON(sci_phy == NULL);

	if (scic->power_control.requesters[sci_phy->phy_index] != NULL) {
		scic->power_control.phys_waiting--;
	}

	scic->power_control.requesters[sci_phy->phy_index] = NULL;
}
2057 #define AFE_REGISTER_WRITE_DELAY 10
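/* The delay is consumed via udelay() after every AFE register write below,
 * so the value is in microseconds. */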
2059 /* Initialize the AFE for this phy index. We need to read the AFE setup from
2060 * the OEM parameters
static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic)
{
	const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
	u32 afe_status;
	u32 phy_id;
2068 /* Clear DFX Status registers */
2069 writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0);
2070 udelay(AFE_REGISTER_WRITE_DELAY);
2073 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
2074 * Timer, PM Stagger Timer */
2075 writel(0x0007BFFF, &scic->scu_registers->afe.afe_pmsn_master_control2);
2076 udelay(AFE_REGISTER_WRITE_DELAY);
	/* Configure bias currents to normal */
	if (is_a0())
		writel(0x00005500, &scic->scu_registers->afe.afe_bias_control);
	else if (is_a2())
		writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control);
	else if (is_b0())
		writel(0x00005F00, &scic->scu_registers->afe.afe_bias_control);

	udelay(AFE_REGISTER_WRITE_DELAY);
	/* Enable the PLL */
	if (is_b0())
		writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0);
	else
		writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0);
2095 udelay(AFE_REGISTER_WRITE_DELAY);
	/* Wait for the PLL to lock */
	do {
		afe_status = readl(
			&scic->scu_registers->afe.afe_common_block_status);
		udelay(AFE_REGISTER_WRITE_DELAY);
	} while ((afe_status & 0x00001000) == 0);
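	/* Note: this polls the PLL-locked status bit (0x00001000) with no
	 * timeout; a PLL that never locks would hang initialization here. */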
2103 if (is_a0() || is_a2()) {
2104 /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
2105 writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}
2109 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
2110 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
2113 /* Configure transmitter SSC parameters */
2114 writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
2115 udelay(AFE_REGISTER_WRITE_DELAY);
		/*
		 * All defaults, except the Receive Word Alignment/Comma Detect
		 * Enable....(0xe800) */
2120 writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
2121 udelay(AFE_REGISTER_WRITE_DELAY);
2123 writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
2124 udelay(AFE_REGISTER_WRITE_DELAY);
		/*
		 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
		 * & increase TX int & ext bias 20%....(0xe85c) */
		if (is_a0())
			writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else if (is_a2())
			writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else {
			/* Power down TX and RX (PWRDNTX and PWRDNRX) */
			writel(0x000003d7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and
			 * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c) */
			writel(0x000003d4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);
2146 if (is_a0() || is_a2()) {
2147 /* Enable TX equalization (0xe824) */
2148 writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
		 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
2155 writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
2156 udelay(AFE_REGISTER_WRITE_DELAY);
		/* Leave DFE/FFE on */
		if (is_a0())
			writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else if (is_a2())
			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else {
			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		}

		udelay(AFE_REGISTER_WRITE_DELAY);
2171 writel(oem_phy->afe_tx_amp_control0,
2172 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
2173 udelay(AFE_REGISTER_WRITE_DELAY);
2175 writel(oem_phy->afe_tx_amp_control1,
2176 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
2177 udelay(AFE_REGISTER_WRITE_DELAY);
2179 writel(oem_phy->afe_tx_amp_control2,
2180 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
2181 udelay(AFE_REGISTER_WRITE_DELAY);
2183 writel(oem_phy->afe_tx_amp_control3,
2184 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Transfer control to the PEs */
	writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);
}
2193 static enum sci_status scic_controller_set_mode(struct scic_sds_controller *scic,
						enum sci_controller_mode operating_mode)
{
	enum sci_status status = SCI_SUCCESS;

	if ((scic->state_machine.current_state_id ==
	     SCI_BASE_CONTROLLER_STATE_INITIALIZING) ||
	    (scic->state_machine.current_state_id ==
	     SCI_BASE_CONTROLLER_STATE_INITIALIZED)) {
		switch (operating_mode) {
		case SCI_MODE_SPEED:
			scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
			scic->task_context_entries = SCU_IO_REQUEST_COUNT;
			scic->uf_control.buffers.count =
				SCU_UNSOLICITED_FRAME_COUNT;
			scic->completion_event_entries = SCU_EVENT_COUNT;
			scic->completion_queue_entries =
				SCU_COMPLETION_QUEUE_COUNT;
			break;

		case SCI_MODE_SIZE:
			scic->remote_node_entries = SCI_MIN_REMOTE_DEVICES;
			scic->task_context_entries = SCI_MIN_IO_REQUESTS;
			scic->uf_control.buffers.count =
				SCU_MIN_UNSOLICITED_FRAMES;
			scic->completion_event_entries = SCU_MIN_EVENTS;
			scic->completion_queue_entries =
				SCU_MIN_COMPLETION_QUEUE_ENTRIES;
			break;

		default:
			status = SCI_FAILURE_INVALID_PARAMETER_VALUE;
			break;
		}
	} else
		status = SCI_FAILURE_INVALID_STATE;

	return status;
}
static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic)
{
	sci_init_timer(&scic->power_control.timer, power_control_timeout);
2237 memset(scic->power_control.requesters, 0,
2238 sizeof(scic->power_control.requesters));
2240 scic->power_control.phys_waiting = 0;
	scic->power_control.phys_granted_power = 0;
}
static enum sci_status scic_controller_initialize(struct scic_sds_controller *scic)
{
	struct sci_base_state_machine *sm = &scic->state_machine;
	enum sci_status result = SCI_SUCCESS;
	struct isci_host *ihost = scic_to_ihost(scic);
	u32 index, state;
2251 if (scic->state_machine.current_state_id !=
2252 SCI_BASE_CONTROLLER_STATE_RESET) {
2253 dev_warn(scic_to_dev(scic),
2254 "SCIC Controller initialize operation requested "
2255 "in invalid state\n");
2256 return SCI_FAILURE_INVALID_STATE;
2259 sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_INITIALIZING);
2261 scic_sds_controller_initialize_phy_startup(scic);
2263 scic_sds_controller_initialize_power_control(scic);
	/*
	 * There is nothing to do here for B0 since we do not have to
	 * program the AFE registers.
	 * @todo The AFE settings are supposed to be correct for the B0 but
	 *       presently they seem to be wrong.
	 */
	scic_sds_controller_afe_initialization(scic);
	if (result == SCI_SUCCESS) {
		u32 status;
		u32 terminate_loop;

		/* Take the hardware out of reset */
		writel(0, &scic->smu_registers->soft_reset_control);

		/* @todo Provide a meaningful error code for hardware failure;
		 * e.g. result = SCI_FAILURE_CONTROLLER_HARDWARE; */
		result = SCI_FAILURE;
		terminate_loop = 100;

		while (terminate_loop-- && (result != SCI_SUCCESS)) {
			/* Loop until the hardware reports success */
			udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
			status = readl(&scic->smu_registers->control_status);

			if ((status & SCU_RAM_INIT_COMPLETED) ==
					SCU_RAM_INIT_COMPLETED)
				result = SCI_SUCCESS;
		}
	}
2296 if (result == SCI_SUCCESS) {
2297 u32 max_supported_ports;
2298 u32 max_supported_devices;
2299 u32 max_supported_io_requests;
2300 u32 device_context_capacity;
		/*
		 * Determine the actual device capacities that the
		 * hardware will support */
2305 device_context_capacity =
2306 readl(&scic->smu_registers->device_context_capacity);
2309 max_supported_ports = smu_dcc_get_max_ports(device_context_capacity);
2310 max_supported_devices = smu_dcc_get_max_remote_node_context(device_context_capacity);
2311 max_supported_io_requests = smu_dcc_get_max_task_context(device_context_capacity);
		/*
		 * Make all PEs that are unassigned match up with the
		 * logical ports
		 */
2317 for (index = 0; index < max_supported_ports; index++) {
2318 struct scu_port_task_scheduler_group_registers __iomem
2319 *ptsg = &scic->scu_registers->peg0.ptsg;
			writel(index, &ptsg->protocol_engine[index]);
		}
2324 /* Record the smaller of the two capacity values */
2325 scic->logical_port_entries =
2326 min(max_supported_ports, scic->logical_port_entries);
2328 scic->task_context_entries =
2329 min(max_supported_io_requests,
2330 scic->task_context_entries);
2332 scic->remote_node_entries =
2333 min(max_supported_devices, scic->remote_node_entries);
		/*
		 * Now that we have the correct hardware reported minimum values
		 * build the MDL for the controller. Default to a performance
		 * configuration.
		 */
		scic_controller_set_mode(scic, SCI_MODE_SPEED);
	}
2343 /* Initialize hardware PCI Relaxed ordering in DMA engines */
2344 if (result == SCI_SUCCESS) {
2345 u32 dma_configuration;
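		/* "Relaxed ordering" here is the PCI(e) relaxed-ordering
		 * attribute: it allows the DMA engines' posted writes to pass
		 * earlier writes, trading strict ordering for throughput. */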
		/* Configure the payload DMA */
		dma_configuration =
			readl(&scic->scu_registers->sdma.pdma_configuration);
2350 dma_configuration |=
2351 SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2352 writel(dma_configuration,
2353 &scic->scu_registers->sdma.pdma_configuration);
		/* Configure the control DMA */
		dma_configuration =
			readl(&scic->scu_registers->sdma.cdma_configuration);
2358 dma_configuration |=
2359 SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2360 writel(dma_configuration,
		       &scic->scu_registers->sdma.cdma_configuration);
	}
	/*
	 * Initialize the PHYs before the PORTs because the PHY registers
	 * are accessed during the port initialization.
	 */
	if (result == SCI_SUCCESS) {
		/* Initialize the phys */
		for (index = 0;
		     (result == SCI_SUCCESS) && (index < SCI_MAX_PHYS);
		     index++) {
			result = scic_sds_phy_initialize(
				&ihost->phys[index].sci,
				&scic->scu_registers->peg0.pe[index].tl,
				&scic->scu_registers->peg0.pe[index].ll);
		}
	}
	if (result == SCI_SUCCESS) {
		/* Initialize the logical ports */
		for (index = 0;
		     (index < scic->logical_port_entries) &&
		     (result == SCI_SUCCESS);
		     index++) {
			result = scic_sds_port_initialize(
				&ihost->ports[index].sci,
				&scic->scu_registers->peg0.ptsg.port[index],
				&scic->scu_registers->peg0.ptsg.protocol_engine,
				&scic->scu_registers->peg0.viit[index]);
		}
	}
2394 if (result == SCI_SUCCESS)
		result = scic_sds_port_configuration_agent_initialize(
				scic,
				&scic->port_agent);
2399 /* Advance the controller state machine */
2400 if (result == SCI_SUCCESS)
2401 state = SCI_BASE_CONTROLLER_STATE_INITIALIZED;
	else
		state = SCI_BASE_CONTROLLER_STATE_FAILED;
	sci_base_state_machine_change_state(sm, state);

	return result;
}
static enum sci_status scic_user_parameters_set(
	struct scic_sds_controller *scic,
	union scic_user_parameters *scic_parms)
{
	u32 state = scic->state_machine.current_state_id;
2415 if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
2416 state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
2417 state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
		u16 index;

		/*
		 * Validate the user parameters. If they are not legal, then
		 * return a failure.
		 */
		for (index = 0; index < SCI_MAX_PHYS; index++) {
2425 struct sci_phy_user_params *user_phy;
2427 user_phy = &scic_parms->sds1.phys[index];
2429 if (!((user_phy->max_speed_generation <=
2430 SCIC_SDS_PARM_MAX_SPEED) &&
2431 (user_phy->max_speed_generation >
2432 SCIC_SDS_PARM_NO_SPEED)))
2433 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
			if (user_phy->in_connection_align_insertion_frequency <
					3)
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;

			if ((user_phy->in_connection_align_insertion_frequency <
					3) ||
			    (user_phy->align_insertion_frequency == 0) ||
			    (user_phy->notify_enable_spin_up_insertion_frequency ==
					0))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
		}
2448 if ((scic_parms->sds1.stp_inactivity_timeout == 0) ||
2449 (scic_parms->sds1.ssp_inactivity_timeout == 0) ||
2450 (scic_parms->sds1.stp_max_occupancy_timeout == 0) ||
2451 (scic_parms->sds1.ssp_max_occupancy_timeout == 0) ||
2452 (scic_parms->sds1.no_outbound_task_timeout == 0))
2453 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
		memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms));

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}
static int scic_controller_mem_init(struct scic_sds_controller *scic)
{
	struct device *dev = scic_to_dev(scic);
2466 dma_addr_t dma_handle;
2467 enum sci_status result;
2469 scic->completion_queue = dmam_alloc_coherent(dev,
2470 scic->completion_queue_entries * sizeof(u32),
2471 &dma_handle, GFP_KERNEL);
	if (!scic->completion_queue)
		return -ENOMEM;
2475 writel(lower_32_bits(dma_handle),
2476 &scic->smu_registers->completion_queue_lower);
2477 writel(upper_32_bits(dma_handle),
2478 &scic->smu_registers->completion_queue_upper);
2480 scic->remote_node_context_table = dmam_alloc_coherent(dev,
2481 scic->remote_node_entries *
2482 sizeof(union scu_remote_node_context),
2483 &dma_handle, GFP_KERNEL);
	if (!scic->remote_node_context_table)
		return -ENOMEM;
2487 writel(lower_32_bits(dma_handle),
2488 &scic->smu_registers->remote_node_context_lower);
2489 writel(upper_32_bits(dma_handle),
2490 &scic->smu_registers->remote_node_context_upper);
2492 scic->task_context_table = dmam_alloc_coherent(dev,
2493 scic->task_context_entries *
2494 sizeof(struct scu_task_context),
2495 &dma_handle, GFP_KERNEL);
	if (!scic->task_context_table)
		return -ENOMEM;
2499 writel(lower_32_bits(dma_handle),
2500 &scic->smu_registers->host_task_table_lower);
2501 writel(upper_32_bits(dma_handle),
2502 &scic->smu_registers->host_task_table_upper);
	result = scic_sds_unsolicited_frame_control_construct(scic);
	if (result)
		return -ENOMEM;

	/*
	 * Inform the silicon as to the location of the UF headers and
	 * address table.
	 */
2512 writel(lower_32_bits(scic->uf_control.headers.physical_address),
2513 &scic->scu_registers->sdma.uf_header_base_address_lower);
2514 writel(upper_32_bits(scic->uf_control.headers.physical_address),
2515 &scic->scu_registers->sdma.uf_header_base_address_upper);
2517 writel(lower_32_bits(scic->uf_control.address_table.physical_address),
2518 &scic->scu_registers->sdma.uf_address_table_lower);
2519 writel(upper_32_bits(scic->uf_control.address_table.physical_address),
	       &scic->scu_registers->sdma.uf_address_table_upper);

	return 0;
}
int isci_host_init(struct isci_host *isci_host)
{
	int err = 0, i;
2528 enum sci_status status;
2529 union scic_oem_parameters oem;
2530 union scic_user_parameters scic_user_params;
2531 struct isci_pci_info *pci_info = to_pci_info(isci_host->pdev);
2533 isci_timer_list_construct(isci_host);
2535 spin_lock_init(&isci_host->state_lock);
2536 spin_lock_init(&isci_host->scic_lock);
2537 spin_lock_init(&isci_host->queue_lock);
2538 init_waitqueue_head(&isci_host->eventq);
2540 isci_host_change_state(isci_host, isci_starting);
2541 isci_host->can_queue = ISCI_CAN_QUEUE_VAL;
2543 status = scic_controller_construct(&isci_host->sci, scu_base(isci_host),
2544 smu_base(isci_host));
2546 if (status != SCI_SUCCESS) {
2547 dev_err(&isci_host->pdev->dev,
			"%s: scic_controller_construct failed - status = %x\n",
			__func__,
			status);
		return -ENODEV;
	}
2554 isci_host->sas_ha.dev = &isci_host->pdev->dev;
2555 isci_host->sas_ha.lldd_ha = isci_host;
	/*
	 * grab initial values stored in the controller object for OEM and USER
	 * parameters
	 */
	isci_user_parameters_get(isci_host, &scic_user_params);
	status = scic_user_parameters_set(&isci_host->sci,
					  &scic_user_params);
2564 if (status != SCI_SUCCESS) {
2565 dev_warn(&isci_host->pdev->dev,
			 "%s: scic_user_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}
2571 scic_oem_parameters_get(&isci_host->sci, &oem);
2573 /* grab any OEM parameters specified in orom */
2574 if (pci_info->orom) {
		status = isci_parse_oem_parameters(&oem,
						   pci_info->orom,
						   isci_host->id);
2578 if (status != SCI_SUCCESS) {
2579 dev_warn(&isci_host->pdev->dev,
				 "parsing firmware oem parameters failed\n");
			return -ENODEV;
		}
	}
2585 status = scic_oem_parameters_set(&isci_host->sci, &oem);
2586 if (status != SCI_SUCCESS) {
2587 dev_warn(&isci_host->pdev->dev,
			 "%s: scic_oem_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}
2593 tasklet_init(&isci_host->completion_tasklet,
2594 isci_host_completion_routine, (unsigned long)isci_host);
2596 INIT_LIST_HEAD(&isci_host->requests_to_complete);
2597 INIT_LIST_HEAD(&isci_host->requests_to_errorback);
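	/* The core initialization below is performed under scic_lock; the
	 * rationale (inferred here, not stated in the original) is that the
	 * state it manipulates is later shared with the IRQ/tasklet paths. */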
2599 spin_lock_irq(&isci_host->scic_lock);
2600 status = scic_controller_initialize(&isci_host->sci);
2601 spin_unlock_irq(&isci_host->scic_lock);
2602 if (status != SCI_SUCCESS) {
2603 dev_warn(&isci_host->pdev->dev,
			 "%s: scic_controller_initialize failed -"
			 " status = 0x%x\n",
			 __func__, status);
		return -ENODEV;
	}
	err = scic_controller_mem_init(&isci_host->sci);
	if (err)
		return err;
2614 isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev,
2615 sizeof(struct isci_request),
2616 SLAB_HWCACHE_ALIGN, 0);
	if (!isci_host->dma_pool)
		return -ENOMEM;
2621 for (i = 0; i < SCI_MAX_PORTS; i++)
2622 isci_port_init(&isci_host->ports[i], isci_host, i);
2624 for (i = 0; i < SCI_MAX_PHYS; i++)
2625 isci_phy_init(&isci_host->phys[i], isci_host, i);
2627 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
2628 struct isci_remote_device *idev = &isci_host->devices[i];
2630 INIT_LIST_HEAD(&idev->reqs_in_process);
2631 INIT_LIST_HEAD(&idev->node);
		spin_lock_init(&idev->state_lock);
	}

	return 0;
}
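/* Note: the allocations above use the managed devm/dmam interfaces
 * (dmam_alloc_coherent(), dmam_pool_create()), so the early-return error
 * paths in this function need no explicit cleanup. */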
2638 void scic_sds_controller_link_up(struct scic_sds_controller *scic,
				 struct scic_sds_port *port, struct scic_sds_phy *phy)
{
	switch (scic->state_machine.current_state_id) {
2642 case SCI_BASE_CONTROLLER_STATE_STARTING:
2643 scic_sds_controller_phy_timer_stop(scic);
		scic->port_agent.link_up_handler(scic, &scic->port_agent,
						 port, phy);
		scic_sds_controller_start_next_phy(scic);
		break;
2648 case SCI_BASE_CONTROLLER_STATE_READY:
		scic->port_agent.link_up_handler(scic, &scic->port_agent,
						 port, phy);
		break;
	default:
2653 dev_dbg(scic_to_dev(scic),
2654 "%s: SCIC Controller linkup event from phy %d in "
2655 "unexpected state %d\n", __func__, phy->phy_index,
			scic->state_machine.current_state_id);
	}
}
2660 void scic_sds_controller_link_down(struct scic_sds_controller *scic,
				   struct scic_sds_port *port, struct scic_sds_phy *phy)
{
	switch (scic->state_machine.current_state_id) {
2664 case SCI_BASE_CONTROLLER_STATE_STARTING:
2665 case SCI_BASE_CONTROLLER_STATE_READY:
		scic->port_agent.link_down_handler(scic, &scic->port_agent,
						   port, phy);
		break;
	default:
2670 dev_dbg(scic_to_dev(scic),
2671 "%s: SCIC Controller linkdown event from phy %d in "
			"unexpected state %d\n",
			__func__,
			phy->phy_index,
			scic->state_machine.current_state_id);
	}
}
/**
 * This is a helper method to determine if any remote devices on this
 * controller are still in the stopping state.
 */
2684 static bool scic_sds_controller_has_remote_devices_stopping(
	struct scic_sds_controller *controller)
{
	u32 index;

	for (index = 0; index < controller->remote_node_entries; index++) {
		if ((controller->device_table[index] != NULL) &&
		    (controller->device_table[index]->state_machine.current_state_id
		     == SCI_BASE_REMOTE_DEVICE_STATE_STOPPING))
			return true;
	}

	return false;
}
/**
 * This method is called by the remote device to inform the controller
 * object that the remote device has stopped.
 */
2703 void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic,
					       struct scic_sds_remote_device *sci_dev)
{
	if (scic->state_machine.current_state_id !=
2707 SCI_BASE_CONTROLLER_STATE_STOPPING) {
2708 dev_dbg(scic_to_dev(scic),
2709 "SCIC Controller 0x%p remote device stopped event "
			"from device 0x%p in unexpected state %d\n",
			scic, sci_dev,
			scic->state_machine.current_state_id);
		return;
	}
2716 if (!scic_sds_controller_has_remote_devices_stopping(scic)) {
2717 sci_base_state_machine_change_state(&scic->state_machine,
			SCI_BASE_CONTROLLER_STATE_STOPPED);
	}
}
/**
 * This method will write the request value to the SCU PCP register. The
 * method is used to suspend/resume ports, devices, and phys.
 * @scic: the controller whose post context port is written
 * @request: the post request (context) value to write
 */
2729 void scic_sds_controller_post_request(
	struct scic_sds_controller *scic,
	u32 request)
{
2733 dev_dbg(scic_to_dev(scic),
		"%s: SCIC Controller 0x%p post request 0x%08x\n",
		__func__, scic, request);

	writel(request, &scic->smu_registers->post_context_port);
}
/**
 * This method will copy the soft copy of the task context into the physical
2744 * memory accessible by the controller.
 * @scic: This parameter specifies the controller for which to copy
 *    the task context.
2747 * @sci_req: This parameter specifies the request for which the task
2748 * context is being copied.
2750 * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
2751 * the physical memory version of the task context. Thus, all subsequent
 * updates to the task context are performed in the TC table (i.e. DMAable
 * memory).
 */
2755 void scic_sds_controller_copy_task_context(
2756 struct scic_sds_controller *scic,
	struct scic_sds_request *sci_req)
{
2759 struct scu_task_context *task_context_buffer;
2761 task_context_buffer = scic_sds_controller_get_task_context_buffer(
2762 scic, sci_req->io_tag);
2764 memcpy(task_context_buffer,
2765 sci_req->task_context_buffer,
2766 offsetof(struct scu_task_context, sgl_snapshot_ac));
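	/* Everything from the sgl_snapshot_ac field onward is deliberately not
	 * copied; that region of the TC is maintained by the silicon. */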
	/*
	 * Now that the soft copy of the TC has been copied into the TC
	 * table accessible by the silicon, any further changes to the TC
	 * (e.g. TC termination) occur in the appropriate location. */
	sci_req->task_context_buffer = task_context_buffer;
}
/**
 * This method returns the task context buffer for the given io tag,
 * or NULL if the tag's index is out of range.
 *
 * struct scu_task_context*
 */
2782 struct scu_task_context *scic_sds_controller_get_task_context_buffer(
	struct scic_sds_controller *scic,
	u16 io_tag)
{
2786 u16 task_index = scic_sds_io_tag_get_index(io_tag);
2788 if (task_index < scic->task_context_entries) {
		return &scic->task_context_table[task_index];
	}

	return NULL;
}
struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
					     u16 io_tag)
{
	u16 task_index;
	u16 task_sequence;
2801 task_index = scic_sds_io_tag_get_index(io_tag);
2803 if (task_index < scic->task_context_entries) {
2804 if (scic->io_request_table[task_index] != NULL) {
2805 task_sequence = scic_sds_io_tag_get_sequence(io_tag);
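			/* An io_tag encodes both a task-context index and a
			 * sequence number; a sequence mismatch means the tag
			 * is stale (its slot was freed and re-allocated). */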
2807 if (task_sequence == scic->io_request_sequence[task_index]) {
				return scic->io_request_table[task_index];
			}
		}
	}

	return NULL;
}
/**
 * This method allocates a remote node index and reserves the remote node
 * context space for use. This method can fail if there are no more remote
 * node indices available.
 * @scic: This is the controller object which contains the set of
 *    free remote node ids
 * @sci_dev: This is the device object which is requesting a remote node
 *    id
 * @node_id: This is the remote node id that is assigned to the device if one
 *    is available
 *
 * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no available
 * remote node indices.
 */
2830 enum sci_status scic_sds_controller_allocate_remote_node_context(
2831 struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 *node_id)
{
	u16 node_index;
	u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);
2838 node_index = scic_sds_remote_node_table_allocate_remote_node(
		&scic->available_remote_nodes, remote_node_count
		);
2842 if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
2843 scic->device_table[node_index] = sci_dev;
		*node_id = node_index;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}
/**
 * This method frees the remote node index back to the available pool. Once
 * this is done the remote node context buffer is no longer valid and can
 * not be used.
 */
2862 void scic_sds_controller_free_remote_node_context(
2863 struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 node_id)
{
2867 u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);
2869 if (scic->device_table[node_id] == sci_dev) {
2870 scic->device_table[node_id] = NULL;
2872 scic_sds_remote_node_table_release_remote_node_index(
			&scic->available_remote_nodes, remote_node_count, node_id
			);
	}
}
/**
 * This method returns the union scu_remote_node_context for the specified
 * remote node id.
 *
 * union scu_remote_node_context*
 */
2886 union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
	struct scic_sds_controller *scic,
	u16 node_id)
{
	if ((node_id < scic->remote_node_entries) &&
	    (scic->device_table[node_id] != NULL))
		return &scic->remote_node_context_table[node_id];

	return NULL;
}
/**
 * @response_buffer: This is the buffer into which the D2H register FIS will be
 *    constructed.
 * @frame_header: This is the frame header returned by the hardware.
 * @frame_buffer: This is the frame buffer returned by the hardware.
 *
 * This method will combine the frame header and frame buffer to create a SATA
 * D2H register FIS.
 */
2910 void scic_sds_controller_copy_sata_response(
	void *response_buffer,
	void *frame_header,
	void *frame_buffer)
{
2915 memcpy(response_buffer, frame_header, sizeof(u32));
	/* The first dword of the FIS arrives in the frame header; the rest
	 * arrives in the frame buffer, hence the split copy. */
	memcpy(response_buffer + sizeof(u32),
	       frame_buffer,
	       sizeof(struct dev_to_host_fis) - sizeof(u32));
}
/**
 * This method releases the frame. Once this is done the frame is available
 * for re-use by the hardware. The data contained in the frame header and
 * frame buffer is no longer valid. The UF queue get pointer is only updated
 * if UF control indicates this is appropriate.
 */
2931 void scic_sds_controller_release_frame(
	struct scic_sds_controller *scic,
	u32 frame_index)
{
	if (scic_sds_unsolicited_frame_control_release_frame(
			&scic->uf_control, frame_index))
		writel(scic->uf_control.get,
		       &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
}
/**
 * scic_controller_start_io() - This method is called by the SCI user to
2943 * send/start an IO request. If the method invocation is successful, then
2944 * the IO request has been queued to the hardware for processing.
2945 * @controller: the handle to the controller object for which to start an IO
2947 * @remote_device: the handle to the remote device object for which to start an
2949 * @io_request: the handle to the io request object to start.
2950 * @io_tag: This parameter specifies a previously allocated IO tag that the
2951 * user desires to be utilized for this request. This parameter is optional.
2952 * The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value
2953 * for this parameter.
2955 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
2956 * to ensure that each of the methods that may allocate or free available IO
2957 * tags are handled in a mutually exclusive manner. This method is one of said
2958 * methods requiring proper critical code section protection (e.g. semaphore,
2959 * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags. As a
2960 * result, it is expected the user will have set the NCQ tag field in the host
2961 * to device register FIS prior to calling this method. There is also a
2962 * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking
 * the scic_controller_start_io() method. See scic_controller_allocate_tag()
 * for more information on allocating a tag.
 *
 * Indicate if the controller successfully started the IO request. SCI_SUCCESS
 * is returned if the IO request was successfully started.
 */
enum sci_status scic_controller_start_io(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *req,
	u16 io_tag)
{
	enum sci_status status;
2976 if (scic->state_machine.current_state_id !=
2977 SCI_BASE_CONTROLLER_STATE_READY) {
2978 dev_warn(scic_to_dev(scic), "invalid state to start I/O");
2979 return SCI_FAILURE_INVALID_STATE;
2982 status = scic_sds_remote_device_start_io(scic, rdev, req);
	if (status != SCI_SUCCESS)
		return status;
2986 scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
	scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req));
	return SCI_SUCCESS;
}
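/*
 * Illustrative call sequence (a sketch only; request construction, locking,
 * and error handling are elided and assumed to be handled by the caller):
 *
 *	u16 tag = scic_controller_allocate_io_tag(scic);
 *	if (tag != SCI_CONTROLLER_INVALID_IO_TAG)
 *		status = scic_controller_start_io(scic, rdev, req, tag);
 */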
/**
 * scic_controller_terminate_request() - This method is called by the SCI Core
2993 * user to terminate an ongoing (i.e. started) core IO request. This does
2994 * not abort the IO request at the target, but rather removes the IO request
2995 * from the host controller.
2996 * @controller: the handle to the controller object for which to terminate a
2998 * @remote_device: the handle to the remote device object for which to
2999 * terminate a request.
3000 * @request: the handle to the io or task management request object to
3003 * Indicate if the controller successfully began the terminate process for the
3004 * IO request. SCI_SUCCESS if the terminate process was successfully started
 * for the request. Determine the failure situations and return values.
 */
3007 enum sci_status scic_controller_terminate_request(
3008 struct scic_sds_controller *scic,
3009 struct scic_sds_remote_device *rdev,
	struct scic_sds_request *req)
{
	enum sci_status status;
3014 if (scic->state_machine.current_state_id !=
3015 SCI_BASE_CONTROLLER_STATE_READY) {
3016 dev_warn(scic_to_dev(scic),
3017 "invalid state to terminate request\n");
3018 return SCI_FAILURE_INVALID_STATE;
3021 status = scic_sds_io_request_terminate(req);
	if (status != SCI_SUCCESS)
		return status;
	/*
	 * Utilize the original post context command and OR in the POST_TC_ABORT
	 * request sub-type.
	 */
3029 scic_sds_controller_post_request(scic,
3030 scic_sds_request_get_post_context(req) |
		SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
	return SCI_SUCCESS;
}
/**
 * scic_controller_complete_io() - This method will perform core specific
3037 * completion operations for an IO request. After this method is invoked,
3038 * the user should consider the IO request as invalid until it is properly
3039 * reused (i.e. re-constructed).
3040 * @controller: The handle to the controller object for which to complete the
3042 * @remote_device: The handle to the remote device object for which to complete
3044 * @io_request: the handle to the io request object to complete.
3046 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
3047 * to ensure that each of the methods that may allocate or free available IO
3048 * tags are handled in a mutually exclusive manner. This method is one of said
3049 * methods requiring proper critical code section protection (e.g. semaphore,
3050 * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
3051 * Core user, using the scic_controller_allocate_io_tag() method, then it is
3052 * the responsibility of the caller to invoke the scic_controller_free_io_tag()
3053 * method to free the tag (i.e. this method will not free the IO tag). Indicate
3054 * if the controller successfully completed the IO request. SCI_SUCCESS if the
 * completion process was successful.
 */
3057 enum sci_status scic_controller_complete_io(
3058 struct scic_sds_controller *scic,
3059 struct scic_sds_remote_device *rdev,
	struct scic_sds_request *request)
{
	enum sci_status status;
	u16 index;
3065 switch (scic->state_machine.current_state_id) {
3066 case SCI_BASE_CONTROLLER_STATE_STOPPING:
		/* XXX: Implement this function; for now fall through to the
		 * READY handling below */
	case SCI_BASE_CONTROLLER_STATE_READY:
		status = scic_sds_remote_device_complete_io(scic, rdev, request);
		if (status != SCI_SUCCESS)
			return status;

		index = scic_sds_io_tag_get_index(request->io_tag);
		scic->io_request_table[index] = NULL;
		return SCI_SUCCESS;
	default:
		dev_warn(scic_to_dev(scic), "invalid state to complete I/O");
		return SCI_FAILURE_INVALID_STATE;
	}
}
enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
3088 if (scic->state_machine.current_state_id !=
3089 SCI_BASE_CONTROLLER_STATE_READY) {
3090 dev_warn(scic_to_dev(scic), "invalid state to continue I/O");
3091 return SCI_FAILURE_INVALID_STATE;
3094 scic->io_request_table[scic_sds_io_tag_get_index(sci_req->io_tag)] = sci_req;
	scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(sci_req));
	return SCI_SUCCESS;
}
/**
 * scic_controller_start_task() - This method is called by the SCIC user to
3101 * send/start a framework task management request.
3102 * @controller: the handle to the controller object for which to start the task
3103 * management request.
3104 * @remote_device: the handle to the remote device object for which to start
3105 * the task management request.
3106 * @task_request: the handle to the task request object to start.
3107 * @io_tag: This parameter specifies a previously allocated IO tag that the
3108 * user desires to be utilized for this request. Note this not the io_tag
3109 * of the request being managed. It is to be utilized for the task request
3110 * itself. This parameter is optional. The user is allowed to supply
3111 * SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter.
3113 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
3114 * to ensure that each of the methods that may allocate or free available IO
3115 * tags are handled in a mutually exclusive manner. This method is one of said
3116 * methods requiring proper critical code section protection (e.g. semaphore,
3117 * spin-lock, etc.). - The user must synchronize this task with completion
 * queue processing. If they are not synchronized then it is possible for the
 * io requests that are being managed by the task request to complete before
 * the task request itself is started. See scic_controller_allocate_tag() for more
3121 * information on allocating a tag. Indicate if the controller successfully
3122 * started the IO request. SCI_TASK_SUCCESS if the task request was
3123 * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is
3124 * returned if there is/are task(s) outstanding that require termination or
 * completion before this request can succeed.
 */
3127 enum sci_task_status scic_controller_start_task(
3128 struct scic_sds_controller *scic,
3129 struct scic_sds_remote_device *rdev,
3130 struct scic_sds_request *req,
	u16 task_tag)
{
	enum sci_status status;
3135 if (scic->state_machine.current_state_id !=
3136 SCI_BASE_CONTROLLER_STATE_READY) {
3137 dev_warn(scic_to_dev(scic),
			 "%s: SCIC Controller starting task from invalid "
			 "state\n",
			 __func__);
		return SCI_TASK_FAILURE_INVALID_STATE;
	}
	status = scic_sds_remote_device_start_task(scic, rdev, req);
	switch (status) {
	case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
		scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
		/*
		 * We will let the framework know this task request started
		 * successfully, although the core is still working on starting
		 * the request (to post the TC when the RNC is resumed).
		 */
		return SCI_SUCCESS;
	case SCI_SUCCESS:
		scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
		scic_sds_controller_post_request(scic,
			scic_sds_request_get_post_context(req));
		break;
	default:
		break;
	}

	return status;
}
/**
 * scic_controller_allocate_io_tag() - This method will allocate a tag from the
3170 * pool of free IO tags. Direct allocation of IO tags by the SCI Core user
3171 * is optional. The scic_controller_start_io() method will allocate an IO
3172 * tag if this method is not utilized and the tag is not supplied to the IO
3173 * construct routine. Direct allocation of IO tags may provide additional
3174 * performance improvements in environments capable of supporting this usage
3175 * model. Additionally, direct allocation of IO tags also provides
3176 * additional flexibility to the SCI Core user. Specifically, the user may
3177 * retain IO tags across the lives of multiple IO requests.
3178 * @controller: the handle to the controller object for which to allocate the
3181 * IO tags are a protected resource. It is incumbent upon the SCI Core user to
3182 * ensure that each of the methods that may allocate or free available IO tags
3183 * are handled in a mutually exclusive manner. This method is one of said
3184 * methods requiring proper critical code section protection (e.g. semaphore,
3185 * spin-lock, etc.). An unsigned integer representing an available IO tag.
3186 * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no
3187 * currently available tags to be allocated. All return other values indicate a
3190 u16 scic_controller_allocate_io_tag(
	struct scic_sds_controller *scic)
{
	u16 task_context;
	u16 sequence_count;

	if (!sci_pool_empty(scic->tci_pool)) {
3197 sci_pool_get(scic->tci_pool, task_context);
3199 sequence_count = scic->io_request_sequence[task_context];
		return scic_sds_io_tag_construct(sequence_count, task_context);
	}

	return SCI_CONTROLLER_INVALID_IO_TAG;
}
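/*
 * Illustrative tag lifecycle (a sketch only; the mutual-exclusion requirement
 * described above is assumed to be provided by the caller):
 *
 *	u16 tag = scic_controller_allocate_io_tag(scic);
 *	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
 *		return;		// no tags available
 *	... use the tag with scic_controller_start_io() ...
 *	scic_controller_free_io_tag(scic, tag);
 */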
/**
 * scic_controller_free_io_tag() - This method will free an IO tag to the pool
3209 * of free IO tags. This method provides the SCI Core user more flexibility
3210 * with regards to IO tags. The user may desire to keep an IO tag after an
3211 * IO request has completed, because they plan on re-using the tag for a
3212 * subsequent IO request. This method is only legal if the tag was
3213 * allocated via scic_controller_allocate_io_tag().
3214 * @controller: This parameter specifies the handle to the controller object
3215 * for which to free/return the tag.
3216 * @io_tag: This parameter represents the tag to be freed to the pool of
3219 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
3220 * to ensure that each of the methods that may allocate or free available IO
3221 * tags are handled in a mutually exclusive manner. This method is one of said
3222 * methods requiring proper critical code section protection (e.g. semaphore,
3223 * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
3224 * Core user, using the scic_controller_allocate_io_tag() method, then it is
3225 * the responsibility of the caller to invoke this method to free the tag. This
3226 * method returns an indication of whether the tag was successfully put back
3227 * (freed) to the pool of available tags. SCI_SUCCESS This return value
3228 * indicates the tag was successfully placed into the pool of available IO
3229 * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag
 * is not a valid IO tag value.
 */
3232 enum sci_status scic_controller_free_io_tag(
	struct scic_sds_controller *scic,
	u16 io_tag)
{
	u16 sequence;
	u16 index;
3239 BUG_ON(io_tag == SCI_CONTROLLER_INVALID_IO_TAG);
3241 sequence = scic_sds_io_tag_get_sequence(io_tag);
3242 index = scic_sds_io_tag_get_index(io_tag);
3244 if (!sci_pool_full(scic->tci_pool)) {
3245 if (sequence == scic->io_request_sequence[index]) {
3246 scic_sds_io_sequence_increment(
3247 scic->io_request_sequence[index]);
			sci_pool_put(scic->tci_pool, index);

			return SCI_SUCCESS;
		}
	}

	return SCI_FAILURE_INVALID_IO_TAG;
}