isci: remove base_controller abstraction
[pandora-kernel.git] / drivers/scsi/isci/core/scic_sds_controller.c
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55
56 #include <linux/device.h>
57 #include "scic_controller.h"
58 #include "scic_phy.h"
59 #include "scic_port.h"
60 #include "scic_remote_device.h"
61 #include "scic_sds_controller.h"
62 #include "scu_registers.h"
63 #include "scic_sds_phy.h"
64 #include "scic_sds_port_configuration_agent.h"
65 #include "scic_sds_port.h"
66 #include "scic_sds_remote_device.h"
67 #include "scic_sds_request.h"
68 #include "sci_environment.h"
69 #include "sci_util.h"
70 #include "scu_completion_codes.h"
71 #include "scu_constants.h"
72 #include "scu_event_codes.h"
73 #include "scu_remote_node_context.h"
74 #include "scu_task_context.h"
75 #include "scu_unsolicited_frame.h"
76
77 #define SCU_CONTEXT_RAM_INIT_STALL_TIME      200
78
79 /**
80  * smu_dcc_get_max_ports() -
81  *
82  * This macro returns the maximum number of logical ports supported by the
83  * hardware. The caller passes in the value read from the device context
84  * capacity register and this macro will mask and shift the value appropriately.
85  */
86 #define smu_dcc_get_max_ports(dcc_value) \
87         (\
88                 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
89                  >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
90         )
91
92 /**
93  * smu_dcc_get_max_task_context() -
94  *
95  * This macro returns the maximum number of task contexts supported by the
96  * hardware. The caller passes in the value read from the device context
97  * capacity register and this macro will mask and shift the value appropriately.
98  */
99 #define smu_dcc_get_max_task_context(dcc_value) \
100         (\
101                 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
102                  >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
103         )
104
105 /**
106  * smu_dcc_get_max_remote_node_context() -
107  *
108  * This macro returns the maximum number of remote node contexts supported by
109  * the hardware. The caller passes in the value read from the device context
110  * capacity register and this macro will mask and shift the value appropriately.
111  */
112 #define smu_dcc_get_max_remote_node_context(dcc_value) \
113         (\
114                 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
115                  >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
116         )
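
/*
 * Illustrative sketch, not part of the driver: how a caller might apply the
 * three smu_dcc_* accessors above to a single read of the device context
 * capacity register.  The register/field and local names used here are
 * assumptions for the example only.
 *
 *	u32 dcc = readl(&scic->smu_registers->device_context_capacity);
 *
 *	max_ports = smu_dcc_get_max_ports(dcc);
 *	max_tcs   = smu_dcc_get_max_task_context(dcc);
 *	max_rncs  = smu_dcc_get_max_remote_node_context(dcc);
 */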
117
118
119 static void scic_sds_controller_power_control_timer_handler(
120         void *controller);
121 #define SCIC_SDS_CONTROLLER_MIN_TIMER_COUNT  3
122 #define SCIC_SDS_CONTROLLER_MAX_TIMER_COUNT  3
123
124 /**
125  * SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT -
126  *
127  * The number of milliseconds to wait for a phy to start.
128  */
129 #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100
130
131 /**
132  * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL -
133  *
134  * The number of milliseconds to wait while a given phy is consuming power
135  * before allowing another set of phys to consume power. Ultimately, this will
136  * be specified by OEM parameter.
137  */
138 #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
139
140 /**
141  * COMPLETION_QUEUE_CYCLE_BIT() -
142  *
143  * This macro will return the cycle bit of the completion queue entry
144  */
145 #define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
146
147 /**
148  * NORMALIZE_GET_POINTER() -
149  *
150  * This macro will normalize the completion queue get pointer so its value can
151  * be used as an index into an array
152  */
153 #define NORMALIZE_GET_POINTER(x) \
154         ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
155
156 /**
157  * NORMALIZE_PUT_POINTER() -
158  *
159  * This macro will normalize the completion queue put pointer so its value can
160  * be used as an index into an array
161  */
162 #define NORMALIZE_PUT_POINTER(x) \
163         ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
164
165
166 /**
167  * NORMALIZE_GET_POINTER_CYCLE_BIT() -
168  *
169  * This macro will normalize the completion queue cycle pointer so it matches
170  * the completion queue cycle bit
171  */
172 #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
173         ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
174
175 /**
176  * NORMALIZE_EVENT_POINTER() -
177  *
178  * This macro will normalize the completion queue event entry so its value can
179  * be used as an index.
180  */
181 #define NORMALIZE_EVENT_POINTER(x) \
182         (\
183                 ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
184                 >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
185         )
186
187 /**
188  * INCREMENT_COMPLETION_QUEUE_GET() -
189  *
190  * This macro will increment the controller's completion queue index value and
191  * possibly toggle the cycle bit if the completion queue index wraps back to 0.
192  */
193 #define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \
194         INCREMENT_QUEUE_GET(\
195                 (index), \
196                 (cycle), \
197                 (controller)->completion_queue_entries, \
198                 SMU_CQGR_CYCLE_BIT \
199                 )
200
201 /**
202  * INCREMENT_EVENT_QUEUE_GET() -
203  *
204  * This macro will increment the controller's event queue index value and
205  * possibly toggle the event cycle bit if the event queue index wraps back to 0.
206  */
207 #define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \
208         INCREMENT_QUEUE_GET(\
209                 (index), \
210                 (cycle), \
211                 (controller)->completion_event_entries, \
212                 SMU_CQGR_EVENT_CYCLE_BIT \
213                 )
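
/*
 * Illustrative sketch, not part of the driver: the dequeue pattern these
 * macros support.  The hardware toggles the cycle bit each time the put
 * pointer wraps, so a queue entry is only valid while its cycle bit matches
 * the cycle bit carried in the software get value.  Roughly:
 *
 *	u32 get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get);
 *	u32 get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get;
 *
 *	while (NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle) ==
 *	       COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index])) {
 *		u32 entry = scic->completion_queue[get_index];
 *
 *		(dispatch entry by completion type here)
 *
 *		INCREMENT_COMPLETION_QUEUE_GET(scic, get_index, get_cycle);
 *	}
 */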
214
215 static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic)
216 {
217         struct isci_host *ihost = sci_object_get_association(scic);
218         scic->power_control.timer = isci_timer_create(ihost,
219                                                       scic,
220                                         scic_sds_controller_power_control_timer_handler);
221
222         memset(scic->power_control.requesters, 0,
223                sizeof(scic->power_control.requesters));
224
225         scic->power_control.phys_waiting = 0;
226         scic->power_control.phys_granted_power = 0;
227 }
228
229 #define SCU_REMOTE_NODE_CONTEXT_ALIGNMENT       (32)
230 #define SCU_TASK_CONTEXT_ALIGNMENT              (256)
231 #define SCU_UNSOLICITED_FRAME_ADDRESS_ALIGNMENT (64)
232 #define SCU_UNSOLICITED_FRAME_BUFFER_ALIGNMENT  (1024)
233 #define SCU_UNSOLICITED_FRAME_HEADER_ALIGNMENT  (64)
234
235 /**
236  * This method builds the memory descriptor table for this controller.
237  * @this_controller: This parameter specifies the controller object for which
238  *    to build the memory table.
239  *
240  */
241 static void scic_sds_controller_build_memory_descriptor_table(
242         struct scic_sds_controller *this_controller)
243 {
244         sci_base_mde_construct(
245                 &this_controller->memory_descriptors[SCU_MDE_COMPLETION_QUEUE],
246                 SCU_COMPLETION_RAM_ALIGNMENT,
247                 (sizeof(u32) * this_controller->completion_queue_entries),
248                 (SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS)
249                 );
250
251         sci_base_mde_construct(
252                 &this_controller->memory_descriptors[SCU_MDE_REMOTE_NODE_CONTEXT],
253                 SCU_REMOTE_NODE_CONTEXT_ALIGNMENT,
254                 this_controller->remote_node_entries * sizeof(union scu_remote_node_context),
255                 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
256                 );
257
258         sci_base_mde_construct(
259                 &this_controller->memory_descriptors[SCU_MDE_TASK_CONTEXT],
260                 SCU_TASK_CONTEXT_ALIGNMENT,
261                 this_controller->task_context_entries * sizeof(struct scu_task_context),
262                 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
263                 );
264
265         /*
266          * The UF buffer address table size must be programmed to a power
267          * of 2.  Find the first power of 2 that is equal to or greater than
268          * the number of unsolicited frame buffers to be utilized. */
269         scic_sds_unsolicited_frame_control_set_address_table_count(
270                 &this_controller->uf_control
271                 );
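
        /*
         * Illustrative example (the buffer count is an assumption, not a
         * value taken from this driver): with 60 unsolicited frame buffers
         * the address table count rounds up to 64, the next power of 2,
         * i.e. roundup_pow_of_two(60).
         */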
272
273         sci_base_mde_construct(
274                 &this_controller->memory_descriptors[SCU_MDE_UF_BUFFER],
275                 SCU_UNSOLICITED_FRAME_BUFFER_ALIGNMENT,
276                 scic_sds_unsolicited_frame_control_get_mde_size(this_controller->uf_control),
277                 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
278                 );
279 }
280
281 /**
282  * This method validates the driver supplied memory descriptor table.
283  * @this_controller:
284  *
285  * enum sci_status
286  */
287 static enum sci_status scic_sds_controller_validate_memory_descriptor_table(
288         struct scic_sds_controller *this_controller)
289 {
290         bool mde_list_valid;
291
292         mde_list_valid = sci_base_mde_is_valid(
293                 &this_controller->memory_descriptors[SCU_MDE_COMPLETION_QUEUE],
294                 SCU_COMPLETION_RAM_ALIGNMENT,
295                 (sizeof(u32) * this_controller->completion_queue_entries),
296                 (SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS)
297                 );
298
299         if (mde_list_valid == false)
300                 return SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD;
301
302         mde_list_valid = sci_base_mde_is_valid(
303                 &this_controller->memory_descriptors[SCU_MDE_REMOTE_NODE_CONTEXT],
304                 SCU_REMOTE_NODE_CONTEXT_ALIGNMENT,
305                 this_controller->remote_node_entries * sizeof(union scu_remote_node_context),
306                 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
307                 );
308
309         if (mde_list_valid == false)
310                 return SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD;
311
312         mde_list_valid = sci_base_mde_is_valid(
313                 &this_controller->memory_descriptors[SCU_MDE_TASK_CONTEXT],
314                 SCU_TASK_CONTEXT_ALIGNMENT,
315                 this_controller->task_context_entries * sizeof(struct scu_task_context),
316                 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
317                 );
318
319         if (mde_list_valid == false)
320                 return SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD;
321
322         mde_list_valid = sci_base_mde_is_valid(
323                 &this_controller->memory_descriptors[SCU_MDE_UF_BUFFER],
324                 SCU_UNSOLICITED_FRAME_BUFFER_ALIGNMENT,
325                 scic_sds_unsolicited_frame_control_get_mde_size(this_controller->uf_control),
326                 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
327                 );
328
329         if (mde_list_valid == false)
330                 return SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD;
331
332         return SCI_SUCCESS;
333 }
334
335 /**
336  * This method initializes the controller with the physical memory addresses
337  *    that are used to communicate with the driver.
338  * @this_controller:
339  *
340  */
341 static void scic_sds_controller_ram_initialization(
342         struct scic_sds_controller *this_controller)
343 {
344         struct sci_physical_memory_descriptor *mde;
345
346         /*
347          * The completion queue is actually placed in cacheable memory.
348          * Therefore it no longer comes out of memory in the MDL. */
349         mde = &this_controller->memory_descriptors[SCU_MDE_COMPLETION_QUEUE];
350         this_controller->completion_queue = (u32 *)mde->virtual_address;
351         writel(lower_32_bits(mde->physical_address), \
352                 &this_controller->smu_registers->completion_queue_lower);
353         writel(upper_32_bits(mde->physical_address),
354                 &this_controller->smu_registers->completion_queue_upper);
355
356         /*
357          * Program the location of the Remote Node Context table
358          * into the SCU. */
359         mde = &this_controller->memory_descriptors[SCU_MDE_REMOTE_NODE_CONTEXT];
360         this_controller->remote_node_context_table = (union scu_remote_node_context *)
361                                                      mde->virtual_address;
362         writel(lower_32_bits(mde->physical_address),
363                 &this_controller->smu_registers->remote_node_context_lower);
364         writel(upper_32_bits(mde->physical_address),
365                 &this_controller->smu_registers->remote_node_context_upper);
366
367         /* Program the location of the Task Context table into the SCU. */
368         mde = &this_controller->memory_descriptors[SCU_MDE_TASK_CONTEXT];
369         this_controller->task_context_table = (struct scu_task_context *)
370                                               mde->virtual_address;
371         writel(lower_32_bits(mde->physical_address),
372                 &this_controller->smu_registers->host_task_table_lower);
373         writel(upper_32_bits(mde->physical_address),
374                 &this_controller->smu_registers->host_task_table_upper);
375
376         mde = &this_controller->memory_descriptors[SCU_MDE_UF_BUFFER];
377         scic_sds_unsolicited_frame_control_construct(
378                 &this_controller->uf_control, mde, this_controller
379                 );
380
381         /*
382          * Inform the silicon as to the location of the UF headers and
383          * address table.
384          */
385         writel(lower_32_bits(this_controller->uf_control.headers.physical_address),
386                 &this_controller->scu_registers->sdma.uf_header_base_address_lower);
387         writel(upper_32_bits(this_controller->uf_control.headers.physical_address),
388                 &this_controller->scu_registers->sdma.uf_header_base_address_upper);
389
390         writel(lower_32_bits(this_controller->uf_control.address_table.physical_address),
391                 &this_controller->scu_registers->sdma.uf_address_table_lower);
392         writel(upper_32_bits(this_controller->uf_control.address_table.physical_address),
393                 &this_controller->scu_registers->sdma.uf_address_table_upper);
394 }
395
396 /**
397  * This method initializes the task context data for the controller.
398  * @this_controller:
399  *
400  */
401 static void
402 scic_sds_controller_assign_task_entries(struct scic_sds_controller *controller)
403 {
404         u32 task_assignment;
405
406         /*
407          * Assign all the TCs to function 0
408          * TODO: Do we actually need to read this register to write it back?
409          */
410
411         task_assignment =
412                 readl(&controller->smu_registers->task_context_assignment[0]);
413
414         task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
415                 (SMU_TCA_GEN_VAL(ENDING,  controller->task_context_entries - 1)) |
416                 (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
417
418         writel(task_assignment,
419                 &controller->smu_registers->task_context_assignment[0]);
420
421 }
422
423 /**
424  * This method initializes the hardware completion queue.
425  *
426  *
427  */
428 static void scic_sds_controller_initialize_completion_queue(
429         struct scic_sds_controller *this_controller)
430 {
431         u32 index;
432         u32 completion_queue_control_value;
433         u32 completion_queue_get_value;
434         u32 completion_queue_put_value;
435
436         this_controller->completion_queue_get = 0;
437
438         completion_queue_control_value = (
439                 SMU_CQC_QUEUE_LIMIT_SET(this_controller->completion_queue_entries - 1)
440                 | SMU_CQC_EVENT_LIMIT_SET(this_controller->completion_event_entries - 1)
441                 );
442
443         writel(completion_queue_control_value,
444                 &this_controller->smu_registers->completion_queue_control);
445
446
447         /* Set the completion queue get pointer and enable the queue */
448         completion_queue_get_value = (
449                 (SMU_CQGR_GEN_VAL(POINTER, 0))
450                 | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
451                 | (SMU_CQGR_GEN_BIT(ENABLE))
452                 | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
453                 );
454
455         writel(completion_queue_get_value,
456                 &this_controller->smu_registers->completion_queue_get);
457
458         /* Set the completion queue put pointer */
459         completion_queue_put_value = (
460                 (SMU_CQPR_GEN_VAL(POINTER, 0))
461                 | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
462                 );
463
464         writel(completion_queue_put_value,
465                 &this_controller->smu_registers->completion_queue_put);
466
467
468         /* Initialize the cycle bit of the completion queue entries */
469         for (index = 0; index < this_controller->completion_queue_entries; index++) {
470                 /*
471                  * If get.cycle_bit != completion_queue.cycle_bit
472                  * it is not a valid completion queue entry,
473                  * so at system start all entries are invalid */
474                 this_controller->completion_queue[index] = 0x80000000;
475         }
476 }
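
/*
 * Note: the software get value starts at zero, so its cycle bit is zero,
 * while every entry above is preset to 0x80000000 with bit 31 set.  The
 * cycle-bit comparison in scic_sds_controller_completion_queue_has_entries()
 * therefore fails for every slot, and the queue reads as empty until the
 * hardware posts an entry whose cycle bit matches.
 */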
477
478 /**
479  * This method initializes the hardware unsolicited frame queue.
480  *
481  *
482  */
483 static void scic_sds_controller_initialize_unsolicited_frame_queue(
484         struct scic_sds_controller *this_controller)
485 {
486         u32 frame_queue_control_value;
487         u32 frame_queue_get_value;
488         u32 frame_queue_put_value;
489
490         /* Write the queue size */
491         frame_queue_control_value =
492                 SCU_UFQC_GEN_VAL(QUEUE_SIZE, this_controller->uf_control.address_table.count);
493
494         writel(frame_queue_control_value,
495                 &this_controller->scu_registers->sdma.unsolicited_frame_queue_control);
496
497         /* Setup the get pointer for the unsolicited frame queue */
498         frame_queue_get_value = (
499                 SCU_UFQGP_GEN_VAL(POINTER, 0)
500                 |  SCU_UFQGP_GEN_BIT(ENABLE_BIT)
501                 );
502
503         writel(frame_queue_get_value,
504                 &this_controller->scu_registers->sdma.unsolicited_frame_get_pointer);
505         /* Setup the put pointer for the unsolicited frame queue */
506         frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
507         writel(frame_queue_put_value,
508                 &this_controller->scu_registers->sdma.unsolicited_frame_put_pointer);
509 }
510
511 /**
512  * This method enables the hardware port task scheduler.
513  *
514  *
515  */
516 static void scic_sds_controller_enable_port_task_scheduler(
517         struct scic_sds_controller *this_controller)
518 {
519         u32 port_task_scheduler_value;
520
521         port_task_scheduler_value =
522                 readl(&this_controller->scu_registers->peg0.ptsg.control);
523         port_task_scheduler_value |=
524                 (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) | SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
525         writel(port_task_scheduler_value,
526                 &this_controller->scu_registers->peg0.ptsg.control);
527 }
528
529 /**
530  * AFE_REGISTER_WRITE_DELAY -
531  *
532  * This macro is used to delay between writes to the AFE registers during AFE
533  * initialization.
534  */
535 #define AFE_REGISTER_WRITE_DELAY 10
536
537 /* Initialize the AFE for this phy index. We need to read the AFE setup from
538  * the OEM parameters.
539  */
540 static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic)
541 {
542         const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
543         u32 afe_status;
544         u32 phy_id;
545
546         /* Clear DFX Status registers */
547         writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0);
548         udelay(AFE_REGISTER_WRITE_DELAY);
549
550         /* Configure bias currents to normal */
551         if (is_a0())
552                 writel(0x00005500, &scic->scu_registers->afe.afe_bias_control);
553         else
554                 writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control);
555
556         udelay(AFE_REGISTER_WRITE_DELAY);
557
558         /* Enable PLL */
559         if (is_b0())
560                 writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0);
561         else
562                 writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0);
563
564         udelay(AFE_REGISTER_WRITE_DELAY);
565
566         /* Wait for the PLL to lock */
567         do {
568                 afe_status = readl(&scic->scu_registers->afe.afe_common_block_status);
569                 udelay(AFE_REGISTER_WRITE_DELAY);
570         } while ((afe_status & 0x00001000) == 0);
571
572         if (is_b0()) {
573                 /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
574                 writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0);
575                 udelay(AFE_REGISTER_WRITE_DELAY);
576         }
577
578         for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
579                 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
580
581                 if (is_b0()) {
582                          /* Configure transmitter SSC parameters */
583                         writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
584                         udelay(AFE_REGISTER_WRITE_DELAY);
585                 } else {
586                         /*
587                          * All defaults, except the Receive Word Alignment/Comma Detect
588                          * Enable....(0xe800) */
589                         writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
590                         udelay(AFE_REGISTER_WRITE_DELAY);
591
592                         writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
593                         udelay(AFE_REGISTER_WRITE_DELAY);
594                 }
595
596                 /*
597                  * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
598                  * & increase TX int & ext bias 20%....(0xe85c) */
599                 if (is_a0())
600                         writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
601                 else if (is_a2())
602                         writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
603                 else {
604                          /* Power down TX and RX (PWRDNTX and PWRDNRX) */
605                         writel(0x000003d7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
606                         udelay(AFE_REGISTER_WRITE_DELAY);
607
608                         /*
609                          * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
610                          * & increase TX int & ext bias 20%....(0xe85c) */
611                         writel(0x000003d4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
612                 }
613                 udelay(AFE_REGISTER_WRITE_DELAY);
614
615                 if (is_a0() || is_a2()) {
616                         /* Enable TX equalization (0xe824) */
617                         writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
618                         udelay(AFE_REGISTER_WRITE_DELAY);
619                 }
620
621                 /*
622                  * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
623                  * RDD=0x0(RX Detect Enabled) ....(0xe800) */
624                 writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
625                 udelay(AFE_REGISTER_WRITE_DELAY);
626
627                 /* Leave DFE/FFE on */
628                 if (is_a0())
629                         writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
630                 else if (is_a2())
631                         writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
632                 else {
633                         writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
634                         udelay(AFE_REGISTER_WRITE_DELAY);
635                         /* Enable TX equalization (0xe824) */
636                         writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
637                 }
638                 udelay(AFE_REGISTER_WRITE_DELAY);
639
640                 writel(oem_phy->afe_tx_amp_control0,
641                         &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
642                 udelay(AFE_REGISTER_WRITE_DELAY);
643
644                 writel(oem_phy->afe_tx_amp_control1,
645                         &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
646                 udelay(AFE_REGISTER_WRITE_DELAY);
647
648                 writel(oem_phy->afe_tx_amp_control2,
649                         &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
650                 udelay(AFE_REGISTER_WRITE_DELAY);
651
652                 writel(oem_phy->afe_tx_amp_control3,
653                         &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
654                 udelay(AFE_REGISTER_WRITE_DELAY);
655         }
656
657         /* Transfer control to the PEs */
658         writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0);
659         udelay(AFE_REGISTER_WRITE_DELAY);
660 }
661
662 /*
663  * ****************************************************************************-
664  * * SCIC SDS Controller Internal Start/Stop Routines
665  * ****************************************************************************- */
666
667
668 /**
669  * This method will attempt to transition into the ready state for the
670  *    controller and indicate that the controller start operation has completed
671  *    if all criteria are met.
672  * @this_controller: This parameter indicates the controller object for which
673  *    to transition to ready.
674  * @status: This parameter indicates the status value to be passed into the call
675  *    to isci_host_start_complete().
676  *
677  * none.
678  */
679 static void scic_sds_controller_transition_to_ready(
680         struct scic_sds_controller *scic,
681         enum sci_status status)
682 {
683         struct isci_host *ihost = sci_object_get_association(scic);
684
685         if (scic->state_machine.current_state_id ==
686             SCI_BASE_CONTROLLER_STATE_STARTING) {
687                 /*
688                  * We move into the ready state, because some of the phys/ports
689                  * may be up and operational.
690                  */
691                 sci_base_state_machine_change_state(&scic->state_machine,
692                                                     SCI_BASE_CONTROLLER_STATE_READY);
693
694                 isci_host_start_complete(ihost, status);
695         }
696 }
697
698 static void scic_sds_controller_timeout_handler(void *_scic)
699 {
700         struct scic_sds_controller *scic = _scic;
701         struct isci_host *ihost = sci_object_get_association(scic);
702         struct sci_base_state_machine *sm = &scic->state_machine;
703
704         if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STARTING)
705                 scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT);
706         else if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STOPPING) {
707                 sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_FAILED);
708                 isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
709         } else  /* / @todo Now what do we want to do in this case? */
710                 dev_err(scic_to_dev(scic),
711                         "%s: Controller timer fired when controller was not "
712                         "in a state being timed.\n",
713                         __func__);
714 }
715
716 static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic)
717 {
718         u32 index;
719         enum sci_status port_status;
720         enum sci_status status = SCI_SUCCESS;
721
722         for (index = 0; index < scic->logical_port_entries; index++) {
723                 struct scic_sds_port *sci_port = &scic->port_table[index];
724                 sci_base_port_handler_t stop;
725
726                 stop = sci_port->state_handlers->parent.stop_handler;
727                 port_status = stop(&sci_port->parent);
728
729                 if ((port_status != SCI_SUCCESS) &&
730                     (port_status != SCI_FAILURE_INVALID_STATE)) {
731                         status = SCI_FAILURE;
732
733                         dev_warn(scic_to_dev(scic),
734                                  "%s: Controller stop operation failed to "
735                                  "stop port %d because of status %d.\n",
736                                  __func__,
737                                  sci_port->logical_port_index,
738                                  port_status);
739                 }
740         }
741
742         return status;
743 }
744
745 static inline void scic_sds_controller_phy_timer_start(
746                 struct scic_sds_controller *scic)
747 {
748         isci_timer_start(scic->phy_startup_timer,
749                          SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
750
751         scic->phy_startup_timer_pending = true;
752 }
753
754 static void scic_sds_controller_phy_timer_stop(struct scic_sds_controller *scic)
755 {
756         isci_timer_stop(scic->phy_startup_timer);
757
758         scic->phy_startup_timer_pending = false;
759 }
760
761 /**
762  * scic_sds_controller_start_next_phy - start phy
763  * @scic: controller
764  *
765  * If all the phys have been started, then attempt to transition the
766  * controller to the READY state and inform the user
767  * (scic_cb_controller_start_complete()).
768  * (isci_host_start_complete()).
769 static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic)
770 {
771         struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
772         struct scic_sds_phy *sci_phy;
773         enum sci_status status;
774
775         status = SCI_SUCCESS;
776
777         if (scic->phy_startup_timer_pending)
778                 return status;
779
780         if (scic->next_phy_to_start >= SCI_MAX_PHYS) {
781                 bool is_controller_start_complete = true;
782                 u32 state;
783                 u8 index;
784
785                 for (index = 0; index < SCI_MAX_PHYS; index++) {
786                         sci_phy = &scic->phy_table[index];
787                         state = sci_phy->parent.state_machine.current_state_id;
788
789                         if (!scic_sds_phy_get_port(sci_phy))
790                                 continue;
791
792                         /* The controller start operation is complete iff:
793                          * - all links have been given an opportunity to start
794                          * - have no indication of a connected device
795                          * - have an indication of a connected device and it has
796                          *   finished the link training process.
797                          */
798                         if ((sci_phy->is_in_link_training == false &&
799                              state == SCI_BASE_PHY_STATE_INITIAL) ||
800                             (sci_phy->is_in_link_training == false &&
801                              state == SCI_BASE_PHY_STATE_STOPPED) ||
802                             (sci_phy->is_in_link_training == true &&
803                              state == SCI_BASE_PHY_STATE_STARTING)) {
804                                 is_controller_start_complete = false;
805                                 break;
806                         }
807                 }
808
809                 /*
810                  * The controller has successfully finished the start process.
811                  * Inform the SCI Core user and transition to the READY state. */
812                 if (is_controller_start_complete == true) {
813                         scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS);
814                         scic_sds_controller_phy_timer_stop(scic);
815                 }
816         } else {
817                 sci_phy = &scic->phy_table[scic->next_phy_to_start];
818
819                 if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
820                         if (scic_sds_phy_get_port(sci_phy) == NULL) {
821                                 scic->next_phy_to_start++;
822
823                                 /* Caution: recursion ahead, be forewarned.
824                                  *
825                                  * The PHY was never added to a PORT in MPC
826                                  * mode, so start the next phy in sequence.
827                                  * This phy will never go link up and will
828                                  * not draw power; the OEM parameters either
829                                  * configured the phy incorrectly for the
830                                  * PORT or it was never assigned to a PORT.
831                                  */
832                                 return scic_sds_controller_start_next_phy(scic);
833                         }
834                 }
835
836                 status = scic_sds_phy_start(sci_phy);
837
838                 if (status == SCI_SUCCESS) {
839                         scic_sds_controller_phy_timer_start(scic);
840                 } else {
841                         dev_warn(scic_to_dev(scic),
842                                  "%s: Controller start operation failed "
843                                  "to start phy %d because of status "
844                                  "%d.\n",
845                                  __func__,
846                                  scic->phy_table[scic->next_phy_to_start].phy_index,
847                                  status);
848                 }
849
850                 scic->next_phy_to_start++;
851         }
852
853         return status;
854 }
855
856 static void scic_sds_controller_phy_startup_timeout_handler(void *_scic)
857 {
858         struct scic_sds_controller *scic = _scic;
859         enum sci_status status;
860
861         scic->phy_startup_timer_pending = false;
862         status = SCI_FAILURE;
863         while (status != SCI_SUCCESS)
864                 status = scic_sds_controller_start_next_phy(scic);
865 }
866
867 static enum sci_status scic_sds_controller_initialize_phy_startup(struct scic_sds_controller *scic)
868 {
869         struct isci_host *ihost = sci_object_get_association(scic);
870
871         scic->phy_startup_timer = isci_timer_create(ihost,
872                                                     scic,
873                                                     scic_sds_controller_phy_startup_timeout_handler);
874
875         if (scic->phy_startup_timer == NULL)
876                 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
877         else {
878                 scic->next_phy_to_start = 0;
879                 scic->phy_startup_timer_pending = false;
880         }
881
882         return SCI_SUCCESS;
883 }
884
885 static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic)
886 {
887         u32 index;
888         enum sci_status status;
889         enum sci_status phy_status;
890
891         status = SCI_SUCCESS;
892
893         for (index = 0; index < SCI_MAX_PHYS; index++) {
894                 phy_status = scic_sds_phy_stop(&scic->phy_table[index]);
895
896                 if (
897                         (phy_status != SCI_SUCCESS)
898                         && (phy_status != SCI_FAILURE_INVALID_STATE)
899                         ) {
900                         status = SCI_FAILURE;
901
902                         dev_warn(scic_to_dev(scic),
903                                  "%s: Controller stop operation failed to stop "
904                                  "phy %d because of status %d.\n",
905                                  __func__,
906                                  scic->phy_table[index].phy_index, phy_status);
907                 }
908         }
909
910         return status;
911 }
912
913 static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic)
914 {
915         u32 index;
916         enum sci_status status;
917         enum sci_status device_status;
918
919         status = SCI_SUCCESS;
920
921         for (index = 0; index < scic->remote_node_entries; index++) {
922                 if (scic->device_table[index] != NULL) {
923                         /* / @todo What timeout value do we want to provide to this request? */
924                         device_status = scic_remote_device_stop(scic->device_table[index], 0);
925
926                         if ((device_status != SCI_SUCCESS) &&
927                             (device_status != SCI_FAILURE_INVALID_STATE)) {
928                                 dev_warn(scic_to_dev(scic),
929                                          "%s: Controller stop operation failed "
930                                          "to stop device 0x%p because of "
931                                          "status %d.\n",
932                                          __func__,
933                                          scic->device_table[index], device_status);
934                         }
935                 }
936         }
937
938         return status;
939 }
940
941 static void scic_sds_controller_power_control_timer_start(struct scic_sds_controller *scic)
942 {
943         isci_timer_start(scic->power_control.timer,
944                          SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
945
946         scic->power_control.timer_started = true;
947 }
948
949 static void scic_sds_controller_power_control_timer_stop(struct scic_sds_controller *scic)
950 {
951         if (scic->power_control.timer_started) {
952                 isci_timer_stop(scic->power_control.timer);
953                 scic->power_control.timer_started = false;
954         }
955 }
956
957 static void scic_sds_controller_power_control_timer_restart(struct scic_sds_controller *scic)
958 {
959         scic_sds_controller_power_control_timer_stop(scic);
960         scic_sds_controller_power_control_timer_start(scic);
961 }
962
963 static void scic_sds_controller_power_control_timer_handler(
964         void *controller)
965 {
966         struct scic_sds_controller *this_controller;
967
968         this_controller = (struct scic_sds_controller *)controller;
969
970         this_controller->power_control.phys_granted_power = 0;
971
972         if (this_controller->power_control.phys_waiting == 0) {
973                 this_controller->power_control.timer_started = false;
974         } else {
975                 struct scic_sds_phy *the_phy = NULL;
976                 u8 i;
977
978                 for (i = 0;
979                      (i < SCI_MAX_PHYS)
980                      && (this_controller->power_control.phys_waiting != 0);
981                      i++) {
982                         if (this_controller->power_control.requesters[i] != NULL) {
983                                 if (this_controller->power_control.phys_granted_power <
984                                     this_controller->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
985                                         the_phy = this_controller->power_control.requesters[i];
986                                         this_controller->power_control.requesters[i] = NULL;
987                                         this_controller->power_control.phys_waiting--;
988                                         this_controller->power_control.phys_granted_power++;
989                                         scic_sds_phy_consume_power_handler(the_phy);
990                                 } else {
991                                         break;
992                                 }
993                         }
994                 }
995
996                 /*
997                  * It doesn't matter if the power list is empty; we need to start the
998                  * timer in case another phy becomes ready.
999                  */
1000                 scic_sds_controller_power_control_timer_start(this_controller);
1001         }
1002 }
1003
1004 /**
1005  * This method inserts the phy in the stagger spinup control queue.
1006  * @this_controller:
1007  *
1008  *
1009  */
1010 void scic_sds_controller_power_control_queue_insert(
1011         struct scic_sds_controller *this_controller,
1012         struct scic_sds_phy *the_phy)
1013 {
1014         BUG_ON(the_phy == NULL);
1015
1016         if (this_controller->power_control.phys_granted_power <
1017             this_controller->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
1018                 this_controller->power_control.phys_granted_power++;
1019                 scic_sds_phy_consume_power_handler(the_phy);
1020
1021                 /*
1022                  * stop and start the power_control timer. When the timer fires, the
1023                  * phys_granted_power count will be reset to 0.
1024                  */
1025                 scic_sds_controller_power_control_timer_restart(this_controller);
1026         } else {
1027                 /* Add the phy in the waiting list */
1028                 this_controller->power_control.requesters[the_phy->phy_index] = the_phy;
1029                 this_controller->power_control.phys_waiting++;
1030         }
1031 }
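
/*
 * Worked example, illustrative only: with max_concurrent_dev_spin_up == 1
 * and four phys requesting power at nearly the same time, phy 0 is granted
 * power immediately via scic_sds_phy_consume_power_handler() and the power
 * control timer is restarted.  Phys 1-3 are parked in
 * power_control.requesters[] with phys_waiting == 3.  Each time the timer
 * fires (every SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL milliseconds) the
 * handler above clears phys_granted_power and grants power to at most one
 * more waiting phy, so the spin-ups are staggered across intervals.
 */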
1032
1033 /**
1034  * This method removes the phy from the stagger spinup control queue.
1035  * @this_controller:
1036  *
1037  *
1038  */
1039 void scic_sds_controller_power_control_queue_remove(
1040         struct scic_sds_controller *this_controller,
1041         struct scic_sds_phy *the_phy)
1042 {
1043         BUG_ON(the_phy == NULL);
1044
1045         if (this_controller->power_control.requesters[the_phy->phy_index] != NULL) {
1046                 this_controller->power_control.phys_waiting--;
1047         }
1048
1049         this_controller->power_control.requesters[the_phy->phy_index] = NULL;
1050 }
1051
1052 /*
1053  * ****************************************************************************-
1054  * * SCIC SDS Controller Completion Routines
1055  * ****************************************************************************- */
1056
1057 /**
1058  * This method returns a true value if the completion queue has entries that
1059  *    can be processed
1060  * @this_controller:
1061  *
1062  * bool: true if the completion queue has entries to process; false if the
1063  * completion queue has no entries to process.
1064  */
1065 static bool scic_sds_controller_completion_queue_has_entries(
1066         struct scic_sds_controller *this_controller)
1067 {
1068         u32 get_value = this_controller->completion_queue_get;
1069         u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
1070
1071         if (
1072                 NORMALIZE_GET_POINTER_CYCLE_BIT(get_value)
1073                 == COMPLETION_QUEUE_CYCLE_BIT(this_controller->completion_queue[get_index])
1074                 ) {
1075                 return true;
1076         }
1077
1078         return false;
1079 }
1080
1081 /**
1082  * This method processes a task completion notification.  This is called from
1083  *    within the controller completion handler.
1084  * @this_controller:
1085  * @completion_entry:
1086  *
1087  */
1088 static void scic_sds_controller_task_completion(
1089         struct scic_sds_controller *this_controller,
1090         u32 completion_entry)
1091 {
1092         u32 index;
1093         struct scic_sds_request *io_request;
1094
1095         index = SCU_GET_COMPLETION_INDEX(completion_entry);
1096         io_request = this_controller->io_request_table[index];
1097
1098         /* Make sure that we really want to process this IO request */
1099         if (
1100                 (io_request != NULL)
1101                 && (io_request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG)
1102                 && (
1103                         scic_sds_io_tag_get_sequence(io_request->io_tag)
1104                         == this_controller->io_request_sequence[index]
1105                         )
1106                 ) {
1107                 /* Yep, this is a valid io request; pass it along to the io request handler */
1108                 scic_sds_io_request_tc_completion(io_request, completion_entry);
1109         }
1110 }
1111
1112 /**
1113  * This method processes an SDMA completion event.  This is called from within
1114  *    the controller completion handler.
1115  * @this_controller:
1116  * @completion_entry:
1117  *
1118  */
1119 static void scic_sds_controller_sdma_completion(
1120         struct scic_sds_controller *this_controller,
1121         u32 completion_entry)
1122 {
1123         u32 index;
1124         struct scic_sds_request *io_request;
1125         struct scic_sds_remote_device *device;
1126
1127         index = SCU_GET_COMPLETION_INDEX(completion_entry);
1128
1129         switch (scu_get_command_request_type(completion_entry)) {
1130         case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
1131         case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
1132                 io_request = this_controller->io_request_table[index];
1133                 dev_warn(scic_to_dev(this_controller),
1134                          "%s: SCIC SDS Completion type SDMA %x for io request "
1135                          "%p\n",
1136                          __func__,
1137                          completion_entry,
1138                          io_request);
1139                 /* @todo For a post TC operation we need to fail the IO
1140                  * request
1141                  */
1142                 break;
1143
1144         case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
1145         case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
1146         case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
1147                 device = this_controller->device_table[index];
1148                 dev_warn(scic_to_dev(this_controller),
1149                          "%s: SCIC SDS Completion type SDMA %x for remote "
1150                          "device %p\n",
1151                          __func__,
1152                          completion_entry,
1153                          device);
1154                 /* @todo For a port RNC operation we need to fail the
1155                  * device
1156                  */
1157                 break;
1158
1159         default:
1160                 dev_warn(scic_to_dev(this_controller),
1161                          "%s: SCIC SDS Completion unknown SDMA completion "
1162                          "type %x\n",
1163                          __func__,
1164                          completion_entry);
1165                 break;
1166
1167         }
1168 }
1169
1170 /**
1171  *
1172  * @this_controller:
1173  * @completion_entry:
1174  *
1175  * This method processes an unsolicited frame message.  This is called from
1176  * within the controller completion handler.
1177  */
1178 static void scic_sds_controller_unsolicited_frame(
1179         struct scic_sds_controller *this_controller,
1180         u32 completion_entry)
1181 {
1182         u32 index;
1183         u32 frame_index;
1184
1185         struct scu_unsolicited_frame_header *frame_header;
1186         struct scic_sds_phy *phy;
1187         struct scic_sds_remote_device *device;
1188
1189         enum sci_status result = SCI_FAILURE;
1190
1191         frame_index = SCU_GET_FRAME_INDEX(completion_entry);
1192
1193         frame_header
1194                 = this_controller->uf_control.buffers.array[frame_index].header;
1195         this_controller->uf_control.buffers.array[frame_index].state
1196                 = UNSOLICITED_FRAME_IN_USE;
1197
1198         if (SCU_GET_FRAME_ERROR(completion_entry)) {
1199                 /*
1200                  * / @todo If the IAF frame or SIGNATURE FIS frame has an error will
1201                  * /       this cause a problem? We expect the phy initialization will
1202                  * /       fail if there is an error in the frame. */
1203                 scic_sds_controller_release_frame(this_controller, frame_index);
1204                 return;
1205         }
1206
1207         if (frame_header->is_address_frame) {
1208                 index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
1209                 phy = &this_controller->phy_table[index];
1210                 if (phy != NULL) {
1211                         result = scic_sds_phy_frame_handler(phy, frame_index);
1212                 }
1213         } else {
1214
1215                 index = SCU_GET_COMPLETION_INDEX(completion_entry);
1216
1217                 if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
1218                         /*
1219                          * This is a signature fis or a frame from a direct attached SATA
1220                          * device that has not yet been created.  In either case forward
1221                          * the frame to the PE and let it take care of the frame data. */
1222                         index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
1223                         phy = &this_controller->phy_table[index];
1224                         result = scic_sds_phy_frame_handler(phy, frame_index);
1225                 } else {
1226                         if (index < this_controller->remote_node_entries)
1227                                 device = this_controller->device_table[index];
1228                         else
1229                                 device = NULL;
1230
1231                         if (device != NULL)
1232                                 result = scic_sds_remote_device_frame_handler(device, frame_index);
1233                         else
1234                                 scic_sds_controller_release_frame(this_controller, frame_index);
1235                 }
1236         }
1237
1238         if (result != SCI_SUCCESS) {
1239                 /*
1240                  * / @todo Is there any reason to report some additional error message
1241                  * /       when we get this failure notification? */
1242         }
1243 }
1244
1245 /**
1246  * This method processes an event completion entry.  This is called from within
1247  *    the controller completion handler.
1248  * @this_controller:
1249  * @completion_entry:
1250  *
1251  */
1252 static void scic_sds_controller_event_completion(
1253         struct scic_sds_controller *this_controller,
1254         u32 completion_entry)
1255 {
1256         u32 index;
1257         struct scic_sds_request *io_request;
1258         struct scic_sds_remote_device *device;
1259         struct scic_sds_phy *phy;
1260
1261         index = SCU_GET_COMPLETION_INDEX(completion_entry);
1262
1263         switch (scu_get_event_type(completion_entry)) {
1264         case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
1265                 /* / @todo The driver did something wrong and we need to fix the condition. */
1266                 dev_err(scic_to_dev(this_controller),
1267                         "%s: SCIC Controller 0x%p received SMU command error "
1268                         "0x%x\n",
1269                         __func__,
1270                         this_controller,
1271                         completion_entry);
1272                 break;
1273
1274         case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
1275         case SCU_EVENT_TYPE_SMU_ERROR:
1276         case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
1277                 /*
1278                  * / @todo This is a hardware failure and it is likely that we want to
1279                  * /       reset the controller. */
1280                 dev_err(scic_to_dev(this_controller),
1281                         "%s: SCIC Controller 0x%p received fatal controller "
1282                         "event  0x%x\n",
1283                         __func__,
1284                         this_controller,
1285                         completion_entry);
1286                 break;
1287
1288         case SCU_EVENT_TYPE_TRANSPORT_ERROR:
1289                 io_request = this_controller->io_request_table[index];
1290                 scic_sds_io_request_event_handler(io_request, completion_entry);
1291                 break;
1292
1293         case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
1294                 switch (scu_get_event_specifier(completion_entry)) {
1295                 case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
1296                 case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
1297                         io_request = this_controller->io_request_table[index];
1298                         if (io_request != NULL)
1299                                 scic_sds_io_request_event_handler(io_request, completion_entry);
1300                         else
1301                                 dev_warn(scic_to_dev(this_controller),
1302                                          "%s: SCIC Controller 0x%p received "
1303                                          "event 0x%x for io request object "
1304                                          "that doesn't exist.\n",
1305                                          __func__,
1306                                          this_controller,
1307                                          completion_entry);
1308
1309                         break;
1310
1311                 case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
1312                         device = this_controller->device_table[index];
1313                         if (device != NULL)
1314                                 scic_sds_remote_device_event_handler(device, completion_entry);
1315                         else
1316                                 dev_warn(scic_to_dev(this_controller),
1317                                          "%s: SCIC Controller 0x%p received "
1318                                          "event 0x%x for remote device object "
1319                                          "that doesn't exist.\n",
1320                                          __func__,
1321                                          this_controller,
1322                                          completion_entry);
1323
1324                         break;
1325                 }
1326                 break;
1327
1328         case SCU_EVENT_TYPE_BROADCAST_CHANGE:
1329         /*
1330          * direct the broadcast change event to the phy first and then let
1331          * the phy redirect the broadcast change to the port object */
1332         case SCU_EVENT_TYPE_ERR_CNT_EVENT:
1333         /*
1334          * direct error counter event to the phy object since that is where
1335          * we get the event notification.  This is a type 4 event. */
1336         case SCU_EVENT_TYPE_OSSP_EVENT:
1337                 index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
1338                 phy = &this_controller->phy_table[index];
1339                 scic_sds_phy_event_handler(phy, completion_entry);
1340                 break;
1341
1342         case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
1343         case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
1344         case SCU_EVENT_TYPE_RNC_OPS_MISC:
1345                 if (index < this_controller->remote_node_entries) {
1346                         device = this_controller->device_table[index];
1347
1348                         if (device != NULL)
1349                                 scic_sds_remote_device_event_handler(device, completion_entry);
1350                 } else
1351                         dev_err(scic_to_dev(this_controller),
1352                                 "%s: SCIC Controller 0x%p received event 0x%x "
1353                                 "for remote device object 0x%0x that doesn't "
1354                                 "exist.\n",
1355                                 __func__,
1356                                 this_controller,
1357                                 completion_entry,
1358                                 index);
1359
1360                 break;
1361
1362         default:
1363                 dev_warn(scic_to_dev(this_controller),
1364                          "%s: SCIC Controller received unknown event code %x\n",
1365                          __func__,
1366                          completion_entry);
1367                 break;
1368         }
1369 }
1370
1371 /**
1372  * This method is a private routine for processing the completion queue entries.
1373  * @this_controller:
1374  *
1375  */
1376 static void scic_sds_controller_process_completions(
1377         struct scic_sds_controller *this_controller)
1378 {
1379         u32 completion_count = 0;
1380         u32 completion_entry;
1381         u32 get_index;
1382         u32 get_cycle;
1383         u32 event_index;
1384         u32 event_cycle;
1385
1386         dev_dbg(scic_to_dev(this_controller),
1387                 "%s: completion queue beginning get:0x%08x\n",
1388                 __func__,
1389                 this_controller->completion_queue_get);
1390
1391         /* Get the component parts of the completion queue */
1392         get_index = NORMALIZE_GET_POINTER(this_controller->completion_queue_get);
1393         get_cycle = SMU_CQGR_CYCLE_BIT & this_controller->completion_queue_get;
1394
1395         event_index = NORMALIZE_EVENT_POINTER(this_controller->completion_queue_get);
1396         event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & this_controller->completion_queue_get;
1397
1398         while (
1399                 NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
1400                 == COMPLETION_QUEUE_CYCLE_BIT(this_controller->completion_queue[get_index])
1401                 ) {
1402                 completion_count++;
1403
1404                 completion_entry = this_controller->completion_queue[get_index];
1405                 INCREMENT_COMPLETION_QUEUE_GET(this_controller, get_index, get_cycle);
1406
1407                 dev_dbg(scic_to_dev(this_controller),
1408                         "%s: completion queue entry:0x%08x\n",
1409                         __func__,
1410                         completion_entry);
1411
1412                 switch (SCU_GET_COMPLETION_TYPE(completion_entry)) {
1413                 case SCU_COMPLETION_TYPE_TASK:
1414                         scic_sds_controller_task_completion(this_controller, completion_entry);
1415                         break;
1416
1417                 case SCU_COMPLETION_TYPE_SDMA:
1418                         scic_sds_controller_sdma_completion(this_controller, completion_entry);
1419                         break;
1420
1421                 case SCU_COMPLETION_TYPE_UFI:
1422                         scic_sds_controller_unsolicited_frame(this_controller, completion_entry);
1423                         break;
1424
1425                 case SCU_COMPLETION_TYPE_EVENT:
1426                         INCREMENT_EVENT_QUEUE_GET(this_controller, event_index, event_cycle);
1427                         scic_sds_controller_event_completion(this_controller, completion_entry);
1428                         break;
1429
1430                 case SCU_COMPLETION_TYPE_NOTIFY:
1431                         /*
1432                          * Presently we do the same thing with a notify event that we do with the
1433                          * other event codes. */
1434                         INCREMENT_EVENT_QUEUE_GET(this_controller, event_index, event_cycle);
1435                         scic_sds_controller_event_completion(this_controller, completion_entry);
1436                         break;
1437
1438                 default:
1439                         dev_warn(scic_to_dev(this_controller),
1440                                  "%s: SCIC Controller received unknown "
1441                                  "completion type %x\n",
1442                                  __func__,
1443                                  completion_entry);
1444                         break;
1445                 }
1446         }
1447
1448         /* Update the get register if we completed one or more entries */
1449         if (completion_count > 0) {
1450                 this_controller->completion_queue_get =
1451                         SMU_CQGR_GEN_BIT(ENABLE)
1452                         | SMU_CQGR_GEN_BIT(EVENT_ENABLE)
1453                         | event_cycle | SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index)
1454                         | get_cycle   | SMU_CQGR_GEN_VAL(POINTER, get_index);
1455
1456                 writel(this_controller->completion_queue_get,
1457                         &this_controller->smu_registers->completion_queue_get);
1458
1459         }
1460
1461         dev_dbg(scic_to_dev(this_controller),
1462                 "%s: completion queue ending get:0x%08x\n",
1463                 __func__,
1464                 this_controller->completion_queue_get);
1465
1466 }
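
/*
 * Illustrative sketch (not part of the driver): the cycle-bit test used by
 * the processing loop above, reduced to a generic ring-buffer consumer.
 * The bit position below is an assumption made for illustration only; the
 * real values come from the SMU_CQGR_* and COMPLETION_QUEUE_* macros.
 *
 *      #define EXAMPLE_CYCLE_BIT       0x80000000      // toggles every queue wrap
 *
 *      static bool example_entry_is_new(u32 entry, u32 expected_cycle)
 *      {
 *              // A newly produced entry carries the current cycle value;
 *              // an entry left over from the previous lap carries the old
 *              // one, so the comparison fails and the consumer stops.
 *              return (entry & EXAMPLE_CYCLE_BIT) == expected_cycle;
 *      }
 */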
1467
1468 bool scic_sds_controller_isr(struct scic_sds_controller *scic)
1469 {
1470         if (scic_sds_controller_completion_queue_has_entries(scic)) {
1471                 return true;
1472         } else {
1473                 /*
1474                  * We have a spurious interrupt; it could be that we have already
1475                  * emptied the completion queue from a previous interrupt. */
1476                 writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
1477
1478                 /*
1479                  * There is a race in the hardware that could cause us not to be notified
1480                  * of an interrupt completion if we do not take this step.  We will mask
1481                  * then unmask the interrupts so that, if another interrupt is pending
1482                  * after the interrupt source is cleared, we get the next interrupt message. */
1483                 writel(0xFF000000, &scic->smu_registers->interrupt_mask);
1484                 writel(0, &scic->smu_registers->interrupt_mask);
1485         }
1486
1487         return false;
1488 }
1489
1490 void scic_sds_controller_completion_handler(struct scic_sds_controller *scic)
1491 {
1492         /* Empty out the completion queue */
1493         if (scic_sds_controller_completion_queue_has_entries(scic))
1494                 scic_sds_controller_process_completions(scic);
1495
1496         /* Clear the interrupt and enable all interrupts again */
1497         writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
1498         /* Could we write the value of SMU_ISR_COMPLETION? */
1499         writel(0xFF000000, &scic->smu_registers->interrupt_mask);
1500         writel(0, &scic->smu_registers->interrupt_mask);
1501 }
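
/*
 * Illustrative sketch (not part of the driver): one way the
 * scic_sds_controller_isr()/scic_sds_controller_completion_handler() pair
 * above could be wired into an interrupt handler.  The handler name, the
 * use of dev_id to recover the controller, and calling the completion
 * handler directly (rather than from a deferred context) are assumptions
 * made purely for illustration.
 *
 *      static irqreturn_t example_isci_intx_isr(int irq, void *dev_id)
 *      {
 *              struct scic_sds_controller *scic = dev_id;
 *
 *              if (!scic_sds_controller_isr(scic))
 *                      return IRQ_NONE;
 *
 *              scic_sds_controller_completion_handler(scic);
 *              return IRQ_HANDLED;
 *      }
 */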
1502
1503 bool scic_sds_controller_error_isr(struct scic_sds_controller *scic)
1504 {
1505         u32 interrupt_status;
1506
1507         interrupt_status =
1508                 readl(&scic->smu_registers->interrupt_status);
1509         interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
1510
1511         if (interrupt_status != 0) {
1512                 /*
1513                  * There is an error interrupt pending so let it through and handle
1514                  * in the callback */
1515                 return true;
1516         }
1517
1518         /*
1519          * There is a race in the hardware that could cause us not to be notified
1520          * of an interrupt completion if we do not take this step.  We will mask
1521          * then unmask the error interrupts so that, if another interrupt is
1522          * pending, we will be notified.
1523          * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
1524         writel(0xff, &scic->smu_registers->interrupt_mask);
1525         writel(0, &scic->smu_registers->interrupt_mask);
1526
1527         return false;
1528 }
1529
1530 void scic_sds_controller_error_handler(struct scic_sds_controller *scic)
1531 {
1532         u32 interrupt_status;
1533
1534         interrupt_status =
1535                 readl(&scic->smu_registers->interrupt_status);
1536
1537         if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
1538             scic_sds_controller_completion_queue_has_entries(scic)) {
1539
1540                 scic_sds_controller_process_completions(scic);
1541                 writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status);
1542         } else {
1543                 dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__,
1544                         interrupt_status);
1545
1546                 sci_base_state_machine_change_state(&scic->state_machine,
1547                                                     SCI_BASE_CONTROLLER_STATE_FAILED);
1548
1549                 return;
1550         }
1551
1552         /* If we don't process any completions I am not sure that we want to do this.
1553          * We are in the middle of a hardware fault and the controller should probably be reset.
1554          */
1555         writel(0, &scic->smu_registers->interrupt_mask);
1556 }
1557
1558
1559
1560
1561 void scic_sds_controller_link_up(
1562         struct scic_sds_controller *scic,
1563         struct scic_sds_port *sci_port,
1564         struct scic_sds_phy *sci_phy)
1565 {
1566         scic_sds_controller_phy_handler_t link_up;
1567         u32 state;
1568
1569         state = scic->state_machine.current_state_id;
1570         link_up = scic_sds_controller_state_handler_table[state].link_up;
1571
1572         if (link_up)
1573                 link_up(scic, sci_port, sci_phy);
1574         else
1575                 dev_dbg(scic_to_dev(scic),
1576                         "%s: SCIC Controller linkup event from phy %d in "
1577                         "unexpected state %d\n", __func__, sci_phy->phy_index,
1578                         state);
1579 }
1580
1581
1582 void scic_sds_controller_link_down(
1583         struct scic_sds_controller *scic,
1584         struct scic_sds_port *sci_port,
1585         struct scic_sds_phy *sci_phy)
1586 {
1587         u32 state;
1588         scic_sds_controller_phy_handler_t link_down;
1589
1590         state = scic->state_machine.current_state_id;
1591         link_down = scic_sds_controller_state_handler_table[state].link_down;
1592
1593         if (link_down)
1594                 link_down(scic, sci_port, sci_phy);
1595         else
1596                 dev_dbg(scic_to_dev(scic),
1597                         "%s: SCIC Controller linkdown event from phy %d in "
1598                         "unexpected state %d\n",
1599                         __func__,
1600                         sci_phy->phy_index, state);
1601 }
1602
1603 /**
1604  * This is a helper method to determine if any remote devices on this
1605  * controller are still in the stopping state.
1606  *
1607  */
1608 static bool scic_sds_controller_has_remote_devices_stopping(
1609         struct scic_sds_controller *this_controller)
1610 {
1611         u32 index;
1612
1613         for (index = 0; index < this_controller->remote_node_entries; index++) {
1614                 if ((this_controller->device_table[index] != NULL) &&
1615                    (this_controller->device_table[index]->parent.state_machine.current_state_id
1616                     == SCI_BASE_REMOTE_DEVICE_STATE_STOPPING))
1617                         return true;
1618         }
1619
1620         return false;
1621 }
1622
1623 /**
1624  * This method is called by the remote device to inform the controller
1625  * object that the remote device has stopped.
1626  *
1627  */
1628
1629 void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic,
1630                                                struct scic_sds_remote_device *sci_dev)
1631 {
1632
1633         u32 state;
1634         scic_sds_controller_device_handler_t stopped;
1635
1636         state = scic->state_machine.current_state_id;
1637         stopped = scic_sds_controller_state_handler_table[state].device_stopped;
1638
1639         if (stopped)
1640                 stopped(scic, sci_dev);
1641         else {
1642                 dev_dbg(scic_to_dev(scic),
1643                         "%s: SCIC Controller 0x%p remote device stopped event "
1644                         "from device 0x%p in unexpected state  %d\n",
1645                         __func__, scic, sci_dev, state);
1646         }
1647 }
1648
1649
1650
1651 /**
1652  * This method will write the request value to the SCU Post Context Port (PCP)
1653  *    register. The method is used to suspend/resume ports, devices, and phys.
1654  * @this_controller: the controller on which to post the request.
1655  * @request: the request value to write to the post context port register.
1656  *
1657  */
1658 void scic_sds_controller_post_request(
1659         struct scic_sds_controller *this_controller,
1660         u32 request)
1661 {
1662         dev_dbg(scic_to_dev(this_controller),
1663                 "%s: SCIC Controller 0x%p post request 0x%08x\n",
1664                 __func__,
1665                 this_controller,
1666                 request);
1667
1668         writel(request, &this_controller->smu_registers->post_context_port);
1669 }
1670
1671 /**
1672  * This method will copy the soft copy of the task context into the physical
1673  *    memory accessible by the controller.
1674  * @this_controller: This parameter specifies the controller for which to copy
1675  *    the task context.
1676  * @this_request: This parameter specifies the request for which the task
1677  *    context is being copied.
1678  *
1679  * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
1680  * the physical memory version of the task context. Thus, all subsequent
1681  * updates to the task context are performed in the TC table (i.e. DMAable
1682  * memory).
1683  */
1684 void scic_sds_controller_copy_task_context(
1685         struct scic_sds_controller *this_controller,
1686         struct scic_sds_request *this_request)
1687 {
1688         struct scu_task_context *task_context_buffer;
1689
1690         task_context_buffer = scic_sds_controller_get_task_context_buffer(
1691                 this_controller, this_request->io_tag
1692                 );
1693
1694         memcpy(
1695                 task_context_buffer,
1696                 this_request->task_context_buffer,
1697                 SCI_FIELD_OFFSET(struct scu_task_context, sgl_snapshot_ac)
1698                 );
1699
1700         /*
1701          * The soft copy of the TC has now been copied into the TC table
1702          * accessible by the silicon.  Thus, any further changes to the TC
1703          * (e.g. TC termination) occur in the appropriate location. */
1704         this_request->task_context_buffer = task_context_buffer;
1705 }
1706
1707 /**
1708  * This method returns the task context buffer for the given io tag.
1709  * @this_controller:
1710  * @io_tag:
1711  *
1712  * struct scu_task_context*
1713  */
1714 struct scu_task_context *scic_sds_controller_get_task_context_buffer(
1715         struct scic_sds_controller *this_controller,
1716         u16 io_tag
1717         ) {
1718         u16 task_index = scic_sds_io_tag_get_index(io_tag);
1719
1720         if (task_index < this_controller->task_context_entries) {
1721                 return &this_controller->task_context_table[task_index];
1722         }
1723
1724         return NULL;
1725 }
1726
1727 /**
1728  * This method returns the sequence value from the io tag value.
1729  * @this_controller:
1730  * @io_tag:
1731  *
1732  * u16
1733  */
1734
1735 /**
1736  * This method returns the IO request associated with the tag value
1737  * @this_controller:
1738  * @io_tag:
1739  *
1740  * SCIC_SDS_IO_REQUEST_T* NULL if there is no valid IO request at the tag value
1741  */
1742 struct scic_sds_request *scic_sds_controller_get_io_request_from_tag(
1743         struct scic_sds_controller *this_controller,
1744         u16 io_tag
1745         ) {
1746         u16 task_index;
1747         u16 task_sequence;
1748
1749         task_index = scic_sds_io_tag_get_index(io_tag);
1750
1751         if (task_index  < this_controller->task_context_entries) {
1752                 if (this_controller->io_request_table[task_index] != NULL) {
1753                         task_sequence = scic_sds_io_tag_get_sequence(io_tag);
1754
1755                         if (task_sequence == this_controller->io_request_sequence[task_index]) {
1756                                 return this_controller->io_request_table[task_index];
1757                         }
1758                 }
1759         }
1760
1761         return NULL;
1762 }
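
/*
 * Illustrative sketch (not part of the driver): an IO tag packs a small
 * sequence count together with a task context index, which is how the
 * routine above rejects a stale tag whose sequence no longer matches
 * io_request_sequence[].  The 4-bit/12-bit split shown here is an
 * assumption made for illustration; the real layout is defined by the
 * scic_sds_io_tag_*() helpers.
 *
 *      #define EXAMPLE_TAG_SEQ_SHIFT   12
 *      #define EXAMPLE_TAG_INDEX_MASK  0x0fff
 *
 *      static u16 example_tag_construct(u16 sequence, u16 index)
 *      {
 *              return (sequence << EXAMPLE_TAG_SEQ_SHIFT) |
 *                     (index & EXAMPLE_TAG_INDEX_MASK);
 *      }
 *
 *      static u16 example_tag_get_index(u16 io_tag)
 *      {
 *              return io_tag & EXAMPLE_TAG_INDEX_MASK;
 *      }
 */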
1763
1764 /**
1765  * This method allocates a remote node index and reserves the remote node
1766  *    context space for use. This method can fail if there are no more remote
1767  *    node indexes available.
1768  * @this_controller: This is the controller object which contains the set of
1769  *    free remote node ids
1770  * @the_device: This is the device object which is requesting a remote node
1771  *    id
1772  * @node_id: This is the remote node id that is assigned to the device if one
1773  *    is available
1774  *
1775  * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no remote
1776  * node indexes available.
1777  */
1778 enum sci_status scic_sds_controller_allocate_remote_node_context(
1779         struct scic_sds_controller *this_controller,
1780         struct scic_sds_remote_device *the_device,
1781         u16 *node_id)
1782 {
1783         u16 node_index;
1784         u32 remote_node_count = scic_sds_remote_device_node_count(the_device);
1785
1786         node_index = scic_sds_remote_node_table_allocate_remote_node(
1787                 &this_controller->available_remote_nodes, remote_node_count
1788                 );
1789
1790         if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
1791                 this_controller->device_table[node_index] = the_device;
1792
1793                 *node_id = node_index;
1794
1795                 return SCI_SUCCESS;
1796         }
1797
1798         return SCI_FAILURE_INSUFFICIENT_RESOURCES;
1799 }
1800
1801 /**
1802  * This method frees the remote node index back to the available pool.  Once
1803  *    this is done the remote node context buffer is no longer valid and can
1804  *    not be used.
1805  * @this_controller:
1806  * @the_device:
1807  * @node_id:
1808  *
1809  */
1810 void scic_sds_controller_free_remote_node_context(
1811         struct scic_sds_controller *this_controller,
1812         struct scic_sds_remote_device *the_device,
1813         u16 node_id)
1814 {
1815         u32 remote_node_count = scic_sds_remote_device_node_count(the_device);
1816
1817         if (this_controller->device_table[node_id] == the_device) {
1818                 this_controller->device_table[node_id] = NULL;
1819
1820                 scic_sds_remote_node_table_release_remote_node_index(
1821                         &this_controller->available_remote_nodes, remote_node_count, node_id
1822                         );
1823         }
1824 }
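
/*
 * Illustrative sketch (not part of the driver): the expected pairing of the
 * remote node context allocate/free helpers above.  The calling context,
 * the sci_dev variable, and the error handling are assumptions made for
 * illustration.
 *
 *      u16 node_id;
 *      enum sci_status status;
 *
 *      status = scic_sds_controller_allocate_remote_node_context(scic,
 *                                                                sci_dev,
 *                                                                &node_id);
 *      if (status != SCI_SUCCESS)
 *              return status;          // no remote node indexes left
 *
 *      // ... use the remote node context for the lifetime of the device ...
 *
 *      scic_sds_controller_free_remote_node_context(scic, sci_dev, node_id);
 */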
1825
1826 /**
1827  * This method returns the union scu_remote_node_context for the specified remote
1828  *    node id.
1829  * @this_controller:
1830  * @node_id:
1831  *
1832  * union scu_remote_node_context*
1833  */
1834 union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
1835         struct scic_sds_controller *this_controller,
1836         u16 node_id
1837         ) {
1838         if (
1839                 (node_id < this_controller->remote_node_entries)
1840                 && (this_controller->device_table[node_id] != NULL)
1841                 ) {
1842                 return &this_controller->remote_node_context_table[node_id];
1843         }
1844
1845         return NULL;
1846 }
1847
1848 /**
1849  *
1850  * @response_buffer: This is the buffer into which the D2H register FIS will be
1851  *    constructed.
1852  * @frame_header: This is the frame header returned by the hardware.
1853  * @frame_buffer: This is the frame buffer returned by the hardware.
1854  *
1855  * This method will combine the frame header and frame buffer to create a SATA
1856  * D2H register FIS.
1857  */
1858 void scic_sds_controller_copy_sata_response(
1859         void *response_buffer,
1860         void *frame_header,
1861         void *frame_buffer)
1862 {
1863         memcpy(
1864                 response_buffer,
1865                 frame_header,
1866                 sizeof(u32)
1867                 );
1868
1869         memcpy(
1870                 (char *)((char *)response_buffer + sizeof(u32)),
1871                 frame_buffer,
1872                 sizeof(struct sata_fis_reg_d2h) - sizeof(u32)
1873                 );
1874 }
1875
1876 /**
1877  * This method releases the frame once this is done the frame is available for
1878  * This method releases the frame; once this is done the frame is available for
1879  *    buffer is no longer valid. The UF queue get pointer is only updated if UF
1880  *    control indicates this is appropriate.
1881  * @this_controller:
1882  * @frame_index:
1883  *
1884  */
1885 void scic_sds_controller_release_frame(
1886         struct scic_sds_controller *this_controller,
1887         u32 frame_index)
1888 {
1889         if (scic_sds_unsolicited_frame_control_release_frame(
1890                     &this_controller->uf_control, frame_index) == true)
1891                 writel(this_controller->uf_control.get,
1892                         &this_controller->scu_registers->sdma.unsolicited_frame_get_pointer);
1893 }
1894
1895 /**
1896  * This method sets user parameters and OEM parameters to default values.
1897  *    Users can override these values utilizing the scic_user_parameters_set()
1898  *    and scic_oem_parameters_set() methods.
1899  * @scic: This parameter specifies the controller for which to set the
1900  *    configuration parameters to their default values.
1901  *
1902  */
1903 static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic)
1904 {
1905         struct isci_host *ihost = sci_object_get_association(scic);
1906         u16 index;
1907
1908         /* Default to APC mode. */
1909         scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
1910
1911         /* Default to 1 concurrent device spin up. */
1912         scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1;
1913
1914         /* Default to no SSC operation. */
1915         scic->oem_parameters.sds1.controller.do_enable_ssc = false;
1916
1917         /* Initialize all of the port parameter information to narrow ports. */
1918         for (index = 0; index < SCI_MAX_PORTS; index++) {
1919                 scic->oem_parameters.sds1.ports[index].phy_mask = 0;
1920         }
1921
1922         /* Initialize all of the phy parameter information. */
1923         for (index = 0; index < SCI_MAX_PHYS; index++) {
1924                 /* Default to 6G (i.e. Gen 3) for now. */
1925                 scic->user_parameters.sds1.phys[index].max_speed_generation = 3;
1926
1927                 /* the frequencies cannot be 0 */
1928                 scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f;
1929                 scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff;
1930                 scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
1931
1932                 /*
1933                  * Previous Vitesse based expanders had an arbitration issue that
1934                  * is worked around by having the upper 32-bits of the SAS address
1935                  * with a value greater than the Vitesse company identifier.
1936                  * Hence, usage of 0x5FCFFFFF. */
1937                 scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id;
1938                 scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF;
1939         }
1940
1941         scic->user_parameters.sds1.stp_inactivity_timeout = 5;
1942         scic->user_parameters.sds1.ssp_inactivity_timeout = 5;
1943         scic->user_parameters.sds1.stp_max_occupancy_timeout = 5;
1944         scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20;
1945         scic->user_parameters.sds1.no_outbound_task_timeout = 20;
1946 }
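
/*
 * Illustrative sketch (not part of the driver): how a core user could
 * override one of the defaults above through scic_user_parameters_set()
 * before the controller is started.  The snapshot-and-modify pattern and
 * the Gen 2 speed cap are assumptions made purely for illustration.
 *
 *      union scic_user_parameters user_parms = scic->user_parameters;
 *      u16 i;
 *
 *      for (i = 0; i < SCI_MAX_PHYS; i++)
 *              user_parms.sds1.phys[i].max_speed_generation = 2;
 *
 *      if (scic_user_parameters_set(scic, &user_parms) != SCI_SUCCESS)
 *              dev_warn(scic_to_dev(scic), "user parameters rejected\n");
 */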
1947
1948 /**
1949  * scic_controller_initialize() - This method will initialize the controller
1950  *    hardware managed by the supplied core controller object.  This method
1951  *    will bring the physical controller hardware out of reset and enable the
1952  *    core to determine the capabilities of the hardware being managed.  Thus,
1953  *    the core controller can determine its exact physical (DMA capable)
1954  *    memory requirements.
1955  * @controller: This parameter specifies the controller to be initialized.
1956  *
1957  * The SCI Core user must have called scic_controller_construct() on the
1958  * supplied controller object previously. Indicate if the controller was
1959  * successfully initialized or if it failed in some way. SCI_SUCCESS This value
1960  * is returned if the controller hardware was successfully initialized.
1961  */
1962 enum sci_status scic_controller_initialize(
1963         struct scic_sds_controller *scic)
1964 {
1965         enum sci_status status = SCI_FAILURE_INVALID_STATE;
1966         scic_sds_controller_handler_t initialize;
1967         u32 state;
1968
1969         state = scic->state_machine.current_state_id;
1970         initialize = scic_sds_controller_state_handler_table[state].initialize;
1971
1972         if (initialize)
1973                 status = initialize(scic);
1974         else
1975                 dev_warn(scic_to_dev(scic),
1976                          "%s: SCIC Controller initialize operation requested "
1977                          "in invalid state %d\n",  __func__, state);
1978
1979         return status;
1980 }
1981
1982 /**
1983  * scic_controller_get_suggested_start_timeout() - This method returns the
1984  *    suggested scic_controller_start() timeout amount.  The user is free to
1985  *    use any timeout value, but this method provides the suggested minimum
1986  *    start timeout value.  The returned value is based upon empirical
1987  *    information determined as a result of interoperability testing.
1988  * @controller: the handle to the controller object for which to return the
1989  *    suggested start timeout.
1990  *
1991  * This method returns the number of milliseconds for the suggested start
1992  * operation timeout.
1993  */
1994 u32 scic_controller_get_suggested_start_timeout(
1995         struct scic_sds_controller *sc)
1996 {
1997         /* Validate the user supplied parameters. */
1998         if (sc == NULL)
1999                 return 0;
2000
2001         /*
2002          * The suggested minimum timeout value for a controller start operation:
2003          *
2004          *     Signature FIS Timeout
2005          *   + Phy Start Timeout
2006          *   + Number of Phy Spin Up Intervals
2007          *   ---------------------------------
2008          *   Number of milliseconds for the controller start operation.
2009          *
2010          * NOTE: The number of phy spin up intervals will be equivalent
2011          *       to the number of phys divided by the number of phys allowed
2012          *       per interval - 1 (once OEM parameters are supported).
2013          *       Currently we assume only 1 phy per interval. */
2014
2015         return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
2016                 + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
2017                 + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
2018 }
2019
2020 /**
2021  * scic_controller_start() - This method will start the supplied core
2022  *    controller.  This method will start the staggered spin up operation.  The
2023  *    SCI User completion callback is called when the following conditions are
2024  *    met: -# the return status of this method is SCI_SUCCESS. -# after all of
2025  *    the phys have successfully started or been given the opportunity to start.
2026  * @controller: the handle to the controller object to start.
2027  * @timeout: This parameter specifies the number of milliseconds in which the
2028  *    start operation should complete.
2029  *
2030  * The SCI Core user must have filled in the physical memory descriptor
2031  * structure via the sci_controller_get_memory_descriptor_list() method. The
2032  * SCI Core user must have invoked the scic_controller_initialize() method
2033  * prior to invoking this method. The controller must be in the INITIALIZED or
2034  * STARTED state. Indicate if the controller start method succeeded or failed
2035  * in some way. SCI_SUCCESS if the start operation succeeded.
2036  * SCI_WARNING_ALREADY_IN_STATE if the controller is already in the STARTED
2037  * state. SCI_FAILURE_INVALID_STATE if the controller is not either in the
2038  * INITIALIZED or STARTED states. SCI_FAILURE_INVALID_MEMORY_DESCRIPTOR if
2039  * there are inconsistent or invalid values in the supplied
2040  * struct sci_physical_memory_descriptor array.
2041  */
2042 enum sci_status scic_controller_start(
2043         struct scic_sds_controller *scic,
2044         u32 timeout)
2045 {
2046         enum sci_status status = SCI_FAILURE_INVALID_STATE;
2047         scic_sds_controller_timed_handler_t start;
2048         u32 state;
2049
2050         state = scic->state_machine.current_state_id;
2051         start = scic_sds_controller_state_handler_table[state].start;
2052
2053         if (start)
2054                 status = start(scic, timeout);
2055         else
2056                 dev_warn(scic_to_dev(scic),
2057                          "%s: SCIC Controller start operation requested in "
2058                          "invalid state %d\n", __func__, state);
2059
2060         return status;
2061 }
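
/*
 * Illustrative sketch (not part of the driver): a minimal bring-up sequence
 * built from the routines above.  The error handling and the point at which
 * interrupts are enabled are assumptions made for illustration.
 *
 *      enum sci_status status;
 *      u32 timeout = scic_controller_get_suggested_start_timeout(scic);
 *
 *      status = scic_controller_initialize(scic);
 *      if (status != SCI_SUCCESS)
 *              return status;
 *
 *      status = scic_controller_start(scic, timeout);
 *      if (status != SCI_SUCCESS)
 *              return status;
 *
 *      scic_controller_enable_interrupts(scic);
 */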
2062
2063 /**
2064  * scic_controller_stop() - This method will stop an individual controller
2065  *    object.This method will invoke the associated user callback upon
2066  *    object.  This method will invoke the associated user callback upon
2067  *    conditions are met: -# the method return status is SCI_SUCCESS. -# the
2068  *    controller has been quiesced. This method will ensure that all IO
2069  *    requests are quiesced, phys are stopped, and all additional operation by
2070  *    the hardware is halted.
2071  * @controller: the handle to the controller object to stop.
2072  * @timeout: This parameter specifies the number of milliseconds in which the
2073  *    stop operation should complete.
2074  *
2075  * The controller must be in the STARTED or STOPPED state. Indicate if the
2076  * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
2077  * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
2078  * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
2079  * controller is not either in the STARTED or STOPPED states.
2080  */
2081 enum sci_status scic_controller_stop(
2082         struct scic_sds_controller *scic,
2083         u32 timeout)
2084 {
2085         enum sci_status status = SCI_FAILURE_INVALID_STATE;
2086         scic_sds_controller_timed_handler_t stop;
2087         u32 state;
2088
2089         state = scic->state_machine.current_state_id;
2090         stop = scic_sds_controller_state_handler_table[state].stop;
2091
2092         if (stop)
2093                 status = stop(scic, timeout);
2094         else
2095                 dev_warn(scic_to_dev(scic),
2096                          "%s: SCIC Controller stop operation requested in "
2097                          "invalid state %d\n", __func__, state);
2098
2099         return status;
2100 }
2101
2102 /**
2103  * scic_controller_reset() - This method will reset the supplied core
2104  *    controller regardless of the state of said controller.  This operation is
2105  *    considered destructive.  In other words, all current operations are wiped
2106  *    out.  No IO completions for outstanding devices occur.  Outstanding IO
2107  *    requests are not aborted or completed at the actual remote device.
2108  * @controller: the handle to the controller object to reset.
2109  *
2110  * Indicate if the controller reset method succeeded or failed in some way.
2111  * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
2112  * the controller reset operation is unable to complete.
2113  */
2114 enum sci_status scic_controller_reset(
2115         struct scic_sds_controller *scic)
2116 {
2117         enum sci_status status = SCI_FAILURE_INVALID_STATE;
2118         scic_sds_controller_handler_t reset;
2119         u32 state;
2120
2121         state = scic->state_machine.current_state_id;
2122         reset = scic_sds_controller_state_handler_table[state].reset;
2123
2124         if (reset)
2125                 status = reset(scic);
2126         else
2127                 dev_warn(scic_to_dev(scic),
2128                          "%s: SCIC Controller reset operation requested in "
2129                          "invalid state %d\n",  __func__, state);
2130
2131         return status;
2132 }
2133
2134 /**
2135  * scic_controller_start_io() - This method is called by the SCI user to
2136  *    send/start an IO request. If the method invocation is successful, then
2137  *    the IO request has been queued to the hardware for processing.
2138  * @controller: the handle to the controller object for which to start an IO
2139  *    request.
2140  * @remote_device: the handle to the remote device object for which to start an
2141  *    IO request.
2142  * @io_request: the handle to the io request object to start.
2143  * @io_tag: This parameter specifies a previously allocated IO tag that the
2144  *    user desires to be utilized for this request. This parameter is optional.
2145  *     The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value
2146  *    for this parameter.
2147  *
2148  * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
2149  * to ensure that each of the methods that may allocate or free available IO
2150  * tags are handled in a mutually exclusive manner.  This method is one of said
2151  * methods requiring proper critical code section protection (e.g. semaphore,
2152  * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags.  As a
2153  * result, it is expected the user will have set the NCQ tag field in the host
2154  * to device register FIS prior to calling this method.  There is also a
2155  * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking
2156  * the scic_controller_start_io() method. See scic_controller_allocate_io_tag()
2157  * for more information on allocating a tag. Indicate if the controller
2158  * successfully started the IO request. SCI_IO_SUCCESS if the IO request was
2159  * successfully started. Determine the failure situations and return values.
2160  */
2161 enum sci_io_status scic_controller_start_io(
2162         struct scic_sds_controller *scic,
2163         struct scic_sds_remote_device *remote_device,
2164         struct scic_sds_request *io_request,
2165         u16 io_tag)
2166 {
2167         u32 state;
2168         scic_sds_controller_start_request_handler_t start_io;
2169
2170         state = scic->state_machine.current_state_id;
2171         start_io = scic_sds_controller_state_handler_table[state].start_io;
2172
2173         return start_io(scic,
2174                         (struct sci_base_remote_device *) remote_device,
2175                         (struct sci_base_request *)io_request, io_tag);
2176 }
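
/*
 * Illustrative sketch (not part of the driver): the IO tag and request
 * lifecycle implied by the routine above when the caller allocates the tag
 * directly.  The error handling is an assumption for illustration, and the
 * mutual exclusion required around tag allocation/free remains the caller's
 * responsibility as documented for those routines.
 *
 *      u16 io_tag = scic_controller_allocate_io_tag(scic);
 *
 *      if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG)
 *              return SCI_FAILURE_INSUFFICIENT_RESOURCES;
 *
 *      if (scic_controller_start_io(scic, remote_device, io_request,
 *                                   io_tag) != SCI_IO_SUCCESS) {
 *              scic_controller_free_io_tag(scic, io_tag);
 *              return SCI_FAILURE;
 *      }
 *
 *      // ... later, once the request has completed ...
 *      scic_controller_complete_io(scic, remote_device, io_request);
 *      scic_controller_free_io_tag(scic, io_tag);
 */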
2177
2178 /**
2179  * scic_controller_terminate_request() - This method is called by the SCI Core
2180  *    user to terminate an ongoing (i.e. started) core IO request.  This does
2181  *    not abort the IO request at the target, but rather removes the IO request
2182  *    from the host controller.
2183  * @controller: the handle to the controller object for which to terminate a
2184  *    request.
2185  * @remote_device: the handle to the remote device object for which to
2186  *    terminate a request.
2187  * @request: the handle to the io or task management request object to
2188  *    terminate.
2189  *
2190  * Indicate if the controller successfully began the terminate process for the
2191  * IO request. SCI_SUCCESS if the terminate process was successfully started
2192  * for the request. Determine the failure situations and return values.
2193  */
2194 enum sci_status scic_controller_terminate_request(
2195         struct scic_sds_controller *scic,
2196         struct scic_sds_remote_device *remote_device,
2197         struct scic_sds_request *request)
2198 {
2199         scic_sds_controller_request_handler_t terminate_request;
2200         u32 state;
2201
2202         state = scic->state_machine.current_state_id;
2203         terminate_request = scic_sds_controller_state_handler_table[state].terminate_request;
2204
2205         return terminate_request(scic,
2206                                  (struct sci_base_remote_device *)remote_device,
2207                                  (struct sci_base_request *)request);
2208 }
2209
2210 /**
2211  * scic_controller_complete_io() - This method will perform core specific
2212  *    completion operations for an IO request.  After this method is invoked,
2213  *    the user should consider the IO request as invalid until it is properly
2214  *    reused (i.e. re-constructed).
2215  * @controller: The handle to the controller object for which to complete the
2216  *    IO request.
2217  * @remote_device: The handle to the remote device object for which to complete
2218  *    the IO request.
2219  * @io_request: the handle to the io request object to complete.
2220  *
2221  * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
2222  * to ensure that each of the methods that may allocate or free available IO
2223  * tags are handled in a mutually exclusive manner.  This method is one of said
2224  * methods requiring proper critical code section protection (e.g. semaphore,
2225  * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
2226  * Core user, using the scic_controller_allocate_io_tag() method, then it is
2227  * the responsibility of the caller to invoke the scic_controller_free_io_tag()
2228  * method to free the tag (i.e. this method will not free the IO tag). Indicate
2229  * if the controller successfully completed the IO request. SCI_SUCCESS if the
2230  * completion process was successful.
2231  */
2232 enum sci_status scic_controller_complete_io(
2233         struct scic_sds_controller *scic,
2234         struct scic_sds_remote_device *remote_device,
2235         struct scic_sds_request *io_request)
2236 {
2237         u32 state;
2238         scic_sds_controller_request_handler_t complete_io;
2239
2240         state = scic->state_machine.current_state_id;
2241         complete_io = scic_sds_controller_state_handler_table[state].complete_io;
2242
2243         return complete_io(scic,
2244                            (struct sci_base_remote_device *)remote_device,
2245                            (struct sci_base_request *)io_request);
2246 }
2247
2248 /**
2249  * scic_controller_start_task() - This method is called by the SCIC user to
2250  *    send/start a framework task management request.
2251  * @controller: the handle to the controller object for which to start the task
2252  *    management request.
2253  * @remote_device: the handle to the remote device object for which to start
2254  *    the task management request.
2255  * @task_request: the handle to the task request object to start.
2256  * @io_tag: This parameter specifies a previously allocated IO tag that the
2257  *    user desires to be utilized for this request.  Note this is not the io_tag
2258  *    of the request being managed.  It is to be utilized for the task request
2259  *    itself. This parameter is optional.  The user is allowed to supply
2260  *    SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter.
2261  *
2262  * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
2263  * to ensure that each of the methods that may allocate or free available IO
2264  * tags are handled in a mutually exclusive manner.  This method is one of said
2265  * methods requiring proper critical code section protection (e.g. semaphore,
2266  * spin-lock, etc.). - The user must synchronize this task with completion
2267  * queue processing.  If they are not synchronized then it is possible for the
2268  * queue processing.  If they are not synchronized then it is possible that the
2269  * io requests that are being managed by the task request complete before the
2270  * task request is started. See scic_controller_allocate_io_tag() for more
2271  * information on allocating a tag. Indicate if the controller successfully
2272  * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is
2273  * returned if there is/are task(s) outstanding that require termination or
2274  * completion before this request can succeed.
2275  */
2276 enum sci_task_status scic_controller_start_task(
2277         struct scic_sds_controller *scic,
2278         struct scic_sds_remote_device *remote_device,
2279         struct scic_sds_request *task_request,
2280         u16 task_tag)
2281 {
2282         u32 state;
2283         scic_sds_controller_start_request_handler_t start_task;
2284         enum sci_task_status status = SCI_TASK_FAILURE_INVALID_STATE;
2285
2286         state = scic->state_machine.current_state_id;
2287         start_task = scic_sds_controller_state_handler_table[state].start_task;
2288
2289         if (start_task)
2290                 status = start_task(scic,
2291                                     (struct sci_base_remote_device *)remote_device,
2292                                     (struct sci_base_request *)task_request,
2293                                     task_tag);
2294         else
2295                 dev_warn(scic_to_dev(scic),
2296                          "%s: SCIC Controller starting task from invalid "
2297                          "state\n",
2298                          __func__);
2299
2300         return status;
2301 }
2302
2303 /**
2304  * scic_controller_complete_task() - This method will perform core specific
2305  *    completion operations for task management request. After this method is
2306  *    invoked, the user should consider the task request as invalid until it is
2307  *    properly reused (i.e. re-constructed).
2308  * @controller: The handle to the controller object for which to complete the
2309  *    task management request.
2310  * @remote_device: The handle to the remote device object for which to complete
2311  *    the task management request.
2312  * @task_request: the handle to the task management request object to complete.
2313  *
2314  * Indicate if the controller successfully completed the task management
2315  * request. SCI_SUCCESS if the completion process was successful.
2316  */
2317 enum sci_status scic_controller_complete_task(
2318         struct scic_sds_controller *scic,
2319         struct scic_sds_remote_device *remote_device,
2320         struct scic_sds_request *task_request)
2321 {
2322         u32 state;
2323         scic_sds_controller_request_handler_t complete_task;
2324         enum sci_status status = SCI_FAILURE_INVALID_STATE;
2325
2326         state = scic->state_machine.current_state_id;
2327         complete_task = scic_sds_controller_state_handler_table[state].complete_task;
2328
2329         if (complete_task)
2330                 status = complete_task(scic,
2331                                        (struct sci_base_remote_device *)remote_device,
2332                                        (struct sci_base_request *)task_request);
2333         else
2334                 dev_warn(scic_to_dev(scic),
2335                          "%s: SCIC Controller completing task from invalid "
2336                          "state\n",
2337                          __func__);
2338
2339         return status;
2340 }
2341
2342
2343 /**
2344  * scic_controller_get_port_handle() - This method simply provides the user
2345  *    with a unique handle for a given SAS/SATA core port index.
2346  * @controller: This parameter represents the handle to the controller object
2347  *    from which to retrieve a port (SAS or SATA) handle.
2348  * @port_index: This parameter specifies the port index in the controller for
2349  *    which to retrieve the port handle. 0 <= port_index < maximum number of
2350  *    phys.
2351  * @port_handle: This parameter specifies the retrieved port handle to be
2352  *    provided to the caller.
2353  *
2354  * Indicate if the retrieval of the port handle was successful. SCI_SUCCESS
2355  * This value is returned if the retrieval was successful.
2356  * SCI_FAILURE_INVALID_PORT This value is returned if the supplied port id is
2357  * not in the supported range.
2358  */
2359 enum sci_status scic_controller_get_port_handle(
2360         struct scic_sds_controller *scic,
2361         u8 port_index,
2362         struct scic_sds_port **port_handle)
2363 {
2364         if (port_index < scic->logical_port_entries) {
2365                 *port_handle = &scic->port_table[port_index];
2366
2367                 return SCI_SUCCESS;
2368         }
2369
2370         return SCI_FAILURE_INVALID_PORT;
2371 }
2372
2373 /**
2374  * scic_controller_get_phy_handle() - This method simply provides the user with
2375  *    a unique handle for a given SAS/SATA phy index/identifier.
2376  * @controller: This parameter represents the handle to the controller object
2377  *    from which to retrieve a phy (SAS or SATA) handle.
2378  * @phy_index: This parameter specifies the phy index in the controller for
2379  *    which to retrieve the phy handle. 0 <= phy_index < maximum number of phys.
2380  * @phy_handle: This parameter specifies the retrieved phy handle to be
2381  *    provided to the caller.
2382  *
2383  * Indicate if the retrieval of the phy handle was successful. SCI_SUCCESS This
2384  * value is returned if the retrieval was successful. SCI_FAILURE_INVALID_PHY
2385  * This value is returned if the supplied phy id is not in the supported range.
2386  */
2387 enum sci_status scic_controller_get_phy_handle(
2388         struct scic_sds_controller *scic,
2389         u8 phy_index,
2390         struct scic_sds_phy **phy_handle)
2391 {
2392         if (phy_index < ARRAY_SIZE(scic->phy_table)) {
2393                 *phy_handle = &scic->phy_table[phy_index];
2394
2395                 return SCI_SUCCESS;
2396         }
2397
2398         dev_err(scic_to_dev(scic),
2399                 "%s: Controller:0x%p PhyId:0x%x invalid phy index\n",
2400                 __func__, scic, phy_index);
2401
2402         return SCI_FAILURE_INVALID_PHY;
2403 }
2404
2405 /**
2406  * scic_controller_allocate_io_tag() - This method will allocate a tag from the
2407  *    pool of free IO tags. Direct allocation of IO tags by the SCI Core user
2408  *    is optional. The scic_controller_start_io() method will allocate an IO
2409  *    tag if this method is not utilized and the tag is not supplied to the IO
2410  *    construct routine.  Direct allocation of IO tags may provide additional
2411  *    performance improvements in environments capable of supporting this usage
2412  *    model.  Additionally, direct allocation of IO tags also provides
2413  *    additional flexibility to the SCI Core user.  Specifically, the user may
2414  *    retain IO tags across the lives of multiple IO requests.
2415  * @controller: the handle to the controller object for which to allocate the
2416  *    tag.
2417  *
2418  * IO tags are a protected resource.  It is incumbent upon the SCI Core user to
2419  * ensure that each of the methods that may allocate or free available IO tags
2420  * are handled in a mutually exclusive manner.  This method is one of said
2421  * methods requiring proper critical code section protection (e.g. semaphore,
2422  * spin-lock, etc.). An unsigned integer representing an available IO tag.
2423  * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no
2424  * currently available tags to be allocated. All return other values indicate a
2425  * legitimate tag.
2426  */
2427 u16 scic_controller_allocate_io_tag(
2428         struct scic_sds_controller *scic)
2429 {
2430         u16 task_context;
2431         u16 sequence_count;
2432
2433         if (!sci_pool_empty(scic->tci_pool)) {
2434                 sci_pool_get(scic->tci_pool, task_context);
2435
2436                 sequence_count = scic->io_request_sequence[task_context];
2437
2438                 return scic_sds_io_tag_construct(sequence_count, task_context);
2439         }
2440
2441         return SCI_CONTROLLER_INVALID_IO_TAG;
2442 }
2443
2444 /**
2445  * scic_controller_free_io_tag() - This method will free an IO tag to the pool
2446  *    of free IO tags. This method provides the SCI Core user more flexibility
2447  *    with regards to IO tags.  The user may desire to keep an IO tag after an
2448  *    IO request has completed, because they plan on re-using the tag for a
2449  *    subsequent IO request.  This method is only legal if the tag was
2450  *    allocated via scic_controller_allocate_io_tag().
2451  * @controller: This parameter specifies the handle to the controller object
2452  *    for which to free/return the tag.
2453  * @io_tag: This parameter represents the tag to be freed to the pool of
2454  *    available tags.
2455  *
2456  * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
2457  * to ensure that each of the methods that may allocate or free available IO
2458  * tags are handled in a mutually exclusive manner.  This method is one of said
2459  * methods requiring proper critical code section protection (e.g. semaphore,
2460  * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
2461  * Core user, using the scic_controller_allocate_io_tag() method, then it is
2462  * the responsibility of the caller to invoke this method to free the tag. This
2463  * method returns an indication of whether the tag was successfully put back
2464  * (freed) to the pool of available tags. SCI_SUCCESS This return value
2465  * indicates the tag was successfully placed into the pool of available IO
2466  * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag
2467  * is not a valid IO tag value.
2468  */
2469 enum sci_status scic_controller_free_io_tag(
2470         struct scic_sds_controller *scic,
2471         u16 io_tag)
2472 {
2473         u16 sequence;
2474         u16 index;
2475
2476         BUG_ON(io_tag == SCI_CONTROLLER_INVALID_IO_TAG);
2477
2478         sequence = scic_sds_io_tag_get_sequence(io_tag);
2479         index    = scic_sds_io_tag_get_index(io_tag);
2480
2481         if (!sci_pool_full(scic->tci_pool)) {
2482                 if (sequence == scic->io_request_sequence[index]) {
2483                         scic_sds_io_sequence_increment(
2484                                 scic->io_request_sequence[index]);
2485
2486                         sci_pool_put(scic->tci_pool, index);
2487
2488                         return SCI_SUCCESS;
2489                 }
2490         }
2491
2492         return SCI_FAILURE_INVALID_IO_TAG;
2493 }
2494
2495 void scic_controller_enable_interrupts(
2496         struct scic_sds_controller *scic)
2497 {
2498         BUG_ON(scic->smu_registers == NULL);
2499         writel(0, &scic->smu_registers->interrupt_mask);
2500 }
2501
2502 void scic_controller_disable_interrupts(
2503         struct scic_sds_controller *scic)
2504 {
2505         BUG_ON(scic->smu_registers == NULL);
2506         writel(0xffffffff, &scic->smu_registers->interrupt_mask);
2507 }
2508
2509 static enum sci_status scic_controller_set_mode(
2510         struct scic_sds_controller *scic,
2511         enum sci_controller_mode operating_mode)
2512 {
2513         enum sci_status status          = SCI_SUCCESS;
2514
2515         if ((scic->state_machine.current_state_id ==
2516                                 SCI_BASE_CONTROLLER_STATE_INITIALIZING) ||
2517             (scic->state_machine.current_state_id ==
2518                                 SCI_BASE_CONTROLLER_STATE_INITIALIZED)) {
2519                 switch (operating_mode) {
2520                 case SCI_MODE_SPEED:
2521                         scic->remote_node_entries      = SCI_MAX_REMOTE_DEVICES;
2522                         scic->task_context_entries     = SCU_IO_REQUEST_COUNT;
2523                         scic->uf_control.buffers.count =
2524                                 SCU_UNSOLICITED_FRAME_COUNT;
2525                         scic->completion_event_entries = SCU_EVENT_COUNT;
2526                         scic->completion_queue_entries =
2527                                 SCU_COMPLETION_QUEUE_COUNT;
2528                         scic_sds_controller_build_memory_descriptor_table(scic);
2529                         break;
2530
2531                 case SCI_MODE_SIZE:
2532                         scic->remote_node_entries      = SCI_MIN_REMOTE_DEVICES;
2533                         scic->task_context_entries     = SCI_MIN_IO_REQUESTS;
2534                         scic->uf_control.buffers.count =
2535                                 SCU_MIN_UNSOLICITED_FRAMES;
2536                         scic->completion_event_entries = SCU_MIN_EVENTS;
2537                         scic->completion_queue_entries =
2538                                 SCU_MIN_COMPLETION_QUEUE_ENTRIES;
2539                         scic_sds_controller_build_memory_descriptor_table(scic);
2540                         break;
2541
2542                 default:
2543                         status = SCI_FAILURE_INVALID_PARAMETER_VALUE;
2544                         break;
2545                 }
2546         } else
2547                 status = SCI_FAILURE_INVALID_STATE;
2548
2549         return status;
2550 }
2551
2552 /**
2553  * scic_sds_controller_reset_hardware() -
2554  *
2555  * This method will reset the controller hardware.
2556  */
2557 static void scic_sds_controller_reset_hardware(
2558         struct scic_sds_controller *scic)
2559 {
2560         /* Disable interrupts so we don't take any spurious interrupts */
2561         scic_controller_disable_interrupts(scic);
2562
2563         /* Reset the SCU */
2564         writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control);
2565
2566         /* Delay for 1ms before clearing the CQP and UFQPR. */
2567         udelay(1000);
2568
2569         /* The write to the CQGR clears the CQP */
2570         writel(0x00000000, &scic->smu_registers->completion_queue_get);
2571
2572         /* The write to the UFQGP clears the UFQPR */
2573         writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
2574 }
2575
2576 enum sci_status scic_user_parameters_set(
2577         struct scic_sds_controller *scic,
2578         union scic_user_parameters *scic_parms)
2579 {
2580         u32 state = scic->state_machine.current_state_id;
2581
2582         if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
2583             state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
2584             state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
2585                 u16 index;
2586
2587                 /*
2588                  * Validate the user parameters.  If they are not legal, then
2589                  * return a failure.
2590                  */
2591                 for (index = 0; index < SCI_MAX_PHYS; index++) {
2592                         struct sci_phy_user_params *user_phy;
2593
2594                         user_phy = &scic_parms->sds1.phys[index];
2595
2596                         if (!((user_phy->max_speed_generation <=
2597                                                 SCIC_SDS_PARM_MAX_SPEED) &&
2598                               (user_phy->max_speed_generation >
2599                                                 SCIC_SDS_PARM_NO_SPEED)))
2600                                 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2601
2602                         if ((user_phy->in_connection_align_insertion_frequency <
2603                                                 3) ||
2604                             (user_phy->align_insertion_frequency == 0) ||
2605                             (user_phy->
2606                                 notify_enable_spin_up_insertion_frequency == 0))
2607                                 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2613                 }
2614
2615                 if ((scic_parms->sds1.stp_inactivity_timeout == 0) ||
2616                     (scic_parms->sds1.ssp_inactivity_timeout == 0) ||
2617                     (scic_parms->sds1.stp_max_occupancy_timeout == 0) ||
2618                     (scic_parms->sds1.ssp_max_occupancy_timeout == 0) ||
2619                     (scic_parms->sds1.no_outbound_task_timeout == 0))
2620                         return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2621
2622                 memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms));
2623
2624                 return SCI_SUCCESS;
2625         }
2626
2627         return SCI_FAILURE_INVALID_STATE;
2628 }
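/*
 * Minimal usage sketch (illustrative only; "current_defaults" and the error
 * handling are hypothetical, not part of this driver): a caller adjusts a
 * union scic_user_parameters and applies it while the controller is still in
 * the RESET/INITIALIZING/INITIALIZED window checked above, e.g.:
 *
 *	union scic_user_parameters u = current_defaults;
 *
 *	u.sds1.phys[0].max_speed_generation = SCIC_SDS_PARM_MAX_SPEED;
 *	if (scic_user_parameters_set(scic, &u) != SCI_SUCCESS)
 *		return -EINVAL;
 */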
2629
2630 enum sci_status scic_oem_parameters_set(
2631         struct scic_sds_controller *scic,
2632         union scic_oem_parameters *scic_parms)
2633 {
2634         u32 state = scic->state_machine.current_state_id;
2635
2636         if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
2637             state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
2638             state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
2639                 u16 index;
2640                 u8  combined_phy_mask = 0;
2641
2642                 /*
2643                  * Validate the oem parameters.  If they are not legal, then
2644                  * return a failure. */
2645                 for (index = 0; index < SCI_MAX_PORTS; index++) {
2646                         if (scic_parms->sds1.ports[index].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
2647                                 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2648                 }
2649
2650                 for (index = 0; index < SCI_MAX_PHYS; index++) {
2651                         if ((scic_parms->sds1.phys[index].sas_address.high == 0) &&
2652                             (scic_parms->sds1.phys[index].sas_address.low == 0))
2653                                 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2654                 }
2655
2656                 if (scic_parms->sds1.controller.mode_type ==
2657                                 SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
2658                         for (index = 0; index < SCI_MAX_PHYS; index++) {
2659                                 if (scic_parms->sds1.ports[index].phy_mask != 0)
2660                                         return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2661                         }
2662                 } else if (scic_parms->sds1.controller.mode_type ==
2663                                 SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
2664                         for (index = 0; index < SCI_MAX_PHYS; index++)
2665                                 combined_phy_mask |= scic_parms->sds1.ports[index].phy_mask;
2666
2667                         if (combined_phy_mask == 0)
2668                                 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2669                 } else
2670                         return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2671
2672                 if (scic_parms->sds1.controller.max_concurrent_dev_spin_up >
2673                                 MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
2674                         return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2675
2676                 scic->oem_parameters.sds1 = scic_parms->sds1;
2677
2678                 return SCI_SUCCESS;
2679         }
2680
2681         return SCI_FAILURE_INVALID_STATE;
2682 }
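/*
 * Note on the validation above: in SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE
 * every per-port phy_mask must be zero (phy-to-port assignment is left to the
 * port configuration agent), while SCIC_PORT_MANUAL_CONFIGURATION_MODE
 * requires at least one phy to be claimed by some port; any other mode_type
 * is rejected.
 */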
2683
2684 void scic_oem_parameters_get(
2685         struct scic_sds_controller *scic,
2686         union scic_oem_parameters *scic_parms)
2687 {
2688         memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms));
2689 }
2690
2691 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
2692 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
2693 #define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
2694 #define INTERRUPT_COALESCE_NUMBER_MAX                        256
2695 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
2696 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28
2697
2698 /**
2699  * scic_controller_set_interrupt_coalescence() - This method allows the user to
2700  *    configure the interrupt coalescence.
2701  * @scic_controller: This parameter represents the handle to the controller
2702  *    object for which its interrupt coalesce register is overridden.
2703  * @coalesce_number: Used to control the number of entries in the Completion
2704  *    Queue before an interrupt is generated. If the number of entries exceeds
2705  *    this number, an interrupt will be generated. The valid range of the input
2706  *    is [0, 256]. A setting of 0 results in coalescing being disabled.
2707  * @coalesce_timeout: Timeout value in microseconds. The valid range of the
2708  *    input is [0, 2700000]. A setting of 0 is allowed and results in no
2709  *    interrupt coalescing timeout.
2710  *
2711  * Indicate if the user successfully set the interrupt coalesce parameters.
2712  * SCI_SUCCESS The user successfully updated the interrupt coalescence.
2713  * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
2714  */
2715 static enum sci_status scic_controller_set_interrupt_coalescence(
2716         struct scic_sds_controller *scic_controller,
2717         u32 coalesce_number,
2718         u32 coalesce_timeout)
2719 {
2720         u8 timeout_encode = 0;
2721         u32 min = 0;
2722         u32 max = 0;
2723
2724         /* Check if the input parameters fall in the range. */
2725         if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
2726                 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2727
2728         /*
2729          *  Defined encoding for interrupt coalescing timeout:
2730          *              Value   Min      Max     Units
2731          *              -----   ---      ---     -----
2732          *              0       -        -       Disabled
2733          *              1       13.3     20.0    ns
2734          *              2       26.7     40.0
2735          *              3       53.3     80.0
2736          *              4       106.7    160.0
2737          *              5       213.3    320.0
2738          *              6       426.7    640.0
2739          *              7       853.3    1280.0
2740          *              8       1.7      2.6     us
2741          *              9       3.4      5.1
2742          *              10      6.8      10.2
2743          *              11      13.7     20.5
2744          *              12      27.3     41.0
2745          *              13      54.6     81.9
2746          *              14      109.2    163.8
2747          *              15      218.5    327.7
2748          *              16      436.9    655.4
2749          *              17      873.8    1310.7
2750          *              18      1.7      2.6     ms
2751          *              19      3.5      5.2
2752          *              20      7.0      10.5
2753          *              21      14.0     21.0
2754          *              22      28.0     41.9
2755          *              23      55.9     83.9
2756          *              24      111.8    167.8
2757          *              25      223.7    335.5
2758          *              26      447.4    671.1
2759          *              27      894.8    1342.2
2760          *              28      1.8      2.7     s
2761          *              Others Undefined */
2762
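        /*
         * Worked example (for illustration): the default 250us timeout
         * programmed from scic_sds_controller_ready_state_enter() becomes
         * 25000 in 10ns units; the base window of [85, 128) is doubled once
         * per encode value until 25000 falls inside [21760, 32768) at
         * timeout_encode = 15, i.e. the 218.5 - 327.7 us row above.
         */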
2763         /*
2764          * Use the table above to decide the encode of interrupt coalescing timeout
2765          * value for register writing. */
2766         if (coalesce_timeout == 0)
2767                 timeout_encode = 0;
2768         else {
2769                 /* convert the timeout value into units of 10 ns. */
2770                 coalesce_timeout = coalesce_timeout * 100;
2771                 min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
2772                 max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
2773
2774                 /* get the encode of timeout for register writing. */
2775                 for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
2776                       timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
2777                       timeout_encode++) {
2778                         if (min <= coalesce_timeout &&  max > coalesce_timeout)
2779                                 break;
2780                         else if (coalesce_timeout >= max && coalesce_timeout < min * 2
2781                                  && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
2782                                 if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
2783                                         break;
2784                                 else {
2785                                         timeout_encode++;
2786                                         break;
2787                                 }
2788                         } else {
2789                                 max = max * 2;
2790                                 min = min * 2;
2791                         }
2792                 }
2793
2794                 if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
2795                         /* the value is out of range. */
2796                         return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2797         }
2798
2799         writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
2800                SMU_ICC_GEN_VAL(TIMER, timeout_encode),
2801                &scic_controller->smu_registers->interrupt_coalesce_control);
2802
2803
2804         scic_controller->interrupt_coalesce_number = (u16)coalesce_number;
2805         scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100;
2806
2807         return SCI_SUCCESS;
2808 }
2809
2810
2811 struct scic_sds_controller *scic_controller_alloc(struct device *dev)
2812 {
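        /*
         * devm allocation: the memory is released automatically when @dev is
         * unbound, so no matching kfree is required of the caller.
         */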
2813         return devm_kzalloc(dev, sizeof(struct scic_sds_controller), GFP_KERNEL);
2814 }
2815
2816 static enum sci_status
2817 default_controller_handler(struct scic_sds_controller *scic, const char *func)
2818 {
2819         dev_warn(scic_to_dev(scic), "%s: invalid state %d\n", func,
2820                  scic->state_machine.current_state_id);
2821
2822         return SCI_FAILURE_INVALID_STATE;
2823 }
2824
2825 static enum sci_status scic_sds_controller_default_start_operation_handler(
2826         struct scic_sds_controller *scic,
2827         struct sci_base_remote_device *remote_device,
2828         struct sci_base_request *io_request,
2829         u16 io_tag)
2830 {
2831         return default_controller_handler(scic, __func__);
2832 }
2833
2834 static enum sci_status scic_sds_controller_default_request_handler(
2835         struct scic_sds_controller *scic,
2836         struct sci_base_remote_device *remote_device,
2837         struct sci_base_request *io_request)
2838 {
2839         return default_controller_handler(scic, __func__);
2840 }
2841
2842 static enum sci_status
2843 scic_sds_controller_general_reset_handler(struct scic_sds_controller *scic)
2844 {
2845         /* The reset operation is not a graceful cleanup; just perform the state
2846          * transition.
2847          */
2848         sci_base_state_machine_change_state(&scic->state_machine,
2849                                             SCI_BASE_CONTROLLER_STATE_RESETTING);
2850
2851         return SCI_SUCCESS;
2852 }
2853
2854 static enum sci_status
2855 scic_sds_controller_reset_state_initialize_handler(struct scic_sds_controller *scic)
2856 {
2857         struct sci_base_state_machine *sm = &scic->state_machine;
2858         enum sci_status result = SCI_SUCCESS;
2859         struct isci_host *ihost;
2860         u32 index, state;
2861
2862         ihost = sci_object_get_association(scic);
2863
2864         sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_INITIALIZING);
2865
2866         scic->timeout_timer = isci_timer_create(ihost,
2867                                                 scic,
2868                                                 scic_sds_controller_timeout_handler);
2869
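        /*
         * Set up the internal phy start-up and power control helpers that the
         * start sequence relies on before touching the hardware.
         */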
2870         scic_sds_controller_initialize_phy_startup(scic);
2871
2872         scic_sds_controller_initialize_power_control(scic);
2873
2874         /*
2875          * There is nothing to do here for B0 since we do not have to
2876          * program the AFE registers.
2877          * @todo The AFE settings are supposed to be correct for the B0 but
2878          *       presently they seem to be wrong. */
2879         scic_sds_controller_afe_initialization(scic);
2880
2881         if (result == SCI_SUCCESS) {
2882                 u32 status;
2883                 u32 terminate_loop;
2884
2885                 /* Take the hardware out of reset */
2886                 writel(0, &scic->smu_registers->soft_reset_control);
2887
2888                 /*
2889                  * @todo Provide a meaningful error code for hardware failure
2890                  * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
2891                 result = SCI_FAILURE;
2892                 terminate_loop = 100;
2893
2894                 while (terminate_loop-- && (result != SCI_SUCCESS)) {
2895                         /* Loop until the hardware reports success */
2896                         udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
2897                         status = readl(&scic->smu_registers->control_status);
2898
2899                         if ((status & SCU_RAM_INIT_COMPLETED) ==
2900                                         SCU_RAM_INIT_COMPLETED)
2901                                 result = SCI_SUCCESS;
2902                 }
2903         }
2904
2905         if (result == SCI_SUCCESS) {
2906                 u32 max_supported_ports;
2907                 u32 max_supported_devices;
2908                 u32 max_supported_io_requests;
2909                 u32 device_context_capacity;
2910
2911                 /*
2912                  * Determine the actual device capacities that the
2913                  * hardware will support */
2914                 device_context_capacity =
2915                         readl(&scic->smu_registers->device_context_capacity);
2916
2917
2918                 max_supported_ports = smu_dcc_get_max_ports(device_context_capacity);
2919                 max_supported_devices = smu_dcc_get_max_remote_node_context(device_context_capacity);
2920                 max_supported_io_requests = smu_dcc_get_max_task_context(device_context_capacity);
2921
2922                 /*
2923                  * Make all PEs that are unassigned match up with the
2924                  * logical ports
2925                  */
2926                 for (index = 0; index < max_supported_ports; index++) {
2927                         struct scu_port_task_scheduler_group_registers *ptsg =
2928                                 &scic->scu_registers->peg0.ptsg;
2929
2930                         writel(index, &ptsg->protocol_engine[index]);
2931                 }
2932
2933                 /* Record the smaller of the two capacity values */
2934                 scic->logical_port_entries =
2935                         min(max_supported_ports, scic->logical_port_entries);
2936
2937                 scic->task_context_entries =
2938                         min(max_supported_io_requests,
2939                             scic->task_context_entries);
2940
2941                 scic->remote_node_entries =
2942                         min(max_supported_devices, scic->remote_node_entries);
2943
2944                 /*
2945                  * Now that we have the correct hardware reported minimum values
2946                  * build the MDL for the controller.  Default to a performance
2947                  * configuration.
2948                  */
2949                 scic_controller_set_mode(scic, SCI_MODE_SPEED);
2950         }
2951
2952         /* Initialize hardware PCI Relaxed ordering in DMA engines */
2953         if (result == SCI_SUCCESS) {
2954                 u32 dma_configuration;
2955
2956                 /* Configure the payload DMA */
2957                 dma_configuration =
2958                         readl(&scic->scu_registers->sdma.pdma_configuration);
2959                 dma_configuration |=
2960                         SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2961                 writel(dma_configuration,
2962                         &scic->scu_registers->sdma.pdma_configuration);
2963
2964                 /* Configure the control DMA */
2965                 dma_configuration =
2966                         readl(&scic->scu_registers->sdma.cdma_configuration);
2967                 dma_configuration |=
2968                         SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2969                 writel(dma_configuration,
2970                         &scic->scu_registers->sdma.cdma_configuration);
2971         }
2972
2973         /*
2974          * Initialize the PHYs before the PORTs because the PHY registers
2975          * are accessed during the port initialization.
2976          */
2977         if (result == SCI_SUCCESS) {
2978                 /* Initialize the phys */
2979                 for (index = 0;
2980                      (result == SCI_SUCCESS) && (index < SCI_MAX_PHYS);
2981                      index++) {
2982                         result = scic_sds_phy_initialize(
2983                                 &scic->phy_table[index],
2984                                 &scic->scu_registers->peg0.pe[index].tl,
2985                                 &scic->scu_registers->peg0.pe[index].ll);
2986                 }
2987         }
2988
2989         if (result == SCI_SUCCESS) {
2990                 /* Initialize the logical ports */
2991                 for (index = 0;
2992                      (index < scic->logical_port_entries) &&
2993                      (result == SCI_SUCCESS);
2994                      index++) {
2995                         result = scic_sds_port_initialize(
2996                                 &scic->port_table[index],
2997                                 &scic->scu_registers->peg0.ptsg.port[index],
2998                                 &scic->scu_registers->peg0.ptsg.protocol_engine,
2999                                 &scic->scu_registers->peg0.viit[index]);
3000                 }
3001         }
3002
3003         if (result == SCI_SUCCESS)
3004                 result = scic_sds_port_configuration_agent_initialize(
3005                                 scic,
3006                                 &scic->port_agent);
3007
3008         /* Advance the controller state machine */
3009         if (result == SCI_SUCCESS)
3010                 state = SCI_BASE_CONTROLLER_STATE_INITIALIZED;
3011         else
3012                 state = SCI_BASE_CONTROLLER_STATE_FAILED;
3013         sci_base_state_machine_change_state(sm, state);
3014
3015         return result;
3016 }
3017
3018 /*
3019  * *****************************************************************************
3020  * * INITIALIZED STATE HANDLERS
3021  * ***************************************************************************** */
3022
3023 /*
3024  * This function is the struct scic_sds_controller start handler for the
3025  * initialized state.  It will:
3026  * - validate the memory descriptor table
3027  * - initialize the physical memory before programming the hardware, then
3028  *   program the SCU hardware with the addresses from the descriptor table
3029  * - initialize the TCi pool, the RNi pool, the completion queue and the
3030  *   unsolicited frame data
3031  * - take the SCU port task scheduler out of reset and start the first phy
3032  * - transition to SCI_BASE_CONTROLLER_STATE_STARTING
3033  * Returns SCI_SUCCESS if all start operations complete, or
3034  * SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD if one or more of the memory
3035  * descriptor fields is invalid.
3036  */
3037 static enum sci_status scic_sds_controller_initialized_state_start_handler(
3038         struct scic_sds_controller *scic, u32 timeout)
3039 {
3040         u16 index;
3041         enum sci_status result;
3042
3043         /*
3044          * Make sure that the SCI User filled in the memory descriptor
3045          * table correctly
3046          */
3047         result = scic_sds_controller_validate_memory_descriptor_table(scic);
3048
3049         if (result == SCI_SUCCESS) {
3050                 /*
3051                  * The memory descriptor list looks good so program the
3052                  * hardware
3053                  */
3054                 scic_sds_controller_ram_initialization(scic);
3055         }
3056
3057         if (result == SCI_SUCCESS) {
3058                 /* Build the TCi free pool */
3059                 sci_pool_initialize(scic->tci_pool);
3060                 for (index = 0; index < scic->task_context_entries; index++)
3061                         sci_pool_put(scic->tci_pool, index);
3062
3063                 /* Build the RNi free pool */
3064                 scic_sds_remote_node_table_initialize(
3065                                 &scic->available_remote_nodes,
3066                                 scic->remote_node_entries);
3067         }
3068
3069         if (result == SCI_SUCCESS) {
3070                 /*
3071                  * Before anything else lets make sure we will not be
3072                  * interrupted by the hardware.
3073                  */
3074                 scic_controller_disable_interrupts(scic);
3075
3076                 /* Enable the port task scheduler */
3077                 scic_sds_controller_enable_port_task_scheduler(scic);
3078
3079                 /* Assign all the task entries to scic physical function */
3080                 scic_sds_controller_assign_task_entries(scic);
3081
3082                 /* Now initialize the completion queue */
3083                 scic_sds_controller_initialize_completion_queue(scic);
3084
3085                 /* Initialize the unsolicited frame queue for use */
3086                 scic_sds_controller_initialize_unsolicited_frame_queue(scic);
3087         }
3088
3089         /* Start all of the ports on this controller */
3090         for (index = 0;
3091              (index < scic->logical_port_entries) && (result == SCI_SUCCESS);
3092              index++) {
3093                 struct scic_sds_port *sci_port = &scic->port_table[index];
3094
3095                 result = sci_port->state_handlers->parent.start_handler(
3096                                 &sci_port->parent);
3097         }
3098
3099         if (result == SCI_SUCCESS) {
3100                 scic_sds_controller_start_next_phy(scic);
3101
3102                 isci_timer_start(scic->timeout_timer, timeout);
3103
3104                 sci_base_state_machine_change_state(&scic->state_machine,
3105                                                     SCI_BASE_CONTROLLER_STATE_STARTING);
3106         }
3107
3108         return result;
3109 }
3110
3111 /*
3112  * *****************************************************************************
3113  * * STARTING STATE HANDLERS
3114  * ***************************************************************************** */
3115
3116 /**
3117  * scic_sds_controller_starting_state_link_up_handler() - link up in STARTING
3118  * @this_controller: This is the struct scic_sds_controller which receives the
3119  *    link up notification.
3120  * @port: This is the struct scic_sds_port with which the phy is associated.
3121  * @phy: This is the struct scic_sds_phy which has gone link up.
3122  *
3123  * This method is called when a phy reports link up while the controller is in
3124  * the starting state.  It stops the phy timer, reports the link up condition
3125  * to the port configuration agent, and then starts the next phy in the start
3126  * sequence.
3127  */
3128 static void scic_sds_controller_starting_state_link_up_handler(
3129         struct scic_sds_controller *this_controller,
3130         struct scic_sds_port *port,
3131         struct scic_sds_phy *phy)
3132 {
3133         scic_sds_controller_phy_timer_stop(this_controller);
3134
3135         this_controller->port_agent.link_up_handler(
3136                 this_controller, &this_controller->port_agent, port, phy
3137                 );
3138         /* scic_sds_port_link_up(port, phy); */
3139
3140         scic_sds_controller_start_next_phy(this_controller);
3141 }
3142
3143 /**
3144  * scic_sds_controller_starting_state_link_down_handler() - link down in STARTING
3145  * @this_controller: This is the struct scic_sds_controller which receives the
3146  *    link down notification.
3147  * @port: This is the struct scic_sds_port with which the phy is associated.
3148  * @phy: This is the struct scic_sds_phy which has gone link down.
3149  *
3150  * This method is called when a phy reports link down while the controller is
3151  * in the starting state.  It reports the link down condition to the port
3152  * configuration agent.
3153  */
3154 static void scic_sds_controller_starting_state_link_down_handler(
3155         struct scic_sds_controller *this_controller,
3156         struct scic_sds_port *port,
3157         struct scic_sds_phy *phy)
3158 {
3159         this_controller->port_agent.link_down_handler(
3160                 this_controller, &this_controller->port_agent, port, phy
3161                 );
3162         /* scic_sds_port_link_down(port, phy); */
3163 }
3164
3165 static enum sci_status scic_sds_controller_ready_state_stop_handler(
3166                 struct scic_sds_controller *scic,
3167                 u32 timeout)
3168 {
3169         isci_timer_start(scic->timeout_timer, timeout);
3170         sci_base_state_machine_change_state(&scic->state_machine,
3171                                             SCI_BASE_CONTROLLER_STATE_STOPPING);
3172         return SCI_SUCCESS;
3173 }
3174
3175 /*
3176  * This method is called when the struct scic_sds_controller is in the ready
3177  * state and the start io handler is called.  It starts the io request on the
3178  * remote device; if that succeeds, it records the io_request in the
3179  * io_request_table and posts the request to the hardware.  Returns
3180  * SCI_SUCCESS if the start io operation succeeds,
3181  * SCI_FAILURE_INSUFFICIENT_RESOURCES if the IO tag could not be allocated,
3182  * or SCI_FAILURE_INVALID_STATE if one or more objects are not in a valid
3183  * state to accept io requests.
3184  * XXX: How does the io_tag parameter get assigned to the io request?
3185  */
3186 static enum sci_status scic_sds_controller_ready_state_start_io_handler(
3187         struct scic_sds_controller *controller,
3188         struct sci_base_remote_device *remote_device,
3189         struct sci_base_request *io_request,
3190         u16 io_tag)
3191 {
3192         enum sci_status status;
3193
3194         struct scic_sds_request *the_request;
3195         struct scic_sds_remote_device *the_device;
3196
3197         the_request = (struct scic_sds_request *)io_request;
3198         the_device = (struct scic_sds_remote_device *)remote_device;
3199
3200         status = scic_sds_remote_device_start_io(controller, the_device, the_request);
3201
3202         if (status != SCI_SUCCESS)
3203                 return status;
3204
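        /*
         * Record the request so the completion path can find it again by its
         * tag index, then hand the request's post context to the hardware.
         */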
3205         controller->io_request_table[
3206                 scic_sds_io_tag_get_index(the_request->io_tag)] = the_request;
3207         scic_sds_controller_post_request(controller,
3208                 scic_sds_request_get_post_context(the_request));
3209         return SCI_SUCCESS;
3210 }
3211
3212 /*
3213  * This method is called when the struct scic_sds_controller is in the ready
3214  * state and the complete io handler is called.  It completes the io request
3215  * on the remote device; if that succeeds, the io_request is removed from the
3216  * io_request_table.  Returns SCI_SUCCESS if the complete io operation
3217  * succeeds, or SCI_FAILURE_INVALID_STATE if one or more objects are not in a
3218  * valid state to accept io requests.
3219  */
3220 static enum sci_status scic_sds_controller_ready_state_complete_io_handler(
3221         struct scic_sds_controller *controller,
3222         struct sci_base_remote_device *remote_device,
3223         struct sci_base_request *io_request)
3224 {
3225         u16 index;
3226         enum sci_status status;
3227         struct scic_sds_request *the_request;
3228         struct scic_sds_remote_device *the_device;
3229
3230         the_request = (struct scic_sds_request *)io_request;
3231         the_device = (struct scic_sds_remote_device *)remote_device;
3232
3233         status = scic_sds_remote_device_complete_io(controller, the_device,
3234                         the_request);
3235         if (status != SCI_SUCCESS)
3236                 return status;
3237
3238         index = scic_sds_io_tag_get_index(the_request->io_tag);
3239         controller->io_request_table[index] = NULL;
3240         return SCI_SUCCESS;
3241 }
3242
3243 /*
3244  * This method is called when the struct scic_sds_controller is in the ready
3245  * state and the continue io handler is called.  It re-posts the request to
3246  * the hardware and returns SCI_SUCCESS.  */
3247 static enum sci_status scic_sds_controller_ready_state_continue_io_handler(
3248         struct scic_sds_controller *controller,
3249         struct sci_base_remote_device *remote_device,
3250         struct sci_base_request *io_request)
3251 {
3252         struct scic_sds_request *the_request;
3253
3254         the_request     = (struct scic_sds_request *)io_request;
3255
3256         controller->io_request_table[
3257                 scic_sds_io_tag_get_index(the_request->io_tag)] = the_request;
3258         scic_sds_controller_post_request(controller,
3259                 scic_sds_request_get_post_context(the_request));
3260         return SCI_SUCCESS;
3261 }
3262
3263 /*
3264  * This method is called when the struct scic_sds_controller is in the ready
3265  * state and the start task handler is called.  The remote device is asked to
3266  * start the task request; if that succeeds, the task is recorded in the
3267  * io_request_table and the request is posted to the SCU hardware.  Returns
3268  * SCI_SUCCESS if the start task operation succeeds,
3269  * SCI_FAILURE_INSUFFICIENT_RESOURCES if the IO tag could not be allocated, or
3270  * SCI_FAILURE_INVALID_STATE if one or more objects are not in a valid state
3271  * to accept io requests.  How does the io tag get assigned in this code path?
3272  */
3273 static enum sci_status scic_sds_controller_ready_state_start_task_handler(
3274         struct scic_sds_controller *controller,
3275         struct sci_base_remote_device *remote_device,
3276         struct sci_base_request *io_request,
3277         u16 task_tag)
3278 {
3279         struct scic_sds_request *the_request     = (struct scic_sds_request *)
3280                                               io_request;
3281         struct scic_sds_remote_device *the_device      = (struct scic_sds_remote_device *)
3282                                                     remote_device;
3283         enum sci_status status;
3284
3285         status = scic_sds_remote_device_start_task(controller, the_device,
3286                         the_request);
3287
3288         if (status == SCI_SUCCESS) {
3289                 controller->io_request_table[
3290                         scic_sds_io_tag_get_index(the_request->io_tag)] = the_request;
3291
3292                 scic_sds_controller_post_request(controller,
3293                         scic_sds_request_get_post_context(the_request));
3294         } else if (status == SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS) {
3295                 controller->io_request_table[
3296                         scic_sds_io_tag_get_index(the_request->io_tag)] = the_request;
3297
3298                 /*
3299                  * We will let the framework know this task request started
3300                  * successfully, although the core is still working on starting
3301                  * the request (it will post the TC when the RNC is resumed). */
3302                 status = SCI_SUCCESS;
3303         }
3304         return status;
3305 }
3306
3307 /*
3308  * This method is called when the struct scic_sds_controller is in the ready
3309  * state and the terminate request handler is called.  The io request is asked
3310  * to terminate itself; if that succeeds, the terminate request is posted to
3311  * the SCU hardware.  Returns SCI_SUCCESS if the terminate operation succeeds,
3312  * or SCI_FAILURE_INVALID_STATE if one or more objects are not in a valid
3313  * state to accept io requests.
3314  */
3315 static enum sci_status scic_sds_controller_ready_state_terminate_request_handler(
3316         struct scic_sds_controller *controller,
3317         struct sci_base_remote_device *remote_device,
3318         struct sci_base_request *io_request)
3319 {
3320         struct scic_sds_request *the_request     = (struct scic_sds_request *)
3321                                               io_request;
3322         enum sci_status status;
3323
3324         status = scic_sds_io_request_terminate(the_request);
3325         if (status != SCI_SUCCESS)
3326                 return status;
3327
3328         /*
3329          * Utilize the original post context command and OR in the POST_TC_ABORT
3330          * request sub-type.
3331          */
3332         scic_sds_controller_post_request(controller,
3333                 scic_sds_request_get_post_context(the_request) |
3334                 SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
3335         return SCI_SUCCESS;
3336 }
3337
3338 /**
3339  * scic_sds_controller_ready_state_link_up_handler() - link up in READY
3340  * @this_controller: This is the struct scic_sds_controller which receives the
3341  *    link up notification.
3342  * @port: This is the struct scic_sds_port with which the phy is associated.
3343  * @phy: This is the struct scic_sds_phy which has gone link up.
3344  *
3345  * This method is called when a phy reports link up while the controller is in
3346  * the ready state.  It reports the link up condition to the port
3347  * configuration agent; unlike the starting state handler, there is no phy
3348  * start sequence to advance.
3349  */
3350 static void scic_sds_controller_ready_state_link_up_handler(
3351         struct scic_sds_controller *this_controller,
3352         struct scic_sds_port *port,
3353         struct scic_sds_phy *phy)
3354 {
3355         this_controller->port_agent.link_up_handler(
3356                 this_controller, &this_controller->port_agent, port, phy
3357                 );
3358 }
3359
3360 /**
3361  * scic_sds_controller_ready_state_link_down_handler() - link down in READY
3362  * @this_controller: This is the struct scic_sds_controller which receives the
3363  *    link down notification.
3364  * @port: This is the struct scic_sds_port with which the phy is associated.
3365  * @phy: This is the struct scic_sds_phy which has gone link down.
3366  *
3367  * This method is called when a phy reports link down while the controller is
3368  * in the ready state.  It reports the link down condition to the port
3369  * configuration agent.
3370  */
3371 static void scic_sds_controller_ready_state_link_down_handler(
3372         struct scic_sds_controller *this_controller,
3373         struct scic_sds_port *port,
3374         struct scic_sds_phy *phy)
3375 {
3376         this_controller->port_agent.link_down_handler(
3377                 this_controller, &this_controller->port_agent, port, phy
3378                 );
3379 }
3380
3381 /*
3382  * *****************************************************************************
3383  * * STOPPING STATE HANDLERS
3384  * ***************************************************************************** */
3385
3386 /**
3387  * This method is called when the struct scic_sds_controller is in a stopping
3388  * state and the complete io handler is called.  This function is not yet
3389  * implemented and currently always returns SCI_FAILURE.
3390  */
3391 static enum sci_status scic_sds_controller_stopping_state_complete_io_handler(
3392         struct scic_sds_controller *controller,
3393         struct sci_base_remote_device *remote_device,
3394         struct sci_base_request *io_request)
3395 {
3396         /* XXX: Implement this function */
3397         return SCI_FAILURE;
3398 }
3399
3400 /**
3401  * This method is called when the struct scic_sds_controller is in a stopping state
3402  * and the remote device has stopped.
3403  **/
3404 static void scic_sds_controller_stopping_state_device_stopped_handler(
3405         struct scic_sds_controller *controller,
3406         struct scic_sds_remote_device *remote_device
3407 )
3408 {
3409         if (!scic_sds_controller_has_remote_devices_stopping(controller)) {
3410                 sci_base_state_machine_change_state(&controller->state_machine,
3411                         SCI_BASE_CONTROLLER_STATE_STOPPED
3412                 );
3413         }
3414 }
3415
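/*
 * Per-state dispatch table for the controller.  Operations that a state
 * cannot legally accept are pointed at the default handlers above, which log
 * the invalid state and return SCI_FAILURE_INVALID_STATE; only the READY
 * state wires up the full io, task and terminate paths, and only the
 * STARTING and READY states handle link events.
 */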
3416 const struct scic_sds_controller_state_handler scic_sds_controller_state_handler_table[] = {
3417         [SCI_BASE_CONTROLLER_STATE_INITIAL] = {
3418                 .start_io     = scic_sds_controller_default_start_operation_handler,
3419                 .complete_io  = scic_sds_controller_default_request_handler,
3420                 .continue_io  = scic_sds_controller_default_request_handler,
3421                 .terminate_request = scic_sds_controller_default_request_handler,
3422         },
3423         [SCI_BASE_CONTROLLER_STATE_RESET] = {
3424                 .reset        = scic_sds_controller_general_reset_handler,
3425                 .initialize   = scic_sds_controller_reset_state_initialize_handler,
3426                 .start_io     = scic_sds_controller_default_start_operation_handler,
3427                 .complete_io  = scic_sds_controller_default_request_handler,
3428                 .continue_io  = scic_sds_controller_default_request_handler,
3429                 .terminate_request = scic_sds_controller_default_request_handler,
3430         },
3431         [SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {
3432                 .start_io     = scic_sds_controller_default_start_operation_handler,
3433                 .complete_io  = scic_sds_controller_default_request_handler,
3434                 .continue_io  = scic_sds_controller_default_request_handler,
3435                 .terminate_request = scic_sds_controller_default_request_handler,
3436         },
3437         [SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {
3438                 .start        = scic_sds_controller_initialized_state_start_handler,
3439                 .start_io     = scic_sds_controller_default_start_operation_handler,
3440                 .complete_io  = scic_sds_controller_default_request_handler,
3441                 .continue_io  = scic_sds_controller_default_request_handler,
3442                 .terminate_request = scic_sds_controller_default_request_handler,
3443         },
3444         [SCI_BASE_CONTROLLER_STATE_STARTING] = {
3445                 .start_io     = scic_sds_controller_default_start_operation_handler,
3446                 .complete_io  = scic_sds_controller_default_request_handler,
3447                 .continue_io  = scic_sds_controller_default_request_handler,
3448                 .terminate_request = scic_sds_controller_default_request_handler,
3449                 .link_up           = scic_sds_controller_starting_state_link_up_handler,
3450                 .link_down         = scic_sds_controller_starting_state_link_down_handler
3451         },
3452         [SCI_BASE_CONTROLLER_STATE_READY] = {
3453                 .stop         = scic_sds_controller_ready_state_stop_handler,
3454                 .reset        = scic_sds_controller_general_reset_handler,
3455                 .start_io     = scic_sds_controller_ready_state_start_io_handler,
3456                 .complete_io  = scic_sds_controller_ready_state_complete_io_handler,
3457                 .continue_io  = scic_sds_controller_ready_state_continue_io_handler,
3458                 .start_task   = scic_sds_controller_ready_state_start_task_handler,
3459                 .complete_task = scic_sds_controller_ready_state_complete_io_handler,
3460                 .terminate_request = scic_sds_controller_ready_state_terminate_request_handler,
3461                 .link_up           = scic_sds_controller_ready_state_link_up_handler,
3462                 .link_down         = scic_sds_controller_ready_state_link_down_handler
3463         },
3464         [SCI_BASE_CONTROLLER_STATE_RESETTING] = {
3465                 .start_io     = scic_sds_controller_default_start_operation_handler,
3466                 .complete_io  = scic_sds_controller_default_request_handler,
3467                 .continue_io  = scic_sds_controller_default_request_handler,
3468                 .terminate_request = scic_sds_controller_default_request_handler,
3469         },
3470         [SCI_BASE_CONTROLLER_STATE_STOPPING] = {
3471                 .start_io     = scic_sds_controller_default_start_operation_handler,
3472                 .complete_io  = scic_sds_controller_stopping_state_complete_io_handler,
3473                 .continue_io  = scic_sds_controller_default_request_handler,
3474                 .terminate_request = scic_sds_controller_default_request_handler,
3475                 .device_stopped    = scic_sds_controller_stopping_state_device_stopped_handler,
3476         },
3477         [SCI_BASE_CONTROLLER_STATE_STOPPED] = {
3478                 .reset        = scic_sds_controller_general_reset_handler,
3479                 .start_io     = scic_sds_controller_default_start_operation_handler,
3480                 .complete_io  = scic_sds_controller_default_request_handler,
3481                 .continue_io  = scic_sds_controller_default_request_handler,
3482                 .terminate_request = scic_sds_controller_default_request_handler,
3483         },
3484         [SCI_BASE_CONTROLLER_STATE_FAILED] = {
3485                 .reset        = scic_sds_controller_general_reset_handler,
3486                 .start_io     = scic_sds_controller_default_start_operation_handler,
3487                 .complete_io  = scic_sds_controller_default_request_handler,
3488                 .continue_io  = scic_sds_controller_default_request_handler,
3489                 .terminate_request = scic_sds_controller_default_request_handler,
3490         },
3491 };
3492
3493 /**
3494  *
3495  * @object: This is the struct sci_base_object which is cast to a struct scic_sds_controller
3496  *    object.
3497  *
3498  * This method implements the actions taken by the struct scic_sds_controller
3499  * on entry to SCI_BASE_CONTROLLER_STATE_INITIAL: it immediately transitions
3500  * the state machine to SCI_BASE_CONTROLLER_STATE_RESET, the state in which
3501  * the user and OEM parameters may be set.
3502  */
3503 static void scic_sds_controller_initial_state_enter(
3504         struct sci_base_object *object)
3505 {
3506         struct scic_sds_controller *this_controller;
3507
3508         this_controller = (struct scic_sds_controller *)object;
3509
3510         sci_base_state_machine_change_state(&this_controller->state_machine,
3511                         SCI_BASE_CONTROLLER_STATE_RESET);
3512 }
3513
3514 /**
3515  *
3516  * @object: This is the struct sci_base_object which is cast to a struct scic_sds_controller
3517  *    object.
3518  *
3519  * This method implements the actions taken by the struct scic_sds_controller
3520  * on exit from SCI_BASE_CONTROLLER_STATE_STARTING: it stops the controller
3521  * start timeout timer.
3522  */
3523 static inline void scic_sds_controller_starting_state_exit(
3524         struct sci_base_object *object)
3525 {
3526         struct scic_sds_controller *scic = (struct scic_sds_controller *)object;
3527
3528         isci_timer_stop(scic->timeout_timer);
3529 }
3530
3531 /**
3532  *
3533  * @object: This is the struct sci_base_object which is cast to a struct scic_sds_controller
3534  *    object.
3535  *
3536  * This method implements the actions taken by the struct scic_sds_controller
3537  * on entry to SCI_BASE_CONTROLLER_STATE_READY: it programs the default
3538  * interrupt coalescence number and timeout.
3539  */
3540 static void scic_sds_controller_ready_state_enter(
3541         struct sci_base_object *object)
3542 {
3543         struct scic_sds_controller *this_controller;
3544
3545         this_controller = (struct scic_sds_controller *)object;
3546
3547         /* set the default interrupt coalescence number and timeout value. */
3548         scic_controller_set_interrupt_coalescence(
3549                 this_controller, 0x10, 250);
3550 }
3551
3552 /**
3553  *
3554  * @object: This is the struct sci_base_object which is cast to a struct scic_sds_controller
3555  *    object.
3556  *
3557  * This method implements the actions taken by the struct scic_sds_controller
3558  * on exit from SCI_BASE_CONTROLLER_STATE_READY: interrupt coalescence is disabled.
3559  */
3560 static void scic_sds_controller_ready_state_exit(
3561         struct sci_base_object *object)
3562 {
3563         struct scic_sds_controller *this_controller;
3564
3565         this_controller = (struct scic_sds_controller *)object;
3566
3567         /* disable interrupt coalescence. */
3568         scic_controller_set_interrupt_coalescence(this_controller, 0, 0);
3569 }
3570
3571 /**
3572  *
3573  * @object: This is the struct sci_base_object which is cast to a struct scic_sds_controller
3574  *    object.
3575  *
3576  * This method implements the actions taken by the struct scic_sds_controller
3577  * on entry to SCI_BASE_CONTROLLER_STATE_STOPPING.  It stops all of the
3578  * components for this controller: the phys, the ports, and the remote
3579  * devices.
3580  */
3581 static void scic_sds_controller_stopping_state_enter(
3582         struct sci_base_object *object)
3583 {
3584         struct scic_sds_controller *this_controller;
3585
3586         this_controller = (struct scic_sds_controller *)object;
3587
3588         /* Stop all of the components for this controller */
3589         scic_sds_controller_stop_phys(this_controller);
3590         scic_sds_controller_stop_ports(this_controller);
3591         scic_sds_controller_stop_devices(this_controller);
3592 }
3593
3594 /**
3595  *
3596  * @object: This is the struct sci_base_object which is cast to a struct
3597  * scic_sds_controller object.
3598  *
3599  * This function implements the actions taken by the struct scic_sds_controller
3600  * on exit from SCI_BASE_CONTROLLER_STATE_STOPPING: it stops the controller
3601  * stop timeout timer.
3602  */
3603 static inline void scic_sds_controller_stopping_state_exit(
3604         struct sci_base_object *object)
3605 {
3606         struct scic_sds_controller *scic =
3607                 (struct scic_sds_controller *)object;
3608
3609         isci_timer_stop(scic->timeout_timer);
3610 }
3611
3612 static void scic_sds_controller_resetting_state_enter(struct sci_base_object *object)
3613 {
3614         struct scic_sds_controller *scic;
3615
3616         scic = container_of(object, typeof(*scic), parent);
3617         scic_sds_controller_reset_hardware(scic);
3618         sci_base_state_machine_change_state(&scic->state_machine,
3619                                             SCI_BASE_CONTROLLER_STATE_RESET);
3620 }
3621
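/*
 * Enter/exit actions consumed by sci_base_state_machine_construct() below;
 * states declared with an empty initializer take no action on entry or exit.
 */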
3622 static const struct sci_base_state scic_sds_controller_state_table[] = {
3623         [SCI_BASE_CONTROLLER_STATE_INITIAL] = {
3624                 .enter_state = scic_sds_controller_initial_state_enter,
3625         },
3626         [SCI_BASE_CONTROLLER_STATE_RESET] = {},
3627         [SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {},
3628         [SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {},
3629         [SCI_BASE_CONTROLLER_STATE_STARTING] = {
3630                 .exit_state  = scic_sds_controller_starting_state_exit,
3631         },
3632         [SCI_BASE_CONTROLLER_STATE_READY] = {
3633                 .enter_state = scic_sds_controller_ready_state_enter,
3634                 .exit_state  = scic_sds_controller_ready_state_exit,
3635         },
3636         [SCI_BASE_CONTROLLER_STATE_RESETTING] = {
3637                 .enter_state = scic_sds_controller_resetting_state_enter,
3638         },
3639         [SCI_BASE_CONTROLLER_STATE_STOPPING] = {
3640                 .enter_state = scic_sds_controller_stopping_state_enter,
3641                 .exit_state = scic_sds_controller_stopping_state_exit,
3642         },
3643         [SCI_BASE_CONTROLLER_STATE_STOPPED] = {},
3644         [SCI_BASE_CONTROLLER_STATE_FAILED] = {}
3645 };
3646
3647 /**
3648  * scic_controller_construct() - This method will attempt to construct a
3649  *    controller object utilizing the supplied parameter information.
3650  * @scic: This parameter specifies the controller to be constructed.
3651  * @scu_base: mapped base address of the scu registers
3652  * @smu_base: mapped base address of the smu registers
3653  *
3654  * Indicate if the controller was successfully constructed or if it failed in
3655  * some way. SCI_SUCCESS This value is returned if the controller was
3656  * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned
3657  * if the interrupt coalescence timer may cause SAS compliance issues for SMP
3658  * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE
3659  * This value is returned if the controller does not support the supplied type.
3660  * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the
3661  * controller does not support the supplied initialization data version.
3662  */
3663 enum sci_status scic_controller_construct(struct scic_sds_controller *scic,
3664                                           void __iomem *scu_base,
3665                                           void __iomem *smu_base)
3666 {
3667         u8 i;
3668
3669         sci_base_state_machine_construct(&scic->state_machine,
3670                 &scic->parent, scic_sds_controller_state_table,
3671                 SCI_BASE_CONTROLLER_STATE_INITIAL);
3672
3673         sci_base_mdl_construct(&scic->mdl, scic->memory_descriptors,
3674                                 ARRAY_SIZE(scic->memory_descriptors), NULL);
3675         sci_base_state_machine_start(&scic->state_machine);
3676
3677         scic->scu_registers = scu_base;
3678         scic->smu_registers = smu_base;
3679
3680         scic_sds_port_configuration_agent_construct(&scic->port_agent);
3681
3682         /* Construct the ports for this controller */
3683         for (i = 0; i < SCI_MAX_PORTS; i++)
3684                 scic_sds_port_construct(&scic->port_table[i], i, scic);
3685         scic_sds_port_construct(&scic->port_table[i], SCIC_SDS_DUMMY_PORT, scic);
3686
3687         /* Construct the phys for this controller */
3688         for (i = 0; i < SCI_MAX_PHYS; i++) {
3689                 /* Add all the PHYs to the dummy port */
3690                 scic_sds_phy_construct(&scic->phy_table[i],
3691                                        &scic->port_table[SCI_MAX_PORTS], i);
3692         }
3693
3694         scic->invalid_phy_mask = 0;
3695
3696         /* Set the default maximum values */
3697         scic->completion_event_entries      = SCU_EVENT_COUNT;
3698         scic->completion_queue_entries      = SCU_COMPLETION_QUEUE_COUNT;
3699         scic->remote_node_entries           = SCI_MAX_REMOTE_DEVICES;
3700         scic->logical_port_entries          = SCI_MAX_PORTS;
3701         scic->task_context_entries          = SCU_IO_REQUEST_COUNT;
3702         scic->uf_control.buffers.count      = SCU_UNSOLICITED_FRAME_COUNT;
3703         scic->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT;
3704
3705         /* Initialize the User and OEM parameters to default values. */
3706         scic_sds_controller_set_default_config_parameters(scic);
3707
3708         return scic_controller_reset(scic);
3709 }
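
/*
 * Illustrative bring-up sketch (not authoritative; error handling omitted and
 * the local names pdev, scu_base, smu_base, oem_parms, user_parms and timeout
 * are placeholders).  scic_controller_initialize() and scic_controller_start()
 * are assumed to be the public entry points declared in scic_controller.h for
 * the steps that follow construction:
 *
 *	struct scic_sds_controller *scic = scic_controller_alloc(&pdev->dev);
 *
 *	scic_controller_construct(scic, scu_base, smu_base);
 *	scic_oem_parameters_set(scic, &oem_parms);
 *	scic_user_parameters_set(scic, &user_parms);
 *	scic_controller_initialize(scic);
 *	scic_controller_start(scic, timeout);
 */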