isci: unify phy data structures
[pandora-kernel.git] / drivers / scsi / isci / core / scic_sds_controller.c
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55
56 #include <linux/device.h>
57 #include <scsi/sas.h>
58 #include "scic_controller.h"
59 #include "scic_phy.h"
60 #include "scic_port.h"
61 #include "scic_sds_controller.h"
62 #include "scu_registers.h"
63 #include "scic_sds_phy.h"
64 #include "scic_sds_port_configuration_agent.h"
65 #include "scic_sds_port.h"
66 #include "remote_device.h"
67 #include "scic_sds_request.h"
68 #include "sci_environment.h"
69 #include "sci_util.h"
70 #include "scu_completion_codes.h"
71 #include "scu_constants.h"
72 #include "scu_event_codes.h"
73 #include "scu_remote_node_context.h"
74 #include "scu_task_context.h"
75 #include "scu_unsolicited_frame.h"
76
/* Stall time (us) to allow context RAM initialization to settle. */
#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200

/**
 * smu_dcc_get_max_ports() -
 *
 * This macro returns the maximum number of logical ports supported by the
 * hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mash and shift the value appropriately.
 */
#define smu_dcc_get_max_ports(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
	)

/**
 * smu_dcc_get_max_task_context() -
 *
 * This macro returns the maximum number of task contexts supported by the
 * hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mash and shift the value appropriately.
 */
#define smu_dcc_get_max_task_context(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
	)

/**
 * smu_dcc_get_max_remote_node_context() -
 *
 * This macro returns the maximum number of remote node contexts supported by
 * the hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mash and shift the value appropriately.
 */
#define smu_dcc_get_max_remote_node_context(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
	)


/* Forward declaration: handler invoked when the power-control timer fires. */
static void scic_sds_controller_power_control_timer_handler(
	void *controller);
/* Bounds on the number of timers this controller allocates. */
#define SCIC_SDS_CONTROLLER_MIN_TIMER_COUNT  3
#define SCIC_SDS_CONTROLLER_MAX_TIMER_COUNT  3

/**
 *
 *
 * The number of milliseconds to wait for a phy to start.
 */
#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100

/**
 *
 *
 * The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power. Ultimately, this will
 * be specified by OEM parameter.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500

/**
 * COMPLETION_QUEUE_CYCLE_BIT() -
 *
 * This macro will return the cycle bit of the completion queue entry
 */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)

/**
 * NORMALIZE_GET_POINTER() -
 *
 * This macro will normalize the completion queue get pointer so its value can
 * be used as an index into an array
 */
#define NORMALIZE_GET_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)

/**
 * NORMALIZE_PUT_POINTER() -
 *
 * This macro will normalize the completion queue put pointer so its value can
 * be used as an array index
 */
#define NORMALIZE_PUT_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)


/**
 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
 *
 * This macro will normalize the completion queue cycle pointer so it matches
 * the completion queue cycle bit
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))

/**
 * NORMALIZE_EVENT_POINTER() -
 *
 * This macro will normalize the completion queue event entry so its value can
 * be used as an index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
	(\
		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
	)

/**
 * INCREMENT_COMPLETION_QUEUE_GET() -
 *
 * This macro will increment the controllers completion queue index value and
 * possibly toggle the cycle bit if the completion queue index wraps back to 0.
 */
#define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \
	INCREMENT_QUEUE_GET(\
		(index), \
		(cycle), \
		(controller)->completion_queue_entries, \
		SMU_CQGR_CYCLE_BIT \
		)

/**
 * INCREMENT_EVENT_QUEUE_GET() -
 *
 * This macro will increment the controllers event queue index value and
 * possibly toggle the event cycle bit if the event queue index wraps back to 0.
 */
#define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \
	INCREMENT_QUEUE_GET(\
		(index), \
		(cycle), \
		(controller)->completion_event_entries, \
		SMU_CQGR_EVENT_CYCLE_BIT \
		)
214
215 static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic)
216 {
217         struct isci_host *ihost = scic_to_ihost(scic);
218         scic->power_control.timer = isci_timer_create(ihost,
219                                                       scic,
220                                         scic_sds_controller_power_control_timer_handler);
221
222         memset(scic->power_control.requesters, 0,
223                sizeof(scic->power_control.requesters));
224
225         scic->power_control.phys_waiting = 0;
226         scic->power_control.phys_granted_power = 0;
227 }
228
/**
 * scic_controller_mem_init() - allocate and program controller DMA memory
 * @scic: controller to initialize
 *
 * Allocates the completion queue, remote node context table, and task
 * context table with dmam_alloc_coherent() (device-managed, so no explicit
 * free path is needed here) and programs each region's bus address into the
 * SMU registers as a lower/upper 32-bit pair.  Finally constructs the
 * unsolicited frame control structures and tells the SDMA hardware where
 * the UF headers and address table live.
 *
 * Return: 0 on success, -ENOMEM if any allocation fails, or the non-zero
 * status from scic_sds_unsolicited_frame_control_construct().
 */
int scic_controller_mem_init(struct scic_sds_controller *scic)
{
	struct device *dev = scic_to_dev(scic);
	dma_addr_t dma_handle;
	enum sci_status result;

	scic->completion_queue = dmam_alloc_coherent(dev,
			scic->completion_queue_entries * sizeof(u32),
			&dma_handle, GFP_KERNEL);
	if (!scic->completion_queue)
		return -ENOMEM;

	/* Program the completion queue base address (split 64-bit write). */
	writel(lower_32_bits(dma_handle),
		&scic->smu_registers->completion_queue_lower);
	writel(upper_32_bits(dma_handle),
		&scic->smu_registers->completion_queue_upper);

	scic->remote_node_context_table = dmam_alloc_coherent(dev,
			scic->remote_node_entries *
				sizeof(union scu_remote_node_context),
			&dma_handle, GFP_KERNEL);
	if (!scic->remote_node_context_table)
		return -ENOMEM;

	/* Program the remote node context table base address. */
	writel(lower_32_bits(dma_handle),
		&scic->smu_registers->remote_node_context_lower);
	writel(upper_32_bits(dma_handle),
		&scic->smu_registers->remote_node_context_upper);

	scic->task_context_table = dmam_alloc_coherent(dev,
			scic->task_context_entries *
				sizeof(struct scu_task_context),
			&dma_handle, GFP_KERNEL);
	if (!scic->task_context_table)
		return -ENOMEM;

	/* Program the host task table base address. */
	writel(lower_32_bits(dma_handle),
		&scic->smu_registers->host_task_table_lower);
	writel(upper_32_bits(dma_handle),
		&scic->smu_registers->host_task_table_upper);

	result = scic_sds_unsolicited_frame_control_construct(scic);
	if (result)
		return result;

	/*
	 * Inform the silicon as to the location of the UF headers and
	 * address table.
	 */
	writel(lower_32_bits(scic->uf_control.headers.physical_address),
		&scic->scu_registers->sdma.uf_header_base_address_lower);
	writel(upper_32_bits(scic->uf_control.headers.physical_address),
		&scic->scu_registers->sdma.uf_header_base_address_upper);

	writel(lower_32_bits(scic->uf_control.address_table.physical_address),
		&scic->scu_registers->sdma.uf_address_table_lower);
	writel(upper_32_bits(scic->uf_control.address_table.physical_address),
		&scic->scu_registers->sdma.uf_address_table_upper);

	return 0;
}
290
291 /**
292  * This method initializes the task context data for the controller.
293  * @scic:
294  *
295  */
296 static void
297 scic_sds_controller_assign_task_entries(struct scic_sds_controller *controller)
298 {
299         u32 task_assignment;
300
301         /*
302          * Assign all the TCs to function 0
303          * TODO: Do we actually need to read this register to write it back?
304          */
305
306         task_assignment =
307                 readl(&controller->smu_registers->task_context_assignment[0]);
308
309         task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
310                 (SMU_TCA_GEN_VAL(ENDING,  controller->task_context_entries - 1)) |
311                 (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
312
313         writel(task_assignment,
314                 &controller->smu_registers->task_context_assignment[0]);
315
316 }
317
/**
 * scic_sds_controller_initialize_completion_queue() - program the hardware
 * completion queue
 * @scic: controller whose completion queue is being initialized
 *
 * Programs the queue/event size limits from the configured entry counts,
 * resets the hardware get and put pointers (enabling both the completion
 * and event halves of the queue), and pre-sets the cycle bit of every
 * software-visible entry so all entries start out invalid.
 */
static void scic_sds_controller_initialize_completion_queue(
	struct scic_sds_controller *scic)
{
	u32 index;
	u32 completion_queue_control_value;
	u32 completion_queue_get_value;
	u32 completion_queue_put_value;

	scic->completion_queue_get = 0;

	/* Queue and event limits are "entries - 1" per the SMU register layout. */
	completion_queue_control_value = (
		SMU_CQC_QUEUE_LIMIT_SET(scic->completion_queue_entries - 1)
		| SMU_CQC_EVENT_LIMIT_SET(scic->completion_event_entries - 1)
		);

	writel(completion_queue_control_value,
	       &scic->smu_registers->completion_queue_control);


	/* Set the completion queue get pointer and enable the queue */
	completion_queue_get_value = (
		(SMU_CQGR_GEN_VAL(POINTER, 0))
		| (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
		| (SMU_CQGR_GEN_BIT(ENABLE))
		| (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
		);

	writel(completion_queue_get_value,
	       &scic->smu_registers->completion_queue_get);

	/* Set the completion queue put pointer */
	completion_queue_put_value = (
		(SMU_CQPR_GEN_VAL(POINTER, 0))
		| (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
		);

	writel(completion_queue_put_value,
	       &scic->smu_registers->completion_queue_put);

	/* Initialize the cycle bit of the completion queue entries */
	for (index = 0; index < scic->completion_queue_entries; index++) {
		/*
		 * If get.cycle_bit != completion_queue.cycle_bit
		 * its not a valid completion queue entry
		 * so at system start all entries are invalid */
		scic->completion_queue[index] = 0x80000000;
	}
}
371
/**
 * scic_sds_controller_initialize_unsolicited_frame_queue() - program the
 * hardware unsolicited frame queue
 * @scic: controller whose UF queue is being initialized
 *
 * Writes the queue size (taken from the UF address table count), then
 * resets and enables the get pointer and resets the put pointer in the
 * SDMA register block.
 */
static void scic_sds_controller_initialize_unsolicited_frame_queue(
	struct scic_sds_controller *scic)
{
	u32 frame_queue_control_value;
	u32 frame_queue_get_value;
	u32 frame_queue_put_value;

	/* Write the queue size */
	frame_queue_control_value =
		SCU_UFQC_GEN_VAL(QUEUE_SIZE,
				 scic->uf_control.address_table.count);

	writel(frame_queue_control_value,
	       &scic->scu_registers->sdma.unsolicited_frame_queue_control);

	/* Setup the get pointer for the unsolicited frame queue */
	frame_queue_get_value = (
		SCU_UFQGP_GEN_VAL(POINTER, 0)
		|  SCU_UFQGP_GEN_BIT(ENABLE_BIT)
		);

	writel(frame_queue_get_value,
	       &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
	/* Setup the put pointer for the unsolicited frame queue */
	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
	writel(frame_queue_put_value,
	       &scic->scu_registers->sdma.unsolicited_frame_put_pointer);
}
405
406 /**
407  * This method enables the hardware port task scheduler.
408  *
409  *
410  */
411 static void scic_sds_controller_enable_port_task_scheduler(
412         struct scic_sds_controller *scic)
413 {
414         u32 port_task_scheduler_value;
415
416         port_task_scheduler_value =
417                 readl(&scic->scu_registers->peg0.ptsg.control);
418         port_task_scheduler_value |=
419                 (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
420                  SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
421         writel(port_task_scheduler_value,
422                &scic->scu_registers->peg0.ptsg.control);
423 }
424
/**
 *
 *
 * This macro is used to delay (in microseconds, via udelay()) between
 * writes to the AFE registers during AFE initialization.
 */
#define AFE_REGISTER_WRITE_DELAY 10
432
/* Initialize the AFE (analog front end) for every phy. We need to read the
 * AFE setup from the OEM parameters.
 *
 * The write values are silicon-revision specific (is_a0/is_a2/is_b0) and
 * each register write is followed by AFE_REGISTER_WRITE_DELAY us of settle
 * time; the ordering of these writes is hardware-mandated, so do not
 * reorder.
 */
static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic)
{
	const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
	u32 afe_status;
	u32 phy_id;

	/* Clear DFX Status registers */
	writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);

	if (is_b0()) {
		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
		 * Timer, PM Stagger Timer */
		writel(0x0007BFFF, &scic->scu_registers->afe.afe_pmsn_master_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Configure bias currents to normal */
	if (is_a0())
		writel(0x00005500, &scic->scu_registers->afe.afe_bias_control);
	else if (is_a2())
		writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control);
	else if (is_b0())
		writel(0x00005F00, &scic->scu_registers->afe.afe_bias_control);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Enable PLL */
	if (is_b0())
		writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0);
	else
		writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Wait for the PLL to lock (bit 12 of the common block status).
	 * NOTE(review): this poll has no timeout; if the PLL never locks
	 * the loop spins forever -- confirm that is acceptable here. */
	do {
		afe_status = readl(&scic->scu_registers->afe.afe_common_block_status);
		udelay(AFE_REGISTER_WRITE_DELAY);
	} while ((afe_status & 0x00001000) == 0);

	if (is_a0() || is_a2()) {
		/* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
		writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Per-phy transceiver setup, ending with the OEM-supplied TX
	 * amplitude values for each phy. */
	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];

		if (is_b0()) {
			 /* Configure transmitter SSC parameters */
			writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else {
			/*
			 * All defaults, except the Receive Word Alignment/Comma Detect
			 * Enable....(0xe800) */
			writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
		 * & increase TX int & ext bias 20%....(0xe85c) */
		if (is_a0())
			writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else if (is_a2())
			writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else {
			 /* Power down TX and RX (PWRDNTX and PWRDNRX) */
			writel(0x000003d7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c) */
			writel(0x000003d4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		if (is_a0() || is_a2()) {
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
		 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
		writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		/* Leave DFE/FFE on */
		if (is_a0())
			writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else if (is_a2())
			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else {
			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		/* Program the OEM-specified TX amplitude controls. */
		writel(oem_phy->afe_tx_amp_control0,
			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control1,
			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control2,
			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control3,
			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Transfer control to the PEs */
	writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);
}
566
567 /*
568  * ****************************************************************************-
569  * * SCIC SDS Controller Internal Start/Stop Routines
570  * ****************************************************************************- */
571
572
573 /**
574  * This method will attempt to transition into the ready state for the
575  *    controller and indicate that the controller start operation has completed
576  *    if all criteria are met.
577  * @scic: This parameter indicates the controller object for which
578  *    to transition to ready.
579  * @status: This parameter indicates the status value to be pass into the call
580  *    to scic_cb_controller_start_complete().
581  *
582  * none.
583  */
584 static void scic_sds_controller_transition_to_ready(
585         struct scic_sds_controller *scic,
586         enum sci_status status)
587 {
588         struct isci_host *ihost = scic_to_ihost(scic);
589
590         if (scic->state_machine.current_state_id ==
591             SCI_BASE_CONTROLLER_STATE_STARTING) {
592                 /*
593                  * We move into the ready state, because some of the phys/ports
594                  * may be up and operational.
595                  */
596                 sci_base_state_machine_change_state(&scic->state_machine,
597                                                     SCI_BASE_CONTROLLER_STATE_READY);
598
599                 isci_host_start_complete(ihost, status);
600         }
601 }
602
603 static void scic_sds_controller_timeout_handler(void *_scic)
604 {
605         struct scic_sds_controller *scic = _scic;
606         struct isci_host *ihost = scic_to_ihost(scic);
607         struct sci_base_state_machine *sm = &scic->state_machine;
608
609         if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STARTING)
610                 scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT);
611         else if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STOPPING) {
612                 sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_FAILED);
613                 isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
614         } else  /* / @todo Now what do we want to do in this case? */
615                 dev_err(scic_to_dev(scic),
616                         "%s: Controller timer fired when controller was not "
617                         "in a state being timed.\n",
618                         __func__);
619 }
620
621 static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic)
622 {
623         u32 index;
624         enum sci_status port_status;
625         enum sci_status status = SCI_SUCCESS;
626
627         for (index = 0; index < scic->logical_port_entries; index++) {
628                 struct scic_sds_port *sci_port = &scic->port_table[index];
629                 scic_sds_port_handler_t stop;
630
631                 stop = sci_port->state_handlers->stop_handler;
632                 port_status = stop(sci_port);
633
634                 if ((port_status != SCI_SUCCESS) &&
635                     (port_status != SCI_FAILURE_INVALID_STATE)) {
636                         status = SCI_FAILURE;
637
638                         dev_warn(scic_to_dev(scic),
639                                  "%s: Controller stop operation failed to "
640                                  "stop port %d because of status %d.\n",
641                                  __func__,
642                                  sci_port->logical_port_index,
643                                  port_status);
644                 }
645         }
646
647         return status;
648 }
649
/* Arm the phy-startup timer for SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT ms and
 * record that a phy start is currently being timed (which blocks
 * scic_sds_controller_start_next_phy() from starting another phy).
 */
static inline void scic_sds_controller_phy_timer_start(
		struct scic_sds_controller *scic)
{
	isci_timer_start(scic->phy_startup_timer,
			 SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);

	scic->phy_startup_timer_pending = true;
}
658
/* Cancel the phy-startup timer and clear the pending flag so the next phy
 * may be started.
 */
static void scic_sds_controller_phy_timer_stop(struct scic_sds_controller *scic)
{
	isci_timer_stop(scic->phy_startup_timer);

	scic->phy_startup_timer_pending = false;
}
665
666 /**
667  * scic_sds_controller_start_next_phy - start phy
668  * @scic: controller
669  *
670  * If all the phys have been started, then attempt to transition the
671  * controller to the READY state and inform the user
672  * (scic_cb_controller_start_complete()).
673  */
674 static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic)
675 {
676         struct isci_host *ihost = scic_to_ihost(scic);
677         struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
678         struct scic_sds_phy *sci_phy;
679         enum sci_status status;
680
681         status = SCI_SUCCESS;
682
683         if (scic->phy_startup_timer_pending)
684                 return status;
685
686         if (scic->next_phy_to_start >= SCI_MAX_PHYS) {
687                 bool is_controller_start_complete = true;
688                 u32 state;
689                 u8 index;
690
691                 for (index = 0; index < SCI_MAX_PHYS; index++) {
692                         sci_phy = &ihost->phys[index].sci;
693                         state = sci_phy->state_machine.current_state_id;
694
695                         if (!scic_sds_phy_get_port(sci_phy))
696                                 continue;
697
698                         /* The controller start operation is complete iff:
699                          * - all links have been given an opportunity to start
700                          * - have no indication of a connected device
701                          * - have an indication of a connected device and it has
702                          *   finished the link training process.
703                          */
704                         if ((sci_phy->is_in_link_training == false &&
705                              state == SCI_BASE_PHY_STATE_INITIAL) ||
706                             (sci_phy->is_in_link_training == false &&
707                              state == SCI_BASE_PHY_STATE_STOPPED) ||
708                             (sci_phy->is_in_link_training == true &&
709                              state == SCI_BASE_PHY_STATE_STARTING)) {
710                                 is_controller_start_complete = false;
711                                 break;
712                         }
713                 }
714
715                 /*
716                  * The controller has successfully finished the start process.
717                  * Inform the SCI Core user and transition to the READY state. */
718                 if (is_controller_start_complete == true) {
719                         scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS);
720                         scic_sds_controller_phy_timer_stop(scic);
721                 }
722         } else {
723                 sci_phy = &ihost->phys[scic->next_phy_to_start].sci;
724
725                 if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
726                         if (scic_sds_phy_get_port(sci_phy) == NULL) {
727                                 scic->next_phy_to_start++;
728
729                                 /* Caution recursion ahead be forwarned
730                                  *
731                                  * The PHY was never added to a PORT in MPC mode
732                                  * so start the next phy in sequence This phy
733                                  * will never go link up and will not draw power
734                                  * the OEM parameters either configured the phy
735                                  * incorrectly for the PORT or it was never
736                                  * assigned to a PORT
737                                  */
738                                 return scic_sds_controller_start_next_phy(scic);
739                         }
740                 }
741
742                 status = scic_sds_phy_start(sci_phy);
743
744                 if (status == SCI_SUCCESS) {
745                         scic_sds_controller_phy_timer_start(scic);
746                 } else {
747                         dev_warn(scic_to_dev(scic),
748                                  "%s: Controller stop operation failed "
749                                  "to stop phy %d because of status "
750                                  "%d.\n",
751                                  __func__,
752                                  ihost->phys[scic->next_phy_to_start].sci.phy_index,
753                                  status);
754                 }
755
756                 scic->next_phy_to_start++;
757         }
758
759         return status;
760 }
761
762 static void scic_sds_controller_phy_startup_timeout_handler(void *_scic)
763 {
764         struct scic_sds_controller *scic = _scic;
765         enum sci_status status;
766
767         scic->phy_startup_timer_pending = false;
768         status = SCI_FAILURE;
769         while (status != SCI_SUCCESS)
770                 status = scic_sds_controller_start_next_phy(scic);
771 }
772
773 static enum sci_status scic_sds_controller_initialize_phy_startup(struct scic_sds_controller *scic)
774 {
775         struct isci_host *ihost = scic_to_ihost(scic);
776
777         scic->phy_startup_timer = isci_timer_create(ihost,
778                                                     scic,
779                                                     scic_sds_controller_phy_startup_timeout_handler);
780
781         if (scic->phy_startup_timer == NULL)
782                 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
783         else {
784                 scic->next_phy_to_start = 0;
785                 scic->phy_startup_timer_pending = false;
786         }
787
788         return SCI_SUCCESS;
789 }
790
791 static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic)
792 {
793         u32 index;
794         enum sci_status status;
795         enum sci_status phy_status;
796         struct isci_host *ihost = scic_to_ihost(scic);
797
798         status = SCI_SUCCESS;
799
800         for (index = 0; index < SCI_MAX_PHYS; index++) {
801                 phy_status = scic_sds_phy_stop(&ihost->phys[index].sci);
802
803                 if (phy_status != SCI_SUCCESS &&
804                     phy_status != SCI_FAILURE_INVALID_STATE) {
805                         status = SCI_FAILURE;
806
807                         dev_warn(scic_to_dev(scic),
808                                  "%s: Controller stop operation failed to stop "
809                                  "phy %d because of status %d.\n",
810                                  __func__,
811                                  ihost->phys[index].sci.phy_index, phy_status);
812                 }
813         }
814
815         return status;
816 }
817
818 static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic)
819 {
820         u32 index;
821         enum sci_status status;
822         enum sci_status device_status;
823
824         status = SCI_SUCCESS;
825
826         for (index = 0; index < scic->remote_node_entries; index++) {
827                 if (scic->device_table[index] != NULL) {
828                         /* / @todo What timeout value do we want to provide to this request? */
829                         device_status = scic_remote_device_stop(scic->device_table[index], 0);
830
831                         if ((device_status != SCI_SUCCESS) &&
832                             (device_status != SCI_FAILURE_INVALID_STATE)) {
833                                 dev_warn(scic_to_dev(scic),
834                                          "%s: Controller stop operation failed "
835                                          "to stop device 0x%p because of "
836                                          "status %d.\n",
837                                          __func__,
838                                          scic->device_table[index], device_status);
839                         }
840                 }
841         }
842
843         return status;
844 }
845
/* Arm the power control (staggered spin-up) timer and record that it is
 * running so _stop()/_restart() can tell whether a stop is needed.
 */
static void scic_sds_controller_power_control_timer_start(struct scic_sds_controller *scic)
{
	isci_timer_start(scic->power_control.timer,
			 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);

	scic->power_control.timer_started = true;
}
853
854 static void scic_sds_controller_power_control_timer_stop(struct scic_sds_controller *scic)
855 {
856         if (scic->power_control.timer_started) {
857                 isci_timer_stop(scic->power_control.timer);
858                 scic->power_control.timer_started = false;
859         }
860 }
861
/* Restart the power control timer so a full interval elapses before the
 * next grant-accounting reset in the timer handler.
 */
static void scic_sds_controller_power_control_timer_restart(struct scic_sds_controller *scic)
{
	scic_sds_controller_power_control_timer_stop(scic);
	scic_sds_controller_power_control_timer_start(scic);
}
867
868 static void scic_sds_controller_power_control_timer_handler(
869         void *controller)
870 {
871         struct scic_sds_controller *scic;
872
873         scic = (struct scic_sds_controller *)controller;
874
875         scic->power_control.phys_granted_power = 0;
876
877         if (scic->power_control.phys_waiting == 0) {
878                 scic->power_control.timer_started = false;
879         } else {
880                 struct scic_sds_phy *sci_phy = NULL;
881                 u8 i;
882
883                 for (i = 0;
884                      (i < SCI_MAX_PHYS)
885                      && (scic->power_control.phys_waiting != 0);
886                      i++) {
887                         if (scic->power_control.requesters[i] != NULL) {
888                                 if (scic->power_control.phys_granted_power <
889                                     scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
890                                         sci_phy = scic->power_control.requesters[i];
891                                         scic->power_control.requesters[i] = NULL;
892                                         scic->power_control.phys_waiting--;
893                                         scic->power_control.phys_granted_power++;
894                                         scic_sds_phy_consume_power_handler(sci_phy);
895                                 } else {
896                                         break;
897                                 }
898                         }
899                 }
900
901                 /*
902                  * It doesn't matter if the power list is empty, we need to start the
903                  * timer in case another phy becomes ready.
904                  */
905                 scic_sds_controller_power_control_timer_start(scic);
906         }
907 }
908
909 /**
910  * This method inserts the phy in the stagger spinup control queue.
911  * @scic:
912  *
913  *
914  */
915 void scic_sds_controller_power_control_queue_insert(
916         struct scic_sds_controller *scic,
917         struct scic_sds_phy *sci_phy)
918 {
919         BUG_ON(sci_phy == NULL);
920
921         if (scic->power_control.phys_granted_power <
922             scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
923                 scic->power_control.phys_granted_power++;
924                 scic_sds_phy_consume_power_handler(sci_phy);
925
926                 /*
927                  * stop and start the power_control timer. When the timer fires, the
928                  * no_of_phys_granted_power will be set to 0
929                  */
930                 scic_sds_controller_power_control_timer_restart(scic);
931         } else {
932                 /* Add the phy in the waiting list */
933                 scic->power_control.requesters[sci_phy->phy_index] = sci_phy;
934                 scic->power_control.phys_waiting++;
935         }
936 }
937
938 /**
939  * This method removes the phy from the stagger spinup control queue.
940  * @scic:
941  *
942  *
943  */
944 void scic_sds_controller_power_control_queue_remove(
945         struct scic_sds_controller *scic,
946         struct scic_sds_phy *sci_phy)
947 {
948         BUG_ON(sci_phy == NULL);
949
950         if (scic->power_control.requesters[sci_phy->phy_index] != NULL) {
951                 scic->power_control.phys_waiting--;
952         }
953
954         scic->power_control.requesters[sci_phy->phy_index] = NULL;
955 }
956
957 /*
958  * ****************************************************************************-
959  * * SCIC SDS Controller Completion Routines
960  * ****************************************************************************- */
961
962 /**
963  * This method returns a true value if the completion queue has entries that
964  *    can be processed
965  * @scic:
966  *
967  * bool true if the completion queue has entries to process false if the
968  * completion queue has no entries to process
969  */
970 static bool scic_sds_controller_completion_queue_has_entries(
971         struct scic_sds_controller *scic)
972 {
973         u32 get_value = scic->completion_queue_get;
974         u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
975
976         if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
977             COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index]))
978                 return true;
979
980         return false;
981 }
982
983 /**
984  * This method processes a task completion notification.  This is called from
985  *    within the controller completion handler.
986  * @scic:
987  * @completion_entry:
988  *
989  */
990 static void scic_sds_controller_task_completion(
991         struct scic_sds_controller *scic,
992         u32 completion_entry)
993 {
994         u32 index;
995         struct scic_sds_request *io_request;
996
997         index = SCU_GET_COMPLETION_INDEX(completion_entry);
998         io_request = scic->io_request_table[index];
999
1000         /* Make sure that we really want to process this IO request */
1001         if (
1002                 (io_request != NULL)
1003                 && (io_request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG)
1004                 && (
1005                         scic_sds_io_tag_get_sequence(io_request->io_tag)
1006                         == scic->io_request_sequence[index]
1007                         )
1008                 ) {
1009                 /* Yep this is a valid io request pass it along to the io request handler */
1010                 scic_sds_io_request_tc_completion(io_request, completion_entry);
1011         }
1012 }
1013
1014 /**
1015  * This method processes an SDMA completion event.  This is called from within
1016  *    the controller completion handler.
1017  * @scic:
1018  * @completion_entry:
1019  *
1020  */
1021 static void scic_sds_controller_sdma_completion(
1022         struct scic_sds_controller *scic,
1023         u32 completion_entry)
1024 {
1025         u32 index;
1026         struct scic_sds_request *io_request;
1027         struct scic_sds_remote_device *device;
1028
1029         index = SCU_GET_COMPLETION_INDEX(completion_entry);
1030
1031         switch (scu_get_command_request_type(completion_entry)) {
1032         case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
1033         case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
1034                 io_request = scic->io_request_table[index];
1035                 dev_warn(scic_to_dev(scic),
1036                          "%s: SCIC SDS Completion type SDMA %x for io request "
1037                          "%p\n",
1038                          __func__,
1039                          completion_entry,
1040                          io_request);
1041                 /* @todo For a post TC operation we need to fail the IO
1042                  * request
1043                  */
1044                 break;
1045
1046         case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
1047         case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
1048         case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
1049                 device = scic->device_table[index];
1050                 dev_warn(scic_to_dev(scic),
1051                          "%s: SCIC SDS Completion type SDMA %x for remote "
1052                          "device %p\n",
1053                          __func__,
1054                          completion_entry,
1055                          device);
1056                 /* @todo For a port RNC operation we need to fail the
1057                  * device
1058                  */
1059                 break;
1060
1061         default:
1062                 dev_warn(scic_to_dev(scic),
1063                          "%s: SCIC SDS Completion unknown SDMA completion "
1064                          "type %x\n",
1065                          __func__,
1066                          completion_entry);
1067                 break;
1068
1069         }
1070 }
1071
1072 static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *scic,
1073                                                   u32 completion_entry)
1074 {
1075         u32 index;
1076         u32 frame_index;
1077
1078         struct isci_host *ihost = scic_to_ihost(scic);
1079         struct scu_unsolicited_frame_header *frame_header;
1080         struct scic_sds_phy *phy;
1081         struct scic_sds_remote_device *device;
1082
1083         enum sci_status result = SCI_FAILURE;
1084
1085         frame_index = SCU_GET_FRAME_INDEX(completion_entry);
1086
1087         frame_header = scic->uf_control.buffers.array[frame_index].header;
1088         scic->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
1089
1090         if (SCU_GET_FRAME_ERROR(completion_entry)) {
1091                 /*
1092                  * / @todo If the IAF frame or SIGNATURE FIS frame has an error will
1093                  * /       this cause a problem? We expect the phy initialization will
1094                  * /       fail if there is an error in the frame. */
1095                 scic_sds_controller_release_frame(scic, frame_index);
1096                 return;
1097         }
1098
1099         if (frame_header->is_address_frame) {
1100                 index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
1101                 phy = &ihost->phys[index].sci;
1102                 result = scic_sds_phy_frame_handler(phy, frame_index);
1103         } else {
1104
1105                 index = SCU_GET_COMPLETION_INDEX(completion_entry);
1106
1107                 if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
1108                         /*
1109                          * This is a signature fis or a frame from a direct attached SATA
1110                          * device that has not yet been created.  In either case forwared
1111                          * the frame to the PE and let it take care of the frame data. */
1112                         index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
1113                         phy = &ihost->phys[index].sci;
1114                         result = scic_sds_phy_frame_handler(phy, frame_index);
1115                 } else {
1116                         if (index < scic->remote_node_entries)
1117                                 device = scic->device_table[index];
1118                         else
1119                                 device = NULL;
1120
1121                         if (device != NULL)
1122                                 result = scic_sds_remote_device_frame_handler(device, frame_index);
1123                         else
1124                                 scic_sds_controller_release_frame(scic, frame_index);
1125                 }
1126         }
1127
1128         if (result != SCI_SUCCESS) {
1129                 /*
1130                  * / @todo Is there any reason to report some additional error message
1131                  * /       when we get this failure notifiction? */
1132         }
1133 }
1134
/*
 * Dispatch a completion-queue EVENT entry to the object it concerns
 * (controller, io request, remote device, or phy) based on the event
 * type encoded in the entry.  Note that the meaning of the index field
 * depends on the event type: it starts as the completion index but is
 * reinterpreted as the protocol engine index for phy events.
 */
static void scic_sds_controller_event_completion(struct scic_sds_controller *scic,
						 u32 completion_entry)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;
	struct scic_sds_phy *phy;
	u32 index;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_event_type(completion_entry)) {
	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
		/* / @todo The driver did something wrong and we need to fix the condition. */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received SMU command error "
			"0x%x\n",
			__func__,
			scic,
			completion_entry);
		break;

	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
	case SCU_EVENT_TYPE_SMU_ERROR:
	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/*
		 * / @todo This is a hardware failure and its likely that we want to
		 * /       reset the controller. */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received fatal controller "
			"event  0x%x\n",
			__func__,
			scic,
			completion_entry);
		break;

	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		/* Transport errors are routed to the affected io request. */
		io_request = scic->io_request_table[index];
		scic_sds_io_request_event_handler(io_request, completion_entry);
		break;

	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		/* The event specifier selects whether the io request or the
		 * remote device should receive this scheduler event.
		 */
		switch (scu_get_event_specifier(completion_entry)) {
		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
			io_request = scic->io_request_table[index];
			if (io_request != NULL)
				scic_sds_io_request_event_handler(io_request, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesnt exist.\n",
					 __func__,
					 scic,
					 completion_entry);

			break;

		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
			device = scic->device_table[index];
			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesnt exist.\n",
					 __func__,
					 scic,
					 completion_entry);

			break;
		}
		break;

	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
	/*
	 * direct the broadcast change event to the phy first and then let
	 * the phy redirect the broadcast change to the port object */
	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
	/*
	 * direct error counter event to the phy object since that is where
	 * we get the event notification.  This is a type 4 event. */
	case SCU_EVENT_TYPE_OSSP_EVENT:
		/* index is reinterpreted: phy events carry a PE index. */
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &ihost->phys[index].sci;
		scic_sds_phy_event_handler(phy, completion_entry);
		break;

	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
		/* Remote node context events use the completion index as a
		 * remote node index; bounds-check it against the table.
		 */
		if (index < scic->remote_node_entries) {
			device = scic->device_table[index];

			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
		} else
			dev_err(scic_to_dev(scic),
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%0x that doesnt "
				"exist.\n",
				__func__,
				scic,
				completion_entry,
				index);

		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC Controller received unknown event code %x\n",
			 __func__,
			 completion_entry);
		break;
	}
}
1253
1254 /**
1255  * This method is a private routine for processing the completion queue entries.
1256  * @scic:
1257  *
1258  */
1259 static void scic_sds_controller_process_completions(
1260         struct scic_sds_controller *scic)
1261 {
1262         u32 completion_count = 0;
1263         u32 completion_entry;
1264         u32 get_index;
1265         u32 get_cycle;
1266         u32 event_index;
1267         u32 event_cycle;
1268
1269         dev_dbg(scic_to_dev(scic),
1270                 "%s: completion queue begining get:0x%08x\n",
1271                 __func__,
1272                 scic->completion_queue_get);
1273
1274         /* Get the component parts of the completion queue */
1275         get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get);
1276         get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get;
1277
1278         event_index = NORMALIZE_EVENT_POINTER(scic->completion_queue_get);
1279         event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get;
1280
1281         while (
1282                 NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
1283                 == COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index])
1284                 ) {
1285                 completion_count++;
1286
1287                 completion_entry = scic->completion_queue[get_index];
1288                 INCREMENT_COMPLETION_QUEUE_GET(scic, get_index, get_cycle);
1289
1290                 dev_dbg(scic_to_dev(scic),
1291                         "%s: completion queue entry:0x%08x\n",
1292                         __func__,
1293                         completion_entry);
1294
1295                 switch (SCU_GET_COMPLETION_TYPE(completion_entry)) {
1296                 case SCU_COMPLETION_TYPE_TASK:
1297                         scic_sds_controller_task_completion(scic, completion_entry);
1298                         break;
1299
1300                 case SCU_COMPLETION_TYPE_SDMA:
1301                         scic_sds_controller_sdma_completion(scic, completion_entry);
1302                         break;
1303
1304                 case SCU_COMPLETION_TYPE_UFI:
1305                         scic_sds_controller_unsolicited_frame(scic, completion_entry);
1306                         break;
1307
1308                 case SCU_COMPLETION_TYPE_EVENT:
1309                         INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
1310                         scic_sds_controller_event_completion(scic, completion_entry);
1311                         break;
1312
1313                 case SCU_COMPLETION_TYPE_NOTIFY:
1314                         /*
1315                          * Presently we do the same thing with a notify event that we do with the
1316                          * other event codes. */
1317                         INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
1318                         scic_sds_controller_event_completion(scic, completion_entry);
1319                         break;
1320
1321                 default:
1322                         dev_warn(scic_to_dev(scic),
1323                                  "%s: SCIC Controller received unknown "
1324                                  "completion type %x\n",
1325                                  __func__,
1326                                  completion_entry);
1327                         break;
1328                 }
1329         }
1330
1331         /* Update the get register if we completed one or more entries */
1332         if (completion_count > 0) {
1333                 scic->completion_queue_get =
1334                         SMU_CQGR_GEN_BIT(ENABLE) |
1335                         SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
1336                         event_cycle |
1337                         SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index) |
1338                         get_cycle |
1339                         SMU_CQGR_GEN_VAL(POINTER, get_index);
1340
1341                 writel(scic->completion_queue_get,
1342                        &scic->smu_registers->completion_queue_get);
1343
1344         }
1345
1346         dev_dbg(scic_to_dev(scic),
1347                 "%s: completion queue ending get:0x%08x\n",
1348                 __func__,
1349                 scic->completion_queue_get);
1350
1351 }
1352
1353 bool scic_sds_controller_isr(struct scic_sds_controller *scic)
1354 {
1355         if (scic_sds_controller_completion_queue_has_entries(scic)) {
1356                 return true;
1357         } else {
1358                 /*
1359                  * we have a spurious interrupt it could be that we have already
1360                  * emptied the completion queue from a previous interrupt */
1361                 writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
1362
1363                 /*
1364                  * There is a race in the hardware that could cause us not to be notified
1365                  * of an interrupt completion if we do not take this step.  We will mask
1366                  * then unmask the interrupts so if there is another interrupt pending
1367                  * the clearing of the interrupt source we get the next interrupt message. */
1368                 writel(0xFF000000, &scic->smu_registers->interrupt_mask);
1369                 writel(0, &scic->smu_registers->interrupt_mask);
1370         }
1371
1372         return false;
1373 }
1374
/*
 * Bottom-half completion handler: drain the completion queue, then
 * acknowledge the completion interrupt and re-arm the interrupt mask.
 * The register write ordering here is deliberate (ack before unmask).
 */
void scic_sds_controller_completion_handler(struct scic_sds_controller *scic)
{
	/* Empty out the completion queue */
	if (scic_sds_controller_completion_queue_has_entries(scic))
		scic_sds_controller_process_completions(scic);

	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);
}
1387
1388 bool scic_sds_controller_error_isr(struct scic_sds_controller *scic)
1389 {
1390         u32 interrupt_status;
1391
1392         interrupt_status =
1393                 readl(&scic->smu_registers->interrupt_status);
1394         interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
1395
1396         if (interrupt_status != 0) {
1397                 /*
1398                  * There is an error interrupt pending so let it through and handle
1399                  * in the callback */
1400                 return true;
1401         }
1402
1403         /*
1404          * There is a race in the hardware that could cause us not to be notified
1405          * of an interrupt completion if we do not take this step.  We will mask
1406          * then unmask the error interrupts so if there was another interrupt
1407          * pending we will be notified.
1408          * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
1409         writel(0xff, &scic->smu_registers->interrupt_mask);
1410         writel(0, &scic->smu_registers->interrupt_mask);
1411
1412         return false;
1413 }
1414
1415 void scic_sds_controller_error_handler(struct scic_sds_controller *scic)
1416 {
1417         u32 interrupt_status;
1418
1419         interrupt_status =
1420                 readl(&scic->smu_registers->interrupt_status);
1421
1422         if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
1423             scic_sds_controller_completion_queue_has_entries(scic)) {
1424
1425                 scic_sds_controller_process_completions(scic);
1426                 writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status);
1427         } else {
1428                 dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__,
1429                         interrupt_status);
1430
1431                 sci_base_state_machine_change_state(&scic->state_machine,
1432                                                     SCI_BASE_CONTROLLER_STATE_FAILED);
1433
1434                 return;
1435         }
1436
1437         /* If we dont process any completions I am not sure that we want to do this.
1438          * We are in the middle of a hardware fault and should probably be reset.
1439          */
1440         writel(0, &scic->smu_registers->interrupt_mask);
1441 }
1442
1443
1444
1445
1446 void scic_sds_controller_link_up(struct scic_sds_controller *scic,
1447                 struct scic_sds_port *port, struct scic_sds_phy *phy)
1448 {
1449         switch (scic->state_machine.current_state_id) {
1450         case SCI_BASE_CONTROLLER_STATE_STARTING:
1451                 scic_sds_controller_phy_timer_stop(scic);
1452                 scic->port_agent.link_up_handler(scic, &scic->port_agent,
1453                                                  port, phy);
1454                 scic_sds_controller_start_next_phy(scic);
1455                 break;
1456         case SCI_BASE_CONTROLLER_STATE_READY:
1457                 scic->port_agent.link_up_handler(scic, &scic->port_agent,
1458                                                  port, phy);
1459                 break;
1460         default:
1461                 dev_dbg(scic_to_dev(scic),
1462                         "%s: SCIC Controller linkup event from phy %d in "
1463                         "unexpected state %d\n", __func__, phy->phy_index,
1464                         scic->state_machine.current_state_id);
1465         }
1466 }
1467
1468 void scic_sds_controller_link_down(struct scic_sds_controller *scic,
1469                 struct scic_sds_port *port, struct scic_sds_phy *phy)
1470 {
1471         switch (scic->state_machine.current_state_id) {
1472         case SCI_BASE_CONTROLLER_STATE_STARTING:
1473         case SCI_BASE_CONTROLLER_STATE_READY:
1474                 scic->port_agent.link_down_handler(scic, &scic->port_agent,
1475                                                    port, phy);
1476                 break;
1477         default:
1478                 dev_dbg(scic_to_dev(scic),
1479                         "%s: SCIC Controller linkdown event from phy %d in "
1480                         "unexpected state %d\n",
1481                         __func__,
1482                         phy->phy_index,
1483                         scic->state_machine.current_state_id);
1484         }
1485 }
1486
1487 /**
1488  * This is a helper method to determine if any remote devices on this
1489  * controller are still in the stopping state.
1490  *
1491  */
1492 static bool scic_sds_controller_has_remote_devices_stopping(
1493         struct scic_sds_controller *controller)
1494 {
1495         u32 index;
1496
1497         for (index = 0; index < controller->remote_node_entries; index++) {
1498                 if ((controller->device_table[index] != NULL) &&
1499                    (controller->device_table[index]->state_machine.current_state_id
1500                     == SCI_BASE_REMOTE_DEVICE_STATE_STOPPING))
1501                         return true;
1502         }
1503
1504         return false;
1505 }
1506
1507 /**
1508  * This method is called by the remote device to inform the controller
1509  * object that the remote device has stopped.
1510  */
1511 void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic,
1512                                                struct scic_sds_remote_device *sci_dev)
1513 {
1514         if (scic->state_machine.current_state_id !=
1515             SCI_BASE_CONTROLLER_STATE_STOPPING) {
1516                 dev_dbg(scic_to_dev(scic),
1517                         "SCIC Controller 0x%p remote device stopped event "
1518                         "from device 0x%p in unexpected state %d\n",
1519                         scic, sci_dev,
1520                         scic->state_machine.current_state_id);
1521                 return;
1522         }
1523
1524         if (!scic_sds_controller_has_remote_devices_stopping(scic)) {
1525                 sci_base_state_machine_change_state(&scic->state_machine,
1526                                 SCI_BASE_CONTROLLER_STATE_STOPPED);
1527         }
1528 }
1529
1530 /**
1531  * This method will write to the SCU PCP register the request value. The method
1532  *    is used to suspend/resume ports, devices, and phys.
1533  * @scic:
1534  *
1535  *
1536  */
1537 void scic_sds_controller_post_request(
1538         struct scic_sds_controller *scic,
1539         u32 request)
1540 {
1541         dev_dbg(scic_to_dev(scic),
1542                 "%s: SCIC Controller 0x%p post request 0x%08x\n",
1543                 __func__,
1544                 scic,
1545                 request);
1546
1547         writel(request, &scic->smu_registers->post_context_port);
1548 }
1549
1550 /**
1551  * This method will copy the soft copy of the task context into the physical
1552  *    memory accessible by the controller.
1553  * @scic: This parameter specifies the controller for which to copy
1554  *    the task context.
1555  * @sci_req: This parameter specifies the request for which the task
1556  *    context is being copied.
1557  *
1558  * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
1559  * the physical memory version of the task context. Thus, all subsequent
1560  * updates to the task context are performed in the TC table (i.e. DMAable
1561  * memory). none
1562  */
1563 void scic_sds_controller_copy_task_context(
1564         struct scic_sds_controller *scic,
1565         struct scic_sds_request *sci_req)
1566 {
1567         struct scu_task_context *task_context_buffer;
1568
1569         task_context_buffer = scic_sds_controller_get_task_context_buffer(
1570                 scic, sci_req->io_tag);
1571
1572         memcpy(task_context_buffer,
1573                sci_req->task_context_buffer,
1574                SCI_FIELD_OFFSET(struct scu_task_context, sgl_snapshot_ac));
1575
1576         /*
1577          * Now that the soft copy of the TC has been copied into the TC
1578          * table accessible by the silicon.  Thus, any further changes to
1579          * the TC (e.g. TC termination) occur in the appropriate location. */
1580         sci_req->task_context_buffer = task_context_buffer;
1581 }
1582
1583 /**
1584  * This method returns the task context buffer for the given io tag.
1585  * @scic:
1586  * @io_tag:
1587  *
1588  * struct scu_task_context*
1589  */
1590 struct scu_task_context *scic_sds_controller_get_task_context_buffer(
1591         struct scic_sds_controller *scic,
1592         u16 io_tag
1593         ) {
1594         u16 task_index = scic_sds_io_tag_get_index(io_tag);
1595
1596         if (task_index < scic->task_context_entries) {
1597                 return &scic->task_context_table[task_index];
1598         }
1599
1600         return NULL;
1601 }
1602
1603 struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
1604                                              u16 io_tag)
1605 {
1606         u16 task_index;
1607         u16 task_sequence;
1608
1609         task_index = scic_sds_io_tag_get_index(io_tag);
1610
1611         if (task_index  < scic->task_context_entries) {
1612                 if (scic->io_request_table[task_index] != NULL) {
1613                         task_sequence = scic_sds_io_tag_get_sequence(io_tag);
1614
1615                         if (task_sequence == scic->io_request_sequence[task_index]) {
1616                                 return scic->io_request_table[task_index];
1617                         }
1618                 }
1619         }
1620
1621         return NULL;
1622 }
1623
1624 /**
1625  * This method allocates remote node index and the reserves the remote node
1626  *    context space for use. This method can fail if there are no more remote
1627  *    node index available.
1628  * @scic: This is the controller object which contains the set of
1629  *    free remote node ids
1630  * @sci_dev: This is the device object which is requesting the a remote node
1631  *    id
1632  * @node_id: This is the remote node id that is assinged to the device if one
1633  *    is available
1634  *
1635  * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there are no available remote
1636  * node index available.
1637  */
1638 enum sci_status scic_sds_controller_allocate_remote_node_context(
1639         struct scic_sds_controller *scic,
1640         struct scic_sds_remote_device *sci_dev,
1641         u16 *node_id)
1642 {
1643         u16 node_index;
1644         u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);
1645
1646         node_index = scic_sds_remote_node_table_allocate_remote_node(
1647                 &scic->available_remote_nodes, remote_node_count
1648                 );
1649
1650         if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
1651                 scic->device_table[node_index] = sci_dev;
1652
1653                 *node_id = node_index;
1654
1655                 return SCI_SUCCESS;
1656         }
1657
1658         return SCI_FAILURE_INSUFFICIENT_RESOURCES;
1659 }
1660
1661 /**
1662  * This method frees the remote node index back to the available pool.  Once
1663  *    this is done the remote node context buffer is no longer valid and can
1664  *    not be used.
1665  * @scic:
1666  * @sci_dev:
1667  * @node_id:
1668  *
1669  */
1670 void scic_sds_controller_free_remote_node_context(
1671         struct scic_sds_controller *scic,
1672         struct scic_sds_remote_device *sci_dev,
1673         u16 node_id)
1674 {
1675         u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);
1676
1677         if (scic->device_table[node_id] == sci_dev) {
1678                 scic->device_table[node_id] = NULL;
1679
1680                 scic_sds_remote_node_table_release_remote_node_index(
1681                         &scic->available_remote_nodes, remote_node_count, node_id
1682                         );
1683         }
1684 }
1685
1686 /**
1687  * This method returns the union scu_remote_node_context for the specified remote
1688  *    node id.
1689  * @scic:
1690  * @node_id:
1691  *
1692  * union scu_remote_node_context*
1693  */
1694 union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
1695         struct scic_sds_controller *scic,
1696         u16 node_id
1697         ) {
1698         if (
1699                 (node_id < scic->remote_node_entries)
1700                 && (scic->device_table[node_id] != NULL)
1701                 ) {
1702                 return &scic->remote_node_context_table[node_id];
1703         }
1704
1705         return NULL;
1706 }
1707
1708 /**
1709  *
1710  * @resposne_buffer: This is the buffer into which the D2H register FIS will be
1711  *    constructed.
1712  * @frame_header: This is the frame header returned by the hardware.
1713  * @frame_buffer: This is the frame buffer returned by the hardware.
1714  *
1715  * This method will combind the frame header and frame buffer to create a SATA
1716  * D2H register FIS none
1717  */
1718 void scic_sds_controller_copy_sata_response(
1719         void *response_buffer,
1720         void *frame_header,
1721         void *frame_buffer)
1722 {
1723         memcpy(response_buffer, frame_header, sizeof(u32));
1724
1725         memcpy(response_buffer + sizeof(u32),
1726                frame_buffer,
1727                sizeof(struct dev_to_host_fis) - sizeof(u32));
1728 }
1729
1730 /**
1731  * This method releases the frame once this is done the frame is available for
1732  *    re-use by the hardware.  The data contained in the frame header and frame
1733  *    buffer is no longer valid. The UF queue get pointer is only updated if UF
1734  *    control indicates this is appropriate.
1735  * @scic:
1736  * @frame_index:
1737  *
1738  */
1739 void scic_sds_controller_release_frame(
1740         struct scic_sds_controller *scic,
1741         u32 frame_index)
1742 {
1743         if (scic_sds_unsolicited_frame_control_release_frame(
1744                     &scic->uf_control, frame_index) == true)
1745                 writel(scic->uf_control.get,
1746                         &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
1747 }
1748
/**
 * This method sets user parameters and OEM parameters to default values.
 *    Users can override these values utilizing the scic_user_parameters_set()
 *    and scic_oem_parameters_set() methods.
 * @scic: This parameter specifies the controller for which to set the
 *    configuration parameters to their default values.
 *
 */
static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	u16 index;

	/* Default to APC mode. */
	scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;

	/* Default to allowing only one device to spin up at a time. */
	scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1;

	/* Default to no SSC operation. */
	scic->oem_parameters.sds1.controller.do_enable_ssc = false;

	/* Initialize all of the port parameter information to narrow ports. */
	for (index = 0; index < SCI_MAX_PORTS; index++) {
		scic->oem_parameters.sds1.ports[index].phy_mask = 0;
	}

	/* Initialize all of the phy parameter information. */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		/* Default to 6G (i.e. Gen 3) for now. */
		scic->user_parameters.sds1.phys[index].max_speed_generation = 3;

		/* the frequencies cannot be 0 */
		scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f;
		scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff;
		scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;

		/*
		 * Previous Vitesse based expanders had an arbitration issue that
		 * is worked around by having the upper 32-bits of SAS address
		 * with a value greater than the Vitesse company identifier.
		 * Hence, usage of 0x5FCFFFFF. */
		scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id;
		scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF;
	}

	/* Default inactivity/occupancy timeout values. */
	scic->user_parameters.sds1.stp_inactivity_timeout = 5;
	scic->user_parameters.sds1.ssp_inactivity_timeout = 5;
	scic->user_parameters.sds1.stp_max_occupancy_timeout = 5;
	scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20;
	scic->user_parameters.sds1.no_outbound_task_timeout = 20;
}
1801
1802 /**
1803  * scic_controller_get_suggested_start_timeout() - This method returns the
1804  *    suggested scic_controller_start() timeout amount.  The user is free to
1805  *    use any timeout value, but this method provides the suggested minimum
1806  *    start timeout value.  The returned value is based upon empirical
1807  *    information determined as a result of interoperability testing.
1808  * @controller: the handle to the controller object for which to return the
1809  *    suggested start timeout.
1810  *
1811  * This method returns the number of milliseconds for the suggested start
1812  * operation timeout.
1813  */
1814 u32 scic_controller_get_suggested_start_timeout(
1815         struct scic_sds_controller *sc)
1816 {
1817         /* Validate the user supplied parameters. */
1818         if (sc == NULL)
1819                 return 0;
1820
1821         /*
1822          * The suggested minimum timeout value for a controller start operation:
1823          *
1824          *     Signature FIS Timeout
1825          *   + Phy Start Timeout
1826          *   + Number of Phy Spin Up Intervals
1827          *   ---------------------------------
1828          *   Number of milliseconds for the controller start operation.
1829          *
1830          * NOTE: The number of phy spin up intervals will be equivalent
1831          *       to the number of phys divided by the number phys allowed
1832          *       per interval - 1 (once OEM parameters are supported).
1833          *       Currently we assume only 1 phy per interval. */
1834
1835         return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
1836                 + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
1837                 + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1838 }
1839
1840 /**
1841  * scic_controller_stop() - This method will stop an individual controller
1842  *    object.This method will invoke the associated user callback upon
1843  *    completion.  The completion callback is called when the following
1844  *    conditions are met: -# the method return status is SCI_SUCCESS. -# the
1845  *    controller has been quiesced. This method will ensure that all IO
1846  *    requests are quiesced, phys are stopped, and all additional operation by
1847  *    the hardware is halted.
1848  * @controller: the handle to the controller object to stop.
1849  * @timeout: This parameter specifies the number of milliseconds in which the
1850  *    stop operation should complete.
1851  *
1852  * The controller must be in the STARTED or STOPPED state. Indicate if the
1853  * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
1854  * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
1855  * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
1856  * controller is not either in the STARTED or STOPPED states.
1857  */
1858 enum sci_status scic_controller_stop(
1859         struct scic_sds_controller *scic,
1860         u32 timeout)
1861 {
1862         if (scic->state_machine.current_state_id !=
1863             SCI_BASE_CONTROLLER_STATE_READY) {
1864                 dev_warn(scic_to_dev(scic),
1865                          "SCIC Controller stop operation requested in "
1866                          "invalid state\n");
1867                 return SCI_FAILURE_INVALID_STATE;
1868         }
1869
1870         isci_timer_start(scic->timeout_timer, timeout);
1871         sci_base_state_machine_change_state(&scic->state_machine,
1872                                             SCI_BASE_CONTROLLER_STATE_STOPPING);
1873         return SCI_SUCCESS;
1874 }
1875
1876 /**
1877  * scic_controller_reset() - This method will reset the supplied core
1878  *    controller regardless of the state of said controller.  This operation is
1879  *    considered destructive.  In other words, all current operations are wiped
1880  *    out.  No IO completions for outstanding devices occur.  Outstanding IO
1881  *    requests are not aborted or completed at the actual remote device.
1882  * @controller: the handle to the controller object to reset.
1883  *
1884  * Indicate if the controller reset method succeeded or failed in some way.
1885  * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
1886  * the controller reset operation is unable to complete.
1887  */
1888 enum sci_status scic_controller_reset(
1889         struct scic_sds_controller *scic)
1890 {
1891         switch (scic->state_machine.current_state_id) {
1892         case SCI_BASE_CONTROLLER_STATE_RESET:
1893         case SCI_BASE_CONTROLLER_STATE_READY:
1894         case SCI_BASE_CONTROLLER_STATE_STOPPED:
1895         case SCI_BASE_CONTROLLER_STATE_FAILED:
1896                 /*
1897                  * The reset operation is not a graceful cleanup, just
1898                  * perform the state transition.
1899                  */
1900                 sci_base_state_machine_change_state(&scic->state_machine,
1901                                 SCI_BASE_CONTROLLER_STATE_RESETTING);
1902                 return SCI_SUCCESS;
1903         default:
1904                 dev_warn(scic_to_dev(scic),
1905                          "SCIC Controller reset operation requested in "
1906                          "invalid state\n");
1907                 return SCI_FAILURE_INVALID_STATE;
1908         }
1909 }
1910
1911 /**
1912  * scic_controller_start_io() - This method is called by the SCI user to
1913  *    send/start an IO request. If the method invocation is successful, then
1914  *    the IO request has been queued to the hardware for processing.
1915  * @controller: the handle to the controller object for which to start an IO
1916  *    request.
1917  * @remote_device: the handle to the remote device object for which to start an
1918  *    IO request.
1919  * @io_request: the handle to the io request object to start.
1920  * @io_tag: This parameter specifies a previously allocated IO tag that the
1921  *    user desires to be utilized for this request. This parameter is optional.
1922  *     The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value
1923  *    for this parameter.
1924  *
1925  * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
1926  * to ensure that each of the methods that may allocate or free available IO
1927  * tags are handled in a mutually exclusive manner.  This method is one of said
1928  * methods requiring proper critical code section protection (e.g. semaphore,
1929  * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags.  As a
1930  * result, it is expected the user will have set the NCQ tag field in the host
1931  * to device register FIS prior to calling this method.  There is also a
1932  * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking
1933  * the scic_controller_start_io() method. scic_controller_allocate_tag() for
1934  * more information on allocating a tag. Indicate if the controller
1935  * successfully started the IO request. SCI_SUCCESS if the IO request was
1936  * successfully started. Determine the failure situations and return values.
1937  */
1938 enum sci_status scic_controller_start_io(
1939         struct scic_sds_controller *scic,
1940         struct scic_sds_remote_device *rdev,
1941         struct scic_sds_request *req,
1942         u16 io_tag)
1943 {
1944         enum sci_status status;
1945
1946         if (scic->state_machine.current_state_id !=
1947             SCI_BASE_CONTROLLER_STATE_READY) {
1948                 dev_warn(scic_to_dev(scic), "invalid state to start I/O");
1949                 return SCI_FAILURE_INVALID_STATE;
1950         }
1951
1952         status = scic_sds_remote_device_start_io(scic, rdev, req);
1953         if (status != SCI_SUCCESS)
1954                 return status;
1955
1956         scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
1957         scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req));
1958         return SCI_SUCCESS;
1959 }
1960
1961 /**
1962  * scic_controller_terminate_request() - This method is called by the SCI Core
1963  *    user to terminate an ongoing (i.e. started) core IO request.  This does
1964  *    not abort the IO request at the target, but rather removes the IO request
1965  *    from the host controller.
1966  * @controller: the handle to the controller object for which to terminate a
1967  *    request.
1968  * @remote_device: the handle to the remote device object for which to
1969  *    terminate a request.
1970  * @request: the handle to the io or task management request object to
1971  *    terminate.
1972  *
1973  * Indicate if the controller successfully began the terminate process for the
1974  * IO request. SCI_SUCCESS if the terminate process was successfully started
1975  * for the request. Determine the failure situations and return values.
1976  */
1977 enum sci_status scic_controller_terminate_request(
1978         struct scic_sds_controller *scic,
1979         struct scic_sds_remote_device *rdev,
1980         struct scic_sds_request *req)
1981 {
1982         enum sci_status status;
1983
1984         if (scic->state_machine.current_state_id !=
1985             SCI_BASE_CONTROLLER_STATE_READY) {
1986                 dev_warn(scic_to_dev(scic),
1987                          "invalid state to terminate request\n");
1988                 return SCI_FAILURE_INVALID_STATE;
1989         }
1990
1991         status = scic_sds_io_request_terminate(req);
1992         if (status != SCI_SUCCESS)
1993                 return status;
1994
1995         /*
1996          * Utilize the original post context command and or in the POST_TC_ABORT
1997          * request sub-type.
1998          */
1999         scic_sds_controller_post_request(scic,
2000                 scic_sds_request_get_post_context(req) |
2001                 SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2002         return SCI_SUCCESS;
2003 }
2004
2005 /**
2006  * scic_controller_complete_io() - This method will perform core specific
2007  *    completion operations for an IO request.  After this method is invoked,
2008  *    the user should consider the IO request as invalid until it is properly
2009  *    reused (i.e. re-constructed).
2010  * @controller: The handle to the controller object for which to complete the
2011  *    IO request.
2012  * @remote_device: The handle to the remote device object for which to complete
2013  *    the IO request.
2014  * @io_request: the handle to the io request object to complete.
2015  *
2016  * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
2017  * to ensure that each of the methods that may allocate or free available IO
2018  * tags are handled in a mutually exclusive manner.  This method is one of said
2019  * methods requiring proper critical code section protection (e.g. semaphore,
2020  * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
2021  * Core user, using the scic_controller_allocate_io_tag() method, then it is
2022  * the responsibility of the caller to invoke the scic_controller_free_io_tag()
2023  * method to free the tag (i.e. this method will not free the IO tag). Indicate
2024  * if the controller successfully completed the IO request. SCI_SUCCESS if the
2025  * completion process was successful.
2026  */
2027 enum sci_status scic_controller_complete_io(
2028         struct scic_sds_controller *scic,
2029         struct scic_sds_remote_device *rdev,
2030         struct scic_sds_request *request)
2031 {
2032         enum sci_status status;
2033         u16 index;
2034
2035         switch (scic->state_machine.current_state_id) {
2036         case SCI_BASE_CONTROLLER_STATE_STOPPING:
2037                 /* XXX: Implement this function */
2038                 return SCI_FAILURE;
2039         case SCI_BASE_CONTROLLER_STATE_READY:
2040                 status = scic_sds_remote_device_complete_io(scic, rdev, request);
2041                 if (status != SCI_SUCCESS)
2042                         return status;
2043
2044                 index = scic_sds_io_tag_get_index(request->io_tag);
2045                 scic->io_request_table[index] = NULL;
2046                 return SCI_SUCCESS;
2047         default:
2048                 dev_warn(scic_to_dev(scic), "invalid state to complete I/O");
2049                 return SCI_FAILURE_INVALID_STATE;
2050         }
2051
2052 }
2053
2054 enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req)
2055 {
2056         struct scic_sds_controller *scic = sci_req->owning_controller;
2057
2058         if (scic->state_machine.current_state_id !=
2059             SCI_BASE_CONTROLLER_STATE_READY) {
2060                 dev_warn(scic_to_dev(scic), "invalid state to continue I/O");
2061                 return SCI_FAILURE_INVALID_STATE;
2062         }
2063
2064         scic->io_request_table[scic_sds_io_tag_get_index(sci_req->io_tag)] = sci_req;
2065         scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(sci_req));
2066         return SCI_SUCCESS;
2067 }
2068
2069 /**
2070  * scic_controller_start_task() - This method is called by the SCIC user to
2071  *    send/start a framework task management request.
2072  * @controller: the handle to the controller object for which to start the task
2073  *    management request.
2074  * @remote_device: the handle to the remote device object for which to start
2075  *    the task management request.
2076  * @task_request: the handle to the task request object to start.
2077  * @io_tag: This parameter specifies a previously allocated IO tag that the
2078  *    user desires to be utilized for this request.  Note this not the io_tag
2079  *    of the request being managed.  It is to be utilized for the task request
2080  *    itself. This parameter is optional.  The user is allowed to supply
2081  *    SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter.
2082  *
2083  * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
2084  * to ensure that each of the methods that may allocate or free available IO
2085  * tags are handled in a mutually exclusive manner.  This method is one of said
2086  * methods requiring proper critical code section protection (e.g. semaphore,
2087  * spin-lock, etc.). - The user must synchronize this task with completion
2088  * queue processing.  If they are not synchronized then it is possible for the
2089  * io requests that are being managed by the task request can complete before
2090  * starting the task request. scic_controller_allocate_tag() for more
2091  * information on allocating a tag. Indicate if the controller successfully
2092  * started the IO request. SCI_TASK_SUCCESS if the task request was
2093  * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is
2094  * returned if there is/are task(s) outstanding that require termination or
2095  * completion before this request can succeed.
2096  */
2097 enum sci_task_status scic_controller_start_task(
2098         struct scic_sds_controller *scic,
2099         struct scic_sds_remote_device *rdev,
2100         struct scic_sds_request *req,
2101         u16 task_tag)
2102 {
2103         enum sci_status status;
2104
2105         if (scic->state_machine.current_state_id !=
2106             SCI_BASE_CONTROLLER_STATE_READY) {
2107                 dev_warn(scic_to_dev(scic),
2108                          "%s: SCIC Controller starting task from invalid "
2109                          "state\n",
2110                          __func__);
2111                 return SCI_TASK_FAILURE_INVALID_STATE;
2112         }
2113
2114         status = scic_sds_remote_device_start_task(scic, rdev, req);
2115         switch (status) {
2116         case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
2117                 scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
2118
2119                 /*
2120                  * We will let framework know this task request started successfully,
2121                  * although core is still woring on starting the request (to post tc when
2122                  * RNC is resumed.)
2123                  */
2124                 return SCI_SUCCESS;
2125         case SCI_SUCCESS:
2126                 scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
2127
2128                 scic_sds_controller_post_request(scic,
2129                         scic_sds_request_get_post_context(req));
2130                 break;
2131         default:
2132                 break;
2133         }
2134
2135         return status;
2136 }
2137
2138 /**
2139  * scic_controller_allocate_io_tag() - This method will allocate a tag from the
2140  *    pool of free IO tags. Direct allocation of IO tags by the SCI Core user
2141  *    is optional. The scic_controller_start_io() method will allocate an IO
2142  *    tag if this method is not utilized and the tag is not supplied to the IO
2143  *    construct routine.  Direct allocation of IO tags may provide additional
2144  *    performance improvements in environments capable of supporting this usage
2145  *    model.  Additionally, direct allocation of IO tags also provides
2146  *    additional flexibility to the SCI Core user.  Specifically, the user may
2147  *    retain IO tags across the lives of multiple IO requests.
2148  * @controller: the handle to the controller object for which to allocate the
2149  *    tag.
2150  *
2151  * IO tags are a protected resource.  It is incumbent upon the SCI Core user to
2152  * ensure that each of the methods that may allocate or free available IO tags
2153  * are handled in a mutually exclusive manner.  This method is one of said
2154  * methods requiring proper critical code section protection (e.g. semaphore,
2155  * spin-lock, etc.). An unsigned integer representing an available IO tag.
2156  * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no
2157  * currently available tags to be allocated. All return other values indicate a
2158  * legitimate tag.
2159  */
2160 u16 scic_controller_allocate_io_tag(
2161         struct scic_sds_controller *scic)
2162 {
2163         u16 task_context;
2164         u16 sequence_count;
2165
2166         if (!sci_pool_empty(scic->tci_pool)) {
2167                 sci_pool_get(scic->tci_pool, task_context);
2168
2169                 sequence_count = scic->io_request_sequence[task_context];
2170
2171                 return scic_sds_io_tag_construct(sequence_count, task_context);
2172         }
2173
2174         return SCI_CONTROLLER_INVALID_IO_TAG;
2175 }
2176
2177 /**
2178  * scic_controller_free_io_tag() - This method will free an IO tag to the pool
2179  *    of free IO tags. This method provides the SCI Core user more flexibility
2180  *    with regards to IO tags.  The user may desire to keep an IO tag after an
2181  *    IO request has completed, because they plan on re-using the tag for a
2182  *    subsequent IO request.  This method is only legal if the tag was
2183  *    allocated via scic_controller_allocate_io_tag().
2184  * @controller: This parameter specifies the handle to the controller object
2185  *    for which to free/return the tag.
2186  * @io_tag: This parameter represents the tag to be freed to the pool of
2187  *    available tags.
2188  *
2189  * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
2190  * to ensure that each of the methods that may allocate or free available IO
2191  * tags are handled in a mutually exclusive manner.  This method is one of said
2192  * methods requiring proper critical code section protection (e.g. semaphore,
2193  * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
2194  * Core user, using the scic_controller_allocate_io_tag() method, then it is
2195  * the responsibility of the caller to invoke this method to free the tag. This
2196  * method returns an indication of whether the tag was successfully put back
2197  * (freed) to the pool of available tags. SCI_SUCCESS This return value
2198  * indicates the tag was successfully placed into the pool of available IO
2199  * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag
2200  * is not a valid IO tag value.
2201  */
2202 enum sci_status scic_controller_free_io_tag(
2203         struct scic_sds_controller *scic,
2204         u16 io_tag)
2205 {
2206         u16 sequence;
2207         u16 index;
2208
2209         BUG_ON(io_tag == SCI_CONTROLLER_INVALID_IO_TAG);
2210
2211         sequence = scic_sds_io_tag_get_sequence(io_tag);
2212         index    = scic_sds_io_tag_get_index(io_tag);
2213
2214         if (!sci_pool_full(scic->tci_pool)) {
2215                 if (sequence == scic->io_request_sequence[index]) {
2216                         scic_sds_io_sequence_increment(
2217                                 scic->io_request_sequence[index]);
2218
2219                         sci_pool_put(scic->tci_pool, index);
2220
2221                         return SCI_SUCCESS;
2222                 }
2223         }
2224
2225         return SCI_FAILURE_INVALID_IO_TAG;
2226 }
2227
/**
 * scic_controller_enable_interrupts() - Unmask all controller interrupt
 *    sources by writing zero to the SMU interrupt mask register.
 * @scic: the controller whose interrupts are enabled; its SMU register
 *    block must already be mapped.
 */
void scic_controller_enable_interrupts(
	struct scic_sds_controller *scic)
{
	BUG_ON(scic->smu_registers == NULL);
	writel(0, &scic->smu_registers->interrupt_mask);
}
2234
/**
 * scic_controller_disable_interrupts() - Mask every controller interrupt
 *    source by setting all bits in the SMU interrupt mask register.
 * @scic: the controller whose interrupts are disabled; its SMU register
 *    block must already be mapped.
 */
void scic_controller_disable_interrupts(
	struct scic_sds_controller *scic)
{
	BUG_ON(scic->smu_registers == NULL);
	writel(0xffffffff, &scic->smu_registers->interrupt_mask);
}
2241
2242 static enum sci_status scic_controller_set_mode(
2243         struct scic_sds_controller *scic,
2244         enum sci_controller_mode operating_mode)
2245 {
2246         enum sci_status status          = SCI_SUCCESS;
2247
2248         if ((scic->state_machine.current_state_id ==
2249                                 SCI_BASE_CONTROLLER_STATE_INITIALIZING) ||
2250             (scic->state_machine.current_state_id ==
2251                                 SCI_BASE_CONTROLLER_STATE_INITIALIZED)) {
2252                 switch (operating_mode) {
2253                 case SCI_MODE_SPEED:
2254                         scic->remote_node_entries      = SCI_MAX_REMOTE_DEVICES;
2255                         scic->task_context_entries     = SCU_IO_REQUEST_COUNT;
2256                         scic->uf_control.buffers.count =
2257                                 SCU_UNSOLICITED_FRAME_COUNT;
2258                         scic->completion_event_entries = SCU_EVENT_COUNT;
2259                         scic->completion_queue_entries =
2260                                 SCU_COMPLETION_QUEUE_COUNT;
2261                         break;
2262
2263                 case SCI_MODE_SIZE:
2264                         scic->remote_node_entries      = SCI_MIN_REMOTE_DEVICES;
2265                         scic->task_context_entries     = SCI_MIN_IO_REQUESTS;
2266                         scic->uf_control.buffers.count =
2267                                 SCU_MIN_UNSOLICITED_FRAMES;
2268                         scic->completion_event_entries = SCU_MIN_EVENTS;
2269                         scic->completion_queue_entries =
2270                                 SCU_MIN_COMPLETION_QUEUE_ENTRIES;
2271                         break;
2272
2273                 default:
2274                         status = SCI_FAILURE_INVALID_PARAMETER_VALUE;
2275                         break;
2276                 }
2277         } else
2278                 status = SCI_FAILURE_INVALID_STATE;
2279
2280         return status;
2281 }
2282
/**
 * scic_sds_controller_reset_hardware() - Soft-reset the controller hardware.
 * @scic: the controller to reset; SMU and SCU register blocks must be mapped.
 *
 * Masks interrupts, asserts the SMU soft reset, waits 1ms, then clears the
 * completion queue get pointer (CQP) and the unsolicited frame queue get
 * pointer (UFQPR) so the queues start empty after the reset.
 */
static void scic_sds_controller_reset_hardware(
	struct scic_sds_controller *scic)
{
	/* Disable interrupts so we don't take any spurious interrupts */
	scic_controller_disable_interrupts(scic);

	/* Reset the SCU */
	writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control);

	/* Delay for 1ms before clearing the CQP and UFQPR. */
	udelay(1000);

	/* The write to the CQGR clears the CQP */
	writel(0x00000000, &scic->smu_registers->completion_queue_get);

	/* The write to the UFQGP clears the UFQPR */
	writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
}
2306
2307 enum sci_status scic_user_parameters_set(
2308         struct scic_sds_controller *scic,
2309         union scic_user_parameters *scic_parms)
2310 {
2311         u32 state = scic->state_machine.current_state_id;
2312
2313         if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
2314             state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
2315             state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
2316                 u16 index;
2317
2318                 /*
2319                  * Validate the user parameters.  If they are not legal, then
2320                  * return a failure.
2321                  */
2322                 for (index = 0; index < SCI_MAX_PHYS; index++) {
2323                         struct sci_phy_user_params *user_phy;
2324
2325                         user_phy = &scic_parms->sds1.phys[index];
2326
2327                         if (!((user_phy->max_speed_generation <=
2328                                                 SCIC_SDS_PARM_MAX_SPEED) &&
2329                               (user_phy->max_speed_generation >
2330                                                 SCIC_SDS_PARM_NO_SPEED)))
2331                                 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2332
2333                         if (user_phy->in_connection_align_insertion_frequency <
2334                                         3)
2335                                 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2336
2337                         if ((user_phy->in_connection_align_insertion_frequency <
2338                                                 3) ||
2339                             (user_phy->align_insertion_frequency == 0) ||
2340                             (user_phy->
2341                                 notify_enable_spin_up_insertion_frequency ==
2342                                                 0))
2343                                 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2344                 }
2345
2346                 if ((scic_parms->sds1.stp_inactivity_timeout == 0) ||
2347                     (scic_parms->sds1.ssp_inactivity_timeout == 0) ||
2348                     (scic_parms->sds1.stp_max_occupancy_timeout == 0) ||
2349                     (scic_parms->sds1.ssp_max_occupancy_timeout == 0) ||
2350                     (scic_parms->sds1.no_outbound_task_timeout == 0))
2351                         return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2352
2353                 memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms));
2354
2355                 return SCI_SUCCESS;
2356         }
2357
2358         return SCI_FAILURE_INVALID_STATE;
2359 }
2360
2361 int scic_oem_parameters_validate(struct scic_sds_oem_params *oem)
2362 {
2363         int i;
2364
2365         for (i = 0; i < SCI_MAX_PORTS; i++)
2366                 if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
2367                         return -EINVAL;
2368
2369         for (i = 0; i < SCI_MAX_PHYS; i++)
2370                 if (oem->phys[i].sas_address.high == 0 &&
2371                     oem->phys[i].sas_address.low == 0)
2372                         return -EINVAL;
2373
2374         if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
2375                 for (i = 0; i < SCI_MAX_PHYS; i++)
2376                         if (oem->ports[i].phy_mask != 0)
2377                                 return -EINVAL;
2378         } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
2379                 u8 phy_mask = 0;
2380
2381                 for (i = 0; i < SCI_MAX_PHYS; i++)
2382                         phy_mask |= oem->ports[i].phy_mask;
2383
2384                 if (phy_mask == 0)
2385                         return -EINVAL;
2386         } else
2387                 return -EINVAL;
2388
2389         if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
2390                 return -EINVAL;
2391
2392         return 0;
2393 }
2394
2395 enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic,
2396                                         union scic_oem_parameters *scic_parms)
2397 {
2398         u32 state = scic->state_machine.current_state_id;
2399
2400         if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
2401             state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
2402             state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
2403
2404                 if (scic_oem_parameters_validate(&scic_parms->sds1))
2405                         return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2406                 scic->oem_parameters.sds1 = scic_parms->sds1;
2407
2408                 return SCI_SUCCESS;
2409         }
2410
2411         return SCI_FAILURE_INVALID_STATE;
2412 }
2413
2414 void scic_oem_parameters_get(
2415         struct scic_sds_controller *scic,
2416         union scic_oem_parameters *scic_parms)
2417 {
2418         memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms));
2419 }
2420
2421 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
2422 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
2423 #define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
2424 #define INTERRUPT_COALESCE_NUMBER_MAX                        256
2425 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
2426 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28
2427
/**
 * scic_controller_set_interrupt_coalescence() - This method allows the user to
 *    configure the interrupt coalescence.
 * @scic_controller: This parameter represents the handle to the controller
 *    object for which its interrupt coalesce register is overridden.
 * @coalesce_number: Used to control the number of entries in the Completion
 *    Queue before an interrupt is generated. If the number of entries exceed
 *    this number, an interrupt will be generated. The valid range of the input
 *    is [0, 256]. A setting of 0 results in coalescing being disabled.
 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
 *    input is [0, 2700000] . A setting of 0 is allowed and results in no
 *    interrupt coalescing timeout.
 *
 * Indicate if the user successfully set the interrupt coalesce parameters.
 * SCI_SUCCESS The user successfully updated the interrupt coalescence.
 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
 */
static enum sci_status scic_controller_set_interrupt_coalescence(
	struct scic_sds_controller *scic_controller,
	u32 coalesce_number,
	u32 coalesce_timeout)
{
	u8 timeout_encode = 0;
	u32 min = 0;
	u32 max = 0;

	/* Check if the input parameters fall in the range. */
	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	/*
	 *  Defined encoding for interrupt coalescing timeout:
	 *              Value   Min      Max     Units
	 *              -----   ---      ---     -----
	 *              0       -        -       Disabled
	 *              1       13.3     20.0    ns
	 *              2       26.7     40.0
	 *              3       53.3     80.0
	 *              4       106.7    160.0
	 *              5       213.3    320.0
	 *              6       426.7    640.0
	 *              7       853.3    1280.0
	 *              8       1.7      2.6     us
	 *              9       3.4      5.1
	 *              10      6.8      10.2
	 *              11      13.7     20.5
	 *              12      27.3     41.0
	 *              13      54.6     81.9
	 *              14      109.2    163.8
	 *              15      218.5    327.7
	 *              16      436.9    655.4
	 *              17      873.8    1310.7
	 *              18      1.7      2.6     ms
	 *              19      3.5      5.2
	 *              20      7.0      10.5
	 *              21      14.0     21.0
	 *              22      28.0     41.9
	 *              23      55.9     83.9
	 *              24      111.8    167.8
	 *              25      223.7    335.5
	 *              26      447.4    671.1
	 *              27      894.8    1342.2
	 *              28      1.8      2.7     s
	 *              Others Undefined */

	/*
	 * Use the table above to decide the encode of interrupt coalescing timeout
	 * value for register writing. */
	if (coalesce_timeout == 0)
		timeout_encode = 0;
	else{
		/* make the timeout value in unit of (10 ns). */
		coalesce_timeout = coalesce_timeout * 100;
		/* [min, max) window in 10ns units for encode value 7, the
		 * smallest encode the loop below considers; each iteration
		 * doubles the window (matching the table above). */
		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

		/* get the encode of timeout for register writing. */
		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
		      timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
		      timeout_encode++) {
			/* Requested value falls inside this encode's window. */
			if (min <= coalesce_timeout &&  max > coalesce_timeout)
				break;
			/* Requested value falls in the gap between this
			 * window's max and the next window's min: round to
			 * whichever edge is closer. */
			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
					break;
				else{
					timeout_encode++;
					break;
				}
			} else {
				max = max * 2;
				min = min * 2;
			}
		}

		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
			/* the value is out of range. */
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
	}

	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
	       &scic_controller->smu_registers->interrupt_coalesce_control);


	/* Cache the effective settings (timeout converted back to us). */
	scic_controller->interrupt_coalesce_number = (u16)coalesce_number;
	scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100;

	return SCI_SUCCESS;
}
2539
2540
2541
/**
 * scic_controller_initialize() - Bring the controller from RESET to
 *    INITIALIZED (or FAILED on error).
 * @scic: the controller to initialize; must be in the RESET state.
 *
 * Creates the timeout timer, initializes phy startup/power control and the
 * AFE, releases the hardware soft reset and polls for RAM init completion,
 * sizes the port/task/remote-node tables from the hardware capacity
 * register, enables PCI relaxed ordering in the DMA engines, then
 * initializes phys, ports and the port configuration agent.
 *
 * Return: SCI_SUCCESS on success; SCI_FAILURE_INVALID_STATE when not in
 * RESET; SCI_FAILURE (or a phy/port init status) on hardware failure.
 */
enum sci_status scic_controller_initialize(struct scic_sds_controller *scic)
{
	struct sci_base_state_machine *sm = &scic->state_machine;
	enum sci_status result = SCI_SUCCESS;
	struct isci_host *ihost = scic_to_ihost(scic);
	u32 index, state;

	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_RESET) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller initialize operation requested "
			 "in invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_INITIALIZING);

	scic->timeout_timer = isci_timer_create(ihost,
						scic,
						scic_sds_controller_timeout_handler);

	scic_sds_controller_initialize_phy_startup(scic);

	scic_sds_controller_initialize_power_control(scic);

	/*
	 * There is nothing to do here for B0 since we do not have to
	 * program the AFE registers.
	 * / @todo The AFE settings are supposed to be correct for the B0 but
	 * /       presently they seem to be wrong. */
	scic_sds_controller_afe_initialization(scic);

	if (result == SCI_SUCCESS) {
		u32 status;
		u32 terminate_loop;

		/* Take the hardware out of reset */
		writel(0, &scic->smu_registers->soft_reset_control);

		/*
		 * / @todo Provide meaningful error code for hardware failure
		 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
		result = SCI_FAILURE;
		terminate_loop = 100;

		/* Poll (up to 100 times) for the RAM init completed flag. */
		while (terminate_loop-- && (result != SCI_SUCCESS)) {
			/* Loop until the hardware reports success */
			udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
			status = readl(&scic->smu_registers->control_status);

			if ((status & SCU_RAM_INIT_COMPLETED) ==
					SCU_RAM_INIT_COMPLETED)
				result = SCI_SUCCESS;
		}
	}

	if (result == SCI_SUCCESS) {
		u32 max_supported_ports;
		u32 max_supported_devices;
		u32 max_supported_io_requests;
		u32 device_context_capacity;

		/*
		 * Determine what are the actual device capacities that the
		 * hardware will support */
		device_context_capacity =
			readl(&scic->smu_registers->device_context_capacity);


		max_supported_ports = smu_dcc_get_max_ports(device_context_capacity);
		max_supported_devices = smu_dcc_get_max_remote_node_context(device_context_capacity);
		max_supported_io_requests = smu_dcc_get_max_task_context(device_context_capacity);

		/*
		 * Make all PEs that are unassigned match up with the
		 * logical ports
		 */
		for (index = 0; index < max_supported_ports; index++) {
			struct scu_port_task_scheduler_group_registers __iomem
				*ptsg = &scic->scu_registers->peg0.ptsg;

			writel(index, &ptsg->protocol_engine[index]);
		}

		/* Record the smaller of the two capacity values */
		scic->logical_port_entries =
			min(max_supported_ports, scic->logical_port_entries);

		scic->task_context_entries =
			min(max_supported_io_requests,
			    scic->task_context_entries);

		scic->remote_node_entries =
			min(max_supported_devices, scic->remote_node_entries);

		/*
		 * Now that we have the correct hardware reported minimum values
		 * build the MDL for the controller.  Default to a performance
		 * configuration.
		 */
		scic_controller_set_mode(scic, SCI_MODE_SPEED);
	}

	/* Initialize hardware PCI Relaxed ordering in DMA engines */
	if (result == SCI_SUCCESS) {
		u32 dma_configuration;

		/* Configure the payload DMA */
		dma_configuration =
			readl(&scic->scu_registers->sdma.pdma_configuration);
		dma_configuration |=
			SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
		writel(dma_configuration,
			&scic->scu_registers->sdma.pdma_configuration);

		/* Configure the control DMA */
		dma_configuration =
			readl(&scic->scu_registers->sdma.cdma_configuration);
		dma_configuration |=
			SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
		writel(dma_configuration,
			&scic->scu_registers->sdma.cdma_configuration);
	}

	/*
	 * Initialize the PHYs before the PORTs because the PHY registers
	 * are accessed during the port initialization.
	 */
	if (result == SCI_SUCCESS) {
		/* Initialize the phys */
		for (index = 0;
		     (result == SCI_SUCCESS) && (index < SCI_MAX_PHYS);
		     index++) {
			result = scic_sds_phy_initialize(
				&ihost->phys[index].sci,
				&scic->scu_registers->peg0.pe[index].tl,
				&scic->scu_registers->peg0.pe[index].ll);
		}
	}

	if (result == SCI_SUCCESS) {
		/* Initialize the logical ports */
		for (index = 0;
		     (index < scic->logical_port_entries) &&
		     (result == SCI_SUCCESS);
		     index++) {
			result = scic_sds_port_initialize(
				&scic->port_table[index],
				&scic->scu_registers->peg0.ptsg.port[index],
				&scic->scu_registers->peg0.ptsg.protocol_engine,
				&scic->scu_registers->peg0.viit[index]);
		}
	}

	if (result == SCI_SUCCESS)
		result = scic_sds_port_configuration_agent_initialize(
				scic,
				&scic->port_agent);

	/* Advance the controller state machine */
	if (result == SCI_SUCCESS)
		state = SCI_BASE_CONTROLLER_STATE_INITIALIZED;
	else
		state = SCI_BASE_CONTROLLER_STATE_FAILED;
	sci_base_state_machine_change_state(sm, state);

	return result;
}
2710
/**
 * scic_controller_start() - Transition an INITIALIZED controller to the
 *    STARTING state and begin bringing up its ports and phys.
 * @scic: the controller to start; must be in the INITIALIZED state.
 * @timeout: start-operation timeout passed to isci_timer_start()
 *    (presumably milliseconds -- TODO confirm against the timer API).
 *
 * Builds the TCi and RNi free pools, masks interrupts, programs the port
 * task scheduler, task entries, completion queue and unsolicited frame
 * queue, starts every logical port, kicks off the phy startup sequence and
 * arms the timeout timer.
 *
 * Return: SCI_SUCCESS when starting has begun; SCI_FAILURE_INVALID_STATE
 * when not in INITIALIZED; otherwise the first failing port's start status.
 */
enum sci_status scic_controller_start(struct scic_sds_controller *scic,
		u32 timeout)
{
	enum sci_status result;
	u16 index;

	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller start operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Build the TCi free pool */
	sci_pool_initialize(scic->tci_pool);
	for (index = 0; index < scic->task_context_entries; index++)
		sci_pool_put(scic->tci_pool, index);

	/* Build the RNi free pool */
	scic_sds_remote_node_table_initialize(
			&scic->available_remote_nodes,
			scic->remote_node_entries);

	/*
	 * Before anything else lets make sure we will not be
	 * interrupted by the hardware.
	 */
	scic_controller_disable_interrupts(scic);

	/* Enable the port task scheduler */
	scic_sds_controller_enable_port_task_scheduler(scic);

	/* Assign all the task entries to scic physical function */
	scic_sds_controller_assign_task_entries(scic);

	/* Now initialize the completion queue */
	scic_sds_controller_initialize_completion_queue(scic);

	/* Initialize the unsolicited frame queue for use */
	scic_sds_controller_initialize_unsolicited_frame_queue(scic);

	/* Start all of the ports on this controller */
	for (index = 0; index < scic->logical_port_entries; index++) {
		struct scic_sds_port *sci_port = &scic->port_table[index];

		/* Bail out on the first port that fails to start. */
		result = sci_port->state_handlers->start_handler(
				sci_port);
		if (result)
			return result;
	}

	scic_sds_controller_start_next_phy(scic);

	isci_timer_start(scic->timeout_timer, timeout);

	sci_base_state_machine_change_state(&scic->state_machine,
					    SCI_BASE_CONTROLLER_STATE_STARTING);

	return SCI_SUCCESS;
}
2772
2773 /**
2774  *
2775  * @object: This is the object which is cast to a struct scic_sds_controller
2776  *    object.
2777  *
2778  * This method implements the actions taken by the struct scic_sds_controller on entry
2779  * to the SCI_BASE_CONTROLLER_STATE_INITIAL. - Set the state handlers to the
2780  * controllers initial state. none This function should initialize the
2781  * controller object.
2782  */
2783 static void scic_sds_controller_initial_state_enter(void *object)
2784 {
2785         struct scic_sds_controller *scic = object;
2786
2787         sci_base_state_machine_change_state(&scic->state_machine,
2788                         SCI_BASE_CONTROLLER_STATE_RESET);
2789 }
2790
2791 /**
2792  *
2793  * @object: This is the object which is cast to a struct scic_sds_controller
2794  *    object.
2795  *
2796  * This method implements the actions taken by the struct scic_sds_controller on exit
2797  * from the SCI_BASE_CONTROLLER_STATE_STARTING. - This function stops the
2798  * controller starting timeout timer. none
2799  */
2800 static inline void scic_sds_controller_starting_state_exit(void *object)
2801 {
2802         struct scic_sds_controller *scic = object;
2803
2804         isci_timer_stop(scic->timeout_timer);
2805 }
2806
/**
 * scic_sds_controller_ready_state_enter() - Entry action for
 *    SCI_BASE_CONTROLLER_STATE_READY.
 * @object: opaque pointer to the struct scic_sds_controller entering the
 *    state.
 *
 * Programs the default interrupt coalescence: 16 completion-queue entries
 * or a 250 us timeout, whichever occurs first.
 */
static void scic_sds_controller_ready_state_enter(void *object)
{
	struct scic_sds_controller *controller = object;

	scic_controller_set_interrupt_coalescence(controller, 0x10, 250);
}
2824
/**
 * scic_sds_controller_ready_state_exit() - Exit action for
 *    SCI_BASE_CONTROLLER_STATE_READY.
 * @object: opaque pointer to the struct scic_sds_controller leaving the
 *    state.
 *
 * Turns interrupt coalescence off (number 0, timeout 0) while the
 * controller is not in the READY state.
 */
static void scic_sds_controller_ready_state_exit(void *object)
{
	struct scic_sds_controller *controller = object;

	scic_controller_set_interrupt_coalescence(controller, 0, 0);
}
2840
/**
 * scic_sds_controller_stopping_state_enter() - Entry action for
 *    SCI_BASE_CONTROLLER_STATE_STOPPING.
 * @object: opaque pointer to the struct scic_sds_controller entering the
 *    state.
 *
 * Quiesces every component owned by the controller: phys first, then
 * ports, then remote devices.
 */
static void scic_sds_controller_stopping_state_enter(void *object)
{
	struct scic_sds_controller *controller = object;

	scic_sds_controller_stop_phys(controller);
	scic_sds_controller_stop_ports(controller);
	scic_sds_controller_stop_devices(controller);
}
2860
2861 /**
2862  *
2863  * @object: This is the object which is cast to a struct
2864  * scic_sds_controller object.
2865  *
2866  * This function implements the actions taken by the struct scic_sds_controller
2867  * on exit from the SCI_BASE_CONTROLLER_STATE_STOPPING. -
2868  * This function stops the controller stopping timeout timer.
2869  */
2870 static inline void scic_sds_controller_stopping_state_exit(void *object)
2871 {
2872         struct scic_sds_controller *scic = object;
2873
2874         isci_timer_stop(scic->timeout_timer);
2875 }
2876
2877 static void scic_sds_controller_resetting_state_enter(void *object)
2878 {
2879         struct scic_sds_controller *scic = object;
2880
2881         scic_sds_controller_reset_hardware(scic);
2882         sci_base_state_machine_change_state(&scic->state_machine,
2883                                             SCI_BASE_CONTROLLER_STATE_RESET);
2884 }
2885
/*
 * State table binding each SCI_BASE_CONTROLLER_STATE_* value to its
 * entry/exit actions.  States with an empty initializer have no entry or
 * exit behavior.
 */
static const struct sci_base_state scic_sds_controller_state_table[] = {
	[SCI_BASE_CONTROLLER_STATE_INITIAL] = {
		.enter_state = scic_sds_controller_initial_state_enter,
	},
	[SCI_BASE_CONTROLLER_STATE_RESET] = {},
	[SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {},
	[SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {},
	[SCI_BASE_CONTROLLER_STATE_STARTING] = {
		.exit_state  = scic_sds_controller_starting_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_READY] = {
		.enter_state = scic_sds_controller_ready_state_enter,
		.exit_state  = scic_sds_controller_ready_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_RESETTING] = {
		.enter_state = scic_sds_controller_resetting_state_enter,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPING] = {
		.enter_state = scic_sds_controller_stopping_state_enter,
		.exit_state = scic_sds_controller_stopping_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPED] = {},
	[SCI_BASE_CONTROLLER_STATE_FAILED] = {}
};
2910
2911 /**
2912  * scic_controller_construct() - This method will attempt to construct a
2913  *    controller object utilizing the supplied parameter information.
2914  * @c: This parameter specifies the controller to be constructed.
2915  * @scu_base: mapped base address of the scu registers
2916  * @smu_base: mapped base address of the smu registers
2917  *
2918  * Indicate if the controller was successfully constructed or if it failed in
2919  * some way. SCI_SUCCESS This value is returned if the controller was
2920  * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned
2921  * if the interrupt coalescence timer may cause SAS compliance issues for SMP
2922  * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE
2923  * This value is returned if the controller does not support the supplied type.
2924  * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the
2925  * controller does not support the supplied initialization data version.
2926  */
2927 enum sci_status scic_controller_construct(struct scic_sds_controller *scic,
2928                                           void __iomem *scu_base,
2929                                           void __iomem *smu_base)
2930 {
2931         struct isci_host *ihost = scic_to_ihost(scic);
2932         u8 i;
2933
2934         sci_base_state_machine_construct(&scic->state_machine,
2935                 scic, scic_sds_controller_state_table,
2936                 SCI_BASE_CONTROLLER_STATE_INITIAL);
2937
2938         sci_base_state_machine_start(&scic->state_machine);
2939
2940         scic->scu_registers = scu_base;
2941         scic->smu_registers = smu_base;
2942
2943         scic_sds_port_configuration_agent_construct(&scic->port_agent);
2944
2945         /* Construct the ports for this controller */
2946         for (i = 0; i < SCI_MAX_PORTS; i++)
2947                 scic_sds_port_construct(&scic->port_table[i], i, scic);
2948         scic_sds_port_construct(&scic->port_table[i], SCIC_SDS_DUMMY_PORT, scic);
2949
2950         /* Construct the phys for this controller */
2951         for (i = 0; i < SCI_MAX_PHYS; i++) {
2952                 /* Add all the PHYs to the dummy port */
2953                 scic_sds_phy_construct(&ihost->phys[i].sci,
2954                                        &scic->port_table[SCI_MAX_PORTS], i);
2955         }
2956
2957         scic->invalid_phy_mask = 0;
2958
2959         /* Set the default maximum values */
2960         scic->completion_event_entries      = SCU_EVENT_COUNT;
2961         scic->completion_queue_entries      = SCU_COMPLETION_QUEUE_COUNT;
2962         scic->remote_node_entries           = SCI_MAX_REMOTE_DEVICES;
2963         scic->logical_port_entries          = SCI_MAX_PORTS;
2964         scic->task_context_entries          = SCU_IO_REQUEST_COUNT;
2965         scic->uf_control.buffers.count      = SCU_UNSOLICITED_FRAME_COUNT;
2966         scic->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT;
2967
2968         /* Initialize the User and OEM parameters to default values. */
2969         scic_sds_controller_set_default_config_parameters(scic);
2970
2971         return scic_controller_reset(scic);
2972 }