/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still pending.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the added
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
enum {
	/* BARs are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
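	/* Worked example, using the MV_MAX_Q_DEPTH of 32 above: the CRQB
	 * ring is 32 entries * 32B = 1KB, matching its required 1KB
	 * alignment, and the CRPB ring is 32 * 8B = 256B, matching its
	 * required 256B alignment.
	 */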
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
	/* SATAHC registers */
	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* EDMA register offsets */
	EDMA_CFG_OFS		= 0,		/* EDMA config register */
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
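/* Because the CRQB ring is 1KB-aligned, bits 9:0 of its DMA address are
 * always zero, so the EDMA in/out pointer registers can carry the ring
 * base in bits 31:10 and the current queue index in the low bits
 * (index << EDMA_REQ_Q_PTR_SHIFT occupies bits 9:5).  See
 * mv_set_edma_ptrs() below for where this packing is done.
 */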
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};
struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= ata_std_qc_defer,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
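/* Example of the address math above: for port 5 on a dual-HC chip,
 * mv_hc_from_port() yields HC 1 and mv_hardport_from_port() yields hard
 * port 1, so mv_port_base() resolves to
 * base + 0x20000 + 0x10000 + 0x2000 + 0x2000 = base + 0x34000.
 */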
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
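	/* Note: "(x >> 16) >> 16" is used below instead of "x >> 32"
	 * because dma_addr_t may be only 32 bits wide, where a plain
	 * 32-bit shift would be undefined.
	 */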
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
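/* Note: req_idx and resp_idx are free-running counters; they are reduced
 * mod MV_MAX_Q_DEPTH via MV_MAX_Q_DEPTH_MASK whenever a hardware queue
 * pointer is formed, as above.
 */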
/**
 *	mv_start_dma - Enable eDMA engine
 *	@base: port base address
 *	@pp: port private data
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
/**
 *	mv_stop_edma_engine - Disable eDMA engine
 *	@port_mmio: io base address
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
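/* Example: on a Gen-II chip with NCQ requested, the value written above is
 * EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN |
 * EDMA_CFG_NCQ, i.e. 0x1f | (1 << 11) | (1 << 13) | (1 << 5) = 0x283f.
 */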
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
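/*
 * Illustration only (not part of the driver): the 64KB-boundary math from
 * mv_fill_sg() in isolation.  A segment that crosses a 64KB line
 * contributes two ePRD entries, which is why .sg_tablesize is halved to
 * MV_MAX_SG_CT / 2 in the scsi_host_templates above.
 */
static inline u32 mv_example_first_eprd_len(dma_addr_t addr, u32 sg_len)
{
	u32 offset = addr & 0xffff;

	if ((offset + sg_len) > 0x10000)
		return 0x10000 - offset;  /* first ePRD stops at the 64KB line */
	return sg_len;			  /* whole segment fits in one ePRD */
}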
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		  (last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
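/* Example: mv_crqb_pack_cmd(cw, tf->command, ATA_REG_CMD, 1) yields
 * tf->command | (ATA_REG_CMD << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
 * CRQB_CMD_LAST, matching the "last" entry written in mv_qc_prep() below.
 */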
/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *	@qc: affected queued command, or NULL
 *
 *	In most cases, just clear the interrupt and move on.  However,
 *	some cases require an eDMA reset, which also performs a COMRESET.
 *	The SERR case requires a clear of pending errors in the SATA
 *	SERROR register.  Finally, if the port disabled DMA,
 *	update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
}
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;
		else {
			/* Gen II/IIE: get active ATA command via tag, to
			 * enable support for queueing.  This works
			 * transparently for queued and non-queued modes.
			 */
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
		}

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@relevant: port error bits relevant to this host controller
 *	@hc: which host controller we're to look at
 *
 *	Read then write clear the HC interrupt status then walk each
 *	port connected to the HC and see if it needs servicing.  Port
 *	success ints are reported in the HC interrupt status reg, the
 *	port error ints are reported in the higher level main
 *	interrupt status register and thus are passed in via the
 *	'relevant' argument.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/**
 *	mv_interrupt - Main interrupt event handler
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= (1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@mmio: base address of the HBA
 *
 *	This routine only applies to 6xxx parts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1000);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
/* TODO: use the generic LED interface to configure the SATA Presence
 * & Activity LEDs on the board.
 */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}
static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);

	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifctl |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
}
2250 * Caller must ensure that EDMA is not active,
2251 * by first doing mv_stop_edma() where needed.
2253 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2254 unsigned int port_no)
2256 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2258 mv_stop_edma_engine(port_mmio);
2259 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2261 if (!IS_GEN_I(hpriv)) {
2262 /* Enable 3.0gb/s link speed */
2263 mv_setup_ifctl(port_mmio, 1);
2266 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2267 * link, and physical layers. It resets all SATA interface registers
2268 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
2270 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2271 udelay(25); /* allow reset propagation */
2272 writelfl(0, port_mmio + EDMA_CMD_OFS);
2274 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2276 if (IS_GEN_I(hpriv))
2280 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2281 unsigned long deadline)
2283 struct ata_port *ap = link->ap;
2284 struct mv_host_priv *hpriv = ap->host->private_data;
2285 struct mv_port_priv *pp = ap->private_data;
2286 void __iomem *mmio = hpriv->base;
2287 int rc, attempts = 0, extra = 0;
2291 mv_reset_channel(hpriv, mmio, ap->port_no);
2292 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2294 /* Workaround for errata FEr SATA#10 (part 2) */
2296 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
2298 rc = sata_link_hardreset(link, timing, deadline + extra, &online, NULL);
2300 ata_link_printk(link, KERN_ERR,
2301 "COMRESET failed (errno=%d)\n", rc);
2304 sata_scr_read(link, SCR_STATUS, &sstatus);
2305 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2306 /* Force 1.5gb/s link speed and try again */
2307 mv_setup_ifctl(mv_ap_base(ap), 0);
2308 if (time_after(jiffies + HZ, deadline))
2309 extra = HZ; /* only extend it once, max */
2311 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
2313 return online ? -EAGAIN : rc;
2316 static void mv_eh_freeze(struct ata_port *ap)
2318 struct mv_host_priv *hpriv = ap->host->private_data;
2319 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2323 /* FIXME: handle coalescing completion events properly */
2325 shift = ap->port_no * 2;
2329 mask = 0x3 << shift;
2331 /* disable assertion of portN err, done events */
2332 tmp = readl(hpriv->main_mask_reg_addr);
2333 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2336 static void mv_eh_thaw(struct ata_port *ap)
2338 struct mv_host_priv *hpriv = ap->host->private_data;
2339 void __iomem *mmio = hpriv->base;
2340 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2341 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2342 void __iomem *port_mmio = mv_ap_base(ap);
2343 u32 tmp, mask, hc_irq_cause;
2344 unsigned int shift, hc_port_no = ap->port_no;
2346 /* FIXME: handle coalescing completion events properly */
2348 shift = ap->port_no * 2;
2354 mask = 0x3 << shift;
2356 /* clear EDMA errors on this port */
2357 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2359 /* clear pending irq events */
2360 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2361 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2362 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2363 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2365 /* enable assertion of portN err, done events */
2366 tmp = readl(hpriv->main_mask_reg_addr);
2367 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2371 * mv_port_init - Perform some early initialization on a single port.
2372 * @port: libata data structure storing shadow register addresses
2373 * @port_mmio: base address of the port
2375 * Initialize shadow register mmio addresses, clear outstanding
2376 * interrupts on the port, and unmask interrupts for the future
2377 * start of the port.
2380 * Inherited from caller.
2382 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2384 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2387 /* PIO related setup
2389 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2391 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2392 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2393 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2394 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2395 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2396 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2398 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2399 /* special case: control/altstatus doesn't have ATA_REG_ address */
2400 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2403 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2405 /* Clear any currently outstanding port interrupt conditions */
2406 serr_ofs = mv_scr_offset(SCR_ERROR);
2407 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2408 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2410 /* unmask all non-transient EDMA error interrupts */
2411 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2413 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2414 readl(port_mmio + EDMA_CFG_OFS),
2415 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2416 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2419 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2421 struct pci_dev *pdev = to_pci_dev(host->dev);
2422 struct mv_host_priv *hpriv = host->private_data;
2423 u32 hp_flags = hpriv->hp_flags;
2425 switch (board_idx) {
2427 hpriv->ops = &mv5xxx_ops;
2428 hp_flags |= MV_HP_GEN_I;
2430 switch (pdev->revision) {
2432 hp_flags |= MV_HP_ERRATA_50XXB0;
2435 hp_flags |= MV_HP_ERRATA_50XXB2;
2438 dev_printk(KERN_WARNING, &pdev->dev,
2439 "Applying 50XXB2 workarounds to unknown rev\n");
2440 hp_flags |= MV_HP_ERRATA_50XXB2;
2447 hpriv->ops = &mv5xxx_ops;
2448 hp_flags |= MV_HP_GEN_I;
2450 switch (pdev->revision) {
2452 hp_flags |= MV_HP_ERRATA_50XXB0;
2455 hp_flags |= MV_HP_ERRATA_50XXB2;
2458 dev_printk(KERN_WARNING, &pdev->dev,
2459 "Applying B2 workarounds to unknown rev\n");
2460 hp_flags |= MV_HP_ERRATA_50XXB2;
2467 hpriv->ops = &mv6xxx_ops;
2468 hp_flags |= MV_HP_GEN_II;
2470 switch (pdev->revision) {
2472 hp_flags |= MV_HP_ERRATA_60X1B2;
2475 hp_flags |= MV_HP_ERRATA_60X1C0;
2478 dev_printk(KERN_WARNING, &pdev->dev,
2479 "Applying B2 workarounds to unknown rev\n");
2480 hp_flags |= MV_HP_ERRATA_60X1B2;
2486 hp_flags |= MV_HP_PCIE;
2487 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2488 (pdev->device == 0x2300 || pdev->device == 0x2310))
2491 * Highpoint RocketRAID PCIe 23xx series cards:
2493 * Unconfigured drives are treated as "Legacy"
2494 * by the BIOS, and it overwrites sector 8 with
2495 * a "Lgcy" metadata block prior to Linux boot.
2497 * Configured drives (RAID or JBOD) leave sector 8
2498 * alone, but instead overwrite a high numbered
2499 * sector for the RAID metadata. This sector can
2500 * be determined exactly, by truncating the physical
2501 * drive capacity to a nice even GB value.
2503 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2505 * Warn the user, lest they think we're just buggy.
2507 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2508 " BIOS CORRUPTS DATA on all attached drives,"
2509 " regardless of if/how they are configured."
2511 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2512 " use sectors 8-9 on \"Legacy\" drives,"
2513 " and avoid the final two gigabytes on"
2514 " all RocketRAID BIOS initialized drives.\n");
2517 hpriv->ops = &mv6xxx_ops;
2518 hp_flags |= MV_HP_GEN_IIE;
2520 switch (pdev->revision) {
2522 hp_flags |= MV_HP_ERRATA_XX42A0;
2525 hp_flags |= MV_HP_ERRATA_60X1C0;
2528 dev_printk(KERN_WARNING, &pdev->dev,
2529 "Applying 60X1C0 workarounds to unknown rev\n");
2530 hp_flags |= MV_HP_ERRATA_60X1C0;
2535 hpriv->ops = &mv_soc_ops;
2536 hp_flags |= MV_HP_ERRATA_60X1C0;
2540 dev_printk(KERN_ERR, host->dev,
2541 "BUG: invalid board index %u\n", board_idx);
2545 hpriv->hp_flags = hp_flags;
2546 if (hp_flags & MV_HP_PCIE) {
2547 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2548 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2549 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2551 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2552 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2553 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2560 * mv_init_host - Perform some early initialization of the host.
2561 * @host: ATA host to initialize
2562 * @board_idx: controller index
2564 * If possible, do an early global reset of the host. Then do
2565 * our port init and clear/unmask all/relevant host interrupts.
2568 * Inherited from caller.
2570 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2572 int rc = 0, n_hc, port, hc;
2573 struct mv_host_priv *hpriv = host->private_data;
2574 void __iomem *mmio = hpriv->base;
2576 rc = mv_chip_id(host, board_idx);
2580 if (HAS_PCI(host)) {
2581 hpriv->main_cause_reg_addr = hpriv->base +
2582 HC_MAIN_IRQ_CAUSE_OFS;
2583 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2585 hpriv->main_cause_reg_addr = hpriv->base +
2586 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2587 hpriv->main_mask_reg_addr = hpriv->base +
2588 HC_SOC_MAIN_IRQ_MASK_OFS;
2590 /* global interrupt mask */
2591 writel(0, hpriv->main_mask_reg_addr);
2593 n_hc = mv_get_hc_count(host->ports[0]->flags);
2595 for (port = 0; port < host->n_ports; port++)
2596 hpriv->ops->read_preamp(hpriv, port, mmio);
2598 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2602 hpriv->ops->reset_flash(hpriv, mmio);
2603 hpriv->ops->reset_bus(host, mmio);
2604 hpriv->ops->enable_leds(hpriv, mmio);
2606 for (port = 0; port < host->n_ports; port++) {
2607 struct ata_port *ap = host->ports[port];
2608 void __iomem *port_mmio = mv_port_base(mmio, port);
2610 mv_port_init(&ap->ioaddr, port_mmio);
2613 if (HAS_PCI(host)) {
2614 unsigned int offset = port_mmio - mmio;
2615 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2616 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2621 for (hc = 0; hc < n_hc; hc++) {
2622 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2624 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2625 "(before clear)=0x%08x\n", hc,
2626 readl(hc_mmio + HC_CFG_OFS),
2627 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2629 /* Clear any currently outstanding hc interrupt conditions */
2630 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2633 if (HAS_PCI(host)) {
2634 /* Clear any currently outstanding host interrupt conditions */
2635 writelfl(0, mmio + hpriv->irq_cause_ofs);
2637 /* and unmask interrupt generation for host regs */
2638 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2639 if (IS_GEN_I(hpriv))
2640 writelfl(~HC_MAIN_MASKED_IRQS_5,
2641 hpriv->main_mask_reg_addr);
2643 writelfl(~HC_MAIN_MASKED_IRQS,
2644 hpriv->main_mask_reg_addr);
2646 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2647 "PCI int cause/mask=0x%08x/0x%08x\n",
2648 readl(hpriv->main_cause_reg_addr),
2649 readl(hpriv->main_mask_reg_addr),
2650 readl(mmio + hpriv->irq_cause_ofs),
2651 readl(mmio + hpriv->irq_mask_ofs));
2653 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2654 hpriv->main_mask_reg_addr);
2655 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2656 readl(hpriv->main_cause_reg_addr),
2657 readl(hpriv->main_mask_reg_addr));
2663 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2665 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2667 if (!hpriv->crqb_pool)
2670 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2672 if (!hpriv->crpb_pool)
2675 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2677 if (!hpriv->sg_tbl_pool)
2684 * mv_platform_probe - handle a positive probe of an soc Marvell
2686 * @pdev: platform device found
2689 * Inherited from caller.
2691 static int mv_platform_probe(struct platform_device *pdev)
2693 static int printed_version;
2694 const struct mv_sata_platform_data *mv_platform_data;
2695 const struct ata_port_info *ppi[] =
2696 { &mv_port_info[chip_soc], NULL };
2697 struct ata_host *host;
2698 struct mv_host_priv *hpriv;
2699 struct resource *res;
2702 if (!printed_version++)
2703 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2706 * Simple resource validation ..
2708 if (unlikely(pdev->num_resources != 2)) {
2709 dev_err(&pdev->dev, "invalid number of resources\n");
2714 * Get the register base first
2716 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2721 mv_platform_data = pdev->dev.platform_data;
2722 n_ports = mv_platform_data->n_ports;
2724 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2725 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2727 if (!host || !hpriv)
2729 host->private_data = hpriv;
2730 hpriv->n_ports = n_ports;
2733 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2734 res->end - res->start + 1);
2735 hpriv->base -= MV_SATAHC0_REG_BASE;
2737 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2741 /* initialize adapter */
2742 rc = mv_init_host(host, chip_soc);
2746 dev_printk(KERN_INFO, &pdev->dev,
2747 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2750 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2751 IRQF_SHARED, &mv6_sht);
2756 * mv_platform_remove - unplug a platform interface
2757 * @pdev: platform device
2759 * A platform bus SATA device has been unplugged. Perform the needed
2760 * cleanup. Also called on module unload for any active devices.
2762 static int __devexit mv_platform_remove(struct platform_device *pdev)
2764 struct device *dev = &pdev->dev;
2765 struct ata_host *host = dev_get_drvdata(dev);
2767 ata_host_detach(host);
2771 static struct platform_driver mv_platform_driver = {
2772 .probe = mv_platform_probe,
2773 .remove = __devexit_p(mv_platform_remove),
2776 .owner = THIS_MODULE,
2782 static int mv_pci_init_one(struct pci_dev *pdev,
2783 const struct pci_device_id *ent);
2786 static struct pci_driver mv_pci_driver = {
2788 .id_table = mv_pci_tbl,
2789 .probe = mv_pci_init_one,
2790 .remove = ata_pci_remove_one,
2796 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2799 /* move to PCI layer or libata core? */
2800 static int pci_go_64(struct pci_dev *pdev)
2804 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2805 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2807 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2809 dev_printk(KERN_ERR, &pdev->dev,
2810 "64-bit DMA enable failed\n");
2815 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2817 dev_printk(KERN_ERR, &pdev->dev,
2818 "32-bit DMA enable failed\n");
2821 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2823 dev_printk(KERN_ERR, &pdev->dev,
2824 "32-bit consistent DMA enable failed\n");
2833 * mv_print_info - Dump key info to kernel log for perusal.
2834 * @host: ATA host to print info about
2836 * FIXME: complete this.
2839 * Inherited from caller.
2841 static void mv_print_info(struct ata_host *host)
2843 struct pci_dev *pdev = to_pci_dev(host->dev);
2844 struct mv_host_priv *hpriv = host->private_data;
2846 const char *scc_s, *gen;
2848 /* Use this to determine the HW stepping of the chip so we know
2849 * what errata to workaround
2851 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2854 else if (scc == 0x01)
2859 if (IS_GEN_I(hpriv))
2861 else if (IS_GEN_II(hpriv))
2863 else if (IS_GEN_IIE(hpriv))
2868 dev_printk(KERN_INFO, &pdev->dev,
2869 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2870 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2871 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2875 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
2876 * @pdev: PCI device found
2877 * @ent: PCI device ID entry for the matched host
2880 * Inherited from caller.
2882 static int mv_pci_init_one(struct pci_dev *pdev,
2883 const struct pci_device_id *ent)
2885 static int printed_version;
2886 unsigned int board_idx = (unsigned int)ent->driver_data;
2887 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2888 struct ata_host *host;
2889 struct mv_host_priv *hpriv;
2892 if (!printed_version++)
2893 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2896 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2898 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2899 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2900 if (!host || !hpriv)
2902 host->private_data = hpriv;
2903 hpriv->n_ports = n_ports;
2905 /* acquire resources */
2906 rc = pcim_enable_device(pdev);
2910 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2912 pcim_pin_device(pdev);
2915 host->iomap = pcim_iomap_table(pdev);
2916 hpriv->base = host->iomap[MV_PRIMARY_BAR];
2918 rc = pci_go_64(pdev);
2922 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2926 /* initialize adapter */
2927 rc = mv_init_host(host, board_idx);
2931 /* Enable interrupts */
2932 if (msi && pci_enable_msi(pdev))
2935 mv_dump_pci_cfg(pdev, 0x68);
2936 mv_print_info(host);
2938 pci_set_master(pdev);
2939 pci_try_set_mwi(pdev);
2940 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2941 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2945 static int mv_platform_probe(struct platform_device *pdev);
2946 static int __devexit mv_platform_remove(struct platform_device *pdev);
2948 static int __init mv_init(void)
2952 rc = pci_register_driver(&mv_pci_driver);
2956 rc = platform_driver_register(&mv_platform_driver);
2960 pci_unregister_driver(&mv_pci_driver);
2965 static void __exit mv_exit(void)
2968 pci_unregister_driver(&mv_pci_driver);
2970 platform_driver_unregister(&mv_platform_driver);
2973 MODULE_AUTHOR("Brett Russ");
2974 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2975 MODULE_LICENSE("GPL");
2976 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2977 MODULE_VERSION(DRV_VERSION);
2978 MODULE_ALIAS("platform:sata_mv");
2981 module_param(msi, int, 0444);
2982 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2985 module_init(mv_init);
2986 module_exit(mv_exit);