/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

sata_mv TODO list:

1) Needs a full errata audit for all chipsets.  I implemented most
of the errata workarounds found in the Marvell vendor driver, but
I distinctly remember a couple workarounds (one related to PCI-X)
are still needed.
2) Improve/fix IRQ and error handling sequences.

3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

4) Think about TCQ support here, and for libata in general
with controllers that support it via host-queuing hardware
(a software-only implementation could be a nightmare).
5) Investigate problems with PCI Message Signalled Interrupts (MSI).

6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.

7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).

8) Develop a low-power-consumption strategy, and implement it.

9) [Experiment, low priority] See if ATAPI can be supported using
"unknown FIS" or "vendor-specific FIS" support, or something creative.

10) [Experiment, low priority] Investigate interrupt coalescing.
Quite often, especially with PCI Message Signalled Interrupts (MSI),
the overhead reduced by interrupt mitigation is not worth the
added latency cost.
11) [Experiment, Marvell value added] Is it possible to use target
mode to cross-connect two Linux boxes with Marvell cards?  If so,
creating LibATA target mode support would be very interesting.

Target mode, for those without docs, is the ability to directly
connect two SATA controllers.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
enum {
	/* BARs are enumerated in terms of pci_resource_start() */
	MV_PRIMARY_BAR = 0,	/* offset 0x10: memory space */
	MV_IO_BAR = 2,		/* offset 0x18: IO space */
	MV_MISC_BAR = 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ = 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ = 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE = 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE = 0x20000,
	MV_FLASH_CTL = 0x1046c,
	MV_GPIO_PORT_CTL = 0x104f0,
	MV_RESET_CFG = 0x180d8,

	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH = 32,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT = 256,
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
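	/*
	 * Editor's note, the resulting sizes: each CRQB is 32 bytes, so a
	 * 32-deep queue is 32 * 32 = 1KB, matching the 1KB alignment rule
	 * above; likewise the 8-byte CRPBs give 8 * 32 = 256B, and the
	 * 16-byte ePRDs give a 16 * 256 = 4KB scatter/gather table per tag.
	 */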
	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT = 2,
	MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT),	/* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK = (MV_PORTS_PER_HC - 1),		/* 3 */
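	/*
	 * Worked example (editor's note): port 6 maps to
	 *	hc       = 6 >> MV_PORT_HC_SHIFT = 1
	 *	hardport = 6 &  MV_PORT_MASK     = 2
	 * i.e. the third port of the second host controller.
	 */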
	MV_FLAG_DUAL_HC = (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE = (1 << 29),	/* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC = (1 << 28),

	MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
			  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ = (1 << 0),
	CRQB_TAG_SHIFT = 1,
	CRQB_IOID_SHIFT = 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT = 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT = 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),

	CRPB_FLAG_STATUS_SHIFT = 8,
	CRPB_IOID_SHIFT_6 = 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7 = 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL = (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS = 0xc00,

	PCI_MAIN_CMD_STS_OFS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),

	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,

	PCI_IRQ_CAUSE_OFS = 0x1d58,
	PCI_IRQ_MASK_OFS = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS = 0x1900,
	PCIE_IRQ_MASK_OFS = 0x1910,
	PCIE_UNMASK_ALL_IRQS = 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	HC_MAIN_IRQ_MASK_OFS = 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	ERR_IRQ = (1 << 0),	/* shift by port # */
	DONE_IRQ = (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND = 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT = 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR = (1 << 18),
	TRAN_LO_DONE = (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE = (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE = (1 << 8),
	PORTS_4_7_COAL_DONE = (1 << 17),
	PORTS_0_7_COAL_DONE = (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5 = (0x1fff << 19),	/* bits 31-19 */
	HC_MAIN_RSVD_SOC = (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
			       PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
			       PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
			       HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				 HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
	/* SATAHC registers */

	HC_IRQ_CAUSE_OFS = 0x14,
	DMA_IRQ = (1 << 0),	/* shift by port # */
	HC_COAL_IRQ = (1 << 4),	/* IRQ coalescing */
	DEV_IRQ = (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS = 0x100,
	SHD_CTL_AST_OFS = 0x20,	/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS = 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS = 0x350,
	SATA_FIS_IRQ_CAUSE_OFS = 0x364,

	LTMODE_OFS = 0x30c,
	LTMODE_BIT8 = (1 << 8),	/* unknown, but necessary */

	PHY_MODE3 = 0x310,
	PHY_MODE4 = 0x314,
	PHY_MODE2 = 0x330,
	SATA_IFCTL_OFS = 0x344,
	SATA_IFSTAT_OFS = 0x34c,
	VENDOR_UNIQUE_FIS_OFS = 0x35c,

	FIS_CFG_OFS = 0x360,
	FIS_CFG_SINGLE_SYNC = (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE = 0x74,
	MV5_LT_MODE = 0x30,
	MV5_PHY_CTL = 0x0C,
	SATA_INTERFACE_CFG = 0x050,

	MV_M2_PREAMP_MASK = 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS = 0,
	EDMA_CFG_Q_DEPTH = 0x1f,	/* max device queue depth */
	EDMA_CFG_NCQ = (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT = (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN = (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS = (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS = (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
	EDMA_ERR_IRQ_MASK_OFS = 0xc,
	EDMA_ERR_D_PAR = (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR = (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV = (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON = (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON = (1 << 4),	/* device connected */
	EDMA_ERR_SERR = (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS = (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5 = (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC = (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7 = (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR = (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR = (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR = (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY = (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX = (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX = (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX = (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO = (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5 = (1 << 5),
	EDMA_ERR_UNDERRUN_5 = (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
				 EDMA_ERR_LNK_CTRL_RX_1 |
				 EDMA_ERR_LNK_CTRL_RX_3 |
				 EDMA_ERR_LNK_CTRL_TX |
				 /* temporary, until we fix hotplug: */
				 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),

	EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
			 EDMA_ERR_LNK_CTRL_RX_2 |
			 EDMA_ERR_LNK_DATA_RX |
			 EDMA_ERR_LNK_DATA_TX |
			 EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
			   EDMA_ERR_UNDERRUN_5 |
			   EDMA_ERR_SELF_DIS_5 |

	EDMA_REQ_Q_BASE_HI_OFS = 0x10,
	EDMA_REQ_Q_IN_PTR_OFS = 0x14,	/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,

	EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS = 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS = 0x24,	/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT = 3,

	EDMA_CMD_OFS = 0x28,	/* EDMA command register */
	EDMA_EN = (1 << 0),	/* enable EDMA */
	EDMA_DS = (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST = (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT = 0x34,

	GEN_II_NCQ_MAX_SECTORS = 256,	/* max sects/io on Gen2 w/NCQ */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_ERRATA_XX42A0 = (1 << 5),
	MV_HP_GEN_I = (1 << 6),		/* Generation I: 50xx */
	MV_HP_GEN_II = (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE = (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE = (1 << 9),		/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN = (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN = (1 << 1),	/* is EDMA set up for NCQ? */

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

#define WINDOW_CTRL(i)	(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)	(0x20034 + ((i) << 4))
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY = 0xffffU,
	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
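	/*
	 * Editor's note on how these masks interlock with the alignment
	 * rules above: the CRQB queue is 1KB-aligned, so bits 9:0 of its
	 * DMA address are zero.  The hardware reuses those bits of the
	 * same register as the queue pointer, e.g. for the request queue:
	 *
	 *	index = (req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
	 *	writelfl((crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, ...);
	 *
	 * With a 5-bit depth mask and EDMA_REQ_Q_PTR_SHIFT == 5, the index
	 * occupies exactly bits 9:5, which the base-address mask leaves free.
	 */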
};

/* Command ReQuest Block: 32B */

/* Command ResPonse Block: 8B */

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */

struct mv_port_priv {
	struct mv_crqb *crqb;
	dma_addr_t crqb_dma;
	struct mv_crpb *crpb;
	dma_addr_t crpb_dma;
	struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int req_idx;
	unsigned int resp_idx;

	u32 pp_flags;
};

struct mv_port_signal {
	u32 amps;
	u32 pre;
};
struct mv_host_priv {
	u32 hp_flags;
	struct mv_port_signal signal[8];
	const struct mv_hw_ops *ops;
	int n_ports;
	void __iomem *base;
	void __iomem *main_cause_reg_addr;
	void __iomem *main_mask_reg_addr;
	u32 irq_cause_ofs;
	u32 irq_mask_ofs;

	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool *crqb_pool;
	struct dma_pool *crpb_pool;
	struct dma_pool *sg_tbl_pool;
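	/*
	 * A minimal sketch of how such a pool would be created (the actual
	 * call site is not shown in this excerpt, so the name and arguments
	 * here are illustrative assumptions):
	 *
	 *	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev,
	 *					    MV_CRQB_Q_SZ,
	 *					    MV_CRQB_Q_SZ, 0);
	 *
	 * The dma_pool "align" argument is what delivers the 1KB/256B/16B
	 * boundary guarantees listed earlier.
	 */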
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue = MV_MAX_Q_DEPTH - 1,
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
};
static struct ata_port_operations mv5_ops = {
	.inherits = &ata_sff_port_ops,

	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,

	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,
	.hardreset = mv_hardreset,
	.error_handler = ata_std_error_handler,	/* avoid SFF EH */
	.post_internal_cmd = ATA_OP_NULL,

	.scr_read = mv5_scr_read,
	.scr_write = mv5_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits = &mv5_ops,
	.qc_defer = sata_pmp_qc_defer_cmd_switch,
	.dev_config = mv6_dev_config,
	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.pmp_hardreset = mv_pmp_hardreset,
	.pmp_softreset = mv_softreset,
	.softreset = mv_softreset,
	.error_handler = sata_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits = &mv6_ops,
	.qc_defer = ata_std_qc_defer,	/* FIS-based switching */
	.dev_config = ATA_OP_NULL,
	.qc_prep = mv_qc_prep_iie,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags = MV_COMMON_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_508x */
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_5080 */
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_604x */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
			 ATA_FLAG_NCQ,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_608x */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
			 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_6042 */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
			 ATA_FLAG_NCQ,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
			 ATA_FLAG_NCQ,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
			 ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata = mv5_phy_errata,
	.enable_leds = mv5_enable_leds,
	.read_preamp = mv5_read_preamp,
	.reset_hc = mv5_reset_hc,
	.reset_flash = mv5_reset_flash,
	.reset_bus = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv6_enable_leds,
	.read_preamp = mv6_read_preamp,
	.reset_hc = mv6_reset_hc,
	.reset_flash = mv6_reset_flash,
	.reset_bus = mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv_soc_enable_leds,
	.read_preamp = mv_soc_read_preamp,
	.reset_hc = mv_soc_reset_hc,
	.reset_flash = mv_soc_reset_flash,
	.reset_bus = mv_soc_reset_bus,
};
/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with the main_cause and main_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift += hardport * 2;					\
}
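/*
 * Worked example (editor's note): for port 5,
 *	shift    = mv_hc_from_port(5) * HC_SHIFT = 1 * 9 = 9
 *	hardport = 5 & MV_PORT_MASK              = 1
 *	shift   += hardport * 2                  = 11
 * so (ERR_IRQ << 11) and (DONE_IRQ << 11) select port 5's bits in the
 * main cause/mask registers, consistent with the HC_SHIFT = 9 layout
 * noted above (bits 9-17 = HC1's ports).
 */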
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
	       MV_SATAHC_ARBTR_REG_SZ +
	       (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
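/*
 * Editor's note on the "(x >> 16) >> 16" idiom above: dma_addr_t may be
 * only 32 bits wide on some configurations, and a single ">> 32" on a
 * 32-bit type is undefined behavior in C.  Shifting twice by 16 yields
 * the high dword portably (zero when dma_addr_t is 32 bits).  E.g. for
 * a 64-bit address 0x0000000123456400:
 *	(addr >> 16) >> 16 == 0x00000001   (BASE_HI register)
 *	addr & 0xfffffc00  == 0x23456400   (BASE_LO bits)
 */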
/*
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the queued command
 *
 * Verify the local cache of the eDMA state is accurate, and enable
 * the engine if it is not already running.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hardport = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hardport);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ | DMA_IRQ) << hardport;
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}
/*
 * mv_stop_edma_engine - Disable eDMA engine
 * @port_mmio: io base address
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;	/* eDMA failed to shut off */
}
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (port < 0) {
		start_hc = start_port = 0;
		num_ports = 8;	/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	DPRINTK("PCI config space regs:\n");
	mv_dump_pci_cfg(pdev, 0x68);

	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	}
	return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	}
	return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 * (no FIS-based switching).
	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
			ata_dev_printk(adev, KERN_INFO,
				"max_sectors limited to %u for NCQ\n",
				GEN_II_NCQ_MAX_SECTORS);
		}
	}
}
static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
{
	u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;

	/*
	 * Various bit settings required for operation
	 * in FIS-based switching (fbs) mode on GenIIe:
	 */
	old_fcfg = readl(port_mmio + FIS_CFG_OFS);
	old_ltmode = readl(port_mmio + LTMODE_OFS);
	if (enable_fbs) {
		new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode | LTMODE_BIT8;
	} else { /* disable fbs */
		new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode & ~LTMODE_BIT8;
	}
	if (new_fcfg != old_fcfg)
		writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
}
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */

		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
		} else {
			mv_config_fbs(port_mmio, 0);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
/*
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, and initialize port private memory.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
/*
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
/*
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + sg_len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
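/*
 * Worked example (editor's note): a 0x2000-byte segment at DMA address
 * 0x1ffff000 crosses a 64K boundary, so the loop above emits two ePRDs:
 *	offset = 0x1ffff000 & 0xffff = 0xf000
 *	len    = 0x10000 - 0xf000    = 0x1000   (first ePRD)
 * then addr advances to 0x20000000 and the remaining 0x1000 bytes go in
 * a second ePRD.  This worst-case doubling is why the scsi_host_templates
 * above advertise only MV_MAX_SG_CT / 2 entries.
 */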
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		  (last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
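/*
 * Worked example (editor's note, assuming ATA_REG_CMD == 7 as in
 * <linux/ata.h>): packing the final command-register write,
 *	mv_crqb_pack_cmd(cw, tf->command, ATA_REG_CMD, 1);
 * yields tmp = command | (7 << 8) | (0x2 << 11) | (1 << 15)
 *            = command | 0x9700,
 * i.e. the low byte carries the data and the high byte encodes the
 * shadow-register address, control strobe, and "last word" marker.
 */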
/*
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/*
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/*
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
/*
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected command (may be NULL)
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which also performs a COMRESET.
 * The SERR case requires a clear of pending errors in the SATA
 * SERROR register.  Finally, if the port disabled DMA,
 * update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);

	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;
		else
			/* Gen II/IIE: get active ATA command via tag, to
			 * enable support for queueing.  this works
			 * transparently for queued and non-queued modes.
			 */
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
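/*
 * Editor's note, a worked turn of the response queue above: with
 * MV_MAX_Q_DEPTH_MASK = 31 and EDMA_RSP_Q_PTR_SHIFT = 3 (one 8-byte CRPB
 * per slot), a software resp_idx of 33 gives
 *	out_index = 33 & 31 = 1
 *	field     = 1 << 3  = 0x08
 * which lands in bits 7:3 of EDMA_RSP_Q_OUT_PTR_OFS; the 256B-aligned
 * crpb_dma (mask 0xffffff00) supplies bits 31:8 of the same register.
 */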
/*
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hardport, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
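		/*
		 * Editor's note: for port 5 this gives shift = 5*2 + 1 = 11,
		 * matching MV_PORT_TO_SHIFT_AND_HARDPORT() earlier (hc 1
		 * contributes HC_SHIFT = 9, hardport 1 contributes 2).
		 */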
		have_err_bits = ((ERR_IRQ << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hardport = mv_hardport_from_port(port);	/* range 0..3 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((DMA_IRQ << hardport) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hardport) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/*
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 main_cause, main_mask;

	spin_lock(&host->lock);
	main_cause = readl(hpriv->main_cause_reg_addr);
	main_mask = readl(hpriv->main_mask_reg_addr);
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (!(main_cause & main_mask) || (main_cause == 0xffffffffU))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = main_cause & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);
	return IRQ_RETVAL(handled);
}
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	}
	return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	}
	return -EINVAL;
}
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/*
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}

	/*
	 * Temporary: wait 3 seconds before port-probing can happen,
	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
	 * This can go away once hotplug is fully/correctly implemented.
	 */
	if (rc == 0)
		msleep(3000);

done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}
#undef ZERO
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}
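/*
 * Note: the SoC variants integrate a single host controller, which is
 * why mv_soc_reset_one_hc() hardwires mv_hc_base(mmio, 0) and why the
 * n_hc argument above goes unused -- an observation from this code,
 * not a statement from the Marvell docs.
 */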
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}
static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);

	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifctl |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
}
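/*
 * Usage note for mv_setup_ifctl(): mv_reset_channel() below passes
 * want_gen2i=1 so the PHY may negotiate 3.0Gb/s, while mv_hardreset()
 * passes 0 to pin a misbehaving link at 1.5Gb/s before retrying.
 */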
/*
 * Caller must ensure that EDMA is not active,
 * by first doing mv_stop_edma() where needed.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	mv_stop_edma_engine(port_mmio);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
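/*
 * The low four bits of SATA_IFCTL_OFS select which device port behind
 * a port-multiplier receives subsequent FISes; mv_pmp_select() below
 * rewrites them only when the selection actually changes.
 */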
static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
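/*
 * A decode of the SStatus values tested above, per the standard SATA
 * SStatus layout (DET in bits 3:0, SPD in bits 7:4, IPM in bits 11:8)
 * rather than anything Marvell-specific: 0x113/0x123 mean "device
 * present, phy communication established" at Gen1/Gen2 speed, 0x0
 * means no device, and 0x121 means a device was sensed but phy
 * communication never came up -- the stuck case that the errata
 * workaround retries at 1.5Gb/s.
 */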
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	u32 main_mask;

	/* FIXME: handle coalescing completion events properly */

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* disable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 main_mask, hc_irq_cause;

	/* FIXME: handle coalescing completion events properly */

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}
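/*
 * Freeze/thaw pattern: mv_eh_freeze() simply masks this port's DONE
 * and ERR bits in the chip's main IRQ mask, while mv_eh_thaw() first
 * clears any stale EDMA and host-controller causes so that unmasking
 * cannot immediately re-raise an interrupt for already-handled events.
 */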
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
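	/*
	 * The "shadow registers" in the SHD block are u32-spaced copies
	 * of the classic ATA taskfile registers, which is why each
	 * address above is computed with a sizeof(u32) stride.
	 */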
	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
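/*
 * Arithmetic footnote to the Highpoint warning in mv_chip_id() above:
 * the mask in (dev->n_sectors & ~0xfffff) rounds the capacity down to
 * a multiple of 0x100000 sectors, i.e. 512 MiB of 512-byte sectors,
 * so the RAID metadata sits on the highest half-GiB boundary below
 * the drive's capacity -- hence the advice to stay clear of the last
 * couple of gigabytes.
 */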
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr  = mmio + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr  = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
	}

	/* global interrupt mask: 0 == mask everything */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					      MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					      MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}
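/*
 * Layout of each WINDOW_CTRL word, as read off the code above (no
 * public datasheet to check against): bits 31:16 hold the size mask
 * (size - 1), bits 15:8 the mbus attribute, bits 7:4 the DRAM target
 * id, and bit 0 enables the window; WINDOW_BASE holds the window's
 * physical base address.
 */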
/**
 *      mv_platform_probe - handle a positive probe of an soc Marvell
 *      host
 *      @pdev: platform device found
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}
/**
 *      mv_platform_remove - unplug a platform interface
 *      @pdev: platform device
 *
 *      A platform bus SATA device has been unplugged.  Perform the needed
 *      cleanup.  Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
	},
};
#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};
/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif

	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);