/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */
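/* A rough sketch of the ADMA command flow as implemented below (an
   annotation based on reading this code, not an official description):
   nv_adma_qc_prep() builds a CPB (taskfile encoded as 16-bit
   register/value pairs plus up to 5 inline APRD scatter/gather entries),
   nv_adma_qc_issue() kicks the controller by writing the command tag to
   the APPEND register, and nv_adma_interrupt() walks the notifier bits
   and per-CPB resp_flags to complete commands. */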
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.2"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
enum {
	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
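	/* The arithmetic above gives each command tag exactly 1K of the
	   coherent buffer: a 128-byte CPB plus (1024 - 128) / 16 = 56
	   external APRDs.  With the 5 APRDs embedded in the CPB that is
	   the 61-entry sg table limit, and 32 tags make the 32K
	   NV_ADMA_PORT_PRIV_DMA_SZ. */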
	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
};
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};

struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	u8			flags;
};
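/* Per-port interrupt pending check against the ADMA general control
   register; judging from the shift below, port N's flag appears to live
   at bit 19 + 12*N of NV_ADMA_GEN_CTL. */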
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 nv_adma_bmdma_status(struct ata_port *ap);

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VDEVICE(NVIDIA, 0x045c), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045d), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045e), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045f), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x0550), GENERIC }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0551), GENERIC }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0552), GENERIC }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0553), GENERIC }, /* MCP67 */
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ } /* terminate list */
};
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= nv_adma_bmdma_setup,
	.bmdma_start		= nv_adma_bmdma_start,
	.bmdma_stop		= nv_adma_bmdma_stop,
	.bmdma_status		= nv_adma_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_bmdma_stop,
	.data_xfer		= ata_mmio_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.host_stop		= nv_adma_host_stop,
};
static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;
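/* Module parameter, registered as "adma" at the bottom of this file.
   Setting adma=0 forces the legacy interface even on CK804/MCP04
   (see the type >= CK804 && adma_enabled check in nv_init_one). */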
static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
						unsigned int port_no)
{
	mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
	return mmio;
}

static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
{
	return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
}

static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
{
	return (ap->host->mmio_base + NV_ADMA_GEN);
}

static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
{
	return (nv_adma_gen_block(ap) +
		NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
}

static void nv_adma_register_mode(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	struct nv_adma_port_priv *pp = ap->private_data;
	u16 tmp;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	struct nv_adma_port_priv *pp = ap->private_data;
	u16 tmp;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
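/* Usage sketch: the issue path flips into ADMA mode via nv_adma_mode()
   for DMA-mapped commands, while the ATAPI setup, PIO fallback and error
   handling paths call nv_adma_register_mode() first so that the shadowed
   taskfile and BMDMA registers behave as on the legacy interface. */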
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	}
	else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if(ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if(adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	}
	else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);

	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
	}
	else {
		cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature);
		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
	}
	cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature);
	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->lbal);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->lbam);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->lbah);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD   << 8) | tf->command | CMDEND);

	return idx;
}
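/* Illustrative encoding (an example, not from any NVIDIA document): a
   READ DMA EXT (command 0x25) built by the routine above fills all 12
   tf[] entries -- one device/WNB word, five hob words, five
   current-taskfile words, then ((ATA_REG_CMD << 8) | 0x25 | CMDEND). */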
static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	int complete = 0, have_err = 0;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (flags & NV_CPB_RESP_DONE) {
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		complete = 1;
	}
	if (flags & NV_CPB_RESP_ATA_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CMD_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CPB_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if(complete || force_err)
	{
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		if(likely(qc)) {
			u8 ata_status = 0;
			/* Only use the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if(qc->tf.protocol != ATA_PROT_NCQ)
				ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));

			if(have_err || force_err)
				ata_status |= ATA_ERR;

			qc->err_mask |= ac_err_mask(ata_status);
			DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
	int handled;

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	handled = ata_host_intr(ap, qc);
	if (unlikely(!handled)) {
		/* spurious, clear it */
		ata_check_status(ap);
	}

	return 1;
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = nv_adma_ctl_block(ap);
			u16 status;
			u32 gen_ctl;
			int have_global_err = 0;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */

			/* freeze if hotplugged */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
				ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
				ata_port_freeze(ap);
				handled++;
				continue;
			}

			if (status & NV_ADMA_STAT_TIMEOUT) {
				ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if (status & NV_ADMA_STAT_CPBERR) {
				ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
				/* Check CPBs for completed commands */

				if(ata_tag_valid(ap->active_tag))
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
						(notifier_error & (1 << ap->active_tag)));
				else {
					int pos;
					u32 active = ap->sactive;
					while( (pos = ffs(active)) ) {
						pos--; /* ffs() is 1-based */
						nv_adma_check_cpb(ap, pos, have_global_err ||
							(notifier_error & (1 << pos)) );
						active &= ~(1 << pos );
					}
				}
			}

			handled++; /* irq handled if we got here */
		}
	}

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		writel(notifier_clears[0],
			nv_adma_notifier_clear_block(host->ports[0]));
		writel(notifier_clears[1],
			nv_adma_notifier_clear_block(host->ports[1]));
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void nv_adma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       nv_adma_notifier_clear_block(ap));

	/* clear legacy status */
	outb(inb(dma_stat_addr), dma_stat_addr);
}

static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;

	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ata_exec_command(ap, &qc->tf);
}

static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* clear start/stop bit */
	outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap); /* dummy read */
}

static u8 nv_adma_bmdma_status(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));

	return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
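/* The four BMDMA hooks above are only meaningful while the port is in
   register mode; in ADMA mode the controller fetches CPBs on its own and
   the legacy BMDMA engine is bypassed, hence the WARN_ON/early-return
   checks. */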
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		rc = -ENOMEM;
		goto err_out;
	}

	mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				 &mem_dma, GFP_KERNEL);
	if (!mem) {
		rc = -ENOMEM;
		goto err_out_kfree;
	}
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;

err_out_kfree:
	kfree(pp);
err_out:
	ata_port_stop(ap);
	return rc;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = nv_adma_ctl_block(ap);

	VPRINTK("ENTER\n");

	writew(0, mmio + NV_ADMA_CTL);

	ap->private_data = NULL;
	dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
	kfree(pp);
	ata_port_stop(ap);
}

static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->mmio_base;
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= (unsigned long) mmio;
	ioport->data_addr	= (unsigned long) mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= (unsigned long) mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= (unsigned long) mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= (unsigned long) mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= (unsigned long) mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= (unsigned long) mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= (unsigned long) mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= (unsigned long) mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= (unsigned long) mmio + 0x20;
}
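/* In ADMA mode the taskfile registers are shadowed in MMIO space at a
   4-byte stride (the "* 4" above), which is presumably why the generic
   libata taskfile helpers keep working unchanged on top of this mapping. */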
static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	for (i = 0; i < probe_ent->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
		u16 tmp;

		/* enable interrupt, clear reset if not already clear */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags;

	memset(aprd, 0, sizeof(struct nv_adma_prd));

	flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
}
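/* Note on the idx != 4 check above: entry 4 is the last APRD embedded in
   the CPB, and the hop to the external APRD table appears to be signalled
   by next_aprd rather than by NV_APRD_CONT. */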
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_APRD_VALID |
		       NV_CPB_CTL_IEN;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	     (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	memset(cpb, 0, sizeof(struct nv_adma_cpb));

	cpb->len		= 3; /* 12 16-bit tf entries = three 64-bit words */
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	nv_adma_fill_sg(qc, cpb);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = nv_adma_ctl_block(qc->ap);

	VPRINTK("ENTER\n");

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	     (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/* use ATA register mode */
		VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();
	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}
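/* irq_stat packs one 4-bit status field per port (NV_INT_PORT_SHIFT);
   shifting after each iteration walks the next port's bits into place. */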
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = nv_adma_ctl_block(ap);
		int i;
		u16 tmp;

		u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
		u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
		u32 status = readw(mmio + NV_ADMA_STAT);

		ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
			"notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
			notifier, notifier_error, gen_ctl, status);

		for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
			struct nv_adma_cpb *cpb = &pp->cpb[i];
			if( cpb->ctl_flags || cpb->resp_flags )
				ata_port_printk(ap, KERN_ERR,
					"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
					i, cpb->ctl_flags, cpb->resp_flags);
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		ata_port_printk(ap, KERN_ERR, "Resetting port\n");

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	int pci_dev_busy = 0;
	int rc;
	u32 bar;
	unsigned long base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar=0; bar<6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out_disable;
	}

	if(type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		   !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}

	if(!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			goto err_out_regions;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			goto err_out_regions;
	}

	rc = -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		goto err_out_regions;

	probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
	if (!probe_ent->mmio_base) {
		rc = -EIO;
		goto err_out_free_ent;
	}

	base = (unsigned long)probe_ent->mmio_base;

	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);

	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			goto err_out_iounmap;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		goto err_out_iounmap;

	kfree(probe_ent);

	return 0;

err_out_iounmap:
	pci_iounmap(pdev, probe_ent->mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
err_out:
	return rc;
}

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

	ata_pci_host_stop(host);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int i;
	u32 tmp32;

	for (i = 0; i < host->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
		u16 tmp;

		/* disable interrupt */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");