2 libata-core.c - helper library for ATA
4 Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 Copyright 2003-2004 Jeff Garzik
7 The contents of this file are subject to the Open
8 Software License version 1.1 that can be found at
9 http://www.opensource.org/licenses/osl-1.1.txt and is included herein
12 Alternatively, the contents of this file may be used under the terms
13 of the GNU General Public License version 2 (the "GPL") as distributed
14 in the kernel source COPYING file, in which case the provisions of
15 the GPL are applicable instead of the above. If you wish to allow
16 the use of your version of this file only under the terms of the
17 GPL and not to allow others to use your version of this file under
18 the OSL, indicate your decision by deleting the provisions above and
19 replace them with the notice and other provisions required by the GPL.
20 If you do not delete the provisions above, a recipient may use your
21 version of this file under either the OSL or the GPL.
25 #include <linux/config.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
32 #include <linux/highmem.h>
33 #include <linux/spinlock.h>
34 #include <linux/blkdev.h>
35 #include <linux/delay.h>
36 #include <linux/timer.h>
37 #include <linux/interrupt.h>
38 #include <linux/completion.h>
39 #include <linux/suspend.h>
40 #include <linux/workqueue.h>
41 #include <scsi/scsi.h>
43 #include "scsi_priv.h"
44 #include <scsi/scsi_host.h>
45 #include <linux/libata.h>
47 #include <asm/semaphore.h>
48 #include <asm/byteorder.h>
/* Forward declarations for helpers defined later in this file.
 * NOTE(review): this chunk is a lossy extraction -- every line carries a
 * stale original line number and some lines (e.g. trailing parameters of
 * ata_busy_sleep/ata_choose_xfer_mode) are missing.  Kept byte-identical. */
52 static unsigned int ata_busy_sleep (struct ata_port *ap,
53 unsigned long tmout_pat,
55 static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
56 static void ata_set_mode(struct ata_port *ap);
57 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
58 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
59 static int fgb(u32 bitmap);
60 static int ata_choose_xfer_mode(struct ata_port *ap,
62 unsigned int *xfer_shift_out);
63 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
64 static void __ata_qc_complete(struct ata_queued_cmd *qc);
/* Module-wide state: ata_unique_id hands out port ids starting at 1;
 * ata_wq is a private workqueue (presumably for deferred port work --
 * its use is outside this chunk, verify against the full file). */
66 static unsigned int ata_unique_id = 1;
67 static struct workqueue_struct *ata_wq;
69 MODULE_AUTHOR("Jeff Garzik");
70 MODULE_DESCRIPTION("Library module for ATA devices");
71 MODULE_LICENSE("GPL");
72 MODULE_VERSION(DRV_VERSION);
/* ata_tf_load_pio(): write taskfile registers with outb() port I/O.
 * Writes ctl only when it changed since the last write (cached in
 * ap->last_ctl), then the HOB bank when ATA_TFLAG_ISADDR+LBA48, then the
 * low bank, then the device register when ATA_TFLAG_DEVICE.
 * NOTE(review): interior lines (VPRINTK arguments, closing braces, the
 * ISADDR-only branch) are missing from this extraction. */
75 * ata_tf_load - send taskfile registers to host controller
76 * @ap: Port to which output is sent
77 * @tf: ATA taskfile register set
79 * Outputs ATA taskfile to standard ATA host controller.
82 * Inherited from caller.
85 static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
87 struct ata_ioports *ioaddr = &ap->ioaddr;
88 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
/* only touch the Device Control register when the value changes */
90 if (tf->ctl != ap->last_ctl) {
91 outb(tf->ctl, ioaddr->ctl_addr);
92 ap->last_ctl = tf->ctl;
/* high-order bytes (HOB) must be written before the low-order bytes */
96 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
97 outb(tf->hob_feature, ioaddr->feature_addr);
98 outb(tf->hob_nsect, ioaddr->nsect_addr);
99 outb(tf->hob_lbal, ioaddr->lbal_addr);
100 outb(tf->hob_lbam, ioaddr->lbam_addr);
101 outb(tf->hob_lbah, ioaddr->lbah_addr);
102 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
111 outb(tf->feature, ioaddr->feature_addr);
112 outb(tf->nsect, ioaddr->nsect_addr);
113 outb(tf->lbal, ioaddr->lbal_addr);
114 outb(tf->lbam, ioaddr->lbam_addr);
115 outb(tf->lbah, ioaddr->lbah_addr);
116 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
124 if (tf->flags & ATA_TFLAG_DEVICE) {
125 outb(tf->device, ioaddr->device_addr);
126 VPRINTK("device 0x%X\n", tf->device);
/* ata_tf_load_mmio(): identical structure to ata_tf_load_pio() but via
 * writeb() on memory-mapped registers.
 * NOTE(review): the ctl write below uses ap->ioaddr.ctl_addr directly
 * instead of the local ioaddr alias -- harmless (same object) but
 * inconsistent with the rest of the function. */
133 * ata_tf_load_mmio - send taskfile registers to host controller
134 * @ap: Port to which output is sent
135 * @tf: ATA taskfile register set
137 * Outputs ATA taskfile to standard ATA host controller using MMIO.
140 * Inherited from caller.
143 static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
145 struct ata_ioports *ioaddr = &ap->ioaddr;
146 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
148 if (tf->ctl != ap->last_ctl) {
149 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
150 ap->last_ctl = tf->ctl;
/* HOB bank first, as in the PIO variant */
154 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
155 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
156 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
157 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
158 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
159 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
160 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
169 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
170 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
171 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
172 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
173 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
174 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
182 if (tf->flags & ATA_TFLAG_DEVICE) {
183 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
184 VPRINTK("device 0x%X\n", tf->device);
192 * ata_tf_load - send taskfile registers to host controller
193 * @ap: Port to which output is sent
194 * @tf: ATA taskfile register set
196 * Outputs ATA taskfile to standard ATA host controller using MMIO
197 * or PIO as indicated by the ATA_FLAG_MMIO flag.
198 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
199 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
200 * hob_lbal, hob_lbam, and hob_lbah.
202 * This function waits for idle (!BUSY and !DRQ) after writing
203 * registers. If the control register has a new value, this
204 * function also waits for idle after writing control and before
205 * writing the remaining registers.
207 * May be used as the tf_load() entry in ata_port_operations.
210 * Inherited from caller.
212 void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
214 if (ap->flags & ATA_FLAG_MMIO)
215 ata_tf_load_mmio(ap, tf);
217 ata_tf_load_pio(ap, tf);
/* ata_exec_command_pio(): issue the command byte with outb().
 * NOTE(review): the tail of the body (original lines 237-239) is missing
 * from this extraction -- upstream follows the outb() with ata_pause();
 * verify against the full file. */
221 * ata_exec_command_pio - issue ATA command to host controller
222 * @ap: port to which command is being issued
223 * @tf: ATA taskfile register set
225 * Issues PIO write to ATA command register, with proper
226 * synchronization with interrupt handler / other threads.
229 * spin_lock_irqsave(host_set lock)
232 static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
234 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
236 outb(tf->command, ap->ioaddr.command_addr);
/* ata_exec_command_mmio(): issue the command byte with writeb().
 * NOTE(review): the tail of the body is missing from this extraction --
 * upstream follows the writeb() with ata_pause(); verify against the
 * full file. */
242 * ata_exec_command_mmio - issue ATA command to host controller
243 * @ap: port to which command is being issued
244 * @tf: ATA taskfile register set
246 * Issues MMIO write to ATA command register, with proper
247 * synchronization with interrupt handler / other threads.
250 * spin_lock_irqsave(host_set lock)
253 static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
255 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
257 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
263 * ata_exec_command - issue ATA command to host controller
264 * @ap: port to which command is being issued
265 * @tf: ATA taskfile register set
267 * Issues PIO/MMIO write to ATA command register, with proper
268 * synchronization with interrupt handler / other threads.
271 * spin_lock_irqsave(host_set lock)
273 void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
275 if (ap->flags & ATA_FLAG_MMIO)
276 ata_exec_command_mmio(ap, tf);
278 ata_exec_command_pio(ap, tf);
/* ata_exec(): like ata_exec_command() but takes the host_set lock itself.
 * NOTE(review): the declaration of the local `flags` used by
 * spin_lock_irqsave() (original line ~295) is missing from this
 * extraction; it is an `unsigned long` in context. */
282 * ata_exec - issue ATA command to host controller
283 * @ap: port to which command is being issued
284 * @tf: ATA taskfile register set
286 * Issues PIO/MMIO write to ATA command register, with proper
287 * synchronization with interrupt handler / other threads.
290 * Obtains host_set lock.
293 static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
297 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
298 spin_lock_irqsave(&ap->host_set->lock, flags);
299 ap->ops->exec_command(ap, tf);
300 spin_unlock_irqrestore(&ap->host_set->lock, flags);
/* ata_tf_to_host(): load the taskfile, then issue the command.
 * NOTE(review): only the tf_load() call survives in this extraction; the
 * command-issue step (original line ~319, an ata_exec() call) is missing
 * -- verify against the full file. */
304 * ata_tf_to_host - issue ATA taskfile to host controller
305 * @ap: port to which command is being issued
306 * @tf: ATA taskfile register set
308 * Issues ATA taskfile register set to ATA host controller,
309 * with proper synchronization with interrupt handler and
313 * Obtains host_set lock.
316 static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
318 ap->ops->tf_load(ap, tf);
324 * ata_tf_to_host_nolock - issue ATA taskfile to host controller
325 * @ap: port to which command is being issued
326 * @tf: ATA taskfile register set
328 * Issues ATA taskfile register set to ATA host controller,
329 * with proper synchronization with interrupt handler and
333 * spin_lock_irqsave(host_set lock)
336 void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
338 ap->ops->tf_load(ap, tf);
339 ap->ops->exec_command(ap, tf);
/* ata_tf_read_pio(): read back the shadow registers with inb().  For
 * LBA48, set ATA_HOB in the Device Control register and read the
 * high-order bank.
 * NOTE(review): ATA_HOB is not visibly cleared afterwards in this
 * extraction -- confirm whether a later ctl write restores it. */
343 * ata_tf_read_pio - input device's ATA taskfile shadow registers
344 * @ap: Port from which input is read
345 * @tf: ATA taskfile register set for storing input
347 * Reads ATA taskfile registers for currently-selected device
351 * Inherited from caller.
354 static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
356 struct ata_ioports *ioaddr = &ap->ioaddr;
358 tf->nsect = inb(ioaddr->nsect_addr);
359 tf->lbal = inb(ioaddr->lbal_addr);
360 tf->lbam = inb(ioaddr->lbam_addr);
361 tf->lbah = inb(ioaddr->lbah_addr);
362 tf->device = inb(ioaddr->device_addr);
364 if (tf->flags & ATA_TFLAG_LBA48) {
/* ATA_HOB selects the high-order byte bank for subsequent reads */
365 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
366 tf->hob_feature = inb(ioaddr->error_addr);
367 tf->hob_nsect = inb(ioaddr->nsect_addr);
368 tf->hob_lbal = inb(ioaddr->lbal_addr);
369 tf->hob_lbam = inb(ioaddr->lbam_addr);
370 tf->hob_lbah = inb(ioaddr->lbah_addr);
/* ata_tf_read_mmio(): MMIO twin of ata_tf_read_pio().
 * NOTE(review): the ATA_HOB write uses ap->ioaddr.ctl_addr directly
 * instead of the local ioaddr alias -- same object, but inconsistent
 * with the surrounding lines. */
375 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
376 * @ap: Port from which input is read
377 * @tf: ATA taskfile register set for storing input
379 * Reads ATA taskfile registers for currently-selected device
383 * Inherited from caller.
386 static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
388 struct ata_ioports *ioaddr = &ap->ioaddr;
390 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
391 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
392 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
393 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
394 tf->device = readb((void __iomem *)ioaddr->device_addr);
396 if (tf->flags & ATA_TFLAG_LBA48) {
397 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
398 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
399 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
400 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
401 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
402 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
408 * ata_tf_read - input device's ATA taskfile shadow registers
409 * @ap: Port from which input is read
410 * @tf: ATA taskfile register set for storing input
412 * Reads ATA taskfile registers for currently-selected device
415 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
416 * is set, also reads the hob registers.
418 * May be used as the tf_read() entry in ata_port_operations.
421 * Inherited from caller.
423 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
425 if (ap->flags & ATA_FLAG_MMIO)
426 ata_tf_read_mmio(ap, tf);
428 ata_tf_read_pio(ap, tf);
432 * ata_check_status_pio - Read device status reg & clear interrupt
433 * @ap: port where the device is
435 * Reads ATA taskfile status register for currently-selected device
436 * and return its value. This also clears pending interrupts
440 * Inherited from caller.
442 static u8 ata_check_status_pio(struct ata_port *ap)
444 return inb(ap->ioaddr.status_addr);
448 * ata_check_status_mmio - Read device status reg & clear interrupt
449 * @ap: port where the device is
451 * Reads ATA taskfile status register for currently-selected device
452 * via MMIO and return its value. This also clears pending interrupts
456 * Inherited from caller.
458 static u8 ata_check_status_mmio(struct ata_port *ap)
460 return readb((void __iomem *) ap->ioaddr.status_addr);
465 * ata_check_status - Read device status reg & clear interrupt
466 * @ap: port where the device is
468 * Reads ATA taskfile status register for currently-selected device
469 * and return its value. This also clears pending interrupts
472 * May be used as the check_status() entry in ata_port_operations.
475 * Inherited from caller.
477 u8 ata_check_status(struct ata_port *ap)
479 if (ap->flags & ATA_FLAG_MMIO)
480 return ata_check_status_mmio(ap);
481 return ata_check_status_pio(ap);
486 * ata_altstatus - Read device alternate status reg
487 * @ap: port where the device is
489 * Reads ATA taskfile alternate status register for
490 * currently-selected device and return its value.
492 * Note: may NOT be used as the check_altstatus() entry in
493 * ata_port_operations.
496 * Inherited from caller.
498 u8 ata_altstatus(struct ata_port *ap)
500 if (ap->ops->check_altstatus)
501 return ap->ops->check_altstatus(ap);
503 if (ap->flags & ATA_FLAG_MMIO)
504 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
505 return inb(ap->ioaddr.altstatus_addr);
510 * ata_chk_err - Read device error reg
511 * @ap: port where the device is
513 * Reads ATA taskfile error register for
514 * currently-selected device and return its value.
516 * Note: may NOT be used as the check_err() entry in
517 * ata_port_operations.
520 * Inherited from caller.
522 u8 ata_chk_err(struct ata_port *ap)
524 if (ap->ops->check_err)
525 return ap->ops->check_err(ap);
527 if (ap->flags & ATA_FLAG_MMIO) {
528 return readb((void __iomem *) ap->ioaddr.error_addr);
530 return inb(ap->ioaddr.error_addr);
/* ata_tf_to_fis(): build a 20-byte Register - Host to Device FIS from a
 * taskfile.  Byte 0 is the FIS type (0x27); byte 1 carries the PM port
 * in bits 3:0 and bit 7 set to mark a Command FIS.
 * NOTE(review): the fis[4..7] (lba/device), fis[12] (nsect) and
 * fis[14..19] assignments are missing from this extraction. */
534 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
535 * @tf: Taskfile to convert
536 * @fis: Buffer into which data will output
537 * @pmp: Port multiplier port
539 * Converts a standard ATA taskfile to a Serial ATA
540 * FIS structure (Register - Host to Device).
543 * Inherited from caller.
546 void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
548 fis[0] = 0x27; /* Register - Host to Device FIS */
549 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
550 bit 7 indicates Command FIS */
551 fis[2] = tf->command;
552 fis[3] = tf->feature;
559 fis[8] = tf->hob_lbal;
560 fis[9] = tf->hob_lbam;
561 fis[10] = tf->hob_lbah;
562 fis[11] = tf->hob_feature;
565 fis[13] = tf->hob_nsect;
/* ata_tf_from_fis(): inverse of ata_tf_to_fis() -- unpack a received FIS
 * into a taskfile.  In a D2H FIS, byte 2 is the Status and byte 3 the
 * Error register, hence the comments on the first two assignments.
 * NOTE(review): the lba/device/nsect unpacking lines are missing from
 * this extraction; the doc comment's "Host to Device" wording appears
 * copied from ata_tf_to_fis() and is suspect. */
576 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
577 * @fis: Buffer from which data will be input
578 * @tf: Taskfile to output
580 * Converts a standard ATA taskfile to a Serial ATA
581 * FIS structure (Register - Host to Device).
584 * Inherited from caller.
587 void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
589 tf->command = fis[2]; /* status */
590 tf->feature = fis[3]; /* error */
597 tf->hob_lbal = fis[8];
598 tf->hob_lbam = fis[9];
599 tf->hob_lbah = fis[10];
602 tf->hob_nsect = fis[13];
/* ata_prot_to_cmd(): map a taskfile protocol + LBA48 flag to a pair of
 * opcodes, packed as (read | write << 8).  Callers unpack with
 * `cmd & 0xff` / `(cmd >> 8) & 0xff` (see ata_dev_set_protocol()).
 * NOTE(review): the switch/case and if/else skeleton is missing from
 * this extraction -- only the opcode assignments survive. */
606 * ata_prot_to_cmd - determine which read/write opcodes to use
607 * @protocol: ATA_PROT_xxx taskfile protocol
608 * @lba48: true is lba48 is present
610 * Given necessary input, determine which read/write commands
611 * to use to transfer data.
616 static int ata_prot_to_cmd(int protocol, int lba48)
618 int rcmd = 0, wcmd = 0;
623 rcmd = ATA_CMD_PIO_READ_EXT;
624 wcmd = ATA_CMD_PIO_WRITE_EXT;
626 rcmd = ATA_CMD_PIO_READ;
627 wcmd = ATA_CMD_PIO_WRITE;
633 rcmd = ATA_CMD_READ_EXT;
634 wcmd = ATA_CMD_WRITE_EXT;
637 wcmd = ATA_CMD_WRITE;
/* pack both opcodes into one int: read in the low byte, write above it */
645 return rcmd | (wcmd << 8);
/* ata_dev_set_protocol(): after IDENTIFY and mode configuration, record
 * the transfer protocol (PIO vs DMA, from ATA_DFLAG_PIO) and the
 * matching read/write opcodes on the device.
 * NOTE(review): the if/else selecting between the two proto assignments
 * is missing from this extraction. */
649 * ata_dev_set_protocol - set taskfile protocol and r/w commands
650 * @dev: device to examine and configure
652 * Examine the device configuration, after we have
653 * read the identify-device page and configured the
654 * data transfer mode. Set internal state related to
655 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
656 * and calculate the proper read/write commands to use.
661 static void ata_dev_set_protocol(struct ata_device *dev)
663 int pio = (dev->flags & ATA_DFLAG_PIO);
664 int lba48 = (dev->flags & ATA_DFLAG_LBA48);
668 proto = dev->xfer_protocol = ATA_PROT_PIO;
670 proto = dev->xfer_protocol = ATA_PROT_DMA;
672 cmd = ata_prot_to_cmd(proto, lba48);
/* unpack the (read | write << 8) pair produced by ata_prot_to_cmd() */
676 dev->read_cmd = cmd & 0xff;
677 dev->write_cmd = (cmd >> 8) & 0xff;
/* xfer_mode_str[] maps transfer-mode bit positions to printable names;
 * its initializer is missing from this extraction. */
680 static const char * xfer_mode_str[] = {
/* ata_mode_string(): return the name of the highest-numbered mode bit
 * set in @mask, scanning UDMA (bits 7..0), then MWDMA, then PIO ranges.
 * NOTE(review): the loop bodies, the fallback "<n/a>" return and the
 * stale "ata_udma_string" title in the comment block suggest this doc
 * comment predates a rename -- verify against the full file. */
700 * ata_udma_string - convert UDMA bit offset to string
701 * @mask: mask of bits supported; only highest bit counts.
703 * Determine string which represents the highest speed
704 * (highest bit in @udma_mask).
710 * Constant C string representing highest speed listed in
711 * @udma_mask, or the constant C string "<n/a>".
714 static const char *ata_mode_string(unsigned int mask)
718 for (i = 7; i >= 0; i--)
721 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
724 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
731 return xfer_mode_str[i];
/* ata_pio_devchk(): classic presence probe -- write 0x55/0xaa patterns
 * to the nsect/lbal shadow registers and read them back; a real device
 * latches and echoes the last pattern.  Returns 1 if found, 0 if not.
 * NOTE(review): the local declarations of nsect/lbal (original line
 * ~756) are missing from this extraction. */
735 * ata_pio_devchk - PATA device presence detection
736 * @ap: ATA channel to examine
737 * @device: Device to examine (starting at zero)
739 * This technique was originally described in
740 * Hale Landis's ATADRVR (www.ata-atapi.com), and
741 * later found its way into the ATA/ATAPI spec.
743 * Write a pattern to the ATA shadow registers,
744 * and if a device is present, it will respond by
745 * correctly storing and echoing back the
746 * ATA shadow register contents.
752 static unsigned int ata_pio_devchk(struct ata_port *ap,
755 struct ata_ioports *ioaddr = &ap->ioaddr;
758 ap->ops->dev_select(ap, device);
/* alternate the test patterns twice, then leave 0x55/0xaa latched */
760 outb(0x55, ioaddr->nsect_addr);
761 outb(0xaa, ioaddr->lbal_addr);
763 outb(0xaa, ioaddr->nsect_addr);
764 outb(0x55, ioaddr->lbal_addr);
766 outb(0x55, ioaddr->nsect_addr);
767 outb(0xaa, ioaddr->lbal_addr);
769 nsect = inb(ioaddr->nsect_addr);
770 lbal = inb(ioaddr->lbal_addr);
772 if ((nsect == 0x55) && (lbal == 0xaa))
773 return 1; /* we found a device */
775 return 0; /* nothing found */
/* ata_mmio_devchk(): MMIO twin of ata_pio_devchk(); identical pattern
 * dance via writeb()/readb().
 * NOTE(review): local declarations of nsect/lbal are missing from this
 * extraction, as in the PIO variant. */
779 * ata_mmio_devchk - PATA device presence detection
780 * @ap: ATA channel to examine
781 * @device: Device to examine (starting at zero)
783 * This technique was originally described in
784 * Hale Landis's ATADRVR (www.ata-atapi.com), and
785 * later found its way into the ATA/ATAPI spec.
787 * Write a pattern to the ATA shadow registers,
788 * and if a device is present, it will respond by
789 * correctly storing and echoing back the
790 * ATA shadow register contents.
796 static unsigned int ata_mmio_devchk(struct ata_port *ap,
799 struct ata_ioports *ioaddr = &ap->ioaddr;
802 ap->ops->dev_select(ap, device);
804 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
805 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
807 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
808 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
810 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
811 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
813 nsect = readb((void __iomem *) ioaddr->nsect_addr);
814 lbal = readb((void __iomem *) ioaddr->lbal_addr);
816 if ((nsect == 0x55) && (lbal == 0xaa))
817 return 1; /* we found a device */
819 return 0; /* nothing found */
823 * ata_devchk - PATA device presence detection
824 * @ap: ATA channel to examine
825 * @device: Device to examine (starting at zero)
827 * Dispatch ATA device presence detection, depending
828 * on whether we are using PIO or MMIO to talk to the
829 * ATA shadow registers.
835 static unsigned int ata_devchk(struct ata_port *ap,
838 if (ap->flags & ATA_FLAG_MMIO)
839 return ata_mmio_devchk(ap, device);
840 return ata_pio_devchk(ap, device);
/* ata_dev_classify(): decide ATA vs ATAPI from the lbam/lbah signature
 * bytes alone (per the Darwin hint below): 00/00 or 3c/c3 => ATA,
 * 14/eb or 69/96 => ATAPI, anything else unknown.
 * NOTE(review): the `return ATA_DEV_ATA;` line of the first branch is
 * missing from this extraction. */
844 * ata_dev_classify - determine device type based on ATA-spec signature
845 * @tf: ATA taskfile register set for device to be identified
847 * Determine from taskfile register contents whether a device is
848 * ATA or ATAPI, as per "Signature and persistence" section
849 * of ATA/PI spec (volume 1, sect 5.14).
855 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
856 * the event of failure.
859 unsigned int ata_dev_classify(struct ata_taskfile *tf)
861 /* Apple's open source Darwin code hints that some devices only
862 * put a proper signature into the LBA mid/high registers,
863 * So, we only check those. It's sufficient for uniqueness.
866 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
867 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
868 DPRINTK("found ATA device by sig\n");
872 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
873 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
874 DPRINTK("found ATAPI device by sig\n");
875 return ATA_DEV_ATAPI;
878 DPRINTK("unknown device\n");
879 return ATA_DEV_UNKNOWN;
/* ata_dev_try_classify(): after a reset event, select the device, read
 * err + taskfile, vet the diagnostic code, then classify via
 * ata_dev_classify().  Stores the result in dev->class.
 * NOTE(review): the diagnostic-code checks and the final return are
 * partially missing from this extraction; err==0x81 on device 0 flags a
 * failed slave per the ATA execute-device-diagnostic convention --
 * verify against the full file. */
883 * ata_dev_try_classify - Parse returned ATA device signature
884 * @ap: ATA channel to examine
885 * @device: Device to examine (starting at zero)
887 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
888 * an ATA/ATAPI-defined set of values is placed in the ATA
889 * shadow registers, indicating the results of device detection
892 * Select the ATA device, and read the values from the ATA shadow
893 * registers. Then parse according to the Error register value,
894 * and the spec-defined values examined by ata_dev_classify().
900 static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
902 struct ata_device *dev = &ap->device[device];
903 struct ata_taskfile tf;
907 ap->ops->dev_select(ap, device);
909 memset(&tf, 0, sizeof(tf));
911 err = ata_chk_err(ap);
912 ap->ops->tf_read(ap, &tf);
914 dev->class = ATA_DEV_NONE;
916 /* see if device passed diags */
919 else if ((device == 0) && (err == 0x81))
924 /* determine if device if ATA or ATAPI */
925 class = ata_dev_classify(&tf);
926 if (class == ATA_DEV_UNKNOWN)
928 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
/* ata_dev_id_string(): copy an ASCII string out of the 16-bit-word
 * IDENTIFY buffer into @s, byte-swapping each word so the result is
 * byte-linear on any endianness.
 * NOTE(review): the entire loop body (original lines ~953-969) is
 * missing from this extraction -- only the signature survives. */
937 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
938 * @id: IDENTIFY DEVICE results we will examine
939 * @s: string into which data is output
940 * @ofs: offset into identify device page
941 * @len: length of string to return. must be an even number.
943 * The strings in the IDENTIFY DEVICE page are broken up into
944 * 16-bit chunks. Run through the string, and output each
945 * 8-bit chunk linearly, regardless of platform.
951 void ata_dev_id_string(u16 *id, unsigned char *s,
952 unsigned int ofs, unsigned int len)
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty: hardware needs no explicit selection */
}
/* ata_std_dev_select(): write ATA_DEVICE_OBS (plus ATA_DEV1 for device
 * 1) to the Device register, MMIO or PIO, then ata_pause() to let the
 * selection settle (the pause also flushes posted MMIO writes).
 * NOTE(review): the `if (device == 0) ... else` skeleton around the two
 * tmp assignments and the local tmp declaration are missing from this
 * extraction. */
989 * ata_std_dev_select - Select device 0/1 on ATA bus
990 * @ap: ATA channel to manipulate
991 * @device: ATA device (numbered from zero) to select
993 * Use the method defined in the ATA specification to
994 * make either device 0, or device 1, active on the
995 * ATA channel. Works with both PIO and MMIO.
997 * May be used as the dev_select() entry in ata_port_operations.
1003 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1008 tmp = ATA_DEVICE_OBS;
1010 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1012 if (ap->flags & ATA_FLAG_MMIO) {
1013 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
1015 outb(tmp, ap->ioaddr.device_addr);
1017 ata_pause(ap); /* needed; also flushes, for mmio */
/* ata_dev_select(): high-level device selection -- delegates to the
 * port's dev_select() op and adds pauses/status polling around it.
 * NOTE(review): the wait/poll statements implied by @wait and the body
 * following the ATAPI check are missing from this extraction. */
1021 * ata_dev_select - Select device 0/1 on ATA bus
1022 * @ap: ATA channel to manipulate
1023 * @device: ATA device (numbered from zero) to select
1024 * @wait: non-zero to wait for Status register BSY bit to clear
1025 * @can_sleep: non-zero if context allows sleeping
1027 * Use the method defined in the ATA specification to
1028 * make either device 0, or device 1, active on the
1031 * This is a high-level version of ata_std_dev_select(),
1032 * which additionally provides the services of inserting
1033 * the proper pauses and status polling, where needed.
1039 void ata_dev_select(struct ata_port *ap, unsigned int device,
1040 unsigned int wait, unsigned int can_sleep)
1042 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
1043 ap->id, device, wait);
1048 ap->ops->dev_select(ap, device);
/* ATAPI devices may need extra settle time when sleeping is allowed */
1051 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
/* ata_dump_id(): debug-only dump of selected IDENTIFY words (49, 80,
 * 88, ...) via DPRINTK.
 * NOTE(review): the format-string continuations and argument lists are
 * missing from this extraction -- only the opening lines survive. */
1058 * ata_dump_id - IDENTIFY DEVICE info debugging output
1059 * @dev: Device whose IDENTIFY DEVICE page we will dump
1061 * Dump selected 16-bit words from a detected device's
1062 * IDENTIFY PAGE page.
1068 static inline void ata_dump_id(struct ata_device *dev)
1070 DPRINTK("49==0x%04x "
1080 DPRINTK("80==0x%04x "
1090 DPRINTK("88==0x%04x "
/* ata_dev_identify(): issue IDENTIFY [PACKET] DEVICE via a hand-built
 * PIO queued command, wait for completion, then parse the 512-byte page
 * to fill in dev->n_sectors / CHS geometry / flags and the host's
 * max_cmd_len (16 for ATA, CDB length for ATAPI).  Unsupported devices
 * are bumped to the *_UNSUP class at err_out.
 * NOTE(review): this is the most heavily gutted block of the extraction
 * -- error paths, several branch skeletons and the retry logic are
 * missing.  Kept byte-identical. */
1097 * ata_dev_identify - obtain IDENTIFY x DEVICE page
1098 * @ap: port on which device we wish to probe resides
1099 * @device: device bus address, starting at zero
1101 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
1102 * command, and read back the 512-byte device information page.
1103 * The device information page is fed to us via the standard
1104 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
1105 * using standard PIO-IN paths)
1107 * After reading the device information page, we use several
1108 * bits of information from it to initialize data structures
1109 * that will be used during the lifetime of the ata_device.
1110 * Other data from the info page is used to disqualify certain
1111 * older ATA devices we do not wish to support.
1114 * Inherited from caller. Some functions called by this function
1115 * obtain the host_set lock.
1118 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1120 struct ata_device *dev = &ap->device[device];
1121 unsigned int major_version;
1123 unsigned long xfer_modes;
1125 unsigned int using_edd;
1126 DECLARE_COMPLETION(wait);
1127 struct ata_queued_cmd *qc;
1128 unsigned long flags;
1131 if (!ata_dev_present(dev)) {
1132 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
/* using_edd: was the port reset via EDD rather than SRST/SATA reset? */
1137 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1142 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1144 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1145 dev->class == ATA_DEV_NONE);
1147 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
/* build a one-off PIO-IN queued command targeting dev->id[] */
1149 qc = ata_qc_new_init(ap, dev);
1152 ata_sg_init_one(qc, dev->id, sizeof(dev->id));
1153 qc->dma_dir = DMA_FROM_DEVICE;
1154 qc->tf.protocol = ATA_PROT_PIO;
1158 if (dev->class == ATA_DEV_ATA) {
1159 qc->tf.command = ATA_CMD_ID_ATA;
1160 DPRINTK("do ATA identify\n");
1162 qc->tf.command = ATA_CMD_ID_ATAPI;
1163 DPRINTK("do ATAPI identify\n");
1166 qc->waiting = &wait;
1167 qc->complete_fn = ata_qc_complete_noop;
1169 spin_lock_irqsave(&ap->host_set->lock, flags);
1170 rc = ata_qc_issue(qc);
1171 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1176 wait_for_completion(&wait);
1178 status = ata_chk_status(ap);
1179 if (status & ATA_ERR) {
1181 * arg! EDD works for all test cases, but seems to return
1182 * the ATA signature for some ATAPI devices. Until the
1183 * reason for this is found and fixed, we fix up the mess
1184 * here. If IDENTIFY DEVICE returns command aborted
1185 * (as ATAPI devices do), then we issue an
1186 * IDENTIFY PACKET DEVICE.
1188 * ATA software reset (SRST, the default) does not appear
1189 * to have this problem.
1191 if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
1192 u8 err = ata_chk_err(ap);
1193 if (err & ATA_ABORTED) {
1194 dev->class = ATA_DEV_ATAPI;
/* IDENTIFY data arrives little-endian; fix up on big-endian hosts */
1205 swap_buf_le16(dev->id, ATA_ID_WORDS);
1207 /* print device capabilities */
1208 printk(KERN_DEBUG "ata%u: dev %u cfg "
1209 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1210 ap->id, device, dev->id[49],
1211 dev->id[82], dev->id[83], dev->id[84],
1212 dev->id[85], dev->id[86], dev->id[87],
1216 * common ATA, ATAPI feature tests
1219 /* we require DMA support (bits 8 of word 49) */
1220 if (!ata_id_has_dma(dev->id)) {
1221 printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
1225 /* quick-n-dirty find max transfer mode; for printk only */
1226 xfer_modes = dev->id[ATA_ID_UDMA_MODES];
1228 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1230 xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
1231 xfer_modes |= (0x7 << ATA_SHIFT_PIO);
1236 /* ATA-specific feature tests */
1237 if (dev->class == ATA_DEV_ATA) {
1238 if (!ata_id_is_ata(dev->id)) /* sanity check */
1241 /* get major version */
1242 tmp = dev->id[ATA_ID_MAJOR_VER];
1243 for (major_version = 14; major_version >= 1; major_version--)
1244 if (tmp & (1 << major_version))
1248 * The exact sequence expected by certain pre-ATA4 drives is:
1251 * INITIALIZE DEVICE PARAMETERS
1253 * Some drives were very specific about that exact sequence.
1255 if (major_version < 4 || (!ata_id_has_lba(dev->id)))
1256 ata_dev_init_params(ap, dev);
1258 if (ata_id_has_lba(dev->id)) {
1259 dev->flags |= ATA_DFLAG_LBA;
1261 if (ata_id_has_lba48(dev->id)) {
1262 dev->flags |= ATA_DFLAG_LBA48;
1263 dev->n_sectors = ata_id_u64(dev->id, 100);
1265 dev->n_sectors = ata_id_u32(dev->id, 60);
1268 /* print device info to dmesg */
1269 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
1272 ata_mode_string(xfer_modes),
1273 (unsigned long long)dev->n_sectors,
1274 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
1278 /* Default translation */
1279 dev->cylinders = dev->id[1];
1280 dev->heads = dev->id[3];
1281 dev->sectors = dev->id[6];
1282 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1284 if (ata_id_current_chs_valid(dev->id)) {
1285 /* Current CHS translation is valid. */
1286 dev->cylinders = dev->id[54];
1287 dev->heads = dev->id[55];
1288 dev->sectors = dev->id[56];
1290 dev->n_sectors = ata_id_u32(dev->id, 57);
1293 /* print device info to dmesg */
1294 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
1297 ata_mode_string(xfer_modes),
1298 (unsigned long long)dev->n_sectors,
1299 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
1303 ap->host->max_cmd_len = 16;
1306 /* ATAPI-specific feature tests */
1308 if (ata_id_is_ata(dev->id)) /* sanity check */
1311 rc = atapi_cdb_len(dev->id);
1312 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1313 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1316 ap->cdb_len = (unsigned int) rc;
1317 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1319 /* print device info to dmesg */
1320 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1322 ata_mode_string(xfer_modes));
1325 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1329 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1332 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1333 DPRINTK("EXIT, err\n");
1337 static inline u8 ata_dev_knobble(struct ata_port *ap)
1339 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
/* ata_dev_config(): apply bridge limits (UDMA5 cap, ATA_MAX_SECTORS,
 * locked sector count) when a PATA device sits behind a SATA bridge,
 * then run the driver's optional dev_config() hook for device @i. */
1343 * ata_dev_config - Run device specific handlers and check for
1344 * SATA->PATA bridges
1351 void ata_dev_config(struct ata_port *ap, unsigned int i)
1353 /* limit bridge transfers to udma5, 200 sectors */
1354 if (ata_dev_knobble(ap)) {
1355 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1356 ap->id, ap->device->devno);
1357 ap->udma_mask &= ATA_UDMA5;
1358 ap->host->max_sectors = ATA_MAX_SECTORS;
1359 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
1360 ap->device->flags |= ATA_DFLAG_LOCK_SECTORS;
1363 if (ap->ops->dev_config)
1364 ap->ops->dev_config(ap, &ap->device[i]);
/* ata_bus_probe(): reset the PHY, then identify + configure each of the
 * ATA_MAX_DEVICES slots; disable the port if nothing was found or the
 * reset left it disabled.  Returns 0 on success, non-zero on error.
 * NOTE(review): the set-mode step between the loop and the second
 * disabled-check, and the success return, are missing from this
 * extraction. */
1368 * ata_bus_probe - Reset and probe ATA bus
1371 * Master ATA bus probing function. Initiates a hardware-dependent
1372 * bus reset, then attempts to identify any devices found on
1376 * PCI/etc. bus probe sem.
1379 * Zero on success, non-zero on error.
1382 static int ata_bus_probe(struct ata_port *ap)
1384 unsigned int i, found = 0;
1386 ap->ops->phy_reset(ap);
1387 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1390 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1391 ata_dev_identify(ap, i);
1392 if (ata_dev_present(&ap->device[i])) {
1394 ata_dev_config(ap,i);
1398 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1399 goto err_out_disable;
1402 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1403 goto err_out_disable;
1408 ap->ops->port_disable(ap);
1414 * ata_port_probe - Mark port as enabled
1415 * @ap: Port for which we indicate enablement
1417 * Modify @ap data structure such that the system
1418 * thinks that the entire port is enabled.
1420 * LOCKING: host_set lock, or some other form of
1424 void ata_port_probe(struct ata_port *ap)
1426 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
/*
 * __sata_phy_reset: optionally strobe SCR_CONTROL to wake/reset the
 * PHY, poll SCR_STATUS (DET field) for up to 5 s waiting for an
 * established link, then wait for !BSY.  Disables the port when no
 * device is present or the busy-wait times out.
 * NOTE(review): elided listing -- delays, "onlined"/return paths and
 * braces are missing from the visible text.
 */
1430 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1431 * @ap: SATA port associated with target SATA PHY.
1433 * This function issues commands to standard SATA Sxxx
1434 * PHY registers, to wake up the phy (and device), and
1435 * clear any reset condition.
1438 * PCI/etc. bus probe sem.
1441 void __sata_phy_reset(struct ata_port *ap)
1444 unsigned long timeout = jiffies + (HZ * 5);
1446 if (ap->flags & ATA_FLAG_SATA_RESET) {
1447 /* issue phy wake/reset */
1448 scr_write_flush(ap, SCR_CONTROL, 0x301);
1449 /* Couldn't find anything in SATA I/II specs, but
1450 * AHCI-1.1 10.4.2 says at least 1 ms. */
1453 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1455 /* wait for phy to become ready, if necessary */
/* DET == 1 means "device detected but link not yet established"; keep polling */
1458 sstatus = scr_read(ap, SCR_STATUS);
1459 if ((sstatus & 0xf) != 1)
1461 } while (time_before(jiffies, timeout));
1463 /* TODO: phy layer with polling, timeouts, etc. */
1464 if (sata_dev_present(ap))
1467 sstatus = scr_read(ap, SCR_STATUS);
1468 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1470 ata_port_disable(ap);
1473 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1476 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1477 ata_port_disable(ap);
/* link is up and device is ready: record that this port uses a SATA cable */
1481 ap->cbl = ATA_CBL_SATA;
/*
 * sata_phy_reset: wake the PHY, then bail out if that disabled the
 * port.  The probe step that follows the check is elided from this
 * excerpt (presumably ata_bus_reset -- confirm against full source).
 */
1485 * sata_phy_reset - Reset SATA bus.
1486 * @ap: SATA port associated with target SATA PHY.
1488 * This function resets the SATA bus, and then probes
1489 * the bus for devices.
1492 * PCI/etc. bus probe sem.
1495 void sata_phy_reset(struct ata_port *ap)
1497 __sata_phy_reset(ap);
1498 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1504 * ata_port_disable - Disable port.
1505 * @ap: Port to be disabled.
1507 * Modify @ap data structure such that the system
1508 * thinks that the entire port is disabled, and should
1509 * never attempt to probe or communicate with devices
1512 * LOCKING: host_set lock, or some other form of
1516 void ata_port_disable(struct ata_port *ap)
1518 ap->device[0].class = ATA_DEV_NONE;
1519 ap->device[1].class = ATA_DEV_NONE;
1520 ap->flags |= ATA_FLAG_PORT_DISABLED;
/*
 * Transfer-mode classes in selection priority order (UDMA preferred,
 * then MWDMA, then PIO): maps each ATA_SHIFT_* mask class to the base
 * XFER_* code of that class.
 * NOTE(review): the struct header lines are elided from this excerpt.
 */
1526 } xfer_mode_classes[] = {
1527 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1528 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1529 { ATA_SHIFT_PIO, XFER_PIO_0 },
/*
 * base_from_shift: look up the base XFER_* code for a mask class
 * (ATA_SHIFT_UDMA/MWDMA/PIO) in xfer_mode_classes[].
 * NOTE(review): the not-found return after the loop is elided here.
 */
1532 static inline u8 base_from_shift(unsigned int shift)
1536 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1537 if (xfer_mode_classes[i].shift == shift)
1538 return xfer_mode_classes[i].base;
/*
 * ata_dev_set_mode: program an individual device's transfer mode
 * (SET FEATURES - XFER MODE via ata_dev_set_xfermode) and log the
 * human-readable mode name.  No-op for absent devices or a disabled
 * port.
 */
1543 static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1548 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1551 if (dev->xfer_shift == ATA_SHIFT_PIO)
1552 dev->flags |= ATA_DFLAG_PIO;
1554 ata_dev_set_xfermode(ap, dev);
/* index into xfer_mode_str[]: distance from the class base plus the class shift */
1556 base = base_from_shift(dev->xfer_shift);
1557 ofs = dev->xfer_mode - base;
1558 idx = ofs + dev->xfer_shift;
1559 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
1561 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
1562 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
1564 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1565 ap->id, dev->devno, xfer_mode_str[idx]);
/*
 * ata_host_set_pio: compute the usable PIO mask for the port, pick a
 * mode (selection of `x` from the mask is elided in this excerpt),
 * and apply it to every present device via the LLD's set_piomode hook.
 * Warns and fails when no PIO mode is supported.
 */
1568 static int ata_host_set_pio(struct ata_port *ap)
1574 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1577 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1581 base = base_from_shift(ATA_SHIFT_PIO);
1582 xfer_mode = base + x;
1584 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1585 (int)base, (int)xfer_mode, mask, x);
1587 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1588 struct ata_device *dev = &ap->device[i];
1589 if (ata_dev_present(dev)) {
1590 dev->pio_mode = xfer_mode;
1591 dev->xfer_mode = xfer_mode;
1592 dev->xfer_shift = ATA_SHIFT_PIO;
1593 if (ap->ops->set_piomode)
1594 ap->ops->set_piomode(ap, dev);
/*
 * ata_host_set_dma: record the chosen DMA xfer mode/class on every
 * present device and program the controller timings via the LLD's
 * set_dmamode hook.
 */
1601 static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1602 unsigned int xfer_shift)
1606 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1607 struct ata_device *dev = &ap->device[i];
1608 if (ata_dev_present(dev)) {
1609 dev->dma_mode = xfer_mode;
1610 dev->xfer_mode = xfer_mode;
1611 dev->xfer_shift = xfer_shift;
1612 if (ap->ops->set_dmamode)
1613 ap->ops->set_dmamode(ap, dev);
/*
 * ata_set_mode: four-step mode programming -- host PIO timings, best
 * xfer mode selection, host DMA timings (when not PIO), then per-
 * device SET FEATURES.  On any failure the port is disabled via the
 * err_out path at the bottom.
 * NOTE(review): elided listing -- rc checks/gotos after steps 1 and 2
 * are missing from the visible text.
 */
1619 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1620 * @ap: port on which timings will be programmed
1622 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1625 * PCI/etc. bus probe sem.
1628 static void ata_set_mode(struct ata_port *ap)
1630 unsigned int i, xfer_shift;
1634 /* step 1: always set host PIO timings */
1635 rc = ata_host_set_pio(ap);
1639 /* step 2: choose the best data xfer mode */
1640 xfer_mode = xfer_shift = 0;
1641 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1645 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1646 if (xfer_shift != ATA_SHIFT_PIO)
1647 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1649 /* step 4: update devices' xfer mode */
1650 ata_dev_set_mode(ap, &ap->device[0]);
1651 ata_dev_set_mode(ap, &ap->device[1]);
1653 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1656 if (ap->ops->post_set_mode)
1657 ap->ops->post_set_mode(ap);
1659 for (i = 0; i < 2; i++) {
1660 struct ata_device *dev = &ap->device[i];
1661 ata_dev_set_protocol(dev);
/* error label target: any failure above lands here and kills the port */
1667 ata_port_disable(ap);
/*
 * ata_busy_sleep: two-phase BSY poll.  Phase one polls up to
 * @tmout_pat and prints a "slow to respond" warning if still busy;
 * phase two continues up to @tmout and reports hard failure.
 * Returns non-zero when the device never cleared BSY (the return
 * statements themselves are elided from this excerpt).
 */
1671 * ata_busy_sleep - sleep until BSY clears, or timeout
1672 * @ap: port containing status register to be polled
1673 * @tmout_pat: impatience timeout
1674 * @tmout: overall timeout
1676 * Sleep until ATA Status register bit BSY clears,
1677 * or a timeout occurs.
1683 static unsigned int ata_busy_sleep (struct ata_port *ap,
1684 unsigned long tmout_pat,
1685 unsigned long tmout)
1687 unsigned long timer_start, timeout;
1690 status = ata_busy_wait(ap, ATA_BUSY, 300);
1691 timer_start = jiffies;
1692 timeout = timer_start + tmout_pat;
1693 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1695 status = ata_busy_wait(ap, ATA_BUSY, 3);
1698 if (status & ATA_BUSY)
1699 printk(KERN_WARNING "ata%u is slow to respond, "
1700 "please be patient\n", ap->id);
1702 timeout = timer_start + tmout;
1703 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1705 status = ata_chk_status(ap);
1708 if (status & ATA_BUSY) {
1709 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1710 ap->id, tmout / HZ);
/*
 * ata_bus_post_reset: wait for devices found by ata_devchk to come
 * back after a bus reset.  Device 1 readiness is detected by reading
 * back the 0x01/0x01 signature in nsect/lbal; then both devices are
 * (re)selected.
 * NOTE(review): elided listing -- dev0/dev1 guard conditions and some
 * braces are missing from the visible text.
 */
1717 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1719 struct ata_ioports *ioaddr = &ap->ioaddr;
1720 unsigned int dev0 = devmask & (1 << 0);
1721 unsigned int dev1 = devmask & (1 << 1);
1722 unsigned long timeout;
1724 /* if device 0 was found in ata_devchk, wait for its
1728 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1730 /* if device 1 was found in ata_devchk, wait for
1731 * register access, then wait for BSY to clear
1733 timeout = jiffies + ATA_TMOUT_BOOT;
1737 ap->ops->dev_select(ap, 1);
1738 if (ap->flags & ATA_FLAG_MMIO) {
1739 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1740 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1742 nsect = inb(ioaddr->nsect_addr);
1743 lbal = inb(ioaddr->lbal_addr);
1745 if ((nsect == 1) && (lbal == 1))
1747 if (time_after(jiffies, timeout)) {
1751 msleep(50); /* give drive a breather */
1754 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1756 /* is all this really necessary? */
1757 ap->ops->dev_select(ap, 0);
1759 ap->ops->dev_select(ap, 1);
1761 ap->ops->dev_select(ap, 0);
/*
 * ata_bus_edd: issue EXECUTE DEVICE DIAGNOSTIC as a raw taskfile and
 * wait for BSY to clear; the return value is ata_busy_sleep's
 * timeout indication.
 */
1765 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
1766 * @ap: Port to reset and probe
1768 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
1769 * probe the bus. Not often used these days.
1772 * PCI/etc. bus probe sem.
1776 static unsigned int ata_bus_edd(struct ata_port *ap)
1778 struct ata_taskfile tf;
1780 /* set up execute-device-diag (bus reset) taskfile */
1781 /* also, take interrupts to a known state (disabled) */
1782 DPRINTK("execute-device-diag\n");
1783 ata_tf_init(ap, &tf, 0);
1785 tf.command = ATA_CMD_EDD;
1786 tf.protocol = ATA_PROT_NODATA;
1789 ata_tf_to_host(ap, &tf);
1791 /* spec says at least 2ms. but who knows with those
1792 * crazy ATAPI devices...
1796 return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/*
 * ata_bus_softreset: pulse SRST in the device control register (MMIO
 * or port I/O variant), wait the traditional 150 ms settle delay,
 * then run the post-reset waits.  The final return is elided from
 * this excerpt.
 */
1799 static unsigned int ata_bus_softreset(struct ata_port *ap,
1800 unsigned int devmask)
1802 struct ata_ioports *ioaddr = &ap->ioaddr;
1804 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1806 /* software reset. causes dev0 to be selected */
1807 if (ap->flags & ATA_FLAG_MMIO) {
1808 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1809 udelay(20); /* FIXME: flush */
1810 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
1811 udelay(20); /* FIXME: flush */
1812 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
/* port-I/O variant of the same SRST pulse (udelay lines elided) */
1814 outb(ap->ctl, ioaddr->ctl_addr);
1816 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1818 outb(ap->ctl, ioaddr->ctl_addr);
1821 /* spec mandates ">= 2ms" before checking status.
1822 * We wait 150ms, because that was the magic delay used for
1823 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
1824 * between when the ATA command register is written, and then
1825 * status is checked. Because waiting for "a while" before
1826 * checking status is fine, post SRST, we perform this magic
1827 * delay here as well.
1831 ata_bus_post_reset(ap, devmask);
/*
 * ata_bus_reset: classic taskfile bus reset.  Detect device 0/1 with
 * ata_devchk, issue SRST or EDD, classify devices by signature,
 * restore device control / interrupts, and disable the port when
 * nothing was found or the reset failed.
 * NOTE(review): elided listing -- error checks on `rc`, the
 * interrupt re-enable write, labels and returns are missing from the
 * visible text.
 */
1837 * ata_bus_reset - reset host port and associated ATA channel
1838 * @ap: port to reset
1840 * This is typically the first time we actually start issuing
1841 * commands to the ATA channel. We wait for BSY to clear, then
1842 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
1843 * result. Determine what devices, if any, are on the channel
1844 * by looking at the device 0/1 error register. Look at the signature
1845 * stored in each device's taskfile registers, to determine if
1846 * the device is ATA or ATAPI.
1849 * PCI/etc. bus probe sem.
1850 * Obtains host_set lock.
1853 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
1856 void ata_bus_reset(struct ata_port *ap)
1858 struct ata_ioports *ioaddr = &ap->ioaddr;
1859 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1861 unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
1863 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
1865 /* determine if device 0/1 are present */
1866 if (ap->flags & ATA_FLAG_SATA_RESET)
1869 dev0 = ata_devchk(ap, 0);
1871 dev1 = ata_devchk(ap, 1);
1875 devmask |= (1 << 0);
1877 devmask |= (1 << 1);
1879 /* select device 0 again */
1880 ap->ops->dev_select(ap, 0);
1882 /* issue bus reset */
1883 if (ap->flags & ATA_FLAG_SRST)
1884 rc = ata_bus_softreset(ap, devmask);
1885 else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
1886 /* set up device control */
1887 if (ap->flags & ATA_FLAG_MMIO)
1888 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1890 outb(ap->ctl, ioaddr->ctl_addr);
1891 rc = ata_bus_edd(ap);
1898 * determine by signature whether we have ATA or ATAPI devices
/* 0x81 from dev0 classification means "no device 1" per the diag code convention */
1900 err = ata_dev_try_classify(ap, 0);
1901 if ((slave_possible) && (err != 0x81))
1902 ata_dev_try_classify(ap, 1);
1904 /* re-enable interrupts */
1905 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
1908 /* is double-select really necessary? */
1909 if (ap->device[1].class != ATA_DEV_NONE)
1910 ap->ops->dev_select(ap, 1);
1911 if (ap->device[0].class != ATA_DEV_NONE)
1912 ap->ops->dev_select(ap, 0);
1914 /* if no devices were detected, disable this port */
1915 if ((ap->device[0].class == ATA_DEV_NONE) &&
1916 (ap->device[1].class == ATA_DEV_NONE))
1919 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
1920 /* set up device control for ATA_FLAG_SATA_RESET */
1921 if (ap->flags & ATA_FLAG_MMIO)
1922 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1924 outb(ap->ctl, ioaddr->ctl_addr);
/* error label target: reset failed or bus is empty */
1931 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
1932 ap->ops->port_disable(ap);
1937 static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
1939 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
1940 ap->id, dev->devno);
/*
 * IDENTIFY model strings of devices with known-broken DMA; matched by
 * ata_dma_blacklisted().  NOTE(review): most entries are elided from
 * this excerpt.
 */
1943 static const char * ata_dma_blacklist [] = {
1962 "Toshiba CD-ROM XM-6202B",
1963 "TOSHIBA CD-ROM XM-1702BC",
1965 "E-IDE CD-ROM CR-840",
1968 "SAMSUNG CD-ROM SC-148C",
1969 "SAMSUNG CD-ROM SC",
1971 "ATAPI CD-ROM DRIVE 40X MAXIMUM",
/*
 * ata_dma_blacklisted: extract the model string from IDENTIFY data,
 * strip trailing blanks (ATAPI pads with spaces), and report whether
 * it matches an entry in ata_dma_blacklist[].  The final return for
 * the no-match case is elided from this excerpt.
 */
1975 static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
1977 unsigned char model_num[40];
1982 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
1985 len = strnlen(s, sizeof(model_num));
1987 /* ATAPI specifies that empty space is blank-filled; remove blanks */
1988 while ((len > 0) && (s[len - 1] == ' ')) {
1993 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
1994 if (!strncmp(ata_dma_blacklist[i], s, len))
/*
 * ata_get_mode_mask: intersect the port's capability mask for the
 * given class (UDMA word 88, MWDMA word 63, PIO word 64 with PIO0-2
 * assumed) with each present device's IDENTIFY data.  Blacklisted
 * devices clear the DMA bits (the mask-clearing statements inside the
 * blacklist branches are elided from this excerpt).
 */
2000 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
2002 struct ata_device *master, *slave;
2005 master = &ap->device[0];
2006 slave = &ap->device[1];
2008 assert (ata_dev_present(master) || ata_dev_present(slave));
2010 if (shift == ATA_SHIFT_UDMA) {
2011 mask = ap->udma_mask;
2012 if (ata_dev_present(master)) {
2013 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
2014 if (ata_dma_blacklisted(ap, master)) {
2016 ata_pr_blacklisted(ap, master);
2019 if (ata_dev_present(slave)) {
2020 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
2021 if (ata_dma_blacklisted(ap, slave)) {
2023 ata_pr_blacklisted(ap, slave);
2027 else if (shift == ATA_SHIFT_MWDMA) {
2028 mask = ap->mwdma_mask;
2029 if (ata_dev_present(master)) {
2030 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2031 if (ata_dma_blacklisted(ap, master)) {
2033 ata_pr_blacklisted(ap, master);
2036 if (ata_dev_present(slave)) {
2037 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2038 if (ata_dma_blacklisted(ap, slave)) {
2040 ata_pr_blacklisted(ap, slave);
2044 else if (shift == ATA_SHIFT_PIO) {
2045 mask = ap->pio_mask;
2046 if (ata_dev_present(master)) {
2047 /* spec doesn't return explicit support for
2048 * PIO0-2, so we fake it
2050 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
2055 if (ata_dev_present(slave)) {
2056 /* spec doesn't return explicit support for
2057 * PIO0-2, so we fake it
2059 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
/* unknown shift class falls through to an all-ones mask */
2066 mask = 0xffffffff; /* shut up compiler warning */
/*
 * fgb: scan all 32 bit positions and return the index of the highest
 * set bit.  NOTE(review): the accumulator assignment inside the loop
 * and the return (presumably -1 when no bit is set) are elided from
 * this excerpt -- confirm against full source.
 */
2073 /* find greatest bit */
2074 static int fgb(u32 bitmap)
2079 for (i = 0; i < 32; i++)
2080 if (bitmap & (1 << i))
/*
 * ata_choose_xfer_mode: walk xfer_mode_classes[] in priority order
 * (UDMA, MWDMA, PIO); the first class with a non-empty combined mask
 * wins, and the outputs are base XFER code plus the highest set mask
 * bit.  The failure return for "no class usable" is elided here.
 */
2087 * ata_choose_xfer_mode - attempt to find best transfer mode
2088 * @ap: Port for which an xfer mode will be selected
2089 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
2090 * @xfer_shift_out: (output) bit shift that selects this mode
2092 * Based on host and device capabilities, determine the
2093 * maximum transfer mode that is amenable to all.
2096 * PCI/etc. bus probe sem.
2099 * Zero on success, negative on error.
2102 static int ata_choose_xfer_mode(struct ata_port *ap,
2104 unsigned int *xfer_shift_out)
2106 unsigned int mask, shift;
2109 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
2110 shift = xfer_mode_classes[i].shift;
2111 mask = ata_get_mode_mask(ap, shift);
2115 *xfer_mode_out = xfer_mode_classes[i].base + x;
2116 *xfer_shift_out = shift;
/*
 * ata_dev_set_xfermode: synchronously issue SET FEATURES - XFER MODE
 * using an internal queued command (mode code goes in the sector
 * count register), blocking on a completion until the command
 * finishes.
 */
2125 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2126 * @ap: Port associated with device @dev
2127 * @dev: Device to which command will be sent
2129 * Issue SET FEATURES - XFER MODE command to device @dev
2133 * PCI/etc. bus probe sem.
2136 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2138 DECLARE_COMPLETION(wait);
2139 struct ata_queued_cmd *qc;
2141 unsigned long flags;
2143 /* set up set-features taskfile */
2144 DPRINTK("set features - xfer mode\n");
2146 qc = ata_qc_new_init(ap, dev);
2149 qc->tf.command = ATA_CMD_SET_FEATURES;
2150 qc->tf.feature = SETFEATURES_XFER;
2151 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2152 qc->tf.protocol = ATA_PROT_NODATA;
2153 qc->tf.nsect = dev->xfer_mode;
2155 qc->waiting = &wait;
2156 qc->complete_fn = ata_qc_complete_noop;
2158 spin_lock_irqsave(&ap->host_set->lock, flags);
2159 rc = ata_qc_issue(qc);
2160 spin_unlock_irqrestore(&ap->host_set->lock, flags);
/* issue failure disables the port; the `if (rc)`/else around these two lines is elided */
2163 ata_port_disable(ap);
2165 wait_for_completion(&wait);
/*
 * ata_dev_init_params: synchronously issue INITIALIZE DEVICE
 * PARAMETERS with CHS geometry taken from IDENTIFY words 6 (sectors
 * per track) and 3 (heads); silently skipped when the values are out
 * of the legal 1-255 / 1-16 ranges.
 */
2171 * ata_dev_init_params - Issue INIT DEV PARAMS command
2172 * @ap: Port associated with device @dev
2173 * @dev: Device to which command will be sent
2178 static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2180 DECLARE_COMPLETION(wait);
2181 struct ata_queued_cmd *qc;
2183 unsigned long flags;
2184 u16 sectors = dev->id[6];
2185 u16 heads = dev->id[3];
2187 /* Number of sectors per track 1-255. Number of heads 1-16 */
2188 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2191 /* set up init dev params taskfile */
2192 DPRINTK("init dev params \n");
2194 qc = ata_qc_new_init(ap, dev);
2197 qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
2198 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2199 qc->tf.protocol = ATA_PROT_NODATA;
2200 qc->tf.nsect = sectors;
2201 qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2203 qc->waiting = &wait;
2204 qc->complete_fn = ata_qc_complete_noop;
2206 spin_lock_irqsave(&ap->host_set->lock, flags);
2207 rc = ata_qc_issue(qc);
2208 spin_unlock_irqrestore(&ap->host_set->lock, flags);
/* issue failure disables the port; the `if (rc)`/else around these two lines is elided */
2211 ata_port_disable(ap);
2213 wait_for_completion(&wait);
/*
 * ata_sg_clean: undo the DMA mapping created by ata_sg_setup{,_one}:
 * dma_unmap_sg for multi-element tables, dma_unmap_single for the
 * embedded single-entry case; then drop the DMAMAP flag.
 */
2219 * ata_sg_clean - Unmap DMA memory associated with command
2220 * @qc: Command containing DMA memory to be released
2222 * Unmap all mapped DMA memory associated with this command.
2225 * spin_lock_irqsave(host_set lock)
2228 static void ata_sg_clean(struct ata_queued_cmd *qc)
2230 struct ata_port *ap = qc->ap;
2231 struct scatterlist *sg = qc->sg;
2232 int dir = qc->dma_dir;
2234 assert(qc->flags & ATA_QCFLAG_DMAMAP);
2237 if (qc->flags & ATA_QCFLAG_SINGLE)
2238 assert(qc->n_elem == 1);
2240 DPRINTK("unmapping %u sg elements\n", qc->n_elem);
2242 if (qc->flags & ATA_QCFLAG_SG)
2243 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2245 dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
2246 sg_dma_len(&sg[0]), dir);
2248 qc->flags &= ~ATA_QCFLAG_DMAMAP;
/*
 * ata_fill_sg: translate the mapped sg list into the port's PRD
 * table, splitting any segment that crosses a 64 KB boundary, and
 * mark the last entry with ATA_PRD_EOT.
 * NOTE(review): elided listing -- the idx initialization/increment
 * and the inner splitting loop structure are missing from view.
 */
2253 * ata_fill_sg - Fill PCI IDE PRD table
2254 * @qc: Metadata associated with taskfile to be transferred
2256 * Fill PCI IDE PRD (scatter-gather) table with segments
2257 * associated with the current disk command.
2260 * spin_lock_irqsave(host_set lock)
2263 static void ata_fill_sg(struct ata_queued_cmd *qc)
2265 struct scatterlist *sg = qc->sg;
2266 struct ata_port *ap = qc->ap;
2267 unsigned int idx, nelem;
2270 assert(qc->n_elem > 0);
2273 for (nelem = qc->n_elem; nelem; nelem--,sg++) {
2277 /* determine if physical DMA addr spans 64K boundary.
2278 * Note h/w doesn't support 64-bit, so we unconditionally
2279 * truncate dma_addr_t to u32.
2281 addr = (u32) sg_dma_address(sg);
2282 sg_len = sg_dma_len(sg);
2285 offset = addr & 0xffff;
2287 if ((offset + sg_len) > 0x10000)
2288 len = 0x10000 - offset;
2290 ap->prd[idx].addr = cpu_to_le32(addr);
2291 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2292 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
/* hardware requires the end-of-table bit on the final PRD entry */
2301 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
/*
 * ata_check_atapi_dma: defer to the LLD's optional check_atapi_dma
 * hook; without one, DMA is allowed (rc stays 0).
 */
2304 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2305 * @qc: Metadata associated with taskfile to check
2307 * Allow low-level driver to filter ATA PACKET commands, returning
2308 * a status indicating whether or not it is OK to use DMA for the
2309 * supplied PACKET command.
2312 * spin_lock_irqsave(host_set lock)
2314 * RETURNS: 0 when ATAPI DMA can be used
2317 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2319 struct ata_port *ap = qc->ap;
2320 int rc = 0; /* Assume ATAPI DMA is OK by default */
2322 if (ap->ops->check_atapi_dma)
2323 rc = ap->ops->check_atapi_dma(qc);
/*
 * ata_qc_prep: build the PRD table only for DMA-mapped commands; the
 * ata_fill_sg call in the non-early-return path is elided from this
 * excerpt.
 */
2328 * ata_qc_prep - Prepare taskfile for submission
2329 * @qc: Metadata associated with taskfile to be prepared
2331 * Prepare ATA taskfile for submission.
2334 * spin_lock_irqsave(host_set lock)
2336 void ata_qc_prep(struct ata_queued_cmd *qc)
2338 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
/*
 * ata_sg_init_one: point qc->sg at the embedded single-entry sgent
 * and describe the flat buffer @buf in it (page, in-page offset,
 * length).  Assignments of n_elem/buf_virt between the visible lines
 * are elided from this excerpt.
 */
2345 * ata_sg_init_one - Associate command with memory buffer
2346 * @qc: Command to be associated
2347 * @buf: Memory buffer
2348 * @buflen: Length of memory buffer, in bytes.
2350 * Initialize the data-related elements of queued_cmd @qc
2351 * to point to a single memory buffer, @buf of byte length @buflen.
2354 * spin_lock_irqsave(host_set lock)
2357 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2359 struct scatterlist *sg;
2361 qc->flags |= ATA_QCFLAG_SINGLE;
2363 memset(&qc->sgent, 0, sizeof(qc->sgent));
2364 qc->sg = &qc->sgent;
2369 sg->page = virt_to_page(buf);
2370 sg->offset = (unsigned long) buf & ~PAGE_MASK;
2371 sg->length = buflen;
/*
 * ata_sg_init: attach the caller's scatter-gather table to the queued
 * command (the qc->sg assignment between the visible lines is elided
 * from this excerpt).
 */
2375 * ata_sg_init - Associate command with scatter-gather table.
2376 * @qc: Command to be associated
2377 * @sg: Scatter-gather table.
2378 * @n_elem: Number of elements in s/g table.
2380 * Initialize the data-related elements of queued_cmd @qc
2381 * to point to a scatter-gather table @sg, containing @n_elem
2385 * spin_lock_irqsave(host_set lock)
2388 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2389 unsigned int n_elem)
2391 qc->flags |= ATA_QCFLAG_SG;
2393 qc->n_elem = n_elem;
/*
 * ata_sg_setup_one: dma_map_single the flat buffer and record the bus
 * address/length in the embedded sg entry; returns -1 on mapping
 * error (the visible dma_mapping_error branch) and 0 on success
 * (return elided).
 */
2397 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2398 * @qc: Command with memory buffer to be mapped.
2400 * DMA-map the memory buffer associated with queued_cmd @qc.
2403 * spin_lock_irqsave(host_set lock)
2406 * Zero on success, negative on error.
2409 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2411 struct ata_port *ap = qc->ap;
2412 int dir = qc->dma_dir;
2413 struct scatterlist *sg = qc->sg;
2414 dma_addr_t dma_address;
2416 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
2418 if (dma_mapping_error(dma_address))
2421 sg_dma_address(sg) = dma_address;
2422 sg_dma_len(sg) = sg->length;
2424 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2425 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
/*
 * ata_sg_setup: dma_map_sg the command's table and record the mapped
 * element count; the error check on a zero n_elem and the returns are
 * elided from this excerpt.
 */
2431 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
2432 * @qc: Command with scatter-gather table to be mapped.
2434 * DMA-map the scatter-gather table associated with queued_cmd @qc.
2437 * spin_lock_irqsave(host_set lock)
2440 * Zero on success, negative on error.
2444 static int ata_sg_setup(struct ata_queued_cmd *qc)
2446 struct ata_port *ap = qc->ap;
2447 struct scatterlist *sg = qc->sg;
2450 VPRINTK("ENTER, ata%u\n", ap->id);
2451 assert(qc->flags & ATA_QCFLAG_SG);
2454 n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2458 DPRINTK("%d sg elements mapped\n", n_elem);
2460 qc->n_elem = n_elem;
/*
 * ata_pio_poll: one polling step of the PIO state machine.  While BSY
 * is set, stay in the matching *_POLL state and ask the caller to
 * requeue after ATA_SHORT_PAUSE; on pio_task_timeout expiry move to
 * PIO_ST_TMOUT.  When BSY clears, restore the non-poll state
 * (reg_state).  The default/BUG case of the switch and the final
 * return are elided from this excerpt.
 */
2470 * None. (executing in kernel thread context)
2476 static unsigned long ata_pio_poll(struct ata_port *ap)
2479 unsigned int poll_state = PIO_ST_UNKNOWN;
2480 unsigned int reg_state = PIO_ST_UNKNOWN;
2481 const unsigned int tmout_state = PIO_ST_TMOUT;
2483 switch (ap->pio_task_state) {
2486 poll_state = PIO_ST_POLL;
2490 case PIO_ST_LAST_POLL:
2491 poll_state = PIO_ST_LAST_POLL;
2492 reg_state = PIO_ST_LAST;
2499 status = ata_chk_status(ap);
2500 if (status & ATA_BUSY) {
2501 if (time_after(jiffies, ap->pio_task_timeout)) {
2502 ap->pio_task_state = tmout_state;
2505 ap->pio_task_state = poll_state;
2506 return ATA_SHORT_PAUSE;
2509 ap->pio_task_state = reg_state;
/*
 * ata_pio_complete: finish a PIO command.  Fast-path wait for
 * BSY|DRQ to clear (with one msleep retry before demoting to
 * PIO_ST_LAST_POLL), then verify an OK status; errors go to
 * PIO_ST_ERR, success completes the active qc and returns the state
 * machine to idle.
 */
2514 * ata_pio_complete -
2518 * None. (executing in kernel thread context)
2521 static void ata_pio_complete (struct ata_port *ap)
2523 struct ata_queued_cmd *qc;
2527 * This is purely hueristic. This is a fast path.
2528 * Sometimes when we enter, BSY will be cleared in
2529 * a chk-status or two. If not, the drive is probably seeking
2530 * or something. Snooze for a couple msecs, then
2531 * chk-status again. If still busy, fall back to
2532 * PIO_ST_POLL state.
2534 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2535 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2537 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2538 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2539 ap->pio_task_state = PIO_ST_LAST_POLL;
2540 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2545 drv_stat = ata_wait_idle(ap);
2546 if (!ata_ok(drv_stat)) {
2547 ap->pio_task_state = PIO_ST_ERR;
2551 qc = ata_qc_from_tag(ap, ap->active_tag);
2554 ap->pio_task_state = PIO_ST_IDLE;
2558 ata_qc_complete(qc, drv_stat);
/*
 * swap_buf_le16: convert a buffer of 16-bit words between
 * little-endian and CPU order in place.  The visible loop is inside
 * a big-endian-only conditional (#endif at the bottom; the matching
 * #ifdef __BIG_ENDIAN and the loop-index declaration are elided), so
 * on little-endian builds this is a no-op.
 */
2564 * @buf: Buffer to swap
2565 * @buf_words: Number of 16-bit words in buffer.
2567 * Swap halves of 16-bit words if needed to convert from
2568 * little-endian byte order to native cpu byte order, or
2573 void swap_buf_le16(u16 *buf, unsigned int buf_words)
2578 for (i = 0; i < buf_words; i++)
2579 buf[i] = le16_to_cpu(buf[i]);
2580 #endif /* __BIG_ENDIAN */
/*
 * ata_mmio_data_xfer: move buflen bytes (as 16-bit words) between the
 * buffer and the memory-mapped data register, converting the buffer's
 * little-endian layout around readw/writew.  The if/else selecting
 * write vs read on write_data is partially elided from this excerpt.
 */
2583 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2584 unsigned int buflen, int write_data)
2587 unsigned int words = buflen >> 1;
2588 u16 *buf16 = (u16 *) buf;
2589 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
2592 for (i = 0; i < words; i++)
2593 writew(le16_to_cpu(buf16[i]), mmio);
2595 for (i = 0; i < words; i++)
2596 buf16[i] = cpu_to_le16(readw(mmio));
2600 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2601 unsigned int buflen, int write_data)
2603 unsigned int dwords = buflen >> 1;
2606 outsw(ap->ioaddr.data_addr, buf, dwords);
2608 insw(ap->ioaddr.data_addr, buf, dwords);
2611 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2612 unsigned int buflen, int do_write)
2614 if (ap->flags & ATA_FLAG_MMIO)
2615 ata_mmio_data_xfer(ap, buf, buflen, do_write);
2617 ata_pio_data_xfer(ap, buf, buflen, do_write);
/*
 * ata_pio_sector: transfer one sector of the active command.  Locates
 * the current position in the sg list, kmaps the page, advances the
 * cursect/cursg/cursg_ofs cursors (partially elided), and performs
 * the actual xfer.  On the final sector the state machine moves to
 * PIO_ST_LAST.
 */
2620 static void ata_pio_sector(struct ata_queued_cmd *qc)
2622 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2623 struct scatterlist *sg = qc->sg;
2624 struct ata_port *ap = qc->ap;
2626 unsigned int offset;
2629 if (qc->cursect == (qc->nsect - 1))
2630 ap->pio_task_state = PIO_ST_LAST;
2632 page = sg[qc->cursg].page;
2633 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
2635 /* get the current page and offset */
2636 page = nth_page(page, (offset >> PAGE_SHIFT));
2637 offset %= PAGE_SIZE;
2639 buf = kmap(page) + offset;
/* advance to the next sg entry once this one's sectors are consumed */
2644 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
2649 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2651 /* do the actual data transfer */
2652 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2653 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
/*
 * __atapi_pio_bytes: transfer up to @bytes of the active ATAPI
 * command, clamped to the current sg entry and to the current page.
 * Advances curbytes/cursg_ofs and moves to the next sg entry when the
 * current one is consumed; the outer loop over remaining bytes and
 * the kunmap are elided from this excerpt.
 */
2658 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2660 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2661 struct scatterlist *sg = qc->sg;
2662 struct ata_port *ap = qc->ap;
2665 unsigned int offset, count;
2667 if (qc->curbytes == qc->nbytes - bytes)
2668 ap->pio_task_state = PIO_ST_LAST;
2671 sg = &qc->sg[qc->cursg];
2674 offset = sg->offset + qc->cursg_ofs;
2676 /* get the current page and offset */
2677 page = nth_page(page, (offset >> PAGE_SHIFT));
2678 offset %= PAGE_SIZE;
2680 /* don't overrun current sg */
2681 count = min(sg->length - qc->cursg_ofs, bytes);
2683 /* don't cross page boundaries */
2684 count = min(count, (unsigned int)PAGE_SIZE - offset);
2686 buf = kmap(page) + offset;
2689 qc->curbytes += count;
2690 qc->cursg_ofs += count;
2692 if (qc->cursg_ofs == sg->length) {
2697 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2699 /* do the actual data transfer */
2700 ata_data_xfer(ap, buf, count, do_write);
/*
 * atapi_pio_bytes: read the interrupt reason and byte count the
 * device placed in the shadow taskfile, validate that this is a data
 * phase in the expected direction, then hand off to
 * __atapi_pio_bytes.  The err_out label sits between the success
 * return and the diagnostic printk (elided lines).
 */
2709 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2711 struct ata_port *ap = qc->ap;
2712 struct ata_device *dev = qc->dev;
2713 unsigned int ireason, bc_lo, bc_hi, bytes;
2714 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
2716 ap->ops->tf_read(ap, &qc->tf);
2717 ireason = qc->tf.nsect;
2718 bc_lo = qc->tf.lbam;
2719 bc_hi = qc->tf.lbah;
2720 bytes = (bc_hi << 8) | bc_lo;
2722 /* shall be cleared to zero, indicating xfer of data */
2723 if (ireason & (1 << 0))
2726 /* make sure transfer direction matches expected */
2727 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
2728 if (do_write != i_write)
2731 __atapi_pio_bytes(qc, bytes);
2736 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2737 ap->id, dev->devno);
2738 ap->pio_task_state = PIO_ST_ERR;
/*
 * ata_pio_block: one data-block step of the PIO state machine.  Wait
 * briefly for BSY to clear (demoting to PIO_ST_POLL on persistent
 * busy), then either run the ATAPI byte-count path or, for ATA, treat
 * BSY=0/DRQ=0 as an error and otherwise transfer one sector (the
 * ata_pio_sector call is elided from this excerpt).
 */
2746 * None. (executing in kernel thread context)
2749 static void ata_pio_block(struct ata_port *ap)
2751 struct ata_queued_cmd *qc;
2755 * This is purely hueristic. This is a fast path.
2756 * Sometimes when we enter, BSY will be cleared in
2757 * a chk-status or two. If not, the drive is probably seeking
2758 * or something. Snooze for a couple msecs, then
2759 * chk-status again. If still busy, fall back to
2760 * PIO_ST_POLL state.
2762 status = ata_busy_wait(ap, ATA_BUSY, 5);
2763 if (status & ATA_BUSY) {
2765 status = ata_busy_wait(ap, ATA_BUSY, 10);
2766 if (status & ATA_BUSY) {
2767 ap->pio_task_state = PIO_ST_POLL;
2768 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2773 qc = ata_qc_from_tag(ap, ap->active_tag);
2776 if (is_atapi_taskfile(&qc->tf)) {
2777 /* no more data to transfer or unsupported ATAPI command */
2778 if ((status & ATA_DRQ) == 0) {
2779 ap->pio_task_state = PIO_ST_IDLE;
2783 ata_qc_complete(qc, status);
2787 atapi_pio_bytes(qc);
2789 /* handle BSY=0, DRQ=0 as error */
2790 if ((status & ATA_DRQ) == 0) {
2791 ap->pio_task_state = PIO_ST_ERR;
/*
 * ata_pio_error: terminal error state -- log the device status,
 * return the state machine to idle, and complete the active qc with
 * ATA_ERR forced into the status.
 */
2799 static void ata_pio_error(struct ata_port *ap)
2801 struct ata_queued_cmd *qc;
2804 qc = ata_qc_from_tag(ap, ap->active_tag);
2807 drv_stat = ata_chk_status(ap);
2808 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2811 ap->pio_task_state = PIO_ST_IDLE;
2815 ata_qc_complete(qc, drv_stat | ATA_ERR);
/*
 * ata_pio_task: workqueue entry point driving the PIO state machine.
 * Dispatches on pio_task_state (most case bodies are elided from this
 * excerpt), then requeues itself -- delayed when a poll state asked
 * for a pause, immediately otherwise.
 */
2818 static void ata_pio_task(void *_data)
2820 struct ata_port *ap = _data;
2821 unsigned long timeout = 0;
2823 switch (ap->pio_task_state) {
2832 ata_pio_complete(ap);
2836 case PIO_ST_LAST_POLL:
2837 timeout = ata_pio_poll(ap);
2847 queue_delayed_work(ata_wq, &ap->pio_task,
2850 queue_work(ata_wq, &ap->pio_task);
/*
 * atapi_request_sense: synchronously issue an internal ATAPI REQUEST
 * SENSE, DMA-mapping the SCSI command's sense buffer as the single
 * data buffer, and block on a completion until it finishes.  The
 * 8 KB value in lbam/lbah is the ATAPI byte-count limit.
 */
2853 static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2854 struct scsi_cmnd *cmd)
2856 DECLARE_COMPLETION(wait);
2857 struct ata_queued_cmd *qc;
2858 unsigned long flags;
2861 DPRINTK("ATAPI request sense\n");
2863 qc = ata_qc_new_init(ap, dev);
2866 /* FIXME: is this needed? */
2867 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2869 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2870 qc->dma_dir = DMA_FROM_DEVICE;
2872 memset(&qc->cdb, 0, ap->cdb_len);
2873 qc->cdb[0] = REQUEST_SENSE;
2874 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2876 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2877 qc->tf.command = ATA_CMD_PACKET;
2879 qc->tf.protocol = ATA_PROT_ATAPI;
2880 qc->tf.lbam = (8 * 1024) & 0xff;
2881 qc->tf.lbah = (8 * 1024) >> 8;
2882 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2884 qc->waiting = &wait;
2885 qc->complete_fn = ata_qc_complete_noop;
2887 spin_lock_irqsave(&ap->host_set->lock, flags);
2888 rc = ata_qc_issue(qc);
2889 spin_unlock_irqrestore(&ap->host_set->lock, flags);
/* issue failure disables the port; the `if (rc)`/else around these two lines is elided */
2892 ata_port_disable(ap);
2894 wait_for_completion(&wait);
/*
 * ata_qc_timeout: handle a command the SCSI layer declared timed out.
 * For ATAPI commands not being cancelled by EH, complete the original
 * qc and fetch sense data instead.  Otherwise stop BMDMA if active,
 * read the final status, clear pending irq events, log, and complete
 * the qc with that status (the "lost interrupt" case).
 * NOTE(review): elided listing -- the early-return after the ATAPI
 * branch and the function tail are missing from the visible text.
 */
2900 * ata_qc_timeout - Handle timeout of queued command
2901 * @qc: Command that timed out
2903 * Some part of the kernel (currently, only the SCSI layer)
2904 * has noticed that the active command on port @ap has not
2905 * completed after a specified length of time. Handle this
2906 * condition by disabling DMA (if necessary) and completing
2907 * transactions, with error if necessary.
2909 * This also handles the case of the "lost interrupt", where
2910 * for some reason (possibly hardware bug, possibly driver bug)
2911 * an interrupt was not delivered to the driver, even though the
2912 * transaction completed successfully.
2915 * Inherited from SCSI layer (none, can sleep)
2918 static void ata_qc_timeout(struct ata_queued_cmd *qc)
2920 struct ata_port *ap = qc->ap;
2921 struct ata_device *dev = qc->dev;
2922 u8 host_stat = 0, drv_stat;
2926 /* FIXME: doesn't this conflict with timeout handling? */
2927 if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
2928 struct scsi_cmnd *cmd = qc->scsicmd;
2930 if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) {
2932 /* finish completing original command */
2933 __ata_qc_complete(qc);
2935 atapi_request_sense(ap, dev, cmd);
2937 cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
2938 scsi_finish_command(cmd);
2944 /* hack alert! We cannot use the supplied completion
2945 * function from inside the ->eh_strategy_handler() thread.
2946 * libata is the only user of ->eh_strategy_handler() in
2947 * any kernel, so the default scsi_done() assumes it is
2948 * not being called from the SCSI EH.
2950 qc->scsidone = scsi_finish_command;
2952 switch (qc->tf.protocol) {
2955 case ATA_PROT_ATAPI_DMA:
2956 host_stat = ap->ops->bmdma_status(ap);
2958 /* before we do anything else, clear DMA-Start bit */
2959 ap->ops->bmdma_stop(ap);
2965 drv_stat = ata_chk_status(ap);
2967 /* ack bmdma irq events */
2968 ap->ops->irq_clear(ap);
2970 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
2971 ap->id, qc->tf.command, drv_stat, host_stat);
2973 /* complete taskfile transaction */
2974 ata_qc_complete(qc, drv_stat);
/* NOTE(review): kernel-doc kept verbatim; extraction dropped the comment
 * delimiters and most of the function body (qc check, call into
 * ata_qc_timeout, exit path). */
2982 * ata_eng_timeout - Handle timeout of queued command
2983 * @ap: Port on which timed-out command is active
2985 * Some part of the kernel (currently, only the SCSI layer)
2986 * has noticed that the active command on port @ap has not
2987 * completed after a specified length of time. Handle this
2988 * condition by disabling DMA (if necessary) and completing
2989 * transactions, with error if necessary.
2991 * This also handles the case of the "lost interrupt", where
2992 * for some reason (possibly hardware bug, possibly driver bug)
2993 * an interrupt was not delivered to the driver, even though the
2994 * transaction completed successfully.
2997 * Inherited from SCSI layer (none, can sleep)
3000 void ata_eng_timeout(struct ata_port *ap)
3002 struct ata_queued_cmd *qc;
/* look up the command the timeout refers to; NULL means a stale/buggy call */
3006 qc = ata_qc_from_tag(ap, ap->active_tag);
3008 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3020 * ata_qc_new - Request an available ATA command, for queueing
3021 * @ap: Port associated with device @dev
3022 * @dev: Device from whom we request an available command structure
/* Atomically claims the first free tag bit in ap->qactive and returns the
 * matching qc slot, or NULL when all ATA_MAX_QUEUE tags are in use. */
3028 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3030 struct ata_queued_cmd *qc = NULL;
3033 for (i = 0; i < ATA_MAX_QUEUE; i++)
3034 if (!test_and_set_bit(i, &ap->qactive)) {
3035 qc = ata_qc_from_tag(ap, i);
3046 * ata_qc_new_init - Request an available ATA command, and initialize it
3047 * @ap: Port associated with device @dev
3048 * @dev: Device from whom we request an available command structure
/* Allocates a qc via ata_qc_new() then resets its per-command bookkeeping
 * and taskfile; propagates the device's LBA/LBA48 capability into tf.flags.
 * NOTE(review): the NULL-check on the allocation is outside this view. */
3054 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3055 struct ata_device *dev)
3057 struct ata_queued_cmd *qc;
3059 qc = ata_qc_new(ap);
3066 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
3068 qc->nbytes = qc->curbytes = 0;
3070 ata_tf_init(ap, &qc->tf, dev->devno);
3072 if (dev->flags & ATA_DFLAG_LBA) {
3073 qc->tf.flags |= ATA_TFLAG_LBA;
3075 if (dev->flags & ATA_DFLAG_LBA48)
3076 qc->tf.flags |= ATA_TFLAG_LBA48;
/* Completion callback that does nothing; used when the issuer waits on
 * qc->waiting instead (see atapi_request_sense). */
3083 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
/*
 * __ata_qc_complete - low-level qc teardown: poison the tag, wake any
 * synchronous waiter, and release the tag bit in ap->qactive.
 * Caller holds host_set lock (extraction hides the locking comment).
 */
3088 static void __ata_qc_complete(struct ata_queued_cmd *qc)
3090 struct ata_port *ap = qc->ap;
3091 unsigned int tag, do_clear = 0;
3095 if (likely(ata_tag_valid(tag))) {
3096 if (tag == ap->active_tag)
3097 ap->active_tag = ATA_TAG_POISON;
/* poison the qc's tag so stale references trip assertions */
3098 qc->tag = ATA_TAG_POISON;
3103 struct completion *waiting = qc->waiting;
3108 if (likely(do_clear))
3109 clear_bit(tag, &ap->qactive);
3113 * ata_qc_free - free unused ata_queued_cmd
3114 * @qc: Command to complete
3116 * Designed to free unused ata_queued_cmd object
3117 * in case something prevents using it.
3120 * spin_lock_irqsave(host_set lock)
3123 void ata_qc_free(struct ata_queued_cmd *qc)
3125 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3126 assert(qc->waiting == NULL); /* nothing should be waiting */
/* no completion callback: the qc was never issued, just release it */
3128 __ata_qc_complete(qc);
3132 * ata_qc_complete - Complete an active ATA command
3133 * @qc: Command to complete
3134 * @drv_stat: ATA Status register contents
3136 * Indicate to the mid and upper layers that an ATA
3137 * command has completed, with either an ok or not-ok status.
3140 * spin_lock_irqsave(host_set lock)
3144 void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
3148 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3149 assert(qc->flags & ATA_QCFLAG_ACTIVE);
/* unmap DMA buffers first (ata_sg_clean call dropped by extraction) */
3151 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3154 /* call completion callback */
3155 rc = qc->complete_fn(qc, drv_stat);
3156 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3158 /* if callback indicates not to complete command (non-zero),
3159 * return immediately
3164 __ata_qc_complete(qc);
/*
 * ata_should_dma_map - decide whether this qc's buffers need DMA mapping.
 * DMA protocols always map; PIO/ATAPI protocols map only when the port
 * advertises ATA_FLAG_PIO_DMA.  Return statements lost to extraction.
 */
3169 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3171 struct ata_port *ap = qc->ap;
3173 switch (qc->tf.protocol) {
3175 case ATA_PROT_ATAPI_DMA:
3178 case ATA_PROT_ATAPI:
3180 case ATA_PROT_PIO_MULT:
3181 if (ap->flags & ATA_FLAG_PIO_DMA)
3194 * ata_qc_issue - issue taskfile to device
3195 * @qc: command to issue to device
3197 * Prepare an ATA command to submission to device.
3198 * This includes mapping the data into a DMA-able
3199 * area, filling in the S/G table, and finally
3200 * writing the taskfile to hardware, starting the command.
3203 * spin_lock_irqsave(host_set lock)
3206 * Zero on success, negative on error.
3209 int ata_qc_issue(struct ata_queued_cmd *qc)
3211 struct ata_port *ap = qc->ap;
/* map sg table or single buffer, depending on how the qc was built */
3213 if (ata_should_dma_map(qc)) {
3214 if (qc->flags & ATA_QCFLAG_SG) {
3215 if (ata_sg_setup(qc))
3217 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3218 if (ata_sg_setup_one(qc))
/* no mapping performed -> make sure completion won't try to unmap */
3222 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3225 ap->ops->qc_prep(qc);
3227 qc->ap->active_tag = qc->tag;
3228 qc->flags |= ATA_QCFLAG_ACTIVE;
3230 return ap->ops->qc_issue(qc);
3238 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
3239 * @qc: command to issue to device
3241 * Using various libata functions and hooks, this function
3242 * starts an ATA command. ATA commands are grouped into
3243 * classes called "protocols", and issuing each type of protocol
3244 * is slightly different.
3246 * May be used as the qc_issue() entry in ata_port_operations.
3249 * spin_lock_irqsave(host_set lock)
3252 * Zero on success, negative on error.
3255 int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3257 struct ata_port *ap = qc->ap;
3259 ata_dev_select(ap, qc->dev->devno, 1, 0);
3261 switch (qc->tf.protocol) {
3262 case ATA_PROT_NODATA:
3263 ata_tf_to_host_nolock(ap, &qc->tf);
/* DMA protocol (case label lost to extraction): program and kick BMDMA */
3267 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3268 ap->ops->bmdma_setup(qc); /* set up bmdma */
3269 ap->ops->bmdma_start(qc); /* initiate bmdma */
3272 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3273 ata_qc_set_polling(qc);
3274 ata_tf_to_host_nolock(ap, &qc->tf);
3275 ap->pio_task_state = PIO_ST;
3276 queue_work(ata_wq, &ap->pio_task);
/* ATAPI variants hand the CDB off to atapi_packet_task */
3279 case ATA_PROT_ATAPI:
3280 ata_qc_set_polling(qc);
3281 ata_tf_to_host_nolock(ap, &qc->tf);
3282 queue_work(ata_wq, &ap->packet_task);
3285 case ATA_PROT_ATAPI_NODATA:
3286 ata_tf_to_host_nolock(ap, &qc->tf);
3287 queue_work(ata_wq, &ap->packet_task);
3290 case ATA_PROT_ATAPI_DMA:
3291 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3292 ap->ops->bmdma_setup(qc); /* set up bmdma */
3293 queue_work(ata_wq, &ap->packet_task);
3305 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
3306 * @qc: Info associated with this ATA transaction.
3309 * spin_lock_irqsave(host_set lock)
/* MMIO flavor: program PRD table address and direction bit, then send the
 * taskfile command.  Does NOT set ATA_DMA_START (see ata_bmdma_start_mmio). */
3312 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
3314 struct ata_port *ap = qc->ap;
3315 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3317 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3319 /* load PRD table addr. */
3320 mb(); /* make sure PRD table writes are visible to controller */
3321 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
3323 /* specify data direction, triple-check start bit is clear */
3324 dmactl = readb(mmio + ATA_DMA_CMD);
3325 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
/* guarded by rw test (if-line lost to extraction) */
3327 dmactl |= ATA_DMA_WR;
3328 writeb(dmactl, mmio + ATA_DMA_CMD);
3330 /* issue r/w command */
3331 ap->ops->exec_command(ap, &qc->tf);
3335 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3336 * @qc: Info associated with this ATA transaction.
3339 * spin_lock_irqsave(host_set lock)
/* MMIO flavor: set ATA_DMA_START in the BMDMA command register. */
3342 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
3344 struct ata_port *ap = qc->ap;
3345 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3348 /* start host DMA transaction */
3349 dmactl = readb(mmio + ATA_DMA_CMD);
3350 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
3352 /* Strictly, one may wish to issue a readb() here, to
3353 * flush the mmio write. However, control also passes
3354 * to the hardware at this point, and it will interrupt
3355 * us when we are to resume control. So, in effect,
3356 * we don't care when the mmio write flushes.
3357 * Further, a read of the DMA status register _immediately_
3358 * following the write may not be what certain flaky hardware
3359 * is expected, so I think it is best to not add a readb()
3360 * without first all the MMIO ATA cards/mobos.
3361 * Or maybe I'm just being paranoid.
3366 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
3367 * @qc: Info associated with this ATA transaction.
3370 * spin_lock_irqsave(host_set lock)
/* Port-I/O twin of ata_bmdma_setup_mmio: outl/inb/outb instead of MMIO. */
3373 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
3375 struct ata_port *ap = qc->ap;
3376 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3379 /* load PRD table addr. */
3380 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
3382 /* specify data direction, triple-check start bit is clear */
3383 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3384 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
/* guarded by rw test (if-line lost to extraction) */
3386 dmactl |= ATA_DMA_WR;
3387 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3389 /* issue r/w command */
3390 ap->ops->exec_command(ap, &qc->tf);
3394 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
3395 * @qc: Info associated with this ATA transaction.
3398 * spin_lock_irqsave(host_set lock)
/* Port-I/O twin of ata_bmdma_start_mmio: set ATA_DMA_START via outb. */
3401 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
3403 struct ata_port *ap = qc->ap;
3406 /* start host DMA transaction */
3407 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3408 outb(dmactl | ATA_DMA_START,
3409 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3414 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3415 * @qc: Info associated with this ATA transaction.
3417 * Writes the ATA_DMA_START flag to the DMA command register.
3419 * May be used as the bmdma_start() entry in ata_port_operations.
3422 * spin_lock_irqsave(host_set lock)
/* Dispatcher: picks MMIO vs port-I/O implementation from ATA_FLAG_MMIO. */
3424 void ata_bmdma_start(struct ata_queued_cmd *qc)
3426 if (qc->ap->flags & ATA_FLAG_MMIO)
3427 ata_bmdma_start_mmio(qc);
3429 ata_bmdma_start_pio(qc);
3434 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
3435 * @qc: Info associated with this ATA transaction.
3437 * Writes address of PRD table to device's PRD Table Address
3438 * register, sets the DMA control register, and calls
3439 * ops->exec_command() to start the transfer.
3441 * May be used as the bmdma_setup() entry in ata_port_operations.
3444 * spin_lock_irqsave(host_set lock)
/* Dispatcher: picks MMIO vs port-I/O implementation from ATA_FLAG_MMIO. */
3446 void ata_bmdma_setup(struct ata_queued_cmd *qc)
3448 if (qc->ap->flags & ATA_FLAG_MMIO)
3449 ata_bmdma_setup_mmio(qc);
3451 ata_bmdma_setup_pio(qc);
3456 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
3457 * @ap: Port associated with this ATA transaction.
3459 * Clear interrupt and error flags in DMA status register.
3461 * May be used as the irq_clear() entry in ata_port_operations.
3464 * spin_lock_irqsave(host_set lock)
3467 void ata_bmdma_irq_clear(struct ata_port *ap)
3469 if (ap->flags & ATA_FLAG_MMIO) {
3470 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
/* write-back of the status byte clears its write-1-to-clear bits */
3471 writeb(readb(mmio), mmio);
3473 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
3474 outb(inb(addr), addr);
3481 * ata_bmdma_status - Read PCI IDE BMDMA status
3482 * @ap: Port associated with this ATA transaction.
3484 * Read and return BMDMA status register.
3486 * May be used as the bmdma_status() entry in ata_port_operations.
3489 * spin_lock_irqsave(host_set lock)
3492 u8 ata_bmdma_status(struct ata_port *ap)
3495 if (ap->flags & ATA_FLAG_MMIO) {
3496 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3497 host_stat = readb(mmio + ATA_DMA_STATUS);
/* else branch and return host_stat lost to extraction */
3499 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3505 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3506 * @ap: Port associated with this ATA transaction.
3508 * Clears the ATA_DMA_START flag in the dma control register
3510 * May be used as the bmdma_stop() entry in ata_port_operations.
3513 * spin_lock_irqsave(host_set lock)
3516 void ata_bmdma_stop(struct ata_port *ap)
3518 if (ap->flags & ATA_FLAG_MMIO) {
3519 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3521 /* clear start/stop bit */
3522 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3523 mmio + ATA_DMA_CMD);
3525 /* clear start/stop bit */
3526 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
3527 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3530 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3531 ata_altstatus(ap); /* dummy read */
3535 * ata_host_intr - Handle host interrupt for given (port, task)
3536 * @ap: Port on which interrupt arrived (possibly...)
3537 * @qc: Taskfile currently active in engine
3539 * Handle host interrupt for given queued command. Currently,
3540 * only DMA interrupts are handled. All other commands are
3541 * handled via polling with interrupts disabled (nIEN bit).
3544 * spin_lock_irqsave(host_set lock)
3547 * One if interrupt was handled, zero if not (shared irq).
3550 inline unsigned int ata_host_intr (struct ata_port *ap,
3551 struct ata_queued_cmd *qc)
3553 u8 status, host_stat;
3555 switch (qc->tf.protocol) {
3558 case ATA_PROT_ATAPI_DMA:
3559 case ATA_PROT_ATAPI:
3560 /* check status of DMA engine */
3561 host_stat = ap->ops->bmdma_status(ap);
3562 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
3564 /* if it's not our irq... */
3565 if (!(host_stat & ATA_DMA_INTR))
3568 /* before we do anything else, clear DMA-Start bit */
3569 ap->ops->bmdma_stop(ap);
/* fall through to the status check shared with the NODATA protocols */
3573 case ATA_PROT_ATAPI_NODATA:
3574 case ATA_PROT_NODATA:
3575 /* check altstatus */
3576 status = ata_altstatus(ap);
3577 if (status & ATA_BUSY)
3580 /* check main status, clearing INTRQ */
3581 status = ata_chk_status(ap);
3582 if (unlikely(status & ATA_BUSY))
3584 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
3585 ap->id, qc->tf.protocol, status);
3587 /* ack bmdma irq events */
3588 ap->ops->irq_clear(ap);
3590 /* complete taskfile transaction */
3591 ata_qc_complete(qc, status);
3598 return 1; /* irq handled */
/* idle_irq label/path: count spurious interrupts, trap occasionally */
3601 ap->stats.idle_irq++;
3604 if ((ap->stats.idle_irq % 1000) == 0) {
3606 ata_irq_ack(ap, 0); /* debug trap */
3607 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
3610 return 0; /* irq not handled */
3614 * ata_interrupt - Default ATA host interrupt handler
3615 * @irq: irq line (unused)
3616 * @dev_instance: pointer to our ata_host_set information structure
3619 * Default interrupt handler for PCI IDE devices. Calls
3620 * ata_host_intr() for each port that is not disabled.
3623 * Obtains host_set lock during operation.
3626 * IRQ_NONE or IRQ_HANDLED.
3630 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3632 struct ata_host_set *host_set = dev_instance;
3634 unsigned int handled = 0;
3635 unsigned long flags;
3637 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
3638 spin_lock_irqsave(&host_set->lock, flags);
3640 for (i = 0; i < host_set->n_ports; i++) {
3641 struct ata_port *ap;
3643 ap = host_set->ports[i];
3644 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
3645 struct ata_queued_cmd *qc;
3647 qc = ata_qc_from_tag(ap, ap->active_tag);
/* skip commands issued with nIEN (polling mode) or no longer active */
3648 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
3649 (qc->flags & ATA_QCFLAG_ACTIVE))
3650 handled |= ata_host_intr(ap, qc);
3654 spin_unlock_irqrestore(&host_set->lock, flags);
3656 return IRQ_RETVAL(handled);
3660 * atapi_packet_task - Write CDB bytes to hardware
3661 * @_data: Port to which ATAPI device is attached.
3663 * When device has indicated its readiness to accept
3664 * a CDB, this function is called. Send the CDB.
3665 * If DMA is to be performed, exit immediately.
3666 * Otherwise, we are in polling mode, so poll
3667 * status under operation succeeds or fails.
3670 * Kernel thread context (may sleep)
3673 static void atapi_packet_task(void *_data)
3675 struct ata_port *ap = _data;
3676 struct ata_queued_cmd *qc;
3679 qc = ata_qc_from_tag(ap, ap->active_tag);
3681 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3683 /* sleep-wait for BSY to clear */
3684 DPRINTK("busy wait\n");
/* timeouts fall through to the error path at the bottom (goto dropped) */
3685 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
3688 /* make sure DRQ is set */
3689 status = ata_chk_status(ap);
3690 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
3694 DPRINTK("send cdb\n");
3695 assert(ap->cdb_len >= 12);
3696 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3698 /* if we are DMA'ing, irq handler takes over from here */
3699 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3700 ap->ops->bmdma_start(qc); /* initiate bmdma */
3702 /* non-data commands are also handled via irq */
3703 else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3707 /* PIO commands are handled by polling */
3709 ap->pio_task_state = PIO_ST;
3710 queue_work(ata_wq, &ap->pio_task);
/* error path (label lost to extraction): fail the qc with ATA_ERR */
3716 ata_qc_complete(qc, ATA_ERR);
3721 * ata_port_start - Set port up for dma.
3722 * @ap: Port to initialize
3724 * Called just after data structures for each port are
3725 * initialized. Allocates space for PRD table.
3727 * May be used as the port_start() entry in ata_port_operations.
3732 int ata_port_start (struct ata_port *ap)
3734 struct device *dev = ap->host_set->dev;
/* coherent DMA alloc; NULL-check/-ENOMEM return lost to extraction */
3736 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
3740 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
3747 * ata_port_stop - Undo ata_port_start()
3748 * @ap: Port to shut down
3750 * Frees the PRD table.
3752 * May be used as the port_stop() entry in ata_port_operations.
3757 void ata_port_stop (struct ata_port *ap)
3759 struct device *dev = ap->host_set->dev;
3761 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
/* Default host_stop hook: unmap the host set's MMIO region, if any. */
3764 void ata_host_stop (struct ata_host_set *host_set)
3766 if (host_set->mmio_base)
3767 iounmap(host_set->mmio_base);
3772 * ata_host_remove - Unregister SCSI host structure with upper layers
3773 * @ap: Port to unregister
3774 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3779 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3781 struct Scsi_Host *sh = ap->host;
/* scsi_remove_host is conditional on do_unregister (if-line dropped) */
3786 scsi_remove_host(sh);
3788 ap->ops->port_stop(ap);
3792 * ata_host_init - Initialize an ata_port structure
3793 * @ap: Structure to initialize
3794 * @host: associated SCSI mid-layer structure
3795 * @host_set: Collection of hosts to which @ap belongs
3796 * @ent: Probe information provided by low-level driver
3797 * @port_no: Port number associated with this ata_port
3799 * Initialize a new ata_port structure, and its associated
3803 * Inherited from caller.
3807 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3808 struct ata_host_set *host_set,
3809 struct ata_probe_ent *ent, unsigned int port_no)
3815 host->max_channel = 1;
3816 host->unique_id = ata_unique_id++;
3817 host->max_cmd_len = 12;
3819 scsi_assign_lock(host, &host_set->lock);
/* port starts disabled until probing enables it */
3821 ap->flags = ATA_FLAG_PORT_DISABLED;
3822 ap->id = host->unique_id;
3824 ap->ctl = ATA_DEVCTL_OBS;
3825 ap->host_set = host_set;
3826 ap->port_no = port_no;
/* lvalue of this assignment (hard_port_no) lost to extraction */
3828 ent->legacy_mode ? ent->hard_port_no : port_no;
3829 ap->pio_mask = ent->pio_mask;
3830 ap->mwdma_mask = ent->mwdma_mask;
3831 ap->udma_mask = ent->udma_mask;
3832 ap->flags |= ent->host_flags;
3833 ap->ops = ent->port_ops;
3834 ap->cbl = ATA_CBL_NONE;
3835 ap->active_tag = ATA_TAG_POISON;
3836 ap->last_ctl = 0xFF;
3838 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
3839 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
3841 for (i = 0; i < ATA_MAX_DEVICES; i++)
3842 ap->device[i].devno = i;
3845 ap->stats.unhandled_irq = 1;
3846 ap->stats.idle_irq = 1;
3849 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
3853 * ata_host_add - Attach low-level ATA driver to system
3854 * @ent: Information provided by low-level driver
3855 * @host_set: Collections of ports to which we add
3856 * @port_no: Port number associated with this host
3858 * Attach low-level ATA driver to system.
3861 * PCI/etc. bus probe sem.
3864 * New ata_port on success, for NULL on error.
3868 static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
3869 struct ata_host_set *host_set,
3870 unsigned int port_no)
3872 struct Scsi_Host *host;
3873 struct ata_port *ap;
3877 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
/* ata_port lives in the Scsi_Host's hostdata area */
3881 ap = (struct ata_port *) &host->hostdata[0];
3883 ata_host_init(ap, host, host_set, ent, port_no);
3885 rc = ap->ops->port_start(ap);
/* error path (label dropped): release the Scsi_Host and return NULL */
3892 scsi_host_put(host);
3897 * ata_device_add - Register hardware device with ATA and SCSI layers
3898 * @ent: Probe information describing hardware device to be registered
3900 * This function processes the information provided in the probe
3901 * information struct @ent, allocates the necessary ATA and SCSI
3902 * host information structures, initializes them, and registers
3903 * everything with requisite kernel subsystems.
3905 * This function requests irqs, probes the ATA bus, and probes
3909 * PCI/etc. bus probe sem.
3912 * Number of ports registered. Zero on error (no ports registered).
3916 int ata_device_add(struct ata_probe_ent *ent)
3918 unsigned int count = 0, i;
3919 struct device *dev = ent->dev;
3920 struct ata_host_set *host_set;
3923 /* alloc a container for our list of ATA ports (buses) */
3924 host_set = kmalloc(sizeof(struct ata_host_set) +
3925 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
3928 memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
3929 spin_lock_init(&host_set->lock);
3931 host_set->dev = dev;
3932 host_set->n_ports = ent->n_ports;
3933 host_set->irq = ent->irq;
3934 host_set->mmio_base = ent->mmio_base;
3935 host_set->private_data = ent->private_data;
3936 host_set->ops = ent->port_ops;
3938 /* register each port bound to this device */
3939 for (i = 0; i < ent->n_ports; i++) {
3940 struct ata_port *ap;
3941 unsigned long xfer_mode_mask;
3943 ap = ata_host_add(ent, host_set, i);
3947 host_set->ports[i] = ap;
3948 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
3949 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
3950 (ap->pio_mask << ATA_SHIFT_PIO);
3952 /* print per-port info to dmesg */
3953 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3954 "bmdma 0x%lX irq %lu\n",
3956 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
3957 ata_mode_string(xfer_mode_mask),
3958 ap->ioaddr.cmd_addr,
3959 ap->ioaddr.ctl_addr,
3960 ap->ioaddr.bmdma_addr,
/* clear any pending irq state left by firmware before requesting the irq */
3964 host_set->ops->irq_clear(ap);
3973 /* obtain irq, that is shared between channels */
3974 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
3975 DRV_NAME, host_set))
3978 /* perform each probe synchronously */
3979 DPRINTK("probe begin\n");
3980 for (i = 0; i < count; i++) {
3981 struct ata_port *ap;
3984 ap = host_set->ports[i];
3986 DPRINTK("ata%u: probe begin\n", ap->id);
3987 rc = ata_bus_probe(ap);
3988 DPRINTK("ata%u: probe end\n", ap->id);
3991 /* FIXME: do something useful here?
3992 * Current libata behavior will
3993 * tear down everything when
3994 * the module is removed
3995 * or the h/w is unplugged.
3999 rc = scsi_add_host(ap->host, dev);
4001 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4003 /* FIXME: do something useful here */
4004 /* FIXME: handle unconditional calls to
4005 * scsi_scan_host and ata_host_remove, below,
4011 /* probes are done, now scan each port's disk(s) */
4012 DPRINTK("probe begin\n");
4013 for (i = 0; i < count; i++) {
4014 struct ata_port *ap = host_set->ports[i];
4016 scsi_scan_host(ap->host);
4019 dev_set_drvdata(dev, host_set);
4021 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4022 return ent->n_ports; /* success */
/* error unwinding (label dropped): tear down every port added so far */
4025 for (i = 0; i < count; i++) {
4026 ata_host_remove(host_set->ports[i], 1);
4027 scsi_host_put(host_set->ports[i]->host);
4030 VPRINTK("EXIT, returning 0\n");
4035 * ata_scsi_release - SCSI layer callback hook for host unload
4036 * @host: libata host to be unloaded
4038 * Performs all duties necessary to shut down a libata port...
4039 * Kill port kthread, disable port, and release resources.
4042 * Inherited from SCSI layer.
4048 int ata_scsi_release(struct Scsi_Host *host)
4050 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4054 ap->ops->port_disable(ap);
4055 ata_host_remove(ap, 0);
4062 * ata_std_ports - initialize ioaddr with standard port offsets.
4063 * @ioaddr: IO address structure to be initialized
4065 * Utility function which initializes data_addr, error_addr,
4066 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4067 * device_addr, status_addr, and command_addr to standard offsets
4068 * relative to cmd_addr.
4070 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4073 void ata_std_ports(struct ata_ioports *ioaddr)
4075 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4076 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4077 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4078 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4079 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4080 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4081 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4082 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4083 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4084 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
/*
 * ata_probe_ent_alloc - allocate and zero an ata_probe_ent, then copy the
 * transfer-mode masks, flags and ops from the given ata_port_info.
 * Returns NULL on allocation failure (return statements lost to extraction).
 */
4087 static struct ata_probe_ent *
4088 ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
4090 struct ata_probe_ent *probe_ent;
4092 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
4094 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
4095 kobject_name(&(dev->kobj)));
4099 memset(probe_ent, 0, sizeof(*probe_ent));
4101 INIT_LIST_HEAD(&probe_ent->node);
4102 probe_ent->dev = dev;
4104 probe_ent->sht = port->sht;
4105 probe_ent->host_flags = port->host_flags;
4106 probe_ent->pio_mask = port->pio_mask;
4107 probe_ent->mwdma_mask = port->mwdma_mask;
4108 probe_ent->udma_mask = port->udma_mask;
4109 probe_ent->port_ops = port->port_ops;
4117 * ata_pci_init_native_mode - Initialize native-mode driver
4118 * @pdev: pci device to be initialized
4119 * @port: array[2] of pointers to port info structures.
4121 * Utility function which allocates and initializes an
4122 * ata_probe_ent structure for a standard dual-port
4123 * PIO-based IDE controller. The returned ata_probe_ent
4124 * structure can be passed to ata_device_add(). The returned
4125 * ata_probe_ent structure should then be freed with kfree().
4129 struct ata_probe_ent *
4130 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
4132 struct ata_probe_ent *probe_ent =
4133 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4137 probe_ent->n_ports = 2;
4138 probe_ent->irq = pdev->irq;
4139 probe_ent->irq_flags = SA_SHIRQ;
/* native mode: taskfile/ctl/bmdma addresses come from PCI BARs 0-4 */
4141 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
4142 probe_ent->port[0].altstatus_addr =
4143 probe_ent->port[0].ctl_addr =
4144 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4145 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
4147 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
4148 probe_ent->port[1].altstatus_addr =
4149 probe_ent->port[1].ctl_addr =
4150 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
/* secondary channel's BMDMA registers sit 8 bytes past the primary's */
4151 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4153 ata_std_ports(&probe_ent->port[0]);
4154 ata_std_ports(&probe_ent->port[1]);
/*
 * ata_pci_init_legacy_mode - build two single-port probe_ents for a
 * legacy-mode PCI IDE controller: fixed ISA addresses 0x1f0/0x3f6 (irq 14)
 * and 0x170/0x376 (irq 15), BMDMA still taken from PCI BAR 4.
 * The second probe_ent is handed back through @ppe2.
 */
4159 static struct ata_probe_ent *
4160 ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4161 struct ata_probe_ent **ppe2)
4163 struct ata_probe_ent *probe_ent, *probe_ent2;
4165 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4168 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
4174 probe_ent->n_ports = 1;
4175 probe_ent->irq = 14;
4177 probe_ent->hard_port_no = 0;
4178 probe_ent->legacy_mode = 1;
4180 probe_ent2->n_ports = 1;
4181 probe_ent2->irq = 15;
4183 probe_ent2->hard_port_no = 1;
4184 probe_ent2->legacy_mode = 1;
4186 probe_ent->port[0].cmd_addr = 0x1f0;
4187 probe_ent->port[0].altstatus_addr =
4188 probe_ent->port[0].ctl_addr = 0x3f6;
4189 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
4191 probe_ent2->port[0].cmd_addr = 0x170;
4192 probe_ent2->port[0].altstatus_addr =
4193 probe_ent2->port[0].ctl_addr = 0x376;
4194 probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
4196 ata_std_ports(&probe_ent->port[0]);
4197 ata_std_ports(&probe_ent2->port[0]);
4204 * ata_pci_init_one - Initialize/register PCI IDE host controller
4205 * @pdev: Controller to be initialized
4206 * @port_info: Information from low-level host driver
4207 * @n_ports: Number of ports attached to host controller
4209 * This is a helper function which can be called from a driver's
4210 * xxx_init_one() probe function if the hardware uses traditional
4211 * IDE taskfile registers.
4213 * This function calls pci_enable_device(), reserves its register
4214 * regions, sets the dma mask, enables bus master mode, and calls
4218 * Inherited from PCI layer (may sleep).
4221 * Zero on success, negative on errno-based value on error.
4225 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4226 unsigned int n_ports)
4228 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
4229 struct ata_port_info *port[2];
4231 unsigned int legacy_mode = 0;
4232 int disable_dev_on_err = 1;
4237 port[0] = port_info[0];
4239 port[1] = port_info[1];
/* legacy-mode detection: programming-interface bits 0/2 clear -> legacy */
4243 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4244 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4245 /* TODO: support transitioning to native mode? */
4246 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4247 mask = (1 << 2) | (1 << 0);
4248 if ((tmp8 & mask) != mask)
4249 legacy_mode = (1 << 3);
4253 if ((!legacy_mode) && (n_ports > 1)) {
4254 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
4258 rc = pci_enable_device(pdev);
4262 rc = pci_request_regions(pdev, DRV_NAME);
/* regions already held by someone else: don't disable the device on exit */
4264 disable_dev_on_err = 0;
/* legacy primary channel: try to claim classic port 0x1f0 */
4269 if (!request_region(0x1f0, 8, "libata")) {
4270 struct resource *conflict, res;
4272 res.end = 0x1f0 + 8 - 1;
4273 conflict = ____request_resource(&ioport_resource, &res);
/* a "libata" conflict means we already own it (e.g. earlier probe) */
4274 if (!strcmp(conflict->name, "libata"))
4275 legacy_mode |= (1 << 0);
4277 disable_dev_on_err = 0;
4278 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
4281 legacy_mode |= (1 << 0);
/* legacy secondary channel: same dance for port 0x170 */
4283 if (!request_region(0x170, 8, "libata")) {
4284 struct resource *conflict, res;
4286 res.end = 0x170 + 8 - 1;
4287 conflict = ____request_resource(&ioport_resource, &res);
4288 if (!strcmp(conflict->name, "libata"))
4289 legacy_mode |= (1 << 1);
4291 disable_dev_on_err = 0;
4292 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
4295 legacy_mode |= (1 << 1);
4298 /* we have legacy mode, but all ports are unavailable */
4299 if (legacy_mode == (1 << 3)) {
4301 goto err_out_regions;
4304 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
4306 goto err_out_regions;
4307 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
4309 goto err_out_regions;
4312 probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
4314 probe_ent = ata_pci_init_native_mode(pdev, port);
4317 goto err_out_regions;
4320 pci_set_master(pdev);
4322 /* FIXME: check ata_device_add return */
4324 if (legacy_mode & (1 << 0))
4325 ata_device_add(probe_ent);
4326 if (legacy_mode & (1 << 1))
4327 ata_device_add(probe_ent2);
4329 ata_device_add(probe_ent);
/* error unwinding (labels dropped): release legacy ports, then regions */
4337 if (legacy_mode & (1 << 0))
4338 release_region(0x1f0, 8);
4339 if (legacy_mode & (1 << 1))
4340 release_region(0x170, 8);
4341 pci_release_regions(pdev);
4343 if (disable_dev_on_err)
4344 pci_disable_device(pdev);
4349 * ata_pci_remove_one - PCI layer callback for device removal
4350 * @pdev: PCI device that was removed
4352 * PCI layer indicates to libata via this hook that
4353 * hot-unplug or module unload event has occurred.
4354 * Handle this by unregistering all objects associated
4355 * with this PCI device. Free those objects. Then finally
4356 * release PCI resources and disable device.
4359 * Inherited from PCI layer (may sleep).
/*
 * Tear down everything ata_pci_init_one() built, in the reverse order:
 * detach the SCSI hosts, free the shared IRQ, release per-port resources
 * (including legacy I/O regions), stop the host, then release PCI
 * resources and disable the device.
 *
 * NOTE(review): the loop counter declaration and some braces are elided
 * in this extract; ordering below must be preserved exactly.
 */
4362 void ata_pci_remove_one (struct pci_dev *pdev)
4364 struct device *dev = pci_dev_to_dev(pdev);
4365 struct ata_host_set *host_set = dev_get_drvdata(dev);
4366 struct ata_port *ap;
/* Pass 1: unhook every port from the SCSI midlayer before freeing the
 * interrupt, so no new commands can arrive. */
4369 for (i = 0; i < host_set->n_ports; i++) {
4370 ap = host_set->ports[i];
4372 scsi_remove_host(ap->host);
4375 free_irq(host_set->irq, host_set);
/* Pass 2: release per-port state now that the IRQ is gone. */
4377 for (i = 0; i < host_set->n_ports; i++) {
4378 ap = host_set->ports[i];
4380 ata_scsi_release(ap->host);
/* Give back the legacy I/O region this port claimed during probe,
 * identified by its command-block base address. */
4382 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4383 struct ata_ioports *ioaddr = &ap->ioaddr;
4385 if (ioaddr->cmd_addr == 0x1f0)
4386 release_region(0x1f0, 8);
4387 else if (ioaddr->cmd_addr == 0x170)
4388 release_region(0x170, 8);
/* Drop the reference taken when the Scsi_Host was allocated. */
4391 scsi_host_put(ap->host);
/* Optional LLDD hook for controller-wide teardown. */
4394 if (host_set->ops->host_stop)
4395 host_set->ops->host_stop(host_set);
4399 pci_release_regions(pdev);
4400 pci_disable_device(pdev);
4401 dev_set_drvdata(dev, NULL);
4404 /* move to PCI subsystem */
/*
 * Read a config-space register of the width described by @bits and test
 * whether the selected bits equal the expected value.  Returns 1 on
 * match, 0 otherwise.
 *
 * NOTE(review): the `case` labels and the tmp8/tmp16/tmp32 declarations
 * plus their mask-and-assign-to-tmp lines are elided in this extract.
 */
4405 int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
4407 unsigned long tmp = 0;
/* Dispatch on register width (1, 2 or 4 bytes). */
4409 switch (bits->width) {
4412 pci_read_config_byte(pdev, bits->reg, &tmp8);
4418 pci_read_config_word(pdev, bits->reg, &tmp16);
4424 pci_read_config_dword(pdev, bits->reg, &tmp32);
4435 return (tmp == bits->val) ? 1 : 0;
4437 #endif /* CONFIG_PCI */
/*
 * Module init: create the workqueue used for deferred port tasks, then
 * announce the driver version.
 * NOTE(review): the workqueue NULL-check and return statements are
 * elided in this extract.
 */
4440 static int __init ata_init(void)
4442 ata_wq = create_workqueue("ata");
4446 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
/* Module exit: tear down the workqueue created in ata_init(). */
4450 static void __exit ata_exit(void)
4452 destroy_workqueue(ata_wq);
/* Register module entry/exit points. */
4455 module_init(ata_init);
4456 module_exit(ata_exit);
4459 * libata is essentially a library of internal helper functions for
4460 * low-level ATA host controller drivers. As such, the API/ABI is
4461 * likely to change as new drivers are added and updated.
4462 * Do not depend on ABI/API stability.
/* Core registration, SG setup and command lifecycle. */
4465 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4466 EXPORT_SYMBOL_GPL(ata_std_ports);
4467 EXPORT_SYMBOL_GPL(ata_device_add);
4468 EXPORT_SYMBOL_GPL(ata_sg_init);
4469 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4470 EXPORT_SYMBOL_GPL(ata_qc_complete);
4471 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4472 EXPORT_SYMBOL_GPL(ata_eng_timeout);
/* Taskfile and register access helpers. */
4473 EXPORT_SYMBOL_GPL(ata_tf_load);
4474 EXPORT_SYMBOL_GPL(ata_tf_read);
4475 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4476 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4477 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4478 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4479 EXPORT_SYMBOL_GPL(ata_check_status);
4480 EXPORT_SYMBOL_GPL(ata_altstatus);
4481 EXPORT_SYMBOL_GPL(ata_chk_err);
4482 EXPORT_SYMBOL_GPL(ata_exec_command);
/* Port lifecycle and interrupt handling. */
4483 EXPORT_SYMBOL_GPL(ata_port_start);
4484 EXPORT_SYMBOL_GPL(ata_port_stop);
4485 EXPORT_SYMBOL_GPL(ata_host_stop);
4486 EXPORT_SYMBOL_GPL(ata_interrupt);
4487 EXPORT_SYMBOL_GPL(ata_qc_prep);
/* Generic BMDMA engine helpers. */
4488 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4489 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4490 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4491 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4492 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
/* Probe/reset helpers. */
4493 EXPORT_SYMBOL_GPL(ata_port_probe);
4494 EXPORT_SYMBOL_GPL(sata_phy_reset);
4495 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4496 EXPORT_SYMBOL_GPL(ata_bus_reset);
4497 EXPORT_SYMBOL_GPL(ata_port_disable);
/* SCSI-layer glue. */
4498 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4499 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4500 EXPORT_SYMBOL_GPL(ata_scsi_error);
4501 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4502 EXPORT_SYMBOL_GPL(ata_scsi_release);
4503 EXPORT_SYMBOL_GPL(ata_host_intr);
4504 EXPORT_SYMBOL_GPL(ata_dev_classify);
4505 EXPORT_SYMBOL_GPL(ata_dev_id_string);
4506 EXPORT_SYMBOL_GPL(ata_dev_config);
4507 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
/* PCI-specific helpers (compiled/exported only under CONFIG_PCI). */
4510 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4511 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4512 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4513 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4514 #endif /* CONFIG_PCI */