2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
/* Forward declarations for helpers defined later in this file.
 * NOTE(review): extraction artifact — original kernel line numbers are glued
 * to the front of each line and many intervening lines are missing. */
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev);
66 static void ata_set_mode(struct ata_port *ap);
67 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
68 struct ata_device *dev);
69 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
/* Monotonically increasing id handed to each new port; single workqueue
 * used by ata_port_queue_task()/ata_port_flush_task() below. */
71 static unsigned int ata_unique_id = 1;
72 static struct workqueue_struct *ata_wq;
/* Module parameters; both are read-only after load (mode 0444). */
74 int atapi_enabled = 1;
75 module_param(atapi_enabled, int, 0444);
76 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
/* NOTE(review): the definition of libata_fua (presumably "int libata_fua;",
 * original line ~78) is not visible in this chunk — confirm it exists. */
79 module_param_named(fua, libata_fua, int, 0444);
80 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
82 MODULE_AUTHOR("Jeff Garzik");
83 MODULE_DESCRIPTION("Library module for ATA devices");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_VERSION);
89 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
90 * @tf: Taskfile to convert
91 * @fis: Buffer into which data will output
92 * @pmp: Port multiplier port
94 * Converts a standard ATA taskfile to a Serial ATA
95 * FIS structure (Register - Host to Device).
98 * Inherited from caller.
/* NOTE(review): extraction gaps — the function braces and the assignments
 * for fis[4..7], fis[12] and fis[14..19] (original lines 108-113, 118-119,
 * 121-126) are missing from this chunk; only part of the FIS is visible. */
101 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
103 fis[0] = 0x27; /* Register - Host to Device FIS */
104 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
105 bit 7 indicates Command FIS */
106 fis[2] = tf->command;
107 fis[3] = tf->feature;
/* high-order bytes (HOB) of the 48-bit LBA / feature / sector-count fields */
114 fis[8] = tf->hob_lbal;
115 fis[9] = tf->hob_lbam;
116 fis[10] = tf->hob_lbah;
117 fis[11] = tf->hob_feature;
120 fis[13] = tf->hob_nsect;
131 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
132 * @fis: Buffer from which data will be input
133 * @tf: Taskfile to output
135 * Converts a serial ATA FIS structure to a standard ATA taskfile.
138 * Inherited from caller.
/* NOTE(review): extraction gaps — braces and the assignments for the
 * low-order LBA/device/nsect fields (fis[4..7], fis[12], original lines
 * 145-150, 154-155) are missing; inverse of ata_tf_to_fis() above. */
141 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
143 tf->command = fis[2]; /* status */
144 tf->feature = fis[3]; /* error */
/* high-order bytes of the 48-bit fields */
151 tf->hob_lbal = fis[8];
152 tf->hob_lbam = fis[9];
153 tf->hob_lbah = fis[10];
156 tf->hob_nsect = fis[13];
/* Read/write opcode lookup table, indexed by (index + fua + lba48 + write)
 * in ata_rwcmd_protocol() below.
 * NOTE(review): extraction gap — most entries (original lines 160-162,
 * 165-167, 169-171, 174-185) and the closing "};" are missing; only a few
 * of the opcodes are visible here. */
159 static const u8 ata_rw_cmds[] = {
163 ATA_CMD_READ_MULTI_EXT,
164 ATA_CMD_WRITE_MULTI_EXT,
168 ATA_CMD_WRITE_MULTI_FUA_EXT,
172 ATA_CMD_PIO_READ_EXT,
173 ATA_CMD_PIO_WRITE_EXT,
186 ATA_CMD_WRITE_FUA_EXT
190 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
191 * @qc: command to examine and configure
193 * Examine the device configuration and tf->flags to calculate
194 * the proper read/write commands and protocol to use.
/* NOTE(review): extraction gaps — the "u8 cmd;" declaration, the closing of
 * the else-branch, the command-validity check and the final return (original
 * lines ~200, 203-204, 218, 220-228) are missing from this chunk. */
199 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
201 struct ata_taskfile *tf = &qc->tf;
202 struct ata_device *dev = qc->dev;
205 int index, fua, lba48, write;
/* Encode the three flag bits as table-offset addends: fua=4, lba48=2, write=1 */
207 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
208 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
209 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
211 if (dev->flags & ATA_DFLAG_PIO) {
212 tf->protocol = ATA_PROT_PIO;
/* index 0 = multi-sector PIO opcodes, 8 = single-sector PIO opcodes */
213 index = dev->multi_count ? 0 : 8;
214 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
215 /* Unable to use DMA due to host limitation */
216 tf->protocol = ATA_PROT_PIO;
217 index = dev->multi_count ? 0 : 8;
219 tf->protocol = ATA_PROT_DMA;
223 cmd = ata_rw_cmds[index + fua + lba48 + write];
232 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
233 * @pio_mask: pio_mask
234 * @mwdma_mask: mwdma_mask
235 * @udma_mask: udma_mask
237 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
238 * unsigned int xfer_mask.
246 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
247 unsigned int mwdma_mask,
248 unsigned int udma_mask)
250 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
251 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
252 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
256 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
257 * @xfer_mask: xfer_mask to unpack
258 * @pio_mask: resulting pio_mask
259 * @mwdma_mask: resulting mwdma_mask
260 * @udma_mask: resulting udma_mask
262 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
263 * Any NULL distination masks will be ignored.
265 static void ata_unpack_xfermask(unsigned int xfer_mask,
266 unsigned int *pio_mask,
267 unsigned int *mwdma_mask,
268 unsigned int *udma_mask)
271 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
273 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
275 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
278 static const struct ata_xfer_ent {
279 unsigned int shift, bits;
282 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
283 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
284 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
289 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
290 * @xfer_mask: xfer_mask of interest
292 * Return matching XFER_* value for @xfer_mask. Only the highest
293 * bit of @xfer_mask is considered.
299 * Matching XFER_* value, 0 if no match found.
301 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
303 int highbit = fls(xfer_mask) - 1;
304 const struct ata_xfer_ent *ent;
306 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
307 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
308 return ent->base + highbit - ent->shift;
313 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
314 * @xfer_mode: XFER_* of interest
316 * Return matching xfer_mask for @xfer_mode.
322 * Matching xfer_mask, 0 if no match found.
324 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
326 const struct ata_xfer_ent *ent;
328 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
329 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
330 return 1 << (ent->shift + xfer_mode - ent->base);
335 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
336 * @xfer_mode: XFER_* of interest
338 * Return matching xfer_shift for @xfer_mode.
344 * Matching xfer_shift, -1 if no match found.
346 static int ata_xfer_mode2shift(unsigned int xfer_mode)
348 const struct ata_xfer_ent *ent;
350 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
351 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
357 * ata_mode_string - convert xfer_mask to string
358 * @xfer_mask: mask of bits supported; only highest bit counts.
360 * Determine string which represents the highest speed
361 * (highest bit in @xfer_mask).
367 * Constant C string representing highest speed listed in
368 * @xfer_mask, or the constant C string "<n/a>".
/* NOTE(review): extraction gaps — the xfer_mode_str[] table entries
 * (original lines 373-391), the "int highbit;" declaration, and the
 * fallback 'return "<n/a>";' are missing from this chunk. */
370 static const char *ata_mode_string(unsigned int xfer_mask)
372 static const char * const xfer_mode_str[] = {
/* index of highest set bit selects the name; fls() is 1-based, hence -1 */
392 highbit = fls(xfer_mask) - 1;
393 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
394 return xfer_mode_str[highbit];
/* Mark @dev as absent/disabled, warning once if it was present.
 * NOTE(review): extraction gap — the printk arguments, the class/xfermask
 * clearing and the closing braces (original lines 399, 402-405) are
 * missing from this chunk. */
398 static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
400 if (ata_dev_present(dev)) {
401 printk(KERN_WARNING "ata%u: dev %u disabled\n",
408 * ata_pio_devchk - PATA device presence detection
409 * @ap: ATA channel to examine
410 * @device: Device to examine (starting at zero)
412 * This technique was originally described in
413 * Hale Landis's ATADRVR (www.ata-atapi.com), and
414 * later found its way into the ATA/ATAPI spec.
416 * Write a pattern to the ATA shadow registers,
417 * and if a device is present, it will respond by
418 * correctly storing and echoing back the
419 * ATA shadow register contents.
425 static unsigned int ata_pio_devchk(struct ata_port *ap,
428 struct ata_ioports *ioaddr = &ap->ioaddr;
431 ap->ops->dev_select(ap, device);
433 outb(0x55, ioaddr->nsect_addr);
434 outb(0xaa, ioaddr->lbal_addr);
436 outb(0xaa, ioaddr->nsect_addr);
437 outb(0x55, ioaddr->lbal_addr);
439 outb(0x55, ioaddr->nsect_addr);
440 outb(0xaa, ioaddr->lbal_addr);
442 nsect = inb(ioaddr->nsect_addr);
443 lbal = inb(ioaddr->lbal_addr);
445 if ((nsect == 0x55) && (lbal == 0xaa))
446 return 1; /* we found a device */
448 return 0; /* nothing found */
452 * ata_mmio_devchk - PATA device presence detection
453 * @ap: ATA channel to examine
454 * @device: Device to examine (starting at zero)
456 * This technique was originally described in
457 * Hale Landis's ATADRVR (www.ata-atapi.com), and
458 * later found its way into the ATA/ATAPI spec.
460 * Write a pattern to the ATA shadow registers,
461 * and if a device is present, it will respond by
462 * correctly storing and echoing back the
463 * ATA shadow register contents.
469 static unsigned int ata_mmio_devchk(struct ata_port *ap,
472 struct ata_ioports *ioaddr = &ap->ioaddr;
475 ap->ops->dev_select(ap, device);
477 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
478 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
480 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
481 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
483 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
484 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
486 nsect = readb((void __iomem *) ioaddr->nsect_addr);
487 lbal = readb((void __iomem *) ioaddr->lbal_addr);
489 if ((nsect == 0x55) && (lbal == 0xaa))
490 return 1; /* we found a device */
492 return 0; /* nothing found */
496 * ata_devchk - PATA device presence detection
497 * @ap: ATA channel to examine
498 * @device: Device to examine (starting at zero)
500 * Dispatch ATA device presence detection, depending
501 * on whether we are using PIO or MMIO to talk to the
502 * ATA shadow registers.
508 static unsigned int ata_devchk(struct ata_port *ap,
511 if (ap->flags & ATA_FLAG_MMIO)
512 return ata_mmio_devchk(ap, device);
513 return ata_pio_devchk(ap, device);
517 * ata_dev_classify - determine device type based on ATA-spec signature
518 * @tf: ATA taskfile register set for device to be identified
520 * Determine from taskfile register contents whether a device is
521 * ATA or ATAPI, as per "Signature and persistence" section
522 * of ATA/PI spec (volume 1, sect 5.14).
528 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
529 * the event of failure.
532 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
534 /* Apple's open source Darwin code hints that some devices only
535 * put a proper signature into the LBA mid/high registers,
536 * So, we only check those. It's sufficient for uniqueness.
539 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
540 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
541 DPRINTK("found ATA device by sig\n");
545 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
546 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
547 DPRINTK("found ATAPI device by sig\n");
548 return ATA_DEV_ATAPI;
551 DPRINTK("unknown device\n");
552 return ATA_DEV_UNKNOWN;
556 * ata_dev_try_classify - Parse returned ATA device signature
557 * @ap: ATA channel to examine
558 * @device: Device to examine (starting at zero)
559 * @r_err: Value of error register on completion
561 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
562 * an ATA/ATAPI-defined set of values is placed in the ATA
563 * shadow registers, indicating the results of device detection
566 * Select the ATA device, and read the values from the ATA shadow
567 * registers. Then parse according to the Error register value,
568 * and the spec-defined values examined by ata_dev_classify().
574 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
/* NOTE(review): extraction gaps — the "static unsigned int" part of the
 * signature, the "u8 err;" / "class" declarations, the diag-pass tests and
 * the ATA_DEV_NONE/r_err return paths (original lines 577, 581-583, 585,
 * 587, 589-600, 603, 605, 607-609) are missing from this chunk. */
578 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
580 struct ata_taskfile tf;
584 ap->ops->dev_select(ap, device);
586 memset(&tf, 0, sizeof(tf));
588 ap->ops->tf_read(ap, &tf);
593 /* see if device passed diags */
/* 0x81 from device 0 reportedly means "device 0 OK, device 1 failed" */
596 else if ((device == 0) && (err == 0x81))
601 /* determine if device is ATA or ATAPI */
602 class = ata_dev_classify(&tf);
604 if (class == ATA_DEV_UNKNOWN)
/* status of 0 with an ATA classification is treated as no device */
606 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
612 * ata_id_string - Convert IDENTIFY DEVICE page into string
613 * @id: IDENTIFY DEVICE results we will examine
614 * @s: string into which data is output
615 * @ofs: offset into identify device page
616 * @len: length of string to return. must be an even number.
618 * The strings in the IDENTIFY DEVICE page are broken up into
619 * 16-bit chunks. Run through the string, and output each
620 * 8-bit chunk linearly, regardless of platform.
/* NOTE(review): extraction gap — the entire function body (the loop that
 * splits each 16-bit word into two bytes, original lines 628-644) is
 * missing from this chunk; only the signature survives. */
626 void ata_id_string(const u16 *id, unsigned char *s,
627 unsigned int ofs, unsigned int len)
646 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
647 * @id: IDENTIFY DEVICE results we will examine
648 * @s: string into which data is output
649 * @ofs: offset into identify device page
650 * @len: length of string to return. must be an odd number.
652 * This function is identical to ata_id_string except that it
653 * trims trailing spaces and terminates the resulting string with
654 * null. @len must be actual maximum length (even number) + 1.
659 void ata_id_c_string(const u16 *id, unsigned char *s,
660 unsigned int ofs, unsigned int len)
666 ata_id_string(id, s, ofs, len - 1);
668 p = s + strnlen(s, len - 1);
669 while (p > s && p[-1] == ' ')
674 static u64 ata_id_n_sectors(const u16 *id)
676 if (ata_id_has_lba(id)) {
677 if (ata_id_has_lba48(id))
678 return ata_id_u64(id, 100);
680 return ata_id_u32(id, 60);
682 if (ata_id_current_chs_valid(id))
683 return ata_id_u32(id, 57);
685 return id[1] * id[3] * id[6];
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
707 * ata_std_dev_select - Select device 0/1 on ATA bus
708 * @ap: ATA channel to manipulate
709 * @device: ATA device (numbered from zero) to select
711 * Use the method defined in the ATA specification to
712 * make either device 0, or device 1, active on the
713 * ATA channel. Works with both PIO and MMIO.
715 * May be used as the dev_select() entry in ata_port_operations.
721 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
726 tmp = ATA_DEVICE_OBS;
728 tmp = ATA_DEVICE_OBS | ATA_DEV1;
730 if (ap->flags & ATA_FLAG_MMIO) {
731 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
733 outb(tmp, ap->ioaddr.device_addr);
735 ata_pause(ap); /* needed; also flushes, for mmio */
739 * ata_dev_select - Select device 0/1 on ATA bus
740 * @ap: ATA channel to manipulate
741 * @device: ATA device (numbered from zero) to select
742 * @wait: non-zero to wait for Status register BSY bit to clear
743 * @can_sleep: non-zero if context allows sleeping
745 * Use the method defined in the ATA specification to
746 * make either device 0, or device 1, active on the
749 * This is a high-level version of ata_std_dev_select(),
750 * which additionally provides the services of inserting
751 * the proper pauses and status polling, where needed.
/* NOTE(review): extraction gaps — the pre-select BSY wait, the post-select
 * ATAPI settle delay body and the trailing wait (original lines 762-765,
 * 767-768, 770-772) are missing from this chunk. */
757 void ata_dev_select(struct ata_port *ap, unsigned int device,
758 unsigned int wait, unsigned int can_sleep)
760 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
761 ap->id, device, wait);
766 ap->ops->dev_select(ap, device);
/* ATAPI devices may need extra settle time after selection when sleeping
 * is permitted — the delay statement itself is not visible here */
769 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
776 * ata_dump_id - IDENTIFY DEVICE info debugging output
777 * @id: IDENTIFY DEVICE page to dump
779 * Dump selected 16-bit words from the given IDENTIFY DEVICE
/* NOTE(review): extraction gaps — the braces, the remaining format strings
 * and all the id[...] arguments of the three DPRINTK calls (original lines
 * 787, 789-797, 799-807, 809-813) are missing from this chunk. */
786 static inline void ata_dump_id(const u16 *id)
788 DPRINTK("49==0x%04x "
798 DPRINTK("80==0x%04x "
808 DPRINTK("88==0x%04x "
815 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
816 * @id: IDENTIFY data to compute xfer mask from
818 * Compute the xfermask for this device. This is not as trivial
819 * as it seems if we must consider early devices correctly.
821 * FIXME: pre IDE drive timing (do we care ?).
/* NOTE(review): extraction gaps — the opening brace, the else keyword before
 * the legacy-PIO path, the udma_mask default and the closing brace (original
 * lines 830, 832, 836-838, 841-842, 850-852, 854-855, 858, 860) are missing
 * from this chunk. */
829 static unsigned int ata_id_xfermask(const u16 *id)
831 unsigned int pio_mask, mwdma_mask, udma_mask;
833 /* Usual case. Word 53 indicates word 64 is valid */
834 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
835 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
839 /* If word 64 isn't valid then Word 51 high byte holds
840 * the PIO timing number for the maximum. Turn it into
843 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
845 /* But wait.. there's more. Design your standards by
846 * committee and you too can get a free iordy field to
847 * process. However its the speeds not the modes that
848 * are supported... Note drivers using the timing API
849 * will get this right anyway
853 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
/* word 53 bit 2 => word 88 (UDMA modes) is valid */
856 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
857 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
859 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
863 * ata_port_queue_task - Queue port_task
864 * @ap: The ata_port to queue port_task for
866 * Schedule @fn(@data) for execution after @delay jiffies using
867 * port_task. There is one port_task per port and it's the
868 * user(low level driver)'s responsibility to make sure that only
869 * one task is active at any given time.
871 * libata core layer takes care of synchronization between
872 * port_task and EH. ata_port_queue_task() may be ignored for EH
876 * Inherited from caller.
/* NOTE(review): extraction gaps — the @delay parameter on the signature,
 * the "int rc;" declaration, the early-return when a flush is in progress,
 * and the if/else that picks immediate vs delayed queueing (original lines
 * 879-882, 884-885, 887-888, 890, 892, 894-895) are missing here. */
878 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
883 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
886 PREPARE_WORK(&ap->port_task, fn, data);
889 rc = queue_work(ata_wq, &ap->port_task);
891 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
893 /* rc == 0 means that another user is using port task */
898 * ata_port_flush_task - Flush port_task
899 * @ap: The ata_port to flush port_task for
901 * After this function completes, port_task is guaranteed not to
902 * be running or scheduled.
905 * Kernel thread context (may sleep)
/* NOTE(review): extraction gaps — the opening brace, the "unsigned long
 * flags;" declaration and the ENTER/EXIT DPRINTKs (original lines 908-912,
 * 916, 919-920, 928-929, 933-935) are missing from this chunk. */
907 void ata_port_flush_task(struct ata_port *ap)
/* Set the FLUSH flag under the host lock so a running task observes it */
913 spin_lock_irqsave(&ap->host_set->lock, flags);
914 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
915 spin_unlock_irqrestore(&ap->host_set->lock, flags);
917 DPRINTK("flush #1\n");
918 flush_workqueue(ata_wq);
921 * At this point, if a task is running, it's guaranteed to see
922 * the FLUSH flag; thus, it will never queue pio tasks again.
/* cancel_delayed_work() failing means the work already started —
 * flush the workqueue a second time to let it finish */
925 if (!cancel_delayed_work(&ap->port_task)) {
926 DPRINTK("flush #2\n");
927 flush_workqueue(ata_wq);
930 spin_lock_irqsave(&ap->host_set->lock, flags);
931 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
932 spin_unlock_irqrestore(&ap->host_set->lock, flags);
937 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
939 struct completion *waiting = qc->private_data;
941 qc->ap->ops->tf_read(qc->ap, &qc->tf);
946 * ata_exec_internal - execute libata internal command
947 * @ap: Port to which the command is sent
948 * @dev: Device to which the command is sent
949 * @tf: Taskfile registers for the command and the result
950 * @dma_dir: Data transfer direction of the command
951 * @buf: Data buffer of the command
952 * @buflen: Length of data buffer
954 * Executes libata internal command with timeout. @tf contains
955 * command on entry and result on return. Timeout and error
956 * conditions are reported via return value. No recovery action
957 * is taken after a command times out. It's caller's duty to
958 * clean up after timeout.
961 * None. Should be called with kernel context, might sleep.
/* NOTE(review): extraction gaps — the "unsigned int" return type line, the
 * "unsigned long flags;" declaration, qc taskfile setup, the error-path
 * printk arguments, qc free, result-TF copyback and the port re-enable +
 * return tail (original lines 963-964, 968, 972, 974, 976, 978-980, 985-986,
 * 989, 991-993, 995, 998, 1000, 1006, 1011-1013, 1015-1017, 1019-1021,
 * 1025-1026, 1030, 1032, 1035-1039) are missing from this chunk. */
965 ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
966 struct ata_taskfile *tf,
967 int dma_dir, void *buf, unsigned int buflen)
969 u8 command = tf->command;
970 struct ata_queued_cmd *qc;
971 DECLARE_COMPLETION(wait);
973 unsigned int err_mask;
975 spin_lock_irqsave(&ap->host_set->lock, flags);
977 qc = ata_qc_new_init(ap, dev);
981 qc->dma_dir = dma_dir;
982 if (dma_dir != DMA_NONE) {
983 ata_sg_init_one(qc, buf, buflen);
984 qc->nsect = buflen / ATA_SECT_SIZE;
/* completion is signalled from ata_qc_complete_internal() */
987 qc->private_data = &wait;
988 qc->complete_fn = ata_qc_complete_internal;
990 qc->err_mask = ata_qc_issue(qc);
994 spin_unlock_irqrestore(&ap->host_set->lock, flags);
996 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
997 ata_port_flush_task(ap);
999 spin_lock_irqsave(&ap->host_set->lock, flags);
1001 /* We're racing with irq here. If we lose, the
1002 * following test prevents us from completing the qc
1003 * again. If completion irq occurs after here but
1004 * before the caller cleans up, it will result in a
1005 * spurious interrupt. We can live with that.
1007 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1008 qc->err_mask = AC_ERR_TIMEOUT;
1009 ata_qc_complete(qc);
1010 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1014 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1018 err_mask = qc->err_mask;
1022 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1023 * Until those drivers are fixed, we detect the condition
1024 * here, fail the command with AC_ERR_SYSTEM and reenable the
1027 * Note that this doesn't change any behavior as internal
1028 * command failure results in disabling the device in the
1029 * higher layer for LLDDs without new reset/EH callbacks.
1031 * Kill the following code as soon as those drivers are fixed.
1033 if (ap->flags & ATA_FLAG_PORT_DISABLED) {
1034 err_mask |= AC_ERR_SYSTEM;
1042 * ata_pio_need_iordy - check if iordy needed
1045 * Check if the current speed of the device requires IORDY. Used
1046 * by various controllers for chip configuration.
/* NOTE(review): extraction gaps — the opening brace, the drive-specific
 * rule lookup and all return statements (original lines 1050-1051,
 * 1053-1058, 1060, 1064, 1067-1073) are missing from this chunk. */
1049 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1052 int speed = adev->pio_mode - XFER_PIO_0;
1059 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1061 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1062 pio = adev->id[ATA_ID_EIDE_PIO];
1063 /* Is the speed faster than the drive allows non IORDY ? */
1065 /* This is cycle times not frequency - watch the logic! */
1066 if (pio > 240) /* PIO2 is 240nS per cycle */
1075 * ata_dev_read_id - Read ID data from the specified device
1076 * @ap: port on which target device resides
1077 * @dev: target device
1078 * @p_class: pointer to class of the target device (may be changed)
1079 * @post_reset: is this read ID post-reset?
1080 * @p_id: read IDENTIFY page (newly allocated)
1082 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1083 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1084 * devices. This function also takes care of EDD signature
1085 * misreporting (to be removed once EDD support is gone) and
1086 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
1089 * Kernel thread context (may sleep)
1092 * 0 on success, -errno otherwise.
/* NOTE(review): extraction gaps — local declarations (id, rc, reason), the
 * using_edd assignments, the retry/goto labels, success-path *p_class/*p_id
 * assignment and the err_out cleanup tail (original lines 1096, 1101-1104,
 * 1106, 1109-1112, 1114, 1116-1117, 1119-1122, 1124-1126, 1128-1129,
 * 1131-1133, 1135-1137, 1139, 1142-1144, 1146, 1148-1150, 1157, 1160,
 * 1165-1170, 1172-1173, 1175, 1177-1179, 1181, 1183-1184, 1186, 1188,
 * 1191-1192, 1194-1196, 1199-1209, 1212-1214) are missing from this chunk. */
1094 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1095 unsigned int *p_class, int post_reset, u16 **p_id)
1097 unsigned int class = *p_class;
1098 unsigned int using_edd;
1099 struct ata_taskfile tf;
1100 unsigned int err_mask = 0;
1105 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
/* presumably sets using_edd based on whether SRST is in use — the
 * assignment itself is not visible here */
1107 if (ap->ops->probe_reset ||
1108 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1113 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1115 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1118 reason = "out of memory";
1123 ata_tf_init(ap, &tf, dev->devno);
/* choose IDENTIFY opcode by class: ATA vs ATAPI; other classes bail */
1127 tf.command = ATA_CMD_ID_ATA;
1130 tf.command = ATA_CMD_ID_ATAPI;
1134 reason = "unsupported class";
1138 tf.protocol = ATA_PROT_PIO;
1140 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1141 id, sizeof(id[0]) * ATA_ID_WORDS);
1145 reason = "I/O error";
1147 if (err_mask & ~AC_ERR_DEV)
1151 * arg! EDD works for all test cases, but seems to return
1152 * the ATA signature for some ATAPI devices. Until the
1153 * reason for this is found and fixed, we fix up the mess
1154 * here. If IDENTIFY DEVICE returns command aborted
1155 * (as ATAPI devices do), then we issue an
1156 * IDENTIFY PACKET DEVICE.
1158 * ATA software reset (SRST, the default) does not appear
1159 * to have this problem.
1161 if ((using_edd) && (class == ATA_DEV_ATA)) {
1162 u8 err = tf.feature;
1163 if (err & ATA_ABORTED) {
1164 class = ATA_DEV_ATAPI;
/* IDENTIFY data is little-endian on the wire; fix up for this CPU */
1171 swap_buf_le16(id, ATA_ID_WORDS);
1174 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1176 reason = "device reports illegal type";
1180 if (post_reset && class == ATA_DEV_ATA) {
1182 * The exact sequence expected by certain pre-ATA4 drives is:
1185 * INITIALIZE DEVICE PARAMETERS
1187 * Some drives were very specific about that exact sequence.
1189 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1190 err_mask = ata_dev_init_params(ap, dev);
1193 reason = "INIT_DEV_PARAMS failed";
1197 /* current CHS translation info (id[53-58]) might be
1198 * changed. reread the identify device info.
1210 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1211 ap->id, dev->devno, reason);
1216 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1217 struct ata_device *dev)
1219 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1223 * ata_dev_configure - Configure the specified ATA/ATAPI device
1224 * @ap: Port on which target device resides
1225 * @dev: Target device to configure
1226 * @print_info: Enable device info printout
1228 * Configure @dev according to @dev->id. Generic and low-level
1229 * driver specific fixups are also applied.
1232 * Kernel thread context (may sleep)
1235 * 0 on success, -errno otherwise
/* NOTE(review): extraction gaps — the @print_info parameter line, local
 * declarations (i, rc), many print_info guards, the LBA48 desc strings,
 * heads assignment, returns and the err_out_nosup tail (original lines
 * 1238-1239, 1242-1243, 1247-1249, 1251, 1253, 1258, 1260, 1262-1268,
 * 1270-1271, 1274-1276, 1280, 1283-1284, 1288-1290, 1292, 1295,
 * 1299-1302, 1305, 1307, 1313-1314, 1316, 1319, 1324-1328, 1334-1336,
 * 1338, 1340, 1343-1344, 1350, 1353, 1358-1359, 1362, 1364-1366,
 * 1368-1369) are missing from this chunk. */
1237 static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1240 const u16 *id = dev->id;
1241 unsigned int xfer_mask;
1244 if (!ata_dev_present(dev)) {
1245 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1246 ap->id, dev->devno);
1250 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1252 /* print device capabilities */
1254 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1255 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1256 ap->id, dev->devno, id[49], id[82], id[83],
1257 id[84], id[85], id[86], id[87], id[88]);
1259 /* initialize to-be-configured parameters */
1261 dev->max_sectors = 0;
1269 * common ATA, ATAPI feature tests
1272 /* find max transfer mode; for printk only */
1273 xfer_mask = ata_id_xfermask(id);
1277 /* ATA-specific feature tests */
1278 if (dev->class == ATA_DEV_ATA) {
1279 dev->n_sectors = ata_id_n_sectors(id);
1281 if (ata_id_has_lba(id)) {
1282 const char *lba_desc;
1285 dev->flags |= ATA_DFLAG_LBA;
1286 if (ata_id_has_lba48(id)) {
1287 dev->flags |= ATA_DFLAG_LBA48;
1291 /* print device info to dmesg */
1293 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1294 "max %s, %Lu sectors: %s\n",
1296 ata_id_major_version(id),
1297 ata_mode_string(xfer_mask),
1298 (unsigned long long)dev->n_sectors,
/* CHS fallback path for non-LBA devices */
1303 /* Default translation */
1304 dev->cylinders = id[1];
1306 dev->sectors = id[6];
1308 if (ata_id_current_chs_valid(id)) {
1309 /* Current CHS translation is valid. */
1310 dev->cylinders = id[54];
1311 dev->heads = id[55];
1312 dev->sectors = id[56];
1315 /* print device info to dmesg */
1317 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1318 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1320 ata_id_major_version(id),
1321 ata_mode_string(xfer_mask),
1322 (unsigned long long)dev->n_sectors,
1323 dev->cylinders, dev->heads, dev->sectors);
1329 /* ATAPI-specific feature tests */
1330 else if (dev->class == ATA_DEV_ATAPI) {
1331 rc = atapi_cdb_len(id);
1332 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1333 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1337 dev->cdb_len = (unsigned int) rc;
1339 /* print device info to dmesg */
1341 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1342 ap->id, dev->devno, ata_mode_string(xfer_mask));
/* recompute SCSI host max_cmd_len over all devices on this port */
1345 ap->host->max_cmd_len = 0;
1346 for (i = 0; i < ATA_MAX_DEVICES; i++)
1347 ap->host->max_cmd_len = max_t(unsigned int,
1348 ap->host->max_cmd_len,
1349 ap->device[i].cdb_len);
1351 /* limit bridge transfers to udma5, 200 sectors */
1352 if (ata_dev_knobble(ap, dev)) {
1354 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1355 ap->id, dev->devno);
1356 dev->udma_mask &= ATA_UDMA5;
1357 dev->max_sectors = ATA_MAX_SECTORS;
/* give the LLDD a chance to apply controller-specific fixups */
1360 if (ap->ops->dev_config)
1361 ap->ops->dev_config(ap, dev);
1363 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1367 DPRINTK("EXIT, err\n");
1372 * ata_bus_probe - Reset and probe ATA bus
1375 * Master ATA bus probing function. Initiates a hardware-dependent
1376 * bus reset, then attempts to identify any devices found on
1380 * PCI/etc. bus probe sem.
1383 * Zero on success, non-zero on error.
/* NOTE(review): extraction gaps — several local lines, the "goto retry" /
 * "continue" statements, found counting, ata_set_mode() call, the success
 * return and the err_out_disable label body (original lines 1387,
 * 1390-1392, 1396, 1399, 1401-1403, 1405, 1409-1412, 1416, 1420, 1422,
 * 1424-1425, 1429-1431, 1434-1440, 1442-1443, 1446-1449, 1451-1452) are
 * missing from this chunk. */
1386 static int ata_bus_probe(struct ata_port *ap)
1388 unsigned int classes[ATA_MAX_DEVICES];
1389 unsigned int i, rc, found = 0;
1393 /* reset and determine device classes */
1394 for (i = 0; i < ATA_MAX_DEVICES; i++)
1395 classes[i] = ATA_DEV_UNKNOWN;
/* new-style probe_reset if available, otherwise legacy phy_reset */
1397 if (ap->ops->probe_reset) {
1398 rc = ap->ops->probe_reset(ap, classes);
1400 printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
1404 ap->ops->phy_reset(ap);
1406 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1407 for (i = 0; i < ATA_MAX_DEVICES; i++)
1408 classes[i] = ap->device[i].class;
/* anything still unknown after reset is treated as absent */
1413 for (i = 0; i < ATA_MAX_DEVICES; i++)
1414 if (classes[i] == ATA_DEV_UNKNOWN)
1415 classes[i] = ATA_DEV_NONE;
1417 /* read IDENTIFY page and configure devices */
1418 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1419 struct ata_device *dev = &ap->device[i];
1421 dev->class = classes[i];
1423 if (!ata_dev_present(dev))
1426 WARN_ON(dev->id != NULL);
1427 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1428 dev->class = ATA_DEV_NONE;
1432 if (ata_dev_configure(ap, dev, 1)) {
1433 ata_dev_disable(ap, dev);
1441 goto err_out_disable;
1444 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1445 goto err_out_disable;
1450 ap->ops->port_disable(ap);
1455 * ata_port_probe - Mark port as enabled
1456 * @ap: Port for which we indicate enablement
1458 * Modify @ap data structure such that the system
1459 * thinks that the entire port is enabled.
1461 * LOCKING: host_set lock, or some other form of
1465 void ata_port_probe(struct ata_port *ap)
1467 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1471 * sata_print_link_status - Print SATA link status
1472 * @ap: SATA port to printk link status about
1474 * This function prints link speed and status of a SATA link.
/* NOTE(review): extraction gaps — the opening brace, the sstatus/tmp/speed
 * declarations, the 1.5 Gbps branch and the closing lines (original lines
 * 1480-1483, 1485-1486, 1488, 1491-1492, 1494-1495, 1499, 1501-1503) are
 * missing from this chunk. */
1479 static void sata_print_link_status(struct ata_port *ap)
/* ports without SCR access cannot report link status */
1484 if (!ap->ops->scr_read)
1487 sstatus = scr_read(ap, SCR_STATUS);
1489 if (sata_dev_present(ap)) {
/* SStatus bits 7:4 (SPD) encode the negotiated interface speed */
1490 tmp = (sstatus >> 4) & 0xf;
1493 else if (tmp & (1 << 1))
1496 speed = "<unknown>";
1497 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1498 ap->id, speed, sstatus);
1500 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1506 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1507 * @ap: SATA port associated with target SATA PHY.
1509 * This function issues commands to standard SATA Sxxx
1510 * PHY registers, to wake up the phy (and device), and
1511 * clear any reset condition.
1514 * PCI/etc. bus probe sem.
/* NOTE(review): extraction gaps — the opening brace, the "u32 sstatus;"
 * declaration, the msleep() calls, the do-loop opening, the TODO branch
 * body, early returns and the closing brace (original lines 1518-1519,
 * 1521, 1527-1528, 1530, 1532-1533, 1536, 1538, 1541, 1544-1545, 1547,
 * 1549-1550, 1553-1555, 1557) are missing from this chunk. */
1517 void __sata_phy_reset(struct ata_port *ap)
1520 unsigned long timeout = jiffies + (HZ * 5);
1522 if (ap->flags & ATA_FLAG_SATA_RESET) {
1523 /* issue phy wake/reset */
1524 scr_write_flush(ap, SCR_CONTROL, 0x301);
1525 /* Couldn't find anything in SATA I/II specs, but
1526 * AHCI-1.1 10.4.2 says at least 1 ms. */
1529 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1531 /* wait for phy to become ready, if necessary */
/* poll SStatus DET field until it leaves state 1 or 5 s elapse */
1534 sstatus = scr_read(ap, SCR_STATUS);
1535 if ((sstatus & 0xf) != 1)
1537 } while (time_before(jiffies, timeout));
1539 /* print link status */
1540 sata_print_link_status(ap);
1542 /* TODO: phy layer with polling, timeouts, etc. */
1543 if (sata_dev_present(ap))
1546 ata_port_disable(ap);
1548 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1551 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1552 ata_port_disable(ap);
1556 ap->cbl = ATA_CBL_SATA;
1560 * sata_phy_reset - Reset SATA bus.
1561 * @ap: SATA port associated with target SATA PHY.
1563 * This function resets the SATA bus, and then probes
1564 * the bus for devices.
1567 * PCI/etc. bus probe sem.
1570 void sata_phy_reset(struct ata_port *ap)
/* Wake/reset the phy; if that disabled the port there is nothing
 * left to probe, so return early (the probe call that follows is
 * not visible in this extract).
 */
1572 	__sata_phy_reset(ap);
1573 	if (ap->flags & ATA_FLAG_PORT_DISABLED)
1579 * ata_port_disable - Disable port.
1580 * @ap: Port to be disabled.
1582 * Modify @ap data structure such that the system
1583 * thinks that the entire port is disabled, and should
1584 * never attempt to probe or communicate with devices
1587 * LOCKING: host_set lock, or some other form of
1591 void ata_port_disable(struct ata_port *ap)
1593 ap->device[0].class = ATA_DEV_NONE;
1594 ap->device[1].class = ATA_DEV_NONE;
1595 ap->flags |= ATA_FLAG_PORT_DISABLED;
1599 * This mode timing computation functionality is ported over from
1600 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1603 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1604 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1605 * for PIO 5, which is a nonstandard extension and UDMA6, which
1606 * is currently supported only by Maxtor drives.
/* Column order matches struct ata_timing as used by
 * ata_timing_quantize() below:
 *   { mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma }
 * UDMA entries only carry the udma cycle time; PIO/MWDMA entries
 * leave udma at 0.  Terminated by the 0xFF sentinel used in
 * ata_timing_find_mode() (sentinel row not visible in this extract).
 */
1609 static const struct ata_timing ata_timing[] = {
1611 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
1612 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
1613 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
1614 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
1616 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
1617 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
1618 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
1620 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
1622 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
1623 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
1624 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
1626 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
1627 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
1628 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
1630 /*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
1631 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
1632 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
1634 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
1635 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
1636 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
1638 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
/* ENOUGH(v,unit): ceil(v/unit) for v > 0.
 * EZ(v,unit): same, but maps v == 0 to 0 (field not used by mode).
 */
1643 #define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
1644 #define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
/* Convert the nanosecond timings in *t into bus clock counts in *q,
 * rounding up via EZ()/ENOUGH().  Values are multiplied by 1000 before
 * dividing by @T (PIO/DMA clock period) and @UT (UDMA clock period);
 * presumably T/UT are in picosecond-scale units — confirm against
 * callers.  Zero source fields stay zero.  *q may alias *t.
 */
1646 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1648 	q->setup   = EZ(t->setup   * 1000,  T);
1649 	q->act8b   = EZ(t->act8b   * 1000,  T);
1650 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
1651 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
1652 	q->active  = EZ(t->active  * 1000,  T);
1653 	q->recover = EZ(t->recover * 1000,  T);
1654 	q->cycle   = EZ(t->cycle   * 1000,  T);
/* Only the UDMA cycle time uses the UDMA clock period. */
1655 	q->udma    = EZ(t->udma    * 1000, UT);
1658 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1659 struct ata_timing *m, unsigned int what)
1661 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1662 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1663 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1664 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1665 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1666 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1667 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1668 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1671 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1673 const struct ata_timing *t;
1675 for (t = ata_timing; t->mode != speed; t++)
1676 if (t->mode == 0xFF)
/* Compute the final timing set for @adev at transfer mode @speed:
 * look up the base timing, widen it with any EIDE cycle times the
 * drive reported in IDENTIFY, quantize to bus clocks (@T/@UT), make
 * DMA timings no faster than the drive's fastest PIO timing, and
 * finally stretch active/recovery so they fill the full cycle.
 */
1681 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1682 		       struct ata_timing *t, int T, int UT)
1684 	const struct ata_timing *s;
1685 	struct ata_timing p;
/* Unknown mode: error return (value not visible in this extract). */
1691 	if (!(s = ata_timing_find_mode(speed)))
1694 	memcpy(t, s, sizeof(*s));
1697 	 * If the drive is an EIDE drive, it can tell us it needs extended
1698 	 * PIO/MW_DMA cycle timing.
1701 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
1702 		memset(&p, 0, sizeof(p));
/* Word 67 (no IORDY) for PIO0-2, word 68 (with IORDY) above that. */
1703 		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1704 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1705 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1706 		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1707 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
/* Take the slower of the table timing and the drive's request. */
1709 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1713 	 * Convert the timing to bus clock counts.
1716 	ata_timing_quantize(t, t, T, UT);
1719 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1720 	 * S.M.A.R.T * and some other commands. We have to ensure that the
1721 	 * DMA cycle timing is slower/equal than the fastest PIO timing.
/* Recurse for the currently selected PIO mode and merge it in. */
1724 	if (speed > XFER_PIO_4) {
1725 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1726 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1730 	 * Lengthen active & recovery time so that cycle time is correct.
/* Split the slack between active and recovery (8-bit transfers). */
1733 	if (t->act8b + t->rec8b < t->cyc8b) {
1734 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1735 		t->rec8b = t->cyc8b - t->act8b;
/* Same adjustment for the 16-bit data transfer cycle. */
1738 	if (t->active + t->recover < t->cycle) {
1739 		t->active += (t->cycle - (t->active + t->recover)) / 2;
1740 		t->recover = t->cycle - t->active;
/* Program @dev's transfer mode: issue SET FEATURES - XFER MODE, then
 * revalidate the device to make sure it is still there and sane.
 * Returns 0 on success; error paths are only partly visible in this
 * extract, but both the xfermode and the revalidation failures are
 * reported via printk.
 */
1746 static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1748 	unsigned int err_mask;
/* PIO modes need the flag so the data-transfer path uses PIO. */
1751 	if (dev->xfer_shift == ATA_SHIFT_PIO)
1752 		dev->flags |= ATA_DFLAG_PIO;
1754 	err_mask = ata_dev_set_xfermode(ap, dev);
1757 		       "ata%u: failed to set xfermode (err_mask=0x%x)\n",
/* Re-read IDENTIFY; the device may have changed behavior after the
 * mode switch.
 */
1762 	rc = ata_dev_revalidate(ap, dev, 0);
1765 		       "ata%u: failed to revalidate after set xfermode\n",
1770 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1771 		dev->xfer_shift, (int)dev->xfer_mode);
1773 	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1775 	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
/* Apply the already-computed PIO mode of every present device via the
 * LLDD's ->set_piomode hook.  A present device without a usable PIO
 * mode is reported; the exact failure handling is not visible in this
 * extract.
 */
1779 static int ata_host_set_pio(struct ata_port *ap)
1783 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
1784 		struct ata_device *dev = &ap->device[i];
1786 		if (!ata_dev_present(dev))
/* Every ATA device must support some PIO mode; none is an error. */
1789 		if (!dev->pio_mode) {
1790 			printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
1794 		dev->xfer_mode = dev->pio_mode;
1795 		dev->xfer_shift = ATA_SHIFT_PIO;
/* ->set_piomode is optional; some controllers need no programming. */
1796 		if (ap->ops->set_piomode)
1797 			ap->ops->set_piomode(ap, dev);
/* Apply the already-computed DMA mode of every present device that has
 * one, via the LLDD's optional ->set_dmamode hook.  Devices without a
 * DMA mode simply stay in PIO.
 */
1803 static void ata_host_set_dma(struct ata_port *ap)
1807 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
1808 		struct ata_device *dev = &ap->device[i];
1810 		if (!ata_dev_present(dev) || !dev->dma_mode)
1813 		dev->xfer_mode = dev->dma_mode;
1814 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1815 		if (ap->ops->set_dmamode)
1816 			ap->ops->set_dmamode(ap, dev);
1821 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1822 * @ap: port on which timings will be programmed
1824 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1827 * PCI/etc. bus probe sem.
1829 static void ata_set_mode(struct ata_port *ap)
1833 	/* step 1: calculate xfer_mask */
1834 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
1835 		struct ata_device *dev = &ap->device[i];
1836 		unsigned int pio_mask, dma_mask;
1838 		if (!ata_dev_present(dev))
/* Intersect host, device and blacklist limits into dev->*_mask. */
1841 		ata_dev_xfermask(ap, dev);
1843 		/* TODO: let LLDD filter dev->*_mask here */
/* Pick the best remaining PIO mode and the best DMA mode separately. */
1845 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
1846 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
1847 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
1848 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
1851 	/* step 2: always set host PIO timings */
1852 	rc = ata_host_set_pio(ap);
1856 	/* step 3: set host DMA timings */
1857 	ata_host_set_dma(ap);
1859 	/* step 4: update devices' xfer mode */
1860 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
1861 		struct ata_device *dev = &ap->device[i];
1863 		if (!ata_dev_present(dev))
/* Tell the device itself via SET FEATURES and revalidate it. */
1866 		if (ata_dev_set_mode(ap, dev))
/* Optional LLDD hook after all modes have been programmed. */
1870 	if (ap->ops->post_set_mode)
1871 		ap->ops->post_set_mode(ap);
/* Error path: any failure above disables the whole port. */
1876 	ata_port_disable(ap);
1880 * ata_tf_to_host - issue ATA taskfile to host controller
1881 * @ap: port to which command is being issued
1882 * @tf: ATA taskfile register set
1884 * Issues ATA taskfile register set to ATA host controller,
1885 * with proper synchronization with interrupt handler and
1889 * spin_lock_irqsave(host_set lock)
1892 static inline void ata_tf_to_host(struct ata_port *ap,
1893 const struct ata_taskfile *tf)
1895 ap->ops->tf_load(ap, tf);
1896 ap->ops->exec_command(ap, tf);
1900 * ata_busy_sleep - sleep until BSY clears, or timeout
1901 * @ap: port containing status register to be polled
1902 * @tmout_pat: impatience timeout
1903 * @tmout: overall timeout
1905 * Sleep until ATA Status register bit BSY clears,
1906 * or a timeout occurs.
/* NOTE(review): appears to return nonzero when the device is still
 * BSY after @tmout and zero otherwise — the return statements are not
 * visible in this extract; confirm against callers, which treat a
 * nonzero result as failure.
 */
1911 unsigned int ata_busy_sleep (struct ata_port *ap,
1912 			     unsigned long tmout_pat, unsigned long tmout)
1914 	unsigned long timer_start, timeout;
/* Quick initial wait before starting the jiffies-based loop. */
1917 	status = ata_busy_wait(ap, ATA_BUSY, 300);
1918 	timer_start = jiffies;
1919 	timeout = timer_start + tmout_pat;
/* Phase 1: poll until the "impatience" timeout expires. */
1920 	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1922 		status = ata_busy_wait(ap, ATA_BUSY, 3);
/* Still busy: warn the user once, then keep waiting politely. */
1925 	if (status & ATA_BUSY)
1926 		printk(KERN_WARNING "ata%u is slow to respond, "
1927 		       "please be patient\n", ap->id);
/* Phase 2: poll until the overall timeout expires. */
1929 	timeout = timer_start + tmout;
1930 	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1932 		status = ata_chk_status(ap);
1935 	if (status & ATA_BUSY) {
1936 		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1937 		       ap->id, tmout / HZ);
/* After a bus reset, wait for the devices found in ata_devchk() to
 * become ready: BSY must clear for device 0, and device 1 must answer
 * register reads (nsect/lbal read back as 1) before its BSY wait.
 */
1944 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1946 	struct ata_ioports *ioaddr = &ap->ioaddr;
1947 	unsigned int dev0 = devmask & (1 << 0);
1948 	unsigned int dev1 = devmask & (1 << 1);
1949 	unsigned long timeout;
1951 	/* if device 0 was found in ata_devchk, wait for its
1955 		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1957 	/* if device 1 was found in ata_devchk, wait for
1958 	 * register access, then wait for BSY to clear
1960 	timeout = jiffies + ATA_TMOUT_BOOT;
1964 		ap->ops->dev_select(ap, 1);
1965 		if (ap->flags & ATA_FLAG_MMIO) {
1966 			nsect = readb((void __iomem *) ioaddr->nsect_addr);
1967 			lbal = readb((void __iomem *) ioaddr->lbal_addr);
1969 			nsect = inb(ioaddr->nsect_addr);
1970 			lbal = inb(ioaddr->lbal_addr);
/* Post-reset signature: nsect==1 && lbal==1 means the device is
 * responding to taskfile reads.
 */
1972 		if ((nsect == 1) && (lbal == 1))
1974 		if (time_after(jiffies, timeout)) {
1978 		msleep(50);	/* give drive a breather */
1981 		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1983 	/* is all this really necessary? */
/* Re-select devices to leave device 0 selected on exit. */
1984 	ap->ops->dev_select(ap, 0);
1986 		ap->ops->dev_select(ap, 1);
1988 		ap->ops->dev_select(ap, 0);
1992 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
1993 * @ap: Port to reset and probe
1995 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
1996 * probe the bus.  Not often used these days.
1999 * PCI/etc. bus probe sem.
2000 * Obtains host_set lock.
2004 static unsigned int ata_bus_edd(struct ata_port *ap)
2006 	struct ata_taskfile tf;
2007 	unsigned long flags;
2009 	/* set up execute-device-diag (bus reset) taskfile */
2010 	/* also, take interrupts to a known state (disabled) */
2011 	DPRINTK("execute-device-diag\n");
2012 	ata_tf_init(ap, &tf, 0);
2014 	tf.command = ATA_CMD_EDD;
2015 	tf.protocol = ATA_PROT_NODATA;
/* Issue the taskfile under the host_set lock to serialize against
 * the interrupt handler.
 */
2018 	spin_lock_irqsave(&ap->host_set->lock, flags);
2019 	ata_tf_to_host(ap, &tf);
2020 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
2022 	/* spec says at least 2ms.  but who knows with those
2023 	 * crazy ATAPI devices...
/* Wait out BSY; the return value is this sleep's success/failure. */
2027 	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/* Perform an ATA software reset: pulse SRST in the device control
 * register, wait, check for a floating (0xFF) status, then wait for
 * the devices in @devmask to come ready.  Returns nonzero on failure.
 */
2030 static unsigned int ata_bus_softreset(struct ata_port *ap,
2031 				      unsigned int devmask)
2033 	struct ata_ioports *ioaddr = &ap->ioaddr;
2035 	DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2037 	/* software reset.  causes dev0 to be selected */
2038 	if (ap->flags & ATA_FLAG_MMIO) {
2039 		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2040 		udelay(20);	/* FIXME: flush */
2041 		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2042 		udelay(20);	/* FIXME: flush */
2043 		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
/* Same SRST pulse for port-I/O mapped controllers. */
2045 		outb(ap->ctl, ioaddr->ctl_addr);
2047 		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2049 		outb(ap->ctl, ioaddr->ctl_addr);
2052 	/* spec mandates ">= 2ms" before checking status.
2053 	 * We wait 150ms, because that was the magic delay used for
2054 	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2055 	 * between when the ATA command register is written, and then
2056 	 * status is checked.  Because waiting for "a while" before
2057 	 * checking status is fine, post SRST, we perform this magic
2058 	 * delay here as well.
2060 	 * Old drivers/ide uses the 2mS rule and then waits for ready
2065 	/* Before we perform post reset processing we want to see if
2066 	   the bus shows 0xFF because the odd clown forgets the D7 pulldown
/* A floating bus reads 0xFF: nothing attached, treat as failure. */
2069 	if (ata_check_status(ap) == 0xFF)
2070 		return 1; /* Positive is failure for some reason */
2072 	ata_bus_post_reset(ap, devmask);
2078 * ata_bus_reset - reset host port and associated ATA channel
2079 * @ap: port to reset
2081 * This is typically the first time we actually start issuing
2082 * commands to the ATA channel.  We wait for BSY to clear, then
2083 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2084 * result.  Determine what devices, if any, are on the channel
2085 * by looking at the device 0/1 error register.  Look at the signature
2086 * stored in each device's taskfile registers, to determine if
2087 * the device is ATA or ATAPI.
2090 * PCI/etc. bus probe sem.
2091 * Obtains host_set lock.
2094 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
2097 void ata_bus_reset(struct ata_port *ap)
2099 	struct ata_ioports *ioaddr = &ap->ioaddr;
2100 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2102 	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
2104 	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2106 	/* determine if device 0/1 are present */
/* SATA-reset ports assume device 0 present; others probe via devchk. */
2107 	if (ap->flags & ATA_FLAG_SATA_RESET)
2110 		dev0 = ata_devchk(ap, 0);
2112 			dev1 = ata_devchk(ap, 1);
2116 		devmask |= (1 << 0);
2118 		devmask |= (1 << 1);
2120 	/* select device 0 again */
2121 	ap->ops->dev_select(ap, 0);
2123 	/* issue bus reset */
/* Prefer SRST when available; fall back to EDD for legacy ports. */
2124 	if (ap->flags & ATA_FLAG_SRST)
2125 		rc = ata_bus_softreset(ap, devmask);
2126 	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
2127 		/* set up device control */
2128 		if (ap->flags & ATA_FLAG_MMIO)
2129 			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2131 			outb(ap->ctl, ioaddr->ctl_addr);
2132 		rc = ata_bus_edd(ap);
2139 	 * determine by signature whether we have ATA or ATAPI devices
2141 	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
/* err == 0x81 means device 0 reported device 1 failed diagnostics. */
2142 	if ((slave_possible) && (err != 0x81))
2143 		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2145 	/* re-enable interrupts */
2146 	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
2149 	/* is double-select really necessary? */
2150 	if (ap->device[1].class != ATA_DEV_NONE)
2151 		ap->ops->dev_select(ap, 1);
2152 	if (ap->device[0].class != ATA_DEV_NONE)
2153 		ap->ops->dev_select(ap, 0);
2155 	/* if no devices were detected, disable this port */
2156 	if ((ap->device[0].class == ATA_DEV_NONE) &&
2157 	    (ap->device[1].class == ATA_DEV_NONE))
2160 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2161 		/* set up device control for ATA_FLAG_SATA_RESET */
2162 		if (ap->flags & ATA_FLAG_MMIO)
2163 			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2165 			outb(ap->ctl, ioaddr->ctl_addr);
/* Error path: reset failed or no devices — disable the port. */
2172 	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2173 	ap->ops->port_disable(ap);
/* Bring the SATA phy out of reset: clear SControl DET (write 0x300)
 * and poll SStatus for up to 5 seconds until the DET field leaves
 * state 1 (phy offline).  Return statements are not visible in this
 * extract; callers treat the result as 0 on success.
 */
2178 static int sata_phy_resume(struct ata_port *ap)
2180 	unsigned long timeout = jiffies + (HZ * 5);
2183 	scr_write_flush(ap, SCR_CONTROL, 0x300);
2185 	/* Wait for phy to become ready, if necessary. */
2188 		sstatus = scr_read(ap, SCR_STATUS);
2189 		if ((sstatus & 0xf) != 1)
2191 	} while (time_before(jiffies, timeout));
2197 * ata_std_probeinit - initialize probing
2198 * @ap: port to be probed
2200 * @ap is about to be probed. Initialize it. This function is
2201 * to be used as standard callback for ata_drive_probe_reset().
2203 * NOTE!!! Do not use this function as probeinit if a low level
2204 * driver implements only hardreset. Just pass NULL as probeinit
2205 * in that case. Using this function is probably okay but doing
2206 * so makes reset sequence different from the original
2207 * ->phy_reset implementation and Jeff nervous. :-P
2209 extern void ata_std_probeinit(struct ata_port *ap)
2211 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2212 sata_phy_resume(ap);
2213 if (sata_dev_present(ap))
2214 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2219 * ata_std_softreset - reset host port via ATA SRST
2220 * @ap: port to reset
2221 * @verbose: fail verbosely
2222 * @classes: resulting classes of attached devices
2224 * Reset host port using ATA SRST.  This function is to be used
2225 * as standard callback for ata_drive_*_reset() functions.
2228 * Kernel thread context (may sleep)
2231 * 0 on success, -errno otherwise.
2233 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2235 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2236 	unsigned int devmask = 0, err_mask;
/* SATA port with link down: nothing to reset, report no device. */
2241 	if (ap->ops->scr_read && !sata_dev_present(ap)) {
2242 		classes[0] = ATA_DEV_NONE;
2246 	/* determine if device 0/1 are present */
2247 	if (ata_devchk(ap, 0))
2248 		devmask |= (1 << 0);
2249 	if (slave_possible && ata_devchk(ap, 1))
2250 		devmask |= (1 << 1);
2252 	/* select device 0 again */
2253 	ap->ops->dev_select(ap, 0);
2255 	/* issue bus reset */
2256 	DPRINTK("about to softreset, devmask=%x\n", devmask);
2257 	err_mask = ata_bus_softreset(ap, devmask);
/* Failure: only printk when the caller asked for verbosity. */
2260 			printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2263 			DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2268 	/* determine by signature whether we have ATA or ATAPI devices */
2269 	classes[0] = ata_dev_try_classify(ap, 0, &err);
/* err == 0x81: device 0 says device 1 failed diagnostics. */
2270 	if (slave_possible && err != 0x81)
2271 		classes[1] = ata_dev_try_classify(ap, 1, &err);
2274 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2279 * sata_std_hardreset - reset host port via SATA phy reset
2280 * @ap: port to reset
2281 * @verbose: fail verbosely
2282 * @class: resulting class of attached device
2284 * SATA phy-reset host port using DET bits of SControl register.
2285 * This function is to be used as standard callback for
2286 * ata_drive_*_reset().
2289 * Kernel thread context (may sleep)
2292 * 0 on success, -errno otherwise.
2294 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2298 	/* Issue phy wake/reset */
/* SControl DET=1 (0x301) asserts COMRESET on the link. */
2299 	scr_write_flush(ap, SCR_CONTROL, 0x301);
2302 	 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2303 	 * 10.4.2 says at least 1 ms.
2307 	/* Bring phy back */
2308 	sata_phy_resume(ap);
2310 	/* TODO: phy layer with polling, timeouts, etc. */
/* Link still down after resume: success, but no device attached. */
2311 	if (!sata_dev_present(ap)) {
2312 		*class = ATA_DEV_NONE;
2313 		DPRINTK("EXIT, link offline\n");
/* Device never cleared BSY: the reset failed. */
2317 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2319 			printk(KERN_ERR "ata%u: COMRESET failed "
2320 			       "(device not ready)\n", ap->id);
2322 			DPRINTK("EXIT, device not ready\n");
2326 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
/* Classify whatever answered behind the phy (device 0 only). */
2328 	*class = ata_dev_try_classify(ap, 0, NULL);
2330 	DPRINTK("EXIT, class=%u\n", *class);
2335 * ata_std_postreset - standard postreset callback
2336 * @ap: the target ata_port
2337 * @classes: classes of attached devices
2339 * This function is invoked after a successful reset.  Note that
2340 * the device might have been reset more than once using
2341 * different reset methods before postreset is invoked.
2343 * This function is to be used as standard callback for
2344 * ata_drive_*_reset().
2347 * Kernel thread context (may sleep)
2349 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2353 	/* set cable type if it isn't already set */
2354 	if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2355 		ap->cbl = ATA_CBL_SATA;
2357 	/* print link status */
2358 	if (ap->cbl == ATA_CBL_SATA)
2359 		sata_print_link_status(ap);
2361 	/* re-enable interrupts */
2362 	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
2365 	/* is double-select really necessary? */
/* Touch the *other* device first, then the one we end on — mirrors
 * the double-select done in ata_bus_reset().
 */
2366 	if (classes[0] != ATA_DEV_NONE)
2367 		ap->ops->dev_select(ap, 1);
2368 	if (classes[1] != ATA_DEV_NONE)
2369 		ap->ops->dev_select(ap, 0);
2371 	/* bail out if no device is present */
2372 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2373 		DPRINTK("EXIT, no device\n");
2377 	/* set up device control */
2378 	if (ap->ioaddr.ctl_addr) {
2379 		if (ap->flags & ATA_FLAG_MMIO)
2380 			writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2382 			outb(ap->ctl, ap->ioaddr.ctl_addr);
2389 * ata_std_probe_reset - standard probe reset method
2390 * @ap: prot to perform probe-reset
2391 * @classes: resulting classes of attached devices
2393 * The stock off-the-shelf ->probe_reset method.
2396 * Kernel thread context (may sleep)
2399 * 0 on success, -errno otherwise.
2401 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2403 ata_reset_fn_t hardreset;
2406 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2407 hardreset = sata_std_hardreset;
2409 return ata_drive_probe_reset(ap, ata_std_probeinit,
2410 ata_std_softreset, hardreset,
2411 ata_std_postreset, classes);
/* Run one reset method and normalize its classification result:
 * start with all slots unknown, run @reset, and if it classified at
 * least one slot, convert remaining unknowns to "no device".  The
 * port is considered successfully probed only when slot 0 ended up
 * classified.
 */
2414 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2415 			  ata_postreset_fn_t postreset,
2416 			  unsigned int *classes)
2420 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2421 		classes[i] = ATA_DEV_UNKNOWN;
/* Reset with verbose=0; errors propagate to the caller. */
2423 	rc = reset(ap, 0, classes);
2427 	/* If any class isn't ATA_DEV_UNKNOWN, consider classification
2428 	 * is complete and convert all ATA_DEV_UNKNOWN to
2431 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2432 		if (classes[i] != ATA_DEV_UNKNOWN)
2435 	if (i < ATA_MAX_DEVICES)
2436 		for (i = 0; i < ATA_MAX_DEVICES; i++)
2437 			if (classes[i] == ATA_DEV_UNKNOWN)
2438 				classes[i] = ATA_DEV_NONE;
/* @postreset is optional — guard is on the line not shown here. */
2441 		postreset(ap, classes);
2443 	return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2447 * ata_drive_probe_reset - Perform probe reset with given methods
2448 * @ap: port to reset
2449 * @probeinit: probeinit method (can be NULL)
2450 * @softreset: softreset method (can be NULL)
2451 * @hardreset: hardreset method (can be NULL)
2452 * @postreset: postreset method (can be NULL)
2453 * @classes: resulting classes of attached devices
2455 * Reset the specified port and classify attached devices using
2456 * given methods.  This function prefers softreset but tries all
2457 * possible reset sequences to reset and classify devices.  This
2458 * function is intended to be used for constructing ->probe_reset
2459 * callback by low level drivers.
2461 * Reset methods should follow the following rules.
2463 * - Return 0 on sucess, -errno on failure.
2464 * - If classification is supported, fill classes[] with
2465 *   recognized class codes.
2466 * - If classification is not supported, leave classes[] alone.
2467 * - If verbose is non-zero, print error message on failure;
2468 *   otherwise, shut up.
2471 * Kernel thread context (may sleep)
2474 * 0 on success, -EINVAL if no reset method is avaliable, -ENODEV
2475 * if classification fails, and any error code from reset
2478 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2479 			  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2480 			  ata_postreset_fn_t postreset, unsigned int *classes)
/* First attempt: softreset (preferred, least disruptive). */
2488 		rc = do_probe_reset(ap, softreset, postreset, classes);
/* Softreset missing or unsuccessful: try hardreset. */
2496 	rc = do_probe_reset(ap, hardreset, postreset, classes);
2497 	if (rc == 0 || rc != -ENODEV)
/* Hardreset reset the port but could not classify: follow up with a
 * softreset to obtain the device signature.
 */
2501 		rc = do_probe_reset(ap, softreset, postreset, classes);
2507 * ata_dev_same_device - Determine whether new ID matches configured device
2508 * @ap: port on which the device to compare against resides
2509 * @dev: device to compare against
2510 * @new_class: class of the new device
2511 * @new_id: IDENTIFY page of the new device
2513 * Compare @new_class and @new_id against @dev and determine
2514 * whether @dev is the device indicated by @new_class and
2521 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2523 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2524 			       unsigned int new_class, const u16 *new_id)
2526 	const u16 *old_id = dev->id;
/* [0] holds the old (configured) strings, [1] the freshly read ones. */
2527 	unsigned char model[2][41], serial[2][21];
2530 	if (dev->class != new_class) {
2532 		       "ata%u: dev %u class mismatch %d != %d\n",
2533 		       ap->id, dev->devno, dev->class, new_class);
/* Extract the comparable identity fields from both IDENTIFY pages. */
2537 	ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2538 	ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2539 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2540 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2541 	new_n_sectors = ata_id_n_sectors(new_id);
2543 	if (strcmp(model[0], model[1])) {
2545 		       "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2546 		       ap->id, dev->devno, model[0], model[1]);
2550 	if (strcmp(serial[0], serial[1])) {
2552 		       "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2553 		       ap->id, dev->devno, serial[0], serial[1]);
/* Capacity check only makes sense for ATA disks. */
2557 	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2559 		       "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2560 		       ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2561 		       (unsigned long long)new_n_sectors);
2569 * ata_dev_revalidate - Revalidate ATA device
2570 * @ap: port on which the device to revalidate resides
2571 * @dev: device to revalidate
2572 * @post_reset: is this revalidation after reset?
2574 * Re-read IDENTIFY page and make sure @dev is still attached to
2578 * Kernel thread context (may sleep)
2581 * 0 on success, negative errno otherwise
2583 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
/* A device that is not present cannot be revalidated. */
2590 	if (!ata_dev_present(dev))
2596 	/* allocate & read ID data */
2597 	rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2601 	/* is the device still there? */
/* Identity mismatch means the drive was swapped behind our back. */
2602 	if (!ata_dev_same_device(ap, dev, class, id)) {
2610 	/* configure device according to the new ID */
2611 	return ata_dev_configure(ap, dev, 0);
/* Shared error exit: report which device failed and why. */
2614 	printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2615 	       ap->id, dev->devno, rc);
/* Devices with broken DMA, as (model, firmware-revision) pairs; a
 * NULL revision blacklists every firmware revision of that model.
 * Consumed two entries at a time by ata_dma_blacklisted().
 */
2620 static const char * const ata_dma_blacklist [] = {
2621 	"WDC AC11000H", NULL,
2622 	"WDC AC22100H", NULL,
2623 	"WDC AC32500H", NULL,
2624 	"WDC AC33100H", NULL,
2625 	"WDC AC31600H", NULL,
2626 	"WDC AC32100H", "24.09P07",
2627 	"WDC AC23200L", "21.10N21",
2628 	"Compaq CRD-8241B",  NULL,
2633 	"SanDisk SDP3B", NULL,
2634 	"SanDisk SDP3B-64", NULL,
2635 	"SANYO CD-ROM CRD", NULL,
2636 	"HITACHI CDR-8", NULL,
2637 	"HITACHI CDR-8335", NULL,
2638 	"HITACHI CDR-8435", NULL,
2639 	"Toshiba CD-ROM XM-6202B", NULL,
2640 	"TOSHIBA CD-ROM XM-1702BC", NULL,
2642 	"E-IDE CD-ROM CR-840", NULL,
2643 	"CD-ROM Drive/F5A", NULL,
2644 	"WPI CDD-820", NULL,
2645 	"SAMSUNG CD-ROM SC-148C", NULL,
2646 	"SAMSUNG CD-ROM SC", NULL,
2647 	"SanDisk SDP3B-64", NULL,
2648 	"ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2649 	"_NEC DV5800A", NULL,
2650 	"SAMSUNG CD-ROM SN-124", "N001"
/* Trim trailing blanks from the fixed-width, blank-padded string @s
 * (at most @len bytes, possibly not NUL-terminated on entry).
 * NUL-terminates the trimmed string in place and returns its length.
 * The visible original was truncated (empty trim loop, no return);
 * this is the completed equivalent, using memchr instead of the
 * non-ISO strnlen.
 */
static int ata_strim(char *s, size_t len)
{
	const char *nul = memchr(s, '\0', len);

	if (nul)
		len = nul - s;

	/* ATAPI specifies that empty space is blank-filled; remove blanks */
	while (len > 0 && s[len - 1] == ' ')
		s[--len] = 0;

	return len;
}
2665 static int ata_dma_blacklisted(const struct ata_device *dev)
2667 unsigned char model_num[40];
2668 unsigned char model_rev[16];
2669 unsigned int nlen, rlen;
2672 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2674 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2676 nlen = ata_strim(model_num, sizeof(model_num));
2677 rlen = ata_strim(model_rev, sizeof(model_rev));
2679 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2680 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2681 if (ata_dma_blacklist[i+1] == NULL)
2683 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
2691 * ata_dev_xfermask - Compute supported xfermask of the given device
2692 * @ap: Port on which the device to compute xfermask for resides
2693 * @dev: Device to compute xfermask for
2695 * Compute supported xfermask of @dev and store it in
2696 * dev->*_mask.  This function is responsible for applying all
2697 * known limits including host controller limits, device
2703 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2705 	unsigned long xfer_mask;
/* Start from the host controller's advertised capabilities. */
2708 	xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2711 	/* use port-wide xfermask for now */
/* Intersect with every present device on the port, since all devices
 * on a channel currently share one timing.
 */
2712 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2713 		struct ata_device *d = &ap->device[i];
2714 		if (!ata_dev_present(d))
2716 		xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
2718 		xfer_mask &= ata_id_xfermask(d->id);
/* Blacklisted devices drag the whole port down to PIO. */
2719 		if (ata_dma_blacklisted(d))
2720 			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2723 	if (ata_dma_blacklisted(dev))
2724 		printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2725 		       "disabling DMA\n", ap->id, dev->devno);
/* Store the final intersection back into the device's masks. */
2727 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2732 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2733 * @ap: Port associated with device @dev
2734 * @dev: Device to which command will be sent
2736 * Issue SET FEATURES - XFER MODE command to device @dev
2740 * PCI/etc. bus probe sem.
2743 * 0 on success, AC_ERR_* mask otherwise.
2746 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2747 					 struct ata_device *dev)
2749 	struct ata_taskfile tf;
2750 	unsigned int err_mask;
2752 	/* set up set-features taskfile */
2753 	DPRINTK("set features - xfer mode\n");
2755 	ata_tf_init(ap, &tf, dev->devno);
2756 	tf.command = ATA_CMD_SET_FEATURES;
2757 	tf.feature = SETFEATURES_XFER;
2758 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2759 	tf.protocol = ATA_PROT_NODATA;
/* Per ATA spec, the desired transfer mode goes in the sector count
 * register for SET FEATURES - XFER MODE.
 */
2760 	tf.nsect = dev->xfer_mode;
/* No data phase; just issue and collect the error mask. */
2762 	err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2764 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
2769 * ata_dev_init_params - Issue INIT DEV PARAMS command
2770 * @ap: Port associated with device @dev
2771 * @dev: Device to which command will be sent
2774 * Kernel thread context (may sleep)
2777 * 0 on success, AC_ERR_* mask otherwise.
2780 static unsigned int ata_dev_init_params(struct ata_port *ap,
2781 					struct ata_device *dev)
2783 	struct ata_taskfile tf;
2784 	unsigned int err_mask;
/* CHS geometry from IDENTIFY: word 6 = sectors/track, word 3 = heads. */
2785 	u16 sectors = dev->id[6];
2786 	u16 heads = dev->id[3];
2788 	/* Number of sectors per track 1-255. Number of heads 1-16 */
2789 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2792 	/* set up init dev params taskfile */
2793 	DPRINTK("init dev params \n");
2795 	ata_tf_init(ap, &tf, dev->devno);
2796 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
2797 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2798 	tf.protocol = ATA_PROT_NODATA;
/* Device register carries (heads - 1) in its low nibble. */
2800 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2802 	err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2804 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
2809 * ata_sg_clean - Unmap DMA memory associated with command
2810 * @qc: Command containing DMA memory to be released
2812 * Unmap all mapped DMA memory associated with this command.
2815 * spin_lock_irqsave(host_set lock)
2818 static void ata_sg_clean(struct ata_queued_cmd *qc)
2820 	struct ata_port *ap = qc->ap;
2821 	struct scatterlist *sg = qc->__sg;
2822 	int dir = qc->dma_dir;
2823 	void *pad_buf = NULL;
2825 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2826 	WARN_ON(sg == NULL);
2828 	if (qc->flags & ATA_QCFLAG_SINGLE)
2829 		WARN_ON(qc->n_elem > 1);
2831 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2833 	/* if we padded the buffer out to 32-bit bound, and data
2834 	 * xfer direction is from-device, we must copy from the
2835 	 * pad buffer back into the supplied buffer
/* Pad buffer is a per-tag slot in the port's pad area. */
2837 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2838 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
/* Scatter-gather case: unmap the whole list, restore the length the
 * mapping code shaved off the last entry, then copy back pad data.
 */
2840 	if (qc->flags & ATA_QCFLAG_SG) {
2842 			dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2843 		/* restore last sg */
2844 		sg[qc->orig_n_elem - 1].length += qc->pad_len;
2846 			struct scatterlist *psg = &qc->pad_sgent;
/* KM_IRQ0 kmap is safe here: caller holds host_set lock w/ irqs off. */
2847 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
2848 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2849 			kunmap_atomic(addr, KM_IRQ0);
/* Single-buffer case: unmap it and copy back pad data directly. */
2853 			dma_unmap_single(ap->host_set->dev,
2854 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2857 		sg->length += qc->pad_len;
2859 			memcpy(qc->buf_virt + sg->length - qc->pad_len,
2860 			       pad_buf, qc->pad_len);
2863 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
2868 * ata_fill_sg - Fill PCI IDE PRD table
2869 * @qc: Metadata associated with taskfile to be transferred
2871 * Fill PCI IDE PRD (scatter-gather) table with segments
2872 * associated with the current disk command.
2875 * spin_lock_irqsave(host_set lock)
2878 static void ata_fill_sg(struct ata_queued_cmd *qc)
2880 struct ata_port *ap = qc->ap;
2881 struct scatterlist *sg;
2884 WARN_ON(qc->__sg == NULL);
2885 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);	/* must have something to transfer */
2888 ata_for_each_sg(sg, qc) {
2892 /* determine if physical DMA addr spans 64K boundary.
2893 * Note h/w doesn't support 64-bit, so we unconditionally
2894 * truncate dma_addr_t to u32.
2896 addr = (u32) sg_dma_address(sg);
2897 sg_len = sg_dma_len(sg);
2900 offset = addr & 0xffff;
2902 if ((offset + sg_len) > 0x10000)
2903 len = 0x10000 - offset;	/* clamp this PRD entry at the 64K boundary */
2905 ap->prd[idx].addr = cpu_to_le32(addr);
2906 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);	/* PRD entries are little-endian */
2907 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2916 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);	/* flag the last PRD entry */
2919 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2920 * @qc: Metadata associated with taskfile to check
2922 * Allow low-level driver to filter ATA PACKET commands, returning
2923 * a status indicating whether or not it is OK to use DMA for the
2924 * supplied PACKET command.
2927 * spin_lock_irqsave(host_set lock)
2929 * RETURNS: 0 when ATAPI DMA can be used
2932 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2934 struct ata_port *ap = qc->ap;
2935 int rc = 0; /* Assume ATAPI DMA is OK by default */
2937 if (ap->ops->check_atapi_dma)	/* optional per-driver veto hook */
2938 rc = ap->ops->check_atapi_dma(qc);
2943 * ata_qc_prep - Prepare taskfile for submission
2944 * @qc: Metadata associated with taskfile to be prepared
2946 * Prepare ATA taskfile for submission.
2949 * spin_lock_irqsave(host_set lock)
2951 void ata_qc_prep(struct ata_queued_cmd *qc)
2953 if (!(qc->flags & ATA_QCFLAG_DMAMAP))	/* no DMA mapping -> no PRD table to build */
2959 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }	/* qc_prep hook for controllers needing no preparation */
2962 * ata_sg_init_one - Associate command with memory buffer
2963 * @qc: Command to be associated
2964 * @buf: Memory buffer
2965 * @buflen: Length of memory buffer, in bytes.
2967 * Initialize the data-related elements of queued_cmd @qc
2968 * to point to a single memory buffer, @buf of byte length @buflen.
2971 * spin_lock_irqsave(host_set lock)
2974 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2976 struct scatterlist *sg;
2978 qc->flags |= ATA_QCFLAG_SINGLE;
2980 memset(&qc->sgent, 0, sizeof(qc->sgent));
2981 qc->__sg = &qc->sgent;	/* use the qc's embedded single-entry sg table */
2983 qc->orig_n_elem = 1;
2987 sg_init_one(sg, buf, buflen);
2991 * ata_sg_init - Associate command with scatter-gather table.
2992 * @qc: Command to be associated
2993 * @sg: Scatter-gather table.
2994 * @n_elem: Number of elements in s/g table.
2996 * Initialize the data-related elements of queued_cmd @qc
2997 * to point to a scatter-gather table @sg, containing @n_elem
3001 * spin_lock_irqsave(host_set lock)
3004 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3005 unsigned int n_elem)
3007 qc->flags |= ATA_QCFLAG_SG;
3009 qc->n_elem = n_elem;
3010 qc->orig_n_elem = n_elem;	/* kept so ata_sg_clean() can restore a padding-trimmed last sg */
3014 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3015 * @qc: Command with memory buffer to be mapped.
3017 * DMA-map the memory buffer associated with queued_cmd @qc.
3020 * spin_lock_irqsave(host_set lock)
3023 * Zero on success, negative on error.
3026 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3028 struct ata_port *ap = qc->ap;
3029 int dir = qc->dma_dir;
3030 struct scatterlist *sg = qc->__sg;
3031 dma_addr_t dma_address;
3034 /* we must lengthen transfers to end on a 32-bit boundary */
3035 qc->pad_len = sg->length & 3;	/* leftover bytes below the 4-byte boundary */
3037 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);	/* per-tag slot in the port pad buffer */
3038 struct scatterlist *psg = &qc->pad_sgent;
3040 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);	/* only ATAPI is expected to need padding here */
3042 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3044 if (qc->tf.flags & ATA_TFLAG_WRITE)	/* writes: stage the trailing bytes into the pad slot */
3045 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3048 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3049 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3051 sg->length -= qc->pad_len;	/* trim main sg; the pad sgent carries the tail */
3052 if (sg->length == 0)
3055 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3056 sg->length, qc->pad_len);
3064 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
3066 if (dma_mapping_error(dma_address)) {
3068 sg->length += qc->pad_len;	/* undo the trim before reporting failure */
3072 sg_dma_address(sg) = dma_address;
3073 sg_dma_len(sg) = sg->length;
3076 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3077 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3083 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3084 * @qc: Command with scatter-gather table to be mapped.
3086 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3089 * spin_lock_irqsave(host_set lock)
3092 * Zero on success, negative on error.
3096 static int ata_sg_setup(struct ata_queued_cmd *qc)
3098 struct ata_port *ap = qc->ap;
3099 struct scatterlist *sg = qc->__sg;
3100 struct scatterlist *lsg = &sg[qc->n_elem - 1];	/* last sg entry; padding applies to its tail */
3101 int n_elem, pre_n_elem, dir, trim_sg = 0;
3103 VPRINTK("ENTER, ata%u\n", ap->id);
3104 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3106 /* we must lengthen transfers to end on a 32-bit boundary */
3107 qc->pad_len = lsg->length & 3;
3109 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);	/* per-tag slot in the port pad buffer */
3110 struct scatterlist *psg = &qc->pad_sgent;
3111 unsigned int offset;
3113 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);	/* only ATAPI is expected to need padding here */
3115 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3118 * psg->page/offset are used to copy to-be-written
3119 * data in this function or read data in ata_sg_clean.
3121 offset = lsg->offset + lsg->length - qc->pad_len;
3122 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);	/* tail may start on a later page */
3123 psg->offset = offset_in_page(offset);
3125 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3126 void *addr = kmap_atomic(psg->page, KM_IRQ0);	/* highmem-safe access to the tail bytes */
3127 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3128 kunmap_atomic(addr, KM_IRQ0);
3131 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3132 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3134 lsg->length -= qc->pad_len;	/* trim last sg; the pad sgent carries the tail */
3135 if (lsg->length == 0)
3138 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3139 qc->n_elem - 1, lsg->length, qc->pad_len);
3142 pre_n_elem = qc->n_elem;
3143 if (trim_sg && pre_n_elem)	/* drop a fully-consumed last entry before mapping */
3152 n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
3154 /* restore last sg */
3155 lsg->length += qc->pad_len;	/* mapping failed: undo the trim */
3159 DPRINTK("%d sg elements mapped\n", n_elem);
3162 qc->n_elem = n_elem;	/* dma_map_sg may coalesce, so use its element count from here on */
3168 * ata_poll_qc_complete - turn irq back on and finish qc
3169 * @qc: Command to complete
3170 * @err_mask: ATA status register content
3173 * None. (grabs host lock)
3176 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3178 struct ata_port *ap = qc->ap;
3179 unsigned long flags;
3181 spin_lock_irqsave(&ap->host_set->lock, flags);
3182 ap->flags &= ~ATA_FLAG_NOINTR;	/* polling done; allow interrupt handling again */
3184 ata_qc_complete(qc);
3185 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3189 * ata_pio_poll - poll using PIO, depending on current state
3190 * @ap: the target ata_port
3193 * None. (executing in kernel thread context)
3196 * timeout value to use
3199 static unsigned long ata_pio_poll(struct ata_port *ap)
3201 struct ata_queued_cmd *qc;
3203 unsigned int poll_state = HSM_ST_UNKNOWN;
3204 unsigned int reg_state = HSM_ST_UNKNOWN;
3206 qc = ata_qc_from_tag(ap, ap->active_tag);
3207 WARN_ON(qc == NULL);
3209 switch (ap->hsm_task_state) {
3212 poll_state = HSM_ST_POLL;	/* map the regular state onto its polling twin */
3216 case HSM_ST_LAST_POLL:
3217 poll_state = HSM_ST_LAST_POLL;
3218 reg_state = HSM_ST_LAST;
3225 status = ata_chk_status(ap);
3226 if (status & ATA_BUSY) {
3227 if (time_after(jiffies, ap->pio_task_timeout)) {	/* device stayed BSY too long */
3228 qc->err_mask |= AC_ERR_TIMEOUT;
3229 ap->hsm_task_state = HSM_ST_TMOUT;
3232 ap->hsm_task_state = poll_state;
3233 return ATA_SHORT_PAUSE;	/* still busy: come back after a short delay */
3236 ap->hsm_task_state = reg_state;	/* BSY cleared: resume the non-polling state */
3241 * ata_pio_complete - check if drive is busy or idle
3242 * @ap: the target ata_port
3245 * None. (executing in kernel thread context)
3248 * Non-zero if qc completed, zero otherwise.
3251 static int ata_pio_complete (struct ata_port *ap)
3253 struct ata_queued_cmd *qc;
3257 * This is purely heuristic. This is a fast path. Sometimes when
3258 * we enter, BSY will be cleared in a chk-status or two. If not,
3259 * the drive is probably seeking or something. Snooze for a couple
3260 * msecs, then chk-status again. If still busy, fall back to
3261 * HSM_ST_POLL state.
3263 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3264 if (drv_stat & ATA_BUSY) {
3266 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);	/* second quick try before giving up the fast path */
3267 if (drv_stat & ATA_BUSY) {
3268 ap->hsm_task_state = HSM_ST_LAST_POLL;	/* hand off to the slow polling state */
3269 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3274 qc = ata_qc_from_tag(ap, ap->active_tag);
3275 WARN_ON(qc == NULL);
3277 drv_stat = ata_wait_idle(ap);
3278 if (!ata_ok(drv_stat)) {
3279 qc->err_mask |= __ac_err_mask(drv_stat);	/* record device error bits */
3280 ap->hsm_task_state = HSM_ST_ERR;
3284 ap->hsm_task_state = HSM_ST_IDLE;
3286 WARN_ON(qc->err_mask);
3287 ata_poll_qc_complete(qc);
3289 /* another command may start at this point */
3296 * swap_buf_le16 - swap halves of 16-bit words in place
3297 * @buf: Buffer to swap
3298 * @buf_words: Number of 16-bit words in buffer.
3300 * Swap halves of 16-bit words if needed to convert from
3301 * little-endian byte order to native cpu byte order, or
3305 * Inherited from caller.
3307 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3312 for (i = 0; i < buf_words; i++)
3313 buf[i] = le16_to_cpu(buf[i]);	/* compiles to a no-op on little-endian CPUs */
3314 #endif /* __BIG_ENDIAN */
3318 * ata_mmio_data_xfer - Transfer data by MMIO
3319 * @ap: port to read/write
3321 * @buflen: buffer length
3322 * @write_data: read/write
3324 * Transfer data from/to the device data register by MMIO.
3327 * Inherited from caller.
3330 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3331 unsigned int buflen, int write_data)
3334 unsigned int words = buflen >> 1;	/* device data register is 16 bits wide */
3335 u16 *buf16 = (u16 *) buf;
3336 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3338 /* Transfer multiple of 2 bytes */
3340 for (i = 0; i < words; i++)
3341 writew(le16_to_cpu(buf16[i]), mmio);	/* buffer holds LE data; writew takes CPU order */
3343 for (i = 0; i < words; i++)
3344 buf16[i] = cpu_to_le16(readw(mmio));
3347 /* Transfer trailing 1 byte, if any. */
3348 if (unlikely(buflen & 0x01)) {
3349 u16 align_buf[1] = { 0 };	/* bounce word so we never over-read/over-write buf */
3350 unsigned char *trailing_buf = buf + buflen - 1;
3353 memcpy(align_buf, trailing_buf, 1);
3354 writew(le16_to_cpu(align_buf[0]), mmio);
3356 align_buf[0] = cpu_to_le16(readw(mmio));
3357 memcpy(trailing_buf, align_buf, 1);
3363 * ata_pio_data_xfer - Transfer data by PIO
3364 * @ap: port to read/write
3366 * @buflen: buffer length
3367 * @write_data: read/write
3369 * Transfer data from/to the device data register by PIO.
3372 * Inherited from caller.
3375 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3376 unsigned int buflen, int write_data)
3378 unsigned int words = buflen >> 1;	/* data port is 16 bits wide */
3380 /* Transfer multiple of 2 bytes */
3382 outsw(ap->ioaddr.data_addr, buf, words);
3384 insw(ap->ioaddr.data_addr, buf, words);
3386 /* Transfer trailing 1 byte, if any. */
3387 if (unlikely(buflen & 0x01)) {
3388 u16 align_buf[1] = { 0 };	/* bounce word so we never over-read/over-write buf */
3389 unsigned char *trailing_buf = buf + buflen - 1;
3392 memcpy(align_buf, trailing_buf, 1);
3393 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3395 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3396 memcpy(trailing_buf, align_buf, 1);
3402 * ata_data_xfer - Transfer data from/to the data register.
3403 * @ap: port to read/write
3405 * @buflen: buffer length
3406 * @do_write: read/write
3408 * Transfer data from/to the device data register.
3411 * Inherited from caller.
3414 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3415 unsigned int buflen, int do_write)
3417 /* Make the crap hardware pay the costs not the good stuff */
3418 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {	/* such hardware needs irqs off for the whole transfer */
3419 unsigned long flags;
3420 local_irq_save(flags);
3421 if (ap->flags & ATA_FLAG_MMIO)
3422 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3424 ata_pio_data_xfer(ap, buf, buflen, do_write);
3425 local_irq_restore(flags);
3427 if (ap->flags & ATA_FLAG_MMIO)	/* dispatch on the port's access method */
3428 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3430 ata_pio_data_xfer(ap, buf, buflen, do_write);
3435 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3436 * @qc: Command on going
3438 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3441 * Inherited from caller.
3444 static void ata_pio_sector(struct ata_queued_cmd *qc)
3446 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3447 struct scatterlist *sg = qc->__sg;
3448 struct ata_port *ap = qc->ap;
3450 unsigned int offset;
3453 if (qc->cursect == (qc->nsect - 1))	/* last sector -> move the HSM to its final state */
3454 ap->hsm_task_state = HSM_ST_LAST;
3456 page = sg[qc->cursg].page;
3457 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3459 /* get the current page and offset */
3460 page = nth_page(page, (offset >> PAGE_SHIFT));	/* offset may cross into a later page */
3461 offset %= PAGE_SIZE;
3463 buf = kmap(page) + offset;
3468 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {	/* current sg exhausted: advance */
3473 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3475 /* do the actual data transfer */
3476 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3477 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3483 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3484 * @qc: Command on going
3485 * @bytes: number of bytes
3487 * Transfer data from/to the ATAPI device.
3490 * Inherited from caller.
3494 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3496 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3497 struct scatterlist *sg = qc->__sg;
3498 struct ata_port *ap = qc->ap;
3501 unsigned int offset, count;
3503 if (qc->curbytes + bytes >= qc->nbytes)	/* this chunk finishes the command */
3504 ap->hsm_task_state = HSM_ST_LAST;
3507 if (unlikely(qc->cursg >= qc->n_elem)) {
3509 * The end of qc->sg is reached and the device expects
3510 * more data to transfer. In order not to overrun qc->sg
3511 * and fulfill length specified in the byte count register,
3512 * - for read case, discard trailing data from the device
3513 * - for write case, padding zero data to the device
3515 u16 pad_buf[1] = { 0 };
3516 unsigned int words = bytes >> 1;
3519 if (words) /* warning if bytes > 1 */
3520 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3523 for (i = 0; i < words; i++)
3524 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);	/* drain or zero-feed one word at a time */
3526 ap->hsm_task_state = HSM_ST_LAST;
3530 sg = &qc->__sg[qc->cursg];
3533 offset = sg->offset + qc->cursg_ofs;
3535 /* get the current page and offset */
3536 page = nth_page(page, (offset >> PAGE_SHIFT));	/* offset may cross into a later page */
3537 offset %= PAGE_SIZE;
3539 /* don't overrun current sg */
3540 count = min(sg->length - qc->cursg_ofs, bytes);
3542 /* don't cross page boundaries */
3543 count = min(count, (unsigned int)PAGE_SIZE - offset);
3545 buf = kmap(page) + offset;
3548 qc->curbytes += count;
3549 qc->cursg_ofs += count;
3551 if (qc->cursg_ofs == sg->length) {	/* current sg exhausted: advance to the next entry */
3556 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3558 /* do the actual data transfer */
3559 ata_data_xfer(ap, buf, count, do_write);
3568 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3569 * @qc: Command on going
3571 * Transfer data from/to the ATAPI device.
3574 * Inherited from caller.
3577 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3579 struct ata_port *ap = qc->ap;
3580 struct ata_device *dev = qc->dev;
3581 unsigned int ireason, bc_lo, bc_hi, bytes;
3582 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3584 ap->ops->tf_read(ap, &qc->tf);	/* re-read taskfile: device reports chunk size there */
3585 ireason = qc->tf.nsect;	/* interrupt reason register */
3586 bc_lo = qc->tf.lbam;
3587 bc_hi = qc->tf.lbah;
3588 bytes = (bc_hi << 8) | bc_lo;	/* byte count the device wants to transfer now */
3590 /* shall be cleared to zero, indicating xfer of data */
3591 if (ireason & (1 << 0))
3594 /* make sure transfer direction matches expected */
3595 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;	/* IO bit clear => device wants host-to-device */
3596 if (do_write != i_write)
3599 __atapi_pio_bytes(qc, bytes);
3604 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3605 ap->id, dev->devno);
3606 qc->err_mask |= AC_ERR_HSM;
3607 ap->hsm_task_state = HSM_ST_ERR;
3611 * ata_pio_block - start PIO on a block
3612 * @ap: the target ata_port
3615 * None. (executing in kernel thread context)
3618 static void ata_pio_block(struct ata_port *ap)
3620 struct ata_queued_cmd *qc;
3624 * This is purely heuristic. This is a fast path.
3625 * Sometimes when we enter, BSY will be cleared in
3626 * a chk-status or two. If not, the drive is probably seeking
3627 * or something. Snooze for a couple msecs, then
3628 * chk-status again. If still busy, fall back to
3629 * HSM_ST_POLL state.
3631 status = ata_busy_wait(ap, ATA_BUSY, 5);
3632 if (status & ATA_BUSY) {
3634 status = ata_busy_wait(ap, ATA_BUSY, 10);	/* second quick try before falling back */
3635 if (status & ATA_BUSY) {
3636 ap->hsm_task_state = HSM_ST_POLL;	/* hand off to the slow polling state */
3637 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3642 qc = ata_qc_from_tag(ap, ap->active_tag);
3643 WARN_ON(qc == NULL);
3646 if (status & (ATA_ERR | ATA_DF)) {	/* device reported an error or fault */
3647 qc->err_mask |= AC_ERR_DEV;
3648 ap->hsm_task_state = HSM_ST_ERR;
3652 /* transfer data if any */
3653 if (is_atapi_taskfile(&qc->tf)) {
3654 /* DRQ=0 means no more data to transfer */
3655 if ((status & ATA_DRQ) == 0) {
3656 ap->hsm_task_state = HSM_ST_LAST;
3660 atapi_pio_bytes(qc);
3662 /* handle BSY=0, DRQ=0 as error */
3663 if ((status & ATA_DRQ) == 0) {	/* ATA PIO must have DRQ set here */
3664 qc->err_mask |= AC_ERR_HSM;
3665 ap->hsm_task_state = HSM_ST_ERR;
3673 /* Terminate a PIO command that ended in HSM_ST_ERR/HSM_ST_TMOUT. */
3673 static void ata_pio_error(struct ata_port *ap)
3675 struct ata_queued_cmd *qc;
3677 qc = ata_qc_from_tag(ap, ap->active_tag);
3678 WARN_ON(qc == NULL);
3680 if (qc->tf.command != ATA_CMD_PACKET)	/* ATAPI errors are normal; only warn for ATA commands */
3681 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3683 /* make sure qc->err_mask is available to
3684 * know what's wrong and recover
3686 WARN_ON(qc->err_mask == 0);
3688 ap->hsm_task_state = HSM_ST_IDLE;
3690 ata_poll_qc_complete(qc);
3693 /* Workqueue body: advance the PIO host state machine one step, then
3693  * either re-queue itself (possibly after a poll delay) or stop once
3693  * the qc completed or the state machine went idle. */
3693 static void ata_pio_task(void *_data)
3695 struct ata_port *ap = _data;
3696 unsigned long timeout;
3703 switch (ap->hsm_task_state) {
3712 qc_completed = ata_pio_complete(ap);
3716 case HSM_ST_LAST_POLL:
3717 timeout = ata_pio_poll(ap);	/* poll states return the delay before the next try */
3727 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3728 else if (!qc_completed)
3733 * atapi_packet_task - Write CDB bytes to hardware
3734 * @_data: Port to which ATAPI device is attached.
3736 * When device has indicated its readiness to accept
3737 * a CDB, this function is called. Send the CDB.
3738 * If DMA is to be performed, exit immediately.
3739 * Otherwise, we are in polling mode, so poll
3740 * status until operation succeeds or fails.
3743 * Kernel thread context (may sleep)
3746 static void atapi_packet_task(void *_data)
3748 struct ata_port *ap = _data;
3749 struct ata_queued_cmd *qc;
3752 qc = ata_qc_from_tag(ap, ap->active_tag);
3753 WARN_ON(qc == NULL);
3754 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3756 /* sleep-wait for BSY to clear */
3757 DPRINTK("busy wait\n");
3758 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3759 qc->err_mask |= AC_ERR_TIMEOUT;
3763 /* make sure DRQ is set */
3764 status = ata_chk_status(ap);
3765 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {	/* device must be ready for the CDB */
3766 qc->err_mask |= AC_ERR_HSM;
3771 DPRINTK("send cdb\n");
3772 WARN_ON(qc->dev->cdb_len < 12);	/* minimum ATAPI CDB length */
3774 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3775 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3776 unsigned long flags;
3778 /* Once we're done issuing command and kicking bmdma,
3779 * irq handler takes over. To not lose irq, we need
3780 * to clear NOINTR flag before sending cdb, but
3781 * interrupt handler shouldn't be invoked before we're
3782 * finished. Hence, the following locking.
3784 spin_lock_irqsave(&ap->host_set->lock, flags);
3785 ap->flags &= ~ATA_FLAG_NOINTR;
3786 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);	/* CDB is always written to the device */
3787 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3788 ap->ops->bmdma_start(qc); /* initiate bmdma */
3789 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3791 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3793 /* PIO commands are handled by polling */
3794 ap->hsm_task_state = HSM_ST;
3795 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3801 ata_poll_qc_complete(qc);	/* error path: finish the qc with err_mask set */
3805 * ata_qc_timeout - Handle timeout of queued command
3806 * @qc: Command that timed out
3808 * Some part of the kernel (currently, only the SCSI layer)
3809 * has noticed that the active command on port @ap has not
3810 * completed after a specified length of time. Handle this
3811 * condition by disabling DMA (if necessary) and completing
3812 * transactions, with error if necessary.
3814 * This also handles the case of the "lost interrupt", where
3815 * for some reason (possibly hardware bug, possibly driver bug)
3816 * an interrupt was not delivered to the driver, even though the
3817 * transaction completed successfully.
3820 * Inherited from SCSI layer (none, can sleep)
3823 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3825 struct ata_port *ap = qc->ap;
3826 struct ata_host_set *host_set = ap->host_set;
3827 u8 host_stat = 0, drv_stat;
3828 unsigned long flags;
3832 ap->hsm_task_state = HSM_ST_IDLE;	/* stop the PIO state machine for this command */
3834 spin_lock_irqsave(&host_set->lock, flags);
3836 switch (qc->tf.protocol) {
3839 case ATA_PROT_ATAPI_DMA:
3840 host_stat = ap->ops->bmdma_status(ap);
3842 /* before we do anything else, clear DMA-Start bit */
3843 ap->ops->bmdma_stop(qc);
3849 drv_stat = ata_chk_status(ap);	/* also clears a pending INTRQ */
3851 /* ack bmdma irq events */
3852 ap->ops->irq_clear(ap);
3854 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3855 ap->id, qc->tf.command, drv_stat, host_stat);
3857 /* complete taskfile transaction */
3858 qc->err_mask |= ac_err_mask(drv_stat);
3862 spin_unlock_irqrestore(&host_set->lock, flags);
3864 ata_eh_qc_complete(qc);	/* route through error handling, not normal completion */
3870 * ata_eng_timeout - Handle timeout of queued command
3871 * @ap: Port on which timed-out command is active
3873 * Some part of the kernel (currently, only the SCSI layer)
3874 * has noticed that the active command on port @ap has not
3875 * completed after a specified length of time. Handle this
3876 * condition by disabling DMA (if necessary) and completing
3877 * transactions, with error if necessary.
3879 * This also handles the case of the "lost interrupt", where
3880 * for some reason (possibly hardware bug, possibly driver bug)
3881 * an interrupt was not delivered to the driver, even though the
3882 * transaction completed successfully.
3885 * Inherited from SCSI layer (none, can sleep)
3888 void ata_eng_timeout(struct ata_port *ap)
3892 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));	/* delegate to the per-qc timeout handler */
3898 * ata_qc_new - Request an available ATA command, for queueing
3899 * @ap: Port associated with device @dev
3900 * @dev: Device from whom we request an available command structure
3906 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3908 struct ata_queued_cmd *qc = NULL;
3911 for (i = 0; i < ATA_MAX_QUEUE; i++)
3912 if (!test_and_set_bit(i, &ap->qactive)) {	/* atomically claim the first free tag */
3913 qc = ata_qc_from_tag(ap, i);
3924 * ata_qc_new_init - Request an available ATA command, and initialize it
3925 * @ap: Port associated with device @dev
3926 * @dev: Device from whom we request an available command structure
3932 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3933 struct ata_device *dev)
3935 struct ata_queued_cmd *qc;
3937 qc = ata_qc_new(ap);	/* may return NULL if all tags are busy */
3950 * ata_qc_free - free unused ata_queued_cmd
3951 * @qc: Command to complete
3953 * Designed to free unused ata_queued_cmd object
3954 * in case something prevents using it.
3957 * spin_lock_irqsave(host_set lock)
3959 void ata_qc_free(struct ata_queued_cmd *qc)
3961 struct ata_port *ap = qc->ap;
3964 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3968 if (likely(ata_tag_valid(tag))) {
3969 if (tag == ap->active_tag)
3970 ap->active_tag = ATA_TAG_POISON;	/* poison so stale lookups are caught */
3971 qc->tag = ATA_TAG_POISON;
3972 clear_bit(tag, &ap->qactive);	/* return the tag to the free pool */
3976 /* Core completion path: unmap DMA if mapped, clear ACTIVE, then
3976  * invoke the qc's completion callback. Caller holds host_set lock. */
3976 void __ata_qc_complete(struct ata_queued_cmd *qc)
3978 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3979 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3981 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3984 /* atapi: mark qc as inactive to prevent the interrupt handler
3985 * from completing the command twice later, before the error handler
3986 * is called. (when rc != 0 and atapi request sense is needed)
3988 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3990 /* call completion callback */
3991 qc->complete_fn(qc);
3994 /* Decide whether this qc's protocol requires DMA-mapping its buffer. */
3994 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3996 struct ata_port *ap = qc->ap;
3998 switch (qc->tf.protocol) {
4000 case ATA_PROT_ATAPI_DMA:
4003 case ATA_PROT_ATAPI:
4005 if (ap->flags & ATA_FLAG_PIO_DMA)	/* some controllers DMA even for PIO-protocol data */
4018 * ata_qc_issue - issue taskfile to device
4019 * @qc: command to issue to device
4021 * Prepare an ATA command to submission to device.
4022 * This includes mapping the data into a DMA-able
4023 * area, filling in the S/G table, and finally
4024 * writing the taskfile to hardware, starting the command.
4027 * spin_lock_irqsave(host_set lock)
4030 * Zero on success, AC_ERR_* mask on failure
4033 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
4035 struct ata_port *ap = qc->ap;
4037 if (ata_should_dma_map(qc)) {
4038 if (qc->flags & ATA_QCFLAG_SG) {
4039 if (ata_sg_setup(qc))	/* non-zero -> mapping failed */
4041 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4042 if (ata_sg_setup_one(qc))
4046 qc->flags &= ~ATA_QCFLAG_DMAMAP;	/* protocol needs no mapping */
4049 ap->ops->qc_prep(qc);
4051 qc->ap->active_tag = qc->tag;
4052 qc->flags |= ATA_QCFLAG_ACTIVE;
4054 return ap->ops->qc_issue(qc);
4057 qc->flags &= ~ATA_QCFLAG_DMAMAP;	/* error path: make sure no cleanup-time unmap happens */
4058 return AC_ERR_SYSTEM;
4063 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4064 * @qc: command to issue to device
4066 * Using various libata functions and hooks, this function
4067 * starts an ATA command. ATA commands are grouped into
4068 * classes called "protocols", and issuing each type of protocol
4069 * is slightly different.
4071 * May be used as the qc_issue() entry in ata_port_operations.
4074 * spin_lock_irqsave(host_set lock)
4077 * Zero on success, AC_ERR_* mask on failure
4080 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4082 struct ata_port *ap = qc->ap;
4084 ata_dev_select(ap, qc->dev->devno, 1, 0);
4086 switch (qc->tf.protocol) {
4087 case ATA_PROT_NODATA:
4088 ata_tf_to_host(ap, &qc->tf);	/* no data phase: just fire the taskfile */
4092 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4093 ap->ops->bmdma_setup(qc); /* set up bmdma */
4094 ap->ops->bmdma_start(qc); /* initiate bmdma */
4097 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4098 ata_qc_set_polling(qc);
4099 ata_tf_to_host(ap, &qc->tf);
4100 ap->hsm_task_state = HSM_ST;
4101 ata_port_queue_task(ap, ata_pio_task, ap, 0);	/* data transfer runs in the PIO worker */
4104 case ATA_PROT_ATAPI:
4105 ata_qc_set_polling(qc);
4106 ata_tf_to_host(ap, &qc->tf);
4107 ata_port_queue_task(ap, atapi_packet_task, ap, 0);	/* CDB delivery happens in the worker */
4110 case ATA_PROT_ATAPI_NODATA:
4111 ap->flags |= ATA_FLAG_NOINTR;	/* hold off irqs until the CDB has been sent */
4112 ata_tf_to_host(ap, &qc->tf);
4113 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4116 case ATA_PROT_ATAPI_DMA:
4117 ap->flags |= ATA_FLAG_NOINTR;
4118 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4119 ap->ops->bmdma_setup(qc); /* set up bmdma */
4120 ata_port_queue_task(ap, atapi_packet_task, ap, 0);	/* worker sends CDB then starts bmdma */
4125 return AC_ERR_SYSTEM;	/* unknown protocol */
4132 * ata_host_intr - Handle host interrupt for given (port, task)
4133 * @ap: Port on which interrupt arrived (possibly...)
4134 * @qc: Taskfile currently active in engine
4136 * Handle host interrupt for given queued command. Currently,
4137 * only DMA interrupts are handled. All other commands are
4138 * handled via polling with interrupts disabled (nIEN bit).
4141 * spin_lock_irqsave(host_set lock)
4144 * One if interrupt was handled, zero if not (shared irq).
4147 inline unsigned int ata_host_intr (struct ata_port *ap,
4148 struct ata_queued_cmd *qc)
4150 u8 status, host_stat;
4152 switch (qc->tf.protocol) {
4155 case ATA_PROT_ATAPI_DMA:
4156 case ATA_PROT_ATAPI:
4157 /* check status of DMA engine */
4158 host_stat = ap->ops->bmdma_status(ap);
4159 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4161 /* if it's not our irq... */
4162 if (!(host_stat & ATA_DMA_INTR))
4165 /* before we do anything else, clear DMA-Start bit */
4166 ap->ops->bmdma_stop(qc);
4170 case ATA_PROT_ATAPI_NODATA:
4171 case ATA_PROT_NODATA:
4172 /* check altstatus */
4173 status = ata_altstatus(ap);	/* altstatus read does not clear INTRQ */
4174 if (status & ATA_BUSY)
4177 /* check main status, clearing INTRQ */
4178 status = ata_chk_status(ap);
4179 if (unlikely(status & ATA_BUSY))
4181 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4182 ap->id, qc->tf.protocol, status);
4184 /* ack bmdma irq events */
4185 ap->ops->irq_clear(ap);
4187 /* complete taskfile transaction */
4188 qc->err_mask |= ac_err_mask(status);
4189 ata_qc_complete(qc);
4196 return 1; /* irq handled */
4199 ap->stats.idle_irq++;	/* spurious/unclaimed interrupt accounting */
4202 if ((ap->stats.idle_irq % 1000) == 0) {
4203 ata_irq_ack(ap, 0); /* debug trap */
4204 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4208 return 0; /* irq not handled */
4212 * ata_interrupt - Default ATA host interrupt handler
4213 * @irq: irq line (unused)
4214 * @dev_instance: pointer to our ata_host_set information structure
4217 * Default interrupt handler for PCI IDE devices. Calls
4218 * ata_host_intr() for each port that is not disabled.
4221 * Obtains host_set lock during operation.
4224 * IRQ_NONE or IRQ_HANDLED.
4227 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4229 struct ata_host_set *host_set = dev_instance;
4231 unsigned int handled = 0;
4232 unsigned long flags;
4234 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4235 spin_lock_irqsave(&host_set->lock, flags);
4237 for (i = 0; i < host_set->n_ports; i++) {
4238 struct ata_port *ap;
4240 ap = host_set->ports[i];
4242 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {	/* skip disabled/polled ports */
4243 struct ata_queued_cmd *qc;
4245 qc = ata_qc_from_tag(ap, ap->active_tag);
4246 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&	/* only if device irqs weren't masked for this qc */
4247 (qc->flags & ATA_QCFLAG_ACTIVE))
4248 handled |= ata_host_intr(ap, qc);
4252 spin_unlock_irqrestore(&host_set->lock, flags);
4254 return IRQ_RETVAL(handled);
4259 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4260 * without filling any other registers
4262 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4265 struct ata_taskfile tf;
4268 ata_tf_init(ap, &tf, dev->devno);
4271 tf.flags |= ATA_TFLAG_DEVICE;
4272 tf.protocol = ATA_PROT_NODATA;	/* opcode-only commands carry no data phase */
4274 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4276 printk(KERN_ERR "%s: ata command failed: %d\n",
4282 /* Flush the device write cache, preferring the 48-bit EXT opcode when
4282  * the device advertises it. No-op for devices without flushable cache. */
4282 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4286 if (!ata_try_flush_cache(dev))
4289 if (ata_id_has_flush_ext(dev->id))
4290 cmd = ATA_CMD_FLUSH_EXT;	/* 48-bit capable devices use the EXT variant */
4292 cmd = ATA_CMD_FLUSH;
4294 return ata_do_simple_cmd(ap, dev, cmd);
4297 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4299 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4302 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4304 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4308 * ata_device_resume - wakeup a previously suspended devices
4309 * @ap: port the device is connected to
4310 * @dev: the device to resume
4312 * Kick the drive back into action, by sending it an idle immediate
4313 * command and making sure its transfer mode matches between drive
4317 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4319 if (ap->flags & ATA_FLAG_SUSPENDED) {
4320 ap->flags &= ~ATA_FLAG_SUSPENDED;
4323 if (!ata_dev_present(dev))
4325 if (dev->class == ATA_DEV_ATA)
4326 ata_start_drive(ap, dev);
4332 * ata_device_suspend - prepare a device for suspend
4333 * @ap: port the device is connected to
4334 * @dev: the device to suspend
4336 * Flush the cache on the drive, if appropriate, then issue a
4337 * standbynow command.
4339 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
4341 if (!ata_dev_present(dev))
4343 if (dev->class == ATA_DEV_ATA)
4344 ata_flush_cache(ap, dev);
4346 if (state.event != PM_EVENT_FREEZE)
4347 ata_standby_drive(ap, dev);
4348 ap->flags |= ATA_FLAG_SUSPENDED;
4353 * ata_port_start - Set port up for dma.
4354 * @ap: Port to initialize
4356 * Called just after data structures for each port are
4357 * initialized. Allocates space for PRD table.
4359 * May be used as the port_start() entry in ata_port_operations.
4362 * Inherited from caller.
4365 int ata_port_start (struct ata_port *ap)
4367 struct device *dev = ap->host_set->dev;
4370 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4374 rc = ata_pad_alloc(ap, dev);
4376 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4380 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4387 * ata_port_stop - Undo ata_port_start()
4388 * @ap: Port to shut down
4390 * Frees the PRD table.
4392 * May be used as the port_stop() entry in ata_port_operations.
4395 * Inherited from caller.
4398 void ata_port_stop (struct ata_port *ap)
4400 struct device *dev = ap->host_set->dev;
4402 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4403 ata_pad_free(ap, dev);
4406 void ata_host_stop (struct ata_host_set *host_set)
4408 if (host_set->mmio_base)
4409 iounmap(host_set->mmio_base);
4414 * ata_host_remove - Unregister SCSI host structure with upper layers
4415 * @ap: Port to unregister
4416 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4419 * Inherited from caller.
4422 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4424 struct Scsi_Host *sh = ap->host;
4429 scsi_remove_host(sh);
4431 ap->ops->port_stop(ap);
4435 * ata_host_init - Initialize an ata_port structure
4436 * @ap: Structure to initialize
4437 * @host: associated SCSI mid-layer structure
4438 * @host_set: Collection of hosts to which @ap belongs
4439 * @ent: Probe information provided by low-level driver
4440 * @port_no: Port number associated with this ata_port
4442 * Initialize a new ata_port structure, and its associated
4446 * Inherited from caller.
4449 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4450 struct ata_host_set *host_set,
4451 const struct ata_probe_ent *ent, unsigned int port_no)
4457 host->max_channel = 1;
4458 host->unique_id = ata_unique_id++;
4459 host->max_cmd_len = 12;
4461 ap->flags = ATA_FLAG_PORT_DISABLED;
4462 ap->id = host->unique_id;
4464 ap->ctl = ATA_DEVCTL_OBS;
4465 ap->host_set = host_set;
4466 ap->port_no = port_no;
4468 ent->legacy_mode ? ent->hard_port_no : port_no;
4469 ap->pio_mask = ent->pio_mask;
4470 ap->mwdma_mask = ent->mwdma_mask;
4471 ap->udma_mask = ent->udma_mask;
4472 ap->flags |= ent->host_flags;
4473 ap->ops = ent->port_ops;
4474 ap->cbl = ATA_CBL_NONE;
4475 ap->active_tag = ATA_TAG_POISON;
4476 ap->last_ctl = 0xFF;
4478 INIT_WORK(&ap->port_task, NULL, NULL);
4479 INIT_LIST_HEAD(&ap->eh_done_q);
4481 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4482 struct ata_device *dev = &ap->device[i];
4484 dev->pio_mask = UINT_MAX;
4485 dev->mwdma_mask = UINT_MAX;
4486 dev->udma_mask = UINT_MAX;
4490 ap->stats.unhandled_irq = 1;
4491 ap->stats.idle_irq = 1;
4494 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4498 * ata_host_add - Attach low-level ATA driver to system
4499 * @ent: Information provided by low-level driver
4500 * @host_set: Collections of ports to which we add
4501 * @port_no: Port number associated with this host
4503 * Attach low-level ATA driver to system.
4506 * PCI/etc. bus probe sem.
4509 * New ata_port on success, for NULL on error.
4512 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4513 struct ata_host_set *host_set,
4514 unsigned int port_no)
4516 struct Scsi_Host *host;
4517 struct ata_port *ap;
4521 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4525 host->transportt = &ata_scsi_transport_template;
4527 ap = (struct ata_port *) &host->hostdata[0];
4529 ata_host_init(ap, host, host_set, ent, port_no);
4531 rc = ap->ops->port_start(ap);
4538 scsi_host_put(host);
4543 * ata_device_add - Register hardware device with ATA and SCSI layers
4544 * @ent: Probe information describing hardware device to be registered
4546 * This function processes the information provided in the probe
4547 * information struct @ent, allocates the necessary ATA and SCSI
4548 * host information structures, initializes them, and registers
4549 * everything with requisite kernel subsystems.
4551 * This function requests irqs, probes the ATA bus, and probes
4555 * PCI/etc. bus probe sem.
4558 * Number of ports registered. Zero on error (no ports registered).
4561 int ata_device_add(const struct ata_probe_ent *ent)
4563 unsigned int count = 0, i;
4564 struct device *dev = ent->dev;
4565 struct ata_host_set *host_set;
4568 /* alloc a container for our list of ATA ports (buses) */
4569 host_set = kzalloc(sizeof(struct ata_host_set) +
4570 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4573 spin_lock_init(&host_set->lock);
4575 host_set->dev = dev;
4576 host_set->n_ports = ent->n_ports;
4577 host_set->irq = ent->irq;
4578 host_set->mmio_base = ent->mmio_base;
4579 host_set->private_data = ent->private_data;
4580 host_set->ops = ent->port_ops;
4582 /* register each port bound to this device */
4583 for (i = 0; i < ent->n_ports; i++) {
4584 struct ata_port *ap;
4585 unsigned long xfer_mode_mask;
4587 ap = ata_host_add(ent, host_set, i);
4591 host_set->ports[i] = ap;
4592 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
4593 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4594 (ap->pio_mask << ATA_SHIFT_PIO);
4596 /* print per-port info to dmesg */
4597 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4598 "bmdma 0x%lX irq %lu\n",
4600 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4601 ata_mode_string(xfer_mode_mask),
4602 ap->ioaddr.cmd_addr,
4603 ap->ioaddr.ctl_addr,
4604 ap->ioaddr.bmdma_addr,
4608 host_set->ops->irq_clear(ap);
4615 /* obtain irq, that is shared between channels */
4616 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4617 DRV_NAME, host_set))
4620 /* perform each probe synchronously */
4621 DPRINTK("probe begin\n");
4622 for (i = 0; i < count; i++) {
4623 struct ata_port *ap;
4626 ap = host_set->ports[i];
4628 DPRINTK("ata%u: bus probe begin\n", ap->id);
4629 rc = ata_bus_probe(ap);
4630 DPRINTK("ata%u: bus probe end\n", ap->id);
4633 /* FIXME: do something useful here?
4634 * Current libata behavior will
4635 * tear down everything when
4636 * the module is removed
4637 * or the h/w is unplugged.
4641 rc = scsi_add_host(ap->host, dev);
4643 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4645 /* FIXME: do something useful here */
4646 /* FIXME: handle unconditional calls to
4647 * scsi_scan_host and ata_host_remove, below,
4653 /* probes are done, now scan each port's disk(s) */
4654 DPRINTK("host probe begin\n");
4655 for (i = 0; i < count; i++) {
4656 struct ata_port *ap = host_set->ports[i];
4658 ata_scsi_scan_host(ap);
4661 dev_set_drvdata(dev, host_set);
4663 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4664 return ent->n_ports; /* success */
4667 for (i = 0; i < count; i++) {
4668 ata_host_remove(host_set->ports[i], 1);
4669 scsi_host_put(host_set->ports[i]->host);
4673 VPRINTK("EXIT, returning 0\n");
4678 * ata_host_set_remove - PCI layer callback for device removal
4679 * @host_set: ATA host set that was removed
4681 * Unregister all objects associated with this host set. Free those
4685 * Inherited from calling layer (may sleep).
4688 void ata_host_set_remove(struct ata_host_set *host_set)
4690 struct ata_port *ap;
4693 for (i = 0; i < host_set->n_ports; i++) {
4694 ap = host_set->ports[i];
4695 scsi_remove_host(ap->host);
4698 free_irq(host_set->irq, host_set);
4700 for (i = 0; i < host_set->n_ports; i++) {
4701 ap = host_set->ports[i];
4703 ata_scsi_release(ap->host);
4705 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4706 struct ata_ioports *ioaddr = &ap->ioaddr;
4708 if (ioaddr->cmd_addr == 0x1f0)
4709 release_region(0x1f0, 8);
4710 else if (ioaddr->cmd_addr == 0x170)
4711 release_region(0x170, 8);
4714 scsi_host_put(ap->host);
4717 if (host_set->ops->host_stop)
4718 host_set->ops->host_stop(host_set);
4724 * ata_scsi_release - SCSI layer callback hook for host unload
4725 * @host: libata host to be unloaded
4727 * Performs all duties necessary to shut down a libata port...
4728 * Kill port kthread, disable port, and release resources.
4731 * Inherited from SCSI layer.
4737 int ata_scsi_release(struct Scsi_Host *host)
4739 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4744 ap->ops->port_disable(ap);
4745 ata_host_remove(ap, 0);
4746 for (i = 0; i < ATA_MAX_DEVICES; i++)
4747 kfree(ap->device[i].id);
4754 * ata_std_ports - initialize ioaddr with standard port offsets.
4755 * @ioaddr: IO address structure to be initialized
4757 * Utility function which initializes data_addr, error_addr,
4758 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4759 * device_addr, status_addr, and command_addr to standard offsets
4760 * relative to cmd_addr.
4762 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4765 void ata_std_ports(struct ata_ioports *ioaddr)
4767 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4768 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4769 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4770 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4771 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4772 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4773 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4774 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4775 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4776 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4782 void ata_pci_host_stop (struct ata_host_set *host_set)
4784 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4786 pci_iounmap(pdev, host_set->mmio_base);
4790 * ata_pci_remove_one - PCI layer callback for device removal
4791 * @pdev: PCI device that was removed
4793 * PCI layer indicates to libata via this hook that
4794 * hot-unplug or module unload event has occurred.
4795 * Handle this by unregistering all objects associated
4796 * with this PCI device. Free those objects. Then finally
4797 * release PCI resources and disable device.
4800 * Inherited from PCI layer (may sleep).
4803 void ata_pci_remove_one (struct pci_dev *pdev)
4805 struct device *dev = pci_dev_to_dev(pdev);
4806 struct ata_host_set *host_set = dev_get_drvdata(dev);
4808 ata_host_set_remove(host_set);
4809 pci_release_regions(pdev);
4810 pci_disable_device(pdev);
4811 dev_set_drvdata(dev, NULL);
4814 /* move to PCI subsystem */
4815 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4817 unsigned long tmp = 0;
4819 switch (bits->width) {
4822 pci_read_config_byte(pdev, bits->reg, &tmp8);
4828 pci_read_config_word(pdev, bits->reg, &tmp16);
4834 pci_read_config_dword(pdev, bits->reg, &tmp32);
4845 return (tmp == bits->val) ? 1 : 0;
4848 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
4850 pci_save_state(pdev);
4851 pci_disable_device(pdev);
4852 pci_set_power_state(pdev, PCI_D3hot);
4856 int ata_pci_device_resume(struct pci_dev *pdev)
4858 pci_set_power_state(pdev, PCI_D0);
4859 pci_restore_state(pdev);
4860 pci_enable_device(pdev);
4861 pci_set_master(pdev);
4864 #endif /* CONFIG_PCI */
4867 static int __init ata_init(void)
4869 ata_wq = create_workqueue("ata");
4873 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4877 static void __exit ata_exit(void)
4879 destroy_workqueue(ata_wq);
module_init(ata_init);
module_exit(ata_exit);
4885 static unsigned long ratelimit_time;
4886 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4888 int ata_ratelimit(void)
4891 unsigned long flags;
4893 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4895 if (time_after(jiffies, ratelimit_time)) {
4897 ratelimit_time = jiffies + (HZ/5);
4901 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(__ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_probeinit);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_std_probe_reset);
EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
EXPORT_SYMBOL_GPL(ata_dev_revalidate);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_device_suspend);
EXPORT_SYMBOL_GPL(ata_device_resume);
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);