/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>

#include "libata.h"
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
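
/*
 * Each triple is consumed by sata_link_debounce(): SStatus is sampled
 * every 'interval' msecs until the value has been stable for 'duration'
 * msecs, giving up once 'timeout' msecs have passed overall.
 */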
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

static int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will be output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
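
/*
 * ata_rw_cmds is indexed as base + fua + lba48 + write: base is 0 for
 * multi-sector PIO, 8 for single-sector PIO and 16 for DMA; FUA adds 4,
 * LBA48 adds 2 and writes add 1 (see ata_rwcmd_protocol() below).  Zero
 * entries mark command variants that do not exist, e.g. FUA reads.
 */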
/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		/* CHS sector numbers are 1-based */
		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
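
/*
 * Worked example for the CHS leg: with 16 heads and 63 sectors/track,
 * CHS 2/5/1 yields (2 * 16 + 5) * 63 + 1 - 1 = block 2331, the exact
 * inverse of the LBA-to-CHS conversion in ata_build_rw_tf() below.
 */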
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;
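
		/*
		 * For FPDMA READ/WRITE the queue tag is carried in bits
		 * 7:3 of the sector count register and the 16-bit block
		 * count is split across the feature (low byte) and
		 * hob_feature (high byte) registers.
		 */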
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
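
/*
 * Each mode family lands in its own bit range of the returned value,
 * so one unsigned long describes every supported PIO, MWDMA and UDMA
 * mode, and masks can be combined or intersected with plain bitwise
 * operations.
 */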
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
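
/*
 * Each row maps one bit range of the packed xfer_mask (starting at
 * shift, bits wide) onto the first XFER_* value of that family, which
 * lets the helpers below convert between mask bits and XFER_* numbers
 * with simple arithmetic.  The { -1, } entry terminates the table.
 */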
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
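
/*
 * The returned mask covers the requested mode and every lower mode in
 * the same family: (2 << n) - 1 sets bits 0..n, and the
 * ~((1 << shift) - 1) term clears the bits below the family's range,
 * so e.g. XFER_UDMA_2 yields the mask for UDMA modes 0-2.
 */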
/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_acpi_on_disable(dev);
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
		dev->class++;
	}
}
static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because disks are too stupid to know that
	 * if the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL; instead they just
	 * give up.  So, for medium_power to work at all,
	 * we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disable DIPM if it does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions which effectively
		 * disable DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void)err_mask;

	return 0;
}
/**
 *	ata_dev_enable_pm - enable SATA interface power management
 *	@dev: device to enable power management
 *	@policy: the link power management policy
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power
 *	policy, and then call driver specific callbacks for
 *	enabling Host Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: -EINVAL if IPM is not supported, 0 otherwise.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
	int rc = 0;
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
	if (rc)
		goto enable_pm_out;
	rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
	if (rc)
		ap->pm_policy = MAX_PERFORMANCE;
	else
		ap->pm_policy = policy;
	return /* rc */;	/* hopefully we can use 'rc' eventually */
}
#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, and call driver specific callbacks for disabling Host
 *	Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */
void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EHI_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}
#ifdef CONFIG_PM
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}

static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 and described them as reserved for SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		printk(KERN_INFO "ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && dev->devno == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}
/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
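
/*
 * Typical use is extracting fixed-width IDENTIFY strings, e.g.
 * ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, sizeof(modelbuf))
 * as done in ata_dev_configure() below.
 */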
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
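
/*
 * The final fallback multiplies IDENTIFY words 1 (default logical
 * cylinders), 3 (heads) and 6 (sectors per track) - the classic
 * geometry product for drives that report neither LBA nor a valid
 * current CHS translation.
 */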
static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media.  The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}

/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);		/* needed; also flushes, for mmio */
}
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user's (low level driver's) responsibility to make sure that
 *	only one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;
	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);
	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n",
					       command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}
	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}
/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}
/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY.  Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY.  Probably a pointless check
	   as the caller should know this */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* For PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}
/**
 *	ata_pio_mask_no_iordy - Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy.  Return
 *	-1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}
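
/*
 * The EIDE PIO word holds a cycle time in nanoseconds, not a mode
 * number: anything slower than 240 ns (the PIO2 cycle time) caps the
 * non-IORDY mask at PIO0-1 (0x3), otherwise PIO0-2 (0x7) is allowed.
 */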
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
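
/*
 * "Knobble" is a heuristic for PATA devices behind a SATA-PATA bridge:
 * the port reports a SATA cable but the device's IDENTIFY data does
 * not claim SATA, so ata_dev_configure() applies bridge limits below.
 */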
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));
	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}
	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string);
	}
	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}
	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know.  We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}
	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
/**
 *	ata_cable_40wire - return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}

/**
 *	ata_cable_80wire - return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}

/**
 *	ata_cable_unknown - return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}

/**
 *	ata_cable_sata - return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
2383 int ata_bus_probe(struct ata_port *ap)
2385 unsigned int classes[ATA_MAX_DEVICES];
2386 int tries[ATA_MAX_DEVICES];
2388 struct ata_device *dev;
2392 ata_link_for_each_dev(dev, &ap->link)
2393 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2396 ata_link_for_each_dev(dev, &ap->link) {
2397 /* If we issue an SRST then an ATA drive (not ATAPI)
2398 * may change configuration and be in PIO0 timing. If
2399 * we do a hard reset (or are coming from power on)
2400 * this is true for ATA or ATAPI. Until we've set a
2401 * suitable controller mode we should not touch the
2402 * bus as we may be talking too fast.
2404 dev->pio_mode = XFER_PIO_0;
2406 /* If the controller has a pio mode setup function
2407 * then use it to set the chipset to rights. Don't
2408 * touch the DMA setup as that will be dealt with when
2409 * configuring devices.
2411 if (ap->ops->set_piomode)
2412 ap->ops->set_piomode(ap, dev);
2415 /* reset and determine device classes */
2416 ap->ops->phy_reset(ap);
2418 ata_link_for_each_dev(dev, &ap->link) {
2419 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2420 dev->class != ATA_DEV_UNKNOWN)
2421 classes[dev->devno] = dev->class;
2423 classes[dev->devno] = ATA_DEV_NONE;
2425 dev->class = ATA_DEV_UNKNOWN;
2430 /* read IDENTIFY page and configure devices. We have to do the identify
2431 specific sequence bass-ackwards so that PDIAG- is released by
2432 the slave device */
2434 ata_link_for_each_dev(dev, &ap->link) {
2435 if (tries[dev->devno])
2436 dev->class = classes[dev->devno];
2438 if (!ata_dev_enabled(dev))
2439 continue;
2441 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2442 dev->id);
2443 if (rc)
2444 goto fail;
2445 }
2447 /* Now ask for the cable type as PDIAG- should have been released */
2448 if (ap->ops->cable_detect)
2449 ap->cbl = ap->ops->cable_detect(ap);
2451 /* We may have SATA bridge glue hiding here irrespective of the
2452 reported cable types and sensed types */
2453 ata_link_for_each_dev(dev, &ap->link) {
2454 if (!ata_dev_enabled(dev))
2455 continue;
2456 /* SATA drives indicate we have a bridge. We don't know which
2457 end of the link the bridge is on, which is a problem */
2458 if (ata_id_is_sata(dev->id))
2459 ap->cbl = ATA_CBL_SATA;
2462 /* After the identify sequence we can now set up the devices. We do
2463 this in the normal order so that the user doesn't get confused */
2465 ata_link_for_each_dev(dev, &ap->link) {
2466 if (!ata_dev_enabled(dev))
2467 continue;
2469 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2470 rc = ata_dev_configure(dev);
2471 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2472 if (rc)
2473 goto fail;
2474 }
2476 /* configure transfer mode */
2477 rc = ata_set_mode(&ap->link, &dev);
2478 if (rc)
2479 goto fail;
2481 ata_link_for_each_dev(dev, &ap->link)
2482 if (ata_dev_enabled(dev))
2483 return 0;
2485 /* no device present, disable port */
2486 ata_port_disable(ap);
2487 return -ENODEV;
2489 fail:
2490 tries[dev->devno]--;
2492 switch (rc) {
2493 case -EINVAL:
2494 /* eeek, something went very wrong, give up */
2495 tries[dev->devno] = 0;
2496 break;
2498 case -ENODEV:
2499 /* give it just one more chance */
2500 tries[dev->devno] = min(tries[dev->devno], 1);
2501 case -EIO:
2502 if (tries[dev->devno] == 1) {
2503 /* This is the last chance, better to slow
2504 * down than lose it.
2506 sata_down_spd_limit(&ap->link);
2507 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2508 }
2509 }
2511 if (!tries[dev->devno])
2512 ata_dev_disable(dev);
2514 goto retry;
2518 * ata_port_probe - Mark port as enabled
2519 * @ap: Port for which we indicate enablement
2521 * Modify @ap data structure such that the system
2522 * thinks that the entire port is enabled.
2524 * LOCKING: host lock, or some other form of
2525 * serialization.
2528 void ata_port_probe(struct ata_port *ap)
2530 ap->flags &= ~ATA_FLAG_DISABLED;
2534 * sata_print_link_status - Print SATA link status
2535 * @link: SATA link to printk link status about
2537 * This function prints link speed and status of a SATA link.
2542 void sata_print_link_status(struct ata_link *link)
2544 u32 sstatus, scontrol, tmp;
2546 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2547 return;
2548 sata_scr_read(link, SCR_CONTROL, &scontrol);
2550 if (ata_link_online(link)) {
2551 tmp = (sstatus >> 4) & 0xf;
2552 ata_link_printk(link, KERN_INFO,
2553 "SATA link up %s (SStatus %X SControl %X)\n",
2554 sata_spd_string(tmp), sstatus, scontrol);
2556 ata_link_printk(link, KERN_INFO,
2557 "SATA link down (SStatus %X SControl %X)\n",
2558 sstatus, scontrol);
2563 * ata_dev_pair - return other device on cable
2566 * Obtain the other device on the same cable, or
2567 * NULL if no such device is present.
2570 struct ata_device *ata_dev_pair(struct ata_device *adev)
2572 struct ata_link *link = adev->link;
2573 struct ata_device *pair = &link->device[1 - adev->devno];
2574 if (!ata_dev_enabled(pair))
2575 return NULL;
2576 return pair;
2580 * ata_port_disable - Disable port.
2581 * @ap: Port to be disabled.
2583 * Modify @ap data structure such that the system
2584 * thinks that the entire port is disabled, and should
2585 * never attempt to probe or communicate with devices
2586 * on this port.
2588 * LOCKING: host lock, or some other form of
2589 * serialization.
2592 void ata_port_disable(struct ata_port *ap)
2594 ap->link.device[0].class = ATA_DEV_NONE;
2595 ap->link.device[1].class = ATA_DEV_NONE;
2596 ap->flags |= ATA_FLAG_DISABLED;
2600 * sata_down_spd_limit - adjust SATA spd limit downward
2601 * @link: Link to adjust SATA spd limit for
2603 * Adjust SATA spd limit of @link downward. Note that this
2604 * function only adjusts the limit. The change must be applied
2605 * using sata_set_spd().
2608 * Inherited from caller.
2611 * 0 on success, negative errno on failure
2613 int sata_down_spd_limit(struct ata_link *link)
2615 u32 sstatus, spd, mask;
2616 int rc, highbit;
2618 if (!sata_scr_valid(link))
2619 return -EOPNOTSUPP;
2621 /* If SCR can be read, use it to determine the current SPD.
2622 * If not, use cached value in link->sata_spd.
2623 */
2624 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2625 if (rc == 0)
2626 spd = (sstatus >> 4) & 0xf;
2627 else
2628 spd = link->sata_spd;
2630 mask = link->sata_spd_limit;
2634 /* unconditionally mask off the highest bit */
2635 highbit = fls(mask) - 1;
2636 mask &= ~(1 << highbit);
2638 /* Mask off all speeds higher than or equal to the current
2639 * one. Force 1.5Gbps if current SPD is not available.
2640 */
2641 if (spd > 1)
2642 mask &= (1 << (spd - 1)) - 1;
2643 else
2644 mask &= 1;
2646 /* were we already at the bottom? */
2647 if (!mask)
2648 return -EINVAL;
2650 link->sata_spd_limit = mask;
2652 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2653 sata_spd_string(fls(mask)));
2655 return 0;
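/*
 * Worked example (illustrative): with the link at 3.0 Gbps (spd == 2)
 * and link->sata_spd_limit == 0x3 (1.5 and 3.0 Gbps allowed),
 * fls(0x3) - 1 == 1 so bit 1 is cleared first (mask == 0x1), and
 * masking off speeds >= the current one (mask &= (1 << (2 - 1)) - 1)
 * still leaves 0x1. The limit therefore becomes 1.5 Gbps, to be
 * applied by a subsequent hardreset via sata_set_spd().
 */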
2658 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2660 struct ata_link *host_link = &link->ap->link;
2661 u32 limit, target, spd;
2663 limit = link->sata_spd_limit;
2665 /* Don't configure downstream link faster than upstream link.
2666 * It doesn't speed up anything and some PMPs choke on such
2667 * configuration.
2668 */
2669 if (!ata_is_host_link(link) && host_link->sata_spd)
2670 limit &= (1 << host_link->sata_spd) - 1;
2672 if (limit == UINT_MAX)
2673 target = 0;
2674 else
2675 target = fls(limit);
2677 spd = (*scontrol >> 4) & 0xf;
2678 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2680 return spd != target;
2684 * sata_set_spd_needed - is SATA spd configuration needed
2685 * @link: Link in question
2687 * Test whether the spd limit in SControl matches
2688 * @link->sata_spd_limit. This function is used to determine
2689 * whether hardreset is necessary to apply SATA spd
2690 * configuration.
2693 * Inherited from caller.
2696 * 1 if SATA spd configuration is needed, 0 otherwise.
2698 int sata_set_spd_needed(struct ata_link *link)
2700 u32 scontrol;
2702 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2703 return 1;
2705 return __sata_set_spd_needed(link, &scontrol);
2709 * sata_set_spd - set SATA spd according to spd limit
2710 * @link: Link to set SATA spd for
2712 * Set SATA spd of @link according to sata_spd_limit.
2715 * Inherited from caller.
2718 * 0 if spd doesn't need to be changed, 1 if spd has been
2719 * changed. Negative errno if SCR registers are inaccessible.
2721 int sata_set_spd(struct ata_link *link)
2723 u32 scontrol;
2724 int rc;
2726 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2727 return rc;
2729 if (!__sata_set_spd_needed(link, &scontrol))
2730 return 0;
2732 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2733 return rc;
2735 return 1;
2739 * This mode timing computation functionality is ported over from
2740 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2743 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2744 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2745 * for UDMA6, which is currently supported only by Maxtor drives.
2747 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2750 static const struct ata_timing ata_timing[] = {
2751 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2752 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2753 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2754 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2755 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2756 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2757 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2758 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2760 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2761 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2762 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2764 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2765 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2766 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2767 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2768 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2770 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2771 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2772 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2773 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2774 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2775 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2776 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2777 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2782 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2783 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2785 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2787 q->setup = EZ(t->setup * 1000, T);
2788 q->act8b = EZ(t->act8b * 1000, T);
2789 q->rec8b = EZ(t->rec8b * 1000, T);
2790 q->cyc8b = EZ(t->cyc8b * 1000, T);
2791 q->active = EZ(t->active * 1000, T);
2792 q->recover = EZ(t->recover * 1000, T);
2793 q->cycle = EZ(t->cycle * 1000, T);
2794 q->udma = EZ(t->udma * 1000, UT);
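/*
 * Worked example (illustrative): T and UT are the bus clock periods in
 * picoseconds, e.g. roughly 30000 for a 33 MHz PATA clock. The PIO4
 * entry above (active 70 ns, recover 25 ns, cycle 120 ns) quantizes to
 * EZ(70 * 1000, 30000) = 3 clocks active, EZ(25 * 1000, 30000) = 1
 * clock recover and 4 clocks cycle, so active + recover already equals
 * the cycle count and no lengthening is needed later in
 * ata_timing_compute().
 */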
2797 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2798 struct ata_timing *m, unsigned int what)
2800 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2801 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2802 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2803 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2804 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2805 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2806 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2807 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2810 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2812 const struct ata_timing *t = ata_timing;
2814 while (xfer_mode > t->mode)
2815 t++;
2817 if (xfer_mode == t->mode)
2818 return t;
2820 return NULL;
2822 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2823 struct ata_timing *t, int T, int UT)
2825 const struct ata_timing *s;
2826 struct ata_timing p;
2832 if (!(s = ata_timing_find_mode(speed)))
2833 return -EINVAL;
2835 memcpy(t, s, sizeof(*s));
2838 * If the drive is an EIDE drive, it can tell us it needs extended
2839 * PIO/MW_DMA cycle timing.
2842 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2843 memset(&p, 0, sizeof(p));
2844 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2845 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2846 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2847 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2848 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2849 }
2850 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2851 }
2854 * Convert the timing to bus clock counts.
2857 ata_timing_quantize(t, t, T, UT);
2860 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2861 * S.M.A.R.T. and some other commands. We have to ensure that the
2862 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2865 if (speed > XFER_PIO_6) {
2866 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2867 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2868 }
2871 * Lengthen active & recovery time so that cycle time is correct.
2874 if (t->act8b + t->rec8b < t->cyc8b) {
2875 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2876 t->rec8b = t->cyc8b - t->act8b;
2879 if (t->active + t->recover < t->cycle) {
2880 t->active += (t->cycle - (t->active + t->recover)) / 2;
2881 t->recover = t->cycle - t->active;
2884 /* In a few cases quantisation may produce enough errors to
2885 leave t->cycle too low for the sum of active and recovery;
2886 if so, we must correct this. */
2887 if (t->active + t->recover > t->cycle)
2888 t->cycle = t->active + t->recover;
2890 return 0;
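/*
 * Illustrative sketch (not part of the original file) of how a PATA
 * LLD's ->set_piomode hook typically consumes this helper; the function
 * names are hypothetical and T/UT must come from the real bus clock,
 * in picoseconds (e.g. T = 1000000000 / 33333 for a 33 MHz PCI clock):
 *
 *	static void example_set_piomode(struct ata_port *ap,
 *					struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *
 *		if (ata_timing_compute(adev, adev->pio_mode, &t,
 *				       30000, 15000) == 0)
 *			example_program_timings(ap, adev, &t);
 *	}
 */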
2894 * ata_down_xfermask_limit - adjust dev xfer masks downward
2895 * @dev: Device to adjust xfer masks
2896 * @sel: ATA_DNXFER_* selector
2898 * Adjust xfer masks of @dev downward. Note that this function
2899 * does not apply the change. Invoking ata_set_mode() afterwards
2900 * will apply the limit.
2903 * Inherited from caller.
2906 * 0 on success, negative errno on failure
2908 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2910 char buf[32];
2911 unsigned long orig_mask, xfer_mask;
2912 unsigned long pio_mask, mwdma_mask, udma_mask;
2913 int quiet, highbit;
2915 quiet = !!(sel & ATA_DNXFER_QUIET);
2916 sel &= ~ATA_DNXFER_QUIET;
2918 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2919 dev->mwdma_mask, dev->udma_mask);
2921 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2924 case ATA_DNXFER_PIO:
2925 highbit = fls(pio_mask) - 1;
2926 pio_mask &= ~(1 << highbit);
2927 break;
2929 case ATA_DNXFER_DMA:
2930 if (udma_mask) {
2931 highbit = fls(udma_mask) - 1;
2932 udma_mask &= ~(1 << highbit);
2933 if (!udma_mask)
2934 return -ENOENT;
2935 } else if (mwdma_mask) {
2936 highbit = fls(mwdma_mask) - 1;
2937 mwdma_mask &= ~(1 << highbit);
2938 if (!mwdma_mask)
2939 return -ENOENT;
2940 }
2941 break;
2943 case ATA_DNXFER_40C:
2944 udma_mask &= ATA_UDMA_MASK_40C;
2945 break;
2947 case ATA_DNXFER_FORCE_PIO0:
2948 pio_mask &= 1; /* fall through */
2949 case ATA_DNXFER_FORCE_PIO:
2950 mwdma_mask = 0;
2951 udma_mask = 0;
2952 break;
2954 default:
2955 BUG();
2956 }
2958 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2960 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2961 return -ENOENT;
2963 if (!quiet) {
2964 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2965 snprintf(buf, sizeof(buf), "%s:%s",
2966 ata_mode_string(xfer_mask),
2967 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2968 else
2969 snprintf(buf, sizeof(buf), "%s",
2970 ata_mode_string(xfer_mask));
2972 ata_dev_printk(dev, KERN_WARNING,
2973 "limiting speed to %s\n", buf);
2974 }
2976 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2977 &dev->udma_mask);
2979 return 0;
2982 static int ata_dev_set_mode(struct ata_device *dev)
2984 struct ata_eh_context *ehc = &dev->link->eh_context;
2985 unsigned int err_mask;
2986 int rc;
2988 dev->flags &= ~ATA_DFLAG_PIO;
2989 if (dev->xfer_shift == ATA_SHIFT_PIO)
2990 dev->flags |= ATA_DFLAG_PIO;
2992 err_mask = ata_dev_set_xfermode(dev);
2994 /* Old CFA may refuse this command, which is just fine */
2995 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2996 err_mask &= ~AC_ERR_DEV;
2998 /* Some very old devices and some bad newer ones fail any kind of
2999 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3000 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3001 dev->pio_mode <= XFER_PIO_2)
3002 err_mask &= ~AC_ERR_DEV;
3004 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3005 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3006 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3007 dev->dma_mode == XFER_MW_DMA_0 &&
3008 (dev->id[63] >> 8) & 1)
3009 err_mask &= ~AC_ERR_DEV;
3011 if (err_mask) {
3012 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3013 "(err_mask=0x%x)\n", err_mask);
3014 return -EIO;
3015 }
3017 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3018 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3019 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3020 if (rc)
3021 return rc;
3023 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3024 dev->xfer_shift, (int)dev->xfer_mode);
3026 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
3027 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
3029 return 0;
3032 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3033 * @link: link on which timings will be programmed
3034 * @r_failed_dev: out parameter for failed device
3036 * Standard implementation of the function used to tune and set
3037 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3038 * ata_dev_set_mode() fails, pointer to the failing device is
3039 * returned in @r_failed_dev.
3042 * PCI/etc. bus probe sem.
3045 * 0 on success, negative errno otherwise
3048 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3050 struct ata_port *ap = link->ap;
3051 struct ata_device *dev;
3052 int rc = 0, used_dma = 0, found = 0;
3054 /* step 1: calculate xfer_mask */
3055 ata_link_for_each_dev(dev, link) {
3056 unsigned long pio_mask, dma_mask;
3057 unsigned int mode_mask;
3059 if (!ata_dev_enabled(dev))
3060 continue;
3062 mode_mask = ATA_DMA_MASK_ATA;
3063 if (dev->class == ATA_DEV_ATAPI)
3064 mode_mask = ATA_DMA_MASK_ATAPI;
3065 else if (ata_id_is_cfa(dev->id))
3066 mode_mask = ATA_DMA_MASK_CFA;
3068 ata_dev_xfermask(dev);
3070 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3071 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3073 if (libata_dma_mask & mode_mask)
3074 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3075 else
3076 dma_mask = 0;
3078 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3079 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3081 found = 1;
3082 if (dev->dma_mode != 0xff)
3083 used_dma = 1;
3084 }
3086 if (!found)
3087 goto out;
3088 /* step 2: always set host PIO timings */
3089 ata_link_for_each_dev(dev, link) {
3090 if (!ata_dev_enabled(dev))
3093 if (dev->pio_mode == 0xff) {
3094 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3095 rc = -EINVAL;
3096 goto out;
3097 }
3099 dev->xfer_mode = dev->pio_mode;
3100 dev->xfer_shift = ATA_SHIFT_PIO;
3101 if (ap->ops->set_piomode)
3102 ap->ops->set_piomode(ap, dev);
3105 /* step 3: set host DMA timings */
3106 ata_link_for_each_dev(dev, link) {
3107 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3108 continue;
3110 dev->xfer_mode = dev->dma_mode;
3111 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3112 if (ap->ops->set_dmamode)
3113 ap->ops->set_dmamode(ap, dev);
3116 /* step 4: update devices' xfer mode */
3117 ata_link_for_each_dev(dev, link) {
3118 /* don't update suspended devices' xfer mode */
3119 if (!ata_dev_enabled(dev))
3120 continue;
3122 rc = ata_dev_set_mode(dev);
3123 if (rc)
3124 goto out;
3127 /* Record simplex status. If we selected DMA then the other
3128 * host channels are not permitted to do so.
3130 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3131 ap->host->simplex_claimed = ap;
3133 out:
3134 if (rc)
3135 *r_failed_dev = dev;
3136 return rc;
3140 * ata_tf_to_host - issue ATA taskfile to host controller
3141 * @ap: port to which command is being issued
3142 * @tf: ATA taskfile register set
3144 * Issues ATA taskfile register set to ATA host controller,
3145 * with proper synchronization with interrupt handler and
3146 * other threads.
3149 * spin_lock_irqsave(host lock)
3152 static inline void ata_tf_to_host(struct ata_port *ap,
3153 const struct ata_taskfile *tf)
3155 ap->ops->tf_load(ap, tf);
3156 ap->ops->exec_command(ap, tf);
3160 * ata_busy_sleep - sleep until BSY clears, or timeout
3161 * @ap: port containing status register to be polled
3162 * @tmout_pat: impatience timeout
3163 * @tmout: overall timeout
3165 * Sleep until ATA Status register bit BSY clears,
3166 * or a timeout occurs.
3169 * Kernel thread context (may sleep).
3172 * 0 on success, -errno otherwise.
3174 int ata_busy_sleep(struct ata_port *ap,
3175 unsigned long tmout_pat, unsigned long tmout)
3177 unsigned long timer_start, timeout;
3178 u8 status;
3180 status = ata_busy_wait(ap, ATA_BUSY, 300);
3181 timer_start = jiffies;
3182 timeout = timer_start + tmout_pat;
3183 while (status != 0xff && (status & ATA_BUSY) &&
3184 time_before(jiffies, timeout)) {
3185 msleep(50);
3186 status = ata_busy_wait(ap, ATA_BUSY, 3);
3187 }
3189 if (status != 0xff && (status & ATA_BUSY))
3190 ata_port_printk(ap, KERN_WARNING,
3191 "port is slow to respond, please be patient "
3192 "(Status 0x%x)\n", status);
3194 timeout = timer_start + tmout;
3195 while (status != 0xff && (status & ATA_BUSY) &&
3196 time_before(jiffies, timeout)) {
3197 msleep(50);
3198 status = ata_chk_status(ap);
3199 }
3201 if (status == 0xff)
3202 return -ENODEV;
3204 if (status & ATA_BUSY) {
3205 ata_port_printk(ap, KERN_ERR, "port failed to respond "
3206 "(%lu secs, Status 0x%x)\n",
3207 tmout / HZ, status);
3208 return -EBUSY;
3209 }
3211 return 0;
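/*
 * Usage note (illustrative): callers typically pass a short impatience
 * timeout and a longer overall one, e.g.
 * ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT), so the
 * "slow to respond" warning fires early while a slow drive still gets
 * the full window before -EBUSY is returned.
 */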
3215 * ata_wait_after_reset - wait before checking status after reset
3216 * @ap: port containing status register to be polled
3217 * @deadline: deadline jiffies for the operation
3219 * After reset, we need to pause a while before reading status.
3220 * Also, certain combination of controller and device report 0xff
3221 * for some duration (e.g. until SATA PHY is up and running)
3222 * which is interpreted as empty port in ATA world. This
3223 * function also waits for such devices to get out of 0xff
3224 * status.
3227 * Kernel thread context (may sleep).
3229 void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
3231 unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
3233 if (time_before(until, deadline))
3234 deadline = until;
3236 /* Spec mandates ">= 2ms" before checking status. We wait
3237 * 150ms, because that was the magic delay used for ATAPI
3238 * devices in Hale Landis's ATADRVR, for the period of time
3239 * between when the ATA command register is written, and then
3240 * status is checked. Because waiting for "a while" before
3241 * checking status is fine, post SRST, we perform this magic
3242 * delay here as well.
3244 * Old drivers/ide uses the 2 ms rule and then waits for ready.
3245 */
3246 msleep(150);
3248 /* Wait for 0xff to clear. Some SATA devices take a long time
3249 * to clear 0xff after reset. For example, HHD424020F7SV00
3250 * iVDR needs >= 800ms while Quantum GoVault needs even more
3251 * than that.
3253 * Note that some PATA controllers (pata_ali) explode if
3254 * status register is read more than once when there's no
3255 * device attached.
3256 */
3257 if (ap->flags & ATA_FLAG_SATA) {
3258 while (1) {
3259 u8 status = ata_chk_status(ap);
3261 if (status != 0xff || time_after(jiffies, deadline))
3262 return;
3264 msleep(100);
3265 }
3266 }
3270 * ata_wait_ready - sleep until BSY clears, or timeout
3271 * @ap: port containing status register to be polled
3272 * @deadline: deadline jiffies for the operation
3274 * Sleep until ATA Status register bit BSY clears, or timeout
3275 * occurs.
3278 * Kernel thread context (may sleep).
3281 * 0 on success, -errno otherwise.
3283 int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3285 unsigned long start = jiffies;
3286 int warned = 0;
3288 while (1) {
3289 u8 status = ata_chk_status(ap);
3290 unsigned long now = jiffies;
3292 if (!(status & ATA_BUSY))
3293 return 0;
3294 if (!ata_link_online(&ap->link) && status == 0xff)
3295 return -ENODEV;
3296 if (time_after(now, deadline))
3297 return -EBUSY;
3299 if (!warned && time_after(now, start + 5 * HZ) &&
3300 (deadline - now > 3 * HZ)) {
3301 ata_port_printk(ap, KERN_WARNING,
3302 "port is slow to respond, please be patient "
3303 "(Status 0x%x)\n", status);
3304 warned = 1;
3305 }
3307 msleep(50);
3308 }
3311 static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3312 unsigned long deadline)
3314 struct ata_ioports *ioaddr = &ap->ioaddr;
3315 unsigned int dev0 = devmask & (1 << 0);
3316 unsigned int dev1 = devmask & (1 << 1);
3317 int rc, ret = 0;
3319 /* if device 0 was found in ata_devchk, wait for its
3320 * BSY bit to clear
3321 */
3322 if (dev0) {
3323 rc = ata_wait_ready(ap, deadline);
3324 if (rc) {
3325 if (rc != -ENODEV)
3326 return rc;
3327 ret = rc;
3328 }
3329 }
3331 /* if device 1 was found in ata_devchk, wait for register
3332 * access briefly, then wait for BSY to clear.
3333 */
3334 if (dev1) {
3335 int i;
3337 ap->ops->dev_select(ap, 1);
3339 /* Wait for register access. Some ATAPI devices fail
3340 * to set nsect/lbal after reset, so don't waste too
3341 * much time on it. We're gonna wait for !BSY anyway.
3343 for (i = 0; i < 2; i++) {
3346 nsect = ioread8(ioaddr->nsect_addr);
3347 lbal = ioread8(ioaddr->lbal_addr);
3348 if ((nsect == 1) && (lbal == 1))
3350 msleep(50); /* give drive a breather */
3353 rc = ata_wait_ready(ap, deadline);
3354 if (rc) {
3355 if (rc != -ENODEV)
3356 return rc;
3357 ret = rc;
3358 }
3359 }
3361 /* is all this really necessary? */
3362 ap->ops->dev_select(ap, 0);
3363 if (dev1)
3364 ap->ops->dev_select(ap, 1);
3365 if (dev0)
3366 ap->ops->dev_select(ap, 0);
3368 return ret;
3371 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3372 unsigned long deadline)
3374 struct ata_ioports *ioaddr = &ap->ioaddr;
3376 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3378 /* software reset. causes dev0 to be selected */
3379 iowrite8(ap->ctl, ioaddr->ctl_addr);
3380 udelay(20); /* FIXME: flush */
3381 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3382 udelay(20); /* FIXME: flush */
3383 iowrite8(ap->ctl, ioaddr->ctl_addr);
3385 /* wait a while before checking status */
3386 ata_wait_after_reset(ap, deadline);
3388 /* Before we perform post reset processing we want to see if
3389 * the bus shows 0xFF because the odd clown forgets the D7
3390 * pulldown resistor.
3392 if (ata_chk_status(ap) == 0xFF)
3393 return -ENODEV;
3395 return ata_bus_post_reset(ap, devmask, deadline);
3399 * ata_bus_reset - reset host port and associated ATA channel
3400 * @ap: port to reset
3402 * This is typically the first time we actually start issuing
3403 * commands to the ATA channel. We wait for BSY to clear, then
3404 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3405 * result. Determine what devices, if any, are on the channel
3406 * by looking at the device 0/1 error register. Look at the signature
3407 * stored in each device's taskfile registers, to determine if
3408 * the device is ATA or ATAPI.
3411 * PCI/etc. bus probe sem.
3412 * Obtains host lock.
3415 * Sets ATA_FLAG_DISABLED if bus reset fails.
3418 void ata_bus_reset(struct ata_port *ap)
3420 struct ata_device *device = ap->link.device;
3421 struct ata_ioports *ioaddr = &ap->ioaddr;
3422 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3423 u8 err;
3424 unsigned int dev0, dev1 = 0, devmask = 0;
3425 int rc;
3427 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3429 /* determine if device 0/1 are present */
3430 if (ap->flags & ATA_FLAG_SATA_RESET)
3431 dev0 = 1;
3432 else {
3433 dev0 = ata_devchk(ap, 0);
3434 if (slave_possible)
3435 dev1 = ata_devchk(ap, 1);
3436 }
3438 if (dev0)
3439 devmask |= (1 << 0);
3440 if (dev1)
3441 devmask |= (1 << 1);
3443 /* select device 0 again */
3444 ap->ops->dev_select(ap, 0);
3446 /* issue bus reset */
3447 if (ap->flags & ATA_FLAG_SRST) {
3448 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3449 if (rc && rc != -ENODEV)
3450 goto err_out;
3451 }
3454 * determine by signature whether we have ATA or ATAPI devices
3456 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3457 if ((slave_possible) && (err != 0x81))
3458 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3460 /* is double-select really necessary? */
3461 if (device[1].class != ATA_DEV_NONE)
3462 ap->ops->dev_select(ap, 1);
3463 if (device[0].class != ATA_DEV_NONE)
3464 ap->ops->dev_select(ap, 0);
3466 /* if no devices were detected, disable this port */
3467 if ((device[0].class == ATA_DEV_NONE) &&
3468 (device[1].class == ATA_DEV_NONE))
3469 goto err_out;
3471 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3472 /* set up device control for ATA_FLAG_SATA_RESET */
3473 iowrite8(ap->ctl, ioaddr->ctl_addr);
3474 }
3476 DPRINTK("EXIT\n");
3477 return;
3479 err_out:
3480 ata_port_printk(ap, KERN_ERR, "disabling port\n");
3481 ata_port_disable(ap);
3487 * sata_link_debounce - debounce SATA phy status
3488 * @link: ATA link to debounce SATA phy status for
3489 * @params: timing parameters { interval, duration, timeout } in msec
3490 * @deadline: deadline jiffies for the operation
3492 * Make sure SStatus of @link reaches stable state, determined by
3493 * holding the same value where DET is not 1 for @duration polled
3494 * every @interval, before @timeout. Timeout constrains the
3495 * beginning of the stable state. Because DET gets stuck at 1 on
3496 * some controllers after hot unplugging, this function waits
3497 * until timeout then returns 0 if DET is stable at 1.
3499 * @timeout is further limited by @deadline. The sooner of the
3500 * two wins.
3503 * Kernel thread context (may sleep)
3506 * 0 on success, -errno on failure.
3508 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3509 unsigned long deadline)
3511 unsigned long interval_msec = params[0];
3512 unsigned long duration = msecs_to_jiffies(params[1]);
3513 unsigned long last_jiffies, t;
3514 u32 last, cur;
3515 int rc;
3517 t = jiffies + msecs_to_jiffies(params[2]);
3518 if (time_before(t, deadline))
3519 deadline = t;
3521 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3522 return rc;
3523 cur &= 0xf;
3525 last = cur;
3526 last_jiffies = jiffies;
3528 while (1) {
3529 msleep(interval_msec);
3530 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3531 return rc;
3532 cur &= 0xf;
3534 /* DET stable? */
3535 if (cur == last) {
3536 if (cur == 1 && time_before(jiffies, deadline))
3537 continue;
3538 if (time_after(jiffies, last_jiffies + duration))
3539 return 0;
3540 continue;
3541 }
3543 /* unstable, start over */
3544 last = cur;
3545 last_jiffies = jiffies;
3547 /* Check deadline. If debouncing failed, return
3548 * -EPIPE to tell upper layer to lower link speed.
3549 */
3550 if (time_after(jiffies, deadline))
3551 return -EPIPE;
3552 }
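/*
 * Usage note (illustrative): callers pass one of the debounce timing
 * tables defined at the top of this file together with an EH deadline,
 * e.g.:
 *
 *	rc = sata_link_debounce(link, sata_deb_timing_hotplug,
 *				jiffies + 5 * HZ);
 *
 * The 5 second deadline above is an arbitrary example value.
 */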
3556 * sata_link_resume - resume SATA link
3557 * @link: ATA link to resume SATA
3558 * @params: timing parameters { interval, duration, timeout } in msec
3559 * @deadline: deadline jiffies for the operation
3561 * Resume SATA phy @link and debounce it.
3564 * Kernel thread context (may sleep)
3567 * 0 on success, -errno on failure.
3569 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3570 unsigned long deadline)
3572 u32 scontrol;
3573 int rc;
3575 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3576 return rc;
3578 scontrol = (scontrol & 0x0f0) | 0x300;
3580 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3581 return rc;
3583 /* Some PHYs react badly if SStatus is pounded immediately
3584 * after resuming. Delay 200ms before debouncing.
3585 */
3586 msleep(200);
3588 return sata_link_debounce(link, params, deadline);
3592 * ata_std_prereset - prepare for reset
3593 * @link: ATA link to be reset
3594 * @deadline: deadline jiffies for the operation
3596 * @link is about to be reset. Initialize it. Failure from
3597 * prereset makes libata abort whole reset sequence and give up
3598 * that port, so prereset should be best-effort. It does its
3599 * best to prepare for reset sequence but if things go wrong, it
3600 * should just whine, not fail.
3603 * Kernel thread context (may sleep)
3606 * 0 on success, -errno otherwise.
3608 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3610 struct ata_port *ap = link->ap;
3611 struct ata_eh_context *ehc = &link->eh_context;
3612 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3613 int rc;
3615 /* handle link resume */
3616 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3617 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3618 ehc->i.action |= ATA_EH_HARDRESET;
3620 /* Some PMPs don't work with only SRST, force hardreset if PMP
3621 * is attached.
3622 */
3623 if (ap->flags & ATA_FLAG_PMP)
3624 ehc->i.action |= ATA_EH_HARDRESET;
3626 /* if we're about to do hardreset, nothing more to do */
3627 if (ehc->i.action & ATA_EH_HARDRESET)
3628 return 0;
3630 /* if SATA, resume link */
3631 if (ap->flags & ATA_FLAG_SATA) {
3632 rc = sata_link_resume(link, timing, deadline);
3633 /* whine about phy resume failure but proceed */
3634 if (rc && rc != -EOPNOTSUPP)
3635 ata_link_printk(link, KERN_WARNING, "failed to resume "
3636 "link for reset (errno=%d)\n", rc);
3639 /* Wait for !BSY if the controller can wait for the first D2H
3640 * Reg FIS and we don't know that no device is attached.
3642 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3643 rc = ata_wait_ready(ap, deadline);
3644 if (rc && rc != -ENODEV) {
3645 ata_link_printk(link, KERN_WARNING, "device not ready "
3646 "(errno=%d), forcing hardreset\n", rc);
3647 ehc->i.action |= ATA_EH_HARDRESET;
3648 }
3649 }
3651 return 0;
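/*
 * Illustrative sketch (not from this file): a BMDMA-based LLD commonly
 * chains the standard prereset/softreset/hardreset/postreset callbacks
 * from its ->error_handler; the driver function name is hypothetical:
 *
 *	static void example_error_handler(struct ata_port *ap)
 *	{
 *		ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
 *				   sata_std_hardreset, ata_std_postreset);
 *	}
 */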
3655 * ata_std_softreset - reset host port via ATA SRST
3656 * @link: ATA link to reset
3657 * @classes: resulting classes of attached devices
3658 * @deadline: deadline jiffies for the operation
3660 * Reset host port using ATA SRST.
3663 * Kernel thread context (may sleep)
3666 * 0 on success, -errno otherwise.
3668 int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3669 unsigned long deadline)
3671 struct ata_port *ap = link->ap;
3672 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3673 unsigned int devmask = 0;
3674 int rc;
3675 u8 err;
3677 DPRINTK("ENTER\n");
3679 if (ata_link_offline(link)) {
3680 classes[0] = ATA_DEV_NONE;
3681 goto out;
3682 }
3684 /* determine if device 0/1 are present */
3685 if (ata_devchk(ap, 0))
3686 devmask |= (1 << 0);
3687 if (slave_possible && ata_devchk(ap, 1))
3688 devmask |= (1 << 1);
3690 /* select device 0 again */
3691 ap->ops->dev_select(ap, 0);
3693 /* issue bus reset */
3694 DPRINTK("about to softreset, devmask=%x\n", devmask);
3695 rc = ata_bus_softreset(ap, devmask, deadline);
3696 /* if link is occupied, -ENODEV too is an error */
3697 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3698 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3699 return rc;
3700 }
3702 /* determine by signature whether we have ATA or ATAPI devices */
3703 classes[0] = ata_dev_try_classify(&link->device[0],
3704 devmask & (1 << 0), &err);
3705 if (slave_possible && err != 0x81)
3706 classes[1] = ata_dev_try_classify(&link->device[1],
3707 devmask & (1 << 1), &err);
3709 out:
3710 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3711 return 0;
3715 * sata_link_hardreset - reset link via SATA phy reset
3716 * @link: link to reset
3717 * @timing: timing parameters { interval, duration, timeout } in msec
3718 * @deadline: deadline jiffies for the operation
3720 * SATA phy-reset @link using DET bits of SControl register.
3723 * Kernel thread context (may sleep)
3726 * 0 on success, -errno otherwise.
3728 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3729 unsigned long deadline)
3731 u32 scontrol;
3732 int rc;
3734 DPRINTK("ENTER\n");
3736 if (sata_set_spd_needed(link)) {
3737 /* SATA spec says nothing about how to reconfigure
3738 * spd. To be on the safe side, turn off phy during
3739 * reconfiguration. This works for at least ICH7 AHCI
3740 * and Sil3124.
3741 */
3742 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3743 goto out;
3745 scontrol = (scontrol & 0x0f0) | 0x304;
3747 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3748 goto out;
3750 sata_set_spd(link);
3751 }
3753 /* issue phy wake/reset */
3754 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3755 goto out;
3757 scontrol = (scontrol & 0x0f0) | 0x301;
3759 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3760 goto out;
3762 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3763 * 10.4.2 says at least 1 ms.
3764 */
3765 msleep(1);
3767 /* bring link back */
3768 rc = sata_link_resume(link, timing, deadline);
3769 out:
3770 DPRINTK("EXIT, rc=%d\n", rc);
3771 return rc;
3775 * sata_std_hardreset - reset host port via SATA phy reset
3776 * @link: link to reset
3777 * @class: resulting class of attached device
3778 * @deadline: deadline jiffies for the operation
3780 * SATA phy-reset host port using DET bits of SControl register,
3781 * wait for !BSY and classify the attached device.
3784 * Kernel thread context (may sleep)
3787 * 0 on success, -errno otherwise.
3789 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3790 unsigned long deadline)
3792 struct ata_port *ap = link->ap;
3793 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3794 int rc;
3796 DPRINTK("ENTER\n");
3798 /* do hardreset */
3799 rc = sata_link_hardreset(link, timing, deadline);
3800 if (rc) {
3801 ata_link_printk(link, KERN_ERR,
3802 "COMRESET failed (errno=%d)\n", rc);
3803 return rc;
3804 }
3806 /* TODO: phy layer with polling, timeouts, etc. */
3807 if (ata_link_offline(link)) {
3808 *class = ATA_DEV_NONE;
3809 DPRINTK("EXIT, link offline\n");
3810 return 0;
3811 }
3813 /* wait a while before checking status */
3814 ata_wait_after_reset(ap, deadline);
3816 /* If PMP is supported, we have to do follow-up SRST. Note
3817 * that some PMPs don't send D2H Reg FIS after hardreset at
3818 * all if the first port is empty. Wait for it just for a
3819 * second and request follow-up SRST.
3821 if (ap->flags & ATA_FLAG_PMP) {
3822 ata_wait_ready(ap, jiffies + HZ);
3823 return -EAGAIN;
3824 }
3826 rc = ata_wait_ready(ap, deadline);
3827 /* link occupied, -ENODEV too is an error */
3828 if (rc) {
3829 ata_link_printk(link, KERN_ERR,
3830 "COMRESET failed (errno=%d)\n", rc);
3831 return rc;
3832 }
3834 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3836 *class = ata_dev_try_classify(link->device, 1, NULL);
3838 DPRINTK("EXIT, class=%u\n", *class);
3839 return 0;
3843 * ata_std_postreset - standard postreset callback
3844 * @link: the target ata_link
3845 * @classes: classes of attached devices
3847 * This function is invoked after a successful reset. Note that
3848 * the device might have been reset more than once using
3849 * different reset methods before postreset is invoked.
3852 * Kernel thread context (may sleep)
3854 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3856 struct ata_port *ap = link->ap;
3857 u32 serror;
3859 DPRINTK("ENTER\n");
3861 /* print link status */
3862 sata_print_link_status(link);
3865 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3866 sata_scr_write(link, SCR_ERROR, serror);
3867 link->eh_info.serror = 0;
3869 /* is double-select really necessary? */
3870 if (classes[0] != ATA_DEV_NONE)
3871 ap->ops->dev_select(ap, 1);
3872 if (classes[1] != ATA_DEV_NONE)
3873 ap->ops->dev_select(ap, 0);
3875 /* bail out if no device is present */
3876 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3877 DPRINTK("EXIT, no device\n");
3878 return;
3879 }
3881 /* set up device control */
3882 if (ap->ioaddr.ctl_addr)
3883 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3889 * ata_dev_same_device - Determine whether new ID matches configured device
3890 * @dev: device to compare against
3891 * @new_class: class of the new device
3892 * @new_id: IDENTIFY page of the new device
3894 * Compare @new_class and @new_id against @dev and determine
3895 * whether @dev is the device indicated by @new_class and
3902 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3904 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3905 const u16 *new_id)
3906 {
3907 const u16 *old_id = dev->id;
3908 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3909 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3911 if (dev->class != new_class) {
3912 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3913 dev->class, new_class);
3914 return 0;
3915 }
3917 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3918 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3919 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3920 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3922 if (strcmp(model[0], model[1])) {
3923 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3924 "'%s' != '%s'\n", model[0], model[1]);
3925 return 0;
3926 }
3928 if (strcmp(serial[0], serial[1])) {
3929 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3930 "'%s' != '%s'\n", serial[0], serial[1]);
3931 return 0;
3932 }
3934 return 1;
3938 * ata_dev_reread_id - Re-read IDENTIFY data
3939 * @dev: target ATA device
3940 * @readid_flags: read ID flags
3942 * Re-read IDENTIFY page and make sure @dev is still attached to
3943 * the port.
3946 * Kernel thread context (may sleep)
3949 * 0 on success, negative errno otherwise
3951 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3953 unsigned int class = dev->class;
3954 u16 *id = (void *)dev->link->ap->sector_buf;
3955 int rc;
3958 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3959 if (rc)
3960 return rc;
3962 /* is the device still there? */
3963 if (!ata_dev_same_device(dev, class, id))
3964 return -ENODEV;
3966 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3967 return 0;
3971 * ata_dev_revalidate - Revalidate ATA device
3972 * @dev: device to revalidate
3973 * @new_class: new class code
3974 * @readid_flags: read ID flags
3976 * Re-read IDENTIFY page, make sure @dev is still attached to the
3977 * port and reconfigure it according to the new IDENTIFY page.
3980 * Kernel thread context (may sleep)
3983 * 0 on success, negative errno otherwise
3985 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3986 unsigned int readid_flags)
3988 u64 n_sectors = dev->n_sectors;
3989 int rc;
3991 if (!ata_dev_enabled(dev))
3992 return -ENODEV;
3994 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3995 if (ata_class_enabled(new_class) &&
3996 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3997 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3998 dev->class, new_class);
3999 rc = -ENODEV;
4000 goto fail;
4001 }
4004 rc = ata_dev_reread_id(dev, readid_flags);
4005 if (rc)
4006 goto fail;
4008 /* configure device according to the new ID */
4009 rc = ata_dev_configure(dev);
4010 if (rc)
4011 goto fail;
4013 /* verify n_sectors hasn't changed */
4014 if (dev->class == ATA_DEV_ATA && n_sectors &&
4015 dev->n_sectors != n_sectors) {
4016 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4017 "%llu != %llu\n",
4018 (unsigned long long)n_sectors,
4019 (unsigned long long)dev->n_sectors);
4021 /* restore original n_sectors */
4022 dev->n_sectors = n_sectors;
4024 rc = -ENODEV;
4025 goto fail;
4026 }
4028 return 0;
4030 fail:
4031 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4032 return rc;
4035 struct ata_blacklist_entry {
4036 const char *model_num;
4037 const char *model_rev;
4038 unsigned long horkage;
4041 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4042 /* Devices with DMA related problems under Linux */
4043 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4044 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4045 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4046 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4047 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4048 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4049 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4050 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4051 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4052 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
4053 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4054 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4055 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4056 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4057 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4058 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4059 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
4060 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4061 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4062 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4063 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4064 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4065 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4066 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4067 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4068 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4069 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4070 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4071 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4072 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4073 /* Odd clown on sil3726/4726 PMPs */
4074 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
4075 ATA_HORKAGE_SKIP_PM },
4077 /* Weird ATAPI devices */
4078 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4080 /* Devices we expect to fail diagnostics */
4082 /* Devices where NCQ should be avoided */
4084 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4085 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4086 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4087 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4089 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4090 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4091 { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
4092 { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
4093 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4094 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4096 /* Blacklist entries taken from Silicon Image 3124/3132
4097 Windows driver .inf file - also several Linux problem reports */
4098 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4099 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4100 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4102 /* devices which puke on READ_NATIVE_MAX */
4103 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4104 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4105 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4106 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4108 /* Devices which report 1 sector over size HPA */
4109 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4110 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4112 /* Devices which get the IVB wrong */
4113 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4114 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
4115 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
4116 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4117 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
4119 /* End Marker */
4120 { }
4121 };
4123 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4124 {
4125 const char *p;
4126 int len;
4128 /*
4129 * check for trailing wildcard: *\0
4130 */
4131 p = strchr(patt, wildchar);
4132 if (p && ((*(p + 1)) == 0))
4133 len = p - patt;
4134 else {
4135 len = strlen(name);
4136 if (!len)
4137 return 0;
4138 }
4143 return strncmp(patt, name, len);
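/*
 * Worked example (illustrative): with wildchar == '*', the pattern
 * "Maxtor *" above has a trailing wildcard, so len becomes 7 and only
 * "Maxtor " is compared: strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0",
 * '*') returns 0 (a match). A pattern without a trailing '*' must
 * instead match the probed string over its whole length.
 */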
4146 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4148 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4149 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4150 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4152 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4153 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4155 while (ad->model_num) {
4156 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4157 if (ad->model_rev == NULL)
4158 return ad->horkage;
4159 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4160 return ad->horkage;
4161 }
4162 ad++;
4163 }
4164 return 0;
4167 static int ata_dma_blacklisted(const struct ata_device *dev)
4169 /* We don't support polling DMA.
4170 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4171 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4173 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4174 (dev->flags & ATA_DFLAG_CDB_INTR))
4175 return 1;
4176 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4180 * ata_is_40wire - check drive side detection
4183 * Perform drive side detection decoding, allowing for device vendors
4184 * who can't follow the documentation.
4187 static int ata_is_40wire(struct ata_device *dev)
4189 if (dev->horkage & ATA_HORKAGE_IVB)
4190 return ata_drive_40wire_relaxed(dev->id);
4191 return ata_drive_40wire(dev->id);
4195 * ata_dev_xfermask - Compute supported xfermask of the given device
4196 * @dev: Device to compute xfermask for
4198 * Compute supported xfermask of @dev and store it in
4199 * dev->*_mask. This function is responsible for applying all
4200 * known limits including host controller limits, device
4201 * blacklist, etc...
4206 static void ata_dev_xfermask(struct ata_device *dev)
4208 struct ata_link *link = dev->link;
4209 struct ata_port *ap = link->ap;
4210 struct ata_host *host = ap->host;
4211 unsigned long xfer_mask;
4213 /* controller modes available */
4214 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4215 ap->mwdma_mask, ap->udma_mask);
4217 /* drive modes available */
4218 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4219 dev->mwdma_mask, dev->udma_mask);
4220 xfer_mask &= ata_id_xfermask(dev->id);
4223 * CFA Advanced TrueIDE timings are not allowed on a shared
4224 * cable.
4225 */
4226 if (ata_dev_pair(dev)) {
4227 /* No PIO5 or PIO6 */
4228 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4229 /* No MWDMA3 or MWDMA 4 */
4230 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4233 if (ata_dma_blacklisted(dev)) {
4234 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4235 ata_dev_printk(dev, KERN_WARNING,
4236 "device is on DMA blacklist, disabling DMA\n");
4239 if ((host->flags & ATA_HOST_SIMPLEX) &&
4240 host->simplex_claimed && host->simplex_claimed != ap) {
4241 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4242 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4243 "other device, disabling DMA\n");
4246 if (ap->flags & ATA_FLAG_NO_IORDY)
4247 xfer_mask &= ata_pio_mask_no_iordy(dev);
4249 if (ap->ops->mode_filter)
4250 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4252 /* Apply cable rule here. Don't apply it early because when
4253 * we handle hot plug the cable type can itself change.
4254 * Check this last so that we know if the transfer rate was
4255 * solely limited by the cable.
4256 * Unknown or 80 wire cables reported host side are checked
4257 * drive side as well. Cases where we know a 40wire cable
4258 * is used safely for 80 are not checked here.
4260 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4261 /* UDMA/44 or higher would be available */
4262 if ((ap->cbl == ATA_CBL_PATA40) ||
4263 (ata_is_40wire(dev) &&
4264 (ap->cbl == ATA_CBL_PATA_UNK ||
4265 ap->cbl == ATA_CBL_PATA80))) {
4266 ata_dev_printk(dev, KERN_WARNING,
4267 "limited to UDMA/33 due to 40-wire cable\n");
4268 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4271 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4272 &dev->mwdma_mask, &dev->udma_mask);
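/*
 * Illustrative note: the packed xfer_mask keeps the three mode masks in
 * one word via the ATA_SHIFT_* constants (PIO at bit 0, MWDMA at bit 5,
 * UDMA at bit 8 in this kernel), which is why the cable rule above can
 * test for "UDMA/44 or higher" with (0xF8 << ATA_SHIFT_UDMA), i.e.
 * UDMA3..UDMA7. A sketch of the round trip:
 *
 *	xfer_mask = ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
 *	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
 */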
4276 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4277 * @dev: Device to which command will be sent
4279 * Issue SET FEATURES - XFER MODE command to device @dev
4283 * PCI/etc. bus probe sem.
4286 * 0 on success, AC_ERR_* mask otherwise.
4289 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4291 struct ata_taskfile tf;
4292 unsigned int err_mask;
4294 /* set up set-features taskfile */
4295 DPRINTK("set features - xfer mode\n");
4297 /* Some controllers and ATAPI devices show flaky interrupt
4298 * behavior after setting xfer mode. Use polling instead.
4300 ata_tf_init(dev, &tf);
4301 tf.command = ATA_CMD_SET_FEATURES;
4302 tf.feature = SETFEATURES_XFER;
4303 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4304 tf.protocol = ATA_PROT_NODATA;
4305 /* If we are using IORDY we must send the mode setting command */
4306 if (ata_pio_need_iordy(dev))
4307 tf.nsect = dev->xfer_mode;
4308 /* If the device has IORDY and the controller does not - turn it off */
4309 else if (ata_id_has_iordy(dev->id))
4310 tf.nsect = 0x01;
4311 else /* In the ancient relic department - skip all of this */
4312 return 0;
4314 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4316 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4317 return err_mask;
4320 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4321 * @dev: Device to which command will be sent
4322 * @enable: Whether to enable or disable the feature
4323 * @feature: feature value to set (passed in the sector count register)
4325 * Issue SET FEATURES - SATA FEATURES command to device @dev
4326 * on port @ap with sector count set to @feature.
4329 * PCI/etc. bus probe sem.
4332 * 0 on success, AC_ERR_* mask otherwise.
4334 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4337 struct ata_taskfile tf;
4338 unsigned int err_mask;
4340 /* set up set-features taskfile */
4341 DPRINTK("set features - SATA features\n");
4343 ata_tf_init(dev, &tf);
4344 tf.command = ATA_CMD_SET_FEATURES;
4345 tf.feature = enable;
4346 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4347 tf.protocol = ATA_PROT_NODATA;
4348 tf.nsect = feature;
4350 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4352 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4353 return err_mask;
4357 * ata_dev_init_params - Issue INIT DEV PARAMS command
4358 * @dev: Device to which command will be sent
4359 * @heads: Number of heads (taskfile parameter)
4360 * @sectors: Number of sectors (taskfile parameter)
4363 * Kernel thread context (may sleep)
4366 * 0 on success, AC_ERR_* mask otherwise.
4368 static unsigned int ata_dev_init_params(struct ata_device *dev,
4369 u16 heads, u16 sectors)
4371 struct ata_taskfile tf;
4372 unsigned int err_mask;
4374 /* Number of sectors per track 1-255. Number of heads 1-16 */
4375 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4376 return AC_ERR_INVALID;
4378 /* set up init dev params taskfile */
4379 DPRINTK("init dev params \n");
4381 ata_tf_init(dev, &tf);
4382 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4383 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4384 tf.protocol = ATA_PROT_NODATA;
4385 tf.nsect = sectors;
4386 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4388 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4389 /* A clean abort indicates an original or just out of spec drive
4390 and we should continue as we issue the setup based on the
4391 drive reported working geometry */
4392 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4393 err_mask = 0;
4395 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4396 return err_mask;
4400 * ata_sg_clean - Unmap DMA memory associated with command
4401 * @qc: Command containing DMA memory to be released
4403 * Unmap all mapped DMA memory associated with this command.
4406 * spin_lock_irqsave(host lock)
4408 void ata_sg_clean(struct ata_queued_cmd *qc)
4410 struct ata_port *ap = qc->ap;
4411 struct scatterlist *sg = qc->__sg;
4412 int dir = qc->dma_dir;
4413 void *pad_buf = NULL;
4415 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4416 WARN_ON(sg == NULL);
4418 if (qc->flags & ATA_QCFLAG_SINGLE)
4419 WARN_ON(qc->n_elem > 1);
4421 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4423 /* if we padded the buffer out to 32-bit bound, and data
4424 * xfer direction is from-device, we must copy from the
4425 * pad buffer back into the supplied buffer
4427 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4428 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4430 if (qc->flags & ATA_QCFLAG_SG) {
4431 if (qc->n_elem)
4432 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4433 /* restore last sg */
4434 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
4435 if (pad_buf) {
4436 struct scatterlist *psg = &qc->pad_sgent;
4437 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4438 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4439 kunmap_atomic(addr, KM_IRQ0);
4440 }
4441 } else {
4442 if (qc->n_elem)
4443 dma_unmap_single(ap->dev,
4444 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4445 dir);
4446 /* restore sg */
4447 sg->length += qc->pad_len;
4448 if (pad_buf)
4449 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4450 pad_buf, qc->pad_len);
4451 }
4453 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4458 * ata_fill_sg - Fill PCI IDE PRD table
4459 * @qc: Metadata associated with taskfile to be transferred
4461 * Fill PCI IDE PRD (scatter-gather) table with segments
4462 * associated with the current disk command.
4465 * spin_lock_irqsave(host lock)
4468 static void ata_fill_sg(struct ata_queued_cmd *qc)
4470 struct ata_port *ap = qc->ap;
4471 struct scatterlist *sg;
4472 unsigned int idx;
4474 WARN_ON(qc->__sg == NULL);
4475 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4477 idx = 0;
4478 ata_for_each_sg(sg, qc) {
4479 u32 addr, offset;
4480 u32 sg_len, len;
4482 /* determine if physical DMA addr spans 64K boundary.
4483 * Note h/w doesn't support 64-bit, so we unconditionally
4484 * truncate dma_addr_t to u32.
4486 addr = (u32) sg_dma_address(sg);
4487 sg_len = sg_dma_len(sg);
4489 while (sg_len) {
4490 offset = addr & 0xffff;
4491 len = sg_len;
4492 if ((offset + sg_len) > 0x10000)
4493 len = 0x10000 - offset;
4495 ap->prd[idx].addr = cpu_to_le32(addr);
4496 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4497 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4499 idx++;
4500 sg_len -= len;
4501 addr += len;
4502 }
4503 }
4505 if (idx)
4506 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
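/*
 * Worked example (illustrative): a 0x2800-byte segment at DMA address
 * 0xF000 has offset 0xF000 into its 64K region, so the first PRD entry
 * is trimmed to 0x10000 - 0xF000 = 0x1000 bytes and a second entry
 * covers the remaining 0x1800 bytes starting at 0x10000; no single PRD
 * entry ever crosses a 64K boundary.
 */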
4510 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4511 * @qc: Metadata associated with taskfile to be transferred
4513 * Fill PCI IDE PRD (scatter-gather) table with segments
4514 * associated with the current disk command. Perform the fill
4515 * so that we avoid writing any length 64K records for
4516 * controllers that don't follow the spec.
4519 * spin_lock_irqsave(host lock)
4522 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4524 struct ata_port *ap = qc->ap;
4525 struct scatterlist *sg;
4526 unsigned int idx;
4528 WARN_ON(qc->__sg == NULL);
4529 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4531 idx = 0;
4532 ata_for_each_sg(sg, qc) {
4533 u32 addr, offset;
4534 u32 sg_len, len, blen;
4536 /* determine if physical DMA addr spans 64K boundary.
4537 * Note h/w doesn't support 64-bit, so we unconditionally
4538 * truncate dma_addr_t to u32.
4540 addr = (u32) sg_dma_address(sg);
4541 sg_len = sg_dma_len(sg);
4543 while (sg_len) {
4544 offset = addr & 0xffff;
4545 len = sg_len;
4546 if ((offset + sg_len) > 0x10000)
4547 len = 0x10000 - offset;
4549 blen = len & 0xffff;
4550 ap->prd[idx].addr = cpu_to_le32(addr);
4551 if (blen == 0) {
4552 /* Some PATA chipsets like the CS5530 can't
4553 cope with 0x0000 meaning 64K as the spec says */
4554 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4555 blen = 0x8000;
4556 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4557 }
4558 ap->prd[idx].flags_len = cpu_to_le32(blen);
4559 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4561 idx++;
4562 sg_len -= len;
4563 addr += len;
4564 }
4565 }
4567 if (idx)
4568 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4572 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4573 * @qc: Metadata associated with taskfile to check
4575 * Allow low-level driver to filter ATA PACKET commands, returning
4576 * a status indicating whether or not it is OK to use DMA for the
4577 * supplied PACKET command.
4580 * spin_lock_irqsave(host lock)
4582 * RETURNS: 0 when ATAPI DMA can be used
4583 * nonzero otherwise
4585 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4587 struct ata_port *ap = qc->ap;
4589 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4590 * few ATAPI devices choke on such DMA requests.
4592 if (unlikely(qc->nbytes & 15))
4593 return 1;
4595 if (ap->ops->check_atapi_dma)
4596 return ap->ops->check_atapi_dma(qc);
4598 return 0;
4602 * atapi_qc_may_overflow - Check whether data transfer may overflow
4603 * @qc: ATA command in question
4605 * ATAPI commands which transfer variable length data to host
4606 * might overflow due to application error or hardware bug. This
4607 * function checks whether overflow should be drained and ignored
4608 * for @qc.
4614 * 1 if @qc may overflow; otherwise, 0.
4616 static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
4618 if (qc->tf.protocol != ATA_PROT_ATAPI &&
4619 qc->tf.protocol != ATA_PROT_ATAPI_DMA)
4620 return 0;
4622 if (qc->tf.flags & ATA_TFLAG_WRITE)
4623 return 0;
4625 switch (qc->cdb[0]) {
4626 case READ_10:
4627 case READ_12:
4628 case WRITE_10:
4629 case WRITE_12:
4630 case GPCMD_READ_CD:
4631 case GPCMD_READ_CD_MSF:
4632 return 0;
4633 }
4635 return 1;
4639 * ata_std_qc_defer - Check whether a qc needs to be deferred
4640 * @qc: ATA command in question
4642 * Non-NCQ commands cannot run with any other command, NCQ or
4643 * not. As upper layer only knows the queue depth, we are
4644 * responsible for maintaining exclusion. This function checks
4645 * whether a new command @qc can be issued.
4648 * spin_lock_irqsave(host lock)
4651 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4653 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4655 struct ata_link *link = qc->dev->link;
4657 if (qc->tf.protocol == ATA_PROT_NCQ) {
4658 if (!ata_tag_valid(link->active_tag))
4659 return 0;
4660 } else {
4661 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4662 return 0;
4663 }
4665 return ATA_DEFER_LINK;
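/*
 * Usage note (illustrative): NCQ-capable LLDs whose hardware cannot mix
 * NCQ and non-NCQ commands simply point their port operations at this
 * helper (".qc_defer = ata_std_qc_defer"); a command deferred with
 * ATA_DEFER_LINK is retried once the link's active commands drain.
 */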
4669 * ata_qc_prep - Prepare taskfile for submission
4670 * @qc: Metadata associated with taskfile to be prepared
4672 * Prepare ATA taskfile for submission.
4675 * spin_lock_irqsave(host lock)
4677 void ata_qc_prep(struct ata_queued_cmd *qc)
4679 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4680 return;
4682 ata_fill_sg(qc);
4686 * ata_dumb_qc_prep - Prepare taskfile for submission
4687 * @qc: Metadata associated with taskfile to be prepared
4689 * Prepare ATA taskfile for submission.
4692 * spin_lock_irqsave(host lock)
4694 void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4696 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4697 return;
4699 ata_fill_sg_dumb(qc);
4702 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
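/*
 * Usage note (illustrative): a LLD picks the ->qc_prep variant matching
 * its hardware -- ata_qc_prep for spec-compliant PRD engines,
 * ata_dumb_qc_prep for chips that mishandle a 64K PRD entry (see the
 * CS5530 note in ata_fill_sg_dumb() above), and ata_noop_qc_prep when
 * the controller does not use the PRD table at all.
 */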
4705 * ata_sg_init_one - Associate command with memory buffer
4706 * @qc: Command to be associated
4707 * @buf: Memory buffer
4708 * @buflen: Length of memory buffer, in bytes.
4710 * Initialize the data-related elements of queued_cmd @qc
4711 * to point to a single memory buffer, @buf of byte length @buflen.
4714 * spin_lock_irqsave(host lock)
4717 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4719 qc->flags |= ATA_QCFLAG_SINGLE;
4721 qc->__sg = &qc->sgent;
4723 qc->orig_n_elem = 1;
4725 qc->nbytes = buflen;
4726 qc->cursg = qc->__sg;
4728 sg_init_one(&qc->sgent, buf, buflen);
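/*
 * Hypothetical usage sketch: pointing a qc at a single driver-owned
 * buffer before issue.  The buffer, its length and the direction are
 * assumptions for illustration only.
 */
static void example_setup_single(struct ata_queued_cmd *qc, void *resp_buf)
{
	ata_sg_init_one(qc, resp_buf, 512);	/* one 512-byte buffer */
	qc->dma_dir = DMA_FROM_DEVICE;		/* device-to-host transfer */
}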
4732 * ata_sg_init - Associate command with scatter-gather table.
4733 * @qc: Command to be associated
4734 * @sg: Scatter-gather table.
4735 * @n_elem: Number of elements in s/g table.
4737 * Initialize the data-related elements of queued_cmd @qc
4738 * to point to a scatter-gather table @sg, containing @n_elem
4742 * spin_lock_irqsave(host lock)
4745 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4746 unsigned int n_elem)
4748 qc->flags |= ATA_QCFLAG_SG;
4750 qc->n_elem = n_elem;
4751 qc->orig_n_elem = n_elem;
4752 qc->cursg = qc->__sg;
4756 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4757 * @qc: Command with memory buffer to be mapped.
4759 * DMA-map the memory buffer associated with queued_cmd @qc.
4762 * spin_lock_irqsave(host lock)
4765 * Zero on success, negative on error.
4768 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4770 struct ata_port *ap = qc->ap;
4771 int dir = qc->dma_dir;
4772 struct scatterlist *sg = qc->__sg;
4773 dma_addr_t dma_address;
4776 /* we must lengthen transfers to end on a 32-bit boundary */
4777 qc->pad_len = sg->length & 3;
4779 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4780 struct scatterlist *psg = &qc->pad_sgent;
4782 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4784 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4786 if (qc->tf.flags & ATA_TFLAG_WRITE)
4787 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4790 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4791 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4793 sg->length -= qc->pad_len;
4794 if (sg->length == 0)
4797 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4798 sg->length, qc->pad_len);
4806 dma_address = dma_map_single(ap->dev, qc->buf_virt,
4808 if (dma_mapping_error(dma_address)) {
4810 sg->length += qc->pad_len;
4814 sg_dma_address(sg) = dma_address;
4815 sg_dma_len(sg) = sg->length;
4818 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4819 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4825 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4826 * @qc: Command with scatter-gather table to be mapped.
4828 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4831 * spin_lock_irqsave(host lock)
4834 * Zero on success, negative on error.
4838 static int ata_sg_setup(struct ata_queued_cmd *qc)
4840 struct ata_port *ap = qc->ap;
4841 struct scatterlist *sg = qc->__sg;
4842 struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4843 int n_elem, pre_n_elem, dir, trim_sg = 0;
4845 VPRINTK("ENTER, ata%u\n", ap->print_id);
4846 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4848 /* we must lengthen transfers to end on a 32-bit boundary */
4849 qc->pad_len = lsg->length & 3;
4851 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4852 struct scatterlist *psg = &qc->pad_sgent;
4853 unsigned int offset;
4855 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4857 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4860 * psg->page/offset are used to copy to-be-written
4861 * data in this function or read data in ata_sg_clean.
4863 offset = lsg->offset + lsg->length - qc->pad_len;
4864 sg_init_table(psg, 1);
4865 sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
4866 qc->pad_len, offset_in_page(offset));
4868 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4869 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4870 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4871 kunmap_atomic(addr, KM_IRQ0);
4874 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4875 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4877 lsg->length -= qc->pad_len;
4878 if (lsg->length == 0)
4881 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4882 qc->n_elem - 1, lsg->length, qc->pad_len);
4885 pre_n_elem = qc->n_elem;
4886 if (trim_sg && pre_n_elem)
4895 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4897 /* restore last sg */
4898 lsg->length += qc->pad_len;
4902 DPRINTK("%d sg elements mapped\n", n_elem);
4905 qc->n_elem = n_elem;
4911 * swap_buf_le16 - swap halves of 16-bit words in place
4912 * @buf: Buffer to swap
4913 * @buf_words: Number of 16-bit words in buffer.
4915 * Swap halves of 16-bit words if needed to convert from
4916 * little-endian byte order to native cpu byte order, or
4920 * Inherited from caller.
4922 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4927 for (i = 0; i < buf_words; i++)
4928 buf[i] = le16_to_cpu(buf[i]);
4929 #endif /* __BIG_ENDIAN */
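/*
 * Typical use sketch: IDENTIFY DEVICE data arrives as 256 little-endian
 * 16-bit words, so it is swapped once after reading (a no-op on
 * little-endian builds, as above).
 */
static void example_fixup_id(struct ata_device *dev)
{
	swap_buf_le16(dev->id, ATA_ID_WORDS);
}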
4933 * ata_data_xfer - Transfer data by PIO
4934 * @adev: device to target
4936 * @buflen: buffer length
4937 * @write_data: read/write
4939 * Transfer data from/to the device data register by PIO.
4942 * Inherited from caller.
4944 void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4945 unsigned int buflen, int write_data)
4947 struct ata_port *ap = adev->link->ap;
4948 unsigned int words = buflen >> 1;
4950 /* Transfer multiple of 2 bytes */
4952 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4954 ioread16_rep(ap->ioaddr.data_addr, buf, words);
4956 /* Transfer trailing 1 byte, if any. */
4957 if (unlikely(buflen & 0x01)) {
4958 u16 align_buf[1] = { 0 };
4959 unsigned char *trailing_buf = buf + buflen - 1;
4962 memcpy(align_buf, trailing_buf, 1);
4963 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4965 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4966 memcpy(trailing_buf, align_buf, 1);
4972 * ata_data_xfer_noirq - Transfer data by PIO
4973 * @adev: device to target
4975 * @buflen: buffer length
4976 * @write_data: read/write
4978 * Transfer data from/to the device data register by PIO. Do the
4979 * transfer with interrupts disabled.
4982 * Inherited from caller.
4984 void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4985 unsigned int buflen, int write_data)
4987 unsigned long flags;
4988 local_irq_save(flags);
4989 ata_data_xfer(adev, buf, buflen, write_data);
4990 local_irq_restore(flags);
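/*
 * Sketch: an LLD picks one of the two PIO helpers as its data_xfer
 * hook.  Partial initializer only; a real driver fills in the rest
 * of the ops.
 */
static const struct ata_port_operations example_pio_ops = {
	.data_xfer	= ata_data_xfer_noirq,	/* IRQ-off variant */
};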
4995 * ata_pio_sector - Transfer a sector of data.
4996 * @qc: Command in progress
4998 * Transfer qc->sect_size bytes of data from/to the ATA device.
5001 * Inherited from caller.
5004 static void ata_pio_sector(struct ata_queued_cmd *qc)
5006 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5007 struct ata_port *ap = qc->ap;
5009 unsigned int offset;
5012 if (qc->curbytes == qc->nbytes - qc->sect_size)
5013 ap->hsm_task_state = HSM_ST_LAST;
5015 page = sg_page(qc->cursg);
5016 offset = qc->cursg->offset + qc->cursg_ofs;
5018 /* get the current page and offset */
5019 page = nth_page(page, (offset >> PAGE_SHIFT));
5020 offset %= PAGE_SIZE;
5022 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5024 if (PageHighMem(page)) {
5025 unsigned long flags;
5027 /* FIXME: use a bounce buffer */
5028 local_irq_save(flags);
5029 buf = kmap_atomic(page, KM_IRQ0);
5031 /* do the actual data transfer */
5032 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5034 kunmap_atomic(buf, KM_IRQ0);
5035 local_irq_restore(flags);
5037 buf = page_address(page);
5038 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5041 qc->curbytes += qc->sect_size;
5042 qc->cursg_ofs += qc->sect_size;
5044 if (qc->cursg_ofs == qc->cursg->length) {
5045 qc->cursg = sg_next(qc->cursg);
5051 * ata_pio_sectors - Transfer one or many sectors.
5052 * @qc: Command in progress
5054 * Transfer one or many sectors of data from/to the
5055 * ATA device for the DRQ request.
5058 * Inherited from caller.
5061 static void ata_pio_sectors(struct ata_queued_cmd *qc)
5063 if (is_multi_taskfile(&qc->tf)) {
5064 /* READ/WRITE MULTIPLE */
5067 WARN_ON(qc->dev->multi_count == 0);
5069 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
5070 qc->dev->multi_count);
5076 ata_altstatus(qc->ap); /* flush */
5080 * atapi_send_cdb - Write CDB bytes to hardware
5081 * @ap: Port to which ATAPI device is attached.
5082 * @qc: Taskfile currently active
5084 * When the device has indicated its readiness to accept
5085 * a CDB, this function is called. Send the CDB.
5091 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5094 DPRINTK("send cdb\n");
5095 WARN_ON(qc->dev->cdb_len < 12);
5097 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
5098 ata_altstatus(ap); /* flush */
5100 switch (qc->tf.protocol) {
5101 case ATA_PROT_ATAPI:
5102 ap->hsm_task_state = HSM_ST;
5104 case ATA_PROT_ATAPI_NODATA:
5105 ap->hsm_task_state = HSM_ST_LAST;
5107 case ATA_PROT_ATAPI_DMA:
5108 ap->hsm_task_state = HSM_ST_LAST;
5109 /* initiate bmdma */
5110 ap->ops->bmdma_start(qc);
5116 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
5117 * @qc: Command in progress
5118 * @bytes: number of bytes
5120 * Transfer data from/to the ATAPI device.
5123 * Inherited from caller.
5126 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
5128 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5129 struct ata_port *ap = qc->ap;
5130 struct ata_eh_info *ehi = &qc->dev->link->eh_info;
5131 struct scatterlist *sg;
5134 unsigned int offset, count;
5138 if (unlikely(!sg)) {
5140 * The end of qc->sg is reached and the device expects
5141 * more data to transfer. In order not to overrun qc->sg
5142 * and to fulfill the length specified in the byte count register:
5143 * - for the read case, discard trailing data from the device
5144 * - for the write case, send zero padding to the device
5146 u16 pad_buf[1] = { 0 };
5149 if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
5150 ata_ehi_push_desc(ehi, "too much trailing data "
5151 "buf=%u cur=%u bytes=%u",
5152 qc->nbytes, qc->curbytes, bytes);
5156 /* overflow is expected for misc ATAPI commands */
5157 if (bytes && !atapi_qc_may_overflow(qc))
5158 ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
5159 "trailing data (cdb=%02x nbytes=%u)\n",
5160 bytes, qc->cdb[0], qc->nbytes);
5162 for (i = 0; i < (bytes + 1) / 2; i++)
5163 ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
5165 qc->curbytes += bytes;
5171 offset = sg->offset + qc->cursg_ofs;
5173 /* get the current page and offset */
5174 page = nth_page(page, (offset >> PAGE_SHIFT));
5175 offset %= PAGE_SIZE;
5177 /* don't overrun current sg */
5178 count = min(sg->length - qc->cursg_ofs, bytes);
5180 /* don't cross page boundaries */
5181 count = min(count, (unsigned int)PAGE_SIZE - offset);
5183 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5185 if (PageHighMem(page)) {
5186 unsigned long flags;
5188 /* FIXME: use bounce buffer */
5189 local_irq_save(flags);
5190 buf = kmap_atomic(page, KM_IRQ0);
5192 /* do the actual data transfer */
5193 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
5195 kunmap_atomic(buf, KM_IRQ0);
5196 local_irq_restore(flags);
5198 buf = page_address(page);
5199 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
5203 if ((count & 1) && bytes)
5205 qc->curbytes += count;
5206 qc->cursg_ofs += count;
5208 if (qc->cursg_ofs == sg->length) {
5209 qc->cursg = sg_next(qc->cursg);
5220 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
5221 * @qc: Command in progress
5223 * Transfer data from/to the ATAPI device.
5226 * Inherited from caller.
5229 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5231 struct ata_port *ap = qc->ap;
5232 struct ata_device *dev = qc->dev;
5233 unsigned int ireason, bc_lo, bc_hi, bytes;
5234 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5236 /* Abuse qc->result_tf for temp storage of intermediate TF
5237 * here to save some kernel stack usage.
5238 * For normal completion, qc->result_tf is not relevant. For
5239 * error, qc->result_tf is later overwritten by ata_qc_complete().
5240 * So, the correctness of qc->result_tf is not affected.
5242 ap->ops->tf_read(ap, &qc->result_tf);
5243 ireason = qc->result_tf.nsect;
5244 bc_lo = qc->result_tf.lbam;
5245 bc_hi = qc->result_tf.lbah;
5246 bytes = (bc_hi << 8) | bc_lo;
5248 /* shall be cleared to zero, indicating xfer of data */
5249 if (ireason & (1 << 0))
5252 /* make sure transfer direction matches expected */
5253 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5254 if (do_write != i_write)
5257 VPRINTK("ata%u: transferring %d bytes\n", ap->print_id, bytes);
5259 if (__atapi_pio_bytes(qc, bytes))
5261 ata_altstatus(ap); /* flush */
5266 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
5267 qc->err_mask |= AC_ERR_HSM;
5268 ap->hsm_task_state = HSM_ST_ERR;
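/*
 * For reference, the interrupt reason bits decoded above (per the
 * ATAPI spec, read back from the nsect register):
 *	bit 0 (CoD): 1 = command/status phase, 0 = data phase
 *	bit 1 (IO):  1 = transfer to the host (read), 0 = to the device
 * so a well-behaved data-in phase reports CoD=0, IO=1.
 */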
5272 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5273 * @ap: the target ata_port
5277 * 1 if ok in workqueue, 0 otherwise.
5280 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5282 if (qc->tf.flags & ATA_TFLAG_POLLING)
5285 if (ap->hsm_task_state == HSM_ST_FIRST) {
5286 if (qc->tf.protocol == ATA_PROT_PIO &&
5287 (qc->tf.flags & ATA_TFLAG_WRITE))
5290 if (ata_is_atapi(qc->tf.protocol) &&
5291 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5299 * ata_hsm_qc_complete - finish a qc running on standard HSM
5300 * @qc: Command to complete
5301 * @in_wq: 1 if called from workqueue, 0 otherwise
5303 * Finish @qc which is running on standard HSM.
5306 * If @in_wq is zero, spin_lock_irqsave(host lock).
5307 * Otherwise, none on entry and grabs host lock.
5309 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5311 struct ata_port *ap = qc->ap;
5312 unsigned long flags;
5314 if (ap->ops->error_handler) {
5316 spin_lock_irqsave(ap->lock, flags);
5318 /* EH might have kicked in while host lock is
5321 qc = ata_qc_from_tag(ap, qc->tag);
5323 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
5324 ap->ops->irq_on(ap);
5325 ata_qc_complete(qc);
5327 ata_port_freeze(ap);
5330 spin_unlock_irqrestore(ap->lock, flags);
5332 if (likely(!(qc->err_mask & AC_ERR_HSM)))
5333 ata_qc_complete(qc);
5335 ata_port_freeze(ap);
5339 spin_lock_irqsave(ap->lock, flags);
5340 ap->ops->irq_on(ap);
5341 ata_qc_complete(qc);
5342 spin_unlock_irqrestore(ap->lock, flags);
5344 ata_qc_complete(qc);
5349 * ata_hsm_move - move the HSM to the next state.
5350 * @ap: the target ata_port
5352 * @status: current device status
5353 * @in_wq: 1 if called from workqueue, 0 otherwise
5356 * 1 when poll next status needed, 0 otherwise.
5358 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5359 u8 status, int in_wq)
5361 unsigned long flags = 0;
5364 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5366 /* Make sure ata_qc_issue_prot() does not throw things
5367 * like DMA polling into the workqueue. Notice that
5368 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5370 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5373 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
5374 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5376 switch (ap->hsm_task_state) {
5378 /* Send first data block or PACKET CDB */
5380 /* If polling, we will stay in the work queue after
5381 * sending the data. Otherwise, interrupt handler
5382 * takes over after sending the data.
5384 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5386 /* check device status */
5387 if (unlikely((status & ATA_DRQ) == 0)) {
5388 /* handle BSY=0, DRQ=0 as error */
5389 if (likely(status & (ATA_ERR | ATA_DF)))
5390 /* device stops HSM for abort/error */
5391 qc->err_mask |= AC_ERR_DEV;
5393 /* HSM violation. Let EH handle this */
5394 qc->err_mask |= AC_ERR_HSM;
5396 ap->hsm_task_state = HSM_ST_ERR;
5400 /* Device should not ask for data transfer (DRQ=1)
5401 * when it finds something wrong.
5402 * We ignore DRQ here and stop the HSM by
5403 * changing hsm_task_state to HSM_ST_ERR and
5404 * let the EH abort the command or reset the device.
5406 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5407 /* Some ATAPI tape drives forget to clear the ERR bit
5408 * when doing the next command (mostly request sense).
5409 * We ignore ERR here as a workaround and proceed with sending
5412 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
5413 ata_port_printk(ap, KERN_WARNING,
5414 "DRQ=1 with device error, "
5415 "dev_stat 0x%X\n", status);
5416 qc->err_mask |= AC_ERR_HSM;
5417 ap->hsm_task_state = HSM_ST_ERR;
5422 /* Send the CDB (atapi) or the first data block (ata pio out).
5423 * During the state transition, interrupt handler shouldn't
5424 * be invoked before the data transfer is complete and
5425 * hsm_task_state is changed. Hence, the following locking.
5428 spin_lock_irqsave(ap->lock, flags);
5430 if (qc->tf.protocol == ATA_PROT_PIO) {
5431 /* PIO data out protocol.
5432 * send first data block.
5435 /* ata_pio_sectors() might change the state
5436 * to HSM_ST_LAST. so, the state is changed here
5437 * before ata_pio_sectors().
5439 ap->hsm_task_state = HSM_ST;
5440 ata_pio_sectors(qc);
5443 atapi_send_cdb(ap, qc);
5446 spin_unlock_irqrestore(ap->lock, flags);
5448 /* if polling, ata_pio_task() handles the rest.
5449 * otherwise, interrupt handler takes over from here.
5454 /* complete command or read/write the data register */
5455 if (qc->tf.protocol == ATA_PROT_ATAPI) {
5456 /* ATAPI PIO protocol */
5457 if ((status & ATA_DRQ) == 0) {
5458 /* No more data to transfer or device error.
5459 * Device error will be tagged in HSM_ST_LAST.
5461 ap->hsm_task_state = HSM_ST_LAST;
5465 /* Device should not ask for data transfer (DRQ=1)
5466 * when it finds something wrong.
5467 * We ignore DRQ here and stop the HSM by
5468 * changing hsm_task_state to HSM_ST_ERR and
5469 * let the EH abort the command or reset the device.
5471 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5472 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5473 "device error, dev_stat 0x%X\n",
5475 qc->err_mask |= AC_ERR_HSM;
5476 ap->hsm_task_state = HSM_ST_ERR;
5480 atapi_pio_bytes(qc);
5482 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5483 /* bad ireason reported by device */
5487 /* ATA PIO protocol */
5488 if (unlikely((status & ATA_DRQ) == 0)) {
5489 /* handle BSY=0, DRQ=0 as error */
5490 if (likely(status & (ATA_ERR | ATA_DF)))
5491 /* device stops HSM for abort/error */
5492 qc->err_mask |= AC_ERR_DEV;
5494 /* HSM violation. Let EH handle this.
5495 * Phantom devices also trigger this
5496 * condition. Mark hint.
5498 qc->err_mask |= AC_ERR_HSM |
5501 ap->hsm_task_state = HSM_ST_ERR;
5505 /* For PIO reads, some devices may ask for
5506 * data transfer (DRQ=1) along with ERR=1.
5507 * We respect DRQ here and transfer one
5508 * block of junk data before changing the
5509 * hsm_task_state to HSM_ST_ERR.
5511 * For PIO writes, ERR=1 DRQ=1 doesn't make
5512 * sense since the data block has been
5513 * transferred to the device.
5515 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5516 /* data might be corrupted */
5517 qc->err_mask |= AC_ERR_DEV;
5519 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5520 ata_pio_sectors(qc);
5521 status = ata_wait_idle(ap);
5524 if (status & (ATA_BUSY | ATA_DRQ))
5525 qc->err_mask |= AC_ERR_HSM;
5527 /* ata_pio_sectors() might change the
5528 * state to HSM_ST_LAST. so, the state
5529 * is changed after ata_pio_sectors().
5531 ap->hsm_task_state = HSM_ST_ERR;
5535 ata_pio_sectors(qc);
5537 if (ap->hsm_task_state == HSM_ST_LAST &&
5538 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5540 status = ata_wait_idle(ap);
5549 if (unlikely(!ata_ok(status))) {
5550 qc->err_mask |= __ac_err_mask(status);
5551 ap->hsm_task_state = HSM_ST_ERR;
5555 /* no more data to transfer */
5556 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5557 ap->print_id, qc->dev->devno, status);
5559 WARN_ON(qc->err_mask);
5561 ap->hsm_task_state = HSM_ST_IDLE;
5563 /* complete taskfile transaction */
5564 ata_hsm_qc_complete(qc, in_wq);
5570 /* make sure qc->err_mask is available to
5571 * know what's wrong and recover
5573 WARN_ON(qc->err_mask == 0);
5575 ap->hsm_task_state = HSM_ST_IDLE;
5577 /* complete taskfile transaction */
5578 ata_hsm_qc_complete(qc, in_wq);
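/*
 * Rough picture of the state flow handled above (a sketch; error and
 * polling details omitted):
 *
 *	HSM_ST_FIRST -> send CDB or first data block -> HSM_ST / HSM_ST_LAST
 *	HSM_ST       -> move data while the device asserts DRQ
 *	HSM_ST_LAST  -> device idle, check final status, complete qc
 *	HSM_ST_ERR   -> complete qc with err_mask set; EH takes over
 */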
5590 static void ata_pio_task(struct work_struct *work)
5592 struct ata_port *ap =
5593 container_of(work, struct ata_port, port_task.work);
5594 struct ata_queued_cmd *qc = ap->port_task_data;
5599 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5602 * This is purely heuristic. This is a fast path.
5603 * Sometimes when we enter, BSY will be cleared in
5604 * a chk-status or two. If not, the drive is probably seeking
5605 * or something. Snooze for a couple msecs, then
5606 * chk-status again. If still busy, queue delayed work.
5608 status = ata_busy_wait(ap, ATA_BUSY, 5);
5609 if (status & ATA_BUSY) {
5611 status = ata_busy_wait(ap, ATA_BUSY, 10);
5612 if (status & ATA_BUSY) {
5613 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5619 poll_next = ata_hsm_move(ap, qc, status, 1);
5621 /* another command or interrupt handler
5622 * may be running at this point.
5629 * ata_qc_new - Request an available ATA command, for queueing
5630 * @ap: Port associated with device @dev
5631 * @dev: Device for which we request an available command structure
5637 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5639 struct ata_queued_cmd *qc = NULL;
5642 /* no command while frozen */
5643 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5646 /* the last tag is reserved for internal command. */
5647 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5648 if (!test_and_set_bit(i, &ap->qc_allocated)) {
5649 qc = __ata_qc_from_tag(ap, i);
5660 * ata_qc_new_init - Request an available ATA command, and initialize it
5661 * @dev: Device for which we request an available command structure
5667 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5669 struct ata_port *ap = dev->link->ap;
5670 struct ata_queued_cmd *qc;
5672 qc = ata_qc_new(ap);
5685 * ata_qc_free - free unused ata_queued_cmd
5686 * @qc: Command to free
5688 * Designed to free unused ata_queued_cmd object
5689 * in case something prevents using it.
5692 * spin_lock_irqsave(host lock)
5694 void ata_qc_free(struct ata_queued_cmd *qc)
5696 struct ata_port *ap = qc->ap;
5699 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5703 if (likely(ata_tag_valid(tag))) {
5704 qc->tag = ATA_TAG_POISON;
5705 clear_bit(tag, &ap->qc_allocated);
5709 void __ata_qc_complete(struct ata_queued_cmd *qc)
5711 struct ata_port *ap = qc->ap;
5712 struct ata_link *link = qc->dev->link;
5714 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5715 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5717 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5720 /* command should be marked inactive atomically with qc completion */
5721 if (qc->tf.protocol == ATA_PROT_NCQ) {
5722 link->sactive &= ~(1 << qc->tag);
5724 ap->nr_active_links--;
5726 link->active_tag = ATA_TAG_POISON;
5727 ap->nr_active_links--;
5730 /* clear exclusive status */
5731 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5732 ap->excl_link == link))
5733 ap->excl_link = NULL;
5735 /* atapi: mark qc as inactive to prevent the interrupt handler
5736 * from completing the command twice later, before the error handler
5737 * is called. (when rc != 0 and atapi request sense is needed)
5739 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5740 ap->qc_active &= ~(1 << qc->tag);
5742 /* call completion callback */
5743 qc->complete_fn(qc);
5746 static void fill_result_tf(struct ata_queued_cmd *qc)
5748 struct ata_port *ap = qc->ap;
5750 qc->result_tf.flags = qc->tf.flags;
5751 ap->ops->tf_read(ap, &qc->result_tf);
5754 static void ata_verify_xfer(struct ata_queued_cmd *qc)
5756 struct ata_device *dev = qc->dev;
5758 if (ata_tag_internal(qc->tag))
5761 if (ata_is_nodata(qc->tf.protocol))
5764 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5767 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5771 * ata_qc_complete - Complete an active ATA command
5772 * @qc: Command to complete
5775 * Indicate to the mid and upper layers that an ATA
5776 * command has completed, with either an ok or not-ok status.
5779 * spin_lock_irqsave(host lock)
5781 void ata_qc_complete(struct ata_queued_cmd *qc)
5783 struct ata_port *ap = qc->ap;
5785 /* XXX: New EH and old EH use different mechanisms to
5786 * synchronize EH with regular execution path.
5788 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5789 * Normal execution path is responsible for not accessing a
5790 * failed qc. libata core enforces the rule by returning NULL
5791 * from ata_qc_from_tag() for failed qcs.
5793 * Old EH depends on ata_qc_complete() nullifying completion
5794 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5795 * not synchronize with interrupt handler. Only PIO task is
5798 if (ap->ops->error_handler) {
5799 struct ata_device *dev = qc->dev;
5800 struct ata_eh_info *ehi = &dev->link->eh_info;
5802 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5804 if (unlikely(qc->err_mask))
5805 qc->flags |= ATA_QCFLAG_FAILED;
5807 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5808 if (!ata_tag_internal(qc->tag)) {
5809 /* always fill result TF for failed qc */
5811 ata_qc_schedule_eh(qc);
5816 /* read result TF if requested */
5817 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5820 /* Some commands need post-processing after successful
5823 switch (qc->tf.command) {
5824 case ATA_CMD_SET_FEATURES:
5825 if (qc->tf.feature != SETFEATURES_WC_ON &&
5826 qc->tf.feature != SETFEATURES_WC_OFF)
5829 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5830 case ATA_CMD_SET_MULTI: /* multi_count changed */
5831 /* revalidate device */
5832 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5833 ata_port_schedule_eh(ap);
5837 dev->flags |= ATA_DFLAG_SLEEPING;
5841 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5842 ata_verify_xfer(qc);
5844 __ata_qc_complete(qc);
5846 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5849 /* read result TF if failed or requested */
5850 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5853 __ata_qc_complete(qc);
5858 * ata_qc_complete_multiple - Complete multiple qcs successfully
5859 * @ap: port in question
5860 * @qc_active: new qc_active mask
5861 * @finish_qc: LLDD callback invoked before completing a qc
5863 * Complete in-flight commands. This function is meant to be
5864 * called from the low-level driver's interrupt routine to complete
5865 * requests normally. ap->qc_active and @qc_active are compared
5866 * and commands are completed accordingly.
5869 * spin_lock_irqsave(host lock)
5872 * Number of completed commands on success, -errno otherwise.
5874 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5875 void (*finish_qc)(struct ata_queued_cmd *))
5881 done_mask = ap->qc_active ^ qc_active;
5883 if (unlikely(done_mask & qc_active)) {
5884 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5885 "(%08x->%08x)\n", ap->qc_active, qc_active);
5889 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5890 struct ata_queued_cmd *qc;
5892 if (!(done_mask & (1 << i)))
5895 if ((qc = ata_qc_from_tag(ap, i))) {
5898 ata_qc_complete(qc);
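/*
 * Worked example of the mask arithmetic above (values assumed):
 * old ap->qc_active = 0x07 (tags 0-2 in flight), new qc_active = 0x01
 * -> done_mask = 0x07 ^ 0x01 = 0x06, i.e. tags 1 and 2 completed.
 * A nonzero done_mask & qc_active would mean a tag appeared out of
 * nowhere, which is reported as an illegal transition above.
 */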
5907 * ata_qc_issue - issue taskfile to device
5908 * @qc: command to issue to device
5910 * Prepare an ATA command for submission to the device.
5911 * This includes mapping the data into a DMA-able
5912 * area, filling in the S/G table, and finally
5913 * writing the taskfile to hardware, starting the command.
5916 * spin_lock_irqsave(host lock)
5918 void ata_qc_issue(struct ata_queued_cmd *qc)
5920 struct ata_port *ap = qc->ap;
5921 struct ata_link *link = qc->dev->link;
5922 u8 prot = qc->tf.protocol;
5924 /* Make sure only one non-NCQ command is outstanding. The
5925 * check is skipped for old EH because it reuses active qc to
5926 * request ATAPI sense.
5928 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5930 if (prot == ATA_PROT_NCQ) {
5931 WARN_ON(link->sactive & (1 << qc->tag));
5934 ap->nr_active_links++;
5935 link->sactive |= 1 << qc->tag;
5937 WARN_ON(link->sactive);
5939 ap->nr_active_links++;
5940 link->active_tag = qc->tag;
5943 qc->flags |= ATA_QCFLAG_ACTIVE;
5944 ap->qc_active |= 1 << qc->tag;
5946 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5947 (ap->flags & ATA_FLAG_PIO_DMA))) {
5948 if (qc->flags & ATA_QCFLAG_SG) {
5949 if (ata_sg_setup(qc))
5951 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5952 if (ata_sg_setup_one(qc))
5956 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5959 /* if device is sleeping, schedule softreset and abort the link */
5960 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5961 link->eh_info.action |= ATA_EH_SOFTRESET;
5962 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5963 ata_link_abort(link);
5967 ap->ops->qc_prep(qc);
5969 qc->err_mask |= ap->ops->qc_issue(qc);
5970 if (unlikely(qc->err_mask))
5975 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5976 qc->err_mask |= AC_ERR_SYSTEM;
5978 ata_qc_complete(qc);
5982 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5983 * @qc: command to issue to device
5985 * Using various libata functions and hooks, this function
5986 * starts an ATA command. ATA commands are grouped into
5987 * classes called "protocols", and issuing each type of protocol
5988 * is slightly different.
5990 * May be used as the qc_issue() entry in ata_port_operations.
5993 * spin_lock_irqsave(host lock)
5996 * Zero on success, AC_ERR_* mask on failure
5999 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6001 struct ata_port *ap = qc->ap;
6003 /* Use polling PIO if the LLD doesn't handle
6004 * interrupt-driven PIO and ATAPI CDB interrupts.
6006 if (ap->flags & ATA_FLAG_PIO_POLLING) {
6007 switch (qc->tf.protocol) {
6009 case ATA_PROT_NODATA:
6010 case ATA_PROT_ATAPI:
6011 case ATA_PROT_ATAPI_NODATA:
6012 qc->tf.flags |= ATA_TFLAG_POLLING;
6014 case ATA_PROT_ATAPI_DMA:
6015 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
6016 /* see ata_dma_blacklisted() */
6024 /* select the device */
6025 ata_dev_select(ap, qc->dev->devno, 1, 0);
6027 /* start the command */
6028 switch (qc->tf.protocol) {
6029 case ATA_PROT_NODATA:
6030 if (qc->tf.flags & ATA_TFLAG_POLLING)
6031 ata_qc_set_polling(qc);
6033 ata_tf_to_host(ap, &qc->tf);
6034 ap->hsm_task_state = HSM_ST_LAST;
6036 if (qc->tf.flags & ATA_TFLAG_POLLING)
6037 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6042 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6044 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6045 ap->ops->bmdma_setup(qc); /* set up bmdma */
6046 ap->ops->bmdma_start(qc); /* initiate bmdma */
6047 ap->hsm_task_state = HSM_ST_LAST;
6051 if (qc->tf.flags & ATA_TFLAG_POLLING)
6052 ata_qc_set_polling(qc);
6054 ata_tf_to_host(ap, &qc->tf);
6056 if (qc->tf.flags & ATA_TFLAG_WRITE) {
6057 /* PIO data out protocol */
6058 ap->hsm_task_state = HSM_ST_FIRST;
6059 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6061 /* always send first data block using
6062 * the ata_pio_task() codepath.
6065 /* PIO data in protocol */
6066 ap->hsm_task_state = HSM_ST;
6068 if (qc->tf.flags & ATA_TFLAG_POLLING)
6069 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6071 /* if polling, ata_pio_task() handles the rest.
6072 * otherwise, interrupt handler takes over from here.
6078 case ATA_PROT_ATAPI:
6079 case ATA_PROT_ATAPI_NODATA:
6080 if (qc->tf.flags & ATA_TFLAG_POLLING)
6081 ata_qc_set_polling(qc);
6083 ata_tf_to_host(ap, &qc->tf);
6085 ap->hsm_task_state = HSM_ST_FIRST;
6087 /* send cdb by polling if no cdb interrupt */
6088 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6089 (qc->tf.flags & ATA_TFLAG_POLLING))
6090 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6093 case ATA_PROT_ATAPI_DMA:
6094 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6096 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6097 ap->ops->bmdma_setup(qc); /* set up bmdma */
6098 ap->hsm_task_state = HSM_ST_FIRST;
6100 /* send cdb by polling if no cdb interrupt */
6101 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6102 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6107 return AC_ERR_SYSTEM;
6114 * ata_host_intr - Handle host interrupt for given (port, task)
6115 * @ap: Port on which interrupt arrived (possibly...)
6116 * @qc: Taskfile currently active in engine
6118 * Handle host interrupt for given queued command. Currently,
6119 * only DMA interrupts are handled. All other commands are
6120 * handled via polling with interrupts disabled (nIEN bit).
6123 * spin_lock_irqsave(host lock)
6126 * One if interrupt was handled, zero if not (shared irq).
6129 inline unsigned int ata_host_intr(struct ata_port *ap,
6130 struct ata_queued_cmd *qc)
6132 struct ata_eh_info *ehi = &ap->link.eh_info;
6133 u8 status, host_stat = 0;
6135 VPRINTK("ata%u: protocol %d task_state %d\n",
6136 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
6138 /* Check whether we are expecting interrupt in this state */
6139 switch (ap->hsm_task_state) {
6141 /* Some pre-ATAPI-4 devices assert INTRQ
6142 * in this state when ready to receive a CDB.
6145 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
6146 * The flag was turned on only for atapi devices. No
6147 * need to check ata_is_atapi(qc->tf.protocol) again.
6149 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6153 if (qc->tf.protocol == ATA_PROT_DMA ||
6154 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
6155 /* check status of DMA engine */
6156 host_stat = ap->ops->bmdma_status(ap);
6157 VPRINTK("ata%u: host_stat 0x%X\n",
6158 ap->print_id, host_stat);
6160 /* if it's not our irq... */
6161 if (!(host_stat & ATA_DMA_INTR))
6164 /* before we do anything else, clear DMA-Start bit */
6165 ap->ops->bmdma_stop(qc);
6167 if (unlikely(host_stat & ATA_DMA_ERR)) {
6168 /* error when transferring data to/from memory */
6169 qc->err_mask |= AC_ERR_HOST_BUS;
6170 ap->hsm_task_state = HSM_ST_ERR;
6180 /* check altstatus */
6181 status = ata_altstatus(ap);
6182 if (status & ATA_BUSY)
6185 /* check main status, clearing INTRQ */
6186 status = ata_chk_status(ap);
6187 if (unlikely(status & ATA_BUSY))
6190 /* ack bmdma irq events */
6191 ap->ops->irq_clear(ap);
6193 ata_hsm_move(ap, qc, status, 0);
6195 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
6196 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
6197 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6199 return 1; /* irq handled */
6202 ap->stats.idle_irq++;
6205 if ((ap->stats.idle_irq % 1000) == 0) {
6207 ap->ops->irq_clear(ap);
6208 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
6212 return 0; /* irq not handled */
6216 * ata_interrupt - Default ATA host interrupt handler
6217 * @irq: irq line (unused)
6218 * @dev_instance: pointer to our ata_host information structure
6220 * Default interrupt handler for PCI IDE devices. Calls
6221 * ata_host_intr() for each port that is not disabled.
6224 * Obtains host lock during operation.
6227 * IRQ_NONE or IRQ_HANDLED.
6230 irqreturn_t ata_interrupt(int irq, void *dev_instance)
6232 struct ata_host *host = dev_instance;
6234 unsigned int handled = 0;
6235 unsigned long flags;
6237 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6238 spin_lock_irqsave(&host->lock, flags);
6240 for (i = 0; i < host->n_ports; i++) {
6241 struct ata_port *ap;
6243 ap = host->ports[i];
6245 !(ap->flags & ATA_FLAG_DISABLED)) {
6246 struct ata_queued_cmd *qc;
6248 qc = ata_qc_from_tag(ap, ap->link.active_tag);
6249 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
6250 (qc->flags & ATA_QCFLAG_ACTIVE))
6251 handled |= ata_host_intr(ap, qc);
6255 spin_unlock_irqrestore(&host->lock, flags);
6257 return IRQ_RETVAL(handled);
6261 * sata_scr_valid - test whether SCRs are accessible
6262 * @link: ATA link to test SCR accessibility for
6264 * Test whether SCRs are accessible for @link.
6270 * 1 if SCRs are accessible, 0 otherwise.
6272 int sata_scr_valid(struct ata_link *link)
6274 struct ata_port *ap = link->ap;
6276 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
6280 * sata_scr_read - read SCR register of the specified port
6281 * @link: ATA link to read SCR for
6283 * @val: Place to store read value
6285 * Read SCR register @reg of @link into *@val. This function is
6286 * guaranteed to succeed if @link is ap->link, the cable type of
6287 * the port is SATA and the port implements ->scr_read.
6290 * None if @link is ap->link. Kernel thread context otherwise.
6293 * 0 on success, negative errno on failure.
6295 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6297 if (ata_is_host_link(link)) {
6298 struct ata_port *ap = link->ap;
6300 if (sata_scr_valid(link))
6301 return ap->ops->scr_read(ap, reg, val);
6305 return sata_pmp_scr_read(link, reg, val);
6309 * sata_scr_write - write SCR register of the specified port
6310 * @link: ATA link to write SCR for
6311 * @reg: SCR to write
6312 * @val: value to write
6314 * Write @val to SCR register @reg of @link. This function is
6315 * guaranteed to succeed if @link is ap->link, the cable type of
6316 * the port is SATA and the port implements ->scr_write.
6319 * None if @link is ap->link. Kernel thread context otherwise.
6322 * 0 on success, negative errno on failure.
6324 int sata_scr_write(struct ata_link *link, int reg, u32 val)
6326 if (ata_is_host_link(link)) {
6327 struct ata_port *ap = link->ap;
6329 if (sata_scr_valid(link))
6330 return ap->ops->scr_write(ap, reg, val);
6334 return sata_pmp_scr_write(link, reg, val);
6338 * sata_scr_write_flush - write SCR register of the specified port and flush
6339 * @link: ATA link to write SCR for
6340 * @reg: SCR to write
6341 * @val: value to write
6343 * This function is identical to sata_scr_write() except that this
6344 * function performs a flush after writing to the register.
6347 * None if @link is ap->link. Kernel thread context otherwise.
6350 * 0 on success, negative errno on failure.
6352 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6354 if (ata_is_host_link(link)) {
6355 struct ata_port *ap = link->ap;
6358 if (sata_scr_valid(link)) {
6359 rc = ap->ops->scr_write(ap, reg, val);
6361 rc = ap->ops->scr_read(ap, reg, &val);
6367 return sata_pmp_scr_write(link, reg, val);
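/*
 * Usage sketch (caller context assumed): capping link speed to
 * 1.5 Gbps by rewriting the SPD field (bits 7:4) of SControl.
 */
static int example_limit_to_gen1(struct ata_link *link)
{
	u32 scontrol;
	int rc = sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (rc)
		return rc;
	scontrol = (scontrol & ~0xf0) | 0x10;	/* SPD: limit to Gen1 */
	return sata_scr_write(link, SCR_CONTROL, scontrol);
}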
6371 * ata_link_online - test whether the given link is online
6372 * @link: ATA link to test
6374 * Test whether @link is online. Note that this function returns
6375 * 0 if online status of @link cannot be obtained, so
6376 * ata_link_online(link) != !ata_link_offline(link).
6382 * 1 if the port online status is available and online.
6384 int ata_link_online(struct ata_link *link)
6388 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6389 (sstatus & 0xf) == 0x3)
6395 * ata_link_offline - test whether the given link is offline
6396 * @link: ATA link to test
6398 * Test whether @link is offline. Note that this function
6399 * returns 0 if offline status of @link cannot be obtained, so
6400 * ata_link_online(link) != !ata_link_offline(link).
6406 * 1 if the port offline status is available and offline.
6408 int ata_link_offline(struct ata_link *link)
6412 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6413 (sstatus & 0xf) != 0x3)
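/*
 * For reference, the SStatus fields tested above (per the SATA spec):
 *	DET (bits 3:0):  0x3 = device present, PHY communication up
 *	SPD (bits 7:4):  negotiated interface speed
 *	IPM (bits 11:8): interface power management state
 */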
6418 int ata_flush_cache(struct ata_device *dev)
6420 unsigned int err_mask;
6423 if (!ata_try_flush_cache(dev))
6426 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6427 cmd = ATA_CMD_FLUSH_EXT;
6429 cmd = ATA_CMD_FLUSH;
6431 /* This is wrong. On a failed flush we get back the LBA of the lost
6432 sector and we should (assuming it wasn't aborted as unknown) issue
6433 a further flush command to continue the writeback until it
6435 err_mask = ata_do_simple_cmd(dev, cmd);
6437 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6445 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6446 unsigned int action, unsigned int ehi_flags,
6449 unsigned long flags;
6452 for (i = 0; i < host->n_ports; i++) {
6453 struct ata_port *ap = host->ports[i];
6454 struct ata_link *link;
6456 /* Previous resume operation might still be in
6457 * progress. Wait for PM_PENDING to clear.
6459 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6460 ata_port_wait_eh(ap);
6461 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6464 /* request PM ops to EH */
6465 spin_lock_irqsave(ap->lock, flags);
6470 ap->pm_result = &rc;
6473 ap->pflags |= ATA_PFLAG_PM_PENDING;
6474 __ata_port_for_each_link(link, ap) {
6475 link->eh_info.action |= action;
6476 link->eh_info.flags |= ehi_flags;
6479 ata_port_schedule_eh(ap);
6481 spin_unlock_irqrestore(ap->lock, flags);
6483 /* wait and check result */
6485 ata_port_wait_eh(ap);
6486 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6496 * ata_host_suspend - suspend host
6497 * @host: host to suspend
6500 * Suspend @host. Actual operation is performed by EH. This
6501 * function requests EH to perform PM operations and waits for EH
6505 * Kernel thread context (may sleep).
6508 * 0 on success, -errno on failure.
6510 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6515 * disable link pm on all ports before requesting
6518 ata_lpm_enable(host);
6520 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
6522 host->dev->power.power_state = mesg;
6527 * ata_host_resume - resume host
6528 * @host: host to resume
6530 * Resume @host. Actual operation is performed by EH. This
6531 * function requests EH to perform PM operations and returns.
6532 * Note that all resume operations are performed in parallel.
6535 * Kernel thread context (may sleep).
6537 void ata_host_resume(struct ata_host *host)
6539 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6540 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6541 host->dev->power.power_state = PMSG_ON;
6543 /* reenable link pm */
6544 ata_lpm_disable(host);
6549 * ata_port_start - Set port up for dma.
6550 * @ap: Port to initialize
6552 * Called just after data structures for each port are
6553 * initialized. Allocates space for the PRD table.
6555 * May be used as the port_start() entry in ata_port_operations.
6558 * Inherited from caller.
6560 int ata_port_start(struct ata_port *ap)
6562 struct device *dev = ap->dev;
6565 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6570 rc = ata_pad_alloc(ap, dev);
6574 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6575 (unsigned long long)ap->prd_dma);
6580 * ata_dev_init - Initialize an ata_device structure
6581 * @dev: Device structure to initialize
6583 * Initialize @dev in preparation for probing.
6586 * Inherited from caller.
6588 void ata_dev_init(struct ata_device *dev)
6590 struct ata_link *link = dev->link;
6591 struct ata_port *ap = link->ap;
6592 unsigned long flags;
6594 /* SATA spd limit is bound to the first device */
6595 link->sata_spd_limit = link->hw_sata_spd_limit;
6598 /* High bits of dev->flags are used to record warm plug
6599 * requests which occur asynchronously. Synchronize using
6602 spin_lock_irqsave(ap->lock, flags);
6603 dev->flags &= ~ATA_DFLAG_INIT_MASK;
6605 spin_unlock_irqrestore(ap->lock, flags);
6607 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6608 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6609 dev->pio_mask = UINT_MAX;
6610 dev->mwdma_mask = UINT_MAX;
6611 dev->udma_mask = UINT_MAX;
6615 * ata_link_init - Initialize an ata_link structure
6616 * @ap: ATA port link is attached to
6617 * @link: Link structure to initialize
6618 * @pmp: Port multiplier port number
6623 * Kernel thread context (may sleep)
6625 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
6629 /* clear everything except for devices */
6630 memset(link, 0, offsetof(struct ata_link, device[0]));
6634 link->active_tag = ATA_TAG_POISON;
6635 link->hw_sata_spd_limit = UINT_MAX;
6637 /* can't use iterator, ap isn't initialized yet */
6638 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6639 struct ata_device *dev = &link->device[i];
6642 dev->devno = dev - link->device;
6648 * sata_link_init_spd - Initialize link->sata_spd_limit
6649 * @link: Link to configure sata_spd_limit for
6651 * Initialize @link->[hw_]sata_spd_limit to the currently
6655 * Kernel thread context (may sleep).
6658 * 0 on success, -errno on failure.
6660 int sata_link_init_spd(struct ata_link *link)
6665 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6669 spd = (scontrol >> 4) & 0xf;
6671 link->hw_sata_spd_limit &= (1 << spd) - 1;
6673 link->sata_spd_limit = link->hw_sata_spd_limit;
6679 * ata_port_alloc - allocate and initialize basic ATA port resources
6680 * @host: ATA host this allocated port belongs to
6682 * Allocate and initialize basic ATA port resources.
6685 * Allocate ATA port on success, NULL on failure.
6688 * Inherited from calling layer (may sleep).
6690 struct ata_port *ata_port_alloc(struct ata_host *host)
6692 struct ata_port *ap;
6696 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6700 ap->pflags |= ATA_PFLAG_INITIALIZING;
6701 ap->lock = &host->lock;
6702 ap->flags = ATA_FLAG_DISABLED;
6704 ap->ctl = ATA_DEVCTL_OBS;
6706 ap->dev = host->dev;
6707 ap->last_ctl = 0xFF;
6709 #if defined(ATA_VERBOSE_DEBUG)
6710 /* turn on all debugging levels */
6711 ap->msg_enable = 0x00FF;
6712 #elif defined(ATA_DEBUG)
6713 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6715 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6718 INIT_DELAYED_WORK(&ap->port_task, NULL);
6719 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6720 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6721 INIT_LIST_HEAD(&ap->eh_done_q);
6722 init_waitqueue_head(&ap->eh_wait_q);
6723 init_timer_deferrable(&ap->fastdrain_timer);
6724 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6725 ap->fastdrain_timer.data = (unsigned long)ap;
6727 ap->cbl = ATA_CBL_NONE;
6729 ata_link_init(ap, &ap->link, 0);
6732 ap->stats.unhandled_irq = 1;
6733 ap->stats.idle_irq = 1;
6738 static void ata_host_release(struct device *gendev, void *res)
6740 struct ata_host *host = dev_get_drvdata(gendev);
6743 for (i = 0; i < host->n_ports; i++) {
6744 struct ata_port *ap = host->ports[i];
6750 scsi_host_put(ap->scsi_host);
6752 kfree(ap->pmp_link);
6754 host->ports[i] = NULL;
6757 dev_set_drvdata(gendev, NULL);
6761 * ata_host_alloc - allocate and init basic ATA host resources
6762 * @dev: generic device this host is associated with
6763 * @max_ports: maximum number of ATA ports associated with this host
6765 * Allocate and initialize basic ATA host resources. An LLD calls
6766 * this function to allocate a host, initializes it fully, and then
6767 * attaches it using ata_host_register().
6769 * @max_ports ports are allocated and host->n_ports is
6770 * initialized to @max_ports. The caller is allowed to decrease
6771 * host->n_ports before calling ata_host_register(). The unused
6772 * ports will be automatically freed on registration.
6775 * Allocate ATA host on success, NULL on failure.
6778 * Inherited from calling layer (may sleep).
6780 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6782 struct ata_host *host;
6788 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6791 /* alloc a container for our list of ATA ports (buses) */
6792 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6794 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6798 devres_add(dev, host);
6799 dev_set_drvdata(dev, host);
6801 spin_lock_init(&host->lock);
6803 host->n_ports = max_ports;
6805 /* allocate ports bound to this host */
6806 for (i = 0; i < max_ports; i++) {
6807 struct ata_port *ap;
6809 ap = ata_port_alloc(host);
6814 host->ports[i] = ap;
6817 devres_remove_group(dev, NULL);
6821 devres_release_group(dev, NULL);
6826 * ata_host_alloc_pinfo - alloc host and init with port_info array
6827 * @dev: generic device this host is associated with
6828 * @ppi: array of ATA port_info to initialize host with
6829 * @n_ports: number of ATA ports attached to this host
6831 * Allocate ATA host and initialize with info from @ppi. If NULL
6832 * terminated, @ppi may contain fewer entries than @n_ports. The
6833 * last entry will be used for the remaining ports.
6836 * Allocate ATA host on success, NULL on failure.
6839 * Inherited from calling layer (may sleep).
6841 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6842 const struct ata_port_info * const * ppi,
6845 const struct ata_port_info *pi;
6846 struct ata_host *host;
6849 host = ata_host_alloc(dev, n_ports);
6853 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6854 struct ata_port *ap = host->ports[i];
6859 ap->pio_mask = pi->pio_mask;
6860 ap->mwdma_mask = pi->mwdma_mask;
6861 ap->udma_mask = pi->udma_mask;
6862 ap->flags |= pi->flags;
6863 ap->link.flags |= pi->link_flags;
6864 ap->ops = pi->port_ops;
6866 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6867 host->ops = pi->port_ops;
6868 if (!host->private_data && pi->private_data)
6869 host->private_data = pi->private_data;
6875 static void ata_host_stop(struct device *gendev, void *res)
6877 struct ata_host *host = dev_get_drvdata(gendev);
6880 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6882 for (i = 0; i < host->n_ports; i++) {
6883 struct ata_port *ap = host->ports[i];
6885 if (ap->ops->port_stop)
6886 ap->ops->port_stop(ap);
6889 if (host->ops->host_stop)
6890 host->ops->host_stop(host);
6894 * ata_host_start - start and freeze ports of an ATA host
6895 * @host: ATA host to start ports for
6897 * Start and then freeze ports of @host. Started status is
6898 * recorded in host->flags, so this function can be called
6899 * multiple times. Ports are guaranteed to get started only
6900 * once. If host->ops isn't initialized yet, it's set to the
6901 * first non-dummy port ops.
6904 * Inherited from calling layer (may sleep).
6907 * 0 if all ports are started successfully, -errno otherwise.
6909 int ata_host_start(struct ata_host *host)
6912 void *start_dr = NULL;
6915 if (host->flags & ATA_HOST_STARTED)
6918 for (i = 0; i < host->n_ports; i++) {
6919 struct ata_port *ap = host->ports[i];
6921 if (!host->ops && !ata_port_is_dummy(ap))
6922 host->ops = ap->ops;
6924 if (ap->ops->port_stop)
6928 if (host->ops->host_stop)
6932 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6937 for (i = 0; i < host->n_ports; i++) {
6938 struct ata_port *ap = host->ports[i];
6940 if (ap->ops->port_start) {
6941 rc = ap->ops->port_start(ap);
6944 dev_printk(KERN_ERR, host->dev,
6945 "failed to start port %d "
6946 "(errno=%d)\n", i, rc);
6950 ata_eh_freeze_port(ap);
6954 devres_add(host->dev, start_dr);
6955 host->flags |= ATA_HOST_STARTED;
6960 struct ata_port *ap = host->ports[i];
6962 if (ap->ops->port_stop)
6963 ap->ops->port_stop(ap);
6965 devres_free(start_dr);
6970 * ata_host_init - Initialize a host struct
6971 * @host: host to initialize
6972 * @dev: device host is attached to
6973 * @flags: host flags
6977 * PCI/etc. bus probe sem.
6980 /* KILLME - the only user left is ipr */
6981 void ata_host_init(struct ata_host *host, struct device *dev,
6982 unsigned long flags, const struct ata_port_operations *ops)
6984 spin_lock_init(&host->lock);
6986 host->flags = flags;
6991 * ata_host_register - register initialized ATA host
6992 * @host: ATA host to register
6993 * @sht: template for SCSI host
6995 * Register initialized ATA host. @host is allocated using
6996 * ata_host_alloc() and fully initialized by LLD. This function
6997 * starts ports, registers @host with ATA and SCSI layers and
6998 * probes registered devices.
7001 * Inherited from calling layer (may sleep).
7004 * 0 on success, -errno otherwise.
7006 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7010 /* host must have been started */
7011 if (!(host->flags & ATA_HOST_STARTED)) {
7012 dev_printk(KERN_ERR, host->dev,
7013 "BUG: trying to register unstarted host\n");
7018 /* Blow away unused ports. This happens when LLD can't
7019 * determine the exact number of ports to allocate at
7022 for (i = host->n_ports; host->ports[i]; i++)
7023 kfree(host->ports[i]);
7025 /* give ports names and add SCSI hosts */
7026 for (i = 0; i < host->n_ports; i++)
7027 host->ports[i]->print_id = ata_print_id++;
7029 rc = ata_scsi_add_hosts(host, sht);
7033 /* associate with ACPI nodes */
7034 ata_acpi_associate(host);
7036 /* set cable, sata_spd_limit and report */
7037 for (i = 0; i < host->n_ports; i++) {
7038 struct ata_port *ap = host->ports[i];
7039 unsigned long xfer_mask;
7041 /* set SATA cable type if still unset */
7042 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7043 ap->cbl = ATA_CBL_SATA;
7045 /* init sata_spd_limit to the current value */
7046 sata_link_init_spd(&ap->link);
7048 /* print per-port info to dmesg */
7049 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7052 if (!ata_port_is_dummy(ap)) {
7053 ata_port_printk(ap, KERN_INFO,
7054 "%cATA max %s %s\n",
7055 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
7056 ata_mode_string(xfer_mask),
7057 ap->link.eh_info.desc);
7058 ata_ehi_clear_desc(&ap->link.eh_info);
7060 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7063 /* perform each probe synchronously */
7064 DPRINTK("probe begin\n");
7065 for (i = 0; i < host->n_ports; i++) {
7066 struct ata_port *ap = host->ports[i];
7070 if (ap->ops->error_handler) {
7071 struct ata_eh_info *ehi = &ap->link.eh_info;
7072 unsigned long flags;
7076 /* kick EH for boot probing */
7077 spin_lock_irqsave(ap->lock, flags);
7080 (1 << ata_link_max_devices(&ap->link)) - 1;
7081 ehi->action |= ATA_EH_SOFTRESET;
7082 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7084 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
7085 ap->pflags |= ATA_PFLAG_LOADING;
7086 ata_port_schedule_eh(ap);
7088 spin_unlock_irqrestore(ap->lock, flags);
7090 /* wait for EH to finish */
7091 ata_port_wait_eh(ap);
7093 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7094 rc = ata_bus_probe(ap);
7095 DPRINTK("ata%u: bus probe end\n", ap->print_id);
7098 /* FIXME: do something useful here?
7099 * Current libata behavior will
7100 * tear down everything when
7101 * the module is removed
7102 * or the h/w is unplugged.
7108 /* probes are done, now scan each port's disk(s) */
7109 DPRINTK("host probe begin\n");
7110 for (i = 0; i < host->n_ports; i++) {
7111 struct ata_port *ap = host->ports[i];
7113 ata_scsi_scan_host(ap, 1);
7114 ata_lpm_schedule(ap, ap->pm_policy);
7121 * ata_host_activate - start host, request IRQ and register it
7122 * @host: target ATA host
7123 * @irq: IRQ to request
7124 * @irq_handler: irq_handler used when requesting IRQ
7125 * @irq_flags: irq_flags used when requesting IRQ
7126 * @sht: scsi_host_template to use when registering the host
7128 * After allocating an ATA host and initializing it, most libata
7129 * LLDs perform three steps to activate the host - start host,
7130 * request IRQ and register it. This helper takes necessary
7131 * arguments and performs the three steps in one go.
7133 * An invalid IRQ skips the IRQ registration and expects the host to
7134 * have set polling mode on the port. In this case, @irq_handler
7138 * Inherited from calling layer (may sleep).
7141 * 0 on success, -errno otherwise.
7143 int ata_host_activate(struct ata_host *host, int irq,
7144 irq_handler_t irq_handler, unsigned long irq_flags,
7145 struct scsi_host_template *sht)
7149 rc = ata_host_start(host);
7153 /* Special case for polling mode */
7155 WARN_ON(irq_handler);
7156 return ata_host_register(host, sht);
7159 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7160 dev_driver_string(host->dev), host);
7164 for (i = 0; i < host->n_ports; i++)
7165 ata_port_desc(host->ports[i], "irq %d", irq);
7167 rc = ata_host_register(host, sht);
7168 /* if failed, just free the IRQ and leave ports alone */
7170 devm_free_irq(host->dev, irq, host);
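/*
 * Sketch of the probe sequence most PCI LLDs end up with (all
 * example_* names are hypothetical placeholders):
 */
static struct scsi_host_template example_sht;		/* placeholder */
static const struct ata_port_info example_port_info;	/* placeholder */

static int example_init_one(struct pci_dev *pdev)
{
	const struct ata_port_info *ppi[] = { &example_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* iomap BARs and fill host->ports[i]->ioaddr here */

	return ata_host_activate(host, pdev->irq, ata_interrupt,
				 IRQF_SHARED, &example_sht);
}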
7176 * ata_port_detach - Detach ATA port in preparation for device removal
7177 * @ap: ATA port to be detached
7179 * Detach all ATA devices and the associated SCSI devices of @ap;
7180 * then, remove the associated SCSI host. @ap is guaranteed to
7181 * be quiescent on return from this function.
7184 * Kernel thread context (may sleep).
7186 static void ata_port_detach(struct ata_port *ap)
7188 unsigned long flags;
7189 struct ata_link *link;
7190 struct ata_device *dev;
7192 if (!ap->ops->error_handler)
7195 /* tell EH we're leaving & flush EH */
7196 spin_lock_irqsave(ap->lock, flags);
7197 ap->pflags |= ATA_PFLAG_UNLOADING;
7198 spin_unlock_irqrestore(ap->lock, flags);
7200 ata_port_wait_eh(ap);
7202 /* EH is now guaranteed to see UNLOADING - EH context belongs
7203 * to us. Disable all existing devices.
7205 ata_port_for_each_link(link, ap) {
7206 ata_link_for_each_dev(dev, link)
7207 ata_dev_disable(dev);
7210 /* Final freeze & EH. All in-flight commands are aborted. EH
7211 * will be skipped and retries will be terminated with bad
7214 spin_lock_irqsave(ap->lock, flags);
7215 ata_port_freeze(ap); /* won't be thawed */
7216 spin_unlock_irqrestore(ap->lock, flags);
7218 ata_port_wait_eh(ap);
7219 cancel_rearming_delayed_work(&ap->hotplug_task);
7222 /* remove the associated SCSI host */
7223 scsi_remove_host(ap->scsi_host);
7227 * ata_host_detach - Detach all ports of an ATA host
7228 * @host: Host to detach
7230 * Detach all ports of @host.
7233 * Kernel thread context (may sleep).
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}
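/*
 * Illustrative sketch, not part of the original file: a non-PCI LLD's
 * remove callback usually just detaches the host and lets devres
 * release the resources.  "foo_remove" is a hypothetical name and
 * assumes <linux/platform_device.h>.
 */
#if 0
static int foo_remove(struct platform_device *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	ata_host_detach(host);
	return 0;
}
#endif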
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
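/*
 * Illustrative sketch, not part of the original file: a driver fills in
 * cmd_addr (and the addresses ata_std_ports() does not touch) before
 * calling the helper.  "foo_setup_port", "base" and "ctl_base" are
 * hypothetical; the latter two are ioremapped addresses.
 */
#if 0
static void foo_setup_port(struct ata_ioports *ioaddr,
			   void __iomem *base, void __iomem *ctl_base)
{
	ioaddr->cmd_addr = base;		/* taskfile register block */
	ioaddr->ctl_addr = ctl_base;		/* not set by ata_std_ports() */
	ioaddr->altstatus_addr = ctl_base;
	ata_std_ports(ioaddr);			/* derive the remaining offsets */
}
#endif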
#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}
	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
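/*
 * Illustrative sketch, not part of the original file: drivers typically
 * call pci_test_config_bits() from a prereset method to see whether a
 * port is enabled in PCI config space.  The config offsets and bit
 * values in "foo_enable_bits" are made up for illustration.
 */
#if 0
static const struct pci_bits foo_enable_bits[] = {
	{ 0x41, 1, 0x80, 0x80 },	/* port 0: reg, byte width, mask, val */
	{ 0x43, 1, 0x80, 0x80 },	/* port 1 */
};

static int foo_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
		return -ENOENT;	/* port disabled, skip reset */

	return ata_std_prereset(link, deadline);
}
#endif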
#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);
	return 0;
}
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
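/*
 * Illustrative sketch, not part of the original file: a simple LLD can
 * usually point its struct pci_driver callbacks straight at these
 * helpers.  The "foo_*" names are hypothetical.
 */
#if 0
static struct pci_driver foo_pci_driver = {
	.name		= "foo",
	.id_table	= foo_pci_ids,
	.probe		= foo_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};
#endif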
7380 #endif /* CONFIG_PM */
7382 #endif /* CONFIG_PCI */
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
7408 subsys_initcall(ata_init);
7409 module_exit(ata_exit);
7411 static unsigned long ratelimit_time;
7412 static DEFINE_SPINLOCK(ata_ratelimit_lock);
int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
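/*
 * Illustrative sketch, not part of the original file: ata_ratelimit()
 * gates messages that may fire from hot paths such as interrupt
 * handlers, allowing at most one burst every HZ/5 jiffies.
 */
#if 0
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"spurious interrupt ignored\n");
#endif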
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
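/*
 * Illustrative sketch, not part of the original file: waiting for a
 * controller to clear a busy bit, AHCI-style.  "port_mmio", "PORT_CMD"
 * and "PORT_CMD_LIST_ON" stand in for a real driver's definitions.
 */
#if 0
	/* poll every 1 ms; give up after 500 ms */
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
	if (tmp & PORT_CMD_LIST_ON)
		return -EBUSY;	/* DMA engine failed to stop */
#endif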
7481 static void ata_dummy_noret(struct ata_port *ap) { }
7482 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7483 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return 0;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
const struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
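/*
 * Illustrative sketch, not part of the original file: a controller with
 * a fixed but sparsely populated port map can point unused slots of its
 * port_info array at ata_dummy_port_info so port numbering stays
 * stable.  "foo_port_info" is hypothetical.
 */
#if 0
	const struct ata_port_info *ppi[] = {
		&foo_port_info,		/* port 0: real */
		&ata_dummy_port_info,	/* port 1: absent on this chip */
	};
#endif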
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
7520 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7521 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7522 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7523 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7524 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7525 EXPORT_SYMBOL_GPL(ata_std_bios_param);
7526 EXPORT_SYMBOL_GPL(ata_std_ports);
7527 EXPORT_SYMBOL_GPL(ata_host_init);
7528 EXPORT_SYMBOL_GPL(ata_host_alloc);
7529 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7530 EXPORT_SYMBOL_GPL(ata_host_start);
7531 EXPORT_SYMBOL_GPL(ata_host_register);
7532 EXPORT_SYMBOL_GPL(ata_host_activate);
7533 EXPORT_SYMBOL_GPL(ata_host_detach);
7534 EXPORT_SYMBOL_GPL(ata_sg_init);
7535 EXPORT_SYMBOL_GPL(ata_sg_init_one);
7536 EXPORT_SYMBOL_GPL(ata_hsm_move);
7537 EXPORT_SYMBOL_GPL(ata_qc_complete);
7538 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7539 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
7540 EXPORT_SYMBOL_GPL(ata_tf_load);
7541 EXPORT_SYMBOL_GPL(ata_tf_read);
7542 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7543 EXPORT_SYMBOL_GPL(ata_std_dev_select);
7544 EXPORT_SYMBOL_GPL(sata_print_link_status);
7545 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7546 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7547 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7548 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7549 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7550 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7551 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7552 EXPORT_SYMBOL_GPL(ata_mode_string);
7553 EXPORT_SYMBOL_GPL(ata_id_xfermask);
7554 EXPORT_SYMBOL_GPL(ata_check_status);
7555 EXPORT_SYMBOL_GPL(ata_altstatus);
7556 EXPORT_SYMBOL_GPL(ata_exec_command);
7557 EXPORT_SYMBOL_GPL(ata_port_start);
7558 EXPORT_SYMBOL_GPL(ata_sff_port_start);
7559 EXPORT_SYMBOL_GPL(ata_interrupt);
7560 EXPORT_SYMBOL_GPL(ata_do_set_mode);
7561 EXPORT_SYMBOL_GPL(ata_data_xfer);
7562 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
7563 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7564 EXPORT_SYMBOL_GPL(ata_qc_prep);
7565 EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
7566 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7567 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7568 EXPORT_SYMBOL_GPL(ata_bmdma_start);
7569 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7570 EXPORT_SYMBOL_GPL(ata_bmdma_status);
7571 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
7572 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7573 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7574 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7575 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7576 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7577 EXPORT_SYMBOL_GPL(ata_port_probe);
7578 EXPORT_SYMBOL_GPL(ata_dev_disable);
7579 EXPORT_SYMBOL_GPL(sata_set_spd);
7580 EXPORT_SYMBOL_GPL(sata_link_debounce);
7581 EXPORT_SYMBOL_GPL(sata_link_resume);
7582 EXPORT_SYMBOL_GPL(ata_bus_reset);
7583 EXPORT_SYMBOL_GPL(ata_std_prereset);
7584 EXPORT_SYMBOL_GPL(ata_std_softreset);
7585 EXPORT_SYMBOL_GPL(sata_link_hardreset);
7586 EXPORT_SYMBOL_GPL(sata_std_hardreset);
7587 EXPORT_SYMBOL_GPL(ata_std_postreset);
7588 EXPORT_SYMBOL_GPL(ata_dev_classify);
7589 EXPORT_SYMBOL_GPL(ata_dev_pair);
7590 EXPORT_SYMBOL_GPL(ata_port_disable);
7591 EXPORT_SYMBOL_GPL(ata_ratelimit);
7592 EXPORT_SYMBOL_GPL(ata_wait_register);
7593 EXPORT_SYMBOL_GPL(ata_busy_sleep);
7594 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7595 EXPORT_SYMBOL_GPL(ata_wait_ready);
7596 EXPORT_SYMBOL_GPL(ata_port_queue_task);
7597 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7598 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7599 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7600 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7601 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7602 EXPORT_SYMBOL_GPL(ata_host_intr);
7603 EXPORT_SYMBOL_GPL(sata_scr_valid);
7604 EXPORT_SYMBOL_GPL(sata_scr_read);
7605 EXPORT_SYMBOL_GPL(sata_scr_write);
7606 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7607 EXPORT_SYMBOL_GPL(ata_link_online);
7608 EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
7611 EXPORT_SYMBOL_GPL(ata_host_resume);
7612 #endif /* CONFIG_PM */
7613 EXPORT_SYMBOL_GPL(ata_id_string);
7614 EXPORT_SYMBOL_GPL(ata_id_c_string);
7615 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7617 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7618 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7619 EXPORT_SYMBOL_GPL(ata_timing_compute);
7620 EXPORT_SYMBOL_GPL(ata_timing_merge);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
7624 EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
7625 EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7626 EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7627 EXPORT_SYMBOL_GPL(ata_pci_init_one);
7628 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7631 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7632 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7633 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7634 #endif /* CONFIG_PM */
7635 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7636 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7637 #endif /* CONFIG_PCI */
7639 EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
7640 EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7641 EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7642 EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7643 EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7645 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7646 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7647 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7648 EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7651 #endif /* CONFIG_PCI */
7652 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7653 EXPORT_SYMBOL_GPL(ata_link_abort);
7654 EXPORT_SYMBOL_GPL(ata_port_abort);
7655 EXPORT_SYMBOL_GPL(ata_port_freeze);
7656 EXPORT_SYMBOL_GPL(sata_async_notification);
7657 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7658 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7659 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7660 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7661 EXPORT_SYMBOL_GPL(ata_do_eh);
7662 EXPORT_SYMBOL_GPL(ata_irq_on);
7663 EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7665 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7666 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7667 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7668 EXPORT_SYMBOL_GPL(ata_cable_sata);