2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
49 #include <linux/highmem.h>
50 #include <linux/spinlock.h>
51 #include <linux/blkdev.h>
52 #include <linux/delay.h>
53 #include <linux/timer.h>
54 #include <linux/interrupt.h>
55 #include <linux/completion.h>
56 #include <linux/suspend.h>
57 #include <linux/workqueue.h>
58 #include <linux/jiffies.h>
59 #include <linux/scatterlist.h>
61 #include <scsi/scsi.h>
62 #include <scsi/scsi_cmnd.h>
63 #include <scsi/scsi_host.h>
64 #include <linux/libata.h>
65 #include <asm/semaphore.h>
66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h>
72 /* debounce timing parameters in msecs { interval, duration, timeout } */
73 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
74 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
75 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
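/*
 * These triplets are handed to the SATA link debounce/resume helpers
 * (sata_link_debounce() and friends): the interval is the delay between
 * successive SStatus samples, the duration is how long the reading must
 * stay stable, and the timeout bounds the whole operation.
 */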
77 static unsigned int ata_dev_init_params(struct ata_device *dev,
78 u16 heads, u16 sectors);
79 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
80 static unsigned int ata_dev_set_feature(struct ata_device *dev,
81 u8 enable, u8 feature);
82 static void ata_dev_xfermask(struct ata_device *dev);
83 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
85 unsigned int ata_print_id = 1;
86 static struct workqueue_struct *ata_wq;
88 struct workqueue_struct *ata_aux_wq;
90 int atapi_enabled = 1;
91 module_param(atapi_enabled, int, 0444);
92 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
95 module_param(atapi_dmadir, int, 0444);
96 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
98 int atapi_passthru16 = 1;
99 module_param(atapi_passthru16, int, 0444);
100 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
103 module_param_named(fua, libata_fua, int, 0444);
104 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
106 static int ata_ignore_hpa;
107 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
108 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
110 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
111 module_param_named(dma, libata_dma_mask, int, 0444);
112 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
114 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
115 module_param(ata_probe_timeout, int, 0444);
116 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
118 int libata_noacpi = 0;
119 module_param_named(noacpi, libata_noacpi, int, 0444);
120 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
122 int libata_allow_tpm = 0;
123 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
124 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
126 MODULE_AUTHOR("Jeff Garzik");
127 MODULE_DESCRIPTION("Library module for ATA devices");
128 MODULE_LICENSE("GPL");
129 MODULE_VERSION(DRV_VERSION);
133 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
134 * @tf: Taskfile to convert
135 * @pmp: Port multiplier port
136 * @is_cmd: This FIS is for command
137 * @fis: Buffer into which data will be output
139 * Converts a standard ATA taskfile to a Serial ATA
140 * FIS structure (Register - Host to Device).
143 * Inherited from caller.
145 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
147 fis[0] = 0x27; /* Register - Host to Device FIS */
148 fis[1] = pmp & 0xf; /* Port multiplier number */
150 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
152 fis[2] = tf->command;
153 fis[3] = tf->feature;
160 fis[8] = tf->hob_lbal;
161 fis[9] = tf->hob_lbam;
162 fis[10] = tf->hob_lbah;
163 fis[11] = tf->hob_feature;
166 fis[13] = tf->hob_nsect;
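/*
 * Worked example (illustrative): a command FIS for PMP port 0 starts
 * with the bytes 0x27 (Register - Host to Device FIS type) and 0x80
 * (C bit set, PMP field 0), followed by the command opcode and the
 * feature byte; the LBA, device and HOB bytes are filled in as above.
 */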
177 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
178 * @fis: Buffer from which data will be input
179 * @tf: Taskfile to output
181 * Converts a serial ATA FIS structure to a standard ATA taskfile.
184 * Inherited from caller.
187 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
189 tf->command = fis[2]; /* status */
190 tf->feature = fis[3]; /* error */
197 tf->hob_lbal = fis[8];
198 tf->hob_lbam = fis[9];
199 tf->hob_lbah = fis[10];
202 tf->hob_nsect = fis[13];
205 static const u8 ata_rw_cmds[] = {
209 ATA_CMD_READ_MULTI_EXT,
210 ATA_CMD_WRITE_MULTI_EXT,
214 ATA_CMD_WRITE_MULTI_FUA_EXT,
218 ATA_CMD_PIO_READ_EXT,
219 ATA_CMD_PIO_WRITE_EXT,
232 ATA_CMD_WRITE_FUA_EXT
236 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
237 * @tf: command to examine and configure
238 * @dev: device tf belongs to
240 * Examine the device configuration and tf->flags to calculate
241 * the proper read/write commands and protocol to use.
246 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
250 int index, fua, lba48, write;
252 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
253 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
254 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
256 if (dev->flags & ATA_DFLAG_PIO) {
257 tf->protocol = ATA_PROT_PIO;
258 index = dev->multi_count ? 0 : 8;
259 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
260 /* Unable to use DMA due to host limitation */
261 tf->protocol = ATA_PROT_PIO;
262 index = dev->multi_count ? 0 : 8;
264 tf->protocol = ATA_PROT_DMA;
268 cmd = ata_rw_cmds[index + fua + lba48 + write];
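/*
 * Index arithmetic example: a multi-sector PIO LBA48 FUA write selects
 * ata_rw_cmds[0 + 4 + 2 + 1] == ATA_CMD_WRITE_MULTI_FUA_EXT, while a
 * non-multi PIO LBA48 read selects ata_rw_cmds[8 + 0 + 2 + 0] ==
 * ATA_CMD_PIO_READ_EXT.
 */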
277 * ata_tf_read_block - Read block address from ATA taskfile
278 * @tf: ATA taskfile of interest
279 * @dev: ATA device @tf belongs to
284 * Read block address from @tf. This function can handle all
285 * three address formats - LBA, LBA48 and CHS. tf->protocol and
286 * flags select the address format to use.
289 * Block address read from @tf.
291 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
295 if (tf->flags & ATA_TFLAG_LBA) {
296 if (tf->flags & ATA_TFLAG_LBA48) {
297 block |= (u64)tf->hob_lbah << 40;
298 block |= (u64)tf->hob_lbam << 32;
299 block |= tf->hob_lbal << 24;
301 block |= (tf->device & 0xf) << 24;
303 block |= tf->lbah << 16;
304 block |= tf->lbam << 8;
309 cyl = tf->lbam | (tf->lbah << 8);
310 head = tf->device & 0xf;
313 block = (cyl * dev->heads + head) * dev->sectors + sect;
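/*
 * Example (illustrative geometry): with dev->heads == 16 and
 * dev->sectors == 63, a taskfile addressing cyl 100, head 5, sect 1
 * decodes to block (100 * 16 + 5) * 63 + 1 == 101116.
 */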
320 * ata_build_rw_tf - Build ATA taskfile for given read/write request
321 * @tf: Target ATA taskfile
322 * @dev: ATA device @tf belongs to
323 * @block: Block address
324 * @n_block: Number of blocks
325 * @tf_flags: RW/FUA etc...
331 * Build ATA taskfile @tf for read/write request described by
332 * @block, @n_block, @tf_flags and @tag on @dev.
336 * 0 on success, -ERANGE if the request is too large for @dev,
337 * -EINVAL if the request is invalid.
339 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
340 u64 block, u32 n_block, unsigned int tf_flags,
343 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
344 tf->flags |= tf_flags;
346 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
348 if (!lba_48_ok(block, n_block))
351 tf->protocol = ATA_PROT_NCQ;
352 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
354 if (tf->flags & ATA_TFLAG_WRITE)
355 tf->command = ATA_CMD_FPDMA_WRITE;
357 tf->command = ATA_CMD_FPDMA_READ;
359 tf->nsect = tag << 3;
360 tf->hob_feature = (n_block >> 8) & 0xff;
361 tf->feature = n_block & 0xff;
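/* Note: for NCQ the transfer length lives in the FEATURE/HOB_FEATURE
 * bytes, while the sector count register carries the tag in bits 7:3,
 * as set above.
 */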
363 tf->hob_lbah = (block >> 40) & 0xff;
364 tf->hob_lbam = (block >> 32) & 0xff;
365 tf->hob_lbal = (block >> 24) & 0xff;
366 tf->lbah = (block >> 16) & 0xff;
367 tf->lbam = (block >> 8) & 0xff;
368 tf->lbal = block & 0xff;
371 if (tf->flags & ATA_TFLAG_FUA)
372 tf->device |= 1 << 7;
373 } else if (dev->flags & ATA_DFLAG_LBA) {
374 tf->flags |= ATA_TFLAG_LBA;
376 if (lba_28_ok(block, n_block)) {
378 tf->device |= (block >> 24) & 0xf;
379 } else if (lba_48_ok(block, n_block)) {
380 if (!(dev->flags & ATA_DFLAG_LBA48))
384 tf->flags |= ATA_TFLAG_LBA48;
386 tf->hob_nsect = (n_block >> 8) & 0xff;
388 tf->hob_lbah = (block >> 40) & 0xff;
389 tf->hob_lbam = (block >> 32) & 0xff;
390 tf->hob_lbal = (block >> 24) & 0xff;
392 /* request too large even for LBA48 */
395 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
398 tf->nsect = n_block & 0xff;
400 tf->lbah = (block >> 16) & 0xff;
401 tf->lbam = (block >> 8) & 0xff;
402 tf->lbal = block & 0xff;
404 tf->device |= ATA_LBA;
407 u32 sect, head, cyl, track;
409 /* The request -may- be too large for CHS addressing. */
410 if (!lba_28_ok(block, n_block))
413 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
416 /* Convert LBA to CHS */
417 track = (u32)block / dev->sectors;
418 cyl = track / dev->heads;
419 head = track % dev->heads;
420 sect = (u32)block % dev->sectors + 1;
422 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
423 (u32)block, track, cyl, head, sect);
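/*
 * Example (illustrative geometry): block 4000 with dev->sectors == 63
 * and dev->heads == 16 converts to track 63, cyl 3, head 15, sect 32,
 * which passes the range check below.
 */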
425 /* Check whether the converted CHS can fit.
429 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
432 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
443 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
444 * @pio_mask: pio_mask
445 * @mwdma_mask: mwdma_mask
446 * @udma_mask: udma_mask
448 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
449 * unsigned int xfer_mask.
457 unsigned int ata_pack_xfermask(unsigned int pio_mask,
458 unsigned int mwdma_mask, unsigned int udma_mask)
460 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
461 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
462 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
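/*
 * Usage sketch (illustrative values): a controller supporting PIO modes
 * 0-4, MWDMA modes 0-2 and UDMA modes 0-5 would pass
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *
 * and ata_unpack_xfermask() below recovers the three masks again.
 */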
466 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
467 * @xfer_mask: xfer_mask to unpack
468 * @pio_mask: resulting pio_mask
469 * @mwdma_mask: resulting mwdma_mask
470 * @udma_mask: resulting udma_mask
472 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
473 * Any NULL destination masks will be ignored.
475 void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask,
476 unsigned int *mwdma_mask, unsigned int *udma_mask)
479 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
481 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
483 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
486 static const struct ata_xfer_ent {
490 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
491 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
492 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
497 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
498 * @xfer_mask: xfer_mask of interest
500 * Return matching XFER_* value for @xfer_mask. Only the highest
501 * bit of @xfer_mask is considered.
507 * Matching XFER_* value, 0xff if no match found.
509 u8 ata_xfer_mask2mode(unsigned int xfer_mask)
511 int highbit = fls(xfer_mask) - 1;
512 const struct ata_xfer_ent *ent;
514 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
515 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
516 return ent->base + highbit - ent->shift;
521 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
522 * @xfer_mode: XFER_* of interest
524 * Return matching xfer_mask for @xfer_mode.
530 * Matching xfer_mask, 0 if no match found.
532 unsigned int ata_xfer_mode2mask(u8 xfer_mode)
534 const struct ata_xfer_ent *ent;
536 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
537 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
538 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
539 & ~((1 << ent->shift) - 1);
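/*
 * Mapping example: a mask whose highest set bit is ATA_SHIFT_UDMA + 2
 * yields XFER_UDMA_2 from ata_xfer_mask2mode(), and
 * ata_xfer_mode2mask(XFER_MW_DMA_2) returns the mask covering MWDMA
 * modes 0-2.
 */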
544 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
545 * @xfer_mode: XFER_* of interest
547 * Return matching xfer_shift for @xfer_mode.
553 * Matching xfer_shift, -1 if no match found.
555 int ata_xfer_mode2shift(unsigned int xfer_mode)
557 const struct ata_xfer_ent *ent;
559 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
560 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
566 * ata_mode_string - convert xfer_mask to string
567 * @xfer_mask: mask of bits supported; only highest bit counts.
569 * Determine string which represents the highest speed
570 * (highest bit in @xfer_mask).
576 * Constant C string representing highest speed listed in
577 * @xfer_mask, or the constant C string "<n/a>".
579 const char *ata_mode_string(unsigned int xfer_mask)
581 static const char * const xfer_mode_str[] = {
605 highbit = fls(xfer_mask) - 1;
606 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
607 return xfer_mode_str[highbit];
611 static const char *sata_spd_string(unsigned int spd)
613 static const char * const spd_str[] = {
618 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
620 return spd_str[spd - 1];
623 void ata_dev_disable(struct ata_device *dev)
625 if (ata_dev_enabled(dev)) {
626 if (ata_msg_drv(dev->link->ap))
627 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
628 ata_acpi_on_disable(dev);
629 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
635 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
637 struct ata_link *link = dev->link;
638 struct ata_port *ap = link->ap;
640 unsigned int err_mask;
644 * disallow DIPM for drivers which haven't set
645 * ATA_FLAG_IPM. This is because when DIPM is enabled,
646 * phy ready will be set in the interrupt status on
647 * state changes, which will cause some drivers to
648 * think there are errors - additionally drivers will
649 * need to disable hot plug.
651 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
652 ap->pm_policy = NOT_AVAILABLE;
657 * For DIPM, we will only enable it for the
660 * Why? Because disks are too stupid to know that
661 * if the host rejects a request to go to SLUMBER
662 * they should retry at PARTIAL; instead they
663 * just give up. So, for medium_power to
664 * work at all, we need to only allow HIPM.
666 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
672 /* no restrictions on IPM transitions */
673 scontrol &= ~(0x3 << 8);
674 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
679 if (dev->flags & ATA_DFLAG_DIPM)
680 err_mask = ata_dev_set_feature(dev,
681 SETFEATURES_SATA_ENABLE, SATA_DIPM);
684 /* allow IPM to PARTIAL */
685 scontrol &= ~(0x1 << 8);
686 scontrol |= (0x2 << 8);
687 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
692 * we don't have to disable DIPM since IPM flags
693 * disallow transitions to SLUMBER, which effectively
694 * disables DIPM if it does not support PARTIAL
698 case MAX_PERFORMANCE:
699 /* disable all IPM transitions */
700 scontrol |= (0x3 << 8);
701 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
706 * we don't have to disable DIPM since IPM flags
707 * disallow all transitions, which effectively
708 * disables DIPM anyway.
713 /* FIXME: handle SET FEATURES failure */
720 * ata_dev_enable_pm - enable SATA interface power management
721 * @dev: device to enable power management
722 * @policy: the link power management policy
724 * Enable SATA Interface power management. This will enable
725 * Device Interface Power Management (DIPM) for min_power
726 * policy, and then call driver specific callbacks for
727 * enabling Host Initiated Power management.
730 * Returns: nothing; on failure @ap->pm_policy is reset to MAX_PERFORMANCE.
732 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
735 struct ata_port *ap = dev->link->ap;
737 /* set HIPM first, then DIPM */
738 if (ap->ops->enable_pm)
739 rc = ap->ops->enable_pm(ap, policy);
742 rc = ata_dev_set_dipm(dev, policy);
746 ap->pm_policy = MAX_PERFORMANCE;
748 ap->pm_policy = policy;
749 return /* rc */; /* hopefully we can use 'rc' eventually */
754 * ata_dev_disable_pm - disable SATA interface power management
755 * @dev: device to disable power management
757 * Disable SATA Interface power management. This will disable
758 * Device Interface Power Management (DIPM) without changing
759 * policy, call driver specific callbacks for disabling Host
760 * Initiated Power management.
765 static void ata_dev_disable_pm(struct ata_device *dev)
767 struct ata_port *ap = dev->link->ap;
769 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
770 if (ap->ops->disable_pm)
771 ap->ops->disable_pm(ap);
773 #endif /* CONFIG_PM */
775 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
777 ap->pm_policy = policy;
778 ap->link.eh_info.action |= ATA_EHI_LPM;
779 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
780 ata_port_schedule_eh(ap);
784 static void ata_lpm_enable(struct ata_host *host)
786 struct ata_link *link;
788 struct ata_device *dev;
791 for (i = 0; i < host->n_ports; i++) {
793 ata_port_for_each_link(link, ap) {
794 ata_link_for_each_dev(dev, link)
795 ata_dev_disable_pm(dev);
800 static void ata_lpm_disable(struct ata_host *host)
804 for (i = 0; i < host->n_ports; i++) {
805 struct ata_port *ap = host->ports[i];
806 ata_lpm_schedule(ap, ap->pm_policy);
809 #endif /* CONFIG_PM */
813 * ata_devchk - PATA device presence detection
814 * @ap: ATA channel to examine
815 * @device: Device to examine (starting at zero)
817 * This technique was originally described in
818 * Hale Landis's ATADRVR (www.ata-atapi.com), and
819 * later found its way into the ATA/ATAPI spec.
821 * Write a pattern to the ATA shadow registers,
822 * and if a device is present, it will respond by
823 * correctly storing and echoing back the
824 * ATA shadow register contents.
830 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
832 struct ata_ioports *ioaddr = &ap->ioaddr;
835 ap->ops->dev_select(ap, device);
837 iowrite8(0x55, ioaddr->nsect_addr);
838 iowrite8(0xaa, ioaddr->lbal_addr);
840 iowrite8(0xaa, ioaddr->nsect_addr);
841 iowrite8(0x55, ioaddr->lbal_addr);
843 iowrite8(0x55, ioaddr->nsect_addr);
844 iowrite8(0xaa, ioaddr->lbal_addr);
846 nsect = ioread8(ioaddr->nsect_addr);
847 lbal = ioread8(ioaddr->lbal_addr);
849 if ((nsect == 0x55) && (lbal == 0xaa))
850 return 1; /* we found a device */
852 return 0; /* nothing found */
856 * ata_dev_classify - determine device type based on ATA-spec signature
857 * @tf: ATA taskfile register set for device to be identified
859 * Determine from taskfile register contents whether a device is
860 * ATA or ATAPI, as per "Signature and persistence" section
861 * of ATA/PI spec (volume 1, sect 5.14).
867 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
868 * %ATA_DEV_UNKNOWN in the event of failure.
870 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
872 /* Apple's open source Darwin code hints that some devices only
873 * put a proper signature into the LBA mid/high registers,
874 * so we only check those; it's sufficient for uniqueness.
876 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
877 * signatures for ATA and ATAPI devices attached on SerialATA,
878 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
879 * spec has never mentioned using different signatures
880 * for ATA/ATAPI devices. Then, Serial ATA II: Port
881 * Multiplier specification began to use 0x69/0x96 to identify
882 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
883 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
884 * 0x69/0x96 shortly afterwards and described them as reserved for
887 * We follow the current spec and consider that 0x69/0x96
888 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
890 if ((tf->lbam == 0) && (tf->lbah == 0)) {
891 DPRINTK("found ATA device by sig\n");
895 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
896 DPRINTK("found ATAPI device by sig\n");
897 return ATA_DEV_ATAPI;
900 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
901 DPRINTK("found PMP device by sig\n");
905 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
906 printk(KERN_INFO "ata: SEMB device ignored\n");
907 return ATA_DEV_SEMB_UNSUP; /* not yet */
910 DPRINTK("unknown device\n");
911 return ATA_DEV_UNKNOWN;
915 * ata_dev_try_classify - Parse returned ATA device signature
916 * @dev: ATA device to classify (starting at zero)
917 * @present: device seems present
918 * @r_err: Value of error register on completion
920 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
921 * an ATA/ATAPI-defined set of values is placed in the ATA
922 * shadow registers, indicating the results of device detection
925 * Select the ATA device, and read the values from the ATA shadow
926 * registers. Then parse according to the Error register value,
927 * and the spec-defined values examined by ata_dev_classify().
933 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
935 unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
938 struct ata_port *ap = dev->link->ap;
939 struct ata_taskfile tf;
943 ap->ops->dev_select(ap, dev->devno);
945 memset(&tf, 0, sizeof(tf));
947 ap->ops->tf_read(ap, &tf);
952 /* see if device passed diags: if master then continue and warn later */
953 if (err == 0 && dev->devno == 0)
954 /* diagnostic fail : do nothing _YET_ */
955 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
958 else if ((dev->devno == 0) && (err == 0x81))
963 /* determine if device is ATA or ATAPI */
964 class = ata_dev_classify(&tf);
966 if (class == ATA_DEV_UNKNOWN) {
967 /* If the device failed diagnostic, it's likely to
968 * have reported incorrect device signature too.
969 * Assume ATA device if the device seems present but
970 * device signature is invalid with diagnostic
973 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
976 class = ATA_DEV_NONE;
977 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
978 class = ATA_DEV_NONE;
984 * ata_id_string - Convert IDENTIFY DEVICE page into string
985 * @id: IDENTIFY DEVICE results we will examine
986 * @s: string into which data is output
987 * @ofs: offset into identify device page
988 * @len: length of string to return. must be an even number.
990 * The strings in the IDENTIFY DEVICE page are broken up into
991 * 16-bit chunks. Run through the string, and output each
992 * 8-bit chunk linearly, regardless of platform.
998 void ata_id_string(const u16 *id, unsigned char *s,
999 unsigned int ofs, unsigned int len)
1018 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1019 * @id: IDENTIFY DEVICE results we will examine
1020 * @s: string into which data is output
1021 * @ofs: offset into identify device page
1022 * @len: length of string to return. must be an odd number.
1024 * This function is identical to ata_id_string except that it
1025 * trims trailing spaces and terminates the resulting string with
1026 * null. @len must be actual maximum length (even number) + 1.
1031 void ata_id_c_string(const u16 *id, unsigned char *s,
1032 unsigned int ofs, unsigned int len)
1036 WARN_ON(!(len & 1));
1038 ata_id_string(id, s, ofs, len - 1);
1040 p = s + strnlen(s, len - 1);
1041 while (p > s && p[-1] == ' ')
1046 static u64 ata_id_n_sectors(const u16 *id)
1048 if (ata_id_has_lba(id)) {
1049 if (ata_id_has_lba48(id))
1050 return ata_id_u64(id, 100);
1052 return ata_id_u32(id, 60);
1054 if (ata_id_current_chs_valid(id))
1055 return ata_id_u32(id, 57);
1057 return id[1] * id[3] * id[6];
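/*
 * IDENTIFY words used above: words 100-103 hold the LBA48 capacity,
 * words 60-61 the LBA28 capacity, words 57-58 the current CHS capacity,
 * and words 1/3/6 the default cylinder/head/sector geometry.
 */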
1061 static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1065 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1066 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1067 sectors |= (tf->hob_lbal & 0xff) << 24;
1068 sectors |= (tf->lbah & 0xff) << 16;
1069 sectors |= (tf->lbam & 0xff) << 8;
1070 sectors |= (tf->lbal & 0xff);
1075 static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1079 sectors |= (tf->device & 0x0f) << 24;
1080 sectors |= (tf->lbah & 0xff) << 16;
1081 sectors |= (tf->lbam & 0xff) << 8;
1082 sectors |= (tf->lbal & 0xff);
1088 * ata_read_native_max_address - Read native max address
1089 * @dev: target device
1090 * @max_sectors: out parameter for the result native max address
1092 * Perform an LBA48 or LBA28 native size query upon the device in
1096 * 0 on success, -EACCES if command is aborted by the drive.
1097 * -EIO on other errors.
1099 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1101 unsigned int err_mask;
1102 struct ata_taskfile tf;
1103 int lba48 = ata_id_has_lba48(dev->id);
1105 ata_tf_init(dev, &tf);
1107 /* always clear all address registers */
1108 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1111 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1112 tf.flags |= ATA_TFLAG_LBA48;
1114 tf.command = ATA_CMD_READ_NATIVE_MAX;
1116 tf.protocol |= ATA_PROT_NODATA;
1117 tf.device |= ATA_LBA;
1119 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1121 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1122 "max address (err_mask=0x%x)\n", err_mask);
1123 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1129 *max_sectors = ata_tf_to_lba48(&tf);
1131 *max_sectors = ata_tf_to_lba(&tf);
1132 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1138 * ata_set_max_sectors - Set max sectors
1139 * @dev: target device
1140 * @new_sectors: new max sectors value to set for the device
1142 * Set max sectors of @dev to @new_sectors.
1145 * 0 on success, -EACCES if command is aborted or denied (due to
1146 * previous non-volatile SET_MAX) by the drive. -EIO on other
1149 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1151 unsigned int err_mask;
1152 struct ata_taskfile tf;
1153 int lba48 = ata_id_has_lba48(dev->id);
1157 ata_tf_init(dev, &tf);
1159 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1162 tf.command = ATA_CMD_SET_MAX_EXT;
1163 tf.flags |= ATA_TFLAG_LBA48;
1165 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1166 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1167 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1169 tf.command = ATA_CMD_SET_MAX;
1171 tf.device |= (new_sectors >> 24) & 0xf;
1174 tf.protocol |= ATA_PROT_NODATA;
1175 tf.device |= ATA_LBA;
1177 tf.lbal = (new_sectors >> 0) & 0xff;
1178 tf.lbam = (new_sectors >> 8) & 0xff;
1179 tf.lbah = (new_sectors >> 16) & 0xff;
1181 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1183 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1184 "max address (err_mask=0x%x)\n", err_mask);
1185 if (err_mask == AC_ERR_DEV &&
1186 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1195 * ata_hpa_resize - Resize a device with an HPA set
1196 * @dev: Device to resize
1198 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1199 * it if required to the full size of the media. The caller must check
1200 * the drive has the HPA feature set enabled.
1203 * 0 on success, -errno on failure.
1205 static int ata_hpa_resize(struct ata_device *dev)
1207 struct ata_eh_context *ehc = &dev->link->eh_context;
1208 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1209 u64 sectors = ata_id_n_sectors(dev->id);
1213 /* do we need to do it? */
1214 if (dev->class != ATA_DEV_ATA ||
1215 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1216 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1219 /* read native max address */
1220 rc = ata_read_native_max_address(dev, &native_sectors);
1222 /* If HPA isn't going to be unlocked, skip HPA
1223 * resizing from the next try.
1225 if (!ata_ignore_hpa) {
1226 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1227 "broken, will skip HPA handling\n");
1228 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1230 /* we can continue if device aborted the command */
1238 /* nothing to do? */
1239 if (native_sectors <= sectors || !ata_ignore_hpa) {
1240 if (!print_info || native_sectors == sectors)
1243 if (native_sectors > sectors)
1244 ata_dev_printk(dev, KERN_INFO,
1245 "HPA detected: current %llu, native %llu\n",
1246 (unsigned long long)sectors,
1247 (unsigned long long)native_sectors);
1248 else if (native_sectors < sectors)
1249 ata_dev_printk(dev, KERN_WARNING,
1250 "native sectors (%llu) is smaller than "
1252 (unsigned long long)native_sectors,
1253 (unsigned long long)sectors);
1257 /* let's unlock HPA */
1258 rc = ata_set_max_sectors(dev, native_sectors);
1259 if (rc == -EACCES) {
1260 /* if device aborted the command, skip HPA resizing */
1261 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1262 "(%llu -> %llu), skipping HPA handling\n",
1263 (unsigned long long)sectors,
1264 (unsigned long long)native_sectors);
1265 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1270 /* re-read IDENTIFY data */
1271 rc = ata_dev_reread_id(dev, 0);
1273 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1274 "data after HPA resizing\n");
1279 u64 new_sectors = ata_id_n_sectors(dev->id);
1280 ata_dev_printk(dev, KERN_INFO,
1281 "HPA unlocked: %llu -> %llu, native %llu\n",
1282 (unsigned long long)sectors,
1283 (unsigned long long)new_sectors,
1284 (unsigned long long)native_sectors);
1291 * ata_noop_dev_select - Select device 0/1 on ATA bus
1292 * @ap: ATA channel to manipulate
1293 * @device: ATA device (numbered from zero) to select
1295 * This function intentionally performs no operation (no-op).
1297 * May be used as the dev_select() entry in ata_port_operations.
1302 void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1308 * ata_std_dev_select - Select device 0/1 on ATA bus
1309 * @ap: ATA channel to manipulate
1310 * @device: ATA device (numbered from zero) to select
1312 * Use the method defined in the ATA specification to
1313 * make either device 0, or device 1, active on the
1314 * ATA channel. Works with both PIO and MMIO.
1316 * May be used as the dev_select() entry in ata_port_operations.
1322 void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1327 tmp = ATA_DEVICE_OBS;
1329 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1331 iowrite8(tmp, ap->ioaddr.device_addr);
1332 ata_pause(ap); /* needed; also flushes, for mmio */
1336 * ata_dev_select - Select device 0/1 on ATA bus
1337 * @ap: ATA channel to manipulate
1338 * @device: ATA device (numbered from zero) to select
1339 * @wait: non-zero to wait for Status register BSY bit to clear
1340 * @can_sleep: non-zero if context allows sleeping
1342 * Use the method defined in the ATA specification to
1343 * make either device 0, or device 1, active on the
1346 * This is a high-level version of ata_std_dev_select(),
1347 * which additionally provides the services of inserting
1348 * the proper pauses and status polling, where needed.
1354 void ata_dev_select(struct ata_port *ap, unsigned int device,
1355 unsigned int wait, unsigned int can_sleep)
1357 if (ata_msg_probe(ap))
1358 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1359 "device %u, wait %u\n", device, wait);
1364 ap->ops->dev_select(ap, device);
1367 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1374 * ata_dump_id - IDENTIFY DEVICE info debugging output
1375 * @id: IDENTIFY DEVICE page to dump
1377 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1384 static inline void ata_dump_id(const u16 *id)
1386 DPRINTK("49==0x%04x "
1396 DPRINTK("80==0x%04x "
1406 DPRINTK("88==0x%04x "
1413 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1414 * @id: IDENTIFY data to compute xfer mask from
1416 * Compute the xfermask for this device. This is not as trivial
1417 * as it seems if we must consider early devices correctly.
1419 * FIXME: pre IDE drive timing (do we care ?).
1427 unsigned int ata_id_xfermask(const u16 *id)
1429 unsigned int pio_mask, mwdma_mask, udma_mask;
1431 /* Usual case. Word 53 indicates word 64 is valid */
1432 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1433 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1437 /* If word 64 isn't valid then Word 51 high byte holds
1438 * the PIO timing number for the maximum. Turn it into
1441 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1442 if (mode < 5) /* Valid PIO range */
1443 pio_mask = (2 << mode) - 1;
1447 /* But wait.. there's more. Design your standards by
1448 * committee and you too can get a free iordy field to
1449 * process. However it's the speeds, not the modes, that
1450 * are supported... Note drivers using the timing API
1451 * will get this right anyway
1455 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1457 if (ata_id_is_cfa(id)) {
1459 * Process compact flash extended modes
1461 int pio = id[163] & 0x7;
1462 int dma = (id[163] >> 3) & 7;
1465 pio_mask |= (1 << 5);
1467 pio_mask |= (1 << 6);
1469 mwdma_mask |= (1 << 3);
1471 mwdma_mask |= (1 << 4);
1475 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1476 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1478 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1482 * ata_port_queue_task - Queue port_task
1483 * @ap: The ata_port to queue port_task for
1484 * @fn: workqueue function to be scheduled
1485 * @data: data for @fn to use
1486 * @delay: delay time for workqueue function
1488 * Schedule @fn(@data) for execution after @delay jiffies using
1489 * port_task. There is one port_task per port and it's the
1491 * user's (low level driver's) responsibility to make sure that only
1491 * one task is active at any given time.
1493 * libata core layer takes care of synchronization between
1494 * port_task and EH. ata_port_queue_task() may be ignored for EH
1498 * Inherited from caller.
1500 void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1501 unsigned long delay)
1503 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1504 ap->port_task_data = data;
1506 /* may fail if ata_port_flush_task() in progress */
1507 queue_delayed_work(ata_wq, &ap->port_task, delay);
1511 * ata_port_flush_task - Flush port_task
1512 * @ap: The ata_port to flush port_task for
1514 * After this function completes, port_task is guaranteed not to
1515 * be running or scheduled.
1518 * Kernel thread context (may sleep)
1520 void ata_port_flush_task(struct ata_port *ap)
1524 cancel_rearming_delayed_work(&ap->port_task);
1526 if (ata_msg_ctl(ap))
1527 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1530 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1532 struct completion *waiting = qc->private_data;
1538 * ata_exec_internal_sg - execute libata internal command
1539 * @dev: Device to which the command is sent
1540 * @tf: Taskfile registers for the command and the result
1541 * @cdb: CDB for packet command
1542 * @dma_dir: Data transfer direction of the command
1543 * @sgl: sg list for the data buffer of the command
1544 * @n_elem: Number of sg entries
1545 * @timeout: Timeout in msecs (0 for default)
1547 * Executes libata internal command with timeout. @tf contains
1548 * command on entry and result on return. Timeout and error
1549 * conditions are reported via return value. No recovery action
1550 * is taken after a command times out. It's the caller's duty to
1551 * clean up after timeout.
1554 * None. Should be called with kernel context, might sleep.
1557 * Zero on success, AC_ERR_* mask on failure
1559 unsigned ata_exec_internal_sg(struct ata_device *dev,
1560 struct ata_taskfile *tf, const u8 *cdb,
1561 int dma_dir, struct scatterlist *sgl,
1562 unsigned int n_elem, unsigned long timeout)
1564 struct ata_link *link = dev->link;
1565 struct ata_port *ap = link->ap;
1566 u8 command = tf->command;
1567 struct ata_queued_cmd *qc;
1568 unsigned int tag, preempted_tag;
1569 u32 preempted_sactive, preempted_qc_active;
1570 int preempted_nr_active_links;
1571 DECLARE_COMPLETION_ONSTACK(wait);
1572 unsigned long flags;
1573 unsigned int err_mask;
1576 spin_lock_irqsave(ap->lock, flags);
1578 /* no internal command while frozen */
1579 if (ap->pflags & ATA_PFLAG_FROZEN) {
1580 spin_unlock_irqrestore(ap->lock, flags);
1581 return AC_ERR_SYSTEM;
1584 /* initialize internal qc */
1586 /* XXX: Tag 0 is used for drivers with legacy EH as some
1587 * drivers choke if any other tag is given. This breaks
1588 * ata_tag_internal() test for those drivers. Don't use new
1589 * EH stuff without converting to it.
1591 if (ap->ops->error_handler)
1592 tag = ATA_TAG_INTERNAL;
1596 if (test_and_set_bit(tag, &ap->qc_allocated))
1598 qc = __ata_qc_from_tag(ap, tag);
1606 preempted_tag = link->active_tag;
1607 preempted_sactive = link->sactive;
1608 preempted_qc_active = ap->qc_active;
1609 preempted_nr_active_links = ap->nr_active_links;
1610 link->active_tag = ATA_TAG_POISON;
1613 ap->nr_active_links = 0;
1615 /* prepare & issue qc */
1618 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1619 qc->flags |= ATA_QCFLAG_RESULT_TF;
1620 qc->dma_dir = dma_dir;
1621 if (dma_dir != DMA_NONE) {
1622 unsigned int i, buflen = 0;
1623 struct scatterlist *sg;
1625 for_each_sg(sgl, sg, n_elem, i)
1626 buflen += sg->length;
1628 ata_sg_init(qc, sgl, n_elem);
1629 qc->nbytes = buflen;
1632 qc->private_data = &wait;
1633 qc->complete_fn = ata_qc_complete_internal;
1637 spin_unlock_irqrestore(ap->lock, flags);
1640 timeout = ata_probe_timeout * 1000 / HZ;
1642 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1644 ata_port_flush_task(ap);
1647 spin_lock_irqsave(ap->lock, flags);
1649 /* We're racing with irq here. If we lose, the
1650 * following test prevents us from completing the qc
1651 * twice. If we win, the port is frozen and will be
1652 * cleaned up by ->post_internal_cmd().
1654 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1655 qc->err_mask |= AC_ERR_TIMEOUT;
1657 if (ap->ops->error_handler)
1658 ata_port_freeze(ap);
1660 ata_qc_complete(qc);
1662 if (ata_msg_warn(ap))
1663 ata_dev_printk(dev, KERN_WARNING,
1664 "qc timeout (cmd 0x%x)\n", command);
1667 spin_unlock_irqrestore(ap->lock, flags);
1670 /* do post_internal_cmd */
1671 if (ap->ops->post_internal_cmd)
1672 ap->ops->post_internal_cmd(qc);
1674 /* perform minimal error analysis */
1675 if (qc->flags & ATA_QCFLAG_FAILED) {
1676 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1677 qc->err_mask |= AC_ERR_DEV;
1680 qc->err_mask |= AC_ERR_OTHER;
1682 if (qc->err_mask & ~AC_ERR_OTHER)
1683 qc->err_mask &= ~AC_ERR_OTHER;
1687 spin_lock_irqsave(ap->lock, flags);
1689 *tf = qc->result_tf;
1690 err_mask = qc->err_mask;
1693 link->active_tag = preempted_tag;
1694 link->sactive = preempted_sactive;
1695 ap->qc_active = preempted_qc_active;
1696 ap->nr_active_links = preempted_nr_active_links;
1698 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1699 * Until those drivers are fixed, we detect the condition
1700 * here, fail the command with AC_ERR_SYSTEM and reenable the
1703 * Note that this doesn't change any behavior as internal
1704 * command failure results in disabling the device in the
1705 * higher layer for LLDDs without new reset/EH callbacks.
1707 * Kill the following code as soon as those drivers are fixed.
1709 if (ap->flags & ATA_FLAG_DISABLED) {
1710 err_mask |= AC_ERR_SYSTEM;
1714 spin_unlock_irqrestore(ap->lock, flags);
1720 * ata_exec_internal - execute libata internal command
1721 * @dev: Device to which the command is sent
1722 * @tf: Taskfile registers for the command and the result
1723 * @cdb: CDB for packet command
1724 * @dma_dir: Data transfer direction of the command
1725 * @buf: Data buffer of the command
1726 * @buflen: Length of data buffer
1727 * @timeout: Timeout in msecs (0 for default)
1729 * Wrapper around ata_exec_internal_sg() which takes simple
1730 * buffer instead of sg list.
1733 * None. Should be called with kernel context, might sleep.
1736 * Zero on success, AC_ERR_* mask on failure
1738 unsigned ata_exec_internal(struct ata_device *dev,
1739 struct ata_taskfile *tf, const u8 *cdb,
1740 int dma_dir, void *buf, unsigned int buflen,
1741 unsigned long timeout)
1743 struct scatterlist *psg = NULL, sg;
1744 unsigned int n_elem = 0;
1746 if (dma_dir != DMA_NONE) {
1748 sg_init_one(&sg, buf, buflen);
1753 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1758 * ata_do_simple_cmd - execute simple internal command
1759 * @dev: Device to which the command is sent
1760 * @cmd: Opcode to execute
1762 * Execute a 'simple' command that only consists of the opcode
1763 * 'cmd' itself, without filling any other registers
1766 * Kernel thread context (may sleep).
1769 * Zero on success, AC_ERR_* mask on failure
1771 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1773 struct ata_taskfile tf;
1775 ata_tf_init(dev, &tf);
1778 tf.flags |= ATA_TFLAG_DEVICE;
1779 tf.protocol = ATA_PROT_NODATA;
1781 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
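/*
 * Usage sketch (hypothetical caller): a register-only command such as
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
 *
 * returns 0 on success or an AC_ERR_* mask on failure.
 */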
1785 * ata_pio_need_iordy - check if iordy needed
1788 * Check if the current speed of the device requires IORDY. Used
1789 * by various controllers for chip configuration.
1792 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1794 /* Controller doesn't support IORDY. Probably a pointless check
1795 as the caller should know this */
1796 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1798 /* For PIO3 and higher it is mandatory */
1799 if (adev->pio_mode > XFER_PIO_2)
1801 /* We turn it on when possible */
1802 if (ata_id_has_iordy(adev->id))
1808 * ata_pio_mask_no_iordy - Return the non IORDY mask
1811 * Compute the highest mode possible if we are not using iordy. Return
1812 * -1 if no iordy mode is available.
1815 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1817 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1818 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1819 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1820 /* Is the speed faster than the drive allows non IORDY ? */
1822 /* This is cycle times not frequency - watch the logic! */
1823 if (pio > 240) /* PIO2 is 240nS per cycle */
1824 return 3 << ATA_SHIFT_PIO;
1825 return 7 << ATA_SHIFT_PIO;
1828 return 3 << ATA_SHIFT_PIO;
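/*
 * In short: with valid EIDE timing data, a non-IORDY cycle time slower
 * than the 240 ns PIO2 cycle limits the device to PIO0-1 (mask
 * 3 << ATA_SHIFT_PIO); faster drives are allowed PIO0-2 (7 << ATA_SHIFT_PIO).
 */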
1832 * ata_dev_read_id - Read ID data from the specified device
1833 * @dev: target device
1834 * @p_class: pointer to class of the target device (may be changed)
1835 * @flags: ATA_READID_* flags
1836 * @id: buffer to read IDENTIFY data into
1838 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1839 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1840 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1841 * for pre-ATA4 drives.
1843 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1844 * now we abort if we hit that case.
1847 * Kernel thread context (may sleep)
1850 * 0 on success, -errno otherwise.
1852 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1853 unsigned int flags, u16 *id)
1855 struct ata_port *ap = dev->link->ap;
1856 unsigned int class = *p_class;
1857 struct ata_taskfile tf;
1858 unsigned int err_mask = 0;
1860 int may_fallback = 1, tried_spinup = 0;
1863 if (ata_msg_ctl(ap))
1864 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1866 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1868 ata_tf_init(dev, &tf);
1872 tf.command = ATA_CMD_ID_ATA;
1875 tf.command = ATA_CMD_ID_ATAPI;
1879 reason = "unsupported class";
1883 tf.protocol = ATA_PROT_PIO;
1885 /* Some devices choke if TF registers contain garbage. Make
1886 * sure those are properly initialized.
1888 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1890 /* Device presence detection is unreliable on some
1891 * controllers. Always poll IDENTIFY if available.
1893 tf.flags |= ATA_TFLAG_POLLING;
1895 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1896 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1898 if (err_mask & AC_ERR_NODEV_HINT) {
1899 DPRINTK("ata%u.%d: NODEV after polling detection\n",
1900 ap->print_id, dev->devno);
1904 /* Device or controller might have reported the wrong
1905 * device class. Give a shot at the other IDENTIFY if
1906 * the current one is aborted by the device.
1909 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1912 if (class == ATA_DEV_ATA)
1913 class = ATA_DEV_ATAPI;
1915 class = ATA_DEV_ATA;
1920 reason = "I/O error";
1924 /* Falling back doesn't make sense if ID data was read
1925 * successfully at least once.
1929 swap_buf_le16(id, ATA_ID_WORDS);
1933 reason = "device reports invalid type";
1935 if (class == ATA_DEV_ATA) {
1936 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1939 if (ata_id_is_ata(id))
1943 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1946 * Drive powered-up in standby mode, and requires a specific
1947 * SET_FEATURES spin-up subcommand before it will accept
1948 * anything other than the original IDENTIFY command.
1950 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1951 if (err_mask && id[2] != 0x738c) {
1953 reason = "SPINUP failed";
1957 * If the drive initially returned incomplete IDENTIFY info,
1958 * we now must reissue the IDENTIFY command.
1960 if (id[2] == 0x37c8)
1964 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1966 * The exact sequence expected by certain pre-ATA4 drives is:
1968 * IDENTIFY (optional in early ATA)
1969 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1971 * Some drives were very specific about that exact sequence.
1973 * Note that ATA4 says lba is mandatory so the second check
1974 * should never trigger.
1976 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1977 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1980 reason = "INIT_DEV_PARAMS failed";
1984 /* current CHS translation info (id[53-58]) might be
1985 * changed. reread the identify device info.
1987 flags &= ~ATA_READID_POSTRESET;
1997 if (ata_msg_warn(ap))
1998 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1999 "(%s, err_mask=0x%x)\n", reason, err_mask);
2003 static inline u8 ata_dev_knobble(struct ata_device *dev)
2005 struct ata_port *ap = dev->link->ap;
2006 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2009 static void ata_dev_config_ncq(struct ata_device *dev,
2010 char *desc, size_t desc_sz)
2012 struct ata_port *ap = dev->link->ap;
2013 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2015 if (!ata_id_has_ncq(dev->id)) {
2019 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2020 snprintf(desc, desc_sz, "NCQ (not used)");
2023 if (ap->flags & ATA_FLAG_NCQ) {
2024 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2025 dev->flags |= ATA_DFLAG_NCQ;
2028 if (hdepth >= ddepth)
2029 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2031 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
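/*
 * Example (illustrative): a drive advertising queue depth 32 behind a
 * host whose scsi_host can_queue is 31 is reported as "NCQ (depth 31/32)".
 */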
2035 * ata_dev_configure - Configure the specified ATA/ATAPI device
2036 * @dev: Target device to configure
2038 * Configure @dev according to @dev->id. Generic and low-level
2039 * driver specific fixups are also applied.
2042 * Kernel thread context (may sleep)
2045 * 0 on success, -errno otherwise
2047 int ata_dev_configure(struct ata_device *dev)
2049 struct ata_port *ap = dev->link->ap;
2050 struct ata_eh_context *ehc = &dev->link->eh_context;
2051 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2052 const u16 *id = dev->id;
2053 unsigned int xfer_mask;
2054 char revbuf[7]; /* XYZ-99\0 */
2055 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2056 char modelbuf[ATA_ID_PROD_LEN+1];
2059 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2060 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2065 if (ata_msg_probe(ap))
2066 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
2069 dev->horkage |= ata_dev_blacklisted(dev);
2071 /* let ACPI work its magic */
2072 rc = ata_acpi_on_devcfg(dev);
2076 /* massage HPA, do it early as it might change IDENTIFY data */
2077 rc = ata_hpa_resize(dev);
2081 /* print device capabilities */
2082 if (ata_msg_probe(ap))
2083 ata_dev_printk(dev, KERN_DEBUG,
2084 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2085 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2087 id[49], id[82], id[83], id[84],
2088 id[85], id[86], id[87], id[88]);
2090 /* initialize to-be-configured parameters */
2091 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2092 dev->max_sectors = 0;
2100 * common ATA, ATAPI feature tests
2103 /* find max transfer mode; for printk only */
2104 xfer_mask = ata_id_xfermask(id);
2106 if (ata_msg_probe(ap))
2109 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2110 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2113 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2116 /* ATA-specific feature tests */
2117 if (dev->class == ATA_DEV_ATA) {
2118 if (ata_id_is_cfa(id)) {
2119 if (id[162] & 1) /* CPRM may make this media unusable */
2120 ata_dev_printk(dev, KERN_WARNING,
2121 "supports DRM functions and may "
2122 "not be fully accessable.\n");
2123 snprintf(revbuf, 7, "CFA");
2125 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2126 /* Warn the user if the device has TPM extensions */
2127 if (ata_id_has_tpm(id))
2128 ata_dev_printk(dev, KERN_WARNING,
2129 "supports DRM functions and may "
2130 "not be fully accessable.\n");
2133 dev->n_sectors = ata_id_n_sectors(id);
2135 if (dev->id[59] & 0x100)
2136 dev->multi_count = dev->id[59] & 0xff;
2138 if (ata_id_has_lba(id)) {
2139 const char *lba_desc;
2143 dev->flags |= ATA_DFLAG_LBA;
2144 if (ata_id_has_lba48(id)) {
2145 dev->flags |= ATA_DFLAG_LBA48;
2148 if (dev->n_sectors >= (1UL << 28) &&
2149 ata_id_has_flush_ext(id))
2150 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2154 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2156 /* print device info to dmesg */
2157 if (ata_msg_drv(ap) && print_info) {
2158 ata_dev_printk(dev, KERN_INFO,
2159 "%s: %s, %s, max %s\n",
2160 revbuf, modelbuf, fwrevbuf,
2161 ata_mode_string(xfer_mask));
2162 ata_dev_printk(dev, KERN_INFO,
2163 "%Lu sectors, multi %u: %s %s\n",
2164 (unsigned long long)dev->n_sectors,
2165 dev->multi_count, lba_desc, ncq_desc);
2170 /* Default translation */
2171 dev->cylinders = id[1];
2173 dev->sectors = id[6];
2175 if (ata_id_current_chs_valid(id)) {
2176 /* Current CHS translation is valid. */
2177 dev->cylinders = id[54];
2178 dev->heads = id[55];
2179 dev->sectors = id[56];
2182 /* print device info to dmesg */
2183 if (ata_msg_drv(ap) && print_info) {
2184 ata_dev_printk(dev, KERN_INFO,
2185 "%s: %s, %s, max %s\n",
2186 revbuf, modelbuf, fwrevbuf,
2187 ata_mode_string(xfer_mask));
2188 ata_dev_printk(dev, KERN_INFO,
2189 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2190 (unsigned long long)dev->n_sectors,
2191 dev->multi_count, dev->cylinders,
2192 dev->heads, dev->sectors);
2199 /* ATAPI-specific feature tests */
2200 else if (dev->class == ATA_DEV_ATAPI) {
2201 const char *cdb_intr_string = "";
2202 const char *atapi_an_string = "";
2205 rc = atapi_cdb_len(id);
2206 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2207 if (ata_msg_warn(ap))
2208 ata_dev_printk(dev, KERN_WARNING,
2209 "unsupported CDB len\n");
2213 dev->cdb_len = (unsigned int) rc;
2215 /* Enable ATAPI AN if both the host and device have
2216 * the support. If PMP is attached, SNTF is required
2217 * to enable ATAPI AN to discern between PHY status
2218 * changed notifications and ATAPI ANs.
2220 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2221 (!ap->nr_pmp_links ||
2222 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2223 unsigned int err_mask;
2225 /* issue SET feature command to turn this on */
2226 err_mask = ata_dev_set_feature(dev,
2227 SETFEATURES_SATA_ENABLE, SATA_AN);
2229 ata_dev_printk(dev, KERN_ERR,
2230 "failed to enable ATAPI AN "
2231 "(err_mask=0x%x)\n", err_mask);
2233 dev->flags |= ATA_DFLAG_AN;
2234 atapi_an_string = ", ATAPI AN";
2238 if (ata_id_cdb_intr(dev->id)) {
2239 dev->flags |= ATA_DFLAG_CDB_INTR;
2240 cdb_intr_string = ", CDB intr";
2243 /* print device info to dmesg */
2244 if (ata_msg_drv(ap) && print_info)
2245 ata_dev_printk(dev, KERN_INFO,
2246 "ATAPI: %s, %s, max %s%s%s\n",
2248 ata_mode_string(xfer_mask),
2249 cdb_intr_string, atapi_an_string);
2252 /* determine max_sectors */
2253 dev->max_sectors = ATA_MAX_SECTORS;
2254 if (dev->flags & ATA_DFLAG_LBA48)
2255 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2257 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2258 if (ata_id_has_hipm(dev->id))
2259 dev->flags |= ATA_DFLAG_HIPM;
2260 if (ata_id_has_dipm(dev->id))
2261 dev->flags |= ATA_DFLAG_DIPM;
2264 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2265 /* Let the user know. We don't want to disallow opens for
2266 rescue purposes, or in case the vendor is just a blithering
2269 ata_dev_printk(dev, KERN_WARNING,
2270 "Drive reports diagnostics failure. This may indicate a drive\n");
2271 ata_dev_printk(dev, KERN_WARNING,
2272 "fault or invalid emulation. Contact drive vendor for information.\n");
2276 /* limit bridge transfers to udma5, 200 sectors */
2277 if (ata_dev_knobble(dev)) {
2278 if (ata_msg_drv(ap) && print_info)
2279 ata_dev_printk(dev, KERN_INFO,
2280 "applying bridge limits\n");
2281 dev->udma_mask &= ATA_UDMA5;
2282 dev->max_sectors = ATA_MAX_SECTORS;
2285 if ((dev->class == ATA_DEV_ATAPI) &&
2286 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2287 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2288 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2291 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2292 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2295 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2296 dev->horkage |= ATA_HORKAGE_IPM;
2298 /* reset link pm_policy for this port to no pm */
2299 ap->pm_policy = MAX_PERFORMANCE;
2302 if (ap->ops->dev_config)
2303 ap->ops->dev_config(dev);
2305 if (ata_msg_probe(ap))
2306 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2307 __FUNCTION__, ata_chk_status(ap));
2311 if (ata_msg_probe(ap))
2312 ata_dev_printk(dev, KERN_DEBUG,
2313 "%s: EXIT, err\n", __FUNCTION__);
2318 * ata_cable_40wire - return 40 wire cable type
2321 * Helper method for drivers which want to hardwire 40 wire cable
2325 int ata_cable_40wire(struct ata_port *ap)
2327 return ATA_CBL_PATA40;
2331 * ata_cable_80wire - return 80 wire cable type
2334 * Helper method for drivers which want to hardwire 80 wire cable
2338 int ata_cable_80wire(struct ata_port *ap)
2340 return ATA_CBL_PATA80;
2344 * ata_cable_unknown - return unknown PATA cable.
2347 * Helper method for drivers which have no PATA cable detection.
2350 int ata_cable_unknown(struct ata_port *ap)
2352 return ATA_CBL_PATA_UNK;
2356 * ata_cable_sata - return SATA cable type
2359 * Helper method for drivers which have SATA cables
2362 int ata_cable_sata(struct ata_port *ap)
2364 return ATA_CBL_SATA;
2368 * ata_bus_probe - Reset and probe ATA bus
2371 * Master ATA bus probing function. Initiates a hardware-dependent
2372 * bus reset, then attempts to identify any devices found on
2376 * PCI/etc. bus probe sem.
2379 * Zero on success, negative errno otherwise.
2382 int ata_bus_probe(struct ata_port *ap)
2384 unsigned int classes[ATA_MAX_DEVICES];
2385 int tries[ATA_MAX_DEVICES];
2387 struct ata_device *dev;
2391 ata_link_for_each_dev(dev, &ap->link)
2392 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2395 ata_link_for_each_dev(dev, &ap->link) {
2396 /* If we issue an SRST then an ATA drive (not ATAPI)
2397 * may change configuration and be in PIO0 timing. If
2398 * we do a hard reset (or are coming from power on)
2399 * this is true for ATA or ATAPI. Until we've set a
2400 * suitable controller mode we should not touch the
2401 * bus as we may be talking too fast.
2403 dev->pio_mode = XFER_PIO_0;
2405 /* If the controller has a pio mode setup function
2406 * then use it to set the chipset to rights. Don't
2407 * touch the DMA setup as that will be dealt with when
2408 * configuring devices.
2410 if (ap->ops->set_piomode)
2411 ap->ops->set_piomode(ap, dev);
2414 /* reset and determine device classes */
2415 ap->ops->phy_reset(ap);
2417 ata_link_for_each_dev(dev, &ap->link) {
2418 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2419 dev->class != ATA_DEV_UNKNOWN)
2420 classes[dev->devno] = dev->class;
2422 classes[dev->devno] = ATA_DEV_NONE;
2424 dev->class = ATA_DEV_UNKNOWN;
2429 /* read IDENTIFY page and configure devices. We have to do the identify
2430 specific sequence bass-ackwards so that PDIAG- is released by
2433 ata_link_for_each_dev(dev, &ap->link) {
2434 if (tries[dev->devno])
2435 dev->class = classes[dev->devno];
2437 if (!ata_dev_enabled(dev))
2440 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2446 /* Now ask for the cable type as PDIAG- should have been released */
2447 if (ap->ops->cable_detect)
2448 ap->cbl = ap->ops->cable_detect(ap);
2450 /* We may have SATA bridge glue hiding here irrespective of the
2451 reported cable types and sensed types */
2452 ata_link_for_each_dev(dev, &ap->link) {
2453 if (!ata_dev_enabled(dev))
2455 /* SATA drives indicate we have a bridge. We don't know which
2456 end of the link the bridge is on, which is a problem */
2457 if (ata_id_is_sata(dev->id))
2458 ap->cbl = ATA_CBL_SATA;
2461 /* After the identify sequence we can now set up the devices. We do
2462 this in the normal order so that the user doesn't get confused */
2464 ata_link_for_each_dev(dev, &ap->link) {
2465 if (!ata_dev_enabled(dev))
2468 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2469 rc = ata_dev_configure(dev);
2470 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2475 /* configure transfer mode */
2476 rc = ata_set_mode(&ap->link, &dev);
2480 ata_link_for_each_dev(dev, &ap->link)
2481 if (ata_dev_enabled(dev))
2484 /* no device present, disable port */
2485 ata_port_disable(ap);
2489 tries[dev->devno]--;
2493 /* eeek, something went very wrong, give up */
2494 tries[dev->devno] = 0;
2498 /* give it just one more chance */
2499 tries[dev->devno] = min(tries[dev->devno], 1);
2501 if (tries[dev->devno] == 1) {
2502 /* This is the last chance, better to slow
2503 * down than lose it.
2505 sata_down_spd_limit(&ap->link);
2506 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2510 if (!tries[dev->devno])
2511 ata_dev_disable(dev);
2517 * ata_port_probe - Mark port as enabled
2518 * @ap: Port for which we indicate enablement
2520 * Modify @ap data structure such that the system
2521 * thinks that the entire port is enabled.
2523 * LOCKING: host lock, or some other form of
2527 void ata_port_probe(struct ata_port *ap)
2529 ap->flags &= ~ATA_FLAG_DISABLED;
2533 * sata_print_link_status - Print SATA link status
2534 * @link: SATA link to printk link status about
2536 * This function prints link speed and status of a SATA link.
2541 void sata_print_link_status(struct ata_link *link)
2543 u32 sstatus, scontrol, tmp;
2545 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2547 sata_scr_read(link, SCR_CONTROL, &scontrol);
2549 if (ata_link_online(link)) {
2550 tmp = (sstatus >> 4) & 0xf;
2551 ata_link_printk(link, KERN_INFO,
2552 "SATA link up %s (SStatus %X SControl %X)\n",
2553 sata_spd_string(tmp), sstatus, scontrol);
2555 ata_link_printk(link, KERN_INFO,
2556 "SATA link down (SStatus %X SControl %X)\n",
2562 * ata_dev_pair - return other device on cable
2565 * Obtain the other device on the same cable, or NULL if none
2566 * is present.
2569 struct ata_device *ata_dev_pair(struct ata_device *adev)
2571 struct ata_link *link = adev->link;
2572 struct ata_device *pair = &link->device[1 - adev->devno];
2573 if (!ata_dev_enabled(pair))
2579 * ata_port_disable - Disable port.
2580 * @ap: Port to be disabled.
2582 * Modify @ap data structure such that the system
2583 * thinks that the entire port is disabled, and should
2584 * never attempt to probe or communicate with devices
2587 * LOCKING: host lock, or some other form of
2591 void ata_port_disable(struct ata_port *ap)
2593 ap->link.device[0].class = ATA_DEV_NONE;
2594 ap->link.device[1].class = ATA_DEV_NONE;
2595 ap->flags |= ATA_FLAG_DISABLED;
2599 * sata_down_spd_limit - adjust SATA spd limit downward
2600 * @link: Link to adjust SATA spd limit for
2602 * Adjust SATA spd limit of @link downward. Note that this
2603 * function only adjusts the limit. The change must be applied
2604 * using sata_set_spd().
2607 * Inherited from caller.
2610 * 0 on success, negative errno on failure
2612 int sata_down_spd_limit(struct ata_link *link)
2614 u32 sstatus, spd, mask;
2617 if (!sata_scr_valid(link))
2620 /* If SCR can be read, use it to determine the current SPD.
2621 * If not, use cached value in link->sata_spd.
2623 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2625 spd = (sstatus >> 4) & 0xf;
2627 spd = link->sata_spd;
2629 mask = link->sata_spd_limit;
2633 /* unconditionally mask off the highest bit */
2634 highbit = fls(mask) - 1;
2635 mask &= ~(1 << highbit);
2637 /* Mask off all speeds higher than or equal to the current
2638 * one. Force 1.5Gbps if current SPD is not available.
2641 mask &= (1 << (spd - 1)) - 1;
2645 /* were we already at the bottom? */
2649 link->sata_spd_limit = mask;
2651 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2652 sata_spd_string(fls(mask)));
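/*
 * Worked example (illustrative values): with sata_spd_limit == 0x3
 * (1.5 and 3.0 Gbps allowed), fls(0x3) - 1 == 1, so bit 1 is cleared
 * and mask becomes 0x1.  If SStatus reports the link at SPD 2
 * (3.0 Gbps), "mask &= (1 << (spd - 1)) - 1" likewise keeps only
 * bit 0, so the new limit is 1.5 Gbps; the change is applied later
 * via sata_set_spd().
 */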
2657 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2659 struct ata_link *host_link = &link->ap->link;
2660 u32 limit, target, spd;
2662 limit = link->sata_spd_limit;
2664 /* Don't configure downstream link faster than upstream link.
2665 * It doesn't speed up anything and some PMPs choke on such
2668 if (!ata_is_host_link(link) && host_link->sata_spd)
2669 limit &= (1 << host_link->sata_spd) - 1;
2671 if (limit == UINT_MAX)
2674 target = fls(limit);
2676 spd = (*scontrol >> 4) & 0xf;
2677 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2679 return spd != target;
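/*
 * Example (illustrative values): with a sata_spd_limit of 0x1
 * (1.5 Gbps only), target == fls(0x1) == 1.  If the SPD field in
 * SControl (bits 7:4) currently holds anything else, it is rewritten
 * to 1 and the function returns 1, i.e. a hardreset is needed to
 * apply the new limit.
 */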
2683 * sata_set_spd_needed - is SATA spd configuration needed
2684 * @link: Link in question
2686 * Test whether the spd limit in SControl matches
2687 * @link->sata_spd_limit. This function is used to determine
2688 * whether hardreset is necessary to apply SATA spd
2692 * Inherited from caller.
2695 * 1 if SATA spd configuration is needed, 0 otherwise.
2697 int sata_set_spd_needed(struct ata_link *link)
2701 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2704 return __sata_set_spd_needed(link, &scontrol);
2708 * sata_set_spd - set SATA spd according to spd limit
2709 * @link: Link to set SATA spd for
2711 * Set SATA spd of @link according to sata_spd_limit.
2714 * Inherited from caller.
2717 * 0 if spd doesn't need to be changed, 1 if spd has been
2718 * changed. Negative errno if SCR registers are inaccessible.
2720 int sata_set_spd(struct ata_link *link)
2725 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2728 if (!__sata_set_spd_needed(link, &scontrol))
2731 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2738 * This mode timing computation functionality is ported over from
2739 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2742 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2743 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2744 * for UDMA6, which is currently supported only by Maxtor drives.
2746 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2749 static const struct ata_timing ata_timing[] = {
2750 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2751 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2752 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2753 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2754 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2755 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2756 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2757 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2759 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2760 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2761 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2763 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2764 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2765 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2766 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2767 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2769 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2770 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2771 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2772 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2773 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2774 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2775 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2776 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2781 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2782 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2784 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2786 q->setup = EZ(t->setup * 1000, T);
2787 q->act8b = EZ(t->act8b * 1000, T);
2788 q->rec8b = EZ(t->rec8b * 1000, T);
2789 q->cyc8b = EZ(t->cyc8b * 1000, T);
2790 q->active = EZ(t->active * 1000, T);
2791 q->recover = EZ(t->recover * 1000, T);
2792 q->cycle = EZ(t->cycle * 1000, T);
2793 q->udma = EZ(t->udma * 1000, UT);
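/*
 * Example of the quantisation above (illustrative values): if the
 * caller passes T == 30000, i.e. one period of a ~33 MHz bus clock in
 * the 1/1000 ns units implied by the "* 1000" scaling, then a 70 ns
 * setup requirement becomes EZ(70 * 1000, 30000) == 3 clock cycles.
 * ENOUGH() always rounds up, so the quantised timing is never faster
 * than what the table demands.
 */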
2796 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2797 struct ata_timing *m, unsigned int what)
2799 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2800 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2801 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2802 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2803 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2804 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2805 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2806 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2809 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2811 const struct ata_timing *t = ata_timing;
2813 while (xfer_mode > t->mode)
2816 if (xfer_mode == t->mode)
2821 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2822 struct ata_timing *t, int T, int UT)
2824 const struct ata_timing *s;
2825 struct ata_timing p;
2831 if (!(s = ata_timing_find_mode(speed)))
2834 memcpy(t, s, sizeof(*s));
2837 * If the drive is an EIDE drive, it can tell us it needs extended
2838 * PIO/MW_DMA cycle timing.
2841 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2842 memset(&p, 0, sizeof(p));
2843 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2844 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2845 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2846 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2847 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2849 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2853 * Convert the timing to bus clock counts.
2856 ata_timing_quantize(t, t, T, UT);
2859 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2860 * S.M.A.R.T. and some other commands. We have to ensure that the
2861 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2864 if (speed > XFER_PIO_6) {
2865 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2866 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2870 * Lengthen active & recovery time so that cycle time is correct.
2873 if (t->act8b + t->rec8b < t->cyc8b) {
2874 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2875 t->rec8b = t->cyc8b - t->act8b;
2878 if (t->active + t->recover < t->cycle) {
2879 t->active += (t->cycle - (t->active + t->recover)) / 2;
2880 t->recover = t->cycle - t->active;
2883 /* In a few cases quantisation may produce enough errors to
2884 leave t->cycle too low for the sum of active and recovery;
2885 if so we must correct this */
2886 if (t->active + t->recover > t->cycle)
2887 t->cycle = t->active + t->recover;
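/*
 * Example for the fix-ups above (illustrative values): with
 * active == 3, recover == 2 and cycle == 8 clocks, active grows by
 * (8 - 5) / 2 == 1 and recover becomes 8 - 4 == 4, so the pair again
 * fills the whole cycle.  In the opposite case the cycle itself is
 * stretched, since a cycle shorter than active + recover would
 * violate the device's timing requirements.
 */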
2893 * ata_down_xfermask_limit - adjust dev xfer masks downward
2894 * @dev: Device to adjust xfer masks
2895 * @sel: ATA_DNXFER_* selector
2897 * Adjust xfer masks of @dev downward. Note that this function
2898 * does not apply the change. Invoking ata_set_mode() afterwards
2899 * will apply the limit.
2902 * Inherited from caller.
2905 * 0 on success, negative errno on failure
2907 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2910 unsigned int orig_mask, xfer_mask;
2911 unsigned int pio_mask, mwdma_mask, udma_mask;
2914 quiet = !!(sel & ATA_DNXFER_QUIET);
2915 sel &= ~ATA_DNXFER_QUIET;
2917 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2920 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
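/*
 * For illustration: ATA_DNXFER_PIO below knocks off only the highest
 * PIO bit (e.g. a PIO0-4 device drops to PIO0-3), while
 * ATA_DNXFER_40C masks the UDMA bits down to the UDMA/33-and-below
 * modes that are safe on a 40-wire cable.
 */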
2923 case ATA_DNXFER_PIO:
2924 highbit = fls(pio_mask) - 1;
2925 pio_mask &= ~(1 << highbit);
2928 case ATA_DNXFER_DMA:
2930 highbit = fls(udma_mask) - 1;
2931 udma_mask &= ~(1 << highbit);
2934 } else if (mwdma_mask) {
2935 highbit = fls(mwdma_mask) - 1;
2936 mwdma_mask &= ~(1 << highbit);
2942 case ATA_DNXFER_40C:
2943 udma_mask &= ATA_UDMA_MASK_40C;
2946 case ATA_DNXFER_FORCE_PIO0:
2948 case ATA_DNXFER_FORCE_PIO:
2957 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2959 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2963 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2964 snprintf(buf, sizeof(buf), "%s:%s",
2965 ata_mode_string(xfer_mask),
2966 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2968 snprintf(buf, sizeof(buf), "%s",
2969 ata_mode_string(xfer_mask));
2971 ata_dev_printk(dev, KERN_WARNING,
2972 "limiting speed to %s\n", buf);
2975 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2981 static int ata_dev_set_mode(struct ata_device *dev)
2983 struct ata_eh_context *ehc = &dev->link->eh_context;
2984 unsigned int err_mask;
2987 dev->flags &= ~ATA_DFLAG_PIO;
2988 if (dev->xfer_shift == ATA_SHIFT_PIO)
2989 dev->flags |= ATA_DFLAG_PIO;
2991 err_mask = ata_dev_set_xfermode(dev);
2993 /* Old CFA may refuse this command, which is just fine */
2994 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2995 err_mask &= ~AC_ERR_DEV;
2997 /* Some very old devices and some bad newer ones fail any kind of
2998 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
2999 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3000 dev->pio_mode <= XFER_PIO_2)
3001 err_mask &= ~AC_ERR_DEV;
3003 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3004 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3005 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3006 dev->dma_mode == XFER_MW_DMA_0 &&
3007 (dev->id[63] >> 8) & 1)
3008 err_mask &= ~AC_ERR_DEV;
3011 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3012 "(err_mask=0x%x)\n", err_mask);
3016 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3017 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3018 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3022 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3023 dev->xfer_shift, (int)dev->xfer_mode);
3025 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
3026 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
3031 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3032 * @link: link on which timings will be programmed
3033 * @r_failed_dev: out parameter for failed device
3035 * Standard implementation of the function used to tune and set
3036 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3037 * ata_dev_set_mode() fails, pointer to the failing device is
3038 * returned in @r_failed_dev.
3041 * PCI/etc. bus probe sem.
3044 * 0 on success, negative errno otherwise
3047 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3049 struct ata_port *ap = link->ap;
3050 struct ata_device *dev;
3051 int rc = 0, used_dma = 0, found = 0;
3053 /* step 1: calculate xfer_mask */
3054 ata_link_for_each_dev(dev, link) {
3055 unsigned int pio_mask, dma_mask;
3056 unsigned int mode_mask;
3058 if (!ata_dev_enabled(dev))
3061 mode_mask = ATA_DMA_MASK_ATA;
3062 if (dev->class == ATA_DEV_ATAPI)
3063 mode_mask = ATA_DMA_MASK_ATAPI;
3064 else if (ata_id_is_cfa(dev->id))
3065 mode_mask = ATA_DMA_MASK_CFA;
3067 ata_dev_xfermask(dev);
3069 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3070 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3072 if (libata_dma_mask & mode_mask)
3073 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3077 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3078 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3081 if (dev->dma_mode != 0xff)
3087 /* step 2: always set host PIO timings */
3088 ata_link_for_each_dev(dev, link) {
3089 if (!ata_dev_enabled(dev))
3092 if (dev->pio_mode == 0xff) {
3093 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3098 dev->xfer_mode = dev->pio_mode;
3099 dev->xfer_shift = ATA_SHIFT_PIO;
3100 if (ap->ops->set_piomode)
3101 ap->ops->set_piomode(ap, dev);
3104 /* step 3: set host DMA timings */
3105 ata_link_for_each_dev(dev, link) {
3106 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3109 dev->xfer_mode = dev->dma_mode;
3110 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3111 if (ap->ops->set_dmamode)
3112 ap->ops->set_dmamode(ap, dev);
3115 /* step 4: update devices' xfer mode */
3116 ata_link_for_each_dev(dev, link) {
3117 /* don't update suspended devices' xfer mode */
3118 if (!ata_dev_enabled(dev))
3121 rc = ata_dev_set_mode(dev);
3126 /* Record simplex status. If we selected DMA then the other
3127 * host channels are not permitted to do so.
3129 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3130 ap->host->simplex_claimed = ap;
3134 *r_failed_dev = dev;
3139 * ata_tf_to_host - issue ATA taskfile to host controller
3140 * @ap: port to which command is being issued
3141 * @tf: ATA taskfile register set
3143 * Issues ATA taskfile register set to ATA host controller,
3144 * with proper synchronization with interrupt handler and
3148 * spin_lock_irqsave(host lock)
3151 static inline void ata_tf_to_host(struct ata_port *ap,
3152 const struct ata_taskfile *tf)
3154 ap->ops->tf_load(ap, tf);
3155 ap->ops->exec_command(ap, tf);
3159 * ata_busy_sleep - sleep until BSY clears, or timeout
3160 * @ap: port containing status register to be polled
3161 * @tmout_pat: impatience timeout
3162 * @tmout: overall timeout
3164 * Sleep until ATA Status register bit BSY clears,
3165 * or a timeout occurs.
3168 * Kernel thread context (may sleep).
3171 * 0 on success, -errno otherwise.
3173 int ata_busy_sleep(struct ata_port *ap,
3174 unsigned long tmout_pat, unsigned long tmout)
3176 unsigned long timer_start, timeout;
3179 status = ata_busy_wait(ap, ATA_BUSY, 300);
3180 timer_start = jiffies;
3181 timeout = timer_start + tmout_pat;
3182 while (status != 0xff && (status & ATA_BUSY) &&
3183 time_before(jiffies, timeout)) {
3185 status = ata_busy_wait(ap, ATA_BUSY, 3);
3188 if (status != 0xff && (status & ATA_BUSY))
3189 ata_port_printk(ap, KERN_WARNING,
3190 "port is slow to respond, please be patient "
3191 "(Status 0x%x)\n", status);
3193 timeout = timer_start + tmout;
3194 while (status != 0xff && (status & ATA_BUSY) &&
3195 time_before(jiffies, timeout)) {
3197 status = ata_chk_status(ap);
3203 if (status & ATA_BUSY) {
3204 ata_port_printk(ap, KERN_ERR, "port failed to respond "
3205 "(%lu secs, Status 0x%x)\n",
3206 tmout / HZ, status);
3214 * ata_wait_after_reset - wait before checking status after reset
3215 * @ap: port containing status register to be polled
3216 * @deadline: deadline jiffies for the operation
3218 * After reset, we need to pause a while before reading status.
3219 * Also, certain combinations of controller and device report 0xff
3220 * for some duration (e.g. until SATA PHY is up and running)
3221 * which is interpreted as an empty port in the ATA world. This
3222 * function also waits for such devices to get out of 0xff
3226 * Kernel thread context (may sleep).
3228 void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
3230 unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
3232 if (time_before(until, deadline))
3235 /* Spec mandates ">= 2ms" before checking status. We wait
3236 * 150ms, because that was the magic delay used for ATAPI
3237 * devices in Hale Landis's ATADRVR, for the period of time
3238 * between when the ATA command register is written, and then
3239 * status is checked. Because waiting for "a while" before
3240 * checking status is fine, post SRST, we perform this magic
3241 * delay here as well.
3243 * Old drivers/ide uses the 2mS rule and then waits for ready.
3247 /* Wait for 0xff to clear. Some SATA devices take a long time
3248 * to clear 0xff after reset. For example, HHD424020F7SV00
3249 * iVDR needs >= 800ms, while Quantum GoVault needs even more
3252 * Note that some PATA controllers (pata_ali) explode if
3253 * status register is read more than once when there's no
3256 if (ap->flags & ATA_FLAG_SATA) {
3258 u8 status = ata_chk_status(ap);
3260 if (status != 0xff || time_after(jiffies, deadline))
3269 * ata_wait_ready - sleep until BSY clears, or timeout
3270 * @ap: port containing status register to be polled
3271 * @deadline: deadline jiffies for the operation
3273 * Sleep until ATA Status register bit BSY clears, or timeout
3277 * Kernel thread context (may sleep).
3280 * 0 on success, -errno otherwise.
3282 int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3284 unsigned long start = jiffies;
3288 u8 status = ata_chk_status(ap);
3289 unsigned long now = jiffies;
3291 if (!(status & ATA_BUSY))
3293 if (!ata_link_online(&ap->link) && status == 0xff)
3295 if (time_after(now, deadline))
3298 if (!warned && time_after(now, start + 5 * HZ) &&
3299 (deadline - now > 3 * HZ)) {
3300 ata_port_printk(ap, KERN_WARNING,
3301 "port is slow to respond, please be patient "
3302 "(Status 0x%x)\n", status);
3310 static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3311 unsigned long deadline)
3313 struct ata_ioports *ioaddr = &ap->ioaddr;
3314 unsigned int dev0 = devmask & (1 << 0);
3315 unsigned int dev1 = devmask & (1 << 1);
3318 /* if device 0 was found in ata_devchk, wait for its
3322 rc = ata_wait_ready(ap, deadline);
3330 /* if device 1 was found in ata_devchk, wait for register
3331 * access briefly, then wait for BSY to clear.
3336 ap->ops->dev_select(ap, 1);
3338 /* Wait for register access. Some ATAPI devices fail
3339 * to set nsect/lbal after reset, so don't waste too
3340 * much time on it. We're gonna wait for !BSY anyway.
3342 for (i = 0; i < 2; i++) {
3345 nsect = ioread8(ioaddr->nsect_addr);
3346 lbal = ioread8(ioaddr->lbal_addr);
3347 if ((nsect == 1) && (lbal == 1))
3349 msleep(50); /* give drive a breather */
3352 rc = ata_wait_ready(ap, deadline);
3360 /* is all this really necessary? */
3361 ap->ops->dev_select(ap, 0);
3363 ap->ops->dev_select(ap, 1);
3365 ap->ops->dev_select(ap, 0);
3370 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3371 unsigned long deadline)
3373 struct ata_ioports *ioaddr = &ap->ioaddr;
3375 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3377 /* software reset. causes dev0 to be selected */
3378 iowrite8(ap->ctl, ioaddr->ctl_addr);
3379 udelay(20); /* FIXME: flush */
3380 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3381 udelay(20); /* FIXME: flush */
3382 iowrite8(ap->ctl, ioaddr->ctl_addr);
3384 /* wait a while before checking status */
3385 ata_wait_after_reset(ap, deadline);
3387 /* Before we perform post reset processing we want to see if
3388 * the bus shows 0xFF because the odd clown forgets the D7
3389 * pulldown resistor.
3391 if (ata_chk_status(ap) == 0xFF)
3394 return ata_bus_post_reset(ap, devmask, deadline);
3398 * ata_bus_reset - reset host port and associated ATA channel
3399 * @ap: port to reset
3401 * This is typically the first time we actually start issuing
3402 * commands to the ATA channel. We wait for BSY to clear, then
3403 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3404 * result. Determine what devices, if any, are on the channel
3405 * by looking at the device 0/1 error register. Look at the signature
3406 * stored in each device's taskfile registers, to determine if
3407 * the device is ATA or ATAPI.
3410 * PCI/etc. bus probe sem.
3411 * Obtains host lock.
3414 * Sets ATA_FLAG_DISABLED if bus reset fails.
3417 void ata_bus_reset(struct ata_port *ap)
3419 struct ata_device *device = ap->link.device;
3420 struct ata_ioports *ioaddr = &ap->ioaddr;
3421 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3423 unsigned int dev0, dev1 = 0, devmask = 0;
3426 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3428 /* determine if device 0/1 are present */
3429 if (ap->flags & ATA_FLAG_SATA_RESET)
3432 dev0 = ata_devchk(ap, 0);
3434 dev1 = ata_devchk(ap, 1);
3438 devmask |= (1 << 0);
3440 devmask |= (1 << 1);
3442 /* select device 0 again */
3443 ap->ops->dev_select(ap, 0);
3445 /* issue bus reset */
3446 if (ap->flags & ATA_FLAG_SRST) {
3447 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3448 if (rc && rc != -ENODEV)
3453 * determine by signature whether we have ATA or ATAPI devices
3455 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3456 if ((slave_possible) && (err != 0x81))
3457 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3459 /* is double-select really necessary? */
3460 if (device[1].class != ATA_DEV_NONE)
3461 ap->ops->dev_select(ap, 1);
3462 if (device[0].class != ATA_DEV_NONE)
3463 ap->ops->dev_select(ap, 0);
3465 /* if no devices were detected, disable this port */
3466 if ((device[0].class == ATA_DEV_NONE) &&
3467 (device[1].class == ATA_DEV_NONE))
3470 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3471 /* set up device control for ATA_FLAG_SATA_RESET */
3472 iowrite8(ap->ctl, ioaddr->ctl_addr);
3479 ata_port_printk(ap, KERN_ERR, "disabling port\n");
3480 ata_port_disable(ap);
3486 * sata_link_debounce - debounce SATA phy status
3487 * @link: ATA link to debounce SATA phy status for
3488 * @params: timing parameters { interval, duration, timeout } in msec
3489 * @deadline: deadline jiffies for the operation
3491 * Make sure SStatus of @link reaches stable state, determined by
3492 * holding the same value where DET is not 1 for @duration polled
3493 * every @interval, before @timeout. Timeout constrains the
3494 * beginning of the stable state. Because DET gets stuck at 1 on
3495 * some controllers after hot unplugging, this function waits
3496 * until timeout and then returns 0 if DET is stable at 1.
3498 * @timeout is further limited by @deadline. The sooner of the
3502 * Kernel thread context (may sleep)
3505 * 0 on success, -errno on failure.
3507 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3508 unsigned long deadline)
3510 unsigned long interval_msec = params[0];
3511 unsigned long duration = msecs_to_jiffies(params[1]);
3512 unsigned long last_jiffies, t;
3516 t = jiffies + msecs_to_jiffies(params[2]);
3517 if (time_before(t, deadline))
3520 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3525 last_jiffies = jiffies;
3528 msleep(interval_msec);
3529 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3535 if (cur == 1 && time_before(jiffies, deadline))
3537 if (time_after(jiffies, last_jiffies + duration))
3542 /* unstable, start over */
3544 last_jiffies = jiffies;
3546 /* Check deadline. If debouncing failed, return
3547 * -EPIPE to tell upper layer to lower link speed.
3549 if (time_after(jiffies, deadline))
3555 * sata_link_resume - resume SATA link
3556 * @link: ATA link to resume SATA
3557 * @params: timing parameters { interval, duration, timeout } in msec
3558 * @deadline: deadline jiffies for the operation
3560 * Resume SATA phy @link and debounce it.
3563 * Kernel thread context (may sleep)
3566 * 0 on success, -errno on failure.
3568 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3569 unsigned long deadline)
3574 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3577 scontrol = (scontrol & 0x0f0) | 0x300;
3579 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3582 /* Some PHYs react badly if SStatus is pounded immediately
3583 * after resuming. Delay 200ms before debouncing.
3587 return sata_link_debounce(link, params, deadline);
3591 * ata_std_prereset - prepare for reset
3592 * @link: ATA link to be reset
3593 * @deadline: deadline jiffies for the operation
3595 * @link is about to be reset. Initialize it. Failure from
3596 * prereset makes libata abort whole reset sequence and give up
3597 * that port, so prereset should be best-effort. It does its
3598 * best to prepare for reset sequence but if things go wrong, it
3599 * should just whine, not fail.
3602 * Kernel thread context (may sleep)
3605 * 0 on success, -errno otherwise.
3607 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3609 struct ata_port *ap = link->ap;
3610 struct ata_eh_context *ehc = &link->eh_context;
3611 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3614 /* handle link resume */
3615 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3616 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3617 ehc->i.action |= ATA_EH_HARDRESET;
3619 /* Some PMPs don't work with only SRST, force hardreset if PMP
3622 if (ap->flags & ATA_FLAG_PMP)
3623 ehc->i.action |= ATA_EH_HARDRESET;
3625 /* if we're about to do hardreset, nothing more to do */
3626 if (ehc->i.action & ATA_EH_HARDRESET)
3629 /* if SATA, resume link */
3630 if (ap->flags & ATA_FLAG_SATA) {
3631 rc = sata_link_resume(link, timing, deadline);
3632 /* whine about phy resume failure but proceed */
3633 if (rc && rc != -EOPNOTSUPP)
3634 ata_link_printk(link, KERN_WARNING, "failed to resume "
3635 "link for reset (errno=%d)\n", rc);
3638 /* Wait for !BSY if the controller can wait for the first D2H
3639 * Reg FIS and we don't know that no device is attached.
3641 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3642 rc = ata_wait_ready(ap, deadline);
3643 if (rc && rc != -ENODEV) {
3644 ata_link_printk(link, KERN_WARNING, "device not ready "
3645 "(errno=%d), forcing hardreset\n", rc);
3646 ehc->i.action |= ATA_EH_HARDRESET;
3654 * ata_std_softreset - reset host port via ATA SRST
3655 * @link: ATA link to reset
3656 * @classes: resulting classes of attached devices
3657 * @deadline: deadline jiffies for the operation
3659 * Reset host port using ATA SRST.
3662 * Kernel thread context (may sleep)
3665 * 0 on success, -errno otherwise.
3667 int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3668 unsigned long deadline)
3670 struct ata_port *ap = link->ap;
3671 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3672 unsigned int devmask = 0;
3678 if (ata_link_offline(link)) {
3679 classes[0] = ATA_DEV_NONE;
3683 /* determine if device 0/1 are present */
3684 if (ata_devchk(ap, 0))
3685 devmask |= (1 << 0);
3686 if (slave_possible && ata_devchk(ap, 1))
3687 devmask |= (1 << 1);
3689 /* select device 0 again */
3690 ap->ops->dev_select(ap, 0);
3692 /* issue bus reset */
3693 DPRINTK("about to softreset, devmask=%x\n", devmask);
3694 rc = ata_bus_softreset(ap, devmask, deadline);
3695 /* if link is occupied, -ENODEV too is an error */
3696 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3697 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3701 /* determine by signature whether we have ATA or ATAPI devices */
3702 classes[0] = ata_dev_try_classify(&link->device[0],
3703 devmask & (1 << 0), &err);
3704 if (slave_possible && err != 0x81)
3705 classes[1] = ata_dev_try_classify(&link->device[1],
3706 devmask & (1 << 1), &err);
3709 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3714 * sata_link_hardreset - reset link via SATA phy reset
3715 * @link: link to reset
3716 * @timing: timing parameters { interval, duration, timeout } in msec
3717 * @deadline: deadline jiffies for the operation
3719 * SATA phy-reset @link using DET bits of SControl register.
3722 * Kernel thread context (may sleep)
3725 * 0 on success, -errno otherwise.
3727 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3728 unsigned long deadline)
3735 if (sata_set_spd_needed(link)) {
3736 /* SATA spec says nothing about how to reconfigure
3737 * spd. To be on the safe side, turn off phy during
3738 * reconfiguration. This works for at least ICH7 AHCI
3741 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3744 scontrol = (scontrol & 0x0f0) | 0x304;
3746 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3752 /* issue phy wake/reset */
3753 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3756 scontrol = (scontrol & 0x0f0) | 0x301;
3758 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3761 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3762 * 10.4.2 says at least 1 ms.
3766 /* bring link back */
3767 rc = sata_link_resume(link, timing, deadline);
3769 DPRINTK("EXIT, rc=%d\n", rc);
3774 * sata_std_hardreset - reset host port via SATA phy reset
3775 * @link: link to reset
3776 * @class: resulting class of attached device
3777 * @deadline: deadline jiffies for the operation
3779 * SATA phy-reset host port using DET bits of SControl register,
3780 * wait for !BSY and classify the attached device.
3783 * Kernel thread context (may sleep)
3786 * 0 on success, -errno otherwise.
3788 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3789 unsigned long deadline)
3791 struct ata_port *ap = link->ap;
3792 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3798 rc = sata_link_hardreset(link, timing, deadline);
3800 ata_link_printk(link, KERN_ERR,
3801 "COMRESET failed (errno=%d)\n", rc);
3805 /* TODO: phy layer with polling, timeouts, etc. */
3806 if (ata_link_offline(link)) {
3807 *class = ATA_DEV_NONE;
3808 DPRINTK("EXIT, link offline\n");
3812 /* wait a while before checking status */
3813 ata_wait_after_reset(ap, deadline);
3815 /* If PMP is supported, we have to do follow-up SRST. Note
3816 * that some PMPs don't send D2H Reg FIS after hardreset at
3817 * all if the first port is empty. Wait for it just for a
3818 * second and request follow-up SRST.
3820 if (ap->flags & ATA_FLAG_PMP) {
3821 ata_wait_ready(ap, jiffies + HZ);
3825 rc = ata_wait_ready(ap, deadline);
3826 /* link occupied, -ENODEV too is an error */
3828 ata_link_printk(link, KERN_ERR,
3829 "COMRESET failed (errno=%d)\n", rc);
3833 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3835 *class = ata_dev_try_classify(link->device, 1, NULL);
3837 DPRINTK("EXIT, class=%u\n", *class);
3842 * ata_std_postreset - standard postreset callback
3843 * @link: the target ata_link
3844 * @classes: classes of attached devices
3846 * This function is invoked after a successful reset. Note that
3847 * the device might have been reset more than once using
3848 * different reset methods before postreset is invoked.
3851 * Kernel thread context (may sleep)
3853 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3855 struct ata_port *ap = link->ap;
3860 /* print link status */
3861 sata_print_link_status(link);
3864 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3865 sata_scr_write(link, SCR_ERROR, serror);
3866 link->eh_info.serror = 0;
3868 /* is double-select really necessary? */
3869 if (classes[0] != ATA_DEV_NONE)
3870 ap->ops->dev_select(ap, 1);
3871 if (classes[1] != ATA_DEV_NONE)
3872 ap->ops->dev_select(ap, 0);
3874 /* bail out if no device is present */
3875 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3876 DPRINTK("EXIT, no device\n");
3880 /* set up device control */
3881 if (ap->ioaddr.ctl_addr)
3882 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3888 * ata_dev_same_device - Determine whether new ID matches configured device
3889 * @dev: device to compare against
3890 * @new_class: class of the new device
3891 * @new_id: IDENTIFY page of the new device
3893 * Compare @new_class and @new_id against @dev and determine
3894 * whether @dev is the device indicated by @new_class and
3901 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3903 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3906 const u16 *old_id = dev->id;
3907 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3908 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3910 if (dev->class != new_class) {
3911 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3912 dev->class, new_class);
3916 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3917 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3918 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3919 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3921 if (strcmp(model[0], model[1])) {
3922 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3923 "'%s' != '%s'\n", model[0], model[1]);
3927 if (strcmp(serial[0], serial[1])) {
3928 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3929 "'%s' != '%s'\n", serial[0], serial[1]);
3937 * ata_dev_reread_id - Re-read IDENTIFY data
3938 * @dev: target ATA device
3939 * @readid_flags: read ID flags
3941 * Re-read IDENTIFY page and make sure @dev is still attached to
3945 * Kernel thread context (may sleep)
3948 * 0 on success, negative errno otherwise
3950 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3952 unsigned int class = dev->class;
3953 u16 *id = (void *)dev->link->ap->sector_buf;
3957 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3961 /* is the device still there? */
3962 if (!ata_dev_same_device(dev, class, id))
3965 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3970 * ata_dev_revalidate - Revalidate ATA device
3971 * @dev: device to revalidate
3972 * @new_class: new class code
3973 * @readid_flags: read ID flags
3975 * Re-read IDENTIFY page, make sure @dev is still attached to the
3976 * port and reconfigure it according to the new IDENTIFY page.
3979 * Kernel thread context (may sleep)
3982 * 0 on success, negative errno otherwise
3984 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3985 unsigned int readid_flags)
3987 u64 n_sectors = dev->n_sectors;
3990 if (!ata_dev_enabled(dev))
3993 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3994 if (ata_class_enabled(new_class) &&
3995 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3996 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3997 dev->class, new_class);
4003 rc = ata_dev_reread_id(dev, readid_flags);
4007 /* configure device according to the new ID */
4008 rc = ata_dev_configure(dev);
4012 /* verify n_sectors hasn't changed */
4013 if (dev->class == ATA_DEV_ATA && n_sectors &&
4014 dev->n_sectors != n_sectors) {
4015 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4017 (unsigned long long)n_sectors,
4018 (unsigned long long)dev->n_sectors);
4020 /* restore original n_sectors */
4021 dev->n_sectors = n_sectors;
4030 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4034 struct ata_blacklist_entry {
4035 const char *model_num;
4036 const char *model_rev;
4037 unsigned long horkage;
4040 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4041 /* Devices with DMA related problems under Linux */
4042 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4043 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4044 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4045 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4046 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4047 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4048 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4049 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4050 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4051 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
4052 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4053 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4054 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4055 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4056 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4057 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4058 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
4059 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4060 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4061 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4062 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4063 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4064 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4065 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4066 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4067 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4068 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4069 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4070 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4071 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4072 /* Odd clown on sil3726/4726 PMPs */
4073 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
4074 ATA_HORKAGE_SKIP_PM },
4076 /* Weird ATAPI devices */
4077 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4079 /* Devices we expect to fail diagnostics */
4081 /* Devices where NCQ should be avoided */
4083 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4084 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4085 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4086 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4088 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4089 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4090 { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
4091 { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
4092 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4093 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4095 /* Blacklist entries taken from Silicon Image 3124/3132
4096 Windows driver .inf file - also several Linux problem reports */
4097 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4098 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4099 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4101 /* devices which puke on READ_NATIVE_MAX */
4102 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4103 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4104 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4105 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4107 /* Devices which report 1 sector over size HPA */
4108 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4109 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4111 /* Devices which get the IVB wrong */
4112 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4113 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
4114 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
4115 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4116 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
4122 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4128 * check for trailing wildcard: *\0
4130 p = strchr(patt, wildchar);
4131 if (p && ((*(p + 1)) == 0))
4142 return strncmp(patt, name, len);
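/*
 * For illustration: with a trailing wildcard the compare covers only
 * the prefix before the '*', so a blacklist pattern like "Maxtor *"
 * matches any model string beginning with "Maxtor ".  Patterns
 * without a trailing wildcard are compared against the whole name.
 */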
4145 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4147 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4148 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4149 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4151 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4152 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4154 while (ad->model_num) {
4155 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4156 if (ad->model_rev == NULL)
4158 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4166 static int ata_dma_blacklisted(const struct ata_device *dev)
4168 /* We don't support polling DMA.
4169 * Blacklist DMA for those ATAPI devices with CDB-intr (and use PIO)
4170 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4172 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4173 (dev->flags & ATA_DFLAG_CDB_INTR))
4175 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4179 * ata_is_40wire - check drive side detection
4182 * Perform drive side detection decoding, allowing for device vendors
4183 * who can't follow the documentation.
4186 static int ata_is_40wire(struct ata_device *dev)
4188 if (dev->horkage & ATA_HORKAGE_IVB)
4189 return ata_drive_40wire_relaxed(dev->id);
4190 return ata_drive_40wire(dev->id);
4194 * ata_dev_xfermask - Compute supported xfermask of the given device
4195 * @dev: Device to compute xfermask for
4197 * Compute supported xfermask of @dev and store it in
4198 * dev->*_mask. This function is responsible for applying all
4199 * known limits including host controller limits, device
4205 static void ata_dev_xfermask(struct ata_device *dev)
4207 struct ata_link *link = dev->link;
4208 struct ata_port *ap = link->ap;
4209 struct ata_host *host = ap->host;
4210 unsigned long xfer_mask;
4212 /* controller modes available */
4213 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4214 ap->mwdma_mask, ap->udma_mask);
4216 /* drive modes available */
4217 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4218 dev->mwdma_mask, dev->udma_mask);
4219 xfer_mask &= ata_id_xfermask(dev->id);
4222 * CFA Advanced TrueIDE timings are not allowed on a shared
4225 if (ata_dev_pair(dev)) {
4226 /* No PIO5 or PIO6 */
4227 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4228 /* No MWDMA3 or MWDMA 4 */
4229 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4232 if (ata_dma_blacklisted(dev)) {
4233 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4234 ata_dev_printk(dev, KERN_WARNING,
4235 "device is on DMA blacklist, disabling DMA\n");
4238 if ((host->flags & ATA_HOST_SIMPLEX) &&
4239 host->simplex_claimed && host->simplex_claimed != ap) {
4240 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4241 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4242 "other device, disabling DMA\n");
4245 if (ap->flags & ATA_FLAG_NO_IORDY)
4246 xfer_mask &= ata_pio_mask_no_iordy(dev);
4248 if (ap->ops->mode_filter)
4249 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4251 /* Apply cable rule here. Don't apply it early because when
4252 * we handle hot plug the cable type can itself change.
4253 * Check this last so that we know if the transfer rate was
4254 * solely limited by the cable.
4255 * Unknown or 80 wire cables reported host side are checked
4256 * drive side as well. Cases where we know a 40wire cable
4257 * is used safely for 80 are not checked here.
4259 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4260 /* UDMA/44 or higher would be available */
4261 if ((ap->cbl == ATA_CBL_PATA40) ||
4262 (ata_is_40wire(dev) &&
4263 (ap->cbl == ATA_CBL_PATA_UNK ||
4264 ap->cbl == ATA_CBL_PATA80))) {
4265 ata_dev_printk(dev, KERN_WARNING,
4266 "limited to UDMA/33 due to 40-wire cable\n");
4267 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4270 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4271 &dev->mwdma_mask, &dev->udma_mask);
4275 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4276 * @dev: Device to which command will be sent
4278 * Issue SET FEATURES - XFER MODE command to device @dev
4282 * PCI/etc. bus probe sem.
4285 * 0 on success, AC_ERR_* mask otherwise.
4288 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4290 struct ata_taskfile tf;
4291 unsigned int err_mask;
4293 /* set up set-features taskfile */
4294 DPRINTK("set features - xfer mode\n");
4296 /* Some controllers and ATAPI devices show flaky interrupt
4297 * behavior after setting xfer mode. Use polling instead.
4299 ata_tf_init(dev, &tf);
4300 tf.command = ATA_CMD_SET_FEATURES;
4301 tf.feature = SETFEATURES_XFER;
4302 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4303 tf.protocol = ATA_PROT_NODATA;
4304 /* If we are using IORDY we must send the mode setting command */
4305 if (ata_pio_need_iordy(dev))
4306 tf.nsect = dev->xfer_mode;
4307 /* If the device has IORDY and the controller does not - turn it off */
4308 else if (ata_id_has_iordy(dev->id))
4310 else /* In the ancient relic department - skip all of this */
4313 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4315 DPRINTK("EXIT, err_mask=%x\n", err_mask);
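/*
 * For illustration: the XFER subcommand carries the desired mode in
 * the sector count register, so selecting UDMA/100 corresponds to
 * tf.nsect = XFER_UDMA_5 (0x45 in the standard ATA transfer mode
 * encoding).
 */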
4319 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4320 * @dev: Device to which command will be sent
4321 * @enable: Whether to enable or disable the feature
4322 * @feature: The feature to set, passed in the sector count register
4324 * Issue SET FEATURES - SATA FEATURES command to device @dev
4325 * on port @ap with sector count
4328 * PCI/etc. bus probe sem.
4331 * 0 on success, AC_ERR_* mask otherwise.
4333 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4336 struct ata_taskfile tf;
4337 unsigned int err_mask;
4339 /* set up set-features taskfile */
4340 DPRINTK("set features - SATA features\n");
4342 ata_tf_init(dev, &tf);
4343 tf.command = ATA_CMD_SET_FEATURES;
4344 tf.feature = enable;
4345 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4346 tf.protocol = ATA_PROT_NODATA;
4349 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4351 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4356 * ata_dev_init_params - Issue INIT DEV PARAMS command
4357 * @dev: Device to which command will be sent
4358 * @heads: Number of heads (taskfile parameter)
4359 * @sectors: Number of sectors (taskfile parameter)
4362 * Kernel thread context (may sleep)
4365 * 0 on success, AC_ERR_* mask otherwise.
4367 static unsigned int ata_dev_init_params(struct ata_device *dev,
4368 u16 heads, u16 sectors)
4370 struct ata_taskfile tf;
4371 unsigned int err_mask;
4373 /* Number of sectors per track 1-255. Number of heads 1-16 */
4374 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4375 return AC_ERR_INVALID;
4377 /* set up init dev params taskfile */
4378 DPRINTK("init dev params \n");
4380 ata_tf_init(dev, &tf);
4381 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4382 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4383 tf.protocol = ATA_PROT_NODATA;
4385 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4387 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4388 /* A clean abort indicates an original or just out-of-spec drive,
4389 and we should continue as we issue the setup based on the
4390 drive-reported working geometry */
4391 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4394 DPRINTK("EXIT, err_mask=%x\n", err_mask);
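/*
 * For illustration: with the classic 16-head, 63-sectors-per-track
 * translation, the taskfile above carries (16 - 1) == 0x0f in the low
 * nibble of the device register and 63 in the sector count register,
 * as INITIALIZE DEVICE PARAMETERS is defined in the ATA standard.
 */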
4399 * ata_sg_clean - Unmap DMA memory associated with command
4400 * @qc: Command containing DMA memory to be released
4402 * Unmap all mapped DMA memory associated with this command.
4405 * spin_lock_irqsave(host lock)
4407 void ata_sg_clean(struct ata_queued_cmd *qc)
4409 struct ata_port *ap = qc->ap;
4410 struct scatterlist *sg = qc->__sg;
4411 int dir = qc->dma_dir;
4412 void *pad_buf = NULL;
4414 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4415 WARN_ON(sg == NULL);
4417 if (qc->flags & ATA_QCFLAG_SINGLE)
4418 WARN_ON(qc->n_elem > 1);
4420 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4422 /* if we padded the buffer out to a 32-bit boundary, and data
4423 * xfer direction is from-device, we must copy from the
4424 * pad buffer back into the supplied buffer
4426 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4427 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4429 if (qc->flags & ATA_QCFLAG_SG) {
4431 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4432 /* restore last sg */
4433 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
4435 struct scatterlist *psg = &qc->pad_sgent;
4436 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4437 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4438 kunmap_atomic(addr, KM_IRQ0);
4442 dma_unmap_single(ap->dev,
4443 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4446 sg->length += qc->pad_len;
4448 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4449 pad_buf, qc->pad_len);
4452 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4457 * ata_fill_sg - Fill PCI IDE PRD table
4458 * @qc: Metadata associated with taskfile to be transferred
4460 * Fill PCI IDE PRD (scatter-gather) table with segments
4461 * associated with the current disk command.
4464 * spin_lock_irqsave(host lock)
4467 static void ata_fill_sg(struct ata_queued_cmd *qc)
4469 struct ata_port *ap = qc->ap;
4470 struct scatterlist *sg;
4473 WARN_ON(qc->__sg == NULL);
4474 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4477 ata_for_each_sg(sg, qc) {
4481 /* determine if physical DMA addr spans 64K boundary.
4482 * Note h/w doesn't support 64-bit, so we unconditionally
4483 * truncate dma_addr_t to u32.
4485 addr = (u32) sg_dma_address(sg);
4486 sg_len = sg_dma_len(sg);
4489 offset = addr & 0xffff;
4491 if ((offset + sg_len) > 0x10000)
4492 len = 0x10000 - offset;
4494 ap->prd[idx].addr = cpu_to_le32(addr);
4495 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4496 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4505 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
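/*
 * For illustration: an S/G element at DMA address 0x1fff0 with a
 * length of 0x20 bytes sits 0xfff0 into its 64K region, so the first
 * PRD entry covers only 0x10000 - 0xfff0 = 0x10 bytes and the
 * remaining 0x10 bytes are emitted as a second entry starting at
 * 0x20000.  No single PRD entry ever crosses a 64K boundary.
 */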
4509 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4510 * @qc: Metadata associated with taskfile to be transferred
4512 * Fill PCI IDE PRD (scatter-gather) table with segments
4513 * associated with the current disk command. Perform the fill
4514 * so that we avoid writing any 64K-length records for
4515 * controllers that don't follow the spec.
4518 * spin_lock_irqsave(host lock)
4521 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4523 struct ata_port *ap = qc->ap;
4524 struct scatterlist *sg;
4527 WARN_ON(qc->__sg == NULL);
4528 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4531 ata_for_each_sg(sg, qc) {
4533 u32 sg_len, len, blen;
4535 /* determine if physical DMA addr spans 64K boundary.
4536 * Note h/w doesn't support 64-bit, so we unconditionally
4537 * truncate dma_addr_t to u32.
4539 addr = (u32) sg_dma_address(sg);
4540 sg_len = sg_dma_len(sg);
4543 offset = addr & 0xffff;
4545 if ((offset + sg_len) > 0x10000)
4546 len = 0x10000 - offset;
4548 blen = len & 0xffff;
4549 ap->prd[idx].addr = cpu_to_le32(addr);
4551 /* Some PATA chipsets like the CS5530 can't
4552 cope with 0x0000 meaning 64K as the spec says */
4553 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4555 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4557 ap->prd[idx].flags_len = cpu_to_le32(blen);
4558 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4567 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
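/*
 * For illustration: a full 64K region, which the spec would encode as
 * a length field of 0x0000, is instead written as two 0x8000-byte PRD
 * entries, so chipsets like the CS5530 that mishandle the
 * 0x0000-means-64K convention only ever see non-zero lengths.
 */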
4571 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4572 * @qc: Metadata associated with taskfile to check
4574 * Allow low-level driver to filter ATA PACKET commands, returning
4575 * a status indicating whether or not it is OK to use DMA for the
4576 * supplied PACKET command.
4579 * spin_lock_irqsave(host lock)
4581 * RETURNS: 0 when ATAPI DMA can be used
4584 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4586 struct ata_port *ap = qc->ap;
4588 /* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
4589 * few ATAPI devices choke on such DMA requests.
4591 if (unlikely(qc->nbytes & 15))
4594 if (ap->ops->check_atapi_dma)
4595 return ap->ops->check_atapi_dma(qc);
4601 * atapi_qc_may_overflow - Check whether data transfer may overflow
4602 * @qc: ATA command in question
4604 * ATAPI commands which transfer variable length data to the host
4605 * might overflow due to application error or hardware bug. This
4606 * function checks whether overflow should be drained and ignored
4613 * 1 if @qc may overflow; otherwise, 0.
4615 static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
4617 if (qc->tf.protocol != ATA_PROT_ATAPI &&
4618 qc->tf.protocol != ATA_PROT_ATAPI_DMA)
4621 if (qc->tf.flags & ATA_TFLAG_WRITE)
4624 switch (qc->cdb[0]) {
4630 case GPCMD_READ_CD_MSF:
4638 * ata_std_qc_defer - Check whether a qc needs to be deferred
4639 * @qc: ATA command in question
4641 * Non-NCQ commands cannot run with any other command, NCQ or
4642 * not. As the upper layer only knows the queue depth, we are
4643 * responsible for maintaining exclusion. This function checks
4644 * whether a new command @qc can be issued.
4647 * spin_lock_irqsave(host lock)
4650 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4652 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4654 struct ata_link *link = qc->dev->link;
4656 if (qc->tf.protocol == ATA_PROT_NCQ) {
4657 if (!ata_tag_valid(link->active_tag))
4660 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4664 return ATA_DEFER_LINK;
4668 * ata_qc_prep - Prepare taskfile for submission
4669 * @qc: Metadata associated with taskfile to be prepared
4671 * Prepare ATA taskfile for submission.
4674 * spin_lock_irqsave(host lock)
4676 void ata_qc_prep(struct ata_queued_cmd *qc)
4678 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4685 * ata_dumb_qc_prep - Prepare taskfile for submission
4686 * @qc: Metadata associated with taskfile to be prepared
4688 * Prepare ATA taskfile for submission.
4691 * spin_lock_irqsave(host lock)
4693 void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4695 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4698 ata_fill_sg_dumb(qc);
4701 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4704 * ata_sg_init_one - Associate command with memory buffer
4705 * @qc: Command to be associated
4706 * @buf: Memory buffer
4707 * @buflen: Length of memory buffer, in bytes.
4709 * Initialize the data-related elements of queued_cmd @qc
4710 * to point to a single memory buffer, @buf of byte length @buflen.
4713 * spin_lock_irqsave(host lock)
4716 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4718 qc->flags |= ATA_QCFLAG_SINGLE;
4720 qc->__sg = &qc->sgent;
4722 qc->orig_n_elem = 1;
4724 qc->nbytes = buflen;
4725 qc->cursg = qc->__sg;
4727 sg_init_one(&qc->sgent, buf, buflen);
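/*
 * Example (illustrative sketch, hypothetical helper, error handling
 * omitted): setting up a command that transfers a single kernel buffer.
 * The caller would go on to fill qc->tf and issue the command.
 */
static void example_setup_one_buffer(struct ata_device *dev, void *buf,
				     unsigned int len)
{
	struct ata_queued_cmd *qc = ata_qc_new_init(dev);

	if (!qc)
		return;

	ata_sg_init_one(qc, buf, len);	/* wraps buf in qc->sgent */
	qc->dma_dir = DMA_FROM_DEVICE;	/* device-to-host transfer */
}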
4731 * ata_sg_init - Associate command with scatter-gather table.
4732 * @qc: Command to be associated
4733 * @sg: Scatter-gather table.
4734 * @n_elem: Number of elements in s/g table.
4736 * Initialize the data-related elements of queued_cmd @qc
4737 * to point to a scatter-gather table @sg, containing @n_elem elements.
4741 * spin_lock_irqsave(host lock)
4744 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4745 unsigned int n_elem)
4747 qc->flags |= ATA_QCFLAG_SG;
4749 qc->n_elem = n_elem;
4750 qc->orig_n_elem = n_elem;
4751 qc->cursg = qc->__sg;
4755 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4756 * @qc: Command with memory buffer to be mapped.
4758 * DMA-map the memory buffer associated with queued_cmd @qc.
4761 * spin_lock_irqsave(host lock)
4764 * Zero on success, negative on error.
4767 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4769 struct ata_port *ap = qc->ap;
4770 int dir = qc->dma_dir;
4771 struct scatterlist *sg = qc->__sg;
4772 dma_addr_t dma_address;
4775 /* we must lengthen transfers to end on a 32-bit boundary */
4776 qc->pad_len = sg->length & 3;
4778 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4779 struct scatterlist *psg = &qc->pad_sgent;
4781 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4783 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4785 if (qc->tf.flags & ATA_TFLAG_WRITE)
4786 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4789 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4790 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4792 sg->length -= qc->pad_len;
4793 if (sg->length == 0)
4796 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4797 sg->length, qc->pad_len);
4805 dma_address = dma_map_single(ap->dev, qc->buf_virt,
4807 if (dma_mapping_error(dma_address)) {
4809 sg->length += qc->pad_len;
4813 sg_dma_address(sg) = dma_address;
4814 sg_dma_len(sg) = sg->length;
4817 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4818 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4824 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4825 * @qc: Command with scatter-gather table to be mapped.
4827 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4830 * spin_lock_irqsave(host lock)
4833 * Zero on success, negative on error.
4837 static int ata_sg_setup(struct ata_queued_cmd *qc)
4839 struct ata_port *ap = qc->ap;
4840 struct scatterlist *sg = qc->__sg;
4841 struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4842 int n_elem, pre_n_elem, dir, trim_sg = 0;
4844 VPRINTK("ENTER, ata%u\n", ap->print_id);
4845 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4847 /* we must lengthen transfers to end on a 32-bit boundary */
4848 qc->pad_len = lsg->length & 3;
4850 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4851 struct scatterlist *psg = &qc->pad_sgent;
4852 unsigned int offset;
4854 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4856 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4859 * psg->page/offset are used to copy to-be-written
4860 * data in this function or read data in ata_sg_clean.
4862 offset = lsg->offset + lsg->length - qc->pad_len;
4863 sg_init_table(psg, 1);
4864 sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
4865 qc->pad_len, offset_in_page(offset));
4867 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4868 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4869 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4870 kunmap_atomic(addr, KM_IRQ0);
4873 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4874 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4876 lsg->length -= qc->pad_len;
4877 if (lsg->length == 0)
4880 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4881 qc->n_elem - 1, lsg->length, qc->pad_len);
4884 pre_n_elem = qc->n_elem;
4885 if (trim_sg && pre_n_elem)
4894 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4896 /* restore last sg */
4897 lsg->length += qc->pad_len;
4901 DPRINTK("%d sg elements mapped\n", n_elem);
4904 qc->n_elem = n_elem;
4910 * swap_buf_le16 - swap halves of 16-bit words in place
4911 * @buf: Buffer to swap
4912 * @buf_words: Number of 16-bit words in buffer.
4914 * Swap halves of 16-bit words if needed to convert from
4915 * little-endian byte order to native cpu byte order, or vice-versa.
4919 * Inherited from caller.
4921 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4926 for (i = 0; i < buf_words; i++)
4927 buf[i] = le16_to_cpu(buf[i]);
4928 #endif /* __BIG_ENDIAN */
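/*
 * Example (illustrative sketch): IDENTIFY DEVICE data is defined as 256
 * little-endian 16-bit words, so code that reads it by raw PIO follows
 * up with swap_buf_le16(), which is a no-op on little-endian hosts.
 * example_fixup_id is a hypothetical name.
 */
static void example_fixup_id(struct ata_device *dev)
{
	swap_buf_le16(dev->id, ATA_ID_WORDS);	/* 256 words */
}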
4932 * ata_data_xfer - Transfer data by PIO
4933 * @adev: device to target
4935 * @buflen: buffer length
4936 * @write_data: read/write
4938 * Transfer data from/to the device data register by PIO.
4941 * Inherited from caller.
4943 void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4944 unsigned int buflen, int write_data)
4946 struct ata_port *ap = adev->link->ap;
4947 unsigned int words = buflen >> 1;
4949 /* Transfer multiple of 2 bytes */
4951 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4953 ioread16_rep(ap->ioaddr.data_addr, buf, words);
4955 /* Transfer trailing 1 byte, if any. */
4956 if (unlikely(buflen & 0x01)) {
4957 u16 align_buf[1] = { 0 };
4958 unsigned char *trailing_buf = buf + buflen - 1;
4961 memcpy(align_buf, trailing_buf, 1);
4962 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4964 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4965 memcpy(trailing_buf, align_buf, 1);
4971 * ata_data_xfer_noirq - Transfer data by PIO
4972 * @adev: device to target
4974 * @buflen: buffer length
4975 * @write_data: read/write
4977 * Transfer data from/to the device data register by PIO. Do the
4978 * transfer with interrupts disabled.
4981 * Inherited from caller.
4983 void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4984 unsigned int buflen, int write_data)
4986 unsigned long flags;
4987 local_irq_save(flags);
4988 ata_data_xfer(adev, buf, buflen, write_data);
4989 local_irq_restore(flags);
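/*
 * Example (illustrative sketch): a driver for a controller that cannot
 * tolerate interrupts in the middle of a PIO burst would point its
 * ->data_xfer hook at the _noirq variant.  example_port_ops is a
 * hypothetical initializer fragment, shown in a comment:
 *
 *	static const struct ata_port_operations example_port_ops = {
 *		...
 *		.data_xfer	= ata_data_xfer_noirq,
 *		...
 *	};
 */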
4994 * ata_pio_sector - Transfer a sector of data.
4995 * @qc: Command on going
4997 * Transfer qc->sect_size bytes of data from/to the ATA device.
5000 * Inherited from caller.
5003 static void ata_pio_sector(struct ata_queued_cmd *qc)
5005 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5006 struct ata_port *ap = qc->ap;
5008 unsigned int offset;
5011 if (qc->curbytes == qc->nbytes - qc->sect_size)
5012 ap->hsm_task_state = HSM_ST_LAST;
5014 page = sg_page(qc->cursg);
5015 offset = qc->cursg->offset + qc->cursg_ofs;
5017 /* get the current page and offset */
5018 page = nth_page(page, (offset >> PAGE_SHIFT));
5019 offset %= PAGE_SIZE;
5021 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5023 if (PageHighMem(page)) {
5024 unsigned long flags;
5026 /* FIXME: use a bounce buffer */
5027 local_irq_save(flags);
5028 buf = kmap_atomic(page, KM_IRQ0);
5030 /* do the actual data transfer */
5031 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5033 kunmap_atomic(buf, KM_IRQ0);
5034 local_irq_restore(flags);
5036 buf = page_address(page);
5037 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5040 qc->curbytes += qc->sect_size;
5041 qc->cursg_ofs += qc->sect_size;
5043 if (qc->cursg_ofs == qc->cursg->length) {
5044 qc->cursg = sg_next(qc->cursg);
5050 * ata_pio_sectors - Transfer one or many sectors.
5051 * @qc: Command on going
5053 * Transfer one or many sectors of data from/to the
5054 * ATA device for the DRQ request.
5057 * Inherited from caller.
5060 static void ata_pio_sectors(struct ata_queued_cmd *qc)
5062 if (is_multi_taskfile(&qc->tf)) {
5063 /* READ/WRITE MULTIPLE */
5066 WARN_ON(qc->dev->multi_count == 0);
5068 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
5069 qc->dev->multi_count);
5075 ata_altstatus(qc->ap); /* flush */
5079 * atapi_send_cdb - Write CDB bytes to hardware
5080 * @ap: Port to which ATAPI device is attached.
5081 * @qc: Taskfile currently active
5083 * When the device has indicated its readiness to accept
5084 * a CDB, this function is called. Send the CDB.
5090 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5093 DPRINTK("send cdb\n");
5094 WARN_ON(qc->dev->cdb_len < 12);
5096 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
5097 ata_altstatus(ap); /* flush */
5099 switch (qc->tf.protocol) {
5100 case ATA_PROT_ATAPI:
5101 ap->hsm_task_state = HSM_ST;
5103 case ATA_PROT_ATAPI_NODATA:
5104 ap->hsm_task_state = HSM_ST_LAST;
5106 case ATA_PROT_ATAPI_DMA:
5107 ap->hsm_task_state = HSM_ST_LAST;
5108 /* initiate bmdma */
5109 ap->ops->bmdma_start(qc);
5115 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
5116 * @qc: Command on going
5117 * @bytes: number of bytes
5119 * Transfer data from/to the ATAPI device.
5122 * Inherited from caller.
5125 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
5127 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5128 struct ata_port *ap = qc->ap;
5129 struct ata_eh_info *ehi = &qc->dev->link->eh_info;
5130 struct scatterlist *sg;
5133 unsigned int offset, count;
5137 if (unlikely(!sg)) {
5139 * The end of qc->sg is reached and the device expects
5140 * more data to transfer. In order not to overrun qc->sg
5141 * and to fulfill the length specified in the byte count register:
5142 * - for the read case, discard trailing data from the device
5143 * - for the write case, pad the device with zero data
5145 u16 pad_buf[1] = { 0 };
5148 if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
5149 ata_ehi_push_desc(ehi, "too much trailing data "
5150 "buf=%u cur=%u bytes=%u",
5151 qc->nbytes, qc->curbytes, bytes);
5155 /* overflow is expected for misc ATAPI commands */
5156 if (bytes && !atapi_qc_may_overflow(qc))
5157 ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
5158 "trailing data (cdb=%02x nbytes=%u)\n",
5159 bytes, qc->cdb[0], qc->nbytes);
5161 for (i = 0; i < (bytes + 1) / 2; i++)
5162 ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
5164 qc->curbytes += bytes;
5170 offset = sg->offset + qc->cursg_ofs;
5172 /* get the current page and offset */
5173 page = nth_page(page, (offset >> PAGE_SHIFT));
5174 offset %= PAGE_SIZE;
5176 /* don't overrun current sg */
5177 count = min(sg->length - qc->cursg_ofs, bytes);
5179 /* don't cross page boundaries */
5180 count = min(count, (unsigned int)PAGE_SIZE - offset);
5182 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5184 if (PageHighMem(page)) {
5185 unsigned long flags;
5187 /* FIXME: use bounce buffer */
5188 local_irq_save(flags);
5189 buf = kmap_atomic(page, KM_IRQ0);
5191 /* do the actual data transfer */
5192 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
5194 kunmap_atomic(buf, KM_IRQ0);
5195 local_irq_restore(flags);
5197 buf = page_address(page);
5198 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
5202 if ((count & 1) && bytes)
5204 qc->curbytes += count;
5205 qc->cursg_ofs += count;
5207 if (qc->cursg_ofs == sg->length) {
5208 qc->cursg = sg_next(qc->cursg);
5219 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
5220 * @qc: Command on going
5222 * Transfer data from/to the ATAPI device.
5225 * Inherited from caller.
5228 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5230 struct ata_port *ap = qc->ap;
5231 struct ata_device *dev = qc->dev;
5232 unsigned int ireason, bc_lo, bc_hi, bytes;
5233 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5235 /* Abuse qc->result_tf for temp storage of intermediate TF
5236 * here to save some kernel stack usage.
5237 * For normal completion, qc->result_tf is not relevant. For
5238 * error, qc->result_tf is later overwritten by ata_qc_complete().
5239 * So, the correctness of qc->result_tf is not affected.
5241 ap->ops->tf_read(ap, &qc->result_tf);
5242 ireason = qc->result_tf.nsect;
5243 bc_lo = qc->result_tf.lbam;
5244 bc_hi = qc->result_tf.lbah;
5245 bytes = (bc_hi << 8) | bc_lo;
5247 /* shall be cleared to zero, indicating xfer of data */
5248 if (ireason & (1 << 0))
5251 /* make sure transfer direction matches expected */
5252 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5253 if (do_write != i_write)
5256 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5258 if (__atapi_pio_bytes(qc, bytes))
5260 ata_altstatus(ap); /* flush */
5265 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
5266 qc->err_mask |= AC_ERR_HSM;
5267 ap->hsm_task_state = HSM_ST_ERR;
5271 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5272 * @ap: the target ata_port
5276 * 1 if ok in workqueue, 0 otherwise.
5279 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5281 if (qc->tf.flags & ATA_TFLAG_POLLING)
5284 if (ap->hsm_task_state == HSM_ST_FIRST) {
5285 if (qc->tf.protocol == ATA_PROT_PIO &&
5286 (qc->tf.flags & ATA_TFLAG_WRITE))
5289 if (ata_is_atapi(qc->tf.protocol) &&
5290 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5298 * ata_hsm_qc_complete - finish a qc running on standard HSM
5299 * @qc: Command to complete
5300 * @in_wq: 1 if called from workqueue, 0 otherwise
5302 * Finish @qc which is running on standard HSM.
5305 * If @in_wq is zero, spin_lock_irqsave(host lock).
5306 * Otherwise, none on entry and grabs host lock.
5308 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5310 struct ata_port *ap = qc->ap;
5311 unsigned long flags;
5313 if (ap->ops->error_handler) {
5315 spin_lock_irqsave(ap->lock, flags);
5317 /* EH might have kicked in while host lock is
5320 qc = ata_qc_from_tag(ap, qc->tag);
5322 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
5323 ap->ops->irq_on(ap);
5324 ata_qc_complete(qc);
5326 ata_port_freeze(ap);
5329 spin_unlock_irqrestore(ap->lock, flags);
5331 if (likely(!(qc->err_mask & AC_ERR_HSM)))
5332 ata_qc_complete(qc);
5334 ata_port_freeze(ap);
5338 spin_lock_irqsave(ap->lock, flags);
5339 ap->ops->irq_on(ap);
5340 ata_qc_complete(qc);
5341 spin_unlock_irqrestore(ap->lock, flags);
5343 ata_qc_complete(qc);
5348 * ata_hsm_move - move the HSM to the next state.
5349 * @ap: the target ata_port
5351 * @status: current device status
5352 * @in_wq: 1 if called from workqueue, 0 otherwise
5355 * 1 when poll next status needed, 0 otherwise.
5357 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5358 u8 status, int in_wq)
5360 unsigned long flags = 0;
5363 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5365 /* Make sure ata_qc_issue_prot() does not throw things
5366 * like DMA polling into the workqueue. Notice that
5367 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5369 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5372 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
5373 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5375 switch (ap->hsm_task_state) {
5377 /* Send first data block or PACKET CDB */
5379 /* If polling, we will stay in the work queue after
5380 * sending the data. Otherwise, interrupt handler
5381 * takes over after sending the data.
5383 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5385 /* check device status */
5386 if (unlikely((status & ATA_DRQ) == 0)) {
5387 /* handle BSY=0, DRQ=0 as error */
5388 if (likely(status & (ATA_ERR | ATA_DF)))
5389 /* device stops HSM for abort/error */
5390 qc->err_mask |= AC_ERR_DEV;
5392 /* HSM violation. Let EH handle this */
5393 qc->err_mask |= AC_ERR_HSM;
5395 ap->hsm_task_state = HSM_ST_ERR;
5399 /* Device should not ask for data transfer (DRQ=1)
5400 * when it finds something wrong.
5401 * We ignore DRQ here and stop the HSM by
5402 * changing hsm_task_state to HSM_ST_ERR and
5403 * let the EH abort the command or reset the device.
5405 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5406 /* Some ATAPI tape drives forget to clear the ERR bit
5407 * when doing the next command (mostly request sense).
5408 * We ignore ERR here as a workaround and proceed with sending the CDB.
5411 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
5412 ata_port_printk(ap, KERN_WARNING,
5413 "DRQ=1 with device error, "
5414 "dev_stat 0x%X\n", status);
5415 qc->err_mask |= AC_ERR_HSM;
5416 ap->hsm_task_state = HSM_ST_ERR;
5421 /* Send the CDB (atapi) or the first data block (ata pio out).
5422 * During the state transition, interrupt handler shouldn't
5423 * be invoked before the data transfer is complete and
5424 * hsm_task_state is changed. Hence, the following locking.
5427 spin_lock_irqsave(ap->lock, flags);
5429 if (qc->tf.protocol == ATA_PROT_PIO) {
5430 /* PIO data out protocol.
5431 * send first data block.
5434 /* ata_pio_sectors() might change the state
5435 * to HSM_ST_LAST. so, the state is changed here
5436 * before ata_pio_sectors().
5438 ap->hsm_task_state = HSM_ST;
5439 ata_pio_sectors(qc);
5442 atapi_send_cdb(ap, qc);
5445 spin_unlock_irqrestore(ap->lock, flags);
5447 /* if polling, ata_pio_task() handles the rest.
5448 * otherwise, interrupt handler takes over from here.
5453 /* complete command or read/write the data register */
5454 if (qc->tf.protocol == ATA_PROT_ATAPI) {
5455 /* ATAPI PIO protocol */
5456 if ((status & ATA_DRQ) == 0) {
5457 /* No more data to transfer or device error.
5458 * Device error will be tagged in HSM_ST_LAST.
5460 ap->hsm_task_state = HSM_ST_LAST;
5464 /* Device should not ask for data transfer (DRQ=1)
5465 * when it finds something wrong.
5466 * We ignore DRQ here and stop the HSM by
5467 * changing hsm_task_state to HSM_ST_ERR and
5468 * let the EH abort the command or reset the device.
5470 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5471 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5472 "device error, dev_stat 0x%X\n",
5474 qc->err_mask |= AC_ERR_HSM;
5475 ap->hsm_task_state = HSM_ST_ERR;
5479 atapi_pio_bytes(qc);
5481 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5482 /* bad ireason reported by device */
5486 /* ATA PIO protocol */
5487 if (unlikely((status & ATA_DRQ) == 0)) {
5488 /* handle BSY=0, DRQ=0 as error */
5489 if (likely(status & (ATA_ERR | ATA_DF)))
5490 /* device stops HSM for abort/error */
5491 qc->err_mask |= AC_ERR_DEV;
5493 /* HSM violation. Let EH handle this.
5494 * Phantom devices also trigger this
5495 * condition. Mark hint.
5497 qc->err_mask |= AC_ERR_HSM |
5500 ap->hsm_task_state = HSM_ST_ERR;
5504 /* For PIO reads, some devices may ask for
5505 * data transfer (DRQ=1) along with ERR=1.
5506 * We respect DRQ here and transfer one
5507 * block of junk data before changing the
5508 * hsm_task_state to HSM_ST_ERR.
5510 * For PIO writes, ERR=1 DRQ=1 doesn't make
5511 * sense since the data block has been
5512 * transferred to the device.
5514 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5515 /* data might be corrupted */
5516 qc->err_mask |= AC_ERR_DEV;
5518 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5519 ata_pio_sectors(qc);
5520 status = ata_wait_idle(ap);
5523 if (status & (ATA_BUSY | ATA_DRQ))
5524 qc->err_mask |= AC_ERR_HSM;
5526 /* ata_pio_sectors() might change the
5527 * state to HSM_ST_LAST. so, the state
5528 * is changed after ata_pio_sectors().
5530 ap->hsm_task_state = HSM_ST_ERR;
5534 ata_pio_sectors(qc);
5536 if (ap->hsm_task_state == HSM_ST_LAST &&
5537 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5539 status = ata_wait_idle(ap);
5548 if (unlikely(!ata_ok(status))) {
5549 qc->err_mask |= __ac_err_mask(status);
5550 ap->hsm_task_state = HSM_ST_ERR;
5554 /* no more data to transfer */
5555 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5556 ap->print_id, qc->dev->devno, status);
5558 WARN_ON(qc->err_mask);
5560 ap->hsm_task_state = HSM_ST_IDLE;
5562 /* complete taskfile transaction */
5563 ata_hsm_qc_complete(qc, in_wq);
5569 /* make sure qc->err_mask is available to
5570 * know what's wrong and recover
5572 WARN_ON(qc->err_mask == 0);
5574 ap->hsm_task_state = HSM_ST_IDLE;
5576 /* complete taskfile transaction */
5577 ata_hsm_qc_complete(qc, in_wq);
5589 static void ata_pio_task(struct work_struct *work)
5591 struct ata_port *ap =
5592 container_of(work, struct ata_port, port_task.work);
5593 struct ata_queued_cmd *qc = ap->port_task_data;
5598 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5601 * This is purely heuristic. This is a fast path.
5602 * Sometimes when we enter, BSY will be cleared in
5603 * a chk-status or two. If not, the drive is probably seeking
5604 * or something. Snooze for a couple msecs, then
5605 * chk-status again. If still busy, queue delayed work.
5607 status = ata_busy_wait(ap, ATA_BUSY, 5);
5608 if (status & ATA_BUSY) {
5610 status = ata_busy_wait(ap, ATA_BUSY, 10);
5611 if (status & ATA_BUSY) {
5612 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5618 poll_next = ata_hsm_move(ap, qc, status, 1);
5620 /* another command or interrupt handler
5621 * may be running at this point.
5628 * ata_qc_new - Request an available ATA command, for queueing
5629 * @ap: Port from which a free command slot is requested
5636 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5638 struct ata_queued_cmd *qc = NULL;
5641 /* no command while frozen */
5642 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5645 /* the last tag is reserved for internal command. */
5646 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5647 if (!test_and_set_bit(i, &ap->qc_allocated)) {
5648 qc = __ata_qc_from_tag(ap, i);
5659 * ata_qc_new_init - Request an available ATA command, and initialize it
5660 * @dev: Device from whom we request an available command structure
5666 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5668 struct ata_port *ap = dev->link->ap;
5669 struct ata_queued_cmd *qc;
5671 qc = ata_qc_new(ap);
5684 * ata_qc_free - free unused ata_queued_cmd
5685 * @qc: Command to free
5687 * Designed to free unused ata_queued_cmd object
5688 * in case something prevents using it.
5691 * spin_lock_irqsave(host lock)
5693 void ata_qc_free(struct ata_queued_cmd *qc)
5695 struct ata_port *ap = qc->ap;
5698 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5702 if (likely(ata_tag_valid(tag))) {
5703 qc->tag = ATA_TAG_POISON;
5704 clear_bit(tag, &ap->qc_allocated);
5708 void __ata_qc_complete(struct ata_queued_cmd *qc)
5710 struct ata_port *ap = qc->ap;
5711 struct ata_link *link = qc->dev->link;
5713 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5714 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5716 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5719 /* command should be marked inactive atomically with qc completion */
5720 if (qc->tf.protocol == ATA_PROT_NCQ) {
5721 link->sactive &= ~(1 << qc->tag);
5723 ap->nr_active_links--;
5725 link->active_tag = ATA_TAG_POISON;
5726 ap->nr_active_links--;
5729 /* clear exclusive status */
5730 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5731 ap->excl_link == link))
5732 ap->excl_link = NULL;
5734 /* atapi: mark qc as inactive to prevent the interrupt handler
5735 * from completing the command twice later, before the error handler
5736 * is called. (when rc != 0 and atapi request sense is needed)
5738 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5739 ap->qc_active &= ~(1 << qc->tag);
5741 /* call completion callback */
5742 qc->complete_fn(qc);
5745 static void fill_result_tf(struct ata_queued_cmd *qc)
5747 struct ata_port *ap = qc->ap;
5749 qc->result_tf.flags = qc->tf.flags;
5750 ap->ops->tf_read(ap, &qc->result_tf);
5753 static void ata_verify_xfer(struct ata_queued_cmd *qc)
5755 struct ata_device *dev = qc->dev;
5757 if (ata_tag_internal(qc->tag))
5760 if (ata_is_nodata(qc->tf.protocol))
5763 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5766 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5770 * ata_qc_complete - Complete an active ATA command
5771 * @qc: Command to complete
5774 * Indicate to the mid and upper layers that an ATA
5775 * command has completed, with either an ok or not-ok status.
5778 * spin_lock_irqsave(host lock)
5780 void ata_qc_complete(struct ata_queued_cmd *qc)
5782 struct ata_port *ap = qc->ap;
5784 /* XXX: New EH and old EH use different mechanisms to
5785 * synchronize EH with regular execution path.
5787 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5788 * Normal execution path is responsible for not accessing a
5789 * failed qc. libata core enforces the rule by returning NULL
5790 * from ata_qc_from_tag() for failed qcs.
5792 * Old EH depends on ata_qc_complete() nullifying completion
5793 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5794 * not synchronize with interrupt handler. Only PIO task is
5797 if (ap->ops->error_handler) {
5798 struct ata_device *dev = qc->dev;
5799 struct ata_eh_info *ehi = &dev->link->eh_info;
5801 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5803 if (unlikely(qc->err_mask))
5804 qc->flags |= ATA_QCFLAG_FAILED;
5806 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5807 if (!ata_tag_internal(qc->tag)) {
5808 /* always fill result TF for failed qc */
5810 ata_qc_schedule_eh(qc);
5815 /* read result TF if requested */
5816 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5819 /* Some commands need post-processing after successful completion. */
5822 switch (qc->tf.command) {
5823 case ATA_CMD_SET_FEATURES:
5824 if (qc->tf.feature != SETFEATURES_WC_ON &&
5825 qc->tf.feature != SETFEATURES_WC_OFF)
5828 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5829 case ATA_CMD_SET_MULTI: /* multi_count changed */
5830 /* revalidate device */
5831 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5832 ata_port_schedule_eh(ap);
5836 dev->flags |= ATA_DFLAG_SLEEPING;
5840 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5841 ata_verify_xfer(qc);
5843 __ata_qc_complete(qc);
5845 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5848 /* read result TF if failed or requested */
5849 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5852 __ata_qc_complete(qc);
5857 * ata_qc_complete_multiple - Complete multiple qcs successfully
5858 * @ap: port in question
5859 * @qc_active: new qc_active mask
5860 * @finish_qc: LLDD callback invoked before completing a qc
5862 * Complete in-flight commands. This function is meant to be
5863 * called from low-level driver's interrupt routine to complete
5864 * requests normally. ap->qc_active and @qc_active are compared
5865 * and commands are completed accordingly.
5868 * spin_lock_irqsave(host lock)
5871 * Number of completed commands on success, -errno otherwise.
5873 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5874 void (*finish_qc)(struct ata_queued_cmd *))
5880 done_mask = ap->qc_active ^ qc_active;
5882 if (unlikely(done_mask & qc_active)) {
5883 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5884 "(%08x->%08x)\n", ap->qc_active, qc_active);
5888 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5889 struct ata_queued_cmd *qc;
5891 if (!(done_mask & (1 << i)))
5894 if ((qc = ata_qc_from_tag(ap, i))) {
5897 ata_qc_complete(qc);
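/*
 * Example (illustrative sketch, hypothetical names): a native-queuing
 * LLD's interrupt path reads the controller's "tags still in flight"
 * mask and hands it to ata_qc_complete_multiple(), which completes every
 * tag that has dropped out of ap->qc_active.  How hw_active_tags is
 * obtained is controller specific and not shown here.
 */
static void example_ncq_irq(struct ata_port *ap, u32 hw_active_tags)
{
	ata_qc_complete_multiple(ap, hw_active_tags, NULL);
}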
5906 * ata_qc_issue - issue taskfile to device
5907 * @qc: command to issue to device
5909 * Prepare an ATA command for submission to the device.
5910 * This includes mapping the data into a DMA-able
5911 * area, filling in the S/G table, and finally
5912 * writing the taskfile to hardware, starting the command.
5915 * spin_lock_irqsave(host lock)
5917 void ata_qc_issue(struct ata_queued_cmd *qc)
5919 struct ata_port *ap = qc->ap;
5920 struct ata_link *link = qc->dev->link;
5921 u8 prot = qc->tf.protocol;
5923 /* Make sure only one non-NCQ command is outstanding. The
5924 * check is skipped for old EH because it reuses active qc to
5925 * request ATAPI sense.
5927 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5929 if (prot == ATA_PROT_NCQ) {
5930 WARN_ON(link->sactive & (1 << qc->tag));
5933 ap->nr_active_links++;
5934 link->sactive |= 1 << qc->tag;
5936 WARN_ON(link->sactive);
5938 ap->nr_active_links++;
5939 link->active_tag = qc->tag;
5942 qc->flags |= ATA_QCFLAG_ACTIVE;
5943 ap->qc_active |= 1 << qc->tag;
5945 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5946 (ap->flags & ATA_FLAG_PIO_DMA))) {
5947 if (qc->flags & ATA_QCFLAG_SG) {
5948 if (ata_sg_setup(qc))
5950 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5951 if (ata_sg_setup_one(qc))
5955 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5958 /* if device is sleeping, schedule softreset and abort the link */
5959 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5960 link->eh_info.action |= ATA_EH_SOFTRESET;
5961 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5962 ata_link_abort(link);
5966 ap->ops->qc_prep(qc);
5968 qc->err_mask |= ap->ops->qc_issue(qc);
5969 if (unlikely(qc->err_mask))
5974 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5975 qc->err_mask |= AC_ERR_SYSTEM;
5977 ata_qc_complete(qc);
5981 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5982 * @qc: command to issue to device
5984 * Using various libata functions and hooks, this function
5985 * starts an ATA command. ATA commands are grouped into
5986 * classes called "protocols", and issuing each type of protocol
5987 * is slightly different.
5989 * May be used as the qc_issue() entry in ata_port_operations.
5992 * spin_lock_irqsave(host lock)
5995 * Zero on success, AC_ERR_* mask on failure
5998 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6000 struct ata_port *ap = qc->ap;
6002 /* Use polling pio if the LLD doesn't handle
6003 * interrupt driven pio and atapi CDB interrupt.
6005 if (ap->flags & ATA_FLAG_PIO_POLLING) {
6006 switch (qc->tf.protocol) {
6008 case ATA_PROT_NODATA:
6009 case ATA_PROT_ATAPI:
6010 case ATA_PROT_ATAPI_NODATA:
6011 qc->tf.flags |= ATA_TFLAG_POLLING;
6013 case ATA_PROT_ATAPI_DMA:
6014 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
6015 /* see ata_dma_blacklisted() */
6023 /* select the device */
6024 ata_dev_select(ap, qc->dev->devno, 1, 0);
6026 /* start the command */
6027 switch (qc->tf.protocol) {
6028 case ATA_PROT_NODATA:
6029 if (qc->tf.flags & ATA_TFLAG_POLLING)
6030 ata_qc_set_polling(qc);
6032 ata_tf_to_host(ap, &qc->tf);
6033 ap->hsm_task_state = HSM_ST_LAST;
6035 if (qc->tf.flags & ATA_TFLAG_POLLING)
6036 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6041 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6043 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6044 ap->ops->bmdma_setup(qc); /* set up bmdma */
6045 ap->ops->bmdma_start(qc); /* initiate bmdma */
6046 ap->hsm_task_state = HSM_ST_LAST;
6050 if (qc->tf.flags & ATA_TFLAG_POLLING)
6051 ata_qc_set_polling(qc);
6053 ata_tf_to_host(ap, &qc->tf);
6055 if (qc->tf.flags & ATA_TFLAG_WRITE) {
6056 /* PIO data out protocol */
6057 ap->hsm_task_state = HSM_ST_FIRST;
6058 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6060 /* always send first data block using
6061 * the ata_pio_task() codepath.
6064 /* PIO data in protocol */
6065 ap->hsm_task_state = HSM_ST;
6067 if (qc->tf.flags & ATA_TFLAG_POLLING)
6068 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6070 /* if polling, ata_pio_task() handles the rest.
6071 * otherwise, interrupt handler takes over from here.
6077 case ATA_PROT_ATAPI:
6078 case ATA_PROT_ATAPI_NODATA:
6079 if (qc->tf.flags & ATA_TFLAG_POLLING)
6080 ata_qc_set_polling(qc);
6082 ata_tf_to_host(ap, &qc->tf);
6084 ap->hsm_task_state = HSM_ST_FIRST;
6086 /* send cdb by polling if no cdb interrupt */
6087 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6088 (qc->tf.flags & ATA_TFLAG_POLLING))
6089 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6092 case ATA_PROT_ATAPI_DMA:
6093 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6095 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6096 ap->ops->bmdma_setup(qc); /* set up bmdma */
6097 ap->hsm_task_state = HSM_ST_FIRST;
6099 /* send cdb by polling if no cdb interrupt */
6100 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6101 ata_port_queue_task(ap, ata_pio_task, qc, 0);
6106 return AC_ERR_SYSTEM;
6113 * ata_host_intr - Handle host interrupt for given (port, task)
6114 * @ap: Port on which interrupt arrived (possibly...)
6115 * @qc: Taskfile currently active in engine
6117 * Handle host interrupt for given queued command. Currently,
6118 * only DMA interrupts are handled. All other commands are
6119 * handled via polling with interrupts disabled (nIEN bit).
6122 * spin_lock_irqsave(host lock)
6125 * One if interrupt was handled, zero if not (shared irq).
6128 inline unsigned int ata_host_intr(struct ata_port *ap,
6129 struct ata_queued_cmd *qc)
6131 struct ata_eh_info *ehi = &ap->link.eh_info;
6132 u8 status, host_stat = 0;
6134 VPRINTK("ata%u: protocol %d task_state %d\n",
6135 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
6137 /* Check whether we are expecting interrupt in this state */
6138 switch (ap->hsm_task_state) {
6140 /* Some pre-ATAPI-4 devices assert INTRQ
6141 * at this state when ready to receive CDB.
6144 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
6145 * The flag was turned on only for atapi devices. No
6146 * need to check ata_is_atapi(qc->tf.protocol) again.
6148 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6152 if (qc->tf.protocol == ATA_PROT_DMA ||
6153 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
6154 /* check status of DMA engine */
6155 host_stat = ap->ops->bmdma_status(ap);
6156 VPRINTK("ata%u: host_stat 0x%X\n",
6157 ap->print_id, host_stat);
6159 /* if it's not our irq... */
6160 if (!(host_stat & ATA_DMA_INTR))
6163 /* before we do anything else, clear DMA-Start bit */
6164 ap->ops->bmdma_stop(qc);
6166 if (unlikely(host_stat & ATA_DMA_ERR)) {
6167 /* error when transferring data to/from memory */
6168 qc->err_mask |= AC_ERR_HOST_BUS;
6169 ap->hsm_task_state = HSM_ST_ERR;
6179 /* check altstatus */
6180 status = ata_altstatus(ap);
6181 if (status & ATA_BUSY)
6184 /* check main status, clearing INTRQ */
6185 status = ata_chk_status(ap);
6186 if (unlikely(status & ATA_BUSY))
6189 /* ack bmdma irq events */
6190 ap->ops->irq_clear(ap);
6192 ata_hsm_move(ap, qc, status, 0);
6194 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
6195 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
6196 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6198 return 1; /* irq handled */
6201 ap->stats.idle_irq++;
6204 if ((ap->stats.idle_irq % 1000) == 0) {
6206 ap->ops->irq_clear(ap);
6207 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
6211 return 0; /* irq not handled */
6215 * ata_interrupt - Default ATA host interrupt handler
6216 * @irq: irq line (unused)
6217 * @dev_instance: pointer to our ata_host information structure
6219 * Default interrupt handler for PCI IDE devices. Calls
6220 * ata_host_intr() for each port that is not disabled.
6223 * Obtains host lock during operation.
6226 * IRQ_NONE or IRQ_HANDLED.
6229 irqreturn_t ata_interrupt(int irq, void *dev_instance)
6231 struct ata_host *host = dev_instance;
6233 unsigned int handled = 0;
6234 unsigned long flags;
6236 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6237 spin_lock_irqsave(&host->lock, flags);
6239 for (i = 0; i < host->n_ports; i++) {
6240 struct ata_port *ap;
6242 ap = host->ports[i];
6244 !(ap->flags & ATA_FLAG_DISABLED)) {
6245 struct ata_queued_cmd *qc;
6247 qc = ata_qc_from_tag(ap, ap->link.active_tag);
6248 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
6249 (qc->flags & ATA_QCFLAG_ACTIVE))
6250 handled |= ata_host_intr(ap, qc);
6254 spin_unlock_irqrestore(&host->lock, flags);
6256 return IRQ_RETVAL(handled);
6260 * sata_scr_valid - test whether SCRs are accessible
6261 * @link: ATA link to test SCR accessibility for
6263 * Test whether SCRs are accessible for @link.
6269 * 1 if SCRs are accessible, 0 otherwise.
6271 int sata_scr_valid(struct ata_link *link)
6273 struct ata_port *ap = link->ap;
6275 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
6279 * sata_scr_read - read SCR register of the specified port
6280 * @link: ATA link to read SCR for
6282 * @val: Place to store read value
6284 * Read SCR register @reg of @link into *@val. This function is
6285 * guaranteed to succeed if @link is ap->link, the cable type of
6286 * the port is SATA and the port implements ->scr_read.
6289 * None if @link is ap->link. Kernel thread context otherwise.
6292 * 0 on success, negative errno on failure.
6294 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6296 if (ata_is_host_link(link)) {
6297 struct ata_port *ap = link->ap;
6299 if (sata_scr_valid(link))
6300 return ap->ops->scr_read(ap, reg, val);
6304 return sata_pmp_scr_read(link, reg, val);
6308 * sata_scr_write - write SCR register of the specified port
6309 * @link: ATA link to write SCR for
6310 * @reg: SCR to write
6311 * @val: value to write
6313 * Write @val to SCR register @reg of @link. This function is
6314 * guaranteed to succeed if @link is ap->link, the cable type of
6315 * the port is SATA and the port implements ->scr_write.
6318 * None if @link is ap->link. Kernel thread context otherwise.
6321 * 0 on success, negative errno on failure.
6323 int sata_scr_write(struct ata_link *link, int reg, u32 val)
6325 if (ata_is_host_link(link)) {
6326 struct ata_port *ap = link->ap;
6328 if (sata_scr_valid(link))
6329 return ap->ops->scr_write(ap, reg, val);
6333 return sata_pmp_scr_write(link, reg, val);
6337 * sata_scr_write_flush - write SCR register of the specified port and flush
6338 * @link: ATA link to write SCR for
6339 * @reg: SCR to write
6340 * @val: value to write
6342 * This function is identical to sata_scr_write() except that this
6343 * function performs a flush after writing to the register.
6346 * None if @link is ap->link. Kernel thread context otherwise.
6349 * 0 on success, negative errno on failure.
6351 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6353 if (ata_is_host_link(link)) {
6354 struct ata_port *ap = link->ap;
6357 if (sata_scr_valid(link)) {
6358 rc = ap->ops->scr_write(ap, reg, val);
6360 rc = ap->ops->scr_read(ap, reg, &val);
6366 return sata_pmp_scr_write(link, reg, val);
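/*
 * Example (illustrative sketch, hypothetical name): decoding the
 * negotiated interface speed from the SStatus SPD field via
 * sata_scr_read().
 */
static void example_print_link_speed(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) {
		unsigned int spd = (sstatus >> 4) & 0xf;	/* 1 = 1.5Gbps, 2 = 3.0Gbps */

		ata_link_printk(link, KERN_INFO, "SATA link speed gen%u\n", spd);
	}
}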
6370 * ata_link_online - test whether the given link is online
6371 * @link: ATA link to test
6373 * Test whether @link is online. Note that this function returns
6374 * 0 if online status of @link cannot be obtained, so
6375 * ata_link_online(link) != !ata_link_offline(link).
6381 * 1 if the port online status is available and online.
6383 int ata_link_online(struct ata_link *link)
6387 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6388 (sstatus & 0xf) == 0x3)
6394 * ata_link_offline - test whether the given link is offline
6395 * @link: ATA link to test
6397 * Test whether @link is offline. Note that this function
6398 * returns 0 if offline status of @link cannot be obtained, so
6399 * ata_link_online(link) != !ata_link_offline(link).
6405 * 1 if the port offline status is available and offline.
6407 int ata_link_offline(struct ata_link *link)
6411 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6412 (sstatus & 0xf) != 0x3)
6417 int ata_flush_cache(struct ata_device *dev)
6419 unsigned int err_mask;
6422 if (!ata_try_flush_cache(dev))
6425 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6426 cmd = ATA_CMD_FLUSH_EXT;
6428 cmd = ATA_CMD_FLUSH;
6430 /* This is wrong. On a failed flush we get back the LBA of the lost
6431 sector and we should (assuming it wasn't aborted as unknown) issue
6432 a further flush command to continue the writeback until it
6434 err_mask = ata_do_simple_cmd(dev, cmd);
6436 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6444 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6445 unsigned int action, unsigned int ehi_flags,
6448 unsigned long flags;
6451 for (i = 0; i < host->n_ports; i++) {
6452 struct ata_port *ap = host->ports[i];
6453 struct ata_link *link;
6455 /* Previous resume operation might still be in
6456 * progress. Wait for PM_PENDING to clear.
6458 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6459 ata_port_wait_eh(ap);
6460 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6463 /* request PM ops to EH */
6464 spin_lock_irqsave(ap->lock, flags);
6469 ap->pm_result = &rc;
6472 ap->pflags |= ATA_PFLAG_PM_PENDING;
6473 __ata_port_for_each_link(link, ap) {
6474 link->eh_info.action |= action;
6475 link->eh_info.flags |= ehi_flags;
6478 ata_port_schedule_eh(ap);
6480 spin_unlock_irqrestore(ap->lock, flags);
6482 /* wait and check result */
6484 ata_port_wait_eh(ap);
6485 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6495 * ata_host_suspend - suspend host
6496 * @host: host to suspend
6499 * Suspend @host. Actual operation is performed by EH. This
6500 * function requests EH to perform PM operations and waits for EH to finish.
6504 * Kernel thread context (may sleep).
6507 * 0 on success, -errno on failure.
6509 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6514 * disable link pm on all ports before requesting
6517 ata_lpm_enable(host);
6519 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
6521 host->dev->power.power_state = mesg;
6526 * ata_host_resume - resume host
6527 * @host: host to resume
6529 * Resume @host. Actual operation is performed by EH. This
6530 * function requests EH to perform PM operations and returns.
6531 * Note that all resume operations are performed in parallel.
6534 * Kernel thread context (may sleep).
6536 void ata_host_resume(struct ata_host *host)
6538 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6539 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6540 host->dev->power.power_state = PMSG_ON;
6542 /* reenable link pm */
6543 ata_lpm_disable(host);
6548 * ata_port_start - Set port up for dma.
6549 * @ap: Port to initialize
6551 * Called just after data structures for each port are
6552 * initialized. Allocates space for PRD table.
6554 * May be used as the port_start() entry in ata_port_operations.
6557 * Inherited from caller.
6559 int ata_port_start(struct ata_port *ap)
6561 struct device *dev = ap->dev;
6564 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6569 rc = ata_pad_alloc(ap, dev);
6573 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6574 (unsigned long long)ap->prd_dma);
6579 * ata_dev_init - Initialize an ata_device structure
6580 * @dev: Device structure to initialize
6582 * Initialize @dev in preparation for probing.
6585 * Inherited from caller.
6587 void ata_dev_init(struct ata_device *dev)
6589 struct ata_link *link = dev->link;
6590 struct ata_port *ap = link->ap;
6591 unsigned long flags;
6593 /* SATA spd limit is bound to the first device */
6594 link->sata_spd_limit = link->hw_sata_spd_limit;
6597 /* High bits of dev->flags are used to record warm plug
6598 * requests which occur asynchronously. Synchronize using
6601 spin_lock_irqsave(ap->lock, flags);
6602 dev->flags &= ~ATA_DFLAG_INIT_MASK;
6604 spin_unlock_irqrestore(ap->lock, flags);
6606 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6607 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6608 dev->pio_mask = UINT_MAX;
6609 dev->mwdma_mask = UINT_MAX;
6610 dev->udma_mask = UINT_MAX;
6614 * ata_link_init - Initialize an ata_link structure
6615 * @ap: ATA port link is attached to
6616 * @link: Link structure to initialize
6617 * @pmp: Port multiplier port number
6622 * Kernel thread context (may sleep)
6624 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
6628 /* clear everything except for devices */
6629 memset(link, 0, offsetof(struct ata_link, device[0]));
6633 link->active_tag = ATA_TAG_POISON;
6634 link->hw_sata_spd_limit = UINT_MAX;
6636 /* can't use iterator, ap isn't initialized yet */
6637 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6638 struct ata_device *dev = &link->device[i];
6641 dev->devno = dev - link->device;
6647 * sata_link_init_spd - Initialize link->sata_spd_limit
6648 * @link: Link to configure sata_spd_limit for
6650 * Initialize @link->[hw_]sata_spd_limit to the currently configured value.
6654 * Kernel thread context (may sleep).
6657 * 0 on success, -errno on failure.
6659 int sata_link_init_spd(struct ata_link *link)
6664 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6668 spd = (scontrol >> 4) & 0xf;
6670 link->hw_sata_spd_limit &= (1 << spd) - 1;
6672 link->sata_spd_limit = link->hw_sata_spd_limit;
6678 * ata_port_alloc - allocate and initialize basic ATA port resources
6679 * @host: ATA host this allocated port belongs to
6681 * Allocate and initialize basic ATA port resources.
6684 * Allocate ATA port on success, NULL on failure.
6687 * Inherited from calling layer (may sleep).
6689 struct ata_port *ata_port_alloc(struct ata_host *host)
6691 struct ata_port *ap;
6695 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6699 ap->pflags |= ATA_PFLAG_INITIALIZING;
6700 ap->lock = &host->lock;
6701 ap->flags = ATA_FLAG_DISABLED;
6703 ap->ctl = ATA_DEVCTL_OBS;
6705 ap->dev = host->dev;
6706 ap->last_ctl = 0xFF;
6708 #if defined(ATA_VERBOSE_DEBUG)
6709 /* turn on all debugging levels */
6710 ap->msg_enable = 0x00FF;
6711 #elif defined(ATA_DEBUG)
6712 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6714 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6717 INIT_DELAYED_WORK(&ap->port_task, NULL);
6718 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6719 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6720 INIT_LIST_HEAD(&ap->eh_done_q);
6721 init_waitqueue_head(&ap->eh_wait_q);
6722 init_timer_deferrable(&ap->fastdrain_timer);
6723 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6724 ap->fastdrain_timer.data = (unsigned long)ap;
6726 ap->cbl = ATA_CBL_NONE;
6728 ata_link_init(ap, &ap->link, 0);
6731 ap->stats.unhandled_irq = 1;
6732 ap->stats.idle_irq = 1;
6737 static void ata_host_release(struct device *gendev, void *res)
6739 struct ata_host *host = dev_get_drvdata(gendev);
6742 for (i = 0; i < host->n_ports; i++) {
6743 struct ata_port *ap = host->ports[i];
6749 scsi_host_put(ap->scsi_host);
6751 kfree(ap->pmp_link);
6753 host->ports[i] = NULL;
6756 dev_set_drvdata(gendev, NULL);
6760 * ata_host_alloc - allocate and init basic ATA host resources
6761 * @dev: generic device this host is associated with
6762 * @max_ports: maximum number of ATA ports associated with this host
6764 * Allocate and initialize basic ATA host resources. LLD calls
6765 * this function to allocate a host, initializes it fully and
6766 * attaches it using ata_host_register().
6768 * @max_ports ports are allocated and host->n_ports is
6769 * initialized to @max_ports. The caller is allowed to decrease
6770 * host->n_ports before calling ata_host_register(). The unused
6771 * ports will be automatically freed on registration.
6774 * Allocate ATA host on success, NULL on failure.
6777 * Inherited from calling layer (may sleep).
6779 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6781 struct ata_host *host;
6787 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6790 /* alloc a container for our list of ATA ports (buses) */
6791 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6793 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6797 devres_add(dev, host);
6798 dev_set_drvdata(dev, host);
6800 spin_lock_init(&host->lock);
6802 host->n_ports = max_ports;
6804 /* allocate ports bound to this host */
6805 for (i = 0; i < max_ports; i++) {
6806 struct ata_port *ap;
6808 ap = ata_port_alloc(host);
6813 host->ports[i] = ap;
6816 devres_remove_group(dev, NULL);
6820 devres_release_group(dev, NULL);
6825 * ata_host_alloc_pinfo - alloc host and init with port_info array
6826 * @dev: generic device this host is associated with
6827 * @ppi: array of ATA port_info to initialize host with
6828 * @n_ports: number of ATA ports attached to this host
6830 * Allocate ATA host and initialize with info from @ppi. If NULL
6831 * terminated, @ppi may contain fewer entries than @n_ports. The
6832 * last entry will be used for the remaining ports.
6835 * Allocate ATA host on success, NULL on failure.
6838 * Inherited from calling layer (may sleep).
6840 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6841 const struct ata_port_info * const * ppi,
6844 const struct ata_port_info *pi;
6845 struct ata_host *host;
6848 host = ata_host_alloc(dev, n_ports);
6852 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6853 struct ata_port *ap = host->ports[i];
6858 ap->pio_mask = pi->pio_mask;
6859 ap->mwdma_mask = pi->mwdma_mask;
6860 ap->udma_mask = pi->udma_mask;
6861 ap->flags |= pi->flags;
6862 ap->link.flags |= pi->link_flags;
6863 ap->ops = pi->port_ops;
6865 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6866 host->ops = pi->port_ops;
6867 if (!host->private_data && pi->private_data)
6868 host->private_data = pi->private_data;
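/*
 * Example (illustrative sketch): a typical LLD describes its ports with
 * an ata_port_info array and lets ata_host_alloc_pinfo() copy the masks,
 * flags and ops into each allocated port.  All names, masks and the use
 * of ata_dummy_port_ops below are hypothetical placeholders.
 */
static const struct ata_port_info example_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
	.pio_mask	= 0x1f,			/* PIO0-4 */
	.udma_mask	= 0x7f,			/* UDMA0-6 */
	.port_ops	= &ata_dummy_port_ops,	/* stand-in for real ops */
};

static struct ata_host *example_alloc_host(struct device *dev)
{
	const struct ata_port_info *ppi[] = { &example_port_info, NULL };

	/* two ports, both initialized from the single entry above */
	return ata_host_alloc_pinfo(dev, ppi, 2);
}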
6874 static void ata_host_stop(struct device *gendev, void *res)
6876 struct ata_host *host = dev_get_drvdata(gendev);
6879 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6881 for (i = 0; i < host->n_ports; i++) {
6882 struct ata_port *ap = host->ports[i];
6884 if (ap->ops->port_stop)
6885 ap->ops->port_stop(ap);
6888 if (host->ops->host_stop)
6889 host->ops->host_stop(host);
6893 * ata_host_start - start and freeze ports of an ATA host
6894 * @host: ATA host to start ports for
6896 * Start and then freeze ports of @host. Started status is
6897 * recorded in host->flags, so this function can be called
6898 * multiple times. Ports are guaranteed to get started only
6899 * once. If host->ops isn't initialized yet, it's set to the
6900 * first non-dummy port ops.
6903 * Inherited from calling layer (may sleep).
6906 * 0 if all ports are started successfully, -errno otherwise.
6908 int ata_host_start(struct ata_host *host)
6911 void *start_dr = NULL;
6914 if (host->flags & ATA_HOST_STARTED)
6917 for (i = 0; i < host->n_ports; i++) {
6918 struct ata_port *ap = host->ports[i];
6920 if (!host->ops && !ata_port_is_dummy(ap))
6921 host->ops = ap->ops;
6923 if (ap->ops->port_stop)
6927 if (host->ops->host_stop)
6931 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6936 for (i = 0; i < host->n_ports; i++) {
6937 struct ata_port *ap = host->ports[i];
6939 if (ap->ops->port_start) {
6940 rc = ap->ops->port_start(ap);
6943 dev_printk(KERN_ERR, host->dev,
6944 "failed to start port %d "
6945 "(errno=%d)\n", i, rc);
6949 ata_eh_freeze_port(ap);
6953 devres_add(host->dev, start_dr);
6954 host->flags |= ATA_HOST_STARTED;
6959 struct ata_port *ap = host->ports[i];
6961 if (ap->ops->port_stop)
6962 ap->ops->port_stop(ap);
6964 devres_free(start_dr);
6969 * ata_sas_host_init - Initialize a host struct
6970 * @host: host to initialize
6971 * @dev: device host is attached to
6972 * @flags: host flags
6976 * PCI/etc. bus probe sem.
6979 /* KILLME - the only user left is ipr */
6980 void ata_host_init(struct ata_host *host, struct device *dev,
6981 unsigned long flags, const struct ata_port_operations *ops)
6983 spin_lock_init(&host->lock);
6985 host->flags = flags;
6990 * ata_host_register - register initialized ATA host
6991 * @host: ATA host to register
6992 * @sht: template for SCSI host
6994 * Register initialized ATA host. @host is allocated using
6995 * ata_host_alloc() and fully initialized by LLD. This function
6996 * starts ports, registers @host with ATA and SCSI layers and
6997 * probes registered devices.
7000 * Inherited from calling layer (may sleep).
7003 * 0 on success, -errno otherwise.
7005 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7009 /* host must have been started */
7010 if (!(host->flags & ATA_HOST_STARTED)) {
7011 dev_printk(KERN_ERR, host->dev,
7012 "BUG: trying to register unstarted host\n");
7017 /* Blow away unused ports. This happens when LLD can't
7018 * determine the exact number of ports to allocate at
7021 for (i = host->n_ports; host->ports[i]; i++)
7022 kfree(host->ports[i]);
7024 /* give ports names and add SCSI hosts */
7025 for (i = 0; i < host->n_ports; i++)
7026 host->ports[i]->print_id = ata_print_id++;
7028 rc = ata_scsi_add_hosts(host, sht);
7032 /* associate with ACPI nodes */
7033 ata_acpi_associate(host);
7035 /* set cable, sata_spd_limit and report */
7036 for (i = 0; i < host->n_ports; i++) {
7037 struct ata_port *ap = host->ports[i];
7038 unsigned long xfer_mask;
7040 /* set SATA cable type if still unset */
7041 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7042 ap->cbl = ATA_CBL_SATA;
7044 /* init sata_spd_limit to the current value */
7045 sata_link_init_spd(&ap->link);
7047 /* print per-port info to dmesg */
7048 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7051 if (!ata_port_is_dummy(ap)) {
7052 ata_port_printk(ap, KERN_INFO,
7053 "%cATA max %s %s\n",
7054 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
7055 ata_mode_string(xfer_mask),
7056 ap->link.eh_info.desc);
7057 ata_ehi_clear_desc(&ap->link.eh_info);
7059 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7062 /* perform each probe synchronously */
7063 DPRINTK("probe begin\n");
7064 for (i = 0; i < host->n_ports; i++) {
7065 struct ata_port *ap = host->ports[i];
7069 if (ap->ops->error_handler) {
7070 struct ata_eh_info *ehi = &ap->link.eh_info;
7071 unsigned long flags;
7075 /* kick EH for boot probing */
7076 spin_lock_irqsave(ap->lock, flags);
7079 (1 << ata_link_max_devices(&ap->link)) - 1;
7080 ehi->action |= ATA_EH_SOFTRESET;
7081 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7083 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
7084 ap->pflags |= ATA_PFLAG_LOADING;
7085 ata_port_schedule_eh(ap);
7087 spin_unlock_irqrestore(ap->lock, flags);
7089 /* wait for EH to finish */
7090 ata_port_wait_eh(ap);
7092 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7093 rc = ata_bus_probe(ap);
7094 DPRINTK("ata%u: bus probe end\n", ap->print_id);
7097 /* FIXME: do something useful here?
7098 * Current libata behavior will
7099 * tear down everything when
7100 * the module is removed
7101 * or the h/w is unplugged.
7107 /* probes are done, now scan each port's disk(s) */
7108 DPRINTK("host probe begin\n");
7109 for (i = 0; i < host->n_ports; i++) {
7110 struct ata_port *ap = host->ports[i];
7112 ata_scsi_scan_host(ap, 1);
7113 ata_lpm_schedule(ap, ap->pm_policy);
7120 * ata_host_activate - start host, request IRQ and register it
7121 * @host: target ATA host
7122 * @irq: IRQ to request
7123 * @irq_handler: irq_handler used when requesting IRQ
7124 * @irq_flags: irq_flags used when requesting IRQ
7125 * @sht: scsi_host_template to use when registering the host
7127 * After allocating an ATA host and initializing it, most libata
7128 * LLDs perform three steps to activate the host - start host,
7129 * request IRQ and register it. This helper takes necessary
7130 * arguments and performs the three steps in one go.
7132 * An invalid IRQ skips the IRQ registration and expects the host to
7133 * have set polling mode on the port. In this case, @irq_handler should be NULL.
7137 * Inherited from calling layer (may sleep).
7140 * 0 on success, -errno otherwise.
7142 int ata_host_activate(struct ata_host *host, int irq,
7143 irq_handler_t irq_handler, unsigned long irq_flags,
7144 struct scsi_host_template *sht)
7148 rc = ata_host_start(host);
7152 /* Special case for polling mode */
7154 WARN_ON(irq_handler);
7155 return ata_host_register(host, sht);
7158 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7159 dev_driver_string(host->dev), host);
7163 for (i = 0; i < host->n_ports; i++)
7164 ata_port_desc(host->ports[i], "irq %d", irq);
7166 rc = ata_host_register(host, sht);
7167 /* if failed, just free the IRQ and leave ports alone */
7169 devm_free_irq(host->dev, irq, host);
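/*
 * Example (illustrative sketch, hypothetical names, error handling
 * trimmed): the tail of a typical PCI LLD probe routine once the host
 * has been allocated and its I/O resources mapped.  ata_host_activate()
 * starts the ports, requests the IRQ and registers with SCSI in one go.
 */
static int example_probe_tail(struct pci_dev *pdev, struct ata_host *host,
			      struct scsi_host_template *example_sht)
{
	pci_set_master(pdev);

	return ata_host_activate(host, pdev->irq, ata_interrupt,
				 IRQF_SHARED, example_sht);
}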
7175 * ata_port_detach - Detach ATA port in preparation for device removal
7176 * @ap: ATA port to be detached
7178 * Detach all ATA devices and the associated SCSI devices of @ap;
7179 * then, remove the associated SCSI host. @ap is guaranteed to
7180 * be quiescent on return from this function.
7183 * Kernel thread context (may sleep).
7185 static void ata_port_detach(struct ata_port *ap)
7187 unsigned long flags;
7188 struct ata_link *link;
7189 struct ata_device *dev;
7191 if (!ap->ops->error_handler)
7194 /* tell EH we're leaving & flush EH */
7195 spin_lock_irqsave(ap->lock, flags);
7196 ap->pflags |= ATA_PFLAG_UNLOADING;
7197 spin_unlock_irqrestore(ap->lock, flags);
7199 ata_port_wait_eh(ap);
7201 /* EH is now guaranteed to see UNLOADING - EH context belongs
7202 * to us. Disable all existing devices.
7204 ata_port_for_each_link(link, ap) {
7205 ata_link_for_each_dev(dev, link)
7206 ata_dev_disable(dev);
7209 /* Final freeze & EH. All in-flight commands are aborted. EH
7210 * will be skipped and retries will be terminated with bad
7213 spin_lock_irqsave(ap->lock, flags);
7214 ata_port_freeze(ap); /* won't be thawed */
7215 spin_unlock_irqrestore(ap->lock, flags);
7217 ata_port_wait_eh(ap);
7218 cancel_rearming_delayed_work(&ap->hotplug_task);
7221 /* remove the associated SCSI host */
7222 scsi_remove_host(ap->scsi_host);
/**
 * ata_host_detach - Detach all ports of an ATA host
 * @host: Host to detach
 *
 * Detach all ports of @host.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}
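/*
 * Example (illustrative sketch, not part of the original file): non-PCI LLDs
 * call ata_host_detach() from their bus remove callback; the host pointer is
 * the drvdata set up by ata_host_register().  foo_remove() is hypothetical.
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		ata_host_detach(host);
 *		return 0;
 *	}
 */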
/**
 * ata_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr, and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
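/*
 * Example (illustrative sketch, not part of the original file): a driver that
 * has mapped its command block points cmd_addr (and usually ctl_addr) at the
 * mapping and lets ata_std_ports() derive the remaining taskfile registers.
 * foo_init_port() and the cmd/ctl iomem cookies are hypothetical.
 *
 *	static void foo_init_port(struct ata_port *ap,
 *				  void __iomem *cmd, void __iomem *ctl)
 *	{
 *		struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *		ioaddr->cmd_addr = cmd;
 *		ioaddr->altstatus_addr = ioaddr->ctl_addr = ctl;
 *		ata_std_ports(ioaddr);
 *	}
 */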
#ifdef CONFIG_PCI

/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that a hot-unplug or
 * module unload event has occurred.  Detach all ports.  Resource
 * release is handled via devres.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}
	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
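/*
 * Example (illustrative sketch, not part of the original file): a driver
 * describes the config register, its width in bytes, a mask and the value
 * expected when a channel is enabled, then uses the helper to skip ports
 * that are disabled.  The offset and bits below are placeholders, not taken
 * from a real controller.
 *
 *	static const struct pci_bits foo_enable_bits = { 0x41, 1, 0x08, 0x08 };
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits))
 *		return -ENOENT;
 */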
#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}

int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
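/*
 * Example (illustrative sketch, not part of the original file): a PCI LLD
 * usually wires these helpers straight into its struct pci_driver.
 * foo_pci_tbl and foo_init_one are hypothetical.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_tbl,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */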
#endif /* CONFIG_PM */

#endif /* CONFIG_PCI */
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
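/*
 * Example (illustrative sketch, not part of the original file): interrupt-path
 * diagnostics can be wrapped in ata_ratelimit() so a misbehaving device does
 * not flood the log; at most one message is let through per HZ/5 window.
 * The status variable is hypothetical.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt (status 0x%x)\n", status);
 */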
/**
 * ata_wait_register - wait until register value changes
 * @reg: IO-mapped register
 * @mask: Mask to apply to read register value
 * @val: Wait condition
 * @interval_msec: polling interval in milliseconds
 * @timeout_msec: timeout in milliseconds
 *
 * Waiting for some bits of a register to change is a common
 * operation for ATA controllers.  This function reads the 32-bit LE
 * IO-mapped register @reg and tests for the following condition:
 *
 * (*@reg & mask) != val
 *
 * If the condition is met, it returns; otherwise, the process is
 * repeated after @interval_msec until timeout.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
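/*
 * Example (illustrative sketch, not part of the original file): wait up to
 * 500ms, polling every millisecond, for a self-clearing reset bit to drop in
 * a hypothetical MMIO control register.  FOO_CTL, FOO_CTL_RESET and the mmio
 * cookie are placeholders.  If the bit is still set afterwards, the wait
 * timed out.
 *
 *	u32 ctl = ata_wait_register(mmio + FOO_CTL, FOO_CTL_RESET,
 *				    FOO_CTL_RESET, 1, 500);
 *	if (ctl & FOO_CTL_RESET)
 *		return -EIO;
 */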
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
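/*
 * Example (illustrative sketch, not part of the original file): the dummy
 * port info can be used to mark a hardware port that should not be driven,
 * for instance the unused second channel of a controller.  foo_port_info is
 * a hypothetical driver-provided ata_port_info.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_port_info, &ata_dummy_port_info };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */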
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
EXPORT_SYMBOL_GPL(sata_pmp_do_eh);

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);