2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/list.h>
41 #include <linux/highmem.h>
42 #include <linux/spinlock.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/completion.h>
48 #include <linux/suspend.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/scatterlist.h>
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_host.h>
55 #include <linux/libata.h>
57 #include <asm/semaphore.h>
58 #include <asm/byteorder.h>
62 /* debounce timing parameters in msecs { interval, duration, timeout } */
63 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
64 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
65 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
67 static unsigned int ata_dev_init_params(struct ata_device *dev,
68 u16 heads, u16 sectors);
69 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
70 static void ata_dev_xfermask(struct ata_device *dev);
72 static unsigned int ata_unique_id = 1;
73 static struct workqueue_struct *ata_wq;
75 struct workqueue_struct *ata_aux_wq;
77 int atapi_enabled = 1;
78 module_param(atapi_enabled, int, 0444);
79 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
82 module_param(atapi_dmadir, int, 0444);
83 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
86 module_param_named(fua, libata_fua, int, 0444);
87 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
89 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
90 module_param(ata_probe_timeout, int, 0444);
91 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
93 MODULE_AUTHOR("Jeff Garzik");
94 MODULE_DESCRIPTION("Library module for ATA devices");
95 MODULE_LICENSE("GPL");
96 MODULE_VERSION(DRV_VERSION);
100 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
101 * @tf: Taskfile to convert
102 * @fis: Buffer into which data will be output
103 * @pmp: Port multiplier port
105 * Converts a standard ATA taskfile to a Serial ATA
106 * FIS structure (Register - Host to Device).
109 * Inherited from caller.
112 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
114 fis[0] = 0x27; /* Register - Host to Device FIS */
115 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
116 bit 7 indicates Command FIS */
117 fis[2] = tf->command;
118 fis[3] = tf->feature;
125 fis[8] = tf->hob_lbal;
126 fis[9] = tf->hob_lbam;
127 fis[10] = tf->hob_lbah;
128 fis[11] = tf->hob_feature;
131 fis[13] = tf->hob_nsect;
142 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
143 * @fis: Buffer from which data will be input
144 * @tf: Taskfile to output
146 * Converts a serial ATA FIS structure to a standard ATA taskfile.
149 * Inherited from caller.
152 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
154 tf->command = fis[2]; /* status */
155 tf->feature = fis[3]; /* error */
162 tf->hob_lbal = fis[8];
163 tf->hob_lbam = fis[9];
164 tf->hob_lbah = fis[10];
167 tf->hob_nsect = fis[13];
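/*
 * Illustrative, standalone sketch (guarded out of the build): the two
 * converters above agree on the Register FIS byte layout, so a value
 * written by ata_tf_to_fis() comes back unchanged through
 * ata_tf_from_fis().  All names below are local to the example; compile
 * the guarded block on its own to try it.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned char fis[20] = { 0 };
	unsigned char cmd = 0xec, feat = 0x00;	/* e.g. IDENTIFY DEVICE */

	fis[0] = 0x27;			/* Register - Host to Device */
	fis[1] = (0 & 0xf) | (1 << 7);	/* PMP port 0, Command FIS */
	fis[2] = cmd;
	fis[3] = feat;

	/* reading back uses the same byte positions */
	assert(fis[2] == cmd && fis[3] == feat);
	return 0;
}
#endif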
170 static const u8 ata_rw_cmds[] = {
174 ATA_CMD_READ_MULTI_EXT,
175 ATA_CMD_WRITE_MULTI_EXT,
179 ATA_CMD_WRITE_MULTI_FUA_EXT,
183 ATA_CMD_PIO_READ_EXT,
184 ATA_CMD_PIO_WRITE_EXT,
197 ATA_CMD_WRITE_FUA_EXT
201 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
202 * @qc: command to examine and configure
204 * Examine the device configuration and tf->flags to calculate
205 * the proper read/write commands and protocol to use.
210 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
212 struct ata_taskfile *tf = &qc->tf;
213 struct ata_device *dev = qc->dev;
216 int index, fua, lba48, write;
218 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
219 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
220 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
222 if (dev->flags & ATA_DFLAG_PIO) {
223 tf->protocol = ATA_PROT_PIO;
224 index = dev->multi_count ? 0 : 8;
225 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
226 /* Unable to use DMA due to host limitation */
227 tf->protocol = ATA_PROT_PIO;
228 index = dev->multi_count ? 0 : 8;
230 tf->protocol = ATA_PROT_DMA;
234 cmd = ata_rw_cmds[index + fua + lba48 + write];
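/*
 * Illustrative, standalone sketch (guarded out of the build), assuming
 * the original 8-entry grouping of ata_rw_cmds[] (multi, PIO, DMA): the
 * offset within a group is formed from three flag bits, fua -> +4,
 * lba48 -> +2, write -> +1, so an LBA48 DMA write lands at 16 + 2 + 1 = 19,
 * the slot holding ATA_CMD_WRITE_EXT.  Names are local to the example.
 */
#if 0
#include <assert.h>

int main(void)
{
	int fua = 0, lba48 = 1, write = 1;	/* LBA48 DMA write */
	int index = 16;				/* DMA group base */
	int slot = index + (fua ? 4 : 0) + (lba48 ? 2 : 0) + (write ? 1 : 0);

	assert(slot == 19);
	return 0;
}
#endif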
243 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
244 * @pio_mask: pio_mask
245 * @mwdma_mask: mwdma_mask
246 * @udma_mask: udma_mask
248 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
249 * unsigned int xfer_mask.
257 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
258 unsigned int mwdma_mask,
259 unsigned int udma_mask)
261 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
262 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
263 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
267 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
268 * @xfer_mask: xfer_mask to unpack
269 * @pio_mask: resulting pio_mask
270 * @mwdma_mask: resulting mwdma_mask
271 * @udma_mask: resulting udma_mask
273 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
274 * Any NULL destination masks will be ignored.
276 static void ata_unpack_xfermask(unsigned int xfer_mask,
277 unsigned int *pio_mask,
278 unsigned int *mwdma_mask,
279 unsigned int *udma_mask)
282 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
284 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
286 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
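/*
 * Illustrative, standalone sketch (guarded out of the build): packing
 * just shifts each per-type bitmap into its own field of one word, and
 * unpacking reverses it.  The shift/width values below are local to the
 * example and merely mirror the ATA_SHIFT_xxx and ATA_MASK_xxx idea.
 */
#if 0
#include <assert.h>

enum { EX_SHIFT_PIO = 0, EX_SHIFT_MWDMA = 5, EX_SHIFT_UDMA = 8 };
#define EX_MASK_PIO	(0x1fu << EX_SHIFT_PIO)
#define EX_MASK_MWDMA	(0x07u << EX_SHIFT_MWDMA)
#define EX_MASK_UDMA	(0xffu << EX_SHIFT_UDMA)

int main(void)
{
	unsigned int pio = 0x1f, mwdma = 0x07, udma = 0x3f;
	unsigned int xfer = ((pio << EX_SHIFT_PIO) & EX_MASK_PIO) |
			    ((mwdma << EX_SHIFT_MWDMA) & EX_MASK_MWDMA) |
			    ((udma << EX_SHIFT_UDMA) & EX_MASK_UDMA);

	/* round trip: unpacking recovers the original bitmaps */
	assert(((xfer & EX_MASK_PIO) >> EX_SHIFT_PIO) == pio);
	assert(((xfer & EX_MASK_MWDMA) >> EX_SHIFT_MWDMA) == mwdma);
	assert(((xfer & EX_MASK_UDMA) >> EX_SHIFT_UDMA) == udma);
	return 0;
}
#endif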
289 static const struct ata_xfer_ent {
293 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
294 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
295 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
300 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
301 * @xfer_mask: xfer_mask of interest
303 * Return matching XFER_* value for @xfer_mask. Only the highest
304 * bit of @xfer_mask is considered.
310 * Matching XFER_* value, 0 if no match found.
312 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
314 int highbit = fls(xfer_mask) - 1;
315 const struct ata_xfer_ent *ent;
317 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
318 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
319 return ent->base + highbit - ent->shift;
324 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
325 * @xfer_mode: XFER_* of interest
327 * Return matching xfer_mask for @xfer_mode.
333 * Matching xfer_mask, 0 if no match found.
335 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
337 const struct ata_xfer_ent *ent;
339 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
340 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
341 return 1 << (ent->shift + xfer_mode - ent->base);
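/*
 * Illustrative, standalone sketch (guarded out of the build): mask2mode
 * takes the highest set bit of the packed mask and maps it to a base
 * XFER_* code plus an offset; mode2mask is the inverse, yielding the
 * single bit for a given mode.  Example-local values stand in for the
 * real table; 0x40 plays the role of XFER_UDMA_0.
 */
#if 0
#include <assert.h>

static int ex_fls(unsigned int x)	/* local stand-in for fls() */
{
	int r = 0;
	while (x) { r++; x >>= 1; }
	return r;
}

int main(void)
{
	enum { EX_SHIFT_UDMA = 8, EX_BASE_UDMA = 0x40 };
	unsigned int udma_mask = 0x3f << EX_SHIFT_UDMA;	/* UDMA 0-5 */
	int highbit = ex_fls(udma_mask) - 1;

	/* highest bit 13 -> UDMA mode 5 -> code 0x45 (XFER_UDMA_5) */
	assert(EX_BASE_UDMA + highbit - EX_SHIFT_UDMA == 0x45);
	return 0;
}
#endif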
346 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
347 * @xfer_mode: XFER_* of interest
349 * Return matching xfer_shift for @xfer_mode.
355 * Matching xfer_shift, -1 if no match found.
357 static int ata_xfer_mode2shift(unsigned int xfer_mode)
359 const struct ata_xfer_ent *ent;
361 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
362 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
368 * ata_mode_string - convert xfer_mask to string
369 * @xfer_mask: mask of bits supported; only highest bit counts.
371 * Determine the string which represents the highest speed
372 * (highest bit in @xfer_mask).
378 * Constant C string representing highest speed listed in
379 * @xfer_mask, or the constant C string "<n/a>".
381 static const char *ata_mode_string(unsigned int xfer_mask)
383 static const char * const xfer_mode_str[] = {
407 highbit = fls(xfer_mask) - 1;
408 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
409 return xfer_mode_str[highbit];
413 static const char *sata_spd_string(unsigned int spd)
415 static const char * const spd_str[] = {
420 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
422 return spd_str[spd - 1];
425 void ata_dev_disable(struct ata_device *dev)
427 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
428 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
434 * ata_pio_devchk - PATA device presence detection
435 * @ap: ATA channel to examine
436 * @device: Device to examine (starting at zero)
438 * This technique was originally described in
439 * Hale Landis's ATADRVR (www.ata-atapi.com), and
440 * later found its way into the ATA/ATAPI spec.
442 * Write a pattern to the ATA shadow registers,
443 * and if a device is present, it will respond by
444 * correctly storing and echoing back the
445 * ATA shadow register contents.
451 static unsigned int ata_pio_devchk(struct ata_port *ap,
454 struct ata_ioports *ioaddr = &ap->ioaddr;
457 ap->ops->dev_select(ap, device);
459 outb(0x55, ioaddr->nsect_addr);
460 outb(0xaa, ioaddr->lbal_addr);
462 outb(0xaa, ioaddr->nsect_addr);
463 outb(0x55, ioaddr->lbal_addr);
465 outb(0x55, ioaddr->nsect_addr);
466 outb(0xaa, ioaddr->lbal_addr);
468 nsect = inb(ioaddr->nsect_addr);
469 lbal = inb(ioaddr->lbal_addr);
471 if ((nsect == 0x55) && (lbal == 0xaa))
472 return 1; /* we found a device */
474 return 0; /* nothing found */
478 * ata_mmio_devchk - PATA device presence detection
479 * @ap: ATA channel to examine
480 * @device: Device to examine (starting at zero)
482 * This technique was originally described in
483 * Hale Landis's ATADRVR (www.ata-atapi.com), and
484 * later found its way into the ATA/ATAPI spec.
486 * Write a pattern to the ATA shadow registers,
487 * and if a device is present, it will respond by
488 * correctly storing and echoing back the
489 * ATA shadow register contents.
495 static unsigned int ata_mmio_devchk(struct ata_port *ap,
498 struct ata_ioports *ioaddr = &ap->ioaddr;
501 ap->ops->dev_select(ap, device);
503 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
504 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
506 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
507 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
509 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
510 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
512 nsect = readb((void __iomem *) ioaddr->nsect_addr);
513 lbal = readb((void __iomem *) ioaddr->lbal_addr);
515 if ((nsect == 0x55) && (lbal == 0xaa))
516 return 1; /* we found a device */
518 return 0; /* nothing found */
522 * ata_devchk - PATA device presence detection
523 * @ap: ATA channel to examine
524 * @device: Device to examine (starting at zero)
526 * Dispatch ATA device presence detection, depending
527 * on whether we are using PIO or MMIO to talk to the
528 * ATA shadow registers.
534 static unsigned int ata_devchk(struct ata_port *ap,
537 if (ap->flags & ATA_FLAG_MMIO)
538 return ata_mmio_devchk(ap, device);
539 return ata_pio_devchk(ap, device);
543 * ata_dev_classify - determine device type based on ATA-spec signature
544 * @tf: ATA taskfile register set for device to be identified
546 * Determine from taskfile register contents whether a device is
547 * ATA or ATAPI, as per "Signature and persistence" section
548 * of ATA/PI spec (volume 1, sect 5.14).
554 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
555 * in the event of failure.
558 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
560 /* Apple's open source Darwin code hints that some devices only
561 * put a proper signature into the LBA mid/high registers,
562 * so we only check those. It's sufficient for uniqueness.
565 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
566 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
567 DPRINTK("found ATA device by sig\n");
571 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
572 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
573 DPRINTK("found ATAPI device by sig\n");
574 return ATA_DEV_ATAPI;
577 DPRINTK("unknown device\n");
578 return ATA_DEV_UNKNOWN;
582 * ata_dev_try_classify - Parse returned ATA device signature
583 * @ap: ATA channel to examine
584 * @device: Device to examine (starting at zero)
585 * @r_err: Value of error register on completion
587 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
588 * an ATA/ATAPI-defined set of values is placed in the ATA
589 * shadow registers, indicating the results of device detection
592 * Select the ATA device, and read the values from the ATA shadow
593 * registers. Then parse according to the Error register value,
594 * and the spec-defined values examined by ata_dev_classify().
600 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
604 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
606 struct ata_taskfile tf;
610 ap->ops->dev_select(ap, device);
612 memset(&tf, 0, sizeof(tf));
614 ap->ops->tf_read(ap, &tf);
619 /* see if device passed diags: if master then continue and warn later */
620 if (err == 0 && device == 0)
621 /* diagnostic fail: do nothing _YET_ */
622 ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
625 else if ((device == 0) && (err == 0x81))
630 /* determine if device is ATA or ATAPI */
631 class = ata_dev_classify(&tf);
633 if (class == ATA_DEV_UNKNOWN)
635 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
641 * ata_id_string - Convert IDENTIFY DEVICE page into string
642 * @id: IDENTIFY DEVICE results we will examine
643 * @s: string into which data is output
644 * @ofs: offset into identify device page
645 * @len: length of string to return. must be an even number.
647 * The strings in the IDENTIFY DEVICE page are broken up into
648 * 16-bit chunks. Run through the string, and output each
649 * 8-bit chunk linearly, regardless of platform.
655 void ata_id_string(const u16 *id, unsigned char *s,
656 unsigned int ofs, unsigned int len)
675 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
676 * @id: IDENTIFY DEVICE results we will examine
677 * @s: string into which data is output
678 * @ofs: offset into identify device page
679 * @len: length of string to return. must be an odd number.
681 * This function is identical to ata_id_string except that it
682 * trims trailing spaces and terminates the resulting string with
683 * null. @len must be actual maximum length (even number) + 1.
688 void ata_id_c_string(const u16 *id, unsigned char *s,
689 unsigned int ofs, unsigned int len)
695 ata_id_string(id, s, ofs, len - 1);
697 p = s + strnlen(s, len - 1);
698 while (p > s && p[-1] == ' ')
703 static u64 ata_id_n_sectors(const u16 *id)
705 if (ata_id_has_lba(id)) {
706 if (ata_id_has_lba48(id))
707 return ata_id_u64(id, 100);
709 return ata_id_u32(id, 60);
711 if (ata_id_current_chs_valid(id))
712 return ata_id_u32(id, 57);
714 return id[1] * id[3] * id[6];
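/*
 * Illustrative, standalone sketch (guarded out of the build): IDENTIFY
 * words are 16-bit little-endian quantities, so the 48-bit capacity in
 * words 100-103 is assembled low word first (word 100 is least
 * significant), mirroring what ata_id_u64() does; the final fallback
 * above is simply default cylinders * heads * sectors.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint64_t ex_id_u64(const uint16_t *id, int n)
{
	return (uint64_t)id[n] | ((uint64_t)id[n + 1] << 16) |
	       ((uint64_t)id[n + 2] << 32) | ((uint64_t)id[n + 3] << 48);
}

int main(void)
{
	uint16_t id[256] = { 0 };

	id[100] = 0x5678;	/* low word of the LBA48 sector count */
	id[101] = 0x1234;
	assert(ex_id_u64(id, 100) == 0x12345678ULL);
	return 0;
}
#endif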
719 * ata_noop_dev_select - Select device 0/1 on ATA bus
720 * @ap: ATA channel to manipulate
721 * @device: ATA device (numbered from zero) to select
723 * This function intentionally performs no action.
725 * May be used as the dev_select() entry in ata_port_operations.
730 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
736 * ata_std_dev_select - Select device 0/1 on ATA bus
737 * @ap: ATA channel to manipulate
738 * @device: ATA device (numbered from zero) to select
740 * Use the method defined in the ATA specification to
741 * make either device 0, or device 1, active on the
742 * ATA channel. Works with both PIO and MMIO.
744 * May be used as the dev_select() entry in ata_port_operations.
750 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
755 tmp = ATA_DEVICE_OBS;
757 tmp = ATA_DEVICE_OBS | ATA_DEV1;
759 if (ap->flags & ATA_FLAG_MMIO) {
760 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
762 outb(tmp, ap->ioaddr.device_addr);
764 ata_pause(ap); /* needed; also flushes, for mmio */
768 * ata_dev_select - Select device 0/1 on ATA bus
769 * @ap: ATA channel to manipulate
770 * @device: ATA device (numbered from zero) to select
771 * @wait: non-zero to wait for Status register BSY bit to clear
772 * @can_sleep: non-zero if context allows sleeping
774 * Use the method defined in the ATA specification to
775 * make either device 0, or device 1, active on the
778 * This is a high-level version of ata_std_dev_select(),
779 * which additionally provides the services of inserting
780 * the proper pauses and status polling, where needed.
786 void ata_dev_select(struct ata_port *ap, unsigned int device,
787 unsigned int wait, unsigned int can_sleep)
789 if (ata_msg_probe(ap))
790 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
791 "device %u, wait %u\n", ap->id, device, wait);
796 ap->ops->dev_select(ap, device);
799 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
806 * ata_dump_id - IDENTIFY DEVICE info debugging output
807 * @id: IDENTIFY DEVICE page to dump
809 * Dump selected 16-bit words from the given IDENTIFY DEVICE
816 static inline void ata_dump_id(const u16 *id)
818 DPRINTK("49==0x%04x "
828 DPRINTK("80==0x%04x "
838 DPRINTK("88==0x%04x "
845 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
846 * @id: IDENTIFY data to compute xfer mask from
848 * Compute the xfermask for this device. This is not as trivial
849 * as it seems if we must consider early devices correctly.
851 * FIXME: pre-IDE drive timing (do we care?).
859 static unsigned int ata_id_xfermask(const u16 *id)
861 unsigned int pio_mask, mwdma_mask, udma_mask;
863 /* Usual case. Word 53 indicates word 64 is valid */
864 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
865 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
869 /* If word 64 isn't valid then Word 51 high byte holds
870 * the PIO timing number for the maximum. Turn it into
873 u8 mode = id[ATA_ID_OLD_PIO_MODES] & 0xFF;
874 if (mode < 5) /* Valid PIO range */
875 pio_mask = (2 << mode) - 1;
879 /* But wait... there's more. Design your standards by
880 * committee and you too can get a free iordy field to
881 * process. However, it's the speeds, not the modes, that
882 * are supported... Note that drivers using the timing API
883 * will get this right anyway
887 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
889 if (ata_id_is_cfa(id)) {
891 * Process compact flash extended modes
893 int pio = id[163] & 0x7;
894 int dma = (id[163] >> 3) & 7;
897 pio_mask |= (1 << 5);
899 pio_mask |= (1 << 6);
901 mwdma_mask |= (1 << 3);
903 mwdma_mask |= (1 << 4);
907 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
908 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
910 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
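/*
 * Illustrative, standalone sketch (guarded out of the build): the masks
 * come straight from IDENTIFY words gated by the validity bits in word 53;
 * bit 1 validates the PIO modes in word 64, bit 2 validates the UDMA modes
 * in word 88, and MWDMA support sits in word 63.  The driver additionally
 * shifts the word-64 PIO bits up and ORs in the always-supported modes
 * 0-2; this sketch keeps just the gating.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t id[256] = { 0 };
	unsigned int pio_mask = 0, mwdma_mask, udma_mask;

	id[53] = (1 << 1) | (1 << 2);	/* words 64 and 88 are valid */
	id[64] = 0x03;			/* PIO3 and PIO4 supported */
	id[63] = 0x07;			/* MWDMA 0-2 */
	id[88] = 0x3f;			/* UDMA 0-5 */

	if (id[53] & (1 << 1))
		pio_mask = id[64] & 0x03;
	mwdma_mask = id[63] & 0x07;
	udma_mask = (id[53] & (1 << 2)) ? (id[88] & 0xff) : 0;

	assert(pio_mask == 0x03 && mwdma_mask == 0x07 && udma_mask == 0x3f);
	return 0;
}
#endif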
914 * ata_port_queue_task - Queue port_task
915 * @ap: The ata_port to queue port_task for
916 * @fn: workqueue function to be scheduled
917 * @data: data value to pass to workqueue function
918 * @delay: delay time for workqueue function
920 * Schedule @fn(@data) for execution after @delay jiffies using
921 * port_task. There is one port_task per port, and it is the
922 * user's (low-level driver's) responsibility to make sure that only
923 * one task is active at any given time.
925 * libata core layer takes care of synchronization between
926 * port_task and EH. ata_port_queue_task() may be ignored for EH
930 * Inherited from caller.
932 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
937 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
940 PREPARE_DELAYED_WORK(&ap->port_task, fn, data);
942 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
944 /* rc == 0 means that another user is using port task */
949 * ata_port_flush_task - Flush port_task
950 * @ap: The ata_port to flush port_task for
952 * After this function completes, port_task is guaranteed not to
953 * be running or scheduled.
956 * Kernel thread context (may sleep)
958 void ata_port_flush_task(struct ata_port *ap)
964 spin_lock_irqsave(ap->lock, flags);
965 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
966 spin_unlock_irqrestore(ap->lock, flags);
968 DPRINTK("flush #1\n");
969 flush_workqueue(ata_wq);
972 * At this point, if a task is running, it's guaranteed to see
973 * the FLUSH flag; thus, it will never queue pio tasks again.
976 if (!cancel_delayed_work(&ap->port_task)) {
978 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
980 flush_workqueue(ata_wq);
983 spin_lock_irqsave(ap->lock, flags);
984 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
985 spin_unlock_irqrestore(ap->lock, flags);
988 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
991 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
993 struct completion *waiting = qc->private_data;
999 * ata_exec_internal - execute libata internal command
1000 * @dev: Device to which the command is sent
1001 * @tf: Taskfile registers for the command and the result
1002 * @cdb: CDB for packet command
1003 * @dma_dir: Data transfer direction of the command
1004 * @buf: Data buffer of the command
1005 * @buflen: Length of data buffer
1007 * Executes libata internal command with timeout. @tf contains
1008 * command on entry and result on return. Timeout and error
1009 * conditions are reported via return value. No recovery action
1010 * is taken after a command times out. It is the caller's duty to
1011 * clean up after timeout.
1014 * None. Should be called with kernel context, might sleep.
1017 * Zero on success, AC_ERR_* mask on failure
1019 unsigned ata_exec_internal(struct ata_device *dev,
1020 struct ata_taskfile *tf, const u8 *cdb,
1021 int dma_dir, void *buf, unsigned int buflen)
1023 struct ata_port *ap = dev->ap;
1024 u8 command = tf->command;
1025 struct ata_queued_cmd *qc;
1026 unsigned int tag, preempted_tag;
1027 u32 preempted_sactive, preempted_qc_active;
1028 DECLARE_COMPLETION_ONSTACK(wait);
1029 unsigned long flags;
1030 unsigned int err_mask;
1033 spin_lock_irqsave(ap->lock, flags);
1035 /* no internal command while frozen */
1036 if (ap->pflags & ATA_PFLAG_FROZEN) {
1037 spin_unlock_irqrestore(ap->lock, flags);
1038 return AC_ERR_SYSTEM;
1041 /* initialize internal qc */
1043 /* XXX: Tag 0 is used for drivers with legacy EH as some
1044 * drivers choke if any other tag is given. This breaks
1045 * ata_tag_internal() test for those drivers. Don't use new
1046 * EH stuff without converting to it.
1048 if (ap->ops->error_handler)
1049 tag = ATA_TAG_INTERNAL;
1053 if (test_and_set_bit(tag, &ap->qc_allocated))
1055 qc = __ata_qc_from_tag(ap, tag);
1063 preempted_tag = ap->active_tag;
1064 preempted_sactive = ap->sactive;
1065 preempted_qc_active = ap->qc_active;
1066 ap->active_tag = ATA_TAG_POISON;
1070 /* prepare & issue qc */
1073 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1074 qc->flags |= ATA_QCFLAG_RESULT_TF;
1075 qc->dma_dir = dma_dir;
1076 if (dma_dir != DMA_NONE) {
1077 ata_sg_init_one(qc, buf, buflen);
1078 qc->nsect = buflen / ATA_SECT_SIZE;
1081 qc->private_data = &wait;
1082 qc->complete_fn = ata_qc_complete_internal;
1086 spin_unlock_irqrestore(ap->lock, flags);
1088 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1090 ata_port_flush_task(ap);
1093 spin_lock_irqsave(ap->lock, flags);
1095 /* We're racing with irq here. If we lose, the
1096 * following test prevents us from completing the qc
1097 * twice. If we win, the port is frozen and will be
1098 * cleaned up by ->post_internal_cmd().
1100 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1101 qc->err_mask |= AC_ERR_TIMEOUT;
1103 if (ap->ops->error_handler)
1104 ata_port_freeze(ap);
1106 ata_qc_complete(qc);
1108 if (ata_msg_warn(ap))
1109 ata_dev_printk(dev, KERN_WARNING,
1110 "qc timeout (cmd 0x%x)\n", command);
1113 spin_unlock_irqrestore(ap->lock, flags);
1116 /* do post_internal_cmd */
1117 if (ap->ops->post_internal_cmd)
1118 ap->ops->post_internal_cmd(qc);
1120 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1121 if (ata_msg_warn(ap))
1122 ata_dev_printk(dev, KERN_WARNING,
1123 "zero err_mask for failed "
1124 "internal command, assuming AC_ERR_OTHER\n");
1125 qc->err_mask |= AC_ERR_OTHER;
1129 spin_lock_irqsave(ap->lock, flags);
1131 *tf = qc->result_tf;
1132 err_mask = qc->err_mask;
1135 ap->active_tag = preempted_tag;
1136 ap->sactive = preempted_sactive;
1137 ap->qc_active = preempted_qc_active;
1139 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1140 * Until those drivers are fixed, we detect the condition
1141 * here, fail the command with AC_ERR_SYSTEM and reenable the
1144 * Note that this doesn't change any behavior as internal
1145 * command failure results in disabling the device in the
1146 * higher layer for LLDDs without new reset/EH callbacks.
1148 * Kill the following code as soon as those drivers are fixed.
1150 if (ap->flags & ATA_FLAG_DISABLED) {
1151 err_mask |= AC_ERR_SYSTEM;
1155 spin_unlock_irqrestore(ap->lock, flags);
1161 * ata_do_simple_cmd - execute simple internal command
1162 * @dev: Device to which the command is sent
1163 * @cmd: Opcode to execute
1165 * Execute a 'simple' command that consists only of the opcode
1166 * 'cmd' itself, without filling any other registers.
1169 * Kernel thread context (may sleep).
1172 * Zero on success, AC_ERR_* mask on failure
1174 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1176 struct ata_taskfile tf;
1178 ata_tf_init(dev, &tf);
1181 tf.flags |= ATA_TFLAG_DEVICE;
1182 tf.protocol = ATA_PROT_NODATA;
1184 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1188 * ata_pio_need_iordy - check if iordy needed
1191 * Check if the current speed of the device requires IORDY. Used
1192 * by various controllers for chip configuration.
1195 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1198 int speed = adev->pio_mode - XFER_PIO_0;
1205 /* If we have no drive-specific rule, then PIO 2 is non-IORDY */
1207 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1208 pio = adev->id[ATA_ID_EIDE_PIO];
1209 /* Is the speed faster than the drive allows without IORDY? */
1211 /* This is cycle times not frequency - watch the logic! */
1212 if (pio > 240) /* PIO2 is 240nS per cycle */
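/*
 * Illustrative, standalone sketch (guarded out of the build): IDENTIFY
 * word 67 (ATA_ID_EIDE_PIO) is the drive's minimum PIO cycle time, in ns,
 * without flow control.  These are cycle times, not frequencies, so a
 * larger number means a slower drive: if the no-IORDY minimum is above
 * the 240 ns of PIO2, running at PIO2 speed or faster needs IORDY.
 */
#if 0
#include <assert.h>

static int ex_need_iordy(int min_cycle_no_iordy_ns)
{
	return min_cycle_no_iordy_ns > 240;	/* 240 ns = PIO2 cycle */
}

int main(void)
{
	assert(ex_need_iordy(383));	/* PIO1-class drive: IORDY needed */
	assert(!ex_need_iordy(240));	/* can do PIO2 unassisted */
	return 0;
}
#endif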
1221 * ata_dev_read_id - Read ID data from the specified device
1222 * @dev: target device
1223 * @p_class: pointer to class of the target device (may be changed)
1224 * @post_reset: is this read ID post-reset?
1225 * @id: buffer to read IDENTIFY data into
1227 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1228 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1229 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1230 * for pre-ATA4 drives.
1233 * Kernel thread context (may sleep)
1236 * 0 on success, -errno otherwise.
1238 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1239 int post_reset, u16 *id)
1241 struct ata_port *ap = dev->ap;
1242 unsigned int class = *p_class;
1243 struct ata_taskfile tf;
1244 unsigned int err_mask = 0;
1248 if (ata_msg_ctl(ap))
1249 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1250 __FUNCTION__, ap->id, dev->devno);
1252 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1255 ata_tf_init(dev, &tf);
1259 tf.command = ATA_CMD_ID_ATA;
1262 tf.command = ATA_CMD_ID_ATAPI;
1266 reason = "unsupported class";
1270 tf.protocol = ATA_PROT_PIO;
1272 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1273 id, sizeof(id[0]) * ATA_ID_WORDS);
1276 reason = "I/O error";
1280 swap_buf_le16(id, ATA_ID_WORDS);
1284 reason = "device reports illegal type";
1286 if (class == ATA_DEV_ATA) {
1287 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1290 if (ata_id_is_ata(id))
1294 if (post_reset && class == ATA_DEV_ATA) {
1296 * The exact sequence expected by certain pre-ATA4 drives is:
1299 * INITIALIZE DEVICE PARAMETERS
1301 * Some drives were very specific about that exact sequence.
1303 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1304 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1307 reason = "INIT_DEV_PARAMS failed";
1311 /* current CHS translation info (id[53-58]) might be
1312 * changed. Reread the IDENTIFY DEVICE info.
1324 if (ata_msg_warn(ap))
1325 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1326 "(%s, err_mask=0x%x)\n", reason, err_mask);
1330 static inline u8 ata_dev_knobble(struct ata_device *dev)
1332 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1335 static void ata_dev_config_ncq(struct ata_device *dev,
1336 char *desc, size_t desc_sz)
1338 struct ata_port *ap = dev->ap;
1339 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1341 if (!ata_id_has_ncq(dev->id)) {
1346 if (ap->flags & ATA_FLAG_NCQ) {
1347 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1348 dev->flags |= ATA_DFLAG_NCQ;
1351 if (hdepth >= ddepth)
1352 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1354 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1357 static void ata_set_port_max_cmd_len(struct ata_port *ap)
1361 if (ap->scsi_host) {
1362 unsigned int len = 0;
1364 for (i = 0; i < ATA_MAX_DEVICES; i++)
1365 len = max(len, ap->device[i].cdb_len);
1367 ap->scsi_host->max_cmd_len = len;
1372 * ata_dev_configure - Configure the specified ATA/ATAPI device
1373 * @dev: Target device to configure
1374 * @print_info: Enable device info printout
1376 * Configure @dev according to @dev->id. Generic and low-level
1377 * driver specific fixups are also applied.
1380 * Kernel thread context (may sleep)
1383 * 0 on success, -errno otherwise
1385 int ata_dev_configure(struct ata_device *dev, int print_info)
1387 struct ata_port *ap = dev->ap;
1388 const u16 *id = dev->id;
1389 unsigned int xfer_mask;
1390 char revbuf[7]; /* XYZ-99\0 */
1393 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1394 ata_dev_printk(dev, KERN_INFO,
1395 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1396 __FUNCTION__, ap->id, dev->devno);
1400 if (ata_msg_probe(ap))
1401 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1402 __FUNCTION__, ap->id, dev->devno);
1404 /* print device capabilities */
1405 if (ata_msg_probe(ap))
1406 ata_dev_printk(dev, KERN_DEBUG,
1407 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1408 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1410 id[49], id[82], id[83], id[84],
1411 id[85], id[86], id[87], id[88]);
1413 /* initialize to-be-configured parameters */
1414 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1415 dev->max_sectors = 0;
1423 * common ATA, ATAPI feature tests
1426 /* find max transfer mode; for printk only */
1427 xfer_mask = ata_id_xfermask(id);
1429 if (ata_msg_probe(ap))
1432 /* ATA-specific feature tests */
1433 if (dev->class == ATA_DEV_ATA) {
1434 if (ata_id_is_cfa(id)) {
1435 if (id[162] & 1) /* CPRM may make this media unusable */
1436 ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessible.\n",
1437 ap->id, dev->devno);
1438 snprintf(revbuf, 7, "CFA");
1441 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1443 dev->n_sectors = ata_id_n_sectors(id);
1445 if (ata_id_has_lba(id)) {
1446 const char *lba_desc;
1450 dev->flags |= ATA_DFLAG_LBA;
1451 if (ata_id_has_lba48(id)) {
1452 dev->flags |= ATA_DFLAG_LBA48;
1457 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1459 /* print device info to dmesg */
1460 if (ata_msg_drv(ap) && print_info)
1461 ata_dev_printk(dev, KERN_INFO, "%s, "
1462 "max %s, %Lu sectors: %s %s\n",
1464 ata_mode_string(xfer_mask),
1465 (unsigned long long)dev->n_sectors,
1466 lba_desc, ncq_desc);
1470 /* Default translation */
1471 dev->cylinders = id[1];
1473 dev->sectors = id[6];
1475 if (ata_id_current_chs_valid(id)) {
1476 /* Current CHS translation is valid. */
1477 dev->cylinders = id[54];
1478 dev->heads = id[55];
1479 dev->sectors = id[56];
1482 /* print device info to dmesg */
1483 if (ata_msg_drv(ap) && print_info)
1484 ata_dev_printk(dev, KERN_INFO, "%s, "
1485 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1487 ata_mode_string(xfer_mask),
1488 (unsigned long long)dev->n_sectors,
1489 dev->cylinders, dev->heads,
1493 if (dev->id[59] & 0x100) {
1494 dev->multi_count = dev->id[59] & 0xff;
1495 if (ata_msg_drv(ap) && print_info)
1496 ata_dev_printk(dev, KERN_INFO,
1497 "ata%u: dev %u multi count %u\n",
1498 ap->id, dev->devno, dev->multi_count);
1504 /* ATAPI-specific feature tests */
1505 else if (dev->class == ATA_DEV_ATAPI) {
1506 char *cdb_intr_string = "";
1508 rc = atapi_cdb_len(id);
1509 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1510 if (ata_msg_warn(ap))
1511 ata_dev_printk(dev, KERN_WARNING,
1512 "unsupported CDB len\n");
1516 dev->cdb_len = (unsigned int) rc;
1518 if (ata_id_cdb_intr(dev->id)) {
1519 dev->flags |= ATA_DFLAG_CDB_INTR;
1520 cdb_intr_string = ", CDB intr";
1523 /* print device info to dmesg */
1524 if (ata_msg_drv(ap) && print_info)
1525 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1526 ata_mode_string(xfer_mask),
1530 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
1531 /* Let the user know. We don't want to disallow opens for
1532 rescue purposes, or in case the vendor is just a blithering idiot. */
1535 ata_dev_printk(dev, KERN_WARNING,
1536 "Drive reports diagnostics failure. This may indicate a drive\n");
1537 ata_dev_printk(dev, KERN_WARNING,
1538 "fault or invalid emulation. Contact drive vendor for information.\n");
1542 ata_set_port_max_cmd_len(ap);
1544 /* limit bridge transfers to udma5, 200 sectors */
1545 if (ata_dev_knobble(dev)) {
1546 if (ata_msg_drv(ap) && print_info)
1547 ata_dev_printk(dev, KERN_INFO,
1548 "applying bridge limits\n");
1549 dev->udma_mask &= ATA_UDMA5;
1550 dev->max_sectors = ATA_MAX_SECTORS;
1553 if (ap->ops->dev_config)
1554 ap->ops->dev_config(ap, dev);
1556 if (ata_msg_probe(ap))
1557 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1558 __FUNCTION__, ata_chk_status(ap));
1562 if (ata_msg_probe(ap))
1563 ata_dev_printk(dev, KERN_DEBUG,
1564 "%s: EXIT, err\n", __FUNCTION__);
1569 * ata_bus_probe - Reset and probe ATA bus
1572 * Master ATA bus probing function. Initiates a hardware-dependent
1573 * bus reset, then attempts to identify any devices found on
1577 * PCI/etc. bus probe sem.
1580 * Zero on success, negative errno otherwise.
1583 int ata_bus_probe(struct ata_port *ap)
1585 unsigned int classes[ATA_MAX_DEVICES];
1586 int tries[ATA_MAX_DEVICES];
1587 int i, rc, down_xfermask;
1588 struct ata_device *dev;
1592 for (i = 0; i < ATA_MAX_DEVICES; i++)
1593 tries[i] = ATA_PROBE_MAX_TRIES;
1598 /* reset and determine device classes */
1599 ap->ops->phy_reset(ap);
1601 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1602 dev = &ap->device[i];
1604 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1605 dev->class != ATA_DEV_UNKNOWN)
1606 classes[dev->devno] = dev->class;
1608 classes[dev->devno] = ATA_DEV_NONE;
1610 dev->class = ATA_DEV_UNKNOWN;
1615 /* after the reset the device state is PIO 0 and the controller
1616 state is undefined. Record the mode */
1618 for (i = 0; i < ATA_MAX_DEVICES; i++)
1619 ap->device[i].pio_mode = XFER_PIO_0;
1621 /* read IDENTIFY page and configure devices */
1622 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1623 dev = &ap->device[i];
1626 dev->class = classes[i];
1628 if (!ata_dev_enabled(dev))
1631 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1635 rc = ata_dev_configure(dev, 1);
1640 /* configure transfer mode */
1641 rc = ata_set_mode(ap, &dev);
1647 for (i = 0; i < ATA_MAX_DEVICES; i++)
1648 if (ata_dev_enabled(&ap->device[i]))
1651 /* no device present, disable port */
1652 ata_port_disable(ap);
1653 ap->ops->port_disable(ap);
1660 tries[dev->devno] = 0;
1663 sata_down_spd_limit(ap);
1666 tries[dev->devno]--;
1667 if (down_xfermask &&
1668 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1669 tries[dev->devno] = 0;
1672 if (!tries[dev->devno]) {
1673 ata_down_xfermask_limit(dev, 1);
1674 ata_dev_disable(dev);
1681 * ata_port_probe - Mark port as enabled
1682 * @ap: Port for which we indicate enablement
1684 * Modify @ap data structure such that the system
1685 * thinks that the entire port is enabled.
1687 * LOCKING: host lock, or some other form of
1691 void ata_port_probe(struct ata_port *ap)
1693 ap->flags &= ~ATA_FLAG_DISABLED;
1697 * sata_print_link_status - Print SATA link status
1698 * @ap: SATA port to printk link status about
1700 * This function prints link speed and status of a SATA link.
1705 static void sata_print_link_status(struct ata_port *ap)
1707 u32 sstatus, scontrol, tmp;
1709 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1711 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1713 if (ata_port_online(ap)) {
1714 tmp = (sstatus >> 4) & 0xf;
1715 ata_port_printk(ap, KERN_INFO,
1716 "SATA link up %s (SStatus %X SControl %X)\n",
1717 sata_spd_string(tmp), sstatus, scontrol);
1719 ata_port_printk(ap, KERN_INFO,
1720 "SATA link down (SStatus %X SControl %X)\n",
1726 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1727 * @ap: SATA port associated with target SATA PHY.
1729 * This function issues commands to standard SATA Sxxx
1730 * PHY registers, to wake up the phy (and device), and
1731 * clear any reset condition.
1734 * PCI/etc. bus probe sem.
1737 void __sata_phy_reset(struct ata_port *ap)
1740 unsigned long timeout = jiffies + (HZ * 5);
1742 if (ap->flags & ATA_FLAG_SATA_RESET) {
1743 /* issue phy wake/reset */
1744 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1745 /* Couldn't find anything in SATA I/II specs, but
1746 * AHCI-1.1 10.4.2 says at least 1 ms. */
1749 /* phy wake/clear reset */
1750 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1752 /* wait for phy to become ready, if necessary */
1755 sata_scr_read(ap, SCR_STATUS, &sstatus);
1756 if ((sstatus & 0xf) != 1)
1758 } while (time_before(jiffies, timeout));
1760 /* print link status */
1761 sata_print_link_status(ap);
1763 /* TODO: phy layer with polling, timeouts, etc. */
1764 if (!ata_port_offline(ap))
1767 ata_port_disable(ap);
1769 if (ap->flags & ATA_FLAG_DISABLED)
1772 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1773 ata_port_disable(ap);
1777 ap->cbl = ATA_CBL_SATA;
1781 * sata_phy_reset - Reset SATA bus.
1782 * @ap: SATA port associated with target SATA PHY.
1784 * This function resets the SATA bus, and then probes
1785 * the bus for devices.
1788 * PCI/etc. bus probe sem.
1791 void sata_phy_reset(struct ata_port *ap)
1793 __sata_phy_reset(ap);
1794 if (ap->flags & ATA_FLAG_DISABLED)
1800 * ata_dev_pair - return other device on cable
1803 * Obtain the other device on the same cable, or if none is
1804 * present, NULL is returned.
1807 struct ata_device *ata_dev_pair(struct ata_device *adev)
1809 struct ata_port *ap = adev->ap;
1810 struct ata_device *pair = &ap->device[1 - adev->devno];
1811 if (!ata_dev_enabled(pair))
1817 * ata_port_disable - Disable port.
1818 * @ap: Port to be disabled.
1820 * Modify @ap data structure such that the system
1821 * thinks that the entire port is disabled, and should
1822 * never attempt to probe or communicate with devices
1825 * LOCKING: host lock, or some other form of
1829 void ata_port_disable(struct ata_port *ap)
1831 ap->device[0].class = ATA_DEV_NONE;
1832 ap->device[1].class = ATA_DEV_NONE;
1833 ap->flags |= ATA_FLAG_DISABLED;
1837 * sata_down_spd_limit - adjust SATA spd limit downward
1838 * @ap: Port to adjust SATA spd limit for
1840 * Adjust SATA spd limit of @ap downward. Note that this
1841 * function only adjusts the limit. The change must be applied
1842 * using sata_set_spd().
1845 * Inherited from caller.
1848 * 0 on success, negative errno on failure
1850 int sata_down_spd_limit(struct ata_port *ap)
1852 u32 sstatus, spd, mask;
1855 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1859 mask = ap->sata_spd_limit;
1862 highbit = fls(mask) - 1;
1863 mask &= ~(1 << highbit);
1865 spd = (sstatus >> 4) & 0xf;
1869 mask &= (1 << spd) - 1;
1873 ap->sata_spd_limit = mask;
1875 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1876 sata_spd_string(fls(mask)));
1881 static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1885 if (ap->sata_spd_limit == UINT_MAX)
1888 limit = fls(ap->sata_spd_limit);
1890 spd = (*scontrol >> 4) & 0xf;
1891 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1893 return spd != limit;
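/*
 * Illustrative, standalone sketch (guarded out of the build): bits 7:4 of
 * SControl hold the allowed-speed limit (0 = no limit, 1 = Gen1, 2 = Gen2),
 * and the update above simply replaces that nibble with the new limit.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t scontrol = 0x300;	/* IPM bits set, no speed limit */
	unsigned int limit = 1;		/* cap at 1.5 Gbps */

	scontrol = (scontrol & ~0xf0u) | ((limit & 0xfu) << 4);
	assert(scontrol == 0x310);
	assert(((scontrol >> 4) & 0xf) == limit);
	return 0;
}
#endif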
1897 * sata_set_spd_needed - is SATA spd configuration needed
1898 * @ap: Port in question
1900 * Test whether the spd limit in SControl matches
1901 * @ap->sata_spd_limit. This function is used to determine
1902 * whether hardreset is necessary to apply SATA spd
1906 * Inherited from caller.
1909 * 1 if SATA spd configuration is needed, 0 otherwise.
1911 int sata_set_spd_needed(struct ata_port *ap)
1915 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1918 return __sata_set_spd_needed(ap, &scontrol);
1922 * sata_set_spd - set SATA spd according to spd limit
1923 * @ap: Port to set SATA spd for
1925 * Set SATA spd of @ap according to sata_spd_limit.
1928 * Inherited from caller.
1931 * 0 if spd doesn't need to be changed, 1 if spd has been
1932 * changed. Negative errno if SCR registers are inaccessible.
1934 int sata_set_spd(struct ata_port *ap)
1939 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1942 if (!__sata_set_spd_needed(ap, &scontrol))
1945 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1952 * This mode timing computation functionality is ported over from
1953 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1956 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1957 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1958 * for UDMA6, which is currently supported only by Maxtor drives.
1960 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
1963 static const struct ata_timing ata_timing[] = {
1965 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1966 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1967 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1968 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1970 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
1971 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
1972 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1973 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1974 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1976 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1978 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1979 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1980 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1982 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1983 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1984 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1986 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
1987 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
1988 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1989 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1991 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1992 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1993 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1995 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2000 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2001 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2003 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2005 q->setup = EZ(t->setup * 1000, T);
2006 q->act8b = EZ(t->act8b * 1000, T);
2007 q->rec8b = EZ(t->rec8b * 1000, T);
2008 q->cyc8b = EZ(t->cyc8b * 1000, T);
2009 q->active = EZ(t->active * 1000, T);
2010 q->recover = EZ(t->recover * 1000, T);
2011 q->cycle = EZ(t->cycle * 1000, T);
2012 q->udma = EZ(t->udma * 1000, UT);
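/*
 * Illustrative, standalone sketch (guarded out of the build): ENOUGH() is
 * a round-up division, so quantizing multiplies each nanosecond figure by
 * 1000 and divides by the clock period T (in picoseconds, as callers
 * typically pass), always rounding toward more clocks; EZ() additionally
 * keeps absent (zero) entries at zero.  For example, a 70 ns setup on a
 * ~33 MHz clock (T = 30000 ps) needs 3 cycles.
 */
#if 0
#include <assert.h>

#define EX_ENOUGH(v, unit)	(((v) - 1) / (unit) + 1)
#define EX_EZ(v, unit)		((v) ? EX_ENOUGH(v, unit) : 0)

int main(void)
{
	int T = 30000;				/* ps per clock @ ~33 MHz */

	assert(EX_EZ(70 * 1000, T) == 3);	/* 70 ns -> 3 clocks */
	assert(EX_EZ(0 * 1000, T) == 0);	/* absent field stays 0 */
	return 0;
}
#endif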
2015 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2016 struct ata_timing *m, unsigned int what)
2018 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2019 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2020 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2021 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2022 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2023 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2024 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2025 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2028 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2030 const struct ata_timing *t;
2032 for (t = ata_timing; t->mode != speed; t++)
2033 if (t->mode == 0xFF)
2038 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2039 struct ata_timing *t, int T, int UT)
2041 const struct ata_timing *s;
2042 struct ata_timing p;
2048 if (!(s = ata_timing_find_mode(speed)))
2051 memcpy(t, s, sizeof(*s));
2054 * If the drive is an EIDE drive, it can tell us it needs extended
2055 * PIO/MW_DMA cycle timing.
2058 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2059 memset(&p, 0, sizeof(p));
2060 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2061 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2062 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2063 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2064 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2066 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2070 * Convert the timing to bus clock counts.
2073 ata_timing_quantize(t, t, T, UT);
2076 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2077 * S.M.A.R.T. and some other commands. We have to ensure that the
2078 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2081 if (speed > XFER_PIO_4) {
2082 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2083 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2087 * Lengthen active & recovery time so that cycle time is correct.
2090 if (t->act8b + t->rec8b < t->cyc8b) {
2091 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2092 t->rec8b = t->cyc8b - t->act8b;
2095 if (t->active + t->recover < t->cycle) {
2096 t->active += (t->cycle - (t->active + t->recover)) / 2;
2097 t->recover = t->cycle - t->active;
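/*
 * Illustrative, standalone sketch (guarded out of the build): after the
 * adjustment above, active + recovery exactly covers the cycle; the slack
 * is split between the two, with any odd clock going to recovery.
 */
#if 0
#include <assert.h>

int main(void)
{
	int active = 2, recover = 2, cycle = 7;

	if (active + recover < cycle) {
		active += (cycle - (active + recover)) / 2;
		recover = cycle - active;
	}
	assert(active == 3 && recover == 4);
	assert(active + recover == cycle);
	return 0;
}
#endif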
2104 * ata_down_xfermask_limit - adjust dev xfer masks downward
2105 * @dev: Device to adjust xfer masks
2106 * @force_pio0: Force PIO0
2108 * Adjust xfer masks of @dev downward. Note that this function
2109 * does not apply the change. Invoking ata_set_mode() afterwards
2110 * will apply the limit.
2113 * Inherited from caller.
2116 * 0 on success, negative errno on failure
2118 int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2120 unsigned long xfer_mask;
2123 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2128 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2129 if (xfer_mask & ATA_MASK_UDMA)
2130 xfer_mask &= ~ATA_MASK_MWDMA;
2132 highbit = fls(xfer_mask) - 1;
2133 xfer_mask &= ~(1 << highbit);
2135 xfer_mask &= 1 << ATA_SHIFT_PIO;
2139 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2142 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2143 ata_mode_string(xfer_mask));
2151 static int ata_dev_set_mode(struct ata_device *dev)
2153 unsigned int err_mask;
2156 dev->flags &= ~ATA_DFLAG_PIO;
2157 if (dev->xfer_shift == ATA_SHIFT_PIO)
2158 dev->flags |= ATA_DFLAG_PIO;
2160 err_mask = ata_dev_set_xfermode(dev);
2162 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2163 "(err_mask=0x%x)\n", err_mask);
2167 rc = ata_dev_revalidate(dev, 0);
2171 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2172 dev->xfer_shift, (int)dev->xfer_mode);
2174 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2175 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2180 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2181 * @ap: port on which timings will be programmed
2182 * @r_failed_dev: out parameter for failed device
2184 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2185 * ata_set_mode() fails, pointer to the failing device is
2186 * returned in @r_failed_dev.
2189 * PCI/etc. bus probe sem.
2192 * 0 on success, negative errno otherwise
2194 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2196 struct ata_device *dev;
2197 int i, rc = 0, used_dma = 0, found = 0;
2199 /* has private set_mode? */
2200 if (ap->ops->set_mode) {
2201 /* FIXME: make ->set_mode handle no device case and
2202 * return error code and failing device on failure.
2204 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2205 if (ata_dev_ready(&ap->device[i])) {
2206 ap->ops->set_mode(ap);
2213 /* step 1: calculate xfer_mask */
2214 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2215 unsigned int pio_mask, dma_mask;
2217 dev = &ap->device[i];
2219 if (!ata_dev_enabled(dev))
2222 ata_dev_xfermask(dev);
2224 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2225 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2226 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2227 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2236 /* step 2: always set host PIO timings */
2237 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2238 dev = &ap->device[i];
2239 if (!ata_dev_enabled(dev))
2242 if (!dev->pio_mode) {
2243 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2248 dev->xfer_mode = dev->pio_mode;
2249 dev->xfer_shift = ATA_SHIFT_PIO;
2250 if (ap->ops->set_piomode)
2251 ap->ops->set_piomode(ap, dev);
2254 /* step 3: set host DMA timings */
2255 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2256 dev = &ap->device[i];
2258 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2261 dev->xfer_mode = dev->dma_mode;
2262 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2263 if (ap->ops->set_dmamode)
2264 ap->ops->set_dmamode(ap, dev);
2267 /* step 4: update devices' xfer mode */
2268 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2269 dev = &ap->device[i];
2271 /* don't update suspended devices' xfer mode */
2272 if (!ata_dev_ready(dev))
2275 rc = ata_dev_set_mode(dev);
2280 /* Record simplex status. If we selected DMA then the other
2281 * host channels are not permitted to do so.
2283 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2284 ap->host->simplex_claimed = 1;
2286 /* step 5: chip-specific finalisation */
2287 if (ap->ops->post_set_mode)
2288 ap->ops->post_set_mode(ap);
2292 *r_failed_dev = dev;
2297 * ata_tf_to_host - issue ATA taskfile to host controller
2298 * @ap: port to which command is being issued
2299 * @tf: ATA taskfile register set
2301 * Issues ATA taskfile register set to ATA host controller,
2302 * with proper synchronization with interrupt handler and
2306 * spin_lock_irqsave(host lock)
2309 static inline void ata_tf_to_host(struct ata_port *ap,
2310 const struct ata_taskfile *tf)
2312 ap->ops->tf_load(ap, tf);
2313 ap->ops->exec_command(ap, tf);
2317 * ata_busy_sleep - sleep until BSY clears, or timeout
2318 * @ap: port containing status register to be polled
2319 * @tmout_pat: impatience timeout
2320 * @tmout: overall timeout
2322 * Sleep until ATA Status register bit BSY clears,
2323 * or a timeout occurs.
2328 unsigned int ata_busy_sleep (struct ata_port *ap,
2329 unsigned long tmout_pat, unsigned long tmout)
2331 unsigned long timer_start, timeout;
2334 status = ata_busy_wait(ap, ATA_BUSY, 300);
2335 timer_start = jiffies;
2336 timeout = timer_start + tmout_pat;
2337 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2339 status = ata_busy_wait(ap, ATA_BUSY, 3);
2342 if (status & ATA_BUSY)
2343 ata_port_printk(ap, KERN_WARNING,
2344 "port is slow to respond, please be patient "
2345 "(Status 0x%x)\n", status);
2347 timeout = timer_start + tmout;
2348 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2350 status = ata_chk_status(ap);
2353 if (status & ATA_BUSY) {
2354 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2355 "(%lu secs, Status 0x%x)\n",
2356 tmout / HZ, status);
2363 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2365 struct ata_ioports *ioaddr = &ap->ioaddr;
2366 unsigned int dev0 = devmask & (1 << 0);
2367 unsigned int dev1 = devmask & (1 << 1);
2368 unsigned long timeout;
2370 /* if device 0 was found in ata_devchk, wait for its
2374 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2376 /* if device 1 was found in ata_devchk, wait for
2377 * register access, then wait for BSY to clear
2379 timeout = jiffies + ATA_TMOUT_BOOT;
2383 ap->ops->dev_select(ap, 1);
2384 if (ap->flags & ATA_FLAG_MMIO) {
2385 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2386 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2388 nsect = inb(ioaddr->nsect_addr);
2389 lbal = inb(ioaddr->lbal_addr);
2391 if ((nsect == 1) && (lbal == 1))
2393 if (time_after(jiffies, timeout)) {
2397 msleep(50); /* give drive a breather */
2400 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2402 /* is all this really necessary? */
2403 ap->ops->dev_select(ap, 0);
2405 ap->ops->dev_select(ap, 1);
2407 ap->ops->dev_select(ap, 0);
2410 static unsigned int ata_bus_softreset(struct ata_port *ap,
2411 unsigned int devmask)
2413 struct ata_ioports *ioaddr = &ap->ioaddr;
2415 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2417 /* software reset. causes dev0 to be selected */
2418 if (ap->flags & ATA_FLAG_MMIO) {
2419 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2420 udelay(20); /* FIXME: flush */
2421 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2422 udelay(20); /* FIXME: flush */
2423 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2425 outb(ap->ctl, ioaddr->ctl_addr);
2427 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2429 outb(ap->ctl, ioaddr->ctl_addr);
2432 /* spec mandates ">= 2ms" before checking status.
2433 * We wait 150ms, because that was the magic delay used for
2434 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2435 * between when the ATA command register is written, and then
2436 * status is checked. Because waiting for "a while" before
2437 * checking status is fine, post SRST, we perform this magic
2438 * delay here as well.
2440 * The old drivers/ide code uses the 2 ms rule and then waits for ready
2444 /* Before we perform post reset processing we want to see if
2445 * the bus shows 0xFF because the odd clown forgets the D7
2446 * pulldown resistor.
2448 if (ata_check_status(ap) == 0xFF) {
2449 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2450 return AC_ERR_OTHER;
2453 ata_bus_post_reset(ap, devmask);
2459 * ata_bus_reset - reset host port and associated ATA channel
2460 * @ap: port to reset
2462 * This is typically the first time we actually start issuing
2463 * commands to the ATA channel. We wait for BSY to clear, then
2464 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2465 * result. Determine what devices, if any, are on the channel
2466 * by looking at the device 0/1 error register. Look at the signature
2467 * stored in each device's taskfile registers, to determine if
2468 * the device is ATA or ATAPI.
2471 * PCI/etc. bus probe sem.
2472 * Obtains host lock.
2475 * Sets ATA_FLAG_DISABLED if bus reset fails.
2478 void ata_bus_reset(struct ata_port *ap)
2480 struct ata_ioports *ioaddr = &ap->ioaddr;
2481 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2483 unsigned int dev0, dev1 = 0, devmask = 0;
2485 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2487 /* determine if device 0/1 are present */
2488 if (ap->flags & ATA_FLAG_SATA_RESET)
2491 dev0 = ata_devchk(ap, 0);
2493 dev1 = ata_devchk(ap, 1);
2497 devmask |= (1 << 0);
2499 devmask |= (1 << 1);
2501 /* select device 0 again */
2502 ap->ops->dev_select(ap, 0);
2504 /* issue bus reset */
2505 if (ap->flags & ATA_FLAG_SRST)
2506 if (ata_bus_softreset(ap, devmask))
2510 * determine by signature whether we have ATA or ATAPI devices
2512 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2513 if ((slave_possible) && (err != 0x81))
2514 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2516 /* re-enable interrupts */
2517 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2520 /* is double-select really necessary? */
2521 if (ap->device[1].class != ATA_DEV_NONE)
2522 ap->ops->dev_select(ap, 1);
2523 if (ap->device[0].class != ATA_DEV_NONE)
2524 ap->ops->dev_select(ap, 0);
2526 /* if no devices were detected, disable this port */
2527 if ((ap->device[0].class == ATA_DEV_NONE) &&
2528 (ap->device[1].class == ATA_DEV_NONE))
2531 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2532 /* set up device control for ATA_FLAG_SATA_RESET */
2533 if (ap->flags & ATA_FLAG_MMIO)
2534 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2536 outb(ap->ctl, ioaddr->ctl_addr);
2543 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2544 ap->ops->port_disable(ap);
2550 * sata_phy_debounce - debounce SATA phy status
2551 * @ap: ATA port to debounce SATA phy status for
2552 * @params: timing parameters { interval, duration, timeout } in msec
2554 * Make sure SStatus of @ap reaches stable state, determined by
2555 * holding the same value where DET is not 1 for @duration polled
2556 * every @interval, before @timeout. Timeout constrains the
2557 * beginning of the stable state. Because, after hot unplugging,
2558 * DET gets stuck at 1 on some controllers, this function waits
2559 * until the timeout and then returns 0 if DET is stable at 1.
2562 * Kernel thread context (may sleep)
2565 * 0 on success, -errno on failure.
2567 int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2569 unsigned long interval_msec = params[0];
2570 unsigned long duration = params[1] * HZ / 1000;
2571 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2572 unsigned long last_jiffies;
2576 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2581 last_jiffies = jiffies;
2584 msleep(interval_msec);
2585 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2591 if (cur == 1 && time_before(jiffies, timeout))
2593 if (time_after(jiffies, last_jiffies + duration))
2598 /* unstable, start over */
2600 last_jiffies = jiffies;
2603 if (time_after(jiffies, timeout))
2609 * sata_phy_resume - resume SATA phy
2610 * @ap: ATA port to resume SATA phy for
2611 * @params: timing parameters { interval, duration, timeout } in msec
2613 * Resume SATA phy of @ap and debounce it.
2616 * Kernel thread context (may sleep)
2619 * 0 on success, -errno on failure.
2621 int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2626 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2629 scontrol = (scontrol & 0x0f0) | 0x300;
2631 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2634 /* Some PHYs react badly if SStatus is pounded immediately
2635 * after resuming. Delay 200ms before debouncing.
2639 return sata_phy_debounce(ap, params);
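/*
 * A usage sketch (not part of the driver): an LLDD reset path might
 * resume and debounce the phy with one of the exported timing tables
 * before touching the taskfile.  my_prereset() is a hypothetical hook.
 */
static int my_prereset(struct ata_port *ap)
{
	int rc;

	/* bring the phy up and wait for SStatus to become stable */
	rc = sata_phy_resume(ap, sata_deb_timing_normal);
	if (rc && rc != -EOPNOTSUPP)
		return rc;	/* SCR access failed */

	return 0;
}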
2642 static void ata_wait_spinup(struct ata_port *ap)
2644 struct ata_eh_context *ehc = &ap->eh_context;
2645 unsigned long end, secs;
2648 /* first, debounce phy if SATA */
2649 if (ap->cbl == ATA_CBL_SATA) {
2650 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
2652 /* if debounced successfully and offline, no need to wait */
2653 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2657 /* okay, let's give the drive time to spin up */
2658 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2659 secs = ((end - jiffies) + HZ - 1) / HZ;
2661 if (time_after(jiffies, end))
2665 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2666 "(%lu secs)\n", secs);
2668 schedule_timeout_uninterruptible(end - jiffies);
2672 * ata_std_prereset - prepare for reset
2673 * @ap: ATA port to be reset
2675 * @ap is about to be reset. Initialize it.
2678 * Kernel thread context (may sleep)
2681 * 0 on success, -errno otherwise.
2683 int ata_std_prereset(struct ata_port *ap)
2685 struct ata_eh_context *ehc = &ap->eh_context;
2686 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2689 /* handle link resume & hotplug spinup */
2690 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2691 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2692 ehc->i.action |= ATA_EH_HARDRESET;
2694 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2695 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2696 ata_wait_spinup(ap);
2698 /* if we're about to do hardreset, nothing more to do */
2699 if (ehc->i.action & ATA_EH_HARDRESET)
2702 /* if SATA, resume phy */
2703 if (ap->cbl == ATA_CBL_SATA) {
2704 rc = sata_phy_resume(ap, timing);
2705 if (rc && rc != -EOPNOTSUPP) {
2706 /* phy resume failed */
2707 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2708 "link for reset (errno=%d)\n", rc);
2713 /* Wait for !BSY if the controller can wait for the first D2H
2714 * Reg FIS and we don't know that no device is attached.
2716 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2717 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2723 * ata_std_softreset - reset host port via ATA SRST
2724 * @ap: port to reset
2725 * @classes: resulting classes of attached devices
2727 * Reset host port using ATA SRST.
2730 * Kernel thread context (may sleep)
2733 * 0 on success, -errno otherwise.
2735 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2737 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2738 unsigned int devmask = 0, err_mask;
2743 if (ata_port_offline(ap)) {
2744 classes[0] = ATA_DEV_NONE;
2748 /* determine if device 0/1 are present */
2749 if (ata_devchk(ap, 0))
2750 devmask |= (1 << 0);
2751 if (slave_possible && ata_devchk(ap, 1))
2752 devmask |= (1 << 1);
2754 /* select device 0 again */
2755 ap->ops->dev_select(ap, 0);
2757 /* issue bus reset */
2758 DPRINTK("about to softreset, devmask=%x\n", devmask);
2759 err_mask = ata_bus_softreset(ap, devmask);
2761 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2766 /* determine by signature whether we have ATA or ATAPI devices */
2767 classes[0] = ata_dev_try_classify(ap, 0, &err);
2768 if (slave_possible && err != 0x81)
2769 classes[1] = ata_dev_try_classify(ap, 1, &err);
2772 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
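/*
 * For reference, classification by signature works as follows: after
 * reset, a device leaves a signature in the taskfile, and the LBA
 * mid/high registers are sufficient to tell ATA from ATAPI.  A sketch
 * of the check that ata_dev_try_classify() relies on:
 */
static unsigned int my_classify_by_sig(const struct ata_taskfile *tf)
{
	if (tf->lbam == 0x00 && tf->lbah == 0x00)
		return ATA_DEV_ATA;	/* ATA signature */
	if (tf->lbam == 0x14 && tf->lbah == 0xeb)
		return ATA_DEV_ATAPI;	/* ATAPI signature */
	return ATA_DEV_UNKNOWN;
}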
2777 * sata_std_hardreset - reset host port via SATA phy reset
2778 * @ap: port to reset
2779 * @class: resulting class of attached device
2781 * SATA phy-reset host port using DET bits of SControl register.
2784 * Kernel thread context (may sleep)
2787 * 0 on success, -errno otherwise.
2789 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2791 struct ata_eh_context *ehc = &ap->eh_context;
2792 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2798 if (sata_set_spd_needed(ap)) {
2799 /* SATA spec says nothing about how to reconfigure
2800 * spd. To be on the safe side, turn off phy during
2801 * reconfiguration. This works for at least ICH7 AHCI
2804 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2807 scontrol = (scontrol & 0x0f0) | 0x304;
2809 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2815 /* issue phy wake/reset */
2816 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2819 scontrol = (scontrol & 0x0f0) | 0x301;
2821 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2824 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2825 * 10.4.2 says at least 1 ms.
2829 /* bring phy back */
2830 sata_phy_resume(ap, timing);
2832 /* TODO: phy layer with polling, timeouts, etc. */
2833 if (ata_port_offline(ap)) {
2834 *class = ATA_DEV_NONE;
2835 DPRINTK("EXIT, link offline\n");
2839 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2840 ata_port_printk(ap, KERN_ERR,
2841 "COMRESET failed (device not ready)\n");
2845 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2847 *class = ata_dev_try_classify(ap, 0, NULL);
2849 DPRINTK("EXIT, class=%u\n", *class);
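/*
 * SControl layout assumed above: bits 3:0 DET, 7:4 SPD, 11:8 IPM.
 * "(scontrol & 0x0f0) | 0x301" keeps the current speed limit (SPD)
 * while setting DET=1 (issue COMRESET) and IPM=3 (disallow power
 * management transitions); 0x304 instead sets DET=4 (phy offline),
 * and 0x300 releases DET so the phy comes back up.  Sketch:
 */
static inline u32 my_scontrol_comreset(u32 scontrol)
{
	return (scontrol & 0x0f0) | 0x301;	/* keep SPD; DET=1, IPM=3 */
}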
2854 * ata_std_postreset - standard postreset callback
2855 * @ap: the target ata_port
2856 * @classes: classes of attached devices
2858 * This function is invoked after a successful reset. Note that
2859 * the device might have been reset more than once using
2860 * different reset methods before postreset is invoked.
2863 * Kernel thread context (may sleep)
2865 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2871 /* print link status */
2872 sata_print_link_status(ap);
2875 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2876 sata_scr_write(ap, SCR_ERROR, serror);
2878 /* re-enable interrupts */
2879 if (!ap->ops->error_handler) {
2880 /* FIXME: hack. create a hook instead */
2881 if (ap->ioaddr.ctl_addr)
2885 /* is double-select really necessary? */
2886 if (classes[0] != ATA_DEV_NONE)
2887 ap->ops->dev_select(ap, 1);
2888 if (classes[1] != ATA_DEV_NONE)
2889 ap->ops->dev_select(ap, 0);
2891 /* bail out if no device is present */
2892 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2893 DPRINTK("EXIT, no device\n");
2897 /* set up device control */
2898 if (ap->ioaddr.ctl_addr) {
2899 if (ap->flags & ATA_FLAG_MMIO)
2900 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2902 outb(ap->ctl, ap->ioaddr.ctl_addr);
2909 * ata_dev_same_device - Determine whether new ID matches configured device
2910 * @dev: device to compare against
2911 * @new_class: class of the new device
2912 * @new_id: IDENTIFY page of the new device
2914 * Compare @new_class and @new_id against @dev and determine
2915 * whether @dev is the device indicated by @new_class and @new_id.
2922 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2924 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2927 const u16 *old_id = dev->id;
2928 unsigned char model[2][41], serial[2][21];
2931 if (dev->class != new_class) {
2932 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2933 dev->class, new_class);
2937 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2938 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2939 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2940 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2941 new_n_sectors = ata_id_n_sectors(new_id);
2943 if (strcmp(model[0], model[1])) {
2944 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2945 "'%s' != '%s'\n", model[0], model[1]);
2949 if (strcmp(serial[0], serial[1])) {
2950 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2951 "'%s' != '%s'\n", serial[0], serial[1]);
2955 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2956 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2958 (unsigned long long)dev->n_sectors,
2959 (unsigned long long)new_n_sectors);
2967 * ata_dev_revalidate - Revalidate ATA device
2968 * @dev: device to revalidate
2969 * @post_reset: is this revalidation after reset?
2971 * Re-read IDENTIFY page and make sure @dev is still attached to the port.
2975 * Kernel thread context (may sleep)
2978 * 0 on success, negative errno otherwise
2980 int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2982 unsigned int class = dev->class;
2983 u16 *id = (void *)dev->ap->sector_buf;
2986 if (!ata_dev_enabled(dev)) {
2992 rc = ata_dev_read_id(dev, &class, post_reset, id);
2996 /* is the device still there? */
2997 if (!ata_dev_same_device(dev, class, id)) {
3002 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3004 /* configure device according to the new ID */
3005 rc = ata_dev_configure(dev, 0);
3010 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3014 static const char * const ata_dma_blacklist [] = {
3015 "WDC AC11000H", NULL,
3016 "WDC AC22100H", NULL,
3017 "WDC AC32500H", NULL,
3018 "WDC AC33100H", NULL,
3019 "WDC AC31600H", NULL,
3020 "WDC AC32100H", "24.09P07",
3021 "WDC AC23200L", "21.10N21",
3022 "Compaq CRD-8241B", NULL,
3027 "SanDisk SDP3B", NULL,
3028 "SanDisk SDP3B-64", NULL,
3029 "SANYO CD-ROM CRD", NULL,
3030 "HITACHI CDR-8", NULL,
3031 "HITACHI CDR-8335", NULL,
3032 "HITACHI CDR-8435", NULL,
3033 "Toshiba CD-ROM XM-6202B", NULL,
3034 "TOSHIBA CD-ROM XM-1702BC", NULL,
3036 "E-IDE CD-ROM CR-840", NULL,
3037 "CD-ROM Drive/F5A", NULL,
3038 "WPI CDD-820", NULL,
3039 "SAMSUNG CD-ROM SC-148C", NULL,
3040 "SAMSUNG CD-ROM SC", NULL,
3041 "SanDisk SDP3B-64", NULL,
3042 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
3043 "_NEC DV5800A", NULL,
3044 "SAMSUNG CD-ROM SN-124", "N001"
3047 static int ata_strim(char *s, size_t len)
3049 len = strnlen(s, len);
3051 /* ATAPI specifies that empty space is blank-filled; remove blanks */
3052 while ((len > 0) && (s[len - 1] == ' ')) {
3059 static int ata_dma_blacklisted(const struct ata_device *dev)
3061 unsigned char model_num[40];
3062 unsigned char model_rev[16];
3063 unsigned int nlen, rlen;
3066 /* We don't support polling DMA.
3067 * Blacklist ATAPI devices with CDB-intr for DMA (forcing PIO)
3068 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
3070 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3071 (dev->flags & ATA_DFLAG_CDB_INTR))
3074 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3076 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3078 nlen = ata_strim(model_num, sizeof(model_num));
3079 rlen = ata_strim(model_rev, sizeof(model_rev));
3081 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
3082 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
3083 if (ata_dma_blacklist[i+1] == NULL)
3085 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
3093 * ata_dev_xfermask - Compute supported xfermask of the given device
3094 * @dev: Device to compute xfermask for
3096 * Compute supported xfermask of @dev and store it in
3097 * dev->*_mask. This function is responsible for applying all
3098 * known limits including host controller limits, device blacklist, etc.
3104 static void ata_dev_xfermask(struct ata_device *dev)
3106 struct ata_port *ap = dev->ap;
3107 struct ata_host *host = ap->host;
3108 unsigned long xfer_mask;
3110 /* controller modes available */
3111 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3112 ap->mwdma_mask, ap->udma_mask);
3114 /* Apply cable rule here. Don't apply it early because when
3115 * we handle hot plug the cable type can itself change.
3117 if (ap->cbl == ATA_CBL_PATA40)
3118 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3120 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3121 dev->mwdma_mask, dev->udma_mask);
3122 xfer_mask &= ata_id_xfermask(dev->id);
3125 * CFA Advanced TrueIDE timings are not allowed on a shared cable.
3128 if (ata_dev_pair(dev)) {
3129 /* No PIO5 or PIO6 */
3130 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3131 /* No MWDMA3 or MWDMA4 */
3132 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3135 if (ata_dma_blacklisted(dev)) {
3136 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3137 ata_dev_printk(dev, KERN_WARNING,
3138 "device is on DMA blacklist, disabling DMA\n");
3141 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
3142 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3143 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3144 "other device, disabling DMA\n");
3147 if (ap->ops->mode_filter)
3148 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3150 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3151 &dev->mwdma_mask, &dev->udma_mask);
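/*
 * The xfer_mask packs PIO, MWDMA and UDMA mode bits into separate
 * fields of one unsigned long (shifted by ATA_SHIFT_PIO, ATA_SHIFT_MWDMA
 * and ATA_SHIFT_UDMA).  The 40-wire cable rule above, masking with
 * ~(0xF8 << ATA_SHIFT_UDMA), clears bits 3-7 of the UDMA field, i.e.
 * UDMA3 and above, capping a 40-wire cable at UDMA/33 (UDMA2).
 */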
3155 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3156 * @dev: Device to which command will be sent
3158 * Issue SET FEATURES - XFER MODE command to device @dev
3162 * PCI/etc. bus probe sem.
3165 * 0 on success, AC_ERR_* mask otherwise.
3168 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3170 struct ata_taskfile tf;
3171 unsigned int err_mask;
3173 /* set up set-features taskfile */
3174 DPRINTK("set features - xfer mode\n");
3176 ata_tf_init(dev, &tf);
3177 tf.command = ATA_CMD_SET_FEATURES;
3178 tf.feature = SETFEATURES_XFER;
3179 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3180 tf.protocol = ATA_PROT_NODATA;
3181 tf.nsect = dev->xfer_mode;
3183 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3185 DPRINTK("EXIT, err_mask=%x\n", err_mask);
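/*
 * The requested mode travels in the sector count register (tf.nsect
 * above).  The values follow the standard SET FEATURES encoding, e.g.
 * (from <linux/ata.h>):
 *
 *	XFER_PIO_0	0x08	XFER_PIO_4	0x0c
 *	XFER_MW_DMA_0	0x20	XFER_MW_DMA_2	0x22
 *	XFER_UDMA_0	0x40	XFER_UDMA_5	0x45
 */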
3190 * ata_dev_init_params - Issue INIT DEV PARAMS command
3191 * @dev: Device to which command will be sent
3192 * @heads: Number of heads (taskfile parameter)
3193 * @sectors: Number of sectors (taskfile parameter)
3196 * Kernel thread context (may sleep)
3199 * 0 on success, AC_ERR_* mask otherwise.
3201 static unsigned int ata_dev_init_params(struct ata_device *dev,
3202 u16 heads, u16 sectors)
3204 struct ata_taskfile tf;
3205 unsigned int err_mask;
3207 /* Number of sectors per track 1-255. Number of heads 1-16 */
3208 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3209 return AC_ERR_INVALID;
3211 /* set up init dev params taskfile */
3212 DPRINTK("init dev params\n");
3214 ata_tf_init(dev, &tf);
3215 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3216 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3217 tf.protocol = ATA_PROT_NODATA;
3219 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3221 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3223 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3228 * ata_sg_clean - Unmap DMA memory associated with command
3229 * @qc: Command containing DMA memory to be released
3231 * Unmap all mapped DMA memory associated with this command.
3234 * spin_lock_irqsave(host lock)
3237 static void ata_sg_clean(struct ata_queued_cmd *qc)
3239 struct ata_port *ap = qc->ap;
3240 struct scatterlist *sg = qc->__sg;
3241 int dir = qc->dma_dir;
3242 void *pad_buf = NULL;
3244 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3245 WARN_ON(sg == NULL);
3247 if (qc->flags & ATA_QCFLAG_SINGLE)
3248 WARN_ON(qc->n_elem > 1);
3250 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3252 /* if we padded the buffer out to 32-bit bound, and data
3253 * xfer direction is from-device, we must copy from the
3254 * pad buffer back into the supplied buffer
3256 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3257 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3259 if (qc->flags & ATA_QCFLAG_SG) {
3261 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3262 /* restore last sg */
3263 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3265 struct scatterlist *psg = &qc->pad_sgent;
3266 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3267 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3268 kunmap_atomic(addr, KM_IRQ0);
3272 dma_unmap_single(ap->dev,
3273 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3276 sg->length += qc->pad_len;
3278 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3279 pad_buf, qc->pad_len);
3282 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3287 * ata_fill_sg - Fill PCI IDE PRD table
3288 * @qc: Metadata associated with taskfile to be transferred
3290 * Fill PCI IDE PRD (scatter-gather) table with segments
3291 * associated with the current disk command.
3294 * spin_lock_irqsave(host lock)
3297 static void ata_fill_sg(struct ata_queued_cmd *qc)
3299 struct ata_port *ap = qc->ap;
3300 struct scatterlist *sg;
3303 WARN_ON(qc->__sg == NULL);
3304 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3307 ata_for_each_sg(sg, qc) {
3311 /* determine if physical DMA addr spans 64K boundary.
3312 * Note h/w doesn't support 64-bit, so we unconditionally
3313 * truncate dma_addr_t to u32.
3315 addr = (u32) sg_dma_address(sg);
3316 sg_len = sg_dma_len(sg);
3319 offset = addr & 0xffff;
3321 if ((offset + sg_len) > 0x10000)
3322 len = 0x10000 - offset;
3324 ap->prd[idx].addr = cpu_to_le32(addr);
3325 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3326 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3335 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
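/*
 * Each PRD entry is two little-endian 32-bit words: the physical
 * buffer address and a flags/length word whose low 16 bits hold the
 * byte count (0 meaning 64K) and whose top bit (ATA_PRD_EOT) marks
 * the last entry.  A sketch of filling a single entry by hand:
 */
static void my_fill_one_prd(struct ata_port *ap, unsigned int idx,
			    u32 addr, u32 len, int last)
{
	ap->prd[idx].addr = cpu_to_le32(addr);
	ap->prd[idx].flags_len = cpu_to_le32((len & 0xffff) |
					     (last ? ATA_PRD_EOT : 0));
}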
3338 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3339 * @qc: Metadata associated with taskfile to check
3341 * Allow low-level driver to filter ATA PACKET commands, returning
3342 * a status indicating whether or not it is OK to use DMA for the
3343 * supplied PACKET command.
3346 * spin_lock_irqsave(host lock)
3348 * RETURNS: 0 when ATAPI DMA can be used
3351 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3353 struct ata_port *ap = qc->ap;
3354 int rc = 0; /* Assume ATAPI DMA is OK by default */
3356 if (ap->ops->check_atapi_dma)
3357 rc = ap->ops->check_atapi_dma(qc);
3362 * ata_qc_prep - Prepare taskfile for submission
3363 * @qc: Metadata associated with taskfile to be prepared
3365 * Prepare ATA taskfile for submission.
3368 * spin_lock_irqsave(host lock)
3370 void ata_qc_prep(struct ata_queued_cmd *qc)
3372 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3378 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3381 * ata_sg_init_one - Associate command with memory buffer
3382 * @qc: Command to be associated
3383 * @buf: Memory buffer
3384 * @buflen: Length of memory buffer, in bytes.
3386 * Initialize the data-related elements of queued_cmd @qc
3387 * to point to a single memory buffer, @buf of byte length @buflen.
3390 * spin_lock_irqsave(host lock)
3393 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3395 struct scatterlist *sg;
3397 qc->flags |= ATA_QCFLAG_SINGLE;
3399 memset(&qc->sgent, 0, sizeof(qc->sgent));
3400 qc->__sg = &qc->sgent;
3402 qc->orig_n_elem = 1;
3404 qc->nbytes = buflen;
3407 sg_init_one(sg, buf, buflen);
3411 * ata_sg_init - Associate command with scatter-gather table.
3412 * @qc: Command to be associated
3413 * @sg: Scatter-gather table.
3414 * @n_elem: Number of elements in s/g table.
3416 * Initialize the data-related elements of queued_cmd @qc
3417 * to point to a scatter-gather table @sg, containing @n_elem
3421 * spin_lock_irqsave(host lock)
3424 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3425 unsigned int n_elem)
3427 qc->flags |= ATA_QCFLAG_SG;
3429 qc->n_elem = n_elem;
3430 qc->orig_n_elem = n_elem;
3434 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3435 * @qc: Command with memory buffer to be mapped.
3437 * DMA-map the memory buffer associated with queued_cmd @qc.
3440 * spin_lock_irqsave(host lock)
3443 * Zero on success, negative on error.
3446 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3448 struct ata_port *ap = qc->ap;
3449 int dir = qc->dma_dir;
3450 struct scatterlist *sg = qc->__sg;
3451 dma_addr_t dma_address;
3454 /* we must lengthen transfers to end on a 32-bit boundary */
3455 qc->pad_len = sg->length & 3;
3457 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3458 struct scatterlist *psg = &qc->pad_sgent;
3460 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3462 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3464 if (qc->tf.flags & ATA_TFLAG_WRITE)
3465 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3468 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3469 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3471 sg->length -= qc->pad_len;
3472 if (sg->length == 0)
3475 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3476 sg->length, qc->pad_len);
3484 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3486 if (dma_mapping_error(dma_address)) {
3488 sg->length += qc->pad_len;
3492 sg_dma_address(sg) = dma_address;
3493 sg_dma_len(sg) = sg->length;
3496 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3497 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3503 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3504 * @qc: Command with scatter-gather table to be mapped.
3506 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3509 * spin_lock_irqsave(host lock)
3512 * Zero on success, negative on error.
3516 static int ata_sg_setup(struct ata_queued_cmd *qc)
3518 struct ata_port *ap = qc->ap;
3519 struct scatterlist *sg = qc->__sg;
3520 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3521 int n_elem, pre_n_elem, dir, trim_sg = 0;
3523 VPRINTK("ENTER, ata%u\n", ap->id);
3524 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3526 /* we must lengthen transfers to end on a 32-bit boundary */
3527 qc->pad_len = lsg->length & 3;
3529 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3530 struct scatterlist *psg = &qc->pad_sgent;
3531 unsigned int offset;
3533 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3535 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3538 * psg->page/offset are used to copy to-be-written
3539 * data in this function or read data in ata_sg_clean.
3541 offset = lsg->offset + lsg->length - qc->pad_len;
3542 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3543 psg->offset = offset_in_page(offset);
3545 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3546 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3547 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3548 kunmap_atomic(addr, KM_IRQ0);
3551 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3552 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3554 lsg->length -= qc->pad_len;
3555 if (lsg->length == 0)
3558 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3559 qc->n_elem - 1, lsg->length, qc->pad_len);
3562 pre_n_elem = qc->n_elem;
3563 if (trim_sg && pre_n_elem)
3572 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3574 /* restore last sg */
3575 lsg->length += qc->pad_len;
3579 DPRINTK("%d sg elements mapped\n", n_elem);
3582 qc->n_elem = n_elem;
3588 * swap_buf_le16 - swap halves of 16-bit words in place
3589 * @buf: Buffer to swap
3590 * @buf_words: Number of 16-bit words in buffer.
3592 * Swap halves of 16-bit words if needed to convert from
3593 * little-endian byte order to native cpu byte order, or
3597 * Inherited from caller.
3599 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3604 for (i = 0; i < buf_words; i++)
3605 buf[i] = le16_to_cpu(buf[i]);
3606 #endif /* __BIG_ENDIAN */
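/*
 * Typical use (sketch): IDENTIFY DEVICE data arrives as 256
 * little-endian 16-bit words, so the PIO read path converts it in
 * place before parsing:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);   (a no-op on LE cpus)
 */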
3610 * ata_mmio_data_xfer - Transfer data by MMIO
3611 * @adev: device for this I/O
3613 * @buflen: buffer length
3614 * @write_data: read/write
3616 * Transfer data from/to the device data register by MMIO.
3619 * Inherited from caller.
3622 void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3623 unsigned int buflen, int write_data)
3625 struct ata_port *ap = adev->ap;
3627 unsigned int words = buflen >> 1;
3628 u16 *buf16 = (u16 *) buf;
3629 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3631 /* Transfer multiple of 2 bytes */
3633 for (i = 0; i < words; i++)
3634 writew(le16_to_cpu(buf16[i]), mmio);
3636 for (i = 0; i < words; i++)
3637 buf16[i] = cpu_to_le16(readw(mmio));
3640 /* Transfer trailing 1 byte, if any. */
3641 if (unlikely(buflen & 0x01)) {
3642 u16 align_buf[1] = { 0 };
3643 unsigned char *trailing_buf = buf + buflen - 1;
3646 memcpy(align_buf, trailing_buf, 1);
3647 writew(le16_to_cpu(align_buf[0]), mmio);
3649 align_buf[0] = cpu_to_le16(readw(mmio));
3650 memcpy(trailing_buf, align_buf, 1);
3656 * ata_pio_data_xfer - Transfer data by PIO
3657 * @adev: device to target
3659 * @buflen: buffer length
3660 * @write_data: read/write
3662 * Transfer data from/to the device data register by PIO.
3665 * Inherited from caller.
3668 void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3669 unsigned int buflen, int write_data)
3671 struct ata_port *ap = adev->ap;
3672 unsigned int words = buflen >> 1;
3674 /* Transfer multiple of 2 bytes */
3676 outsw(ap->ioaddr.data_addr, buf, words);
3678 insw(ap->ioaddr.data_addr, buf, words);
3680 /* Transfer trailing 1 byte, if any. */
3681 if (unlikely(buflen & 0x01)) {
3682 u16 align_buf[1] = { 0 };
3683 unsigned char *trailing_buf = buf + buflen - 1;
3686 memcpy(align_buf, trailing_buf, 1);
3687 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3689 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3690 memcpy(trailing_buf, align_buf, 1);
3696 * ata_pio_data_xfer_noirq - Transfer data by PIO
3697 * @adev: device to target
3699 * @buflen: buffer length
3700 * @write_data: read/write
3702 * Transfer data from/to the device data register by PIO. Do the
3703 * transfer with interrupts disabled.
3706 * Inherited from caller.
3709 void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3710 unsigned int buflen, int write_data)
3712 unsigned long flags;
3713 local_irq_save(flags);
3714 ata_pio_data_xfer(adev, buf, buflen, write_data);
3715 local_irq_restore(flags);
3720 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3721 * @qc: Command on going
3723 * Transfer ATA_SECT_SIZE bytes of data from/to the ATA device.
3726 * Inherited from caller.
3729 static void ata_pio_sector(struct ata_queued_cmd *qc)
3731 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3732 struct scatterlist *sg = qc->__sg;
3733 struct ata_port *ap = qc->ap;
3735 unsigned int offset;
3738 if (qc->cursect == (qc->nsect - 1))
3739 ap->hsm_task_state = HSM_ST_LAST;
3741 page = sg[qc->cursg].page;
3742 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3744 /* get the current page and offset */
3745 page = nth_page(page, (offset >> PAGE_SHIFT));
3746 offset %= PAGE_SIZE;
3748 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3750 if (PageHighMem(page)) {
3751 unsigned long flags;
3753 /* FIXME: use a bounce buffer */
3754 local_irq_save(flags);
3755 buf = kmap_atomic(page, KM_IRQ0);
3757 /* do the actual data transfer */
3758 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3760 kunmap_atomic(buf, KM_IRQ0);
3761 local_irq_restore(flags);
3763 buf = page_address(page);
3764 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3770 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3777 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3778 * @qc: Command on going
3780 * Transfer one or more ATA_SECT_SIZE-byte sectors from/to the
3781 * ATA device for the DRQ request.
3784 * Inherited from caller.
3787 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3789 if (is_multi_taskfile(&qc->tf)) {
3790 /* READ/WRITE MULTIPLE */
3793 WARN_ON(qc->dev->multi_count == 0);
3795 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3803 * atapi_send_cdb - Write CDB bytes to hardware
3804 * @ap: Port to which ATAPI device is attached.
3805 * @qc: Taskfile currently active
3807 * When the device has indicated its readiness to accept
3808 * a CDB, this function is called to send the CDB.
3814 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3817 DPRINTK("send cdb\n");
3818 WARN_ON(qc->dev->cdb_len < 12);
3820 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3821 ata_altstatus(ap); /* flush */
3823 switch (qc->tf.protocol) {
3824 case ATA_PROT_ATAPI:
3825 ap->hsm_task_state = HSM_ST;
3827 case ATA_PROT_ATAPI_NODATA:
3828 ap->hsm_task_state = HSM_ST_LAST;
3830 case ATA_PROT_ATAPI_DMA:
3831 ap->hsm_task_state = HSM_ST_LAST;
3832 /* initiate bmdma */
3833 ap->ops->bmdma_start(qc);
3839 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3840 * @qc: Command on going
3841 * @bytes: number of bytes
3843 * Transfer data from/to the ATAPI device.
3846 * Inherited from caller.
3850 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3852 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3853 struct scatterlist *sg = qc->__sg;
3854 struct ata_port *ap = qc->ap;
3857 unsigned int offset, count;
3859 if (qc->curbytes + bytes >= qc->nbytes)
3860 ap->hsm_task_state = HSM_ST_LAST;
3863 if (unlikely(qc->cursg >= qc->n_elem)) {
3865 * The end of qc->sg is reached and the device expects
3866 * more data to transfer. In order not to overrun qc->sg
3867 * and to fulfill the length specified in the byte count register,
3868 * - for read case, discard trailing data from the device
3869 * - for write case, pad with zero data to the device
3871 u16 pad_buf[1] = { 0 };
3872 unsigned int words = bytes >> 1;
3875 if (words) /* warning if bytes > 1 */
3876 ata_dev_printk(qc->dev, KERN_WARNING,
3877 "%u bytes trailing data\n", bytes);
3879 for (i = 0; i < words; i++)
3880 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3882 ap->hsm_task_state = HSM_ST_LAST;
3886 sg = &qc->__sg[qc->cursg];
3889 offset = sg->offset + qc->cursg_ofs;
3891 /* get the current page and offset */
3892 page = nth_page(page, (offset >> PAGE_SHIFT));
3893 offset %= PAGE_SIZE;
3895 /* don't overrun current sg */
3896 count = min(sg->length - qc->cursg_ofs, bytes);
3898 /* don't cross page boundaries */
3899 count = min(count, (unsigned int)PAGE_SIZE - offset);
3901 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3903 if (PageHighMem(page)) {
3904 unsigned long flags;
3906 /* FIXME: use bounce buffer */
3907 local_irq_save(flags);
3908 buf = kmap_atomic(page, KM_IRQ0);
3910 /* do the actual data transfer */
3911 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3913 kunmap_atomic(buf, KM_IRQ0);
3914 local_irq_restore(flags);
3916 buf = page_address(page);
3917 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3921 qc->curbytes += count;
3922 qc->cursg_ofs += count;
3924 if (qc->cursg_ofs == sg->length) {
3934 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3935 * @qc: Command on going
3937 * Transfer data from/to the ATAPI device.
3940 * Inherited from caller.
3943 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3945 struct ata_port *ap = qc->ap;
3946 struct ata_device *dev = qc->dev;
3947 unsigned int ireason, bc_lo, bc_hi, bytes;
3948 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3950 /* Abuse qc->result_tf for temp storage of intermediate TF
3951 * here to save some kernel stack usage.
3952 * For normal completion, qc->result_tf is not relevant. For
3953 * error, qc->result_tf is later overwritten by ata_qc_complete().
3954 * So, the correctness of qc->result_tf is not affected.
3956 ap->ops->tf_read(ap, &qc->result_tf);
3957 ireason = qc->result_tf.nsect;
3958 bc_lo = qc->result_tf.lbam;
3959 bc_hi = qc->result_tf.lbah;
3960 bytes = (bc_hi << 8) | bc_lo;
3962 /* shall be cleared to zero, indicating xfer of data */
3963 if (ireason & (1 << 0))
3966 /* make sure transfer direction matches expected */
3967 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3968 if (do_write != i_write)
3971 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3973 __atapi_pio_bytes(qc, bytes);
3978 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3979 qc->err_mask |= AC_ERR_HSM;
3980 ap->hsm_task_state = HSM_ST_ERR;
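/*
 * The checks above decode the ATAPI interrupt reason, reported in the
 * sector count register after a PACKET command: bit 0 is CoD (1 =
 * command, 0 = data) and bit 1 is IO (1 = to host, 0 = to device).
 * A condensed sketch of the same test:
 */
static inline int my_atapi_drq_ok(u8 ireason, int do_write)
{
	if (ireason & (1 << 0))		/* CoD set: CDB phase, not data */
		return 0;
	/* IO clear means the device wants data, i.e. a host write */
	return do_write == (((ireason & (1 << 1)) == 0) ? 1 : 0);
}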
3984 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3985 * @ap: the target ata_port
3989 * 1 if ok in workqueue, 0 otherwise.
3992 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3994 if (qc->tf.flags & ATA_TFLAG_POLLING)
3997 if (ap->hsm_task_state == HSM_ST_FIRST) {
3998 if (qc->tf.protocol == ATA_PROT_PIO &&
3999 (qc->tf.flags & ATA_TFLAG_WRITE))
4002 if (is_atapi_taskfile(&qc->tf) &&
4003 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4011 * ata_hsm_qc_complete - finish a qc running on standard HSM
4012 * @qc: Command to complete
4013 * @in_wq: 1 if called from workqueue, 0 otherwise
4015 * Finish @qc which is running on standard HSM.
4018 * If @in_wq is zero, spin_lock_irqsave(host lock).
4019 * Otherwise, none on entry and grabs host lock.
4021 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4023 struct ata_port *ap = qc->ap;
4024 unsigned long flags;
4026 if (ap->ops->error_handler) {
4028 spin_lock_irqsave(ap->lock, flags);
4030 /* EH might have kicked in while host lock is
4033 qc = ata_qc_from_tag(ap, qc->tag);
4035 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4037 ata_qc_complete(qc);
4039 ata_port_freeze(ap);
4042 spin_unlock_irqrestore(ap->lock, flags);
4044 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4045 ata_qc_complete(qc);
4047 ata_port_freeze(ap);
4051 spin_lock_irqsave(ap->lock, flags);
4053 ata_qc_complete(qc);
4054 spin_unlock_irqrestore(ap->lock, flags);
4056 ata_qc_complete(qc);
4059 ata_altstatus(ap); /* flush */
4063 * ata_hsm_move - move the HSM to the next state.
4064 * @ap: the target ata_port
4066 * @status: current device status
4067 * @in_wq: 1 if called from workqueue, 0 otherwise
4070 * 1 when poll next status needed, 0 otherwise.
4072 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4073 u8 status, int in_wq)
4075 unsigned long flags = 0;
4078 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4080 /* Make sure ata_qc_issue_prot() does not throw things
4081 * like DMA polling into the workqueue. Notice that
4082 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4084 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4087 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4088 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4090 switch (ap->hsm_task_state) {
4092 /* Send first data block or PACKET CDB */
4094 /* If polling, we will stay in the work queue after
4095 * sending the data. Otherwise, interrupt handler
4096 * takes over after sending the data.
4098 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4100 /* check device status */
4101 if (unlikely((status & ATA_DRQ) == 0)) {
4102 /* handle BSY=0, DRQ=0 as error */
4103 if (likely(status & (ATA_ERR | ATA_DF)))
4104 /* device stops HSM for abort/error */
4105 qc->err_mask |= AC_ERR_DEV;
4107 /* HSM violation. Let EH handle this */
4108 qc->err_mask |= AC_ERR_HSM;
4110 ap->hsm_task_state = HSM_ST_ERR;
4114 /* Device should not ask for data transfer (DRQ=1)
4115 * when it finds something wrong.
4116 * We ignore DRQ here and stop the HSM by
4117 * changing hsm_task_state to HSM_ST_ERR and
4118 * let the EH abort the command or reset the device.
4120 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4121 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4123 qc->err_mask |= AC_ERR_HSM;
4124 ap->hsm_task_state = HSM_ST_ERR;
4128 /* Send the CDB (atapi) or the first data block (ata pio out).
4129 * During the state transition, interrupt handler shouldn't
4130 * be invoked before the data transfer is complete and
4131 * hsm_task_state is changed. Hence, the following locking.
4134 spin_lock_irqsave(ap->lock, flags);
4136 if (qc->tf.protocol == ATA_PROT_PIO) {
4137 /* PIO data out protocol.
4138 * send first data block.
4141 /* ata_pio_sectors() might change the state
4142 * to HSM_ST_LAST. so, the state is changed here
4143 * before ata_pio_sectors().
4145 ap->hsm_task_state = HSM_ST;
4146 ata_pio_sectors(qc);
4147 ata_altstatus(ap); /* flush */
4150 atapi_send_cdb(ap, qc);
4153 spin_unlock_irqrestore(ap->lock, flags);
4155 /* if polling, ata_pio_task() handles the rest.
4156 * otherwise, interrupt handler takes over from here.
4161 /* complete command or read/write the data register */
4162 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4163 /* ATAPI PIO protocol */
4164 if ((status & ATA_DRQ) == 0) {
4165 /* No more data to transfer or device error.
4166 * Device error will be tagged in HSM_ST_LAST.
4168 ap->hsm_task_state = HSM_ST_LAST;
4172 /* Device should not ask for data transfer (DRQ=1)
4173 * when it finds something wrong.
4174 * We ignore DRQ here and stop the HSM by
4175 * changing hsm_task_state to HSM_ST_ERR and
4176 * let the EH abort the command or reset the device.
4178 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4179 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4181 qc->err_mask |= AC_ERR_HSM;
4182 ap->hsm_task_state = HSM_ST_ERR;
4186 atapi_pio_bytes(qc);
4188 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4189 /* bad ireason reported by device */
4193 /* ATA PIO protocol */
4194 if (unlikely((status & ATA_DRQ) == 0)) {
4195 /* handle BSY=0, DRQ=0 as error */
4196 if (likely(status & (ATA_ERR | ATA_DF)))
4197 /* device stops HSM for abort/error */
4198 qc->err_mask |= AC_ERR_DEV;
4200 /* HSM violation. Let EH handle this */
4201 qc->err_mask |= AC_ERR_HSM;
4203 ap->hsm_task_state = HSM_ST_ERR;
4207 /* For PIO reads, some devices may ask for
4208 * data transfer (DRQ=1) along with ERR=1.
4209 * We respect DRQ here and transfer one
4210 * block of junk data before changing the
4211 * hsm_task_state to HSM_ST_ERR.
4213 * For PIO writes, ERR=1 DRQ=1 doesn't make
4214 * sense since the data block has been
4215 * transferred to the device.
4217 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4218 /* data might be corrupted */
4219 qc->err_mask |= AC_ERR_DEV;
4221 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4222 ata_pio_sectors(qc);
4224 status = ata_wait_idle(ap);
4227 if (status & (ATA_BUSY | ATA_DRQ))
4228 qc->err_mask |= AC_ERR_HSM;
4230 /* ata_pio_sectors() might change the
4231 * state to HSM_ST_LAST. so, the state
4232 * is changed after ata_pio_sectors().
4234 ap->hsm_task_state = HSM_ST_ERR;
4238 ata_pio_sectors(qc);
4240 if (ap->hsm_task_state == HSM_ST_LAST &&
4241 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4244 status = ata_wait_idle(ap);
4249 ata_altstatus(ap); /* flush */
4254 if (unlikely(!ata_ok(status))) {
4255 qc->err_mask |= __ac_err_mask(status);
4256 ap->hsm_task_state = HSM_ST_ERR;
4260 /* no more data to transfer */
4261 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4262 ap->id, qc->dev->devno, status);
4264 WARN_ON(qc->err_mask);
4266 ap->hsm_task_state = HSM_ST_IDLE;
4268 /* complete taskfile transaction */
4269 ata_hsm_qc_complete(qc, in_wq);
4275 /* make sure qc->err_mask is available to
4276 * know what's wrong and recover
4278 WARN_ON(qc->err_mask == 0);
4280 ap->hsm_task_state = HSM_ST_IDLE;
4282 /* complete taskfile transaction */
4283 ata_hsm_qc_complete(qc, in_wq);
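/*
 * Taken together, a successful PIO command walks the HSM like this
 * (sketch):
 *
 *	HSM_ST_FIRST - send the CDB or the first data block
 *	HSM_ST       - per-DRQ-block data transfer loop
 *	HSM_ST_LAST  - final status check, then completion
 *	HSM_ST_IDLE  - no command in flight
 *
 * HSM_ST_ERR may be entered from any state and funnels into the same
 * completion path with qc->err_mask set.
 */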
4295 static void ata_pio_task(void *_data)
4297 struct ata_queued_cmd *qc = _data;
4298 struct ata_port *ap = qc->ap;
4303 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4306 * This is purely heuristic. This is a fast path.
4307 * Sometimes when we enter, BSY will be cleared in
4308 * a chk-status or two. If not, the drive is probably seeking
4309 * or something. Snooze for a couple of msecs, then
4310 * chk-status again. If still busy, queue delayed work.
4312 status = ata_busy_wait(ap, ATA_BUSY, 5);
4313 if (status & ATA_BUSY) {
4315 status = ata_busy_wait(ap, ATA_BUSY, 10);
4316 if (status & ATA_BUSY) {
4317 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4323 poll_next = ata_hsm_move(ap, qc, status, 1);
4325 /* another command or interrupt handler
4326 * may be running at this point.
4333 * ata_qc_new - Request an available ATA command, for queueing
4334 * @ap: Port associated with device @dev
4335 * @dev: Device from whom we request an available command structure
4341 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4343 struct ata_queued_cmd *qc = NULL;
4346 /* no command while frozen */
4347 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4350 /* the last tag is reserved for internal command. */
4351 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4352 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4353 qc = __ata_qc_from_tag(ap, i);
4364 * ata_qc_new_init - Request an available ATA command, and initialize it
4365 * @dev: Device from whom we request an available command structure
4371 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4373 struct ata_port *ap = dev->ap;
4374 struct ata_queued_cmd *qc;
4376 qc = ata_qc_new(ap);
4389 * ata_qc_free - free unused ata_queued_cmd
4390 * @qc: Command to complete
4392 * Designed to free unused ata_queued_cmd object
4393 * in case something prevents using it.
4396 * spin_lock_irqsave(host lock)
4398 void ata_qc_free(struct ata_queued_cmd *qc)
4400 struct ata_port *ap = qc->ap;
4403 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4407 if (likely(ata_tag_valid(tag))) {
4408 qc->tag = ATA_TAG_POISON;
4409 clear_bit(tag, &ap->qc_allocated);
4413 void __ata_qc_complete(struct ata_queued_cmd *qc)
4415 struct ata_port *ap = qc->ap;
4417 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4418 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4420 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4423 /* command should be marked inactive atomically with qc completion */
4424 if (qc->tf.protocol == ATA_PROT_NCQ)
4425 ap->sactive &= ~(1 << qc->tag);
4427 ap->active_tag = ATA_TAG_POISON;
4429 /* atapi: mark qc as inactive to prevent the interrupt handler
4430 * from completing the command twice later, before the error handler
4431 * is called. (when rc != 0 and atapi request sense is needed)
4433 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4434 ap->qc_active &= ~(1 << qc->tag);
4436 /* call completion callback */
4437 qc->complete_fn(qc);
4441 * ata_qc_complete - Complete an active ATA command
4442 * @qc: Command to complete
4445 * Indicate to the mid and upper layers that an ATA
4446 * command has completed, with either an ok or not-ok status.
4449 * spin_lock_irqsave(host lock)
4451 void ata_qc_complete(struct ata_queued_cmd *qc)
4453 struct ata_port *ap = qc->ap;
4455 /* XXX: New EH and old EH use different mechanisms to
4456 * synchronize EH with regular execution path.
4458 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4459 * Normal execution path is responsible for not accessing a
4460 * failed qc. libata core enforces the rule by returning NULL
4461 * from ata_qc_from_tag() for failed qcs.
4463 * Old EH depends on ata_qc_complete() nullifying completion
4464 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4465 * not synchronize with interrupt handler. Only PIO task is
4468 if (ap->ops->error_handler) {
4469 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4471 if (unlikely(qc->err_mask))
4472 qc->flags |= ATA_QCFLAG_FAILED;
4474 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4475 if (!ata_tag_internal(qc->tag)) {
4476 /* always fill result TF for failed qc */
4477 ap->ops->tf_read(ap, &qc->result_tf);
4478 ata_qc_schedule_eh(qc);
4483 /* read result TF if requested */
4484 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4485 ap->ops->tf_read(ap, &qc->result_tf);
4487 __ata_qc_complete(qc);
4489 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4492 /* read result TF if failed or requested */
4493 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4494 ap->ops->tf_read(ap, &qc->result_tf);
4496 __ata_qc_complete(qc);
4501 * ata_qc_complete_multiple - Complete multiple qcs successfully
4502 * @ap: port in question
4503 * @qc_active: new qc_active mask
4504 * @finish_qc: LLDD callback invoked before completing a qc
4506 * Complete in-flight commands. This function is meant to be
4507 * called from the low-level driver's interrupt routine to complete
4508 * requests normally. ap->qc_active and @qc_active are compared
4509 * and commands are completed accordingly.
4512 * spin_lock_irqsave(host lock)
4515 * Number of completed commands on success, -errno otherwise.
4517 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4518 void (*finish_qc)(struct ata_queued_cmd *))
4524 done_mask = ap->qc_active ^ qc_active;
4526 if (unlikely(done_mask & qc_active)) {
4527 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4528 "(%08x->%08x)\n", ap->qc_active, qc_active);
4532 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4533 struct ata_queued_cmd *qc;
4535 if (!(done_mask & (1 << i)))
4538 if ((qc = ata_qc_from_tag(ap, i))) {
4541 ata_qc_complete(qc);
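/*
 * Worked example of the done_mask computation above: if tags 0, 1 and
 * 4 were in flight (ap->qc_active == 0x13) and the controller now
 * reports qc_active == 0x10, then done_mask == 0x13 ^ 0x10 == 0x03
 * and tags 0 and 1 are completed.  A bit set in done_mask & qc_active
 * would mean a tag went active without libata issuing it - the
 * illegal transition rejected above.
 */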
4549 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4551 struct ata_port *ap = qc->ap;
4553 switch (qc->tf.protocol) {
4556 case ATA_PROT_ATAPI_DMA:
4559 case ATA_PROT_ATAPI:
4561 if (ap->flags & ATA_FLAG_PIO_DMA)
4574 * ata_qc_issue - issue taskfile to device
4575 * @qc: command to issue to device
4577 * Prepare an ATA command for submission to the device.
4578 * This includes mapping the data into a DMA-able
4579 * area, filling in the S/G table, and finally
4580 * writing the taskfile to hardware, starting the command.
4583 * spin_lock_irqsave(host lock)
4585 void ata_qc_issue(struct ata_queued_cmd *qc)
4587 struct ata_port *ap = qc->ap;
4589 /* Make sure only one non-NCQ command is outstanding. The
4590 * check is skipped for old EH because it reuses active qc to
4591 * request ATAPI sense.
4593 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4595 if (qc->tf.protocol == ATA_PROT_NCQ) {
4596 WARN_ON(ap->sactive & (1 << qc->tag));
4597 ap->sactive |= 1 << qc->tag;
4599 WARN_ON(ap->sactive);
4600 ap->active_tag = qc->tag;
4603 qc->flags |= ATA_QCFLAG_ACTIVE;
4604 ap->qc_active |= 1 << qc->tag;
4606 if (ata_should_dma_map(qc)) {
4607 if (qc->flags & ATA_QCFLAG_SG) {
4608 if (ata_sg_setup(qc))
4610 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4611 if (ata_sg_setup_one(qc))
4615 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4618 ap->ops->qc_prep(qc);
4620 qc->err_mask |= ap->ops->qc_issue(qc);
4621 if (unlikely(qc->err_mask))
4626 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4627 qc->err_mask |= AC_ERR_SYSTEM;
4629 ata_qc_complete(qc);
4633 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4634 * @qc: command to issue to device
4636 * Using various libata functions and hooks, this function
4637 * starts an ATA command. ATA commands are grouped into
4638 * classes called "protocols", and issuing each type of protocol
4639 * is slightly different.
4641 * May be used as the qc_issue() entry in ata_port_operations.
4644 * spin_lock_irqsave(host lock)
4647 * Zero on success, AC_ERR_* mask on failure
4650 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4652 struct ata_port *ap = qc->ap;
4654 /* Use polling PIO if the LLD doesn't handle
4655 * interrupt-driven PIO and ATAPI CDB interrupts.
4657 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4658 switch (qc->tf.protocol) {
4660 case ATA_PROT_ATAPI:
4661 case ATA_PROT_ATAPI_NODATA:
4662 qc->tf.flags |= ATA_TFLAG_POLLING;
4664 case ATA_PROT_ATAPI_DMA:
4665 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4666 /* see ata_dma_blacklisted() */
4674 /* select the device */
4675 ata_dev_select(ap, qc->dev->devno, 1, 0);
4677 /* start the command */
4678 switch (qc->tf.protocol) {
4679 case ATA_PROT_NODATA:
4680 if (qc->tf.flags & ATA_TFLAG_POLLING)
4681 ata_qc_set_polling(qc);
4683 ata_tf_to_host(ap, &qc->tf);
4684 ap->hsm_task_state = HSM_ST_LAST;
4686 if (qc->tf.flags & ATA_TFLAG_POLLING)
4687 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4692 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4694 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4695 ap->ops->bmdma_setup(qc); /* set up bmdma */
4696 ap->ops->bmdma_start(qc); /* initiate bmdma */
4697 ap->hsm_task_state = HSM_ST_LAST;
4701 if (qc->tf.flags & ATA_TFLAG_POLLING)
4702 ata_qc_set_polling(qc);
4704 ata_tf_to_host(ap, &qc->tf);
4706 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4707 /* PIO data out protocol */
4708 ap->hsm_task_state = HSM_ST_FIRST;
4709 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4711 /* always send first data block using
4712 * the ata_pio_task() codepath.
4715 /* PIO data in protocol */
4716 ap->hsm_task_state = HSM_ST;
4718 if (qc->tf.flags & ATA_TFLAG_POLLING)
4719 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4721 /* if polling, ata_pio_task() handles the rest.
4722 * otherwise, interrupt handler takes over from here.
4728 case ATA_PROT_ATAPI:
4729 case ATA_PROT_ATAPI_NODATA:
4730 if (qc->tf.flags & ATA_TFLAG_POLLING)
4731 ata_qc_set_polling(qc);
4733 ata_tf_to_host(ap, &qc->tf);
4735 ap->hsm_task_state = HSM_ST_FIRST;
4737 /* send cdb by polling if no cdb interrupt */
4738 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4739 (qc->tf.flags & ATA_TFLAG_POLLING))
4740 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4743 case ATA_PROT_ATAPI_DMA:
4744 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4746 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4747 ap->ops->bmdma_setup(qc); /* set up bmdma */
4748 ap->hsm_task_state = HSM_ST_FIRST;
4750 /* send cdb by polling if no cdb interrupt */
4751 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4752 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4757 return AC_ERR_SYSTEM;
4764 * ata_host_intr - Handle host interrupt for given (port, task)
4765 * @ap: Port on which interrupt arrived (possibly...)
4766 * @qc: Taskfile currently active in engine
4768 * Handle host interrupt for given queued command. Currently,
4769 * only DMA interrupts are handled. All other commands are
4770 * handled via polling with interrupts disabled (nIEN bit).
4773 * spin_lock_irqsave(host lock)
4776 * One if interrupt was handled, zero if not (shared irq).
4779 inline unsigned int ata_host_intr (struct ata_port *ap,
4780 struct ata_queued_cmd *qc)
4782 u8 status, host_stat = 0;
4784 VPRINTK("ata%u: protocol %d task_state %d\n",
4785 ap->id, qc->tf.protocol, ap->hsm_task_state);
4787 /* Check whether we are expecting interrupt in this state */
4788 switch (ap->hsm_task_state) {
4790 /* Some pre-ATAPI-4 devices assert INTRQ
4791 * in this state when ready to receive the CDB.
4794 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4795 * The flag was turned on only for atapi devices.
4796 * No need to check is_atapi_taskfile(&qc->tf) again.
4798 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4802 if (qc->tf.protocol == ATA_PROT_DMA ||
4803 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4804 /* check status of DMA engine */
4805 host_stat = ap->ops->bmdma_status(ap);
4806 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4808 /* if it's not our irq... */
4809 if (!(host_stat & ATA_DMA_INTR))
4812 /* before we do anything else, clear DMA-Start bit */
4813 ap->ops->bmdma_stop(qc);
4815 if (unlikely(host_stat & ATA_DMA_ERR)) {
4816 /* error when transferring data to/from memory */
4817 qc->err_mask |= AC_ERR_HOST_BUS;
4818 ap->hsm_task_state = HSM_ST_ERR;
4828 /* check altstatus */
4829 status = ata_altstatus(ap);
4830 if (status & ATA_BUSY)
4833 /* check main status, clearing INTRQ */
4834 status = ata_chk_status(ap);
4835 if (unlikely(status & ATA_BUSY))
4838 /* ack bmdma irq events */
4839 ap->ops->irq_clear(ap);
4841 ata_hsm_move(ap, qc, status, 0);
4842 return 1; /* irq handled */
4845 ap->stats.idle_irq++;
4848 if ((ap->stats.idle_irq % 1000) == 0) {
4849 ata_irq_ack(ap, 0); /* debug trap */
4850 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4854 return 0; /* irq not handled */
4858 * ata_interrupt - Default ATA host interrupt handler
4859 * @irq: irq line (unused)
4860 * @dev_instance: pointer to our ata_host information structure
4862 * Default interrupt handler for PCI IDE devices. Calls
4863 * ata_host_intr() for each port that is not disabled.
4866 * Obtains host lock during operation.
4869 * IRQ_NONE or IRQ_HANDLED.
4872 irqreturn_t ata_interrupt (int irq, void *dev_instance)
4874 struct ata_host *host = dev_instance;
4876 unsigned int handled = 0;
4877 unsigned long flags;
4879 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4880 spin_lock_irqsave(&host->lock, flags);
4882 for (i = 0; i < host->n_ports; i++) {
4883 struct ata_port *ap;
4885 ap = host->ports[i];
4887 !(ap->flags & ATA_FLAG_DISABLED)) {
4888 struct ata_queued_cmd *qc;
4890 qc = ata_qc_from_tag(ap, ap->active_tag);
4891 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4892 (qc->flags & ATA_QCFLAG_ACTIVE))
4893 handled |= ata_host_intr(ap, qc);
4897 spin_unlock_irqrestore(&host->lock, flags);
4899 return IRQ_RETVAL(handled);
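/*
 * A PCI LLDD typically wires this handler up at attach time; a minimal
 * sketch (error handling omitted; IRQF_SHARED because legacy IDE irq
 * lines are commonly shared):
 *
 *	rc = request_irq(pdev->irq, ata_interrupt, IRQF_SHARED,
 *			 "my_lldd", host);
 */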
4903 * sata_scr_valid - test whether SCRs are accessible
4904 * @ap: ATA port to test SCR accessibility for
4906 * Test whether SCRs are accessible for @ap.
4912 * 1 if SCRs are accessible, 0 otherwise.
4914 int sata_scr_valid(struct ata_port *ap)
4916 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4920 * sata_scr_read - read SCR register of the specified port
4921 * @ap: ATA port to read SCR for
4923 * @val: Place to store read value
4925 * Read SCR register @reg of @ap into *@val. This function is
4926 * guaranteed to succeed if the cable type of the port is SATA
4927 * and the port implements ->scr_read.
4933 * 0 on success, negative errno on failure.
4935 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4937 if (sata_scr_valid(ap)) {
4938 *val = ap->ops->scr_read(ap, reg);
4945 * sata_scr_write - write SCR register of the specified port
4946 * @ap: ATA port to write SCR for
4947 * @reg: SCR to write
4948 * @val: value to write
4950 * Write @val to SCR register @reg of @ap. This function is
4951 * guaranteed to succeed if the cable type of the port is SATA
4952 * and the port implements ->scr_read.
4958 * 0 on success, negative errno on failure.
4960 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4962 if (sata_scr_valid(ap)) {
4963 ap->ops->scr_write(ap, reg, val);
4970 * sata_scr_write_flush - write SCR register of the specified port and flush
4971 * @ap: ATA port to write SCR for
4972 * @reg: SCR to write
4973 * @val: value to write
4975 * This function is identical to sata_scr_write() except that this
4976 * function performs a flush after writing to the register.
4982 * 0 on success, negative errno on failure.
4984 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4986 if (sata_scr_valid(ap)) {
4987 ap->ops->scr_write(ap, reg, val);
4988 ap->ops->scr_read(ap, reg);
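/*
 * Example (sketch): SError bits are write-1-to-clear, which is why
 * ata_std_postreset() above writes back what it read.  An LLDD can do
 * the same directly:
 */
static void my_clear_serror(struct ata_port *ap)
{
	u32 serror;

	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);
}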
4995 * ata_port_online - test whether the given port is online
4996 * @ap: ATA port to test
4998 * Test whether @ap is online. Note that this function returns 0
4999 * if online status of @ap cannot be obtained, so
5000 * ata_port_online(ap) != !ata_port_offline(ap).
5006 * 1 if the port online status is available and online.
5008 int ata_port_online(struct ata_port *ap)
5012 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5018 * ata_port_offline - test whether the given port is offline
5019 * @ap: ATA port to test
5021 * Test whether @ap is offline. Note that this function returns
5022 * 0 if offline status of @ap cannot be obtained, so
5023 * ata_port_online(ap) != !ata_port_offline(ap).
5029 * 1 if the port offline status is available and offline.
5031 int ata_port_offline(struct ata_port *ap)
5035 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
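/*
 * The low nibble of SStatus tested above is the DET field: 0x0 - no
 * device detected, 0x1 - presence detected but no phy communication,
 * 0x3 - device present and phy communication established, 0x4 - phy
 * in offline mode.  Hence "== 0x3" means online and "!= 0x3" means
 * offline, and neither test is the negation of the other when the
 * SCRs cannot be read at all.
 */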

int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (ata_id_has_flush_ext(dev->id))
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}

static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}

/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host->dev->power.power_state = mesg;
	return 0;

 fail:
	ata_host_resume(host);
	return rc;
}

/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}

/**
 *	ata_port_start - Set port up for DMA
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start (struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc) {
		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
		return rc;
	}

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);

	return 0;
}
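
/*
 * Example (editor's illustrative sketch): a driver that needs extra
 * per-port resources typically layers its own port_start on top of
 * this helper, keeping the PRD/pad allocation and error handling
 * here.  "foo_port_start" is a hypothetical name.
 *
 *	static int foo_port_start(struct ata_port *ap)
 *	{
 *		int rc = ata_port_start(ap);	// PRD table + pad buffer
 *
 *		if (rc)
 *			return rc;
 *		// allocate/map driver-private port state here
 *		return 0;
 *	}
 */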

/**
 *	ata_port_stop - Undo ata_port_start()
 *	@ap: Port to shut down
 *
 *	Frees the PRD table.
 *
 *	May be used as the port_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}

void ata_host_stop (struct ata_host *host)
{
	if (host->mmio_base)
		iounmap(host->mmio_base);
}

/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}

/**
 *	ata_port_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_init(struct ata_port *ap, struct ata_host *host,
		   const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->id = ata_unique_id++;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	if (port_no == 1 && ent->pinfo2) {
		ap->pio_mask = ent->pinfo2->pio_mask;
		ap->mwdma_mask = ent->pinfo2->mwdma_mask;
		ap->udma_mask = ent->pinfo2->udma_mask;
		ap->flags |= ent->pinfo2->flags;
		ap->ops = ent->pinfo2->port_ops;
	} else {
		ap->pio_mask = ent->pio_mask;
		ap->mwdma_mask = ent->mwdma_mask;
		ap->udma_mask = ent->udma_mask;
		ap->flags |= ent->port_flags;
		ap->ops = ent->port_ops;
	}
	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}

/**
 *	ata_port_init_shost - Initialize SCSI host associated with ATA port
 *	@ap: ATA port to initialize SCSI host for
 *	@shost: SCSI host associated with @ap
 *
 *	Initialize SCSI host @shost associated with ATA port @ap.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
{
	ap->scsi_host = shost;

	shost->unique_id = ap->id;
	shost->max_id = 16;
	shost->max_lun = 1;
	shost->max_channel = 1;
	shost->max_cmd_len = 12;
}

/**
 *	ata_port_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host: Collections of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Attach low-level ATA driver to system.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, or NULL on error.
 */
static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
				      struct ata_host *host,
				      unsigned int port_no)
{
	struct Scsi_Host *shost;
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	if (!ent->port_ops->error_handler &&
	    !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!shost)
		return NULL;

	shost->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(shost);

	ata_port_init(ap, host, ent, port_no);
	ata_port_init_shost(ap, shost);

	return ap;
}

/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}

/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int i;
	struct device *dev = ent->dev;
	struct ata_host *host;
	int rc;

	DPRINTK("ENTER\n");

	if (ent->irq == 0) {
		dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
		return 0;
	}

	/* alloc a container for our list of ATA ports (buses) */
	host = kzalloc(sizeof(struct ata_host) +
		       (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host)
		return 0;

	ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
	host->n_ports = ent->n_ports;
	host->irq = ent->irq;
	host->irq2 = ent->irq2;
	host->mmio_base = ent->mmio_base;
	host->private_data = ent->private_data;

	/* register each port bound to this device */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;
		int irq_line = ent->irq;

		ap = ata_port_add(ent, host, i);
		host->ports[i] = ap;
		if (!ap)
			goto err_out;

		/* dummy? */
		if (ent->dummy_port_mask & (1 << i)) {
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		/* start port */
		rc = ap->ops->port_start(ap);
		if (rc) {
			host->ports[i] = NULL;
			scsi_host_put(ap->scsi_host);
			goto err_out;
		}

		/* Report the secondary IRQ for second channel legacy */
		if (i == 1 && ent->irq2)
			irq_line = ent->irq2;

		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
				 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				 (ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
				"ctl 0x%lX bmdma 0x%lX irq %d\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				irq_line);

		ata_chk_status(ap);
		host->ops->irq_clear(ap);
		ata_eh_freeze_port(ap);	/* freeze port before requesting IRQ */
	}

	/* obtain irq, that may be shared between channels */
	rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			 DRV_NAME, host);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* do we have a second IRQ for the other channel, eg legacy mode */
	if (ent->irq2) {
		/* We will get weird core code crashes later if this is true
		   so trap it now */
		BUG_ON(ent->irq == ent->irq2);

		rc = request_irq(ent->irq2, ent->port_ops->irq_handler,
				 ent->irq_flags, DRV_NAME, host);
		if (rc) {
			dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
				   ent->irq2, rc);
			goto err_out_free_irq;
		}
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 scontrol;
		int rc;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->scsi_host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out_free_irq:
	free_irq(ent->irq, host);
err_out:
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		if (ap) {
			ap->ops->port_stop(ap);
			scsi_host_put(ap->scsi_host);
		}
	}

	kfree(host);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
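
/*
 * Example (editor's illustrative sketch): the minimal shape of a LLD
 * probe routine built around ata_probe_ent_alloc() (defined below)
 * and ata_device_add().  All "foo_*" names are hypothetical and error
 * unwinding is elided.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		struct ata_probe_ent *probe_ent;
 *
 *		probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev),
 *						&foo_port_info);
 *		if (!probe_ent)
 *			return -ENOMEM;
 *
 *		probe_ent->n_ports = 1;
 *		probe_ent->irq = pdev->irq;
 *		probe_ent->irq_flags = IRQF_SHARED;
 *		// fill probe_ent->port[0] I/O addresses here
 *
 *		if (!ata_device_add(probe_ent))
 *			return -ENODEV;
 *		kfree(probe_ent);
 *		return 0;
 *	}
 */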

/**
 *	ata_port_detach - Detach ATA port in preparation for device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}

/**
 *	ata_host_remove - PCI layer callback for device removal
 *	@host: ATA host set that was removed
 *
 *	Unregister all objects associated with this host set.  Free those
 *	objects.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
void ata_host_remove(struct ata_host *host)
{
	unsigned int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	free_irq(host->irq, host);
	if (host->irq2)
		free_irq(host->irq2, host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_release(ap->scsi_host);

		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			/* FIXME: Add -ac IDE pci mods to remove these special cases */
			if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
				release_region(ATA_PRIMARY_CMD, 8);
			else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
				release_region(ATA_SECONDARY_CMD, 8);
		}

		scsi_host_put(ap->scsi_host);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);

	kfree(host);
}

/**
 *	ata_scsi_release - SCSI layer callback hook for host unload
 *	@shost: libata host to be unloaded
 *
 *	Performs all duties necessary to shut down a libata port:
 *	disable the port and release its resources.
 *
 *	LOCKING:
 *	Inherited from SCSI layer.
 *
 *	RETURNS:
 *	One.
 */
int ata_scsi_release(struct Scsi_Host *shost)
{
	struct ata_port *ap = ata_shost_to_port(shost);

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ap->ops->port_stop(ap);

	DPRINTK("EXIT\n");
	return 1;
}

struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	probe_ent->sht = port->sht;
	probe_ent->port_flags = port->flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;
	probe_ent->private_data = port->private_data;

	return probe_ent;
}

/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
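
/*
 * Example (editor's illustrative sketch): setting up a legacy primary
 * channel.  Only cmd_addr and ctl_addr need to be filled in by hand;
 * the taskfile register addresses follow from cmd_addr.
 *
 *	struct ata_ioports *ioaddr = &probe_ent->port[0];
 *
 *	ioaddr->cmd_addr = ATA_PRIMARY_CMD;	// 0x1f0
 *	ioaddr->altstatus_addr =
 *	ioaddr->ctl_addr = 0x3f6;
 *	ata_std_ports(ioaddr);
 */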

#ifdef CONFIG_PCI

void ata_pci_host_stop (struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);

	pci_iounmap(pdev, host->mmio_base);
}

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that
 *	hot-unplug or module unload event has occurred.
 *	Handle this by unregistering all objects associated
 *	with this PCI device.  Free those objects.  Then finally
 *	release PCI resources and disable device.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_remove(host);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
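
/*
 * Example (editor's illustrative sketch): drivers for dual-channel
 * PCI IDE parts describe their per-channel decode-enable bits with a
 * pci_bits table and skip channels the BIOS left disabled.  Registers
 * 0x41/0x43 with bit 7 follow the PIIX layout; other chips differ,
 * and "foo_enable_bits" is a hypothetical name.
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41U, 1U, 0x80UL, 0x80UL },	// port 0
 *		{ 0x43U, 1U, 0x80UL, 0x80UL },	// port 1
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
 *		return -ENOENT;	// channel disabled
 */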

void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);

	if (mesg.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

void ata_pci_device_do_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	ata_pci_device_do_resume(pdev);
	ata_host_resume(host);
	return 0;
}
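
/*
 * Example (editor's illustrative sketch): a driver with no
 * controller-specific suspend work can wire these helpers straight
 * into its pci_driver.  "foo_*" names are hypothetical.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_tbl,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */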

#endif /* CONFIG_PCI */

static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);

static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
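
/*
 * Example (editor's illustrative sketch): gate noisy per-interrupt
 * diagnostics so they fire at most roughly five times per second.
 * "irq_stat" stands in for whatever state the driver wants to report.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt (irq_stat 0x%x)\n",
 *				irq_stat);
 */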

/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of a register to change is a common
 *	operation for ATA controllers.  This function reads the 32-bit
 *	LE IO-mapped register @reg and tests for the following
 *	condition:
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
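
/*
 * Example (editor's illustrative sketch): wait up to 500ms, polling
 * every 10ms, for a hypothetical FOO_BUSY bit to clear.  Passing
 * val == mask makes the loop continue while the bit is still set;
 * "mmio" is an assumed void __iomem * base.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(mmio + FOO_STAT, FOO_BUSY, FOO_BUSY,
 *				   10, 500);
 *	if (status & FOO_BUSY)
 *		return -EBUSY;	// timed out
 */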

static void ata_dummy_noret(struct ata_port *ap)	{ }
static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;	/* always ready, never busy */
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
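
/*
 * Example (editor's illustrative sketch): a driver whose second
 * channel is absent or fused off publishes it as a dummy port instead
 * of renumbering the remaining ports.
 *
 *	probe_ent->dummy_port_mask |= 1 << 1;	// port 1 not implemented
 *
 * ata_device_add() then attaches ata_dummy_port_ops to that port, and
 * any command issued to it fails with AC_ERR_SYSTEM.
 */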

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_port_detach);
EXPORT_SYMBOL_GPL(ata_host_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);