/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 * Please ALWAYS copy linux-ide@vger.kernel.org
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#include <asm/semaphore.h>
#include <asm/byteorder.h>
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_boot[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_eh[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_before_fsrst[]	= { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[13] = tf->hob_nsect;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->hob_nsect	= fis[13];
}
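
/*
 * Example (illustrative sketch, not driver code): packing a taskfile
 * into a Register - Host to Device FIS and reading the result back.
 * The command value below is arbitrary, chosen only for demonstration.
 *
 *	struct ata_taskfile tf, res;
 *	u8 fis[20];
 *
 *	memset(&tf, 0, sizeof(tf));
 *	tf.command = ATA_CMD_READ_EXT;	// any valid command opcode
 *	ata_tf_to_fis(&tf, fis, 0);	// pmp 0; fis[1] bit 7 marks a Command FIS
 *	ata_tf_from_fis(fis, &res);	// res.command now holds fis[2]
 */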
static const u8 ata_rw_cmds[] = {
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@qc: command to examine and configure
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u8 cmd;
	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
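
	/*
	 * Example (illustrative): in the full command table each protocol
	 * owns a block of eight slots (multi at 0, pio at 8, dma at 16),
	 * indexed within the block by fua(4) + lba48(2) + write(1).  So a
	 * DMA write with LBA48 and no FUA computes
	 *
	 *	index + fua + lba48 + write == 16 + 0 + 2 + 1 == 19
	 *
	 * and picks the LBA48 DMA write opcode.
	 */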
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
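
/*
 * Example (illustrative sketch): round-tripping the packed mask,
 * assuming the usual libata shifts of 0/5/8 for PIO/MWDMA/UDMA.
 * PIO0-4, MWDMA0-2 and UDMA0-5 pack into one word and unpack back:
 *
 *	unsigned int pio, mwdma, udma;
 *	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	// pio == 0x1f, mwdma == 0x07, udma == 0x3f
 */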
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
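
/*
 * Example (illustrative): the three lookups compose.  Starting from a
 * mask with UDMA bits set:
 *
 *	u8 mode = ata_xfer_mask2mode(ATA_MASK_UDMA);	// highest UDMA mode
 *	unsigned int mask = ata_xfer_mode2mask(mode);	// back to a single bit
 *	int shift = ata_xfer_mode2shift(mode);		// == ATA_SHIFT_UDMA
 */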
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		dev->class++;
	}
}
/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 */
static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 */
static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 */
static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */
	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
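
/*
 * Example (illustrative): an ATAPI device leaves the signature
 * 0x14/0xeb in the LBA mid/high shadow registers after reset:
 *
 *	struct ata_taskfile tf;
 *
 *	memset(&tf, 0, sizeof(tf));
 *	tf.lbam = 0x14;
 *	tf.lbah = 0xeb;
 *	// ata_dev_classify(&tf) == ATA_DEV_ATAPI
 */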
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
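
/*
 * Example (illustrative): pulling the model string and capacity out of
 * a raw IDENTIFY page.  Word 27 starts the 40-byte model field; the
 * buffer is one byte longer (odd @len) for the terminating NUL.
 *
 *	u16 id[ATA_ID_WORDS];
 *	unsigned char model[41];
 *	u64 n_sectors;
 *
 *	// ... fill id[] from an IDENTIFY DEVICE transfer ...
 *	ata_id_c_string(id, model, 27, sizeof(model));
 *	n_sectors = ata_id_n_sectors(id);
 */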
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no action; it exists so that it
 *	may be used as the dev_select() entry in ata_port_operations.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}

/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 */
void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49], id[53], id[63], id[64], id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80], id[81], id[82], id[83], id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88], id[93]);
}
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care?).
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum.  Turn it into
		 * a mask.
		 */
		pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;

		/* But wait.. there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However, it's the speeds, not the modes,
		 * that are supported...  Note drivers using the timing
		 * API will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
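
/*
 * Example (illustrative): a drive reporting word 53 bit 1 set,
 * word 64 == 0x03 (PIO3-4), word 63 == 0x07 (MWDMA0-2) and word 88
 * low byte 0x3f (UDMA0-5) yields
 *
 *	pio_mask  = (0x03 << 3) | 0x7 = 0x1f	// PIO0-4
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *
 * and ata_mode_string(xfer_mask) would name the fastest mode.
 */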
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data value to pass to workqueue function
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user's (the low-level driver's) responsibility to make sure
 *	that only one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_WORK(&ap->port_task, fn, data);

	if (!delay)
		rc = queue_work(ata_wq, &ap->port_task);
	else
		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		DPRINTK("flush #2\n");
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}
void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;
	spin_lock_irqsave(&ap->host_set->lock, flags);

	/* no internal command while frozen */
	if (ap->flags & ATA_FLAG_FROZEN) {
		spin_unlock_irqrestore(&ap->host_set->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(&ap->host_set->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			ata_dev_printk(dev, KERN_WARNING,
				       "qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "zero err_mask for failed "
			       "internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(&ap->host_set->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	return err_mask;
}
/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY.  Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240ns per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@post_reset: is this read ID post-reset?
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    int post_reset, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
		rc = -EINVAL;
		reason = "device reports illegal type";
		goto err_out;
	}

	if (post_reset && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			post_reset = 0;
			goto retry;
		}
	}

	*p_class = class;
	return 0;

 err_out:
	ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
		       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}

	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *	@print_info: Enable device info printout
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev, int print_info)
{
	struct ata_port *ap = dev->ap;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	int i, rc;

	if (!ata_dev_enabled(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, dev->devno);
		return 0;
	}

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	/* print device capabilities */
	if (print_info)
		ata_dev_printk(dev, KERN_DEBUG, "cfg 49:%04x 82:%04x 83:%04x "
			       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (print_info)
				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
					       "max %s, %Lu sectors: %s %s\n",
					       ata_id_major_version(id),
					       ata_mode_string(xfer_mask),
					       (unsigned long long)dev->n_sectors,
					       lba_desc, ncq_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (print_info)
				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
					       "max %s, %Lu sectors: CHS %u/%u/%u\n",
					       ata_id_major_version(id),
					       ata_mode_string(xfer_mask),
					       (unsigned long long)dev->n_sectors,
					       dev->cylinders, dev->heads, dev->sectors);
		}

		if (dev->id[59] & 0x100) {
			dev->multi_count = dev->id[59] & 0xff;
			DPRINTK("ata%u: dev %u multi count %u\n",
				ap->id, dev->devno, dev->multi_count);
		}
	}
	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			ata_dev_printk(dev, KERN_WARNING,
				       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	ap->host->max_cmd_len = 0;
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->host->max_cmd_len = max_t(unsigned int,
					      ap->host->max_cmd_len,
					      ap->device[i].cdb_len);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return 0;

err_out_nosup:
	DPRINTK("EXIT, err\n");
	return rc;
}
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc, down_xfermask;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	down_xfermask = 0;

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined.  Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
		if (rc)
			goto fail;

		rc = ata_dev_configure(dev, 1);
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc) {
		down_xfermask = 1;
		goto fail;
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	switch (rc) {
	case -EINVAL:
	case -ENODEV:
		tries[dev->devno] = 0;
		break;
	case -EIO:
		sata_down_spd_limit(ap);
		/* fall through */
	default:
		tries[dev->devno]--;
		if (down_xfermask &&
		    ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
			tries[dev->devno] = 0;
	}

	if (!tries[dev->devno]) {
		ata_down_xfermask_limit(dev, 1);
		ata_dev_disable(dev);
	}

	goto retry;
}
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}
/**
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(ap, SCR_CONTROL, &scontrol);

	if (ata_port_online(ap)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_port_printk(ap, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_port_printk(ap, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}

/**
 *	ata_dev_pair - return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable; returns NULL if
 *	no such device is present.
 */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_port *ap = adev->ap;
	struct ata_device *pair = &ap->device[1 - adev->devno];

	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}

/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@ap: Port to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @ap downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	mask = ap->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	spd = (sstatus >> 4) & 0xf;
	if (spd <= 1)
		return -EINVAL;
	spd--;
	mask &= (1 << spd) - 1;
	if (!mask)
		return -EINVAL;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
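
/*
 * Example (illustrative): with sata_spd_limit == 0x3 (Gen1 + Gen2
 * allowed) and the link running at Gen2 (SStatus SPD field == 2),
 * the mask drops its highest bit and is clipped below the current
 * speed:
 *
 *	mask = 0x3 & ~(1 << 1);		// drop Gen2 -> 0x1
 *	mask &= (1 << (2 - 1)) - 1;	// keep speeds below Gen2 -> 0x1
 *
 * leaving only Gen1, so the next hardreset brings the link up slower.
 */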
static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
{
	u32 spd, limit;

	if (ap->sata_spd_limit == UINT_MAX)
		limit = 0;
	else
		limit = fls(ap->sata_spd_limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

	return spd != limit;
}

/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@ap: Port in question
 *
 *	Test whether the spd limit in SControl matches
 *	@ap->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_port *ap)
{
	u32 scontrol;

	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(ap, &scontrol);
}
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@ap: Port to set SATA spd for
 *
 *	Set SATA spd of @ap according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(ap, &scontrol))
		return 0;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */

/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
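
/*
 * Example (illustrative): quantizing a 70 ns setup time to a 30 ns
 * bus clock (T is in picoseconds here because of the * 1000 above):
 *
 *	ENOUGH(70 * 1000, 30000) == (70000 - 1) / 30000 + 1 == 3 clocks
 *
 * i.e. round up to the smallest clock count whose duration covers the
 * required time; EZ() additionally maps 0 ("no requirement") to 0
 * clocks instead of 1.
 */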
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO
	 * timing.
	 */
	if (speed > XFER_PIO_4) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@force_pio0: Force PIO0
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
{
	unsigned long xfer_mask;
	int highbit;

	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
				      dev->udma_mask);

	if (!xfer_mask)
		return -EINVAL;
	/* don't gear down to MWDMA from UDMA, go directly to PIO */
	if (xfer_mask & ATA_MASK_UDMA)
		xfer_mask &= ~ATA_MASK_MWDMA;

	highbit = fls(xfer_mask) - 1;
	xfer_mask &= ~(1 << highbit);
	if (force_pio0)
		xfer_mask &= 1 << ATA_SHIFT_PIO;
	if (!xfer_mask)
		return -EINVAL;

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
		       ata_mode_string(xfer_mask));

	return 0;
}
static int ata_dev_set_mode(struct ata_device *dev)
{
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	rc = ata_dev_revalidate(dev, 0);
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* has private set_mode? */
	if (ap->ops->set_mode) {
		/* FIXME: make ->set_mode handle no device case and
		 * return error code and failing device on failure.
		 */
		for (i = 0; i < ATA_MAX_DEVICES; i++) {
			if (ata_dev_enabled(&ap->device[i])) {
				ap->ops->set_mode(ap);
				break;
			}
		}
		return 0;
	}

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
		ap->host_set->simplex_claimed = 1;

	/* step 5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	None.
 */
unsigned int ata_busy_sleep(struct ata_port *ap,
			    unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient\n");

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs)\n", tmout / HZ);
		return 1;
	}

	return 0;
}
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2 ms rule and then waits for ready.
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
		return AC_ERR_OTHER;
	}

	ata_bus_post_reset(ap, devmask);

	return 0;
}
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
/**
 *	sata_phy_debounce - debounce SATA phy status
 *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Make sure SStatus of @ap reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constrains the
 *	beginning of the stable state.  Because, after hot unplugging,
 *	DET gets stuck at 1 on some controllers, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = params[1] * HZ / 1000;
	unsigned long timeout = jiffies + params[2] * HZ / 1000;
	unsigned long last_jiffies;
	u32 last, cur;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, timeout))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		if (time_after(jiffies, timeout))
			return -EBUSY;
	}
}
/**
 *	sata_phy_resume - resume SATA phy
 *	@ap: ATA port to resume SATA phy for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Resume SATA phy of @ap and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_phy_debounce(ap, params);
}
static void ata_wait_spinup(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned long end, secs;
	int rc;

	/* first, debounce phy if SATA */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_debounce(ap, sata_deb_timing_eh);

		/* if debounced successfully and offline, no need to wait */
		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
			return;
	}

	/* okay, let's give the drive time to spin up */
	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
	secs = ((end - jiffies) + HZ - 1) / HZ;

	if (time_after(jiffies, end))
		return;

	if (secs > 5)
		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
				"(%lu secs)\n", secs);

	schedule_timeout_uninterruptible(end - jiffies);
}
/**
 *	ata_std_prereset - prepare for reset
 *	@ap: ATA port to be reset
 *
 *	@ap is about to be reset.  Initialize it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing;
	int rc;

	if (ehc->i.flags & ATA_EHI_HOTPLUGGED) {
		if (ap->flags & ATA_FLAG_HRST_TO_RESUME)
			ehc->i.action |= ATA_EH_HARDRESET;
		if (ap->flags & ATA_FLAG_SKIP_D2H_BSY)
			ata_wait_spinup(ap);
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		if (ap->flags & ATA_FLAG_LOADING)
			timing = sata_deb_timing_boot;
		else
			timing = sata_deb_timing_eh;

		rc = sata_phy_resume(ap, timing);
		if (rc && rc != -EOPNOTSUPP) {
			/* phy resume failed */
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
			return rc;
		}
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	return 0;
}
2603 * ata_std_softreset - reset host port via ATA SRST
2604 * @ap: port to reset
2605 * @classes: resulting classes of attached devices
2607 * Reset host port using ATA SRST.
2610 * Kernel thread context (may sleep)
2613 * 0 on success, -errno otherwise.
2615 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2617 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2618 unsigned int devmask = 0, err_mask;
2623 if (ata_port_offline(ap)) {
2624 classes[0] = ATA_DEV_NONE;
2628 /* determine if device 0/1 are present */
2629 if (ata_devchk(ap, 0))
2630 devmask |= (1 << 0);
2631 if (slave_possible && ata_devchk(ap, 1))
2632 devmask |= (1 << 1);
2634 /* select device 0 again */
2635 ap->ops->dev_select(ap, 0);
2637 /* issue bus reset */
2638 DPRINTK("about to softreset, devmask=%x\n", devmask);
2639 err_mask = ata_bus_softreset(ap, devmask);
2641 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2646 /* determine by signature whether we have ATA or ATAPI devices */
2647 classes[0] = ata_dev_try_classify(ap, 0, &err);
2648 if (slave_possible && err != 0x81)
2649 classes[1] = ata_dev_try_classify(ap, 1, &err);
2652 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2657 * sata_std_hardreset - reset host port via SATA phy reset
2658 * @ap: port to reset
2659 * @class: resulting class of attached device
2661 * SATA phy-reset host port using DET bits of SControl register.
2664 * Kernel thread context (may sleep)
2667 * 0 on success, -errno otherwise.
2669 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2676 if (sata_set_spd_needed(ap)) {
2677 /* SATA spec says nothing about how to reconfigure
2678 * spd. To be on the safe side, turn off phy during
2679 * reconfiguration. This works for at least ICH7 AHCI
2682 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2685 scontrol = (scontrol & 0x0f0) | 0x302;
2687 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2693 /* issue phy wake/reset */
2694 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2697 scontrol = (scontrol & 0x0f0) | 0x301;
2699 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2702 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2703 * 10.4.2 says at least 1 ms.
2707 /* bring phy back */
2708 sata_phy_resume(ap, sata_deb_timing_eh);
2710 /* TODO: phy layer with polling, timeouts, etc. */
2711 if (ata_port_offline(ap)) {
2712 *class = ATA_DEV_NONE;
2713 DPRINTK("EXIT, link offline\n");
2717 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2718 ata_port_printk(ap, KERN_ERR,
2719 "COMRESET failed (device not ready)\n");
2723 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2725 *class = ata_dev_try_classify(ap, 0, NULL);
2727 DPRINTK("EXIT, class=%u\n", *class);
2732 * ata_std_postreset - standard postreset callback
2733 * @ap: the target ata_port
2734 * @classes: classes of attached devices
2736 * This function is invoked after a successful reset. Note that
2737 * the device might have been reset more than once using
2738 * different reset methods before postreset is invoked.
2741 * Kernel thread context (may sleep)
2743 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2749 /* print link status */
2750 sata_print_link_status(ap);
2753 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2754 sata_scr_write(ap, SCR_ERROR, serror);
2756 /* re-enable interrupts */
2757 if (!ap->ops->error_handler) {
2758 /* FIXME: hack. create a hook instead */
2759 if (ap->ioaddr.ctl_addr)
2763 /* is double-select really necessary? */
2764 if (classes[0] != ATA_DEV_NONE)
2765 ap->ops->dev_select(ap, 1);
2766 if (classes[1] != ATA_DEV_NONE)
2767 ap->ops->dev_select(ap, 0);
2769 /* bail out if no device is present */
2770 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2771 DPRINTK("EXIT, no device\n");
2775 /* set up device control */
2776 if (ap->ioaddr.ctl_addr) {
2777 if (ap->flags & ATA_FLAG_MMIO)
2778 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2780 outb(ap->ctl, ap->ioaddr.ctl_addr);
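/* Example (illustrative sketch): the four standard callbacks above
 * are meant to be used together.  A BMDMA driver's ->error_handler
 * hook might wire them up roughly like this; the helper named below
 * is an assumption and may differ per driver.
 *
 *	static void example_error_handler(struct ata_port *ap)
 *	{
 *		ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
 *				   sata_std_hardreset, ata_std_postreset);
 *	}
 */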
2787 * ata_dev_same_device - Determine whether new ID matches configured device
2788 * @dev: device to compare against
2789 * @new_class: class of the new device
2790 * @new_id: IDENTIFY page of the new device
2792 * Compare @new_class and @new_id against @dev and determine
 * whether @dev is the device indicated by @new_class and @new_id.
2800 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2802 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2805 const u16 *old_id = dev->id;
2806 unsigned char model[2][41], serial[2][21];
2809 if (dev->class != new_class) {
2810 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2811 dev->class, new_class);
2815 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2816 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2817 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2818 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2819 new_n_sectors = ata_id_n_sectors(new_id);
2821 if (strcmp(model[0], model[1])) {
2822 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2823 "'%s' != '%s'\n", model[0], model[1]);
2827 if (strcmp(serial[0], serial[1])) {
2828 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2829 "'%s' != '%s'\n", serial[0], serial[1]);
2833 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2834 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2836 (unsigned long long)dev->n_sectors,
2837 (unsigned long long)new_n_sectors);
2845 * ata_dev_revalidate - Revalidate ATA device
2846 * @dev: device to revalidate
2847 * @post_reset: is this revalidation after reset?
 * Re-read IDENTIFY page and make sure @dev is still attached to
 * the port.
2853 * Kernel thread context (may sleep)
2856 * 0 on success, negative errno otherwise
2858 int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2860 unsigned int class = dev->class;
2861 u16 *id = (void *)dev->ap->sector_buf;
2864 if (!ata_dev_enabled(dev)) {
2870 rc = ata_dev_read_id(dev, &class, post_reset, id);
2874 /* is the device still there? */
2875 if (!ata_dev_same_device(dev, class, id)) {
2880 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2882 /* configure device according to the new ID */
2883 rc = ata_dev_configure(dev, 0);
2888 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2892 static const char * const ata_dma_blacklist [] = {
2893 "WDC AC11000H", NULL,
2894 "WDC AC22100H", NULL,
2895 "WDC AC32500H", NULL,
2896 "WDC AC33100H", NULL,
2897 "WDC AC31600H", NULL,
2898 "WDC AC32100H", "24.09P07",
2899 "WDC AC23200L", "21.10N21",
2900 "Compaq CRD-8241B", NULL,
2905 "SanDisk SDP3B", NULL,
2906 "SanDisk SDP3B-64", NULL,
2907 "SANYO CD-ROM CRD", NULL,
2908 "HITACHI CDR-8", NULL,
2909 "HITACHI CDR-8335", NULL,
2910 "HITACHI CDR-8435", NULL,
2911 "Toshiba CD-ROM XM-6202B", NULL,
2912 "TOSHIBA CD-ROM XM-1702BC", NULL,
2914 "E-IDE CD-ROM CR-840", NULL,
2915 "CD-ROM Drive/F5A", NULL,
2916 "WPI CDD-820", NULL,
2917 "SAMSUNG CD-ROM SC-148C", NULL,
2918 "SAMSUNG CD-ROM SC", NULL,
2919 "SanDisk SDP3B-64", NULL,
2920 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2921 "_NEC DV5800A", NULL,
2922 "SAMSUNG CD-ROM SN-124", "N001"
2925 static int ata_strim(char *s, size_t len)
2927 len = strnlen(s, len);
2929 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2930 while ((len > 0) && (s[len - 1] == ' ')) {
2937 static int ata_dma_blacklisted(const struct ata_device *dev)
2939 unsigned char model_num[40];
2940 unsigned char model_rev[16];
2941 unsigned int nlen, rlen;
2944 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2946 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2948 nlen = ata_strim(model_num, sizeof(model_num));
2949 rlen = ata_strim(model_rev, sizeof(model_rev));
2951 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2952 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2953 if (ata_dma_blacklist[i+1] == NULL)
if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
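/* Illustrative note: ata_dma_blacklist[] is scanned as { model,
 * firmware revision } pairs, and a NULL revision blacklists every
 * firmware level of that model.  A hypothetical new entry would
 * look like:
 *
 *	"EXAMPLE DRIVE 1234",	NULL,		(all firmware revs)
 *	"EXAMPLE DRIVE 5678",	"1.23",		(only rev 1.23)
 */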
2963 * ata_dev_xfermask - Compute supported xfermask of the given device
2964 * @dev: Device to compute xfermask for
2966 * Compute supported xfermask of @dev and store it in
2967 * dev->*_mask. This function is responsible for applying all
2968 * known limits including host controller limits, device
2971 * FIXME: The current implementation limits all transfer modes to
 * the fastest of the slowest device on the port.  This is not
2973 * required on most controllers.
2978 static void ata_dev_xfermask(struct ata_device *dev)
2980 struct ata_port *ap = dev->ap;
2981 struct ata_host_set *hs = ap->host_set;
2982 unsigned long xfer_mask;
2985 xfer_mask = ata_pack_xfermask(ap->pio_mask,
2986 ap->mwdma_mask, ap->udma_mask);
2988 /* Apply cable rule here. Don't apply it early because when
2989 * we handle hot plug the cable type can itself change.
2991 if (ap->cbl == ATA_CBL_PATA40)
2992 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2994 /* FIXME: Use port-wide xfermask for now */
2995 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2996 struct ata_device *d = &ap->device[i];
2998 if (ata_dev_absent(d))
3001 if (ata_dev_disabled(d)) {
3002 /* to avoid violating device selection timing */
3003 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3004 UINT_MAX, UINT_MAX);
3008 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3009 d->mwdma_mask, d->udma_mask);
3010 xfer_mask &= ata_id_xfermask(d->id);
3011 if (ata_dma_blacklisted(d))
3012 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3015 if (ata_dma_blacklisted(dev))
3016 ata_dev_printk(dev, KERN_WARNING,
3017 "device is on DMA blacklist, disabling DMA\n");
3019 if (hs->flags & ATA_HOST_SIMPLEX) {
3020 if (hs->simplex_claimed)
3021 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3024 if (ap->ops->mode_filter)
3025 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3027 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3028 &dev->mwdma_mask, &dev->udma_mask);
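/* Example (illustrative only): the pack/unpack helpers used above
 * fold the three per-type masks into a single word so that limits
 * can be applied to all of them at once, e.g. to knock a device
 * down to PIO:
 *
 *	unsigned long xfer_mask;
 *
 *	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
 *				      dev->udma_mask);
 *	xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
 *	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
 *			    &dev->mwdma_mask, &dev->udma_mask);
 */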
3032 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3033 * @dev: Device to which command will be sent
3035 * Issue SET FEATURES - XFER MODE command to device @dev
3039 * PCI/etc. bus probe sem.
3042 * 0 on success, AC_ERR_* mask otherwise.
3045 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3047 struct ata_taskfile tf;
3048 unsigned int err_mask;
3050 /* set up set-features taskfile */
3051 DPRINTK("set features - xfer mode\n");
3053 ata_tf_init(dev, &tf);
3054 tf.command = ATA_CMD_SET_FEATURES;
3055 tf.feature = SETFEATURES_XFER;
3056 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3057 tf.protocol = ATA_PROT_NODATA;
3058 tf.nsect = dev->xfer_mode;
3060 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3062 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3067 * ata_dev_init_params - Issue INIT DEV PARAMS command
3068 * @dev: Device to which command will be sent
3069 * @heads: Number of heads (taskfile parameter)
3070 * @sectors: Number of sectors (taskfile parameter)
3073 * Kernel thread context (may sleep)
3076 * 0 on success, AC_ERR_* mask otherwise.
3078 static unsigned int ata_dev_init_params(struct ata_device *dev,
3079 u16 heads, u16 sectors)
3081 struct ata_taskfile tf;
3082 unsigned int err_mask;
3084 /* Number of sectors per track 1-255. Number of heads 1-16 */
3085 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3086 return AC_ERR_INVALID;
3088 /* set up init dev params taskfile */
DPRINTK("init dev params\n");
3091 ata_tf_init(dev, &tf);
3092 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3093 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3094 tf.protocol = ATA_PROT_NODATA;
3096 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3098 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3100 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3105 * ata_sg_clean - Unmap DMA memory associated with command
3106 * @qc: Command containing DMA memory to be released
3108 * Unmap all mapped DMA memory associated with this command.
3111 * spin_lock_irqsave(host_set lock)
3114 static void ata_sg_clean(struct ata_queued_cmd *qc)
3116 struct ata_port *ap = qc->ap;
3117 struct scatterlist *sg = qc->__sg;
3118 int dir = qc->dma_dir;
3119 void *pad_buf = NULL;
3121 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3122 WARN_ON(sg == NULL);
3124 if (qc->flags & ATA_QCFLAG_SINGLE)
3125 WARN_ON(qc->n_elem > 1);
3127 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
/* if we padded the buffer out to a 32-bit boundary, and the data
 * xfer direction is from-device, we must copy from the
 * pad buffer back into the supplied buffer
3133 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3134 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3136 if (qc->flags & ATA_QCFLAG_SG) {
3138 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3139 /* restore last sg */
3140 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3142 struct scatterlist *psg = &qc->pad_sgent;
3143 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3144 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3145 kunmap_atomic(addr, KM_IRQ0);
3149 dma_unmap_single(ap->dev,
3150 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3153 sg->length += qc->pad_len;
3155 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3156 pad_buf, qc->pad_len);
3159 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3164 * ata_fill_sg - Fill PCI IDE PRD table
3165 * @qc: Metadata associated with taskfile to be transferred
3167 * Fill PCI IDE PRD (scatter-gather) table with segments
3168 * associated with the current disk command.
3171 * spin_lock_irqsave(host_set lock)
3174 static void ata_fill_sg(struct ata_queued_cmd *qc)
3176 struct ata_port *ap = qc->ap;
3177 struct scatterlist *sg;
3180 WARN_ON(qc->__sg == NULL);
3181 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3184 ata_for_each_sg(sg, qc) {
3188 /* determine if physical DMA addr spans 64K boundary.
3189 * Note h/w doesn't support 64-bit, so we unconditionally
3190 * truncate dma_addr_t to u32.
3192 addr = (u32) sg_dma_address(sg);
3193 sg_len = sg_dma_len(sg);
3196 offset = addr & 0xffff;
3198 if ((offset + sg_len) > 0x10000)
3199 len = 0x10000 - offset;
3201 ap->prd[idx].addr = cpu_to_le32(addr);
3202 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3203 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3212 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
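/* Illustrative note (derived from the code above): each PRD entry is
 * a pair of little-endian 32-bit words -- the physical buffer address
 * and the byte count in the low 16 bits of flags_len, with a count of
 * 0 meaning 64K.  ATA_PRD_EOT in the last entry marks the end of the
 * table for the controller, which is why no segment built here may
 * cross a 64K boundary.
 */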
3215 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3216 * @qc: Metadata associated with taskfile to check
3218 * Allow low-level driver to filter ATA PACKET commands, returning
3219 * a status indicating whether or not it is OK to use DMA for the
3220 * supplied PACKET command.
3223 * spin_lock_irqsave(host_set lock)
3225 * RETURNS: 0 when ATAPI DMA can be used
3228 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3230 struct ata_port *ap = qc->ap;
3231 int rc = 0; /* Assume ATAPI DMA is OK by default */
3233 if (ap->ops->check_atapi_dma)
3234 rc = ap->ops->check_atapi_dma(qc);
3236 /* We don't support polling DMA.
3237 * Use PIO if the LLDD handles only interrupts in
3238 * the HSM_ST_LAST state and the ATAPI device
3239 * generates CDB interrupts.
3241 if ((ap->flags & ATA_FLAG_PIO_POLLING) &&
3242 (qc->dev->flags & ATA_DFLAG_CDB_INTR))
3248 * ata_qc_prep - Prepare taskfile for submission
3249 * @qc: Metadata associated with taskfile to be prepared
3251 * Prepare ATA taskfile for submission.
3254 * spin_lock_irqsave(host_set lock)
3256 void ata_qc_prep(struct ata_queued_cmd *qc)
3258 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3264 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3267 * ata_sg_init_one - Associate command with memory buffer
3268 * @qc: Command to be associated
3269 * @buf: Memory buffer
3270 * @buflen: Length of memory buffer, in bytes.
3272 * Initialize the data-related elements of queued_cmd @qc
3273 * to point to a single memory buffer, @buf of byte length @buflen.
3276 * spin_lock_irqsave(host_set lock)
3279 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3281 struct scatterlist *sg;
3283 qc->flags |= ATA_QCFLAG_SINGLE;
3285 memset(&qc->sgent, 0, sizeof(qc->sgent));
3286 qc->__sg = &qc->sgent;
3288 qc->orig_n_elem = 1;
3292 sg_init_one(sg, buf, buflen);
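/* Example (illustrative only): an internal command reading a single
 * sector into the port's scratch buffer would be set up roughly like
 * this before issue; qc allocation and taskfile setup are assumed.
 *
 *	ata_sg_init_one(qc, ap->sector_buf, ATA_SECT_SIZE);
 *	qc->dma_dir = DMA_FROM_DEVICE;
 */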
3296 * ata_sg_init - Associate command with scatter-gather table.
3297 * @qc: Command to be associated
3298 * @sg: Scatter-gather table.
3299 * @n_elem: Number of elements in s/g table.
3301 * Initialize the data-related elements of queued_cmd @qc
 * to point to a scatter-gather table @sg, containing @n_elem elements.
3306 * spin_lock_irqsave(host_set lock)
3309 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3310 unsigned int n_elem)
3312 qc->flags |= ATA_QCFLAG_SG;
3314 qc->n_elem = n_elem;
3315 qc->orig_n_elem = n_elem;
3319 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3320 * @qc: Command with memory buffer to be mapped.
3322 * DMA-map the memory buffer associated with queued_cmd @qc.
3325 * spin_lock_irqsave(host_set lock)
3328 * Zero on success, negative on error.
3331 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3333 struct ata_port *ap = qc->ap;
3334 int dir = qc->dma_dir;
3335 struct scatterlist *sg = qc->__sg;
3336 dma_addr_t dma_address;
3339 /* we must lengthen transfers to end on a 32-bit boundary */
3340 qc->pad_len = sg->length & 3;
3342 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3343 struct scatterlist *psg = &qc->pad_sgent;
3345 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3347 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3349 if (qc->tf.flags & ATA_TFLAG_WRITE)
3350 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3353 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3354 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3356 sg->length -= qc->pad_len;
3357 if (sg->length == 0)
3360 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3361 sg->length, qc->pad_len);
3369 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3371 if (dma_mapping_error(dma_address)) {
3373 sg->length += qc->pad_len;
3377 sg_dma_address(sg) = dma_address;
3378 sg_dma_len(sg) = sg->length;
3381 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3382 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3388 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3389 * @qc: Command with scatter-gather table to be mapped.
3391 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3394 * spin_lock_irqsave(host_set lock)
3397 * Zero on success, negative on error.
3401 static int ata_sg_setup(struct ata_queued_cmd *qc)
3403 struct ata_port *ap = qc->ap;
3404 struct scatterlist *sg = qc->__sg;
3405 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3406 int n_elem, pre_n_elem, dir, trim_sg = 0;
3408 VPRINTK("ENTER, ata%u\n", ap->id);
3409 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3411 /* we must lengthen transfers to end on a 32-bit boundary */
3412 qc->pad_len = lsg->length & 3;
3414 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3415 struct scatterlist *psg = &qc->pad_sgent;
3416 unsigned int offset;
3418 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3420 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3423 * psg->page/offset are used to copy to-be-written
3424 * data in this function or read data in ata_sg_clean.
3426 offset = lsg->offset + lsg->length - qc->pad_len;
3427 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3428 psg->offset = offset_in_page(offset);
3430 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3431 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3432 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3433 kunmap_atomic(addr, KM_IRQ0);
3436 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3437 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3439 lsg->length -= qc->pad_len;
3440 if (lsg->length == 0)
3443 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3444 qc->n_elem - 1, lsg->length, qc->pad_len);
3447 pre_n_elem = qc->n_elem;
3448 if (trim_sg && pre_n_elem)
3457 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3459 /* restore last sg */
3460 lsg->length += qc->pad_len;
3464 DPRINTK("%d sg elements mapped\n", n_elem);
3467 qc->n_elem = n_elem;
3473 * swap_buf_le16 - swap halves of 16-bit words in place
3474 * @buf: Buffer to swap
3475 * @buf_words: Number of 16-bit words in buffer.
3477 * Swap halves of 16-bit words if needed to convert from
 * little-endian byte order to native cpu byte order, or vice versa.
3482 * Inherited from caller.
3484 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3489 for (i = 0; i < buf_words; i++)
3490 buf[i] = le16_to_cpu(buf[i]);
3491 #endif /* __BIG_ENDIAN */
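/* Example (illustrative only): IDENTIFY data arrives as 256
 * little-endian 16-bit words, so PIO read paths typically fix the
 * byte order in place right after the transfer:
 *
 *	u16 id[ATA_ID_WORDS];
 *
 *	ap->ops->data_xfer(dev, (unsigned char *)id, sizeof(id), 0);
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */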
3495 * ata_mmio_data_xfer - Transfer data by MMIO
 * @adev: device for this I/O
3498 * @buflen: buffer length
3499 * @write_data: read/write
3501 * Transfer data from/to the device data register by MMIO.
3504 * Inherited from caller.
3507 void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3508 unsigned int buflen, int write_data)
3510 struct ata_port *ap = adev->ap;
3512 unsigned int words = buflen >> 1;
3513 u16 *buf16 = (u16 *) buf;
3514 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3516 /* Transfer multiple of 2 bytes */
3518 for (i = 0; i < words; i++)
3519 writew(le16_to_cpu(buf16[i]), mmio);
3521 for (i = 0; i < words; i++)
3522 buf16[i] = cpu_to_le16(readw(mmio));
3525 /* Transfer trailing 1 byte, if any. */
3526 if (unlikely(buflen & 0x01)) {
3527 u16 align_buf[1] = { 0 };
3528 unsigned char *trailing_buf = buf + buflen - 1;
3531 memcpy(align_buf, trailing_buf, 1);
3532 writew(le16_to_cpu(align_buf[0]), mmio);
3534 align_buf[0] = cpu_to_le16(readw(mmio));
3535 memcpy(trailing_buf, align_buf, 1);
3541 * ata_pio_data_xfer - Transfer data by PIO
3542 * @adev: device to target
3544 * @buflen: buffer length
3545 * @write_data: read/write
3547 * Transfer data from/to the device data register by PIO.
3550 * Inherited from caller.
3553 void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3554 unsigned int buflen, int write_data)
3556 struct ata_port *ap = adev->ap;
3557 unsigned int words = buflen >> 1;
3559 /* Transfer multiple of 2 bytes */
3561 outsw(ap->ioaddr.data_addr, buf, words);
3563 insw(ap->ioaddr.data_addr, buf, words);
3565 /* Transfer trailing 1 byte, if any. */
3566 if (unlikely(buflen & 0x01)) {
3567 u16 align_buf[1] = { 0 };
3568 unsigned char *trailing_buf = buf + buflen - 1;
3571 memcpy(align_buf, trailing_buf, 1);
3572 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3574 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3575 memcpy(trailing_buf, align_buf, 1);
3581 * ata_pio_data_xfer_noirq - Transfer data by PIO
3582 * @adev: device to target
3584 * @buflen: buffer length
3585 * @write_data: read/write
3587 * Transfer data from/to the device data register by PIO. Do the
3588 * transfer with interrupts disabled.
3591 * Inherited from caller.
3594 void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3595 unsigned int buflen, int write_data)
3597 unsigned long flags;
3598 local_irq_save(flags);
3599 ata_pio_data_xfer(adev, buf, buflen, write_data);
3600 local_irq_restore(flags);
3605 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3606 * @qc: Command on going
3608 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3611 * Inherited from caller.
3614 static void ata_pio_sector(struct ata_queued_cmd *qc)
3616 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3617 struct scatterlist *sg = qc->__sg;
3618 struct ata_port *ap = qc->ap;
3620 unsigned int offset;
3623 if (qc->cursect == (qc->nsect - 1))
3624 ap->hsm_task_state = HSM_ST_LAST;
3626 page = sg[qc->cursg].page;
3627 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3629 /* get the current page and offset */
3630 page = nth_page(page, (offset >> PAGE_SHIFT));
3631 offset %= PAGE_SIZE;
3633 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3635 if (PageHighMem(page)) {
3636 unsigned long flags;
3638 /* FIXME: use a bounce buffer */
3639 local_irq_save(flags);
3640 buf = kmap_atomic(page, KM_IRQ0);
3642 /* do the actual data transfer */
3643 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3645 kunmap_atomic(buf, KM_IRQ0);
3646 local_irq_restore(flags);
3648 buf = page_address(page);
3649 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3655 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3662 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3663 * @qc: Command on going
 * Transfer one or more sectors of ATA_SECT_SIZE bytes of data
 * from/to the ATA device for the DRQ request.
3669 * Inherited from caller.
3672 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3674 if (is_multi_taskfile(&qc->tf)) {
3675 /* READ/WRITE MULTIPLE */
3678 WARN_ON(qc->dev->multi_count == 0);
3680 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3688 * atapi_send_cdb - Write CDB bytes to hardware
3689 * @ap: Port to which ATAPI device is attached.
3690 * @qc: Taskfile currently active
 * When the device has indicated its readiness to accept
 * a CDB, this function is called.  Send the CDB.
3699 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3702 DPRINTK("send cdb\n");
3703 WARN_ON(qc->dev->cdb_len < 12);
3705 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3706 ata_altstatus(ap); /* flush */
3708 switch (qc->tf.protocol) {
3709 case ATA_PROT_ATAPI:
3710 ap->hsm_task_state = HSM_ST;
3712 case ATA_PROT_ATAPI_NODATA:
3713 ap->hsm_task_state = HSM_ST_LAST;
3715 case ATA_PROT_ATAPI_DMA:
3716 ap->hsm_task_state = HSM_ST_LAST;
3717 /* initiate bmdma */
3718 ap->ops->bmdma_start(qc);
3724 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3725 * @qc: Command on going
3726 * @bytes: number of bytes
 * Transfer data from/to the ATAPI device.
3731 * Inherited from caller.
3735 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3737 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3738 struct scatterlist *sg = qc->__sg;
3739 struct ata_port *ap = qc->ap;
3742 unsigned int offset, count;
3744 if (qc->curbytes + bytes >= qc->nbytes)
3745 ap->hsm_task_state = HSM_ST_LAST;
3748 if (unlikely(qc->cursg >= qc->n_elem)) {
 * The end of qc->sg is reached and the device expects
 * more data to transfer.  In order not to overrun qc->sg
 * and to fulfill the length specified in the byte count register:
 * - for the read case, discard trailing data from the device
 * - for the write case, pad with zero data to the device
3756 u16 pad_buf[1] = { 0 };
3757 unsigned int words = bytes >> 1;
3760 if (words) /* warning if bytes > 1 */
3761 ata_dev_printk(qc->dev, KERN_WARNING,
3762 "%u bytes trailing data\n", bytes);
3764 for (i = 0; i < words; i++)
3765 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3767 ap->hsm_task_state = HSM_ST_LAST;
3771 sg = &qc->__sg[qc->cursg];
3774 offset = sg->offset + qc->cursg_ofs;
3776 /* get the current page and offset */
3777 page = nth_page(page, (offset >> PAGE_SHIFT));
3778 offset %= PAGE_SIZE;
3780 /* don't overrun current sg */
3781 count = min(sg->length - qc->cursg_ofs, bytes);
3783 /* don't cross page boundaries */
3784 count = min(count, (unsigned int)PAGE_SIZE - offset);
3786 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3788 if (PageHighMem(page)) {
3789 unsigned long flags;
3791 /* FIXME: use bounce buffer */
3792 local_irq_save(flags);
3793 buf = kmap_atomic(page, KM_IRQ0);
3795 /* do the actual data transfer */
3796 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3798 kunmap_atomic(buf, KM_IRQ0);
3799 local_irq_restore(flags);
3801 buf = page_address(page);
3802 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3806 qc->curbytes += count;
3807 qc->cursg_ofs += count;
3809 if (qc->cursg_ofs == sg->length) {
3819 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3820 * @qc: Command on going
 * Transfer data from/to the ATAPI device.
3825 * Inherited from caller.
3828 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3830 struct ata_port *ap = qc->ap;
3831 struct ata_device *dev = qc->dev;
3832 unsigned int ireason, bc_lo, bc_hi, bytes;
3833 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3835 /* Abuse qc->result_tf for temp storage of intermediate TF
3836 * here to save some kernel stack usage.
3837 * For normal completion, qc->result_tf is not relevant. For
3838 * error, qc->result_tf is later overwritten by ata_qc_complete().
3839 * So, the correctness of qc->result_tf is not affected.
3841 ap->ops->tf_read(ap, &qc->result_tf);
3842 ireason = qc->result_tf.nsect;
3843 bc_lo = qc->result_tf.lbam;
3844 bc_hi = qc->result_tf.lbah;
3845 bytes = (bc_hi << 8) | bc_lo;
3847 /* shall be cleared to zero, indicating xfer of data */
3848 if (ireason & (1 << 0))
3851 /* make sure transfer direction matches expected */
3852 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3853 if (do_write != i_write)
3856 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3858 __atapi_pio_bytes(qc, bytes);
3863 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3864 qc->err_mask |= AC_ERR_HSM;
3865 ap->hsm_task_state = HSM_ST_ERR;
3869 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3870 * @ap: the target ata_port
3874 * 1 if ok in workqueue, 0 otherwise.
3877 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3879 if (qc->tf.flags & ATA_TFLAG_POLLING)
3882 if (ap->hsm_task_state == HSM_ST_FIRST) {
3883 if (qc->tf.protocol == ATA_PROT_PIO &&
3884 (qc->tf.flags & ATA_TFLAG_WRITE))
3887 if (is_atapi_taskfile(&qc->tf) &&
3888 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3896 * ata_hsm_qc_complete - finish a qc running on standard HSM
3897 * @qc: Command to complete
3898 * @in_wq: 1 if called from workqueue, 0 otherwise
3900 * Finish @qc which is running on standard HSM.
3903 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3904 * Otherwise, none on entry and grabs host lock.
3906 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3908 struct ata_port *ap = qc->ap;
3909 unsigned long flags;
3911 if (ap->ops->error_handler) {
3913 spin_lock_irqsave(&ap->host_set->lock, flags);
/* EH might have kicked in while host_set lock is
 * released.
 */
3918 qc = ata_qc_from_tag(ap, qc->tag);
3920 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
3922 ata_qc_complete(qc);
3924 ata_port_freeze(ap);
3927 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3929 if (likely(!(qc->err_mask & AC_ERR_HSM)))
3930 ata_qc_complete(qc);
3932 ata_port_freeze(ap);
3936 spin_lock_irqsave(&ap->host_set->lock, flags);
3938 ata_qc_complete(qc);
3939 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3941 ata_qc_complete(qc);
3944 ata_altstatus(ap); /* flush */
3948 * ata_hsm_move - move the HSM to the next state.
3949 * @ap: the target ata_port
3951 * @status: current device status
3952 * @in_wq: 1 if called from workqueue, 0 otherwise
3955 * 1 when poll next status needed, 0 otherwise.
3957 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
3958 u8 status, int in_wq)
3960 unsigned long flags = 0;
3963 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3965 /* Make sure ata_qc_issue_prot() does not throw things
3966 * like DMA polling into the workqueue. Notice that
3967 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
3969 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
3972 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
3973 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
3975 switch (ap->hsm_task_state) {
3977 /* Send first data block or PACKET CDB */
3979 /* If polling, we will stay in the work queue after
3980 * sending the data. Otherwise, interrupt handler
3981 * takes over after sending the data.
3983 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3985 /* check device status */
3986 if (unlikely((status & ATA_DRQ) == 0)) {
3987 /* handle BSY=0, DRQ=0 as error */
3988 if (likely(status & (ATA_ERR | ATA_DF)))
3989 /* device stops HSM for abort/error */
3990 qc->err_mask |= AC_ERR_DEV;
3992 /* HSM violation. Let EH handle this */
3993 qc->err_mask |= AC_ERR_HSM;
3995 ap->hsm_task_state = HSM_ST_ERR;
3999 /* Device should not ask for data transfer (DRQ=1)
4000 * when it finds something wrong.
4001 * We ignore DRQ here and stop the HSM by
4002 * changing hsm_task_state to HSM_ST_ERR and
4003 * let the EH abort the command or reset the device.
4005 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4006 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4008 qc->err_mask |= AC_ERR_HSM;
4009 ap->hsm_task_state = HSM_ST_ERR;
4013 /* Send the CDB (atapi) or the first data block (ata pio out).
4014 * During the state transition, interrupt handler shouldn't
4015 * be invoked before the data transfer is complete and
4016 * hsm_task_state is changed. Hence, the following locking.
4019 spin_lock_irqsave(&ap->host_set->lock, flags);
4021 if (qc->tf.protocol == ATA_PROT_PIO) {
4022 /* PIO data out protocol.
4023 * send first data block.
4026 /* ata_pio_sectors() might change the state
4027 * to HSM_ST_LAST. so, the state is changed here
4028 * before ata_pio_sectors().
4030 ap->hsm_task_state = HSM_ST;
4031 ata_pio_sectors(qc);
4032 ata_altstatus(ap); /* flush */
4035 atapi_send_cdb(ap, qc);
4038 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4040 /* if polling, ata_pio_task() handles the rest.
4041 * otherwise, interrupt handler takes over from here.
4046 /* complete command or read/write the data register */
4047 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4048 /* ATAPI PIO protocol */
4049 if ((status & ATA_DRQ) == 0) {
4050 /* No more data to transfer or device error.
4051 * Device error will be tagged in HSM_ST_LAST.
4053 ap->hsm_task_state = HSM_ST_LAST;
4057 /* Device should not ask for data transfer (DRQ=1)
4058 * when it finds something wrong.
4059 * We ignore DRQ here and stop the HSM by
4060 * changing hsm_task_state to HSM_ST_ERR and
4061 * let the EH abort the command or reset the device.
4063 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4064 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4066 qc->err_mask |= AC_ERR_HSM;
4067 ap->hsm_task_state = HSM_ST_ERR;
4071 atapi_pio_bytes(qc);
4073 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4074 /* bad ireason reported by device */
4078 /* ATA PIO protocol */
4079 if (unlikely((status & ATA_DRQ) == 0)) {
4080 /* handle BSY=0, DRQ=0 as error */
4081 if (likely(status & (ATA_ERR | ATA_DF)))
4082 /* device stops HSM for abort/error */
4083 qc->err_mask |= AC_ERR_DEV;
4085 /* HSM violation. Let EH handle this */
4086 qc->err_mask |= AC_ERR_HSM;
4088 ap->hsm_task_state = HSM_ST_ERR;
4092 /* For PIO reads, some devices may ask for
 * data transfer (DRQ=1) along with ERR=1.
4094 * We respect DRQ here and transfer one
4095 * block of junk data before changing the
4096 * hsm_task_state to HSM_ST_ERR.
4098 * For PIO writes, ERR=1 DRQ=1 doesn't make
4099 * sense since the data block has been
4100 * transferred to the device.
4102 if (unlikely(status & (ATA_ERR | ATA_DF))) {
/* data might be corrupted */
4104 qc->err_mask |= AC_ERR_DEV;
4106 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4107 ata_pio_sectors(qc);
4109 status = ata_wait_idle(ap);
4112 if (status & (ATA_BUSY | ATA_DRQ))
4113 qc->err_mask |= AC_ERR_HSM;
4115 /* ata_pio_sectors() might change the
4116 * state to HSM_ST_LAST. so, the state
4117 * is changed after ata_pio_sectors().
4119 ap->hsm_task_state = HSM_ST_ERR;
4123 ata_pio_sectors(qc);
4125 if (ap->hsm_task_state == HSM_ST_LAST &&
4126 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4129 status = ata_wait_idle(ap);
4134 ata_altstatus(ap); /* flush */
4139 if (unlikely(!ata_ok(status))) {
4140 qc->err_mask |= __ac_err_mask(status);
4141 ap->hsm_task_state = HSM_ST_ERR;
4145 /* no more data to transfer */
4146 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4147 ap->id, qc->dev->devno, status);
4149 WARN_ON(qc->err_mask);
4151 ap->hsm_task_state = HSM_ST_IDLE;
4153 /* complete taskfile transaction */
4154 ata_hsm_qc_complete(qc, in_wq);
4160 /* make sure qc->err_mask is available to
4161 * know what's wrong and recover
4163 WARN_ON(qc->err_mask == 0);
4165 ap->hsm_task_state = HSM_ST_IDLE;
4167 /* complete taskfile transaction */
4168 ata_hsm_qc_complete(qc, in_wq);
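/* Summary of the state machine driven above (derived from the code,
 * added for clarity):
 *
 *	HSM_ST_FIRST -- send the CDB or the first PIO data block
 *	HSM_ST       -- PIO data transfer in progress
 *	HSM_ST_LAST  -- wait for the final status
 *	HSM_ST_ERR   -- error detected, complete the qc through EH
 *	HSM_ST_IDLE  -- no command in flight
 */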
4180 static void ata_pio_task(void *_data)
4182 struct ata_queued_cmd *qc = _data;
4183 struct ata_port *ap = qc->ap;
4188 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4191 * This is purely heuristic. This is a fast path.
4192 * Sometimes when we enter, BSY will be cleared in
4193 * a chk-status or two. If not, the drive is probably seeking
4194 * or something. Snooze for a couple msecs, then
4195 * chk-status again. If still busy, queue delayed work.
4197 status = ata_busy_wait(ap, ATA_BUSY, 5);
4198 if (status & ATA_BUSY) {
4200 status = ata_busy_wait(ap, ATA_BUSY, 10);
4201 if (status & ATA_BUSY) {
4202 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4208 poll_next = ata_hsm_move(ap, qc, status, 1);
4210 /* another command or interrupt handler
4211 * may be running at this point.
4218 * ata_qc_new - Request an available ATA command, for queueing
4219 * @ap: Port associated with device @dev
4220 * @dev: Device from whom we request an available command structure
4226 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4228 struct ata_queued_cmd *qc = NULL;
4231 /* no command while frozen */
4232 if (unlikely(ap->flags & ATA_FLAG_FROZEN))
4235 /* the last tag is reserved for internal command. */
4236 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4237 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4238 qc = __ata_qc_from_tag(ap, i);
4249 * ata_qc_new_init - Request an available ATA command, and initialize it
4250 * @dev: Device from whom we request an available command structure
4256 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4258 struct ata_port *ap = dev->ap;
4259 struct ata_queued_cmd *qc;
4261 qc = ata_qc_new(ap);
4274 * ata_qc_free - free unused ata_queued_cmd
4275 * @qc: Command to complete
4277 * Designed to free unused ata_queued_cmd object
4278 * in case something prevents using it.
4281 * spin_lock_irqsave(host_set lock)
4283 void ata_qc_free(struct ata_queued_cmd *qc)
4285 struct ata_port *ap = qc->ap;
4288 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4292 if (likely(ata_tag_valid(tag))) {
4293 qc->tag = ATA_TAG_POISON;
4294 clear_bit(tag, &ap->qc_allocated);
4298 void __ata_qc_complete(struct ata_queued_cmd *qc)
4300 struct ata_port *ap = qc->ap;
4302 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4303 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4305 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4308 /* command should be marked inactive atomically with qc completion */
4309 if (qc->tf.protocol == ATA_PROT_NCQ)
4310 ap->sactive &= ~(1 << qc->tag);
4312 ap->active_tag = ATA_TAG_POISON;
4314 /* atapi: mark qc as inactive to prevent the interrupt handler
4315 * from completing the command twice later, before the error handler
4316 * is called. (when rc != 0 and atapi request sense is needed)
4318 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4319 ap->qc_active &= ~(1 << qc->tag);
4321 /* call completion callback */
4322 qc->complete_fn(qc);
4326 * ata_qc_complete - Complete an active ATA command
4327 * @qc: Command to complete
4330 * Indicate to the mid and upper layers that an ATA
4331 * command has completed, with either an ok or not-ok status.
4334 * spin_lock_irqsave(host_set lock)
4336 void ata_qc_complete(struct ata_queued_cmd *qc)
4338 struct ata_port *ap = qc->ap;
4340 /* XXX: New EH and old EH use different mechanisms to
4341 * synchronize EH with regular execution path.
4343 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4344 * Normal execution path is responsible for not accessing a
4345 * failed qc. libata core enforces the rule by returning NULL
4346 * from ata_qc_from_tag() for failed qcs.
4348 * Old EH depends on ata_qc_complete() nullifying completion
4349 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4350 * not synchronize with interrupt handler. Only PIO task is
4353 if (ap->ops->error_handler) {
4354 WARN_ON(ap->flags & ATA_FLAG_FROZEN);
4356 if (unlikely(qc->err_mask))
4357 qc->flags |= ATA_QCFLAG_FAILED;
4359 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4360 if (!ata_tag_internal(qc->tag)) {
4361 /* always fill result TF for failed qc */
4362 ap->ops->tf_read(ap, &qc->result_tf);
4363 ata_qc_schedule_eh(qc);
4368 /* read result TF if requested */
4369 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4370 ap->ops->tf_read(ap, &qc->result_tf);
4372 __ata_qc_complete(qc);
4374 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4377 /* read result TF if failed or requested */
4378 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4379 ap->ops->tf_read(ap, &qc->result_tf);
4381 __ata_qc_complete(qc);
4386 * ata_qc_complete_multiple - Complete multiple qcs successfully
4387 * @ap: port in question
4388 * @qc_active: new qc_active mask
4389 * @finish_qc: LLDD callback invoked before completing a qc
 * Complete in-flight commands.  This function is meant to be
 * called from a low-level driver's interrupt routine to complete
 * requests normally.  ap->qc_active and @qc_active are compared
 * and commands are completed accordingly.
4397 * spin_lock_irqsave(host_set lock)
4400 * Number of completed commands on success, -errno otherwise.
4402 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4403 void (*finish_qc)(struct ata_queued_cmd *))
4409 done_mask = ap->qc_active ^ qc_active;
4411 if (unlikely(done_mask & qc_active)) {
4412 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4413 "(%08x->%08x)\n", ap->qc_active, qc_active);
4417 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4418 struct ata_queued_cmd *qc;
4420 if (!(done_mask & (1 << i)))
4423 if ((qc = ata_qc_from_tag(ap, i))) {
4426 ata_qc_complete(qc);
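/* Example (illustrative sketch): an NCQ-capable LLDD's interrupt
 * handler might read its controller's "commands still active"
 * register and complete everything that finished in one call.  The
 * register offset below is hypothetical.
 *
 *	u32 qc_active = readl(port_mmio + EXAMPLE_CI_REG);
 *
 *	ata_qc_complete_multiple(ap, qc_active, NULL);
 */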
4434 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4436 struct ata_port *ap = qc->ap;
4438 switch (qc->tf.protocol) {
4441 case ATA_PROT_ATAPI_DMA:
4444 case ATA_PROT_ATAPI:
4446 if (ap->flags & ATA_FLAG_PIO_DMA)
4459 * ata_qc_issue - issue taskfile to device
4460 * @qc: command to issue to device
 * Prepare an ATA command for submission to the device.
4463 * This includes mapping the data into a DMA-able
4464 * area, filling in the S/G table, and finally
4465 * writing the taskfile to hardware, starting the command.
4468 * spin_lock_irqsave(host_set lock)
4470 void ata_qc_issue(struct ata_queued_cmd *qc)
4472 struct ata_port *ap = qc->ap;
4474 /* Make sure only one non-NCQ command is outstanding. The
4475 * check is skipped for old EH because it reuses active qc to
4476 * request ATAPI sense.
4478 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4480 if (qc->tf.protocol == ATA_PROT_NCQ) {
4481 WARN_ON(ap->sactive & (1 << qc->tag));
4482 ap->sactive |= 1 << qc->tag;
4484 WARN_ON(ap->sactive);
4485 ap->active_tag = qc->tag;
4488 qc->flags |= ATA_QCFLAG_ACTIVE;
4489 ap->qc_active |= 1 << qc->tag;
4491 if (ata_should_dma_map(qc)) {
4492 if (qc->flags & ATA_QCFLAG_SG) {
4493 if (ata_sg_setup(qc))
4495 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4496 if (ata_sg_setup_one(qc))
4500 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4503 ap->ops->qc_prep(qc);
4505 qc->err_mask |= ap->ops->qc_issue(qc);
4506 if (unlikely(qc->err_mask))
4511 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4512 qc->err_mask |= AC_ERR_SYSTEM;
4514 ata_qc_complete(qc);
4518 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4519 * @qc: command to issue to device
4521 * Using various libata functions and hooks, this function
4522 * starts an ATA command. ATA commands are grouped into
4523 * classes called "protocols", and issuing each type of protocol
4524 * is slightly different.
4526 * May be used as the qc_issue() entry in ata_port_operations.
4529 * spin_lock_irqsave(host_set lock)
4532 * Zero on success, AC_ERR_* mask on failure
4535 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4537 struct ata_port *ap = qc->ap;
4539 /* Use polling pio if the LLD doesn't handle
4540 * interrupt driven pio and atapi CDB interrupt.
4542 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4543 switch (qc->tf.protocol) {
4545 case ATA_PROT_ATAPI:
4546 case ATA_PROT_ATAPI_NODATA:
4547 qc->tf.flags |= ATA_TFLAG_POLLING;
4549 case ATA_PROT_ATAPI_DMA:
4550 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4551 /* see ata_check_atapi_dma() */
4559 /* select the device */
4560 ata_dev_select(ap, qc->dev->devno, 1, 0);
4562 /* start the command */
4563 switch (qc->tf.protocol) {
4564 case ATA_PROT_NODATA:
4565 if (qc->tf.flags & ATA_TFLAG_POLLING)
4566 ata_qc_set_polling(qc);
4568 ata_tf_to_host(ap, &qc->tf);
4569 ap->hsm_task_state = HSM_ST_LAST;
4571 if (qc->tf.flags & ATA_TFLAG_POLLING)
4572 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4577 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4579 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4580 ap->ops->bmdma_setup(qc); /* set up bmdma */
4581 ap->ops->bmdma_start(qc); /* initiate bmdma */
4582 ap->hsm_task_state = HSM_ST_LAST;
4586 if (qc->tf.flags & ATA_TFLAG_POLLING)
4587 ata_qc_set_polling(qc);
4589 ata_tf_to_host(ap, &qc->tf);
4591 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4592 /* PIO data out protocol */
4593 ap->hsm_task_state = HSM_ST_FIRST;
4594 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4596 /* always send first data block using
4597 * the ata_pio_task() codepath.
4600 /* PIO data in protocol */
4601 ap->hsm_task_state = HSM_ST;
4603 if (qc->tf.flags & ATA_TFLAG_POLLING)
4604 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4606 /* if polling, ata_pio_task() handles the rest.
4607 * otherwise, interrupt handler takes over from here.
4613 case ATA_PROT_ATAPI:
4614 case ATA_PROT_ATAPI_NODATA:
4615 if (qc->tf.flags & ATA_TFLAG_POLLING)
4616 ata_qc_set_polling(qc);
4618 ata_tf_to_host(ap, &qc->tf);
4620 ap->hsm_task_state = HSM_ST_FIRST;
4622 /* send cdb by polling if no cdb interrupt */
4623 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4624 (qc->tf.flags & ATA_TFLAG_POLLING))
4625 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4628 case ATA_PROT_ATAPI_DMA:
4629 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4631 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4632 ap->ops->bmdma_setup(qc); /* set up bmdma */
4633 ap->hsm_task_state = HSM_ST_FIRST;
4635 /* send cdb by polling if no cdb interrupt */
4636 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4637 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4642 return AC_ERR_SYSTEM;
4649 * ata_host_intr - Handle host interrupt for given (port, task)
4650 * @ap: Port on which interrupt arrived (possibly...)
4651 * @qc: Taskfile currently active in engine
4653 * Handle host interrupt for given queued command. Currently,
4654 * only DMA interrupts are handled. All other commands are
4655 * handled via polling with interrupts disabled (nIEN bit).
4658 * spin_lock_irqsave(host_set lock)
4661 * One if interrupt was handled, zero if not (shared irq).
4664 inline unsigned int ata_host_intr (struct ata_port *ap,
4665 struct ata_queued_cmd *qc)
4667 u8 status, host_stat = 0;
4669 VPRINTK("ata%u: protocol %d task_state %d\n",
4670 ap->id, qc->tf.protocol, ap->hsm_task_state);
4672 /* Check whether we are expecting interrupt in this state */
4673 switch (ap->hsm_task_state) {
4675 /* Some pre-ATAPI-4 devices assert INTRQ
4676 * at this state when ready to receive CDB.
/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4680 * The flag was turned on only for atapi devices.
4681 * No need to check is_atapi_taskfile(&qc->tf) again.
4683 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4687 if (qc->tf.protocol == ATA_PROT_DMA ||
4688 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4689 /* check status of DMA engine */
4690 host_stat = ap->ops->bmdma_status(ap);
4691 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4693 /* if it's not our irq... */
4694 if (!(host_stat & ATA_DMA_INTR))
4697 /* before we do anything else, clear DMA-Start bit */
4698 ap->ops->bmdma_stop(qc);
4700 if (unlikely(host_stat & ATA_DMA_ERR)) {
/* error when transferring data to/from memory */
4702 qc->err_mask |= AC_ERR_HOST_BUS;
4703 ap->hsm_task_state = HSM_ST_ERR;
4713 /* check altstatus */
4714 status = ata_altstatus(ap);
4715 if (status & ATA_BUSY)
4718 /* check main status, clearing INTRQ */
4719 status = ata_chk_status(ap);
4720 if (unlikely(status & ATA_BUSY))
4723 /* ack bmdma irq events */
4724 ap->ops->irq_clear(ap);
4726 ata_hsm_move(ap, qc, status, 0);
4727 return 1; /* irq handled */
4730 ap->stats.idle_irq++;
4733 if ((ap->stats.idle_irq % 1000) == 0) {
4734 ata_irq_ack(ap, 0); /* debug trap */
4735 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4739 return 0; /* irq not handled */
4743 * ata_interrupt - Default ATA host interrupt handler
4744 * @irq: irq line (unused)
4745 * @dev_instance: pointer to our ata_host_set information structure
4748 * Default interrupt handler for PCI IDE devices. Calls
4749 * ata_host_intr() for each port that is not disabled.
4752 * Obtains host_set lock during operation.
4755 * IRQ_NONE or IRQ_HANDLED.
4758 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4760 struct ata_host_set *host_set = dev_instance;
4762 unsigned int handled = 0;
4763 unsigned long flags;
4765 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4766 spin_lock_irqsave(&host_set->lock, flags);
4768 for (i = 0; i < host_set->n_ports; i++) {
4769 struct ata_port *ap;
4771 ap = host_set->ports[i];
if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
4774 struct ata_queued_cmd *qc;
4776 qc = ata_qc_from_tag(ap, ap->active_tag);
4777 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4778 (qc->flags & ATA_QCFLAG_ACTIVE))
4779 handled |= ata_host_intr(ap, qc);
4783 spin_unlock_irqrestore(&host_set->lock, flags);
4785 return IRQ_RETVAL(handled);
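/* Example (illustrative only): drivers that rely on this default
 * handler register it against the shared host_set at attach time,
 * roughly (the shared-irq flag shown is the one from this kernel
 * era):
 *
 *	rc = request_irq(irq, ata_interrupt, SA_SHIRQ, DRV_NAME,
 *			 host_set);
 */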
4789 * sata_scr_valid - test whether SCRs are accessible
4790 * @ap: ATA port to test SCR accessibility for
4792 * Test whether SCRs are accessible for @ap.
4798 * 1 if SCRs are accessible, 0 otherwise.
4800 int sata_scr_valid(struct ata_port *ap)
4802 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4806 * sata_scr_read - read SCR register of the specified port
4807 * @ap: ATA port to read SCR for
4809 * @val: Place to store read value
4811 * Read SCR register @reg of @ap into *@val. This function is
4812 * guaranteed to succeed if the cable type of the port is SATA
4813 * and the port implements ->scr_read.
4819 * 0 on success, negative errno on failure.
4821 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4823 if (sata_scr_valid(ap)) {
4824 *val = ap->ops->scr_read(ap, reg);
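/* Example (illustrative only): reading SStatus and testing the DET
 * field, exactly as ata_port_online() further below does:
 *
 *	u32 sstatus;
 *	int online = 0;
 *
 *	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0 &&
 *	    (sstatus & 0xf) == 0x3)
 *		online = 1;
 */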
4831 * sata_scr_write - write SCR register of the specified port
4832 * @ap: ATA port to write SCR for
4833 * @reg: SCR to write
4834 * @val: value to write
4836 * Write @val to SCR register @reg of @ap. This function is
4837 * guaranteed to succeed if the cable type of the port is SATA
4838 * and the port implements ->scr_read.
4844 * 0 on success, negative errno on failure.
4846 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4848 if (sata_scr_valid(ap)) {
4849 ap->ops->scr_write(ap, reg, val);
4856 * sata_scr_write_flush - write SCR register of the specified port and flush
4857 * @ap: ATA port to write SCR for
4858 * @reg: SCR to write
4859 * @val: value to write
4861 * This function is identical to sata_scr_write() except that this
4862 * function performs flush after writing to the register.
4868 * 0 on success, negative errno on failure.
4870 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4872 if (sata_scr_valid(ap)) {
4873 ap->ops->scr_write(ap, reg, val);
4874 ap->ops->scr_read(ap, reg);
4881 * ata_port_online - test whether the given port is online
4882 * @ap: ATA port to test
4884 * Test whether @ap is online. Note that this function returns 0
4885 * if online status of @ap cannot be obtained, so
4886 * ata_port_online(ap) != !ata_port_offline(ap).
4892 * 1 if the port online status is available and online.
4894 int ata_port_online(struct ata_port *ap)
4898 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4904 * ata_port_offline - test whether the given port is offline
4905 * @ap: ATA port to test
4907 * Test whether @ap is offline. Note that this function returns
4908 * 0 if offline status of @ap cannot be obtained, so
4909 * ata_port_online(ap) != !ata_port_offline(ap).
4915 * 1 if the port offline status is available and offline.
4917 int ata_port_offline(struct ata_port *ap)
4921 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4928 * without filling any other registers
4930 static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
4932 struct ata_taskfile tf;
4935 ata_tf_init(dev, &tf);
4938 tf.flags |= ATA_TFLAG_DEVICE;
4939 tf.protocol = ATA_PROT_NODATA;
4941 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4943 ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
4949 static int ata_flush_cache(struct ata_device *dev)
4953 if (!ata_try_flush_cache(dev))
4956 if (ata_id_has_flush_ext(dev->id))
4957 cmd = ATA_CMD_FLUSH_EXT;
4959 cmd = ATA_CMD_FLUSH;
4961 return ata_do_simple_cmd(dev, cmd);
4964 static int ata_standby_drive(struct ata_device *dev)
4966 return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
4969 static int ata_start_drive(struct ata_device *dev)
4971 return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
 * ata_device_resume - wake up a previously suspended device
4976 * @dev: the device to resume
 * Kick the drive back into action by sending it an IDLE IMMEDIATE
 * command and making sure its transfer mode matches between drive
 * and host.
4983 int ata_device_resume(struct ata_device *dev)
4985 struct ata_port *ap = dev->ap;
4987 if (ap->flags & ATA_FLAG_SUSPENDED) {
4988 struct ata_device *failed_dev;
4990 ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
4992 ap->flags &= ~ATA_FLAG_SUSPENDED;
4993 while (ata_set_mode(ap, &failed_dev))
4994 ata_dev_disable(failed_dev);
4996 if (!ata_dev_enabled(dev))
4998 if (dev->class == ATA_DEV_ATA)
4999 ata_start_drive(dev);
5005 * ata_device_suspend - prepare a device for suspend
5006 * @dev: the device to suspend
5007 * @state: target power management state
5009 * Flush the cache on the drive, if appropriate, then issue a
5010 * standbynow command.
5012 int ata_device_suspend(struct ata_device *dev, pm_message_t state)
5014 struct ata_port *ap = dev->ap;
5016 if (!ata_dev_enabled(dev))
5018 if (dev->class == ATA_DEV_ATA)
5019 ata_flush_cache(dev);
5021 if (state.event != PM_EVENT_FREEZE)
5022 ata_standby_drive(dev);
5023 ap->flags |= ATA_FLAG_SUSPENDED;
5028 * ata_port_start - Set port up for dma.
5029 * @ap: Port to initialize
5031 * Called just after data structures for each port are
5032 * initialized. Allocates space for PRD table.
5034 * May be used as the port_start() entry in ata_port_operations.
5037 * Inherited from caller.
5040 int ata_port_start (struct ata_port *ap)
5042 struct device *dev = ap->dev;
5045 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5049 rc = ata_pad_alloc(ap, dev);
5051 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5055 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
/**
 *	ata_port_stop - Undo ata_port_start()
 *	@ap: Port to shut down
 *
 *	Frees the PRD table.
 *
 *	May be used as the port_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}
void ata_host_stop (struct ata_host_set *host_set)
{
	if (host_set->mmio_base)
		iounmap(host_set->mmio_base);
}
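/*
 * Editor's usage sketch (example_port_ops is hypothetical): per the doc
 * comments above, drivers with no special DMA bookkeeping can plug these
 * helpers straight into their ata_port_operations.
 */
#if 0
static const struct ata_port_operations example_port_ops = {
	/* ...taskfile and interrupt hooks elided... */
	.port_start	= ata_port_start,	/* allocates the PRD table */
	.port_stop	= ata_port_stop,	/* frees the PRD table */
	.host_stop	= ata_host_stop,	/* iounmaps mmio_base, if any */
};
#endif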
/**
 *	ata_host_remove - Unregister SCSI host structure with upper layers
 *	@ap: Port to unregister
 *	@do_unregister: 1 if we fully unregister, 0 to just stop the port
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
	struct Scsi_Host *sh = ap->host;

	DPRINTK("ENTER\n");

	if (do_unregister)
		scsi_remove_host(sh);

	ap->ops->port_stop(ap);
}
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host_set lock.
	 */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
/**
 *	ata_host_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: associated SCSI mid-layer structure
 *	@host_set: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure, and its associated
 *	scsi_host.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
			  struct ata_host_set *host_set,
			  const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	host->max_id = 16;
	host->max_lun = 1;
	host->max_channel = 1;
	host->unique_id = ata_unique_id++;
	host->max_cmd_len = 12;

	ap->flags = ATA_FLAG_DISABLED;
	ap->id = host->unique_id;
	ap->host = host;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host_set = host_set;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	ap->hard_port_no =
		ent->legacy_mode ? ent->hard_port_no : port_no;
	ap->pio_mask = ent->pio_mask;
	ap->mwdma_mask = ent->mwdma_mask;
	ap->udma_mask = ent->udma_mask;
	ap->flags |= ent->host_flags;
	ap->ops = ent->port_ops;
	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR;
#endif

	INIT_WORK(&ap->port_task, NULL, NULL);
	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
/**
 *	ata_host_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host_set: Collections of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Attach low-level ATA driver to system.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, NULL on error.
 */
static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
				      struct ata_host_set *host_set,
				      unsigned int port_no)
{
	struct Scsi_Host *host;
	struct ata_port *ap;
	int rc;

	DPRINTK("ENTER\n");

	if (!ent->port_ops->error_handler &&
	    !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!host)
		return NULL;

	host->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(host);

	ata_host_init(ap, host, host_set, ent, port_no);

	rc = ap->ops->port_start(ap);
	if (rc)
		goto err_out;

	return ap;

err_out:
	scsi_host_put(host);
	return NULL;
}
/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int count = 0, i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses) */
	host_set = kzalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;
	spin_lock_init(&host_set->lock);

	host_set->dev = dev;
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;
	host_set->ops = ent->port_ops;
	host_set->flags = ent->host_set_flags;

	/* register each port bound to this device */
	for (i = 0; i < ent->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;

		ap = ata_host_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;
		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
				 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				 (ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
				"ctl 0x%lX bmdma 0x%lX irq %lu\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				ent->irq);

		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		ata_eh_freeze_port(ap);	/* freeze port before requesting IRQ */
		count++;
	}

	if (!count)
		goto err_free_ret;
	/* obtain irq, that is shared between channels */
	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			DRV_NAME, host_set))
		goto err_out;
	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap;
		u32 scontrol;
		int rc;

		ap = host_set->ports[i];

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(&ap->host_set->lock, flags);

			ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ap->eh_info.action |= ATA_EH_SOFTRESET;

			ap->flags |= ATA_FLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(&ap->host_set->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}
	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */
err_out:
	for (i = 0; i < count; i++) {
		ata_host_remove(host_set->ports[i], 1);
		scsi_host_put(host_set->ports[i]->host);
	}
err_free_ret:
	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
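/*
 * Editor's usage sketch (all example_* identifiers are hypothetical):
 * a minimal LLDD probe path fills an ata_probe_ent and hands it to
 * ata_device_add(), which returns the number of ports registered.
 */
#if 0
static int example_attach(struct device *gendev, unsigned long cmd_base,
			  unsigned long ctl_base, int irq)
{
	struct ata_probe_ent ent;

	memset(&ent, 0, sizeof(ent));
	INIT_LIST_HEAD(&ent.node);
	ent.dev = gendev;
	ent.sht = &example_sht;			/* scsi_host_template */
	ent.port_ops = &example_port_ops;
	ent.n_ports = 1;
	ent.irq = irq;
	ent.irq_flags = SA_SHIRQ;
	ent.pio_mask = 0x1f;			/* PIO modes 0-4 */
	ent.port[0].cmd_addr = cmd_base;
	ent.port[0].altstatus_addr = ent.port[0].ctl_addr = ctl_base;
	ata_std_ports(&ent.port[0]);

	return ata_device_add(&ent) ? 0 : -ENODEV;
}
#endif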
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags |= ATA_FLAG_UNLOADING;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(&ap->host_set->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->host);
}
/**
 *	ata_host_set_remove - PCI layer callback for device removal
 *	@host_set: ATA host set that was removed
 *
 *	Unregister all objects associated with this host set.  Free those
 *	objects.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
void ata_host_set_remove(struct ata_host_set *host_set)
{
	unsigned int i;

	for (i = 0; i < host_set->n_ports; i++)
		ata_port_detach(host_set->ports[i]);

	free_irq(host_set->irq, host_set);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_release(ap->host);

		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			if (ioaddr->cmd_addr == 0x1f0)
				release_region(0x1f0, 8);
			else if (ioaddr->cmd_addr == 0x170)
				release_region(0x170, 8);
		}

		scsi_host_put(ap->host);
	}

	if (host_set->ops->host_stop)
		host_set->ops->host_stop(host_set);

	kfree(host_set);
}
/**
 *	ata_scsi_release - SCSI layer callback hook for host unload
 *	@host: libata host to be unloaded
 *
 *	Performs all duties necessary to shut down a libata port...
 *	Kill port kthread, disable port, and release resources.
 *
 *	LOCKING:
 *	Inherited from SCSI layer.
 *
 *	RETURNS:
 *	One.
 */
int ata_scsi_release(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ata_host_remove(ap, 0);

	DPRINTK("EXIT\n");
	return 1;
}
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
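/*
 * Editor's usage sketch (example_init_ioaddr, base and ctl are
 * hypothetical): a driver fills in cmd_addr, plus the registers this
 * helper does not touch, and lets ata_std_ports() derive the rest of
 * the taskfile block as cmd_addr + 0..7.
 */
#if 0
static void example_init_ioaddr(struct ata_probe_ent *ent,
				unsigned long base, unsigned long ctl)
{
	ent->port[0].cmd_addr = base;		/* e.g. 0x1f0 for legacy primary */
	ent->port[0].ctl_addr = ctl;		/* e.g. 0x3f6; not set by ata_std_ports() */
	ent->port[0].altstatus_addr = ctl;
	ata_std_ports(&ent->port[0]);
}
#endif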
#ifdef CONFIG_PCI

void ata_pci_host_stop (struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	pci_iounmap(pdev, host_set->mmio_base);
}
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that
 *	hot-unplug or module unload event has occurred.
 *	Handle this by unregistering all objects associated
 *	with this PCI device.  Free those objects.  Then finally
 *	release PCI resources and disable device.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host_set *host_set = dev_get_drvdata(dev);

	ata_host_set_remove(host_set);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}
	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
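/*
 * Editor's usage sketch (the offset 0x41 and values are illustrative,
 * modeled on port-enable checks in PCI IDE drivers): callers describe a
 * config-space field as { reg, width-in-bytes, mask, val } and get 1
 * back iff (value & mask) == val.
 */
#if 0
static const struct pci_bits example_port_enable_bits = {
	0x41, 1 /* one byte wide */, 0x80, 0x80
};

static int example_port_enabled(struct pci_dev *pdev)
{
	return pci_test_config_bits(pdev, &example_port_enable_bits);
}
#endif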
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
	return 0;
}
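/*
 * Editor's usage sketch (example_* names are hypothetical): simple PCI
 * LLDDs can use the two helpers above, together with
 * ata_pci_remove_one(), directly as pci_driver methods.
 */
#if 0
static struct pci_driver example_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= example_pci_tbl,
	.probe		= example_init_one,	/* typically wraps ata_pci_init_one() */
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};
#endif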
#endif /* CONFIG_PCI */
static int __init ata_init(void)
{
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

module_init(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
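/*
 * Editor's usage fragment (illustrative, not from the original source):
 * ata_ratelimit() grants at most one message per HZ/5 window, so chatty
 * paths such as interrupt handlers guard their printks with it.
 */
#if 0
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
#endif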
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
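/*
 * Editor's usage fragment (mmio_status is a hypothetical iomapped
 * register): the wait condition is (value & mask) != val, so passing
 * mask = val = ATA_BUSY returns as soon as BSY drops, or on timeout
 * with BSY still set.
 */
#if 0
	u32 status = ata_wait_register(mmio_status, ATA_BUSY, ATA_BUSY,
				       10, 1000);	/* 10ms poll, 1s timeout */
	if (status & ATA_BUSY)
		return -EBUSY;	/* timed out */
#endif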
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_boot);
EXPORT_SYMBOL_GPL(sata_deb_timing_eh);
EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_port_detach);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_revalidate);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_device_suspend);
EXPORT_SYMBOL_GPL(ata_device_resume);
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);