[pandora-kernel.git] drivers/ide/ide-iops.c
1 /*
2  *  Copyright (C) 2000-2002     Andre Hedrick <andre@linux-ide.org>
3  *  Copyright (C) 2003          Red Hat <alan@redhat.com>
4  *
5  */
6
7 #include <linux/module.h>
8 #include <linux/types.h>
9 #include <linux/string.h>
10 #include <linux/kernel.h>
11 #include <linux/timer.h>
12 #include <linux/mm.h>
13 #include <linux/interrupt.h>
14 #include <linux/major.h>
15 #include <linux/errno.h>
16 #include <linux/genhd.h>
17 #include <linux/blkpg.h>
18 #include <linux/slab.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/hdreg.h>
22 #include <linux/ide.h>
23 #include <linux/bitops.h>
24 #include <linux/nmi.h>
25
26 #include <asm/byteorder.h>
27 #include <asm/irq.h>
28 #include <asm/uaccess.h>
29 #include <asm/io.h>
30
31 /*
32  *      Conventional PIO operations for ATA devices
33  */
34
35 static u8 ide_inb (unsigned long port)
36 {
37         return (u8) inb(port);
38 }
39
40 static u16 ide_inw (unsigned long port)
41 {
42         return (u16) inw(port);
43 }
44
45 static void ide_insw (unsigned long port, void *addr, u32 count)
46 {
47         insw(port, addr, count);
48 }
49
50 static void ide_insl (unsigned long port, void *addr, u32 count)
51 {
52         insl(port, addr, count);
53 }
54
55 static void ide_outb (u8 val, unsigned long port)
56 {
57         outb(val, port);
58 }
59
60 static void ide_outbsync (ide_drive_t *drive, u8 addr, unsigned long port)
61 {
62         outb(addr, port);
63 }
64
65 static void ide_outw (u16 val, unsigned long port)
66 {
67         outw(val, port);
68 }
69
70 static void ide_outsw (unsigned long port, void *addr, u32 count)
71 {
72         outsw(port, addr, count);
73 }
74
75 static void ide_outsl (unsigned long port, void *addr, u32 count)
76 {
77         outsl(port, addr, count);
78 }
79
80 void default_hwif_iops (ide_hwif_t *hwif)
81 {
82         hwif->OUTB      = ide_outb;
83         hwif->OUTBSYNC  = ide_outbsync;
84         hwif->OUTW      = ide_outw;
85         hwif->OUTSW     = ide_outsw;
86         hwif->OUTSL     = ide_outsl;
87         hwif->INB       = ide_inb;
88         hwif->INW       = ide_inw;
89         hwif->INSW      = ide_insw;
90         hwif->INSL      = ide_insl;
91 }
92
93 /*
94  *      MMIO operations, typically used for SATA controllers
95  */
96
97 static u8 ide_mm_inb (unsigned long port)
98 {
99         return (u8) readb((void __iomem *) port);
100 }
101
102 static u16 ide_mm_inw (unsigned long port)
103 {
104         return (u16) readw((void __iomem *) port);
105 }
106
107 static void ide_mm_insw (unsigned long port, void *addr, u32 count)
108 {
109         __ide_mm_insw((void __iomem *) port, addr, count);
110 }
111
112 static void ide_mm_insl (unsigned long port, void *addr, u32 count)
113 {
114         __ide_mm_insl((void __iomem *) port, addr, count);
115 }
116
117 static void ide_mm_outb (u8 value, unsigned long port)
118 {
119         writeb(value, (void __iomem *) port);
120 }
121
122 static void ide_mm_outbsync (ide_drive_t *drive, u8 value, unsigned long port)
123 {
124         writeb(value, (void __iomem *) port);
125 }
126
127 static void ide_mm_outw (u16 value, unsigned long port)
128 {
129         writew(value, (void __iomem *) port);
130 }
131
132 static void ide_mm_outsw (unsigned long port, void *addr, u32 count)
133 {
134         __ide_mm_outsw((void __iomem *) port, addr, count);
135 }
136
137 static void ide_mm_outsl (unsigned long port, void *addr, u32 count)
138 {
139         __ide_mm_outsl((void __iomem *) port, addr, count);
140 }
141
142 void default_hwif_mmiops (ide_hwif_t *hwif)
143 {
144         hwif->OUTB      = ide_mm_outb;
145         /* Most systems will need to override OUTBSYNC, as it is
146            controller specific. */
147         hwif->OUTBSYNC  = ide_mm_outbsync;
148         hwif->OUTW      = ide_mm_outw;
149         hwif->OUTSW     = ide_mm_outsw;
150         hwif->OUTSL     = ide_mm_outsl;
151         hwif->INB       = ide_mm_inb;
152         hwif->INW       = ide_mm_inw;
153         hwif->INSW      = ide_mm_insw;
154         hwif->INSL      = ide_mm_insl;
155 }
156
157 EXPORT_SYMBOL(default_hwif_mmiops);
158
159 void SELECT_DRIVE (ide_drive_t *drive)
160 {
161         ide_hwif_t *hwif = drive->hwif;
162         const struct ide_port_ops *port_ops = hwif->port_ops;
163
164         if (port_ops && port_ops->selectproc)
165                 port_ops->selectproc(drive);
166
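            /* select.all caches the ATA device/head (drive select) register value for this unit */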
167         hwif->OUTB(drive->select.all, hwif->io_ports.device_addr);
168 }
169
170 void SELECT_MASK (ide_drive_t *drive, int mask)
171 {
172         const struct ide_port_ops *port_ops = drive->hwif->port_ops;
173
174         if (port_ops && port_ops->maskproc)
175                 port_ops->maskproc(drive, mask);
176 }
177
178 /*
179  * Some localbus EIDE interfaces require a special access sequence
180  * when using 32-bit I/O instructions to transfer data.  We call this
181  * the "vlb_sync" sequence, which consists of three successive reads
182  * of the sector count register location, with interrupts disabled
183  * to ensure that the reads all happen together.
184  */
185 static void ata_vlb_sync(ide_drive_t *drive, unsigned long port)
186 {
187         (void) HWIF(drive)->INB(port);
188         (void) HWIF(drive)->INB(port);
189         (void) HWIF(drive)->INB(port);
190 }
191
192 /*
193  * This is used for most PIO data transfers *from* the IDE interface
194  */
195 static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
196 {
197         ide_hwif_t *hwif = drive->hwif;
198         struct ide_io_ports *io_ports = &hwif->io_ports;
199         u8 io_32bit = drive->io_32bit;
200
201         if (io_32bit) {
202                 if (io_32bit & 2) {
203                         unsigned long flags;
204
205                         local_irq_save(flags);
206                         ata_vlb_sync(drive, io_ports->nsect_addr);
207                         hwif->INSL(io_ports->data_addr, buffer, wcount);
208                         local_irq_restore(flags);
209                 } else
210                         hwif->INSL(io_ports->data_addr, buffer, wcount);
211         } else
212                 hwif->INSW(io_ports->data_addr, buffer, wcount << 1);
213 }
214
215 /*
216  * This is used for most PIO data transfers *to* the IDE interface
217  */
218 static void ata_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
219 {
220         ide_hwif_t *hwif = drive->hwif;
221         struct ide_io_ports *io_ports = &hwif->io_ports;
222         u8 io_32bit = drive->io_32bit;
223
224         if (io_32bit) {
225                 if (io_32bit & 2) {
226                         unsigned long flags;
227
228                         local_irq_save(flags);
229                         ata_vlb_sync(drive, io_ports->nsect_addr);
230                         hwif->OUTSL(io_ports->data_addr, buffer, wcount);
231                         local_irq_restore(flags);
232                 } else
233                         hwif->OUTSL(io_ports->data_addr, buffer, wcount);
234         } else
235                 hwif->OUTSW(io_ports->data_addr, buffer, wcount << 1);
236 }
237
238 /*
239  * The following routines are mainly used by the ATAPI drivers.
240  *
241  * These routines will round up any request for an odd number of bytes,
242  * so if an odd bytecount is specified, be sure that there's at least one
243  * extra byte allocated for the buffer.
244  */
245
246 static void atapi_input_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
247 {
248         ide_hwif_t *hwif = HWIF(drive);
249
250         ++bytecount;
251 #if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
252         if (MACH_IS_ATARI || MACH_IS_Q40) {
253                 /* Atari has a byte-swapped IDE interface */
254                 insw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2);
255                 return;
256         }
257 #endif /* CONFIG_ATARI || CONFIG_Q40 */
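            /*
             * Move the bulk of the data in 4-byte units, then pick up one
             * trailing 16-bit word if 2 or 3 bytes remain; together with the
             * increment above this rounds odd requests up to an even length.
             */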
258         hwif->ata_input_data(drive, buffer, bytecount / 4);
259         if ((bytecount & 0x03) >= 2)
260                 hwif->INSW(hwif->io_ports.data_addr,
261                            (u8 *)buffer + (bytecount & ~0x03), 1);
262 }
263
264 static void atapi_output_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
265 {
266         ide_hwif_t *hwif = HWIF(drive);
267
268         ++bytecount;
269 #if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
270         if (MACH_IS_ATARI || MACH_IS_Q40) {
271                 /* Atari has a byte-swapped IDE interface */
272                 outsw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2);
273                 return;
274         }
275 #endif /* CONFIG_ATARI || CONFIG_Q40 */
276         hwif->ata_output_data(drive, buffer, bytecount / 4);
277         if ((bytecount & 0x03) >= 2)
278                 hwif->OUTSW(hwif->io_ports.data_addr,
279                             (u8 *)buffer + (bytecount & ~0x03), 1);
280 }
281
282 void default_hwif_transport(ide_hwif_t *hwif)
283 {
284         hwif->ata_input_data            = ata_input_data;
285         hwif->ata_output_data           = ata_output_data;
286         hwif->atapi_input_bytes         = atapi_input_bytes;
287         hwif->atapi_output_bytes        = atapi_output_bytes;
288 }
289
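    /*
     * ide_fix_driveid() converts the raw IDENTIFY data, which the drive
     * returns as little-endian 16-bit words, to host byte order.  On
     * little-endian hosts the whole function compiles away.
     */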
290 void ide_fix_driveid (struct hd_driveid *id)
291 {
292 #ifndef __LITTLE_ENDIAN
293 # ifdef __BIG_ENDIAN
294         int i;
295         u16 *stringcast;
296
297         id->config         = __le16_to_cpu(id->config);
298         id->cyls           = __le16_to_cpu(id->cyls);
299         id->reserved2      = __le16_to_cpu(id->reserved2);
300         id->heads          = __le16_to_cpu(id->heads);
301         id->track_bytes    = __le16_to_cpu(id->track_bytes);
302         id->sector_bytes   = __le16_to_cpu(id->sector_bytes);
303         id->sectors        = __le16_to_cpu(id->sectors);
304         id->vendor0        = __le16_to_cpu(id->vendor0);
305         id->vendor1        = __le16_to_cpu(id->vendor1);
306         id->vendor2        = __le16_to_cpu(id->vendor2);
307         stringcast = (u16 *)&id->serial_no[0];
308         for (i = 0; i < (20/2); i++)
309                 stringcast[i] = __le16_to_cpu(stringcast[i]);
310         id->buf_type       = __le16_to_cpu(id->buf_type);
311         id->buf_size       = __le16_to_cpu(id->buf_size);
312         id->ecc_bytes      = __le16_to_cpu(id->ecc_bytes);
313         stringcast = (u16 *)&id->fw_rev[0];
314         for (i = 0; i < (8/2); i++)
315                 stringcast[i] = __le16_to_cpu(stringcast[i]);
316         stringcast = (u16 *)&id->model[0];
317         for (i = 0; i < (40/2); i++)
318                 stringcast[i] = __le16_to_cpu(stringcast[i]);
319         id->dword_io       = __le16_to_cpu(id->dword_io);
320         id->reserved50     = __le16_to_cpu(id->reserved50);
321         id->field_valid    = __le16_to_cpu(id->field_valid);
322         id->cur_cyls       = __le16_to_cpu(id->cur_cyls);
323         id->cur_heads      = __le16_to_cpu(id->cur_heads);
324         id->cur_sectors    = __le16_to_cpu(id->cur_sectors);
325         id->cur_capacity0  = __le16_to_cpu(id->cur_capacity0);
326         id->cur_capacity1  = __le16_to_cpu(id->cur_capacity1);
327         id->lba_capacity   = __le32_to_cpu(id->lba_capacity);
328         id->dma_1word      = __le16_to_cpu(id->dma_1word);
329         id->dma_mword      = __le16_to_cpu(id->dma_mword);
330         id->eide_pio_modes = __le16_to_cpu(id->eide_pio_modes);
331         id->eide_dma_min   = __le16_to_cpu(id->eide_dma_min);
332         id->eide_dma_time  = __le16_to_cpu(id->eide_dma_time);
333         id->eide_pio       = __le16_to_cpu(id->eide_pio);
334         id->eide_pio_iordy = __le16_to_cpu(id->eide_pio_iordy);
335         for (i = 0; i < 2; ++i)
336                 id->words69_70[i] = __le16_to_cpu(id->words69_70[i]);
337         for (i = 0; i < 4; ++i)
338                 id->words71_74[i] = __le16_to_cpu(id->words71_74[i]);
339         id->queue_depth    = __le16_to_cpu(id->queue_depth);
340         for (i = 0; i < 4; ++i)
341                 id->words76_79[i] = __le16_to_cpu(id->words76_79[i]);
342         id->major_rev_num  = __le16_to_cpu(id->major_rev_num);
343         id->minor_rev_num  = __le16_to_cpu(id->minor_rev_num);
344         id->command_set_1  = __le16_to_cpu(id->command_set_1);
345         id->command_set_2  = __le16_to_cpu(id->command_set_2);
346         id->cfsse          = __le16_to_cpu(id->cfsse);
347         id->cfs_enable_1   = __le16_to_cpu(id->cfs_enable_1);
348         id->cfs_enable_2   = __le16_to_cpu(id->cfs_enable_2);
349         id->csf_default    = __le16_to_cpu(id->csf_default);
350         id->dma_ultra      = __le16_to_cpu(id->dma_ultra);
351         id->trseuc         = __le16_to_cpu(id->trseuc);
352         id->trsEuc         = __le16_to_cpu(id->trsEuc);
353         id->CurAPMvalues   = __le16_to_cpu(id->CurAPMvalues);
354         id->mprc           = __le16_to_cpu(id->mprc);
355         id->hw_config      = __le16_to_cpu(id->hw_config);
356         id->acoustic       = __le16_to_cpu(id->acoustic);
357         id->msrqs          = __le16_to_cpu(id->msrqs);
358         id->sxfert         = __le16_to_cpu(id->sxfert);
359         id->sal            = __le16_to_cpu(id->sal);
360         id->spg            = __le32_to_cpu(id->spg);
361         id->lba_capacity_2 = __le64_to_cpu(id->lba_capacity_2);
362         for (i = 0; i < 22; i++)
363                 id->words104_125[i]   = __le16_to_cpu(id->words104_125[i]);
364         id->last_lun       = __le16_to_cpu(id->last_lun);
365         id->word127        = __le16_to_cpu(id->word127);
366         id->dlf            = __le16_to_cpu(id->dlf);
367         id->csfo           = __le16_to_cpu(id->csfo);
368         for (i = 0; i < 26; i++)
369                 id->words130_155[i] = __le16_to_cpu(id->words130_155[i]);
370         id->word156        = __le16_to_cpu(id->word156);
371         for (i = 0; i < 3; i++)
372                 id->words157_159[i] = __le16_to_cpu(id->words157_159[i]);
373         id->cfa_power      = __le16_to_cpu(id->cfa_power);
374         for (i = 0; i < 14; i++)
375                 id->words161_175[i] = __le16_to_cpu(id->words161_175[i]);
376         for (i = 0; i < 31; i++)
377                 id->words176_205[i] = __le16_to_cpu(id->words176_205[i]);
378         for (i = 0; i < 48; i++)
379                 id->words206_254[i] = __le16_to_cpu(id->words206_254[i]);
380         id->integrity_word  = __le16_to_cpu(id->integrity_word);
381 # else
382 #  error "Please fix <asm/byteorder.h>"
383 # endif
384 #endif
385 }
386
387 /*
388  * ide_fixstring() cleans up and (optionally) byte-swaps a text string,
389  * removing leading/trailing blanks and compressing internal blanks.
390  * It is primarily used to tidy up the model name/number fields as
391  * returned by the WIN_[P]IDENTIFY commands.
392  */
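    /* For example (with byteswap == 0), "  WDC  WD800JB   " is cleaned up to "WDC WD800JB". */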
393
394 void ide_fixstring (u8 *s, const int bytecount, const int byteswap)
395 {
396         u8 *p = s, *end = &s[bytecount & ~1]; /* bytecount must be even */
397
398         if (byteswap) {
399                 /* convert from big-endian to host byte order */
400                 for (p = end ; p != s;) {
401                         unsigned short *pp = (unsigned short *) (p -= 2);
402                         *pp = ntohs(*pp);
403                 }
404         }
405         /* strip leading blanks */
406         while (s != end && *s == ' ')
407                 ++s;
408         /* compress internal blanks and strip trailing blanks */
409         while (s != end && *s) {
410                 if (*s++ != ' ' || (s != end && *s && *s != ' '))
411                         *p++ = *(s-1);
412         }
413         /* wipe out trailing garbage */
414         while (p != end)
415                 *p++ = '\0';
416 }
417
418 EXPORT_SYMBOL(ide_fixstring);
419
420 /*
421  * Needed for PCI irq sharing
422  */
423 int drive_is_ready (ide_drive_t *drive)
424 {
425         ide_hwif_t *hwif        = HWIF(drive);
426         u8 stat                 = 0;
427
428         if (drive->waiting_for_dma)
429                 return hwif->dma_ops->dma_test_irq(drive);
430
431 #if 0
432         /* need to guarantee 400ns since last command was issued */
433         udelay(1);
434 #endif
435
436         /*
437          * We do a passive status test under shared PCI interrupts on
438          * cards that truly share the ATA side interrupt, but may also share
439          * an interrupt with another pci card/device.  We make no assumptions
440          * about possible isa-pnp and pci-pnp issues yet.
441          */
442         if (hwif->io_ports.ctl_addr)
443                 stat = ide_read_altstatus(drive);
444         else
445                 /* Note: this may clear a pending IRQ!! */
446                 stat = ide_read_status(drive);
447
448         if (stat & BUSY_STAT)
449                 /* drive busy:  definitely not interrupting */
450                 return 0;
451
452         /* drive ready: *might* be interrupting */
453         return 1;
454 }
455
456 EXPORT_SYMBOL(drive_is_ready);
457
458 /*
459  * This routine busy-waits for the drive status to be not "busy".
460  * It then checks the status for all of the "good" bits and none
461  * of the "bad" bits, and if all is okay it returns 0.  All other
462  * cases return error -- caller may then invoke ide_error().
463  *
464  * This routine should get fixed to not hog the CPU during extra-long waits.
465  * That could be done by busy-waiting for the first jiffy or two, and then
466  * setting a timer to wake up at half-second intervals thereafter,
467  * until the timeout expires.
468  */
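    /*
     * Returns 0 as soon as the status matches (good, bad), -EBUSY if the
     * drive stayed busy until the timeout expired, and -EFAULT if the status
     * settled but never became acceptable; *rstat receives the last status
     * read in every case.
     */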
469 static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
470 {
471         unsigned long flags;
472         int i;
473         u8 stat;
474
475         udelay(1);      /* spec allows drive 400ns to assert "BUSY" */
476         stat = ide_read_status(drive);
477
478         if (stat & BUSY_STAT) {
479                 local_irq_set(flags);
480                 timeout += jiffies;
481                 while ((stat = ide_read_status(drive)) & BUSY_STAT) {
482                         if (time_after(jiffies, timeout)) {
483                                 /*
484                                  * One last read after the timeout in case
485                                  * heavy interrupt load made us not make any
486                                  * progress during the timeout..
487                                  */
488                                 stat = ide_read_status(drive);
489                                 if (!(stat & BUSY_STAT))
490                                         break;
491
492                                 local_irq_restore(flags);
493                                 *rstat = stat;
494                                 return -EBUSY;
495                         }
496                 }
497                 local_irq_restore(flags);
498         }
499         /*
500          * Allow status to settle, then read it again.
501          * A few rare drives vastly violate the 400ns spec here,
502          * so we'll wait up to 10usec for a "good" status
503          * rather than expensively fail things immediately.
504          * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
505          */
506         for (i = 0; i < 10; i++) {
507                 udelay(1);
508                 stat = ide_read_status(drive);
509
510                 if (OK_STAT(stat, good, bad)) {
511                         *rstat = stat;
512                         return 0;
513                 }
514         }
515         *rstat = stat;
516         return -EFAULT;
517 }
518
519 /*
520  * In case of error returns error value after doing "*startstop = ide_error()".
521  * The caller should return the updated value of "startstop" in this case;
522  * "startstop" is unchanged when the function returns 0.
523  */
524 int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout)
525 {
526         int err;
527         u8 stat;
528
529         /* bail early if we've exceeded max_failures */
530         if (drive->max_failures && (drive->failures > drive->max_failures)) {
531                 *startstop = ide_stopped;
532                 return 1;
533         }
534
535         err = __ide_wait_stat(drive, good, bad, timeout, &stat);
536
537         if (err) {
538                 char *s = (err == -EBUSY) ? "status timeout" : "status error";
539                 *startstop = ide_error(drive, s, stat);
540         }
541
542         return err;
543 }
544
545 EXPORT_SYMBOL(ide_wait_stat);
546
547 /**
548  *      ide_in_drive_list       -       look for drive in black/white list
549  *      @id: drive identifier
550  *      @drive_table: list to inspect
551  *
552  *      Look for a drive in the blacklist and whitelist tables.
553  *      Returns 1 if the drive is found in the table, 0 otherwise.
554  */
555
556 int ide_in_drive_list(struct hd_driveid *id, const struct drive_list_entry *drive_table)
557 {
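            /*
             * The model string must match exactly; a NULL id_firmware entry
             * matches any firmware revision, otherwise a substring match on
             * fw_rev is used.
             */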
558         for ( ; drive_table->id_model; drive_table++)
559                 if ((!strcmp(drive_table->id_model, id->model)) &&
560                     (!drive_table->id_firmware ||
561                      strstr(id->fw_rev, drive_table->id_firmware)))
562                         return 1;
563         return 0;
564 }
565
566 EXPORT_SYMBOL_GPL(ide_in_drive_list);
567
568 /*
569  * Early UDMA66 devices don't set bit14 to 1; only bit13 is valid.
570  * We list them here and depend on the device-side cable detection for them.
571  *
572  * Some optical devices with buggy firmware have the same problem.
573  */
574 static const struct drive_list_entry ivb_list[] = {
575         { "QUANTUM FIREBALLlct10 05"    , "A03.0900"    },
576         { "TSSTcorp CDDVDW SH-S202J"    , "SB00"        },
577         { "TSSTcorp CDDVDW SH-S202J"    , "SB01"        },
578         { "TSSTcorp CDDVDW SH-S202N"    , "SB00"        },
579         { "TSSTcorp CDDVDW SH-S202N"    , "SB01"        },
580         { NULL                          , NULL          }
581 };
582
583 /*
584  *  All hosts that use the 80c ribbon must use this check.
585  *  The name is derived from the upper byte of word 93 and the 80c ribbon.
586  */
587 u8 eighty_ninty_three (ide_drive_t *drive)
588 {
589         ide_hwif_t *hwif = drive->hwif;
590         struct hd_driveid *id = drive->id;
591         int ivb = ide_in_drive_list(id, ivb_list);
592
593         if (hwif->cbl == ATA_CBL_PATA40_SHORT)
594                 return 1;
595
596         if (ivb)
597                 printk(KERN_DEBUG "%s: skipping word 93 validity check\n",
598                                   drive->name);
599
600         if (ide_dev_is_sata(id) && !ivb)
601                 return 1;
602
603         if (hwif->cbl != ATA_CBL_PATA80 && !ivb)
604                 goto no_80w;
605
606         /*
607          * FIXME:
608          * - change master/slave IDENTIFY order
609          * - force bit13 (80c cable present) check also for !ivb devices
610          *   (unless the slave device is pre-ATA3)
611          */
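            /*
             * hw_config is IDENTIFY word 93.  Bit 13 (0x2000) is the
             * "80c cable present" bit mentioned in the FIXME above; for the
             * IVB devices listed earlier only bit 13 is meaningful, as they
             * never set bit 14 (0x4000).
             */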
612         if ((id->hw_config & 0x4000) || (ivb && (id->hw_config & 0x2000)))
613                 return 1;
614
615 no_80w:
616         if (drive->udma33_warned == 1)
617                 return 0;
618
619         printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
620                             "limiting max speed to UDMA33\n",
621                             drive->name,
622                             hwif->cbl == ATA_CBL_PATA80 ? "drive" : "host");
623
624         drive->udma33_warned = 1;
625
626         return 0;
627 }
628
629 int ide_driveid_update(ide_drive_t *drive)
630 {
631         ide_hwif_t *hwif = drive->hwif;
632         struct hd_driveid *id;
633         unsigned long timeout, flags;
634         u8 stat;
635
636         /*
637          * Re-read drive->id for possible DMA mode
638          * change (copied from ide-probe.c)
639          */
640
641         SELECT_MASK(drive, 1);
642         ide_set_irq(drive, 1);
643         msleep(50);
644         hwif->OUTB(WIN_IDENTIFY, hwif->io_ports.command_addr);
645         timeout = jiffies + WAIT_WORSTCASE;
646         do {
647                 if (time_after(jiffies, timeout)) {
648                         SELECT_MASK(drive, 0);
649                         return 0;       /* drive timed-out */
650                 }
651
652                 msleep(50);     /* give drive a breather */
653                 stat = ide_read_altstatus(drive);
654         } while (stat & BUSY_STAT);
655
656         msleep(50);     /* wait for IRQ and DRQ_STAT */
657         stat = ide_read_status(drive);
658
659         if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
660                 SELECT_MASK(drive, 0);
661                 printk("%s: CHECK for good STATUS\n", drive->name);
662                 return 0;
663         }
664         local_irq_save(flags);
665         SELECT_MASK(drive, 0);
666         id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
667         if (!id) {
668                 local_irq_restore(flags);
669                 return 0;
670         }
671         hwif->ata_input_data(drive, id, SECTOR_WORDS);
672         (void)ide_read_status(drive);   /* clear drive IRQ */
673         local_irq_enable();
674         local_irq_restore(flags);
675         ide_fix_driveid(id);
676         if (id) {
677                 drive->id->dma_ultra = id->dma_ultra;
678                 drive->id->dma_mword = id->dma_mword;
679                 drive->id->dma_1word = id->dma_1word;
680                 /* anything more ? */
681                 kfree(id);
682
683                 if (drive->using_dma && ide_id_dma_bug(drive))
684                         ide_dma_off(drive);
685         }
686
687         return 1;
688 }
689
690 int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
691 {
692         ide_hwif_t *hwif = drive->hwif;
693         struct ide_io_ports *io_ports = &hwif->io_ports;
694         int error = 0;
695         u8 stat;
696
697 //      while (HWGROUP(drive)->busy)
698 //              msleep(50);
699
700 #ifdef CONFIG_BLK_DEV_IDEDMA
701         if (hwif->dma_ops)      /* check if host supports DMA */
702                 hwif->dma_ops->dma_host_set(drive, 0);
703 #endif
704
705         /* Skip setting PIO flow-control modes on pre-EIDE drives */
706         if ((speed & 0xf8) == XFER_PIO_0 && !(drive->id->capability & 0x08))
707                 goto skip;
708
709         /*
710          * Don't use ide_wait_cmd here - it will
711          * attempt to set_geometry and recalibrate,
712          * but for some reason these don't work at
713          * this point (lost interrupt).
714          */
715         /*
716          * Select the drive, and issue the SETFEATURES command
717          */
718         disable_irq_nosync(hwif->irq);
719         
720         /*
721          *      FIXME: we race against the running IRQ here if
722          *      this is called from non IRQ context. If we use
723          *      disable_irq() we hang on the error path. Work
724          *      is needed.
725          */
726          
727         udelay(1);
728         SELECT_DRIVE(drive);
729         SELECT_MASK(drive, 0);
730         udelay(1);
731         ide_set_irq(drive, 0);
732         hwif->OUTB(speed, io_ports->nsect_addr);
733         hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr);
734         hwif->OUTBSYNC(drive, WIN_SETFEATURES, io_ports->command_addr);
735         if (drive->quirk_list == 2)
736                 ide_set_irq(drive, 1);
737
738         error = __ide_wait_stat(drive, drive->ready_stat,
739                                 BUSY_STAT|DRQ_STAT|ERR_STAT,
740                                 WAIT_CMD, &stat);
741
742         SELECT_MASK(drive, 0);
743
744         enable_irq(hwif->irq);
745
746         if (error) {
747                 (void) ide_dump_status(drive, "set_drive_speed_status", stat);
748                 return error;
749         }
750
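            /*
             * In the IDENTIFY DMA words the low byte lists the supported
             * modes and the high byte marks the mode currently selected;
             * clear the "selected" bits here and re-set them (together with
             * the matching "supported" bit) in the switch below.
             */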
751         drive->id->dma_ultra &= ~0xFF00;
752         drive->id->dma_mword &= ~0x0F00;
753         drive->id->dma_1word &= ~0x0F00;
754
755  skip:
756 #ifdef CONFIG_BLK_DEV_IDEDMA
757         if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) &&
758             drive->using_dma)
759                 hwif->dma_ops->dma_host_set(drive, 1);
760         else if (hwif->dma_ops) /* check if host supports DMA */
761                 ide_dma_off_quietly(drive);
762 #endif
763
764         switch(speed) {
765                 case XFER_UDMA_7:   drive->id->dma_ultra |= 0x8080; break;
766                 case XFER_UDMA_6:   drive->id->dma_ultra |= 0x4040; break;
767                 case XFER_UDMA_5:   drive->id->dma_ultra |= 0x2020; break;
768                 case XFER_UDMA_4:   drive->id->dma_ultra |= 0x1010; break;
769                 case XFER_UDMA_3:   drive->id->dma_ultra |= 0x0808; break;
770                 case XFER_UDMA_2:   drive->id->dma_ultra |= 0x0404; break;
771                 case XFER_UDMA_1:   drive->id->dma_ultra |= 0x0202; break;
772                 case XFER_UDMA_0:   drive->id->dma_ultra |= 0x0101; break;
773                 case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break;
774                 case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break;
775                 case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break;
776                 case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break;
777                 case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break;
778                 case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break;
779                 default: break;
780         }
781         if (!drive->init_speed)
782                 drive->init_speed = speed;
783         drive->current_speed = speed;
784         return error;
785 }
786
787 /*
788  * This should get invoked any time we exit the driver to
789  * wait for an interrupt response from a drive.  handler() points
790  * at the appropriate code to handle the next interrupt, and a
791  * timer is started to prevent us from waiting forever in case
792  * something goes wrong (see the ide_timer_expiry() handler later on).
793  *
794  * See also ide_execute_command
795  */
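    /* Must be called with ide_lock held; ide_set_handler() below provides the locking wrapper. */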
796 static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
797                       unsigned int timeout, ide_expiry_t *expiry)
798 {
799         ide_hwgroup_t *hwgroup = HWGROUP(drive);
800
801         BUG_ON(hwgroup->handler);
802         hwgroup->handler        = handler;
803         hwgroup->expiry         = expiry;
804         hwgroup->timer.expires  = jiffies + timeout;
805         hwgroup->req_gen_timer  = hwgroup->req_gen;
806         add_timer(&hwgroup->timer);
807 }
808
809 void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
810                       unsigned int timeout, ide_expiry_t *expiry)
811 {
812         unsigned long flags;
813         spin_lock_irqsave(&ide_lock, flags);
814         __ide_set_handler(drive, handler, timeout, expiry);
815         spin_unlock_irqrestore(&ide_lock, flags);
816 }
817
818 EXPORT_SYMBOL(ide_set_handler);
819  
820 /**
821  *      ide_execute_command     -       execute an IDE command
822  *      @drive: IDE drive to issue the command against
823  *      @command: command byte to write
824  *      @handler: handler for next phase
825  *      @timeout: timeout for command
826  *      @expiry:  handler to run on timeout
827  *
828  *      Helper function to issue an IDE command. This handles the
829  *      atomicity requirements, command timing and ensures that the 
830  *      handler and IRQ setup do not race. All IDE command kick off
831  *      should go via this function or do equivalent locking.
832  */
833
834 void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
835                          unsigned timeout, ide_expiry_t *expiry)
836 {
837         unsigned long flags;
838         ide_hwif_t *hwif = HWIF(drive);
839
840         spin_lock_irqsave(&ide_lock, flags);
841         __ide_set_handler(drive, handler, timeout, expiry);
842         hwif->OUTBSYNC(drive, cmd, hwif->io_ports.command_addr);
843         /*
844          * The drive takes 400 ns to respond; we must avoid the IRQ being
845          * serviced before that.
846          *
847          * FIXME: we could skip this delay with care on non shared devices
848          */
849         ndelay(400);
850         spin_unlock_irqrestore(&ide_lock, flags);
851 }
852
853 EXPORT_SYMBOL(ide_execute_command);
854
855
856 /* needed below */
857 static ide_startstop_t do_reset1 (ide_drive_t *, int);
858
859 /*
860  * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
861  * during an atapi drive reset operation. If the drive has not yet responded,
862  * and we have not yet hit our maximum waiting time, then the timer is restarted
863  * for another 50ms.
864  */
865 static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
866 {
867         ide_hwgroup_t *hwgroup  = HWGROUP(drive);
868         u8 stat;
869
870         SELECT_DRIVE(drive);
871         udelay (10);
872         stat = ide_read_status(drive);
873
874         if (OK_STAT(stat, 0, BUSY_STAT))
875                 printk("%s: ATAPI reset complete\n", drive->name);
876         else {
877                 if (time_before(jiffies, hwgroup->poll_timeout)) {
878                         ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
879                         /* continue polling */
880                         return ide_started;
881                 }
882                 /* end of polling */
883                 hwgroup->polling = 0;
884                 printk("%s: ATAPI reset timed-out, status=0x%02x\n",
885                                 drive->name, stat);
886                 /* do it the old fashioned way */
887                 return do_reset1(drive, 1);
888         }
889         /* done polling */
890         hwgroup->polling = 0;
891         hwgroup->resetting = 0;
892         return ide_stopped;
893 }
894
895 /*
896  * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
897  * during an ide reset operation. If the drives have not yet responded,
898  * and we have not yet hit our maximum waiting time, then the timer is restarted
899  * for another 50ms.
900  */
901 static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
902 {
903         ide_hwgroup_t *hwgroup  = HWGROUP(drive);
904         ide_hwif_t *hwif        = HWIF(drive);
905         const struct ide_port_ops *port_ops = hwif->port_ops;
906         u8 tmp;
907
908         if (port_ops && port_ops->reset_poll) {
909                 if (port_ops->reset_poll(drive)) {
910                         printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
911                                 hwif->name, drive->name);
912                         return ide_stopped;
913                 }
914         }
915
916         tmp = ide_read_status(drive);
917
918         if (!OK_STAT(tmp, 0, BUSY_STAT)) {
919                 if (time_before(jiffies, hwgroup->poll_timeout)) {
920                         ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
921                         /* continue polling */
922                         return ide_started;
923                 }
924                 printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp);
925                 drive->failures++;
926         } else  {
927                 printk("%s: reset: ", hwif->name);
928                 tmp = ide_read_error(drive);
929
930                 if (tmp == 1) {
931                         printk("success\n");
932                         drive->failures = 0;
933                 } else {
934                         drive->failures++;
935                         printk("master: ");
936                         switch (tmp & 0x7f) {
937                                 case 1: printk("passed");
938                                         break;
939                                 case 2: printk("formatter device error");
940                                         break;
941                                 case 3: printk("sector buffer error");
942                                         break;
943                                 case 4: printk("ECC circuitry error");
944                                         break;
945                                 case 5: printk("controlling MPU error");
946                                         break;
947                                 default:printk("error (0x%02x?)", tmp);
948                         }
949                         if (tmp & 0x80)
950                                 printk("; slave: failed");
951                         printk("\n");
952                 }
953         }
954         hwgroup->polling = 0;   /* done polling */
955         hwgroup->resetting = 0; /* done reset attempt */
956         return ide_stopped;
957 }
958
959 static void ide_disk_pre_reset(ide_drive_t *drive)
960 {
961         int legacy = (drive->id->cfs_enable_2 & 0x0400) ? 0 : 1;
962
963         drive->special.all = 0;
964         drive->special.b.set_geometry = legacy;
965         drive->special.b.recalibrate  = legacy;
966         drive->mult_count = 0;
967         if (!drive->keep_settings && !drive->using_dma)
968                 drive->mult_req = 0;
969         if (drive->mult_req != drive->mult_count)
970                 drive->special.b.set_multmode = 1;
971 }
972
973 static void pre_reset(ide_drive_t *drive)
974 {
975         const struct ide_port_ops *port_ops = drive->hwif->port_ops;
976
977         if (drive->media == ide_disk)
978                 ide_disk_pre_reset(drive);
979         else
980                 drive->post_reset = 1;
981
982         if (drive->using_dma) {
983                 if (drive->crc_count)
984                         ide_check_dma_crc(drive);
985                 else
986                         ide_dma_off(drive);
987         }
988
989         if (!drive->keep_settings) {
990                 if (!drive->using_dma) {
991                         drive->unmask = 0;
992                         drive->io_32bit = 0;
993                 }
994                 return;
995         }
996
997         if (port_ops && port_ops->pre_reset)
998                 port_ops->pre_reset(drive);
999
1000         if (drive->current_speed != 0xff)
1001                 drive->desired_speed = drive->current_speed;
1002         drive->current_speed = 0xff;
1003 }
1004
1005 /*
1006  * do_reset1() attempts to recover a confused drive by resetting it.
1007  * Unfortunately, resetting a disk drive actually resets all devices on
1008  * the same interface, so it can really be thought of as resetting the
1009  * interface rather than resetting the drive.
1010  *
1011  * ATAPI devices have their own reset mechanism which allows them to be
1012  * individually reset without clobbering other devices on the same interface.
1013  *
1014  * Unfortunately, the IDE interface does not generate an interrupt to let
1015  * us know when the reset operation has finished, so we must poll for this.
1016  * Equally poor, though, is the fact that this may take a very long time to
1017  * complete (up to 30 seconds worst case).  So, instead of busy-waiting here,
1018  * we set a timer to poll at 50ms intervals.
1019  */
1020 static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1021 {
1022         unsigned int unit;
1023         unsigned long flags;
1024         ide_hwif_t *hwif;
1025         ide_hwgroup_t *hwgroup;
1026         struct ide_io_ports *io_ports;
1027         const struct ide_port_ops *port_ops;
1028         u8 ctl;
1029
1030         spin_lock_irqsave(&ide_lock, flags);
1031         hwif = HWIF(drive);
1032         hwgroup = HWGROUP(drive);
1033
1034         io_ports = &hwif->io_ports;
1035
1036         /* We must not reset with running handlers */
1037         BUG_ON(hwgroup->handler != NULL);
1038
1039         /* For an ATAPI device, first try an ATAPI SRST. */
1040         if (drive->media != ide_disk && !do_not_try_atapi) {
1041                 hwgroup->resetting = 1;
1042                 pre_reset(drive);
1043                 SELECT_DRIVE(drive);
1044                 udelay (20);
1045                 hwif->OUTBSYNC(drive, WIN_SRST, io_ports->command_addr);
1046                 ndelay(400);
1047                 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1048                 hwgroup->polling = 1;
1049                 __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
1050                 spin_unlock_irqrestore(&ide_lock, flags);
1051                 return ide_started;
1052         }
1053
1054         /*
1055          * First, reset any device state data we were maintaining
1056          * for any of the drives on this interface.
1057          */
1058         for (unit = 0; unit < MAX_DRIVES; ++unit)
1059                 pre_reset(&hwif->drives[unit]);
1060
1061         if (io_ports->ctl_addr == 0) {
1062                 spin_unlock_irqrestore(&ide_lock, flags);
1063                 return ide_stopped;
1064         }
1065
1066         hwgroup->resetting = 1;
1067         /*
1068          * Note that we also set nIEN while resetting the device,
1069          * to mask unwanted interrupts from the interface during the reset.
1070          * However, due to the design of PC hardware, this will cause an
1071          * immediate interrupt due to the edge transition it produces.
1072          * This single interrupt gives us a "fast poll" for drives that
1073          * recover from reset very quickly, saving us the first 50ms wait time.
1074          */
1075         /* set SRST and nIEN */
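            /* (in the device control register, bit 2 is SRST and bit 1 is nIEN) */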
1076         hwif->OUTBSYNC(drive, drive->ctl|6, io_ports->ctl_addr);
1077         /* more than enough time */
1078         udelay(10);
1079         if (drive->quirk_list == 2)
1080                 ctl = drive->ctl;       /* clear SRST and nIEN */
1081         else
1082                 ctl = drive->ctl | 2;   /* clear SRST, leave nIEN */
1083         hwif->OUTBSYNC(drive, ctl, io_ports->ctl_addr);
1084         /* more than enough time */
1085         udelay(10);
1086         hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1087         hwgroup->polling = 1;
1088         __ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
1089
1090         /*
1091  * Some weird controllers like resetting themselves to a strange
1092  * state when the disks are reset this way. At least, the Winbond
1093  * 553 documentation says so.
1094          */
1095         port_ops = hwif->port_ops;
1096         if (port_ops && port_ops->resetproc)
1097                 port_ops->resetproc(drive);
1098
1099         spin_unlock_irqrestore(&ide_lock, flags);
1100         return ide_started;
1101 }
1102
1103 /*
1104  * ide_do_reset() is the entry point to the drive/interface reset code.
1105  */
1106
1107 ide_startstop_t ide_do_reset (ide_drive_t *drive)
1108 {
1109         return do_reset1(drive, 0);
1110 }
1111
1112 EXPORT_SYMBOL(ide_do_reset);
1113
1114 /*
1115  * ide_wait_not_busy() waits for the currently selected device on the hwif
1116  * to report a non-busy status, see comments in ide_probe_port().
1117  */
1118 int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
1119 {
1120         u8 stat = 0;
1121
1122         while(timeout--) {
1123                 /*
1124                  * Turn this into a schedule() sleep once I'm sure
1125                  * about locking issues (2.5 work ?).
1126                  */
1127                 mdelay(1);
1128                 stat = hwif->INB(hwif->io_ports.status_addr);
1129                 if ((stat & BUSY_STAT) == 0)
1130                         return 0;
1131                 /*
1132                  * Assume a value of 0xff means nothing is connected to
1133                  * the interface and it doesn't implement the pull-down
1134                  * resistor on D7.
1135                  */
1136                 if (stat == 0xff)
1137                         return -ENODEV;
1138                 touch_softlockup_watchdog();
1139                 touch_nmi_watchdog();
1140         }
1141         return -EBUSY;
1142 }
1143
1144 EXPORT_SYMBOL_GPL(ide_wait_not_busy);
1145