Merge branch 'bkl-removal' into next
[pandora-kernel.git] / drivers / ide / ide-taskfile.c
1 /*
2  *  Copyright (C) 2000-2002        Michael Cornwell <cornwell@acm.org>
3  *  Copyright (C) 2000-2002        Andre Hedrick <andre@linux-ide.org>
4  *  Copyright (C) 2001-2002        Klaus Smolin
5  *                                      IBM Storage Technology Division
6  *  Copyright (C) 2003-2004, 2007  Bartlomiej Zolnierkiewicz
7  *
8  *  The big, the bad and the ugly.
9  */
10
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include <linux/string.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/mm.h>
17 #include <linux/sched.h>
18 #include <linux/interrupt.h>
19 #include <linux/major.h>
20 #include <linux/errno.h>
21 #include <linux/genhd.h>
22 #include <linux/blkpg.h>
23 #include <linux/slab.h>
24 #include <linux/pci.h>
25 #include <linux/delay.h>
26 #include <linux/hdreg.h>
27 #include <linux/ide.h>
28 #include <linux/bitops.h>
29 #include <linux/scatterlist.h>
30
31 #include <asm/byteorder.h>
32 #include <asm/irq.h>
33 #include <asm/uaccess.h>
34 #include <asm/io.h>
35
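/*
 * Dump the taskfile and hob registers for debugging
 * (compiled out unless DEBUG is defined).
 */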
36 void ide_tf_dump(const char *s, struct ide_taskfile *tf)
37 {
38 #ifdef DEBUG
39         printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
40                 "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
41                 s, tf->feature, tf->nsect, tf->lbal,
42                 tf->lbam, tf->lbah, tf->device, tf->command);
43         printk("%s: hob: nsect 0x%02x lbal 0x%02x "
44                 "lbam 0x%02x lbah 0x%02x\n",
45                 s, tf->hob_nsect, tf->hob_lbal,
46                 tf->hob_lbam, tf->hob_lbah);
47 #endif
48 }
49
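/*
 * Issue WIN_IDENTIFY (WIN_PIDENTIFY for non-disk devices) and read one
 * sector of identify data into @buf.
 */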
50 int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
51 {
52         ide_task_t args;
53
54         memset(&args, 0, sizeof(ide_task_t));
55         args.tf.nsect = 0x01;
56         if (drive->media == ide_disk)
57                 args.tf.command = WIN_IDENTIFY;
58         else
59                 args.tf.command = WIN_PIDENTIFY;
60         args.tf_flags   = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
61         args.data_phase = TASKFILE_IN;
62         return ide_raw_taskfile(drive, &args, buf, 1);
63 }
64
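/*
 * Check whether a command may use DMA: filesystem requests, flagged
 * taskfiles and the DMA read/write/identify opcodes qualify.
 */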
65 static inline int task_dma_ok(ide_task_t *task)
66 {
67         if (blk_fs_request(task->rq) || (task->tf_flags & IDE_TFLAG_FLAGGED))
68                 return 1;
69
70         switch (task->tf.command) {
71                 case WIN_WRITEDMA_ONCE:
72                 case WIN_WRITEDMA:
73                 case WIN_WRITEDMA_EXT:
74                 case WIN_READDMA_ONCE:
75                 case WIN_READDMA:
76                 case WIN_READDMA_EXT:
77                 case WIN_IDENTIFY_DMA:
78                         return 1;
79         }
80
81         return 0;
82 }
83
84 static ide_startstop_t task_no_data_intr(ide_drive_t *);
85 static ide_startstop_t set_geometry_intr(ide_drive_t *);
86 static ide_startstop_t recal_intr(ide_drive_t *);
87 static ide_startstop_t set_multmode_intr(ide_drive_t *);
88 static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
89 static ide_startstop_t task_in_intr(ide_drive_t *);
90
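/*
 * Issue a taskfile command and set up the handler that matches its
 * data phase (PIO in/out, no-data or DMA).
 */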
91 ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
92 {
93         ide_hwif_t *hwif        = HWIF(drive);
94         struct ide_taskfile *tf = &task->tf;
95         ide_handler_t *handler = NULL;
96         const struct ide_dma_ops *dma_ops = hwif->dma_ops;
97
98         if (task->data_phase == TASKFILE_MULTI_IN ||
99             task->data_phase == TASKFILE_MULTI_OUT) {
100                 if (!drive->mult_count) {
101                         printk(KERN_ERR "%s: multimode not set!\n",
102                                         drive->name);
103                         return ide_stopped;
104                 }
105         }
106
107         if (task->tf_flags & IDE_TFLAG_FLAGGED)
108                 task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;
109
110         if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
111                 ide_tf_dump(drive->name, tf);
112                 ide_set_irq(drive, 1);
113                 SELECT_MASK(drive, 0);
114                 hwif->tf_load(drive, task);
115         }
116
117         switch (task->data_phase) {
118         case TASKFILE_MULTI_OUT:
119         case TASKFILE_OUT:
120                 hwif->OUTBSYNC(hwif, tf->command, hwif->io_ports.command_addr);
121                 ndelay(400);    /* FIXME */
122                 return pre_task_out_intr(drive, task->rq);
123         case TASKFILE_MULTI_IN:
124         case TASKFILE_IN:
125                 handler = task_in_intr;
126                 /* fall-through */
127         case TASKFILE_NO_DATA:
128                 if (handler == NULL)
129                         handler = task_no_data_intr;
130                 /* WIN_{SPECIFY,RESTORE,SETMULT} use custom handlers */
131                 if (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) {
132                         switch (tf->command) {
133                         case WIN_SPECIFY: handler = set_geometry_intr;  break;
134                         case WIN_RESTORE: handler = recal_intr;         break;
135                         case WIN_SETMULT: handler = set_multmode_intr;  break;
136                         }
137                 }
138                 ide_execute_command(drive, tf->command, handler,
139                                     WAIT_WORSTCASE, NULL);
140                 return ide_started;
141         default:
142                 if (task_dma_ok(task) == 0 || drive->using_dma == 0 ||
143                     dma_ops->dma_setup(drive))
144                         return ide_stopped;
145                 dma_ops->dma_exec_cmd(drive, tf->command);
146                 dma_ops->dma_start(drive);
147                 return ide_started;
148         }
149 }
150 EXPORT_SYMBOL_GPL(do_rw_taskfile);
151
152 /*
153  * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
154  */
155 static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
156 {
157         u8 stat = ide_read_status(drive);
158
159         if (OK_STAT(stat, READY_STAT, BAD_STAT))
160                 drive->mult_count = drive->mult_req;
161         else {
162                 drive->mult_req = drive->mult_count = 0;
163                 drive->special.b.recalibrate = 1;
164                 (void) ide_dump_status(drive, "set_multmode", stat);
165         }
166         return ide_stopped;
167 }
168
169 /*
170  * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
171  */
172 static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
173 {
174         int retries = 5;
175         u8 stat;
176
177         while (((stat = ide_read_status(drive)) & BUSY_STAT) && retries--)
178                 udelay(10);
179
180         if (OK_STAT(stat, READY_STAT, BAD_STAT))
181                 return ide_stopped;
182
183         if (stat & (ERR_STAT|DRQ_STAT))
184                 return ide_error(drive, "set_geometry_intr", stat);
185
186         BUG_ON(HWGROUP(drive)->handler != NULL);
187         ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
188         return ide_started;
189 }
190
191 /*
192  * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
193  */
194 static ide_startstop_t recal_intr(ide_drive_t *drive)
195 {
196         u8 stat = ide_read_status(drive);
197
198         if (!OK_STAT(stat, READY_STAT, BAD_STAT))
199                 return ide_error(drive, "recal_intr", stat);
200         return ide_stopped;
201 }
202
203 /*
204  * Handler for commands without a data phase
205  */
206 static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
207 {
208         ide_task_t *args        = HWGROUP(drive)->rq->special;
209         u8 stat;
210
211         local_irq_enable_in_hardirq();
212         stat = ide_read_status(drive);
213
214         if (!OK_STAT(stat, READY_STAT, BAD_STAT))
215                 return ide_error(drive, "task_no_data_intr", stat);
216                 /* calls ide_end_drive_cmd */
217
218         if (args)
219                 ide_end_drive_cmd(drive, stat, ide_read_error(drive));
220
221         return ide_stopped;
222 }
223
224 static u8 wait_drive_not_busy(ide_drive_t *drive)
225 {
226         int retries;
227         u8 stat;
228
229         /*
230          * Last sector was transferred, wait until device is ready.  This can
231          * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
232          */
233         for (retries = 0; retries < 1000; retries++) {
234                 stat = ide_read_status(drive);
235
236                 if (stat & BUSY_STAT)
237                         udelay(10);
238                 else
239                         break;
240         }
241
242         if (stat & BUSY_STAT)
243                 printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);
244
245         return stat;
246 }
247
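/*
 * Transfer a single sector to/from the current scatterlist segment,
 * advancing the segment cursor as we go.
 */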
248 static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
249                            unsigned int write)
250 {
251         ide_hwif_t *hwif = drive->hwif;
252         struct scatterlist *sg = hwif->sg_table;
253         struct scatterlist *cursg = hwif->cursg;
254         struct page *page;
255 #ifdef CONFIG_HIGHMEM
256         unsigned long flags;
257 #endif
258         unsigned int offset;
259         u8 *buf;
260
261         cursg = hwif->cursg;
262         if (!cursg) {
263                 cursg = sg;
264                 hwif->cursg = sg;
265         }
266
267         page = sg_page(cursg);
268         offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
269
270         /* get the current page and offset */
271         page = nth_page(page, (offset >> PAGE_SHIFT));
272         offset %= PAGE_SIZE;
273
274 #ifdef CONFIG_HIGHMEM
275         local_irq_save(flags);
276 #endif
277         buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;
278
279         hwif->nleft--;
280         hwif->cursg_ofs++;
281
282         if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
283                 hwif->cursg = sg_next(hwif->cursg);
284                 hwif->cursg_ofs = 0;
285         }
286
287         /* do the actual data transfer */
288         if (write)
289                 hwif->output_data(drive, rq, buf, SECTOR_SIZE);
290         else
291                 hwif->input_data(drive, rq, buf, SECTOR_SIZE);
292
293         kunmap_atomic(buf, KM_BIO_SRC_IRQ);
294 #ifdef CONFIG_HIGHMEM
295         local_irq_restore(flags);
296 #endif
297 }
298
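/*
 * Transfer up to mult_count sectors for Read/Write Multiple commands.
 */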
299 static void ide_pio_multi(ide_drive_t *drive, struct request *rq,
300                           unsigned int write)
301 {
302         unsigned int nsect;
303
304         nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
305         while (nsect--)
306                 ide_pio_sector(drive, rq, write);
307 }
308
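/*
 * Transfer one data block, honouring the current data phase and any
 * 16-bit I/O restriction requested by the taskfile.
 */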
309 static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
310                                      unsigned int write)
311 {
312         u8 saved_io_32bit = drive->io_32bit;
313
314         if (rq->bio)    /* fs request */
315                 rq->errors = 0;
316
317         if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
318                 ide_task_t *task = rq->special;
319
320                 if (task->tf_flags & IDE_TFLAG_IO_16BIT)
321                         drive->io_32bit = 0;
322         }
323
324         touch_softlockup_watchdog();
325
326         switch (drive->hwif->data_phase) {
327         case TASKFILE_MULTI_IN:
328         case TASKFILE_MULTI_OUT:
329                 ide_pio_multi(drive, rq, write);
330                 break;
331         default:
332                 ide_pio_sector(drive, rq, write);
333                 break;
334         }
335
336         drive->io_32bit = saved_io_32bit;
337 }
338
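/*
 * Account for the sectors that were transferred successfully before
 * the failure, then hand the error to ide_error().
 */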
339 static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
340                                   const char *s, u8 stat)
341 {
342         if (rq->bio) {
343                 ide_hwif_t *hwif = drive->hwif;
344                 int sectors = hwif->nsect - hwif->nleft;
345
346                 switch (hwif->data_phase) {
347                 case TASKFILE_IN:
348                         if (hwif->nleft)
349                                 break;
350                         /* fall through */
351                 case TASKFILE_OUT:
352                         sectors--;
353                         break;
354                 case TASKFILE_MULTI_IN:
355                         if (hwif->nleft)
356                                 break;
357                         /* fall through */
358                 case TASKFILE_MULTI_OUT:
359                         sectors -= drive->mult_count;
360                 default:
361                         break;
362                 }
363
364                 if (sectors > 0) {
365                         ide_driver_t *drv;
366
367                         drv = *(ide_driver_t **)rq->rq_disk->private_data;
368                         drv->end_request(drive, 1, sectors);
369                 }
370         }
371         return ide_error(drive, s, stat);
372 }
373
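/*
 * Complete the request: taskfile requests via ide_end_drive_cmd(),
 * fs requests via the owning driver's end_request().
 */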
374 void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
375 {
376         if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
377                 u8 err = ide_read_error(drive);
378
379                 ide_end_drive_cmd(drive, stat, err);
380                 return;
381         }
382
383         if (rq->rq_disk) {
384                 ide_driver_t *drv;
385
386                 drv = *(ide_driver_t **)rq->rq_disk->private_data;
387                 drv->end_request(drive, 1, rq->nr_sectors);
388         } else
389                 ide_end_request(drive, 1, rq->nr_sectors);
390 }
391
392 /*
393  * We got an interrupt on a task_in case, but no errors and no DRQ.
394  *
395  * It might be a spurious irq (shared irq), but it might be a
396  * command that had no output.
397  */
398 static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq, u8 stat)
399 {
400         /* Command all done? */
401         if (OK_STAT(stat, READY_STAT, BUSY_STAT)) {
402                 task_end_request(drive, rq, stat);
403                 return ide_stopped;
404         }
405
406         /* Assume it was a spurious irq */
407         ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
408         return ide_started;
409 }
410
411 /*
412  * Handler for command with PIO data-in phase (Read/Read Multiple).
413  */
414 static ide_startstop_t task_in_intr(ide_drive_t *drive)
415 {
416         ide_hwif_t *hwif = drive->hwif;
417         struct request *rq = HWGROUP(drive)->rq;
418         u8 stat = ide_read_status(drive);
419
420         /* Error? */
421         if (stat & ERR_STAT)
422                 return task_error(drive, rq, __func__, stat);
423
424         /* Didn't want any data? Odd. */
425         if (!(stat & DRQ_STAT))
426                 return task_in_unexpected(drive, rq, stat);
427
428         ide_pio_datablock(drive, rq, 0);
429
430         /* Are we done? Check status and finish transfer. */
431         if (!hwif->nleft) {
432                 stat = wait_drive_not_busy(drive);
433                 if (!OK_STAT(stat, 0, BAD_STAT))
434                         return task_error(drive, rq, __func__, stat);
435                 task_end_request(drive, rq, stat);
436                 return ide_stopped;
437         }
438
439         /* Still data left to transfer. */
440         ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
441
442         return ide_started;
443 }
444
445 /*
446  * Handler for command with PIO data-out phase (Write/Write Multiple).
447  */
448 static ide_startstop_t task_out_intr (ide_drive_t *drive)
449 {
450         ide_hwif_t *hwif = drive->hwif;
451         struct request *rq = HWGROUP(drive)->rq;
452         u8 stat = ide_read_status(drive);
453
454         if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
455                 return task_error(drive, rq, __func__, stat);
456
457         /* Deal with unexpected ATA data phase. */
458         if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
459                 return task_error(drive, rq, __func__, stat);
460
461         if (!hwif->nleft) {
462                 task_end_request(drive, rq, stat);
463                 return ide_stopped;
464         }
465
466         /* Still data left to transfer. */
467         ide_pio_datablock(drive, rq, 1);
468         ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
469
470         return ide_started;
471 }
472
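/*
 * Wait for DRQ after issuing a PIO-out command, install task_out_intr()
 * as the IRQ handler and send the first data block.
 */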
473 static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
474 {
475         ide_startstop_t startstop;
476
477         if (ide_wait_stat(&startstop, drive, DRQ_STAT,
478                           drive->bad_wstat, WAIT_DRQ)) {
479                 printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
480                                 drive->name,
481                                 drive->hwif->data_phase ? "MULT" : "",
482                                 drive->addressing ? "_EXT" : "");
483                 return startstop;
484         }
485
486         if (!drive->unmask)
487                 local_irq_disable();
488
489         ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
490         ide_pio_datablock(drive, rq, 1);
491
492         return ide_started;
493 }
494
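/*
 * Build a REQ_TYPE_ATA_TASKFILE request around @task and execute it
 * synchronously, transferring @nsect whole sectors through @buf.
 */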
495 int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
496 {
497         struct request *rq;
498         int error;
499
500         rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
501         rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
502         rq->buffer = buf;
503
504         /*
505          * (ks) We currently transfer only whole sectors.
506          * This is sufficient for now, but it would be great
507          * to find a way to transfer any size, so that special
508          * commands like READ LONG could be supported.
509          */
510         rq->hard_nr_sectors = rq->nr_sectors = nsect;
511         rq->hard_cur_sectors = rq->current_nr_sectors = nsect;
512
513         if (task->tf_flags & IDE_TFLAG_WRITE)
514                 rq->cmd_flags |= REQ_RW;
515
516         rq->special = task;
517         task->rq = rq;
518
519         error = blk_execute_rq(drive->queue, NULL, rq, 0);
520         blk_put_request(rq);
521
522         return error;
523 }
524
525 EXPORT_SYMBOL(ide_raw_taskfile);
526
527 int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task)
528 {
529         task->data_phase = TASKFILE_NO_DATA;
530
531         return ide_raw_taskfile(drive, task, NULL, 0);
532 }
533 EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
534
535 #ifdef CONFIG_IDE_TASK_IOCTL
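/*
 * HDIO_DRIVE_TASKFILE ioctl: copy the user's taskfile request and data
 * buffers in, execute the command and copy the results back out.
 */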
536 int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
537 {
538         ide_task_request_t      *req_task;
539         ide_task_t              args;
540         u8 *outbuf              = NULL;
541         u8 *inbuf               = NULL;
542         u8 *data_buf            = NULL;
543         int err                 = 0;
544         int tasksize            = sizeof(struct ide_task_request_s);
545         unsigned int taskin     = 0;
546         unsigned int taskout    = 0;
547         u16 nsect               = 0;
548         char __user *buf = (char __user *)arg;
549
550 //      printk("IDE Taskfile ...\n");
551
552         req_task = kzalloc(tasksize, GFP_KERNEL);
553         if (req_task == NULL) return -ENOMEM;
554         if (copy_from_user(req_task, buf, tasksize)) {
555                 kfree(req_task);
556                 return -EFAULT;
557         }
558
559         taskout = req_task->out_size;
560         taskin  = req_task->in_size;
561
562         if (taskin > 65536 || taskout > 65536) {
563                 err = -EINVAL;
564                 goto abort;
565         }
566
567         if (taskout) {
568                 int outtotal = tasksize;
569                 outbuf = kzalloc(taskout, GFP_KERNEL);
570                 if (outbuf == NULL) {
571                         err = -ENOMEM;
572                         goto abort;
573                 }
574                 if (copy_from_user(outbuf, buf + outtotal, taskout)) {
575                         err = -EFAULT;
576                         goto abort;
577                 }
578         }
579
580         if (taskin) {
581                 int intotal = tasksize + taskout;
582                 inbuf = kzalloc(taskin, GFP_KERNEL);
583                 if (inbuf == NULL) {
584                         err = -ENOMEM;
585                         goto abort;
586                 }
587                 if (copy_from_user(inbuf, buf + intotal, taskin)) {
588                         err = -EFAULT;
589                         goto abort;
590                 }
591         }
592
593         memset(&args, 0, sizeof(ide_task_t));
594
595         memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
596         memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
597
598         args.data_phase = req_task->data_phase;
599
600         args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
601                         IDE_TFLAG_IN_TF;
602         if (drive->addressing == 1)
603                 args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);
604
605         if (req_task->out_flags.all) {
606                 args.tf_flags |= IDE_TFLAG_FLAGGED;
607
608                 if (req_task->out_flags.b.data)
609                         args.tf_flags |= IDE_TFLAG_OUT_DATA;
610
611                 if (req_task->out_flags.b.nsector_hob)
612                         args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
613                 if (req_task->out_flags.b.sector_hob)
614                         args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
615                 if (req_task->out_flags.b.lcyl_hob)
616                         args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
617                 if (req_task->out_flags.b.hcyl_hob)
618                         args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;
619
620                 if (req_task->out_flags.b.error_feature)
621                         args.tf_flags |= IDE_TFLAG_OUT_FEATURE;
622                 if (req_task->out_flags.b.nsector)
623                         args.tf_flags |= IDE_TFLAG_OUT_NSECT;
624                 if (req_task->out_flags.b.sector)
625                         args.tf_flags |= IDE_TFLAG_OUT_LBAL;
626                 if (req_task->out_flags.b.lcyl)
627                         args.tf_flags |= IDE_TFLAG_OUT_LBAM;
628                 if (req_task->out_flags.b.hcyl)
629                         args.tf_flags |= IDE_TFLAG_OUT_LBAH;
630         } else {
631                 args.tf_flags |= IDE_TFLAG_OUT_TF;
632                 if (args.tf_flags & IDE_TFLAG_LBA48)
633                         args.tf_flags |= IDE_TFLAG_OUT_HOB;
634         }
635
636         if (req_task->in_flags.b.data)
637                 args.tf_flags |= IDE_TFLAG_IN_DATA;
638
639         switch(req_task->data_phase) {
640                 case TASKFILE_MULTI_OUT:
641                         if (!drive->mult_count) {
642                                 /* (hs): give up if multcount is not set */
643                                 printk(KERN_ERR "%s: %s: Multimode Write, "
644                                         "multcount is not set\n",
645                                         drive->name, __func__);
646                                 err = -EPERM;
647                                 goto abort;
648                         }
649                         /* fall through */
650                 case TASKFILE_OUT:
651                         /* fall through */
652                 case TASKFILE_OUT_DMAQ:
653                 case TASKFILE_OUT_DMA:
654                         nsect = taskout / SECTOR_SIZE;
655                         data_buf = outbuf;
656                         break;
657                 case TASKFILE_MULTI_IN:
658                         if (!drive->mult_count) {
659                                 /* (hs): give up if multcount is not set */
660                                 printk(KERN_ERR "%s: %s: Multimode Read failure, "
661                                         "multcount is not set\n",
662                                         drive->name, __func__);
663                                 err = -EPERM;
664                                 goto abort;
665                         }
666                         /* fall through */
667                 case TASKFILE_IN:
668                         /* fall through */
669                 case TASKFILE_IN_DMAQ:
670                 case TASKFILE_IN_DMA:
671                         nsect = taskin / SECTOR_SIZE;
672                         data_buf = inbuf;
673                         break;
674                 case TASKFILE_NO_DATA:
675                         break;
676                 default:
677                         err = -EFAULT;
678                         goto abort;
679         }
680
681         if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
682                 nsect = 0;
683         else if (!nsect) {
684                 nsect = (args.tf.hob_nsect << 8) | args.tf.nsect;
685
686                 if (!nsect) {
687                         printk(KERN_ERR "%s: in/out command without data\n",
688                                         drive->name);
689                         err = -EFAULT;
690                         goto abort;
691                 }
692         }
693
694         if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE)
695                 args.tf_flags |= IDE_TFLAG_WRITE;
696
697         err = ide_raw_taskfile(drive, &args, data_buf, nsect);
698
699         memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
700         memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);
701
702         if ((args.tf_flags & IDE_TFLAG_FLAGGED_SET_IN_FLAGS) &&
703             req_task->in_flags.all == 0) {
704                 req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
705                 if (drive->addressing == 1)
706                         req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
707         }
708
709         if (copy_to_user(buf, req_task, tasksize)) {
710                 err = -EFAULT;
711                 goto abort;
712         }
713         if (taskout) {
714                 int outtotal = tasksize;
715                 if (copy_to_user(buf + outtotal, outbuf, taskout)) {
716                         err = -EFAULT;
717                         goto abort;
718                 }
719         }
720         if (taskin) {
721                 int intotal = tasksize + taskout;
722                 if (copy_to_user(buf + intotal, inbuf, taskin)) {
723                         err = -EFAULT;
724                         goto abort;
725                 }
726         }
727 abort:
728         kfree(req_task);
729         kfree(outbuf);
730         kfree(inbuf);
731
732 //      printk("IDE Taskfile ioctl ended. rc = %i\n", err);
733
734         return err;
735 }
736 #endif
737
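/*
 * HDIO_DRIVE_CMD ioctl: execute a command described by a 4-byte
 * argument block, optionally reading sectors of data back.
 */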
738 int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
739 {
740         u8 *buf = NULL;
741         int bufsize = 0, err = 0;
742         u8 args[4], xfer_rate = 0;
743         ide_task_t tfargs;
744         struct ide_taskfile *tf = &tfargs.tf;
745         struct hd_driveid *id = drive->id;
746
747         if (NULL == (void *) arg) {
748                 struct request *rq;
749
750                 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
751                 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
752                 err = blk_execute_rq(drive->queue, NULL, rq, 0);
753                 blk_put_request(rq);
754
755                 return err;
756         }
757
758         if (copy_from_user(args, (void __user *)arg, 4))
759                 return -EFAULT;
760
761         memset(&tfargs, 0, sizeof(ide_task_t));
762         tf->feature = args[2];
763         if (args[0] == WIN_SMART) {
764                 tf->nsect = args[3];
765                 tf->lbal  = args[1];
766                 tf->lbam  = 0x4f;
767                 tf->lbah  = 0xc2;
768                 tfargs.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_IN_NSECT;
769         } else {
770                 tf->nsect = args[1];
771                 tfargs.tf_flags = IDE_TFLAG_OUT_FEATURE |
772                                   IDE_TFLAG_OUT_NSECT | IDE_TFLAG_IN_NSECT;
773         }
774         tf->command = args[0];
775         tfargs.data_phase = args[3] ? TASKFILE_IN : TASKFILE_NO_DATA;
776
777         if (args[3]) {
778                 tfargs.tf_flags |= IDE_TFLAG_IO_16BIT;
779                 bufsize = SECTOR_WORDS * 4 * args[3];
780                 buf = kzalloc(bufsize, GFP_KERNEL);
781                 if (buf == NULL)
782                         return -ENOMEM;
783         }
784
785         if (tf->command == WIN_SETFEATURES &&
786             tf->feature == SETFEATURES_XFER &&
787             tf->nsect >= XFER_SW_DMA_0 &&
788             (id->dma_ultra || id->dma_mword || id->dma_1word)) {
789                 xfer_rate = args[1];
790                 if (tf->nsect > XFER_UDMA_2 && !eighty_ninty_three(drive)) {
791                         printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot "
792                                             "be set\n", drive->name);
793                         goto abort;
794                 }
795         }
796
797         err = ide_raw_taskfile(drive, &tfargs, buf, args[3]);
798
799         args[0] = tf->status;
800         args[1] = tf->error;
801         args[2] = tf->nsect;
802
803         if (!err && xfer_rate) {
804                 /* set the new transfer rate and re-read the drive id */
805                 ide_set_xfer_rate(drive, xfer_rate);
806                 ide_driveid_update(drive);
807         }
808 abort:
809         if (copy_to_user((void __user *)arg, &args, 4))
810                 err = -EFAULT;
811         if (buf) {
812                 if (copy_to_user((void __user *)(arg + 4), buf, bufsize))
813                         err = -EFAULT;
814                 kfree(buf);
815         }
816         return err;
817 }
818
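/*
 * HDIO_DRIVE_TASK ioctl: execute a no-data command from a 7-byte
 * register block copied from user space.
 */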
819 int ide_task_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
820 {
821         void __user *p = (void __user *)arg;
822         int err = 0;
823         u8 args[7];
824         ide_task_t task;
825
826         if (copy_from_user(args, p, 7))
827                 return -EFAULT;
828
829         memset(&task, 0, sizeof(task));
830         memcpy(&task.tf_array[7], &args[1], 6);
831         task.tf.command = args[0];
832         task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
833
834         err = ide_no_data_taskfile(drive, &task);
835
836         args[0] = task.tf.command;
837         memcpy(&args[1], &task.tf_array[7], 6);
838
839         if (copy_to_user(p, args, 7))
840                 err = -EFAULT;
841
842         return err;
843 }