2 * IDE ATAPI streaming tape driver.
4 * Copyright (C) 1995-1999 Gadi Oxman <gadio@netvision.net.il>
5 * Copyright (C) 2003-2005 Bartlomiej Zolnierkiewicz
7 * This driver was constructed as a student project in the software laboratory
8 * of the faculty of electrical engineering in the Technion - Israel's
9 * Institute Of Technology, with the guide of Avner Lottem and Dr. Ilana David.
11 * It is hereby placed under the terms of the GNU general public license.
12 * (See linux/COPYING).
14 * For a historical changelog see
15 * Documentation/ide/ChangeLog.ide-tape.1995-2002
18 #define IDETAPE_VERSION "1.19"
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/string.h>
23 #include <linux/kernel.h>
24 #include <linux/delay.h>
25 #include <linux/timer.h>
27 #include <linux/interrupt.h>
28 #include <linux/jiffies.h>
29 #include <linux/major.h>
30 #include <linux/errno.h>
31 #include <linux/genhd.h>
32 #include <linux/slab.h>
33 #include <linux/pci.h>
34 #include <linux/ide.h>
35 #include <linux/smp_lock.h>
36 #include <linux/completion.h>
37 #include <linux/bitops.h>
38 #include <linux/mutex.h>
39 #include <scsi/scsi.h>
41 #include <asm/byteorder.h>
43 #include <asm/uaccess.h>
45 #include <asm/unaligned.h>
46 #include <linux/mtio.h>
49 /* output errors only */
51 /* output all sense key/asc */
53 /* info regarding all chrdev-related procedures */
54 DBG_CHRDEV = (1 << 2),
55 /* all remaining procedures */
57 /* buffer alloc info (pc_stack & rq_stack) */
58 DBG_PCRQ_STACK = (1 << 4),
61 /* define to see debug info */
62 #define IDETAPE_DEBUG_LOG 0
65 #define debug_log(lvl, fmt, args...) \
67 if (tape->debug_mask & lvl) \
68 printk(KERN_INFO "ide-tape: " fmt, ## args); \
71 #define debug_log(lvl, fmt, args...) do {} while (0)
74 /**************************** Tunable parameters *****************************/
78 * Pipelined mode parameters.
80 * We try to use the minimum number of stages which is enough to
81 * keep the tape constantly streaming. To accomplish that, we implement
82 * a feedback loop around the maximum number of stages:
84 * We start from MIN maximum stages (we will not even use MIN stages
85 * if we don't need them), increment it by RATE*(MAX-MIN)
86 * whenever we sense that the pipeline is empty, until we reach
87 * the optimum value or until we reach MAX.
89 * Setting the following parameter to 0 is illegal: the pipelined mode
90 * cannot be disabled (calculate_speeds() divides by tape->max_stages.)
92 #define IDETAPE_MIN_PIPELINE_STAGES 1
93 #define IDETAPE_MAX_PIPELINE_STAGES 400
94 #define IDETAPE_INCREASE_STAGES_RATE 20
97 * After each failed packet command we issue a request sense command
98 * and retry the packet command IDETAPE_MAX_PC_RETRIES times.
100 * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
102 #define IDETAPE_MAX_PC_RETRIES 3
105 * With each packet command, we allocate a buffer of
106 * IDETAPE_PC_BUFFER_SIZE bytes. This is used for several packet
107 * commands (Not for READ/WRITE commands).
109 #define IDETAPE_PC_BUFFER_SIZE 256
112 * In various places in the driver, we need to allocate storage
113 * for packet commands and requests, which will remain valid while
114 * we leave the driver to wait for an interrupt or a timeout event.
116 #define IDETAPE_PC_STACK (10 + IDETAPE_MAX_PC_RETRIES)
119 * Some drives (for example, Seagate STT3401A Travan) require a very long
120 * timeout, because they don't return an interrupt or clear their busy bit
121 * until after the command completes (even retension commands).
123 #define IDETAPE_WAIT_CMD (900*HZ)
126 * The following parameter is used to select the point in the internal
127 * tape fifo in which we will start to refill the buffer. Decreasing
128 * the following parameter will improve the system's latency and
129 * interactive response, while using a high value might improve system
132 #define IDETAPE_FIFO_THRESHOLD 2
135 * DSC polling parameters.
137 * Polling for DSC (a single bit in the status register) is a very
138 * important function in ide-tape. There are two cases in which we
141 * 1. Before a read/write packet command, to ensure that we
142 * can transfer data from/to the tape's data buffers, without
143 * causing an actual media access. In case the tape is not
144 * ready yet, we take out our request from the device
145 * request queue, so that ide.c will service requests from
146 * the other device on the same interface meanwhile.
148 * 2. After the successful initialization of a "media access
149 * packet command", which is a command which can take a long
150 * time to complete (it can be several seconds or even an hour).
152 * Again, we postpone our request in the middle to free the bus
153 * for the other device. The polling frequency here should be
154 * lower than the read/write frequency since those media access
155 * commands are slow. We start from a "fast" frequency -
156 * IDETAPE_DSC_MA_FAST (one second), and if we don't receive DSC
157 * after IDETAPE_DSC_MA_THRESHOLD (5 minutes), we switch it to a
158 * lower frequency - IDETAPE_DSC_MA_SLOW (1 minute).
160 * We also set a timeout for the timer, in case something goes wrong.
161 * The timeout should be longer than the maximum execution time of a
168 #define IDETAPE_DSC_RW_MIN 5*HZ/100 /* 50 msec */
169 #define IDETAPE_DSC_RW_MAX 40*HZ/100 /* 400 msec */
170 #define IDETAPE_DSC_RW_TIMEOUT 2*60*HZ /* 2 minutes */
171 #define IDETAPE_DSC_MA_FAST 2*HZ /* 2 seconds */
172 #define IDETAPE_DSC_MA_THRESHOLD 5*60*HZ /* 5 minutes */
173 #define IDETAPE_DSC_MA_SLOW 30*HZ /* 30 seconds */
174 #define IDETAPE_DSC_MA_TIMEOUT 2*60*60*HZ /* 2 hours */
176 /*************************** End of tunable parameters ***********************/
179 * Read/Write error simulation
181 #define SIMULATE_ERRORS 0
184 * For general magnetic tape device compatibility.
187 idetape_direction_none,
188 idetape_direction_read,
189 idetape_direction_write
190 } idetape_chrdev_direction_t;
195 struct idetape_bh *b_reqnext;
200 * Our view of a packet command.
202 typedef struct idetape_packet_command_s {
203 u8 c[12]; /* Actual packet bytes */
204 int retries; /* On each retry, we increment retries */
205 int error; /* Error code */
206 int request_transfer; /* Bytes to transfer */
207 int actually_transferred; /* Bytes actually transferred */
208 int buffer_size; /* Size of our data buffer */
209 struct idetape_bh *bh;
212 u8 *buffer; /* Data buffer */
213 u8 *current_position; /* Pointer into the above buffer */
214 ide_startstop_t (*callback) (ide_drive_t *); /* Called when this packet command is completed */
215 u8 pc_buffer[IDETAPE_PC_BUFFER_SIZE]; /* Temporary buffer */
216 unsigned long flags; /* Status/Action bit flags: long for set_bit */
220 * Packet command flag bits.
222 /* Set when an error is considered normal - We won't retry */
224 /* 1 When polling for DSC on a media access command */
225 #define PC_WAIT_FOR_DSC 1
226 /* 1 when we prefer to use DMA if possible */
227 #define PC_DMA_RECOMMENDED 2
228 /* 1 while DMA in progress */
229 #define PC_DMA_IN_PROGRESS 3
230 /* 1 when encountered problem during DMA */
231 #define PC_DMA_ERROR 4
238 typedef struct idetape_stage_s {
239 struct request rq; /* The corresponding request */
240 struct idetape_bh *bh; /* The data buffers */
241 struct idetape_stage_s *next; /* Pointer to the next stage */
245 * Most of our global data which we need to save even as we leave the
246 * driver due to an interrupt or a timer event is stored in a variable
247 * of type idetape_tape_t, defined below.
249 typedef struct ide_tape_obj {
251 ide_driver_t *driver;
252 struct gendisk *disk;
256 * Since a typical character device operation requires more
257 * than one packet command, we provide here enough memory
258 * for the maximum of interconnected packet commands.
259 * The packet commands are stored in the circular array pc_stack.
260 * pc_stack_index points to the last used entry, and warps around
261 * to the start when we get to the last array entry.
263 * pc points to the current processed packet command.
265 * failed_pc points to the last failed packet command, or contains
266 * NULL if we do not need to retry any packet command. This is
267 * required since an additional packet command is needed before the
268 * retry, to get detailed information on what went wrong.
270 /* Current packet command */
272 /* Last failed packet command */
273 idetape_pc_t *failed_pc;
274 /* Packet command stack */
275 idetape_pc_t pc_stack[IDETAPE_PC_STACK];
276 /* Next free packet command storage space */
278 struct request rq_stack[IDETAPE_PC_STACK];
279 /* We implement a circular array */
283 * DSC polling variables.
285 * While polling for DSC we use postponed_rq to postpone the
286 * current request so that ide.c will be able to service
287 * pending requests on the other device. Note that at most
288 * we will have only one DSC (usually data transfer) request
289 * in the device request queue. Additional requests can be
290 * queued in our internal pipeline, but they will be visible
291 * to ide.c only one at a time.
293 struct request *postponed_rq;
294 /* The time in which we started polling for DSC */
295 unsigned long dsc_polling_start;
296 /* Timer used to poll for dsc */
297 struct timer_list dsc_timer;
298 /* Read/Write dsc polling frequency */
299 unsigned long best_dsc_rw_frequency;
300 /* The current polling frequency */
301 unsigned long dsc_polling_frequency;
302 /* Maximum waiting time */
303 unsigned long dsc_timeout;
306 * Read position information
310 unsigned int first_frame_position;
311 unsigned int last_frame_position;
312 unsigned int blocks_in_buffer;
315 * Last error information
317 u8 sense_key, asc, ascq;
320 * Character device operation
325 /* Current character device data transfer direction */
326 idetape_chrdev_direction_t chrdev_direction;
331 /* Usually 512 or 1024 bytes */
332 unsigned short tape_block_size;
335 /* Copy of the tape's Capabilities and Mechanical Page */
339 * Active data transfer request parameters.
341 * At most, there is only one ide-tape originated data transfer
342 * request in the device request queue. This allows ide.c to
343 * easily service requests from the other device when we
344 * postpone our active request. In the pipelined operation
345 * mode, we use our internal pipeline structure to hold
346 * more data requests.
348 * The data buffer size is chosen based on the tape's
351 /* Pointer to the request which is waiting in the device request queue */
352 struct request *active_data_request;
353 /* Data buffer size (chosen based on the tape's recommendation) */
355 idetape_stage_t *merge_stage;
356 int merge_stage_size;
357 struct idetape_bh *bh;
362 * Pipeline parameters.
364 * To accomplish non-pipelined mode, we simply set the following
365 * variables to zero (or NULL, where appropriate).
367 /* Number of currently used stages */
369 /* Number of pending stages */
370 int nr_pending_stages;
371 /* We will not allocate more than this number of stages */
372 int max_stages, min_pipeline, max_pipeline;
373 /* The first stage which will be removed from the pipeline */
374 idetape_stage_t *first_stage;
375 /* The currently active stage */
376 idetape_stage_t *active_stage;
377 /* Will be serviced after the currently active request */
378 idetape_stage_t *next_stage;
379 /* New requests will be added to the pipeline here */
380 idetape_stage_t *last_stage;
381 /* Optional free stage which we can use */
382 idetape_stage_t *cache_stage;
384 /* Wasted space in each stage */
387 /* Status/Action flags: long for set_bit */
389 /* protects the ide-tape queue */
393 * Measures average tape speed
395 unsigned long avg_time;
401 char firmware_revision[6];
402 int firmware_revision_num;
404 /* the door is currently locked */
406 /* the tape hardware is write protected */
408 /* the tape is write protected (hardware or opened as read-only) */
412 * Limit the number of times a request can
413 * be postponed, to avoid an infinite postpone
416 /* request postpone count limit */
420 * Measures number of frames:
422 * 1. written/read to/from the driver pipeline (pipeline_head).
423 * 2. written/read to/from the tape buffers (idetape_bh).
424 * 3. written/read by the tape to/from the media (tape_head).
432 * Speed control at the tape buffers input/output
434 unsigned long insert_time;
437 int max_insert_speed;
438 int measure_insert_time;
441 * Measure tape still time, in milliseconds
443 unsigned long tape_still_time_begin;
447 * Speed regulation negative feedback loop
450 int pipeline_head_speed;
451 int controlled_pipeline_head_speed;
452 int uncontrolled_pipeline_head_speed;
453 int controlled_last_pipeline_head;
454 int uncontrolled_last_pipeline_head;
455 unsigned long uncontrolled_pipeline_head_time;
456 unsigned long controlled_pipeline_head_time;
457 int controlled_previous_pipeline_head;
458 int uncontrolled_previous_pipeline_head;
459 unsigned long controlled_previous_head_time;
460 unsigned long uncontrolled_previous_head_time;
461 int restart_speed_control_req;
466 static DEFINE_MUTEX(idetape_ref_mutex);
468 static struct class *idetape_sysfs_class;
470 #define to_ide_tape(obj) container_of(obj, struct ide_tape_obj, kref)
472 #define ide_tape_g(disk) \
473 container_of((disk)->private_data, struct ide_tape_obj, driver)
475 static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
477 struct ide_tape_obj *tape = NULL;
479 mutex_lock(&idetape_ref_mutex);
480 tape = ide_tape_g(disk);
482 kref_get(&tape->kref);
483 mutex_unlock(&idetape_ref_mutex);
487 static void ide_tape_release(struct kref *);
489 static void ide_tape_put(struct ide_tape_obj *tape)
491 mutex_lock(&idetape_ref_mutex);
492 kref_put(&tape->kref, ide_tape_release);
493 mutex_unlock(&idetape_ref_mutex);
499 #define DOOR_UNLOCKED 0
500 #define DOOR_LOCKED 1
501 #define DOOR_EXPLICITLY_LOCKED 2
504 * Tape flag bits values.
506 #define IDETAPE_IGNORE_DSC 0
507 #define IDETAPE_ADDRESS_VALID 1 /* 0 When the tape position is unknown */
508 #define IDETAPE_BUSY 2 /* Device already opened */
509 #define IDETAPE_PIPELINE_ERROR 3 /* Error detected in a pipeline stage */
510 #define IDETAPE_DETECT_BS 4 /* Attempt to auto-detect the current user block size */
511 #define IDETAPE_FILEMARK 5 /* Currently on a filemark */
512 #define IDETAPE_DRQ_INTERRUPT 6 /* DRQ interrupt device */
513 #define IDETAPE_READ_ERROR 7
514 #define IDETAPE_PIPELINE_ACTIVE 8 /* pipeline active */
515 /* 0 = no tape is loaded, so we don't rewind after ejecting */
516 #define IDETAPE_MEDIUM_PRESENT 9
519 * Some defines for the READ BUFFER command
521 #define IDETAPE_RETRIEVE_FAULTY_BLOCK 6
524 * Some defines for the SPACE command
526 #define IDETAPE_SPACE_OVER_FILEMARK 1
527 #define IDETAPE_SPACE_TO_EOD 3
530 * Some defines for the LOAD UNLOAD command
532 #define IDETAPE_LU_LOAD_MASK 1
533 #define IDETAPE_LU_RETENSION_MASK 2
534 #define IDETAPE_LU_EOT_MASK 4
537 * Special requests for our block device strategy routine.
539 * In order to service a character device command, we add special
540 * requests to the tail of our block device request queue and wait
541 * for their completion.
545 REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
546 REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
547 REQ_IDETAPE_READ = (1 << 2),
548 REQ_IDETAPE_WRITE = (1 << 3),
549 REQ_IDETAPE_READ_BUFFER = (1 << 4),
553 * Error codes which are returned in rq->errors to the higher part
556 #define IDETAPE_ERROR_GENERAL 101
557 #define IDETAPE_ERROR_FILEMARK 102
558 #define IDETAPE_ERROR_EOD 103
561 * The following is used to format the general configuration word of
562 * the ATAPI IDENTIFY DEVICE command.
564 struct idetape_id_gcw {
565 unsigned packet_size :2; /* Packet Size */
566 unsigned reserved234 :3; /* Reserved */
567 unsigned drq_type :2; /* Command packet DRQ type */
568 unsigned removable :1; /* Removable media */
569 unsigned device_type :5; /* Device type */
570 unsigned reserved13 :1; /* Reserved */
571 unsigned protocol :2; /* Protocol type */
574 /* Structures related to the SELECT SENSE / MODE SENSE packet commands. */
575 #define IDETAPE_BLOCK_DESCRIPTOR 0
576 #define IDETAPE_CAPABILITIES_PAGE 0x2a
579 * The variables below are used for the character device interface.
580 * Additional state variables are defined in our ide_drive_t structure.
582 static struct ide_tape_obj * idetape_devs[MAX_HWIFS * MAX_DRIVES];
584 #define ide_tape_f(file) ((file)->private_data)
586 static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
588 struct ide_tape_obj *tape = NULL;
590 mutex_lock(&idetape_ref_mutex);
591 tape = idetape_devs[i];
593 kref_get(&tape->kref);
594 mutex_unlock(&idetape_ref_mutex);
599 * Function declarations
602 static int idetape_chrdev_release (struct inode *inode, struct file *filp);
603 static void idetape_write_release (ide_drive_t *drive, unsigned int minor);
606 * Too bad. The drive wants to send us data which we are not ready to accept.
607 * Just throw it away.
609 static void idetape_discard_data (ide_drive_t *drive, unsigned int bcount)
612 (void) HWIF(drive)->INB(IDE_DATA_REG);
/*
 * PIO-read bcount bytes from the drive into the buffer list hanging off
 * pc->bh, filling each idetape_bh in turn (b_count tracks how full a
 * buffer is; a full buffer advances to the next one in the chain).
 * If the buffer list runs out before bcount is consumed, the remaining
 * bytes are drained with idetape_discard_data() so the drive is not left
 * mid-transfer.
 * NOTE(review): this span is a damaged extraction -- the loop header,
 * braces and buffer-advance lines are missing; comments only added here.
 */
615 static void idetape_input_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsigned int bcount)
617 struct idetape_bh *bh = pc->bh;
/* No buffer to receive into: complain and throw the data away. */
622 printk(KERN_ERR "ide-tape: bh == NULL in "
623 "idetape_input_buffers\n");
624 idetape_discard_data(drive, bcount);
/* Transfer at most the free space left in the current buffer. */
627 count = min((unsigned int)(bh->b_size - atomic_read(&bh->b_count)), bcount);
628 HWIF(drive)->atapi_input_bytes(drive, bh->b_data + atomic_read(&bh->b_count), count);
630 atomic_add(count, &bh->b_count);
/* Buffer full: reset its fill count and move on to the next bh. */
631 if (atomic_read(&bh->b_count) == bh->b_size) {
634 atomic_set(&bh->b_count, 0);
/*
 * PIO-write bcount bytes to the drive from the buffer list hanging off
 * pc->bh. pc->b_data/pc->b_count track the current position and bytes
 * remaining in the current buffer; when a buffer is exhausted the next
 * bh in the chain is loaded.
 * NOTE(review): damaged extraction -- the loop header, b_data advance and
 * braces are missing from this view; comments only added here.
 */
640 static void idetape_output_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsigned int bcount)
642 struct idetape_bh *bh = pc->bh;
/* Drive asked for more data than we have buffered: report the bug. */
647 printk(KERN_ERR "ide-tape: bh == NULL in "
648 "idetape_output_buffers\n");
/* Send at most what remains in the current buffer. */
651 count = min((unsigned int)pc->b_count, (unsigned int)bcount);
652 HWIF(drive)->atapi_output_bytes(drive, pc->b_data, count);
655 pc->b_count -= count;
/* Current buffer drained: step to the next bh in the chain. */
657 pc->bh = bh = bh->b_reqnext;
659 pc->b_data = bh->b_data;
660 pc->b_count = atomic_read(&bh->b_count);
/*
 * Re-synchronize the bh buffer chain's fill counters with the number of
 * bytes the drive reports were actually transferred
 * (pc->actually_transferred), e.g. after a partial DMA transfer.
 * Write commands are left alone -- only read buffers need fixing up.
 * NOTE(review): damaged extraction -- loop header, early return and
 * chain-walk lines are missing from this view; comments only added here.
 */
666 static void idetape_update_buffers (idetape_pc_t *pc)
668 struct idetape_bh *bh = pc->bh;
670 unsigned int bcount = pc->actually_transferred;
/* Nothing to fix for writes. */
672 if (test_bit(PC_WRITING, &pc->flags))
676 printk(KERN_ERR "ide-tape: bh == NULL in "
677 "idetape_update_buffers\n");
/* Mark this buffer as holding min(its size, bytes still unaccounted). */
680 count = min((unsigned int)bh->b_size, (unsigned int)bcount);
681 atomic_set(&bh->b_count, count);
682 if (atomic_read(&bh->b_count) == bh->b_size)
690 * idetape_next_pc_storage returns a pointer to a place in which we can
691 * safely store a packet command, even though we intend to leave the
692 * driver. A storage space for a maximum of IDETAPE_PC_STACK packet
693 * commands is allocated at initialization time.
695 static idetape_pc_t *idetape_next_pc_storage (ide_drive_t *drive)
697 idetape_tape_t *tape = drive->driver_data;
699 debug_log(DBG_PCRQ_STACK, "pc_stack_index=%d\n", tape->pc_stack_index);
701 if (tape->pc_stack_index == IDETAPE_PC_STACK)
702 tape->pc_stack_index=0;
703 return (&tape->pc_stack[tape->pc_stack_index++]);
707 * idetape_next_rq_storage is used along with idetape_next_pc_storage.
708 * Since we queue packet commands in the request queue, we need to
709 * allocate a request, along with the allocation of a packet command.
712 /**************************************************************
714 * This should get fixed to use kmalloc(.., GFP_ATOMIC) *
715 * followed later on by kfree(). -ml *
717 **************************************************************/
719 static struct request *idetape_next_rq_storage (ide_drive_t *drive)
721 idetape_tape_t *tape = drive->driver_data;
723 debug_log(DBG_PCRQ_STACK, "rq_stack_index=%d\n", tape->rq_stack_index);
725 if (tape->rq_stack_index == IDETAPE_PC_STACK)
726 tape->rq_stack_index=0;
727 return (&tape->rq_stack[tape->rq_stack_index++]);
731 * idetape_init_pc initializes a packet command.
733 static void idetape_init_pc (idetape_pc_t *pc)
735 memset(pc->c, 0, 12);
738 pc->request_transfer = 0;
739 pc->buffer = pc->pc_buffer;
740 pc->buffer_size = IDETAPE_PC_BUFFER_SIZE;
746 * called on each failed packet command retry to analyze the request sense. We
747 * currently do not utilize this information.
/*
 * Called on each failed packet command retry to analyze the REQUEST SENSE
 * data: records sense key/ASC/ASCQ in the tape state, corrects the
 * transferred-byte count after a DMA error, and decides whether the failed
 * command (tape->failed_pc) should be aborted rather than retried
 * (filemark, end-of-data, or harmless zero-length transfer).
 * NOTE(review): damaged extraction -- braces, else-branches and closing
 * lines are missing from this view; comments only added here.
 */
749 static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
751 idetape_tape_t *tape = drive->driver_data;
752 idetape_pc_t *pc = tape->failed_pc;
/* Cache the fixed-format sense data fields for later reporting. */
754 tape->sense_key = sense[2] & 0xF;
755 tape->asc = sense[12];
756 tape->ascq = sense[13];
758 debug_log(DBG_ERR, "pc = %x, sense key = %x, asc = %x, ascq = %x\n",
759 pc->c[0], tape->sense_key, tape->asc, tape->ascq);
761 /* Correct pc->actually_transferred by asking the tape. */
762 if (test_bit(PC_DMA_ERROR, &pc->flags)) {
/* sense[3..6] holds the residue (blocks not transferred), big-endian. */
763 pc->actually_transferred = pc->request_transfer -
764 tape->tape_block_size *
765 be32_to_cpu(get_unaligned((u32 *)&sense[3]));
766 idetape_update_buffers(pc);
770 * If error was the result of a zero-length read or write command,
771 * with sense key=5, asc=0x22, ascq=0, let it slide. Some drives
772 * (i.e. Seagate STT3401A Travan) don't support 0-length read/writes.
/* c[2..4] is the transfer length of READ_6/WRITE_6 -- all zero here. */
774 if ((pc->c[0] == READ_6 || pc->c[0] == WRITE_6)
776 && pc->c[4] == 0 && pc->c[3] == 0 && pc->c[2] == 0) {
777 if (tape->sense_key == 5) {
778 /* don't report an error, everything's ok */
780 /* don't retry read/write */
781 set_bit(PC_ABORT, &pc->flags);
/* Filemark hit on a read: abort with the dedicated error code. */
784 if (pc->c[0] == READ_6 && (sense[2] & 0x80)) {
785 pc->error = IDETAPE_ERROR_FILEMARK;
786 set_bit(PC_ABORT, &pc->flags);
/* End-of-medium / early-warning on a write: report end-of-data. */
788 if (pc->c[0] == WRITE_6) {
789 if ((sense[2] & 0x40) || (tape->sense_key == 0xd
790 && tape->asc == 0x0 && tape->ascq == 0x2)) {
791 pc->error = IDETAPE_ERROR_EOD;
792 set_bit(PC_ABORT, &pc->flags);
/* Sense key 8 (blank check) on read/write also means end-of-data. */
795 if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
796 if (tape->sense_key == 8) {
797 pc->error = IDETAPE_ERROR_EOD;
798 set_bit(PC_ABORT, &pc->flags);
/* Partial transfer without abort: exhaust retries instead of redoing it. */
800 if (!test_bit(PC_ABORT, &pc->flags) &&
801 pc->actually_transferred)
802 pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
/*
 * Promote tape->next_stage to be the active pipeline stage: wire its
 * embedded request up to the tape's gendisk and buffer list, record it as
 * the active data request, and advance next_stage down the pipeline.
 * Logs a bug if called with no next stage queued.
 * NOTE(review): damaged extraction -- braces and the guard's return are
 * missing from this view; comments only added here.
 */
806 static void idetape_activate_next_stage(ide_drive_t *drive)
808 idetape_tape_t *tape = drive->driver_data;
809 idetape_stage_t *stage = tape->next_stage;
810 struct request *rq = &stage->rq;
812 debug_log(DBG_PROCS, "Enter %s\n", __func__);
815 printk(KERN_ERR "ide-tape: bug: Trying to activate a non"
816 " existing stage\n");
820 rq->rq_disk = tape->disk;
/* Hand the stage's buffer chain to the request via ->special. */
822 rq->special = (void *)stage->bh;
823 tape->active_data_request = rq;
824 tape->active_stage = stage;
825 tape->next_stage = stage->next;
829 * idetape_increase_max_pipeline_stages is a part of the feedback
830 * loop which tries to find the optimum number of stages. In the
831 * feedback loop, we are starting from a minimum maximum number of
832 * stages, and if we sense that the pipeline is empty, we try to
833 * increase it, until we reach the user compile time memory limit.
835 static void idetape_increase_max_pipeline_stages (ide_drive_t *drive)
837 idetape_tape_t *tape = drive->driver_data;
838 int increase = (tape->max_pipeline - tape->min_pipeline) / 10;
840 debug_log(DBG_PROCS, "Enter %s\n", __func__);
842 tape->max_stages += max(increase, 1);
843 tape->max_stages = max(tape->max_stages, tape->min_pipeline);
844 tape->max_stages = min(tape->max_stages, tape->max_pipeline);
848 * idetape_kfree_stage calls kfree to completely free a stage, along with
849 * its related buffers.
/*
 * Completely free one pipeline stage together with its buffer chain:
 * walk the idetape_bh list, releasing each buffer's pages (buffers may
 * span several pages, hence the PAGE_SIZE stepping) before freeing the
 * bh structures and the stage itself.
 * NOTE(review): damaged extraction -- the list walk, inner page loop and
 * kfree calls are missing from this view; comments only added here.
 */
851 static void __idetape_kfree_stage (idetape_stage_t *stage)
853 struct idetape_bh *prev_bh, *bh = stage->bh;
857 if (bh->b_data != NULL) {
858 size = (int) bh->b_size;
/* Release the buffer one page at a time. */
860 free_page((unsigned long) bh->b_data);
862 bh->b_data += PAGE_SIZE;
872 static void idetape_kfree_stage (idetape_tape_t *tape, idetape_stage_t *stage)
874 __idetape_kfree_stage(stage);
878 * idetape_remove_stage_head removes tape->first_stage from the pipeline.
879 * The caller should avoid race conditions.
/*
 * idetape_remove_stage_head removes tape->first_stage from the pipeline
 * and frees it. The caller must avoid races. Refuses (with a bug
 * message) to remove a NULL head or the currently active stage; when the
 * pipeline becomes empty it also clears last_stage and sanity-checks
 * that next_stage and the stage counter agree.
 * NOTE(review): damaged extraction -- early returns, the nr_stages
 * decrement and braces are missing from this view; comments only added.
 */
881 static void idetape_remove_stage_head (ide_drive_t *drive)
883 idetape_tape_t *tape = drive->driver_data;
884 idetape_stage_t *stage;
886 debug_log(DBG_PROCS, "Enter %s\n", __func__);
888 if (tape->first_stage == NULL) {
889 printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
/* Never free the stage the hardware is still working on. */
892 if (tape->active_stage == tape->first_stage) {
893 printk(KERN_ERR "ide-tape: bug: Trying to free our active "
/* Unlink the head and release it. */
897 stage = tape->first_stage;
898 tape->first_stage = stage->next;
899 idetape_kfree_stage(tape, stage);
/* Pipeline now empty: keep the tail pointer and counters consistent. */
901 if (tape->first_stage == NULL) {
902 tape->last_stage = NULL;
903 if (tape->next_stage != NULL)
904 printk(KERN_ERR "ide-tape: bug: tape->next_stage != NULL\n");
906 printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 now\n");
911 * This will free all the pipeline stages starting from new_last_stage->next
912 * to the end of the list, and point tape->last_stage to new_last_stage.
/*
 * Free all pipeline stages starting from new_last_stage->next to the end
 * of the list, decrementing the pending-stage count for each, then make
 * new_last_stage the tail of the pipeline and clear next_stage.
 * NOTE(review): damaged extraction -- the while-loop header and braces
 * are missing from this view; comments only added here.
 */
914 static void idetape_abort_pipeline(ide_drive_t *drive,
915 idetape_stage_t *new_last_stage)
917 idetape_tape_t *tape = drive->driver_data;
918 idetape_stage_t *stage = new_last_stage->next;
919 idetape_stage_t *nstage;
921 debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);
/* Walk and free everything past the new tail. */
924 nstage = stage->next;
925 idetape_kfree_stage(tape, stage);
927 --tape->nr_pending_stages;
/* Terminate the list at the new tail. */
931 new_last_stage->next = NULL;
932 tape->last_stage = new_last_stage;
933 tape->next_stage = NULL;
937 * idetape_end_request is used to finish servicing a request, and to
938 * insert a pending pipeline request into the main device queue.
/*
 * idetape_end_request finishes servicing a request and, in pipelined
 * mode, pushes the next pending pipeline request into the device queue.
 * uptodate is mapped to an error code (0 -> general error, 1 -> success,
 * anything else is passed through as the error). Non-special (regular
 * block) requests are simply handed to ide_end_request(). For a
 * completed pipelined data request it retires the active stage, aborts
 * the pipeline on end-of-data, activates/queues the next stage or grows
 * the stage limit, and clears the pipeline-active flag when idle.
 * All pipeline bookkeeping runs under tape->spinlock.
 * NOTE(review): damaged extraction -- the switch header, several locals,
 * returns and braces are missing from this view; comments only added.
 */
940 static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
942 struct request *rq = HWGROUP(drive)->rq;
943 idetape_tape_t *tape = drive->driver_data;
946 int remove_stage = 0;
947 idetape_stage_t *active_stage;
949 debug_log(DBG_PROCS, "Enter %s\n", __func__);
/* Map uptodate to our internal error code. */
952 case 0: error = IDETAPE_ERROR_GENERAL; break;
953 case 1: error = 0; break;
954 default: error = uptodate;
/* Success: no failed packet command left to retry. */
958 tape->failed_pc = NULL;
/* Regular block request: let the generic ide layer complete it. */
960 if (!blk_special_request(rq)) {
961 ide_end_request(drive, uptodate, nr_sects);
965 spin_lock_irqsave(&tape->spinlock, flags);
967 /* The request was a pipelined data transfer request */
968 if (tape->active_data_request == rq) {
969 active_stage = tape->active_stage;
970 tape->active_stage = NULL;
971 tape->active_data_request = NULL;
972 tape->nr_pending_stages--;
973 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
/* Write error: flag the pipeline; EOD additionally aborts it. */
976 set_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
977 if (error == IDETAPE_ERROR_EOD)
978 idetape_abort_pipeline(drive, active_stage);
980 } else if (rq->cmd[0] & REQ_IDETAPE_READ) {
981 if (error == IDETAPE_ERROR_EOD) {
982 set_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
983 idetape_abort_pipeline(drive, active_stage);
/* More stages queued: activate and submit the next one. */
986 if (tape->next_stage != NULL) {
987 idetape_activate_next_stage(drive);
990 * Insert the next request into the request queue.
992 (void) ide_do_drive_cmd(drive, tape->active_data_request, ide_end);
/* Pipeline ran dry: feedback loop grows the stage limit. */
994 idetape_increase_max_pipeline_stages(drive);
997 ide_end_drive_cmd(drive, 0, 0);
998 // blkdev_dequeue_request(rq);
1000 // end_that_request_last(rq);
1003 idetape_remove_stage_head(drive);
1004 if (tape->active_data_request == NULL)
1005 clear_bit(IDETAPE_PIPELINE_ACTIVE, &tape->flags);
1006 spin_unlock_irqrestore(&tape->spinlock, flags);
/*
 * Completion callback for the REQUEST SENSE packet command: on success,
 * feed the returned sense buffer to idetape_analyze_error() and complete
 * the request as successful; if the REQUEST SENSE itself failed, log and
 * complete the request as failed.
 * NOTE(review): damaged extraction -- braces, the else keyword and the
 * return statement are missing from this view; comments only added here.
 */
1010 static ide_startstop_t idetape_request_sense_callback (ide_drive_t *drive)
1012 idetape_tape_t *tape = drive->driver_data;
1014 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1016 if (!tape->pc->error) {
1017 idetape_analyze_error(drive, tape->pc->buffer);
1018 idetape_end_request(drive, 1, 0);
1020 printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - Aborting request!\n");
1021 idetape_end_request(drive, 0, 0);
1026 static void idetape_create_request_sense_cmd (idetape_pc_t *pc)
1028 idetape_init_pc(pc);
1029 pc->c[0] = REQUEST_SENSE;
1031 pc->request_transfer = 20;
1032 pc->callback = &idetape_request_sense_callback;
1035 static void idetape_init_rq(struct request *rq, u8 cmd)
1037 memset(rq, 0, sizeof(*rq));
1038 rq->cmd_type = REQ_TYPE_SPECIAL;
1043 * idetape_queue_pc_head generates a new packet command request in front
1044 * of the request queue, before the current request, so that it will be
1045 * processed immediately, on the next pass through the driver.
1047 * idetape_queue_pc_head is called from the request handling part of
1048 * the driver (the "bottom" part). Safe storage for the request should
1049 * be allocated with idetape_next_pc_storage and idetape_next_rq_storage
1050 * before calling idetape_queue_pc_head.
1052 * Memory for those requests is pre-allocated at initialization time, and
1053 * is limited to IDETAPE_PC_STACK requests. We assume that we have enough
1054 * space for the maximum possible number of inter-dependent packet commands.
1056 * The higher level of the driver - The ioctl handler and the character
1057 * device handling functions should queue request to the lower level part
1058 * and wait for their completion using idetape_queue_pc_tail or
1059 * idetape_queue_rw_tail.
1061 static void idetape_queue_pc_head (ide_drive_t *drive, idetape_pc_t *pc,struct request *rq)
1063 struct ide_tape_obj *tape = drive->driver_data;
1065 idetape_init_rq(rq, REQ_IDETAPE_PC1);
1066 rq->buffer = (char *) pc;
1067 rq->rq_disk = tape->disk;
1068 (void) ide_do_drive_cmd(drive, rq, ide_preempt);
1072 * idetape_retry_pc is called when an error was detected during the
1073 * last packet command. We queue a request sense packet command in
1074 * the head of the request list.
1076 static ide_startstop_t idetape_retry_pc (ide_drive_t *drive)
1078 idetape_tape_t *tape = drive->driver_data;
1082 (void)ide_read_error(drive);
1083 pc = idetape_next_pc_storage(drive);
1084 rq = idetape_next_rq_storage(drive);
1085 idetape_create_request_sense_cmd(pc);
1086 set_bit(IDETAPE_IGNORE_DSC, &tape->flags);
1087 idetape_queue_pc_head(drive, pc, rq);
1092 * idetape_postpone_request postpones the current request so that
1093 * ide.c will be able to service requests from another device on
1094 * the same hwgroup while we are polling for DSC.
1096 static void idetape_postpone_request (ide_drive_t *drive)
1098 idetape_tape_t *tape = drive->driver_data;
1100 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1102 tape->postponed_rq = HWGROUP(drive)->rq;
1103 ide_stall_queue(drive, tape->dsc_polling_frequency);
1107 * idetape_pc_intr is the usual interrupt handler which will be called
1108 * during a packet command. We will transfer some of the data (as
1109 * requested by the drive) and will re-point interrupt handler to us.
1110 * When data transfer is finished, we will act according to the
1111 * algorithm described before idetape_issue_packet_command.
/*
 * IRQ handler for an in-flight packet command (armed via ide_set_handler()).
 * Each invocation either winds down a DMA transfer, completes the command,
 * or PIO-transfers the data chunk the drive requests and re-arms itself.
 * NOTE(review): this excerpt elides some original lines (braces/locals).
 */
1114 static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
1116 ide_hwif_t *hwif = drive->hwif;
1117 idetape_tape_t *tape = drive->driver_data;
1118 idetape_pc_t *pc = tape->pc;
	/* counter for the debug-only error-injection path below */
1121 static int error_sim_count = 0;
1126 debug_log(DBG_PROCS, "Enter %s - interrupt handler\n", __func__);
1128 /* Clear the interrupt */
1129 stat = ide_read_status(drive);
	/* A DMA transfer was in flight: terminate it and classify the outcome. */
1131 if (test_bit(PC_DMA_IN_PROGRESS, &pc->flags)) {
1132 if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) {
1134 * A DMA error is sometimes expected. For example,
1135 * if the tape is crossing a filemark during a
1136 * READ command, it will issue an irq and position
1137 * itself before the filemark, so that only a partial
1138 * data transfer will occur (which causes the DMA
1139 * error). In that case, we will later ask the tape
1140 * how much bytes of the original request were
1141 * actually transferred (we can't receive that
1142 * information from the DMA engine on most chipsets).
1146 * On the contrary, a DMA error is never expected;
1147 * it usually indicates a hardware error or abort.
1148 * If the tape crosses a filemark during a READ
1149 * command, it will issue an irq and position itself
1150 * after the filemark (not before). Only a partial
1151 * data transfer will occur, but no DMA error.
1154 set_bit(PC_DMA_ERROR, &pc->flags);
	/* DMA completed cleanly: the entire request was transferred. */
1156 pc->actually_transferred = pc->request_transfer;
1157 idetape_update_buffers(pc);
1159 debug_log(DBG_PROCS, "DMA finished\n");
1163 /* No more interrupts */
	/* DRQ clear means the drive is done issuing data-phase interrupts. */
1164 if ((stat & DRQ_STAT) == 0) {
1165 debug_log(DBG_SENSE, "Packet command completed, %d bytes"
1166 " transferred\n", pc->actually_transferred);
1168 clear_bit(PC_DMA_IN_PROGRESS, &pc->flags);
	/* Debug-only: synthesize an error on every 100th READ/WRITE. */
1172 if ((pc->c[0] == WRITE_6 || pc->c[0] == READ_6) &&
1173 (++error_sim_count % 100) == 0) {
1174 printk(KERN_INFO "ide-tape: %s: simulating error\n",
1179 if ((stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE)
1181 if ((stat & ERR_STAT) || test_bit(PC_DMA_ERROR, &pc->flags)) {
1182 /* Error detected */
1183 debug_log(DBG_ERR, "%s: I/O error\n", tape->name);
	/* A failing REQUEST SENSE cannot be retried with another sense. */
1185 if (pc->c[0] == REQUEST_SENSE) {
1186 printk(KERN_ERR "ide-tape: I/O error in request sense command\n");
1187 return ide_do_reset(drive);
1189 debug_log(DBG_ERR, "[cmd %x]: check condition\n",
1192 /* Retry operation */
1193 return idetape_retry_pc(drive);
	/* Media-access command still executing: DSC not yet set, so start
	 * polling for it instead of busy-waiting in the IRQ path. */
1196 if (test_bit(PC_WAIT_FOR_DSC, &pc->flags) &&
1197 (stat & SEEK_STAT) == 0) {
1198 /* Media access command */
1199 tape->dsc_polling_start = jiffies;
1200 tape->dsc_polling_frequency = IDETAPE_DSC_MA_FAST;
1201 tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
1202 /* Allow ide.c to handle other requests */
1203 idetape_postpone_request(drive);
1205 if (tape->failed_pc == pc)
1206 below clears the retry bookkeeping once the command succeeds */
1207 tape->failed_pc = NULL;
1208 /* Command finished - Call the callback function */
1209 return pc->callback(drive);
	/* More data-phase interrupts while DMA was active is a protocol
	 * violation: give up on DMA and reset the drive. */
1211 if (test_and_clear_bit(PC_DMA_IN_PROGRESS, &pc->flags)) {
1212 printk(KERN_ERR "ide-tape: The tape wants to issue more "
1213 "interrupts in DMA mode\n");
1214 printk(KERN_ERR "ide-tape: DMA disabled, reverting to PIO\n");
1216 return ide_do_reset(drive);
1218 /* Get the number of bytes to transfer on this interrupt. */
1219 bcount = (hwif->INB(IDE_BCOUNTH_REG) << 8) |
1220 hwif->INB(IDE_BCOUNTL_REG);
1222 ireason = hwif->INB(IDE_IREASON_REG);
	/* During the data phase CoD must be 0 (data, not command). */
1225 printk(KERN_ERR "ide-tape: CoD != 0 in idetape_pc_intr\n");
1226 return ide_do_reset(drive);
	/* IO bit and our transfer direction must disagree (IO set = drive
	 * sends to host); equality means the drive flipped direction. */
1228 if (((ireason & IO) == IO) == test_bit(PC_WRITING, &pc->flags)) {
1229 /* Hopefully, we will never get here */
1230 printk(KERN_ERR "ide-tape: We wanted to %s, ",
1231 (ireason & IO) ? "Write" : "Read");
1232 printk(KERN_ERR "ide-tape: but the tape wants us to %s !\n",
1233 (ireason & IO) ? "Read" : "Write");
1234 return ide_do_reset(drive);
1236 if (!test_bit(PC_WRITING, &pc->flags)) {
1237 /* Reading - Check that we have enough space */
1238 temp = pc->actually_transferred + bcount;
1239 if (temp > pc->request_transfer) {
1240 if (temp > pc->buffer_size) {
1241 printk(KERN_ERR "ide-tape: The tape wants to send us more data than expected - discarding data\n");
	/* Drain the excess bytes and keep servicing interrupts. */
1242 idetape_discard_data(drive, bcount);
1243 ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
1246 debug_log(DBG_SENSE, "The tape wants to send us more "
1247 "data than expected - allowing transfer\n");
	/* PIO-transfer this interrupt's chunk in the proper direction. */
1251 if (test_bit(PC_WRITING, &pc->flags)) {
1253 idetape_output_buffers(drive, pc, bcount);
1255 /* Write the current buffer */
1256 hwif->atapi_output_bytes(drive, pc->current_position,
1260 idetape_input_buffers(drive, pc, bcount);
1262 /* Read the current buffer */
1263 hwif->atapi_input_bytes(drive, pc->current_position,
1266 /* Update the current position */
1267 pc->actually_transferred += bcount;
1268 pc->current_position += bcount;
1270 debug_log(DBG_SENSE, "[cmd %x] transferred %d bytes on that intr.\n",
1273 /* And set the interrupt handler again */
1274 ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
1279 * Packet Command Interface
1281 * The current Packet Command is available in tape->pc, and will not
1282 * change until we finish handling it. Each packet command is associated
1283 * with a callback function that will be called when the command is
1286 * The handling will be done in three stages:
1288 * 1. idetape_issue_packet_command will send the packet command to the
1289 * drive, and will set the interrupt handler to idetape_pc_intr.
1291 * 2. On each interrupt, idetape_pc_intr will be called. This step
1292 * will be repeated until the device signals us that no more
1293 * interrupts will be issued.
1295 * 3. ATAPI Tape media access commands have immediate status with a
1296 * delayed process. In case of a successful initiation of a
1297 * media access packet command, the DSC bit will be set when the
1298 * actual execution of the command is finished.
1299 * Since the tape drive will not issue an interrupt, we have to
1300 * poll for this event. In this case, we define the request as
1301 * "low priority request" by setting rq_status to
1302 * IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and exit
1305 * ide.c will then give higher priority to requests which
1306 * originate from the other device, until we change rq_status back.
1309 * 4. When the packet command is finished, it will be checked for errors.
1311 * 5. In case an error was found, we queue a request sense packet
1312 * command in front of the request queue and retry the operation
1313 * up to IDETAPE_MAX_PC_RETRIES times.
1315 * 6. In case no error was found, or we decided to give up and not
1316 * to retry again, the callback function will be called and then
1317 * we will handle the next request.
/*
 * Stage 1b of packet-command issue: after WIN_PACKETCMD was written, wait
 * for the drive to assert DRQ, verify the interrupt-reason register says
 * "command phase" (CoD=1, IO=0), arm idetape_pc_intr, optionally start
 * DMA, and finally send the 12-byte command packet.
 */
1320 static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
1322 ide_hwif_t *hwif = drive->hwif;
1323 idetape_tape_t *tape = drive->driver_data;
1324 idetape_pc_t *pc = tape->pc;
1326 ide_startstop_t startstop;
	/* Drive must raise DRQ (and drop BUSY) before it will accept the CDB. */
1329 if (ide_wait_stat(&startstop,drive,DRQ_STAT,BUSY_STAT,WAIT_READY)) {
1330 printk(KERN_ERR "ide-tape: Strange, packet command initiated yet DRQ isn't asserted\n");
1333 ireason = hwif->INB(IDE_IREASON_REG);
	/* Some drives are slow to settle; re-read ireason a few times. */
1334 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
1335 printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing "
1336 "a packet command, retrying\n");
1338 ireason = hwif->INB(IDE_IREASON_REG);
1340 printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while "
1341 "issuing a packet command, ignoring\n");
	/* Still wrong phase after retries: only a reset can recover. */
1346 if ((ireason & CD) == 0 || (ireason & IO)) {
1347 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
1348 "a packet command\n");
1349 return ide_do_reset(drive);
1351 /* Set the interrupt routine */
1352 ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
1353 #ifdef CONFIG_BLK_DEV_IDEDMA
1354 /* Begin DMA, if necessary */
1355 if (test_bit(PC_DMA_IN_PROGRESS, &pc->flags))
1356 hwif->dma_start(drive);
1358 /* Send the actual packet */
1359 HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
/*
 * Stage 1 of packet-command handling: install @pc as the current command,
 * enforce the retry limit, set up PIO byte counts / optional DMA, and kick
 * the WIN_PACKETCMD opcode.  Continues in idetape_transfer_pc(), either via
 * the DRQ interrupt or by a direct call, depending on drive capability.
 */
1363 static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape_pc_t *pc)
1365 ide_hwif_t *hwif = drive->hwif;
1366 idetape_tape_t *tape = drive->driver_data;
	/* Two REQUEST SENSE in a row means sense itself failed - driver bug. */
1370 if (tape->pc->c[0] == REQUEST_SENSE &&
1371 pc->c[0] == REQUEST_SENSE) {
1372 printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
1373 "Two request sense in serial were issued\n");
	/* Remember the first failing command so retries can reference it. */
1376 if (tape->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
1377 tape->failed_pc = pc;
1378 /* Set the current packet command */
	/* Give up after IDETAPE_MAX_PC_RETRIES, or immediately on PC_ABORT. */
1381 if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
1382 test_bit(PC_ABORT, &pc->flags)) {
1384 * We will "abort" retrying a packet command in case
1385 * a legitimate error code was received (crossing a
1386 * filemark, or end of the media, for example).
1388 if (!test_bit(PC_ABORT, &pc->flags)) {
	/* Suppress the log for the expected "becoming ready" TUR sense
	 * (key 2 / asc 4 / ascq 1 or 8). */
1389 if (!(pc->c[0] == TEST_UNIT_READY &&
1390 tape->sense_key == 2 && tape->asc == 4 &&
1391 (tape->ascq == 1 || tape->ascq == 8))) {
1392 printk(KERN_ERR "ide-tape: %s: I/O error, "
1393 "pc = %2x, key = %2x, "
1394 "asc = %2x, ascq = %2x\n",
1395 tape->name, pc->c[0],
1396 tape->sense_key, tape->asc,
1400 pc->error = IDETAPE_ERROR_GENERAL;
1402 tape->failed_pc = NULL;
1403 return pc->callback(drive);
1405 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
1408 /* We haven't transferred any data yet */
1409 pc->actually_transferred = 0;
1410 pc->current_position = pc->buffer;
1411 /* Request to transfer the entire buffer at once */
1412 bcount = pc->request_transfer;
	/* A previous DMA failure for this pc disables DMA for the retry. */
1414 if (test_and_clear_bit(PC_DMA_ERROR, &pc->flags)) {
1415 printk(KERN_WARNING "ide-tape: DMA disabled, "
1416 "reverting to PIO\n");
1419 if (test_bit(PC_DMA_RECOMMENDED, &pc->flags) && drive->using_dma)
1420 dma_ok = !hwif->dma_setup(drive);
1422 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
1423 IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
1425 if (dma_ok) /* Will begin DMA later */
1426 set_bit(PC_DMA_IN_PROGRESS, &pc->flags);
	/* DRQ-interrupt drives signal readiness for the CDB via an IRQ;
	 * others let us poll and send it immediately. */
1427 if (test_bit(IDETAPE_DRQ_INTERRUPT, &tape->flags)) {
1428 ide_execute_command(drive, WIN_PACKETCMD, &idetape_transfer_pc,
1429 IDETAPE_WAIT_CMD, NULL);
1432 hwif->OUTB(WIN_PACKETCMD, IDE_COMMAND_REG);
1433 return idetape_transfer_pc(drive);
1438 * General packet command callback function.
/*
 * Default packet-command completion callback: end the current request,
 * reporting success (uptodate=1) iff the command finished without error.
 */
1440 static ide_startstop_t idetape_pc_callback (ide_drive_t *drive)
1442 idetape_tape_t *tape = drive->driver_data;
1444 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1446 idetape_end_request(drive, tape->pc->error ? 0 : 1, 0);
1451 * A mode sense command is used to "sense" tape parameters.
/*
 * Build a MODE SENSE packet command for @page_code.  The expected transfer
 * length depends on the page being requested; DBD is set for everything
 * except the block-descriptor pseudo-page.
 */
1453 static void idetape_create_mode_sense_cmd (idetape_pc_t *pc, u8 page_code)
1455 idetape_init_pc(pc);
1456 pc->c[0] = MODE_SENSE;
1457 if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
1458 pc->c[1] = 8; /* DBD = 1 - Don't return block descriptors */
1459 pc->c[2] = page_code;
1461 * Changed pc->c[3] to 0 (255 will at best return unused info).
1463 * For SCSI this byte is defined as subpage instead of high byte
1464 * of length and some IDE drives seem to interpret it this way
1465 * and return an error when 255 is used.
	/* Allocation length: request the max; extra bytes are discarded. */
1468 pc->c[4] = 255; /* (We will just discard data in that case) */
1469 if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
1470 pc->request_transfer = 12;
1471 else if (page_code == IDETAPE_CAPABILITIES_PAGE)
1472 pc->request_transfer = 24;
1474 pc->request_transfer = 50;
1475 pc->callback = &idetape_pc_callback;
/*
 * Estimate pipeline throughput and derive tape->max_insert_speed, which
 * throttles how fast user data may be inserted into the pipeline.  Two
 * head-speed estimates are maintained (a long-window "controlled" one and
 * a short-window "uncontrolled" one); the policy applied depends on the
 * user-selectable tape->speed_control mode.
 */
1478 static void calculate_speeds(ide_drive_t *drive)
1480 idetape_tape_t *tape = drive->driver_data;
1481 int full = 125, empty = 75;
	/* Roll the 2-minute controlled measurement window forward. */
1483 if (time_after(jiffies, tape->controlled_pipeline_head_time + 120 * HZ)) {
1484 tape->controlled_previous_pipeline_head = tape->controlled_last_pipeline_head;
1485 tape->controlled_previous_head_time = tape->controlled_pipeline_head_time;
1486 tape->controlled_last_pipeline_head = tape->pipeline_head;
1487 tape->controlled_pipeline_head_time = jiffies;
	/* Speed in units of (stages * 32 * HZ) per elapsed jiffies window. */
1489 if (time_after(jiffies, tape->controlled_pipeline_head_time + 60 * HZ))
1490 tape->controlled_pipeline_head_speed = (tape->pipeline_head - tape->controlled_last_pipeline_head) * 32 * HZ / (jiffies - tape->controlled_pipeline_head_time);
1491 else if (time_after(jiffies, tape->controlled_previous_head_time))
1492 tape->controlled_pipeline_head_speed = (tape->pipeline_head - tape->controlled_previous_pipeline_head) * 32 * HZ / (jiffies - tape->controlled_previous_head_time);
	/* Pipeline not saturated: keep the short-window estimate fresh. */
1494 if (tape->nr_pending_stages < tape->max_stages /*- 1 */) {
1495 /* -1 for read mode error recovery */
1496 if (time_after(jiffies, tape->uncontrolled_previous_head_time + 10 * HZ)) {
1497 tape->uncontrolled_pipeline_head_time = jiffies;
1498 tape->uncontrolled_pipeline_head_speed = (tape->pipeline_head - tape->uncontrolled_previous_pipeline_head) * 32 * HZ / (jiffies - tape->uncontrolled_previous_head_time);
1501 tape->uncontrolled_previous_head_time = jiffies;
1502 tape->uncontrolled_previous_pipeline_head = tape->pipeline_head;
1503 if (time_after(jiffies, tape->uncontrolled_pipeline_head_time + 30 * HZ)) {
1504 tape->uncontrolled_pipeline_head_time = jiffies;
1507 tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed, tape->controlled_pipeline_head_speed);
	/* speed_control 0: unthrottled; 1: piecewise ramp around half-full;
	 * 2: linear interpolation between empty/full percentages;
	 * other: treat the value itself as the fixed limit. */
1508 if (tape->speed_control == 0) {
1509 tape->max_insert_speed = 5000;
1510 } else if (tape->speed_control == 1) {
1511 if (tape->nr_pending_stages >= tape->max_stages / 2)
1512 tape->max_insert_speed = tape->pipeline_head_speed +
1513 (1100 - tape->pipeline_head_speed) * 2 * (tape->nr_pending_stages - tape->max_stages / 2) / tape->max_stages;
1515 tape->max_insert_speed = 500 +
1516 (tape->pipeline_head_speed - 500) * 2 * tape->nr_pending_stages / tape->max_stages;
1517 if (tape->nr_pending_stages >= tape->max_stages * 99 / 100)
1518 tape->max_insert_speed = 5000;
1519 } else if (tape->speed_control == 2) {
1520 tape->max_insert_speed = tape->pipeline_head_speed * empty / 100 +
1521 (tape->pipeline_head_speed * full / 100 - tape->pipeline_head_speed * empty / 100) * tape->nr_pending_stages / tape->max_stages;
1523 tape->max_insert_speed = tape->speed_control;
	/* Never throttle below a 500 floor. */
1524 tape->max_insert_speed = max(tape->max_insert_speed, 500);
/*
 * Called when DSC polling says a media-access command may have finished:
 * if DSC (SEEK_STAT) is set, either retry on error or complete via the
 * command's callback; the non-DSC path flags a general error.
 */
1527 static ide_startstop_t idetape_media_access_finished (ide_drive_t *drive)
1529 idetape_tape_t *tape = drive->driver_data;
1530 idetape_pc_t *pc = tape->pc;
1533 stat = ide_read_status(drive);
1535 if (stat & SEEK_STAT) {
1536 if (stat & ERR_STAT) {
1537 /* Error detected */
	/* TEST UNIT READY failures are routine; don't log those. */
1538 if (pc->c[0] != TEST_UNIT_READY)
1539 printk(KERN_ERR "ide-tape: %s: I/O error, ",
1541 /* Retry operation */
1542 return idetape_retry_pc(drive);
1545 if (tape->failed_pc == pc)
1546 tape->failed_pc = NULL;
	/* DSC never came up: report a general error to the callback. */
1548 pc->error = IDETAPE_ERROR_GENERAL;
1549 tape->failed_pc = NULL;
1551 return pc->callback(drive);
/*
 * Completion callback for READ/WRITE packet commands: update throughput
 * statistics (insert/average speed), advance the logical tape position by
 * the number of whole blocks actually transferred, and end the request
 * with the command's success/error status.
 */
1554 static ide_startstop_t idetape_rw_callback (ide_drive_t *drive)
1556 idetape_tape_t *tape = drive->driver_data;
1557 struct request *rq = HWGROUP(drive)->rq;
	/* Whole blocks moved this command (partial blocks are dropped). */
1558 int blocks = tape->pc->actually_transferred / tape->tape_block_size;
1560 tape->avg_size += blocks * tape->tape_block_size;
1561 tape->insert_size += blocks * tape->tape_block_size;
	/* Restart the insert-speed measurement window every ~1 MiB. */
1562 if (tape->insert_size > 1024 * 1024)
1563 tape->measure_insert_time = 1;
1564 if (tape->measure_insert_time) {
1565 tape->measure_insert_time = 0;
1566 tape->insert_time = jiffies;
1567 tape->insert_size = 0;
1569 if (time_after(jiffies, tape->insert_time))
1570 tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time);
	/* Refresh the 1-second rolling average speed (KiB/s). */
1571 if (time_after_eq(jiffies, tape->avg_time + HZ)) {
1572 tape->avg_speed = tape->avg_size * HZ / (jiffies - tape->avg_time) / 1024;
1574 tape->avg_time = jiffies;
1576 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1578 tape->first_frame_position += blocks;
1579 rq->current_nr_sectors -= blocks;
1581 if (!tape->pc->error)
1582 idetape_end_request(drive, 1, 0);
1584 idetape_end_request(drive, tape->pc->error, 0);
/*
 * Build a READ packet command for @length tape blocks into @bh.  DMA is
 * recommended only for full-stage transfers, where it pays off.
 */
1588 static void idetape_create_read_cmd(idetape_tape_t *tape, idetape_pc_t *pc, unsigned int length, struct idetape_bh *bh)
1590 idetape_init_pc(pc);
	/* Big-endian block count in bytes 1-4 of the CDB (byte 1 overlapped). */
1592 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
1594 pc->callback = &idetape_rw_callback;
1596 atomic_set(&bh->b_count, 0);
1598 pc->request_transfer = pc->buffer_size = length * tape->tape_block_size;
1599 if (pc->request_transfer == tape->stage_size)
1600 set_bit(PC_DMA_RECOMMENDED, &pc->flags);
/*
 * Build a READ BUFFER command that retrieves the drive's internal buffer
 * (used to salvage the faulty block after a write error).  Resets the
 * byte counts of the destination bh chain before the transfer.
 */
1603 static void idetape_create_read_buffer_cmd(idetape_tape_t *tape, idetape_pc_t *pc, unsigned int length, struct idetape_bh *bh)
1606 struct idetape_bh *p = bh;
1608 idetape_init_pc(pc);
1609 pc->c[0] = READ_BUFFER;
1610 pc->c[1] = IDETAPE_RETRIEVE_FAULTY_BLOCK;
	/* 16-bit big-endian allocation length. */
1611 pc->c[7] = size >> 8;
1612 pc->c[8] = size & 0xff;
1613 pc->callback = &idetape_pc_callback;
1615 atomic_set(&bh->b_count, 0);
	/* Zero the fill counters along the whole bh chain. */
1618 atomic_set(&p->b_count, 0);
1621 pc->request_transfer = pc->buffer_size = size;
/*
 * Build a WRITE packet command for @length tape blocks sourced from @bh.
 * Mirrors idetape_create_read_cmd() but marks the pc as a write and seeds
 * the outgoing data pointers from the first bh.
 */
1624 static void idetape_create_write_cmd(idetape_tape_t *tape, idetape_pc_t *pc, unsigned int length, struct idetape_bh *bh)
1626 idetape_init_pc(pc);
	/* Big-endian block count in bytes 1-4 of the CDB. */
1628 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
1630 pc->callback = &idetape_rw_callback;
1631 set_bit(PC_WRITING, &pc->flags);
1633 pc->b_data = bh->b_data;
1634 pc->b_count = atomic_read(&bh->b_count);
1636 pc->request_transfer = pc->buffer_size = length * tape->tape_block_size;
	/* DMA only pays off for full-stage transfers. */
1637 if (pc->request_transfer == tape->stage_size)
1638 set_bit(PC_DMA_RECOMMENDED, &pc->flags);
1642 * idetape_do_request is our request handling function.
/*
 * Block-layer request handler for the tape.  Rejects non-special requests,
 * resumes failed-command recovery, handles DSC postponement/timeout, then
 * dispatches on the REQ_IDETAPE_* flag in rq->cmd[0] to build and issue
 * the matching packet command.
 */
1644 static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1645 struct request *rq, sector_t block)
1647 idetape_tape_t *tape = drive->driver_data;
1648 idetape_pc_t *pc = NULL;
1649 struct request *postponed_rq = tape->postponed_rq;
1652 debug_log(DBG_SENSE, "sector: %ld, nr_sectors: %ld,"
1653 " current_nr_sectors: %d\n",
1654 rq->sector, rq->nr_sectors, rq->current_nr_sectors);
	/* Only driver-generated special requests are valid for a tape. */
1656 if (!blk_special_request(rq)) {
1658 * We do not support buffer cache originated requests.
1660 printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
1661 "request queue (%d)\n", drive->name, rq->cmd_type);
1662 ide_end_request(drive, 0, 0);
1667 * Retry a failed packet command
	/* The queued REQUEST SENSE finished; reissue the original command. */
1669 if (tape->failed_pc != NULL &&
1670 tape->pc->c[0] == REQUEST_SENSE) {
1671 return idetape_issue_packet_command(drive, tape->failed_pc);
1673 if (postponed_rq != NULL)
1674 if (rq != postponed_rq) {
1675 printk(KERN_ERR "ide-tape: ide-tape.c bug - "
1676 "Two DSC requests were queued\n");
1677 idetape_end_request(drive, 0, 0);
1681 tape->postponed_rq = NULL;
1684 * If the tape is still busy, postpone our request and service
1685 * the other device meanwhile.
1687 stat = ide_read_status(drive);
	/* Without dsc_overlap, only media-access completions wait on DSC. */
1689 if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
1690 set_bit(IDETAPE_IGNORE_DSC, &tape->flags);
	/* First request after a reset: don't trust the stale DSC bit. */
1692 if (drive->post_reset == 1) {
1693 set_bit(IDETAPE_IGNORE_DSC, &tape->flags);
1694 drive->post_reset = 0;
1697 if (tape->tape_still_time > 100 && tape->tape_still_time < 200)
1698 tape->measure_insert_time = 1;
1699 if (time_after(jiffies, tape->insert_time))
1700 tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time);
1701 calculate_speeds(drive);
	/* DSC low: postpone and poll, escalating to a reset on timeout. */
1702 if (!test_and_clear_bit(IDETAPE_IGNORE_DSC, &tape->flags) &&
1703 (stat & SEEK_STAT) == 0) {
1704 if (postponed_rq == NULL) {
1705 tape->dsc_polling_start = jiffies;
1706 tape->dsc_polling_frequency = tape->best_dsc_rw_frequency;
1707 tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
1708 } else if (time_after(jiffies, tape->dsc_timeout)) {
1709 printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
1711 if (rq->cmd[0] & REQ_IDETAPE_PC2) {
1712 idetape_media_access_finished(drive);
1715 return ide_do_reset(drive);
1717 } else if (time_after(jiffies, tape->dsc_polling_start + IDETAPE_DSC_MA_THRESHOLD))
1718 tape->dsc_polling_frequency = IDETAPE_DSC_MA_SLOW;
1719 idetape_postpone_request(drive);
	/* Dispatch: build the packet command matching the request type. */
1722 if (rq->cmd[0] & REQ_IDETAPE_READ) {
1723 tape->buffer_head++;
1724 tape->postpone_cnt = 0;
1725 pc = idetape_next_pc_storage(drive);
1726 idetape_create_read_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
1729 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
1730 tape->buffer_head++;
1731 tape->postpone_cnt = 0;
1732 pc = idetape_next_pc_storage(drive);
1733 idetape_create_write_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
1736 if (rq->cmd[0] & REQ_IDETAPE_READ_BUFFER) {
1737 tape->postpone_cnt = 0;
1738 pc = idetape_next_pc_storage(drive);
1739 idetape_create_read_buffer_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
	/* PC1: a queued generic packet command; promote it to PC2 so its
	 * eventual completion is handled by media_access_finished. */
1742 if (rq->cmd[0] & REQ_IDETAPE_PC1) {
1743 pc = (idetape_pc_t *) rq->buffer;
1744 rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
1745 rq->cmd[0] |= REQ_IDETAPE_PC2;
1748 if (rq->cmd[0] & REQ_IDETAPE_PC2) {
1749 idetape_media_access_finished(drive);
1754 return idetape_issue_packet_command(drive, pc);
1758 * Pipeline related functions
/*
 * True when the pipeline is being serviced: the ACTIVE flag is set and a
 * data request is currently in flight.
 */
1760 static inline int idetape_pipeline_active (idetape_tape_t *tape)
1764 rc1 = test_bit(IDETAPE_PIPELINE_ACTIVE, &tape->flags);
1765 rc2 = (tape->active_data_request != NULL);
1770 * idetape_kmalloc_stage uses __get_free_page to allocate a pipeline
1771 * stage, along with all the necessary small buffers which together make
1772 * a buffer of size tape->stage_size (or a bit more). We attempt to
1773 * combine sequential pages as much as possible.
1775 * Returns a pointer to the new allocated stage, or NULL if we
1776 * can't (or don't want to) allocate a stage.
1778 * Pipeline stages are optional and are used to increase performance.
1779 * If we can't allocate them, we'll manage without them.
/*
 * Allocate one pipeline stage: a stage struct plus a chain of idetape_bh
 * buffers totalling tape->stage_size bytes, built from single pages and
 * merging physically adjacent pages into one bh where possible.
 * @full pre-marks the buffers as filled; @clear zeroes them (page path
 * always memsets here).  Returns NULL on allocation failure.
 */
1781 static idetape_stage_t *__idetape_kmalloc_stage (idetape_tape_t *tape, int full, int clear)
1783 idetape_stage_t *stage;
1784 struct idetape_bh *prev_bh, *bh;
1785 int pages = tape->pages_per_stage;
1786 char *b_data = NULL;
1788 if ((stage = kmalloc(sizeof (idetape_stage_t),GFP_KERNEL)) == NULL)
1792 bh = stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1795 bh->b_reqnext = NULL;
1796 if ((bh->b_data = (char *) __get_free_page (GFP_KERNEL)) == NULL)
1799 memset(bh->b_data, 0, PAGE_SIZE);
1800 bh->b_size = PAGE_SIZE;
1801 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1804 if ((b_data = (char *) __get_free_page (GFP_KERNEL)) == NULL)
1807 memset(b_data, 0, PAGE_SIZE);
	/* New page directly precedes the current bh: grow it backwards. */
1808 if (bh->b_data == b_data + PAGE_SIZE) {
1809 bh->b_size += PAGE_SIZE;
1810 bh->b_data -= PAGE_SIZE;
1812 atomic_add(PAGE_SIZE, &bh->b_count);
	/* New page directly follows the current bh: grow it forwards. */
1815 if (b_data == bh->b_data + bh->b_size) {
1816 bh->b_size += PAGE_SIZE;
1818 atomic_add(PAGE_SIZE, &bh->b_count);
	/* Non-contiguous page: start a new bh and link it into the chain. */
1822 if ((bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL)) == NULL) {
1823 free_page((unsigned long) b_data);
1826 bh->b_reqnext = NULL;
1827 bh->b_data = b_data;
1828 bh->b_size = PAGE_SIZE;
1829 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1830 prev_bh->b_reqnext = bh;
	/* Trim the last bh so the chain totals exactly stage_size. */
1832 bh->b_size -= tape->excess_bh_size;
1834 atomic_sub(tape->excess_bh_size, &bh->b_count);
	/* Failure path: release everything allocated so far. */
1837 __idetape_kfree_stage(stage);
/*
 * Get a pipeline stage: reuse the cached one when available, refuse when
 * the pipeline is already at max_stages, otherwise allocate a fresh,
 * empty, non-cleared stage.
 */
1841 static idetape_stage_t *idetape_kmalloc_stage (idetape_tape_t *tape)
1843 idetape_stage_t *cache_stage = tape->cache_stage;
1845 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1847 if (tape->nr_stages >= tape->max_stages)
1849 if (cache_stage != NULL) {
1850 tape->cache_stage = NULL;
1853 return __idetape_kmalloc_stage(tape, 0, 0);
/*
 * Copy @n bytes from user space into the current stage, filling the bh
 * chain starting at tape->bh and advancing to the next bh whenever one
 * becomes full.  Uses b_count as the per-bh fill level.
 */
1856 static int idetape_copy_stage_from_user (idetape_tape_t *tape, idetape_stage_t *stage, const char __user *buf, int n)
1858 struct idetape_bh *bh = tape->bh;
1864 printk(KERN_ERR "ide-tape: bh == NULL in "
1865 "idetape_copy_stage_from_user\n");
	/* Copy at most the free space remaining in this bh. */
1868 count = min((unsigned int)(bh->b_size - atomic_read(&bh->b_count)), (unsigned int)n);
1869 if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf, count))
1872 atomic_add(count, &bh->b_count);
	/* bh full: move on to the next one and reset its fill level. */
1874 if (atomic_read(&bh->b_count) == bh->b_size) {
1877 atomic_set(&bh->b_count, 0);
/*
 * Copy @n bytes from the current stage out to user space, consuming the
 * bh chain via the tape->b_data / tape->b_count cursor and stepping to
 * the next bh when the current one is drained.
 */
1884 static int idetape_copy_stage_to_user (idetape_tape_t *tape, char __user *buf, idetape_stage_t *stage, int n)
1886 struct idetape_bh *bh = tape->bh;
1892 printk(KERN_ERR "ide-tape: bh == NULL in "
1893 "idetape_copy_stage_to_user\n");
1896 count = min(tape->b_count, n);
1897 if (copy_to_user(buf, tape->b_data, count))
1900 tape->b_data += count;
1901 tape->b_count -= count;
	/* Current bh drained: advance the cursor to the next bh. */
1903 if (!tape->b_count) {
1904 tape->bh = bh = bh->b_reqnext;
1906 tape->b_data = bh->b_data;
1907 tape->b_count = atomic_read(&bh->b_count);
/*
 * Reset the chrdev copy cursor to the start of the merge stage.  For
 * writes the first bh's fill level is zeroed; for reads the cursor is
 * seeded with the bh's existing data and count.
 */
1914 static void idetape_init_merge_stage (idetape_tape_t *tape)
1916 struct idetape_bh *bh = tape->merge_stage->bh;
1919 if (tape->chrdev_direction == idetape_direction_write)
1920 atomic_set(&bh->b_count, 0);
1922 tape->b_data = bh->b_data;
1923 tape->b_count = atomic_read(&bh->b_count);
/*
 * Swap the bh chains of @stage and the merge stage, then re-initialize
 * the merge-stage cursor — lets a pipeline stage take ownership of the
 * merged data (or vice versa) without copying.
 */
1927 static void idetape_switch_buffers (idetape_tape_t *tape, idetape_stage_t *stage)
1929 struct idetape_bh *tmp;
1932 stage->bh = tape->merge_stage->bh;
1933 tape->merge_stage->bh = tmp;
1934 idetape_init_merge_stage(tape);
1938 * idetape_add_stage_tail adds a new stage at the end of the pipeline.
/*
 * Append @stage to the tail of the pipeline under tape->spinlock, keeping
 * first/next/last stage pointers and the pending-stage count consistent.
 */
1940 static void idetape_add_stage_tail (ide_drive_t *drive,idetape_stage_t *stage)
1942 idetape_tape_t *tape = drive->driver_data;
1943 unsigned long flags;
1945 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1947 spin_lock_irqsave(&tape->spinlock, flags);
1949 if (tape->last_stage != NULL)
1950 tape->last_stage->next=stage;
	/* Empty pipeline: the new stage is also the first/next stage. */
1952 tape->first_stage = tape->next_stage=stage;
1953 tape->last_stage = stage;
1954 if (tape->next_stage == NULL)
1955 tape->next_stage = tape->last_stage;
1957 tape->nr_pending_stages++;
1958 spin_unlock_irqrestore(&tape->spinlock, flags);
1962 * idetape_wait_for_request installs a completion in a pending request
1963 * and sleeps until it is serviced.
1965 * The caller should ensure that the request will not be serviced
1966 * before we install the completion (usually by disabling interrupts).
/*
 * Sleep until the in-flight @rq completes, using an on-stack completion
 * wired to blk_end_sync_rq.  Caller must hold tape->spinlock; it is
 * dropped across the wait and re-taken afterwards.
 */
1968 static void idetape_wait_for_request (ide_drive_t *drive, struct request *rq)
1970 DECLARE_COMPLETION_ONSTACK(wait);
1971 idetape_tape_t *tape = drive->driver_data;
1973 if (rq == NULL || !blk_special_request(rq)) {
1974 printk (KERN_ERR "ide-tape: bug: Trying to sleep on non-valid request\n");
1977 rq->end_io_data = &wait;
1978 rq->end_io = blk_end_sync_rq;
1979 spin_unlock_irq(&tape->spinlock);
1980 wait_for_completion(&wait);
1981 /* The stage and its struct request have been deallocated */
1982 spin_lock_irq(&tape->spinlock);
/*
 * Completion callback for READ POSITION: parse the returned descriptor
 * (BOP/EOP flags, partition, first/last frame, buffered block count) into
 * the tape state and end the request accordingly.
 */
1985 static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
1987 idetape_tape_t *tape = drive->driver_data;
1988 u8 *readpos = tape->pc->buffer;
1990 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1992 if (!tape->pc->error) {
1993 debug_log(DBG_SENSE, "BOP - %s\n",
1994 (readpos[0] & 0x80) ? "Yes" : "No");
1995 debug_log(DBG_SENSE, "EOP - %s\n",
1996 (readpos[0] & 0x40) ? "Yes" : "No");
	/* BPU bit set: the drive cannot report a block address. */
1998 if (readpos[0] & 0x4) {
1999 printk(KERN_INFO "ide-tape: Block location is unknown"
2001 clear_bit(IDETAPE_ADDRESS_VALID, &tape->flags);
2002 idetape_end_request(drive, 0, 0);
2004 debug_log(DBG_SENSE, "Block Location - %u\n",
2005 be32_to_cpu(*(u32 *)&readpos[4]));
	/* Descriptor fields are big-endian 32-bit block addresses. */
2007 tape->partition = readpos[1];
2008 tape->first_frame_position =
2009 be32_to_cpu(*(u32 *)&readpos[4]);
2010 tape->last_frame_position =
2011 be32_to_cpu(*(u32 *)&readpos[8]);
2012 tape->blocks_in_buffer = readpos[15];
2013 set_bit(IDETAPE_ADDRESS_VALID, &tape->flags);
2014 idetape_end_request(drive, 1, 0);
2017 idetape_end_request(drive, 0, 0);
2023 * idetape_create_write_filemark_cmd will:
2025 * 1. Write a filemark if write_filemark=1.
2026 * 2. Flush the device buffers without writing a filemark
2027 * if write_filemark=0.
2030 static void idetape_create_write_filemark_cmd (ide_drive_t *drive, idetape_pc_t *pc,int write_filemark)
2032 idetape_init_pc(pc);
2033 pc->c[0] = WRITE_FILEMARKS;
2034 pc->c[4] = write_filemark;
2035 set_bit(PC_WAIT_FOR_DSC, &pc->flags);
2036 pc->callback = &idetape_pc_callback;
2039 static void idetape_create_test_unit_ready_cmd(idetape_pc_t *pc)
2041 idetape_init_pc(pc);
2042 pc->c[0] = TEST_UNIT_READY;
2043 pc->callback = &idetape_pc_callback;
2047 * idetape_queue_pc_tail is based on the following functions:
2049 * ide_do_drive_cmd from ide.c
2050 * cdrom_queue_request and cdrom_queue_packet_command from ide-cd.c
2052 * We add a special packet command request to the tail of the request
2053 * queue, and wait for it to be serviced.
2055 * This is not to be called from within the request handling part
2056 * of the driver! We allocate data on the stack here, and it is valid
2057 * until the request is finished. This is not the case for the bottom
2058 * part of the driver, where we are always leaving the functions to wait
2059 * for an interrupt or a timer event.
2061 * From the bottom part of the driver, we should allocate safe memory
2062 * using idetape_next_pc_storage and idetape_next_rq_storage, and add
2063 * the request to the request list without waiting for it to be serviced !
2064 * In that case, we usually use idetape_queue_pc_head.
/*
 * Queue @pc as a special PC1 request at the tail of the request queue and
 * wait (ide_wait) for it to complete.  Must not be called from the
 * request-handling bottom half — @pc and the on-stack rq live only until
 * this function returns.
 */
2066 static int __idetape_queue_pc_tail (ide_drive_t *drive, idetape_pc_t *pc)
2068 struct ide_tape_obj *tape = drive->driver_data;
2071 idetape_init_rq(&rq, REQ_IDETAPE_PC1);
2072 rq.buffer = (char *) pc;
2073 rq.rq_disk = tape->disk;
2074 return ide_do_drive_cmd(drive, &rq, ide_wait);
/*
 * Build a START STOP UNIT (load/unload) packet command; @cmd carries the
 * load/unload/retension bits.  Completion waits on DSC like other media
 * access commands.
 */
2077 static void idetape_create_load_unload_cmd (ide_drive_t *drive, idetape_pc_t *pc,int cmd)
2079 idetape_init_pc(pc);
2080 pc->c[0] = START_STOP;
2082 set_bit(PC_WAIT_FOR_DSC, &pc->flags);
2083 pc->callback = &idetape_pc_callback;
/*
 * Poll TEST UNIT READY until the drive reports ready or @timeout expires.
 * On "no media" sense the MEDIUM_PRESENT flag is cleared; on "medium not
 * present, loadable" a single load is attempted.  Any sense other than
 * "becoming ready" (2/4/1 or 2/4/8) aborts the wait.
 */
2086 static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
2088 idetape_tape_t *tape = drive->driver_data;
2090 int load_attempted = 0;
2093 * Wait for the tape to become ready
2095 set_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags);
2097 while (time_before(jiffies, timeout)) {
2098 idetape_create_test_unit_ready_cmd(&pc);
2099 if (!__idetape_queue_pc_tail(drive, &pc))
2101 if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
2102 || (tape->asc == 0x3A)) { /* no media */
	/* Try loading the medium once before giving up on it. */
2105 idetape_create_load_unload_cmd(drive, &pc, IDETAPE_LU_LOAD_MASK);
2106 __idetape_queue_pc_tail(drive, &pc);
2108 /* not about to be ready */
2109 } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
2110 (tape->ascq == 1 || tape->ascq == 8)))
2117 static int idetape_queue_pc_tail (ide_drive_t *drive,idetape_pc_t *pc)
2119 return __idetape_queue_pc_tail(drive, pc);
/*
 * Flush the drive's internal buffers by issuing WRITE FILEMARKS with a
 * zero count, then wait (up to 5 minutes) for the drive to become ready.
 * Returns the queueing error, if any.
 */
2122 static int idetape_flush_tape_buffers (ide_drive_t *drive)
2127 idetape_create_write_filemark_cmd(drive, &pc, 0);
2128 if ((rc = idetape_queue_pc_tail(drive, &pc)))
2130 idetape_wait_ready(drive, 60 * 5 * HZ);
2134 static void idetape_create_read_position_cmd (idetape_pc_t *pc)
2136 idetape_init_pc(pc);
2137 pc->c[0] = READ_POSITION;
2138 pc->request_transfer = 20;
2139 pc->callback = &idetape_read_position_callback;
/*
 * Synchronously issue READ POSITION and return the tape's current first
 * frame position (cached by the completion callback).
 */
2142 static int idetape_read_position (ide_drive_t *drive)
2144 idetape_tape_t *tape = drive->driver_data;
2148 debug_log(DBG_PROCS, "Enter %s\n", __func__);
2150 idetape_create_read_position_cmd(&pc);
2151 if (idetape_queue_pc_tail(drive, &pc))
2153 position = tape->first_frame_position;
/*
 * Build a LOCATE (POSITION TO ELEMENT) packet command that positions the
 * tape to @block in @partition.  Block address is big-endian in bytes
 * 3-6.  Waits on DSC for completion.
 * NOTE(review): @skip is accepted but not referenced in the visible body.
 */
2157 static void idetape_create_locate_cmd (ide_drive_t *drive, idetape_pc_t *pc, unsigned int block, u8 partition, int skip)
2159 idetape_init_pc(pc);
2160 pc->c[0] = POSITION_TO_ELEMENT;
2162 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
2163 pc->c[8] = partition;
2164 set_bit(PC_WAIT_FOR_DSC, &pc->flags);
2165 pc->callback = &idetape_pc_callback;
/*
 * Build a PREVENT/ALLOW MEDIUM REMOVAL command.  Returns early (without
 * building anything) when the capabilities page says the drive does not
 * support door locking.
 */
2168 static int idetape_create_prevent_cmd (ide_drive_t *drive, idetape_pc_t *pc, int prevent)
2170 idetape_tape_t *tape = drive->driver_data;
2172 /* device supports locking according to capabilities page */
2173 if (!(tape->caps[6] & 0x01))
2176 idetape_init_pc(pc);
2177 pc->c[0] = ALLOW_MEDIUM_REMOVAL;
2179 pc->callback = &idetape_pc_callback;
/*
 * Tear down the read pipeline: free the merge stage, wait for any active
 * data request, then drop every queued stage.  Returns the number of
 * sectors that were read ahead but never consumed (so the caller can
 * reposition the tape), counting a pending filemark as one sector.
 */
2183 static int __idetape_discard_read_pipeline (ide_drive_t *drive)
2185 idetape_tape_t *tape = drive->driver_data;
2186 unsigned long flags;
2189 if (tape->chrdev_direction != idetape_direction_read)
2192 /* Remove merge stage. */
2193 cnt = tape->merge_stage_size / tape->tape_block_size;
2194 if (test_and_clear_bit(IDETAPE_FILEMARK, &tape->flags))
2195 ++cnt; /* Filemarks count as 1 sector */
2196 tape->merge_stage_size = 0;
2197 if (tape->merge_stage != NULL) {
2198 __idetape_kfree_stage(tape->merge_stage);
2199 tape->merge_stage = NULL;
2202 /* Clear pipeline flags. */
2203 clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
2204 tape->chrdev_direction = idetape_direction_none;
2206 /* Remove pipeline stages. */
2207 if (tape->first_stage == NULL)
	/* Stop new stage dispatch, then wait out the in-flight request. */
2210 spin_lock_irqsave(&tape->spinlock, flags);
2211 tape->next_stage = NULL;
2212 if (idetape_pipeline_active(tape))
2213 idetape_wait_for_request(drive, tape->active_data_request);
2214 spin_unlock_irqrestore(&tape->spinlock, flags);
	/* Count the unread sectors in each stage while freeing it. */
2216 while (tape->first_stage != NULL) {
2217 struct request *rq_ptr = &tape->first_stage->rq;
2219 cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
2220 if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
2222 idetape_remove_stage_head(drive);
2224 tape->nr_pending_stages = 0;
2225 tape->max_stages = tape->min_pipeline;
2230 * idetape_position_tape positions the tape to the requested block
2231 * using the LOCATE packet command. A READ POSITION command is then
2232 * issued to check where we are positioned.
2234 * Like all higher level operations, we queue the commands at the tail
2235 * of the request queue and wait for their completion.
/*
 * Position the tape to @block in @partition with LOCATE, then verify by
 * issuing READ POSITION.  Discards any active read pipeline first and
 * waits for drive readiness.  Commands are queued at the request-queue
 * tail and waited for synchronously.
 */
2238 static int idetape_position_tape (ide_drive_t *drive, unsigned int block, u8 partition, int skip)
2240 idetape_tape_t *tape = drive->driver_data;
2244 if (tape->chrdev_direction == idetape_direction_read)
2245 __idetape_discard_read_pipeline(drive);
2246 idetape_wait_ready(drive, 60 * 5 * HZ);
2247 idetape_create_locate_cmd(drive, &pc, block, partition, skip);
2248 retval = idetape_queue_pc_tail(drive, &pc);
2252 idetape_create_read_position_cmd(&pc);
2253 return (idetape_queue_pc_tail(drive, &pc));
/*
 * Discard the read pipeline and, when @restore_position is set,
 * seek the tape back over the discarded read-ahead blocks so the
 * logical position matches what userspace actually consumed.
 * The seek target is clamped at 0 (beginning of partition).
 */
2256 static void idetape_discard_read_pipeline (ide_drive_t *drive, int restore_position)
2258 idetape_tape_t *tape = drive->driver_data;
2262 cnt = __idetape_discard_read_pipeline(drive);
2263 if (restore_position) {
2264 position = idetape_read_position(drive);
2265 seek = position > cnt ? position - cnt : 0;
2266 if (idetape_position_tape(drive, seek, 0, 0)) {
/* Position restore is best-effort; just log the failure. */
2267 printk(KERN_INFO "ide-tape: %s: position_tape failed in discard_pipeline()\n", tape->name);
2274 * idetape_queue_rw_tail generates a read/write request for the block
2275 * device interface and wait for it to be serviced.
/*
 * Build a synchronous read/write request of @blocks tape blocks on
 * buffer list @bh, queue it at the tail of the request queue and
 * wait for completion (ide_wait).  Must not be called while the
 * pipeline is active.  Returns the number of bytes transferred
 * (blocks minus the residual, times the block size); a general
 * error appears to be handled on the line missing after 2302.
 */
2277 static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks, struct idetape_bh *bh)
2279 idetape_tape_t *tape = drive->driver_data;
2282 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
2284 if (idetape_pipeline_active(tape)) {
2285 printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
2290 idetape_init_rq(&rq, cmd);
2291 rq.rq_disk = tape->disk;
2292 rq.special = (void *)bh;
/* Sector is nominal; the tape is a sequential-access device. */
2293 rq.sector = tape->first_frame_position;
2294 rq.nr_sectors = rq.current_nr_sectors = blocks;
2295 (void) ide_do_drive_cmd(drive, &rq, ide_wait);
2297 if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
/* Reset the merge stage so its buffers can be reused. */
2300 if (tape->merge_stage)
2301 idetape_init_merge_stage(tape);
2302 if (rq.errors == IDETAPE_ERROR_GENERAL)
2304 return (tape->tape_block_size * (blocks-rq.current_nr_sectors));
2308 * idetape_insert_pipeline_into_queue is used to start servicing the
2309 * pipeline stages, starting from tape->next_stage.
/*
 * Kick the pipeline: if there are pending stages and no request is
 * currently in flight, mark the pipeline active, promote the next
 * stage and queue its request asynchronously (ide_end).
 */
2311 static void idetape_insert_pipeline_into_queue (ide_drive_t *drive)
2313 idetape_tape_t *tape = drive->driver_data;
2315 if (tape->next_stage == NULL)
2317 if (!idetape_pipeline_active(tape)) {
2318 set_bit(IDETAPE_PIPELINE_ACTIVE, &tape->flags);
2319 idetape_activate_next_stage(drive);
2320 (void) ide_do_drive_cmd(drive, tape->active_data_request, ide_end);
/*
 * Build an ATAPI INQUIRY packet command requesting up to 254 bytes
 * of inquiry data.  (The opcode assignment is on a line not visible
 * in this extract.)
 */
2324 static void idetape_create_inquiry_cmd (idetape_pc_t *pc)
2326 idetape_init_pc(pc);
2328 pc->c[4] = pc->request_transfer = 254;
2329 pc->callback = &idetape_pc_callback;
/*
 * Build a REZERO UNIT (rewind) packet command.  PC_WAIT_FOR_DSC is
 * set because rewinding is a slow media-access operation completed
 * via the DSC handshake.
 */
2332 static void idetape_create_rewind_cmd (ide_drive_t *drive, idetape_pc_t *pc)
2334 idetape_init_pc(pc);
2335 pc->c[0] = REZERO_UNIT;
2336 set_bit(PC_WAIT_FOR_DSC, &pc->flags);
2337 pc->callback = &idetape_pc_callback;
/*
 * Build an ERASE packet command (opcode/flags bytes are on lines
 * not visible here).  Waits for DSC since erasing is a long
 * media-access operation.
 */
2340 static void idetape_create_erase_cmd (idetape_pc_t *pc)
2342 idetape_init_pc(pc);
2345 set_bit(PC_WAIT_FOR_DSC, &pc->flags);
2346 pc->callback = &idetape_pc_callback;
/*
 * Build a SPACE packet command: move over @count objects of the
 * kind selected by @cmd (e.g. filemarks or to EOD).  The signed
 * count is stored big-endian in bytes 1..4 of the CDB; DSC wait is
 * required as spacing is a media-access operation.
 */
2349 static void idetape_create_space_cmd (idetape_pc_t *pc,int count, u8 cmd)
2351 idetape_init_pc(pc);
2353 put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]);
2355 set_bit(PC_WAIT_FOR_DSC, &pc->flags);
2356 pc->callback = &idetape_pc_callback;
/*
 * If the pipeline's head stage is the one currently being serviced
 * by the drive, block until its request completes.  After this the
 * first stage's data is stable and may be consumed or freed.
 */
2359 static void idetape_wait_first_stage (ide_drive_t *drive)
2361 idetape_tape_t *tape = drive->driver_data;
2362 unsigned long flags;
2364 if (tape->first_stage == NULL)
2366 spin_lock_irqsave(&tape->spinlock, flags);
2367 if (tape->active_stage == tape->first_stage)
2368 idetape_wait_for_request(drive, tape->active_data_request);
2369 spin_unlock_irqrestore(&tape->spinlock, flags);
2373 * idetape_add_chrdev_write_request tries to add a character device
2374 * originated write request to our pipeline. In case we don't succeed,
2375 * we revert to non-pipelined operation mode for this request.
2377 * 1. Try to allocate a new pipeline stage.
2378 * 2. If we can't, wait for more and more requests to be serviced
2379 * and try again each time.
2380 * 3. If we still can't allocate a stage, fallback to
2381 * non-pipelined operation mode for this request.
/*
 * Add one chardev-originated write of @blocks tape blocks to the
 * pipeline.  If no stage can be allocated even after draining
 * in-flight requests, fall back to a synchronous non-pipelined
 * write via idetape_queue_rw_tail().  Returns a deferred pipeline
 * error if one was recorded (see the IDETAPE_PIPELINE_ERROR test
 * at the end; the returned value itself is on a missing line).
 */
2383 static int idetape_add_chrdev_write_request (ide_drive_t *drive, int blocks)
2385 idetape_tape_t *tape = drive->driver_data;
2386 idetape_stage_t *new_stage;
2387 unsigned long flags;
2390 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2393 * Attempt to allocate a new stage.
2394 * Pay special attention to possible race conditions.
2396 while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) {
2397 spin_lock_irqsave(&tape->spinlock, flags);
2398 if (idetape_pipeline_active(tape)) {
/* Wait for a stage to be freed by a completing request. */
2399 idetape_wait_for_request(drive, tape->active_data_request);
2400 spin_unlock_irqrestore(&tape->spinlock, flags);
2402 spin_unlock_irqrestore(&tape->spinlock, flags);
2403 idetape_insert_pipeline_into_queue(drive);
2404 if (idetape_pipeline_active(tape))
2407 * Linux is short on memory. Fallback to
2408 * non-pipelined operation mode for this request.
2410 return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks, tape->merge_stage->bh);
2413 rq = &new_stage->rq;
2414 idetape_init_rq(rq, REQ_IDETAPE_WRITE);
2415 /* Doesn't actually matter - We always assume sequential access */
2416 rq->sector = tape->first_frame_position;
2417 rq->nr_sectors = rq->current_nr_sectors = blocks;
/* Hand the merge-stage buffers to the new stage and queue it. */
2419 idetape_switch_buffers(tape, new_stage);
2420 idetape_add_stage_tail(drive, new_stage);
2421 tape->pipeline_head++;
2422 calculate_speeds(drive);
2425 * Estimate whether the tape has stopped writing by checking
2426 * if our write pipeline is currently empty. If we are not
2427 * writing anymore, wait for the pipeline to be full enough
2428 * (90%) before starting to service requests, so that we will
2429 * be able to keep up with the higher speeds of the tape.
2431 if (!idetape_pipeline_active(tape)) {
2432 if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
2433 tape->nr_stages >= tape->max_stages - tape->uncontrolled_pipeline_head_speed * 3 * 1024 / tape->tape_block_size) {
/* Restart insert-rate measurement for speed control. */
2434 tape->measure_insert_time = 1;
2435 tape->insert_time = jiffies;
2436 tape->insert_size = 0;
2437 tape->insert_speed = 0;
2438 idetape_insert_pipeline_into_queue(drive);
2441 if (test_and_clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags))
2442 /* Return a deferred error */
2448 * idetape_wait_for_pipeline will wait until all pending pipeline
2449 * requests are serviced. Typically called on device close.
/*
 * Block until every pending pipeline stage has been serviced:
 * repeatedly kick the pipeline and wait for the active request to
 * finish.  Typically called on device close.
 */
2451 static void idetape_wait_for_pipeline (ide_drive_t *drive)
2453 idetape_tape_t *tape = drive->driver_data;
2454 unsigned long flags;
2456 while (tape->next_stage || idetape_pipeline_active(tape)) {
2457 idetape_insert_pipeline_into_queue(drive);
2458 spin_lock_irqsave(&tape->spinlock, flags);
2459 if (idetape_pipeline_active(tape))
2460 idetape_wait_for_request(drive, tape->active_data_request);
2461 spin_unlock_irqrestore(&tape->spinlock, flags);
/*
 * Flush the write pipeline on close of a write stream: pad any
 * partial final block with zeros, queue the remaining merge-stage
 * data, wait for the whole pipeline to drain, free the merge stage
 * and reset direction/pipeline state.  Ends by sanity-checking
 * that all stage pointers and counters are back to zero.
 */
2465 static void idetape_empty_write_pipeline (ide_drive_t *drive)
2467 idetape_tape_t *tape = drive->driver_data;
2469 struct idetape_bh *bh;
2471 if (tape->chrdev_direction != idetape_direction_write) {
2472 printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline, but we are not writing.\n");
2475 if (tape->merge_stage_size > tape->stage_size) {
2476 printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
2477 tape->merge_stage_size = tape->stage_size;
2479 if (tape->merge_stage_size) {
2480 blocks = tape->merge_stage_size / tape->tape_block_size;
2481 if (tape->merge_stage_size % tape->tape_block_size) {
/*
 * Partial last block: zero-fill the remainder across the
 * bh chain so a whole number of blocks can be written.
 * NOTE(review): loop framing around 2486-2500 is partly
 * missing from this extract.
 */
2485 i = tape->tape_block_size - tape->merge_stage_size % tape->tape_block_size;
2486 bh = tape->bh->b_reqnext;
2488 atomic_set(&bh->b_count, 0);
2495 printk(KERN_INFO "ide-tape: bug, bh NULL\n");
2498 min = min(i, (unsigned int)(bh->b_size - atomic_read(&bh->b_count)));
2499 memset(bh->b_data + atomic_read(&bh->b_count), 0, min);
2500 atomic_add(min, &bh->b_count);
2505 (void) idetape_add_chrdev_write_request(drive, blocks);
2506 tape->merge_stage_size = 0;
2508 idetape_wait_for_pipeline(drive);
2509 if (tape->merge_stage != NULL) {
2510 __idetape_kfree_stage(tape->merge_stage);
2511 tape->merge_stage = NULL;
2513 clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
2514 tape->chrdev_direction = idetape_direction_none;
2517 * On the next backup, perform the feedback loop again.
2518 * (I don't want to keep sense information between backups,
2519 * as some systems are constantly on, and the system load
2520 * can be totally different on the next backup).
2522 tape->max_stages = tape->min_pipeline;
2523 if (tape->first_stage != NULL ||
2524 tape->next_stage != NULL ||
2525 tape->last_stage != NULL ||
2526 tape->nr_stages != 0) {
2527 printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
2528 "first_stage %p, next_stage %p, "
2529 "last_stage %p, nr_stages %d\n",
2530 tape->first_stage, tape->next_stage,
2531 tape->last_stage, tape->nr_stages);
/*
 * Reset all speed-control bookkeeping: zero the pipeline head
 * counters, seed the controlled/pipeline head speed at 5000
 * (units per the speed-control code, not visible here) and stamp
 * all measurement windows with the current jiffies.
 */
2535 static void idetape_restart_speed_control (ide_drive_t *drive)
2537 idetape_tape_t *tape = drive->driver_data;
2539 tape->restart_speed_control_req = 0;
2540 tape->pipeline_head = 0;
2541 tape->controlled_last_pipeline_head = tape->uncontrolled_last_pipeline_head = 0;
2542 tape->controlled_previous_pipeline_head = tape->uncontrolled_previous_pipeline_head = 0;
2543 tape->pipeline_head_speed = tape->controlled_pipeline_head_speed = 5000;
2544 tape->uncontrolled_pipeline_head_speed = 0;
2545 tape->controlled_pipeline_head_time = tape->uncontrolled_pipeline_head_time = jiffies;
2546 tape->controlled_previous_head_time = tape->uncontrolled_previous_head_time = jiffies;
/*
 * Start (or continue) a pipelined read.  On first entry for a read
 * stream: drain any write pipeline, allocate a merge stage, switch
 * direction to read and issue a 0-block read to move the DSC
 * handshake into buffer-available mode.  Then top the pipeline up
 * with read-ahead stages (each sized from the capabilities page,
 * caps[12..13]) up to @max_stages and kick it when enough stages
 * are pending.
 */
2549 static int idetape_initiate_read (ide_drive_t *drive, int max_stages)
2551 idetape_tape_t *tape = drive->driver_data;
2552 idetape_stage_t *new_stage;
/* Blocks-per-request from the capabilities page (little-endian). */
2555 u16 blocks = *(u16 *)&tape->caps[12];
2557 /* Initialize read operation */
2558 if (tape->chrdev_direction != idetape_direction_read) {
2559 if (tape->chrdev_direction == idetape_direction_write) {
2560 idetape_empty_write_pipeline(drive);
2561 idetape_flush_tape_buffers(drive);
2563 if (tape->merge_stage || tape->merge_stage_size) {
2564 printk (KERN_ERR "ide-tape: merge_stage_size should be 0 now\n");
2565 tape->merge_stage_size = 0;
2567 if ((tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0)) == NULL)
2569 tape->chrdev_direction = idetape_direction_read;
2572 * Issue a read 0 command to ensure that DSC handshake
2573 * is switched from completion mode to buffer available
2575 * No point in issuing this if DSC overlap isn't supported,
2576 * some drives (Seagate STT3401A) will return an error.
2578 if (drive->dsc_overlap) {
2579 bytes_read = idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, 0, tape->merge_stage->bh);
2580 if (bytes_read < 0) {
/* Probe failed: undo the read-direction setup. */
2581 __idetape_kfree_stage(tape->merge_stage);
2582 tape->merge_stage = NULL;
2583 tape->chrdev_direction = idetape_direction_none;
2588 if (tape->restart_speed_control_req)
2589 idetape_restart_speed_control(drive);
2590 idetape_init_rq(&rq, REQ_IDETAPE_READ);
2591 rq.sector = tape->first_frame_position;
2592 rq.nr_sectors = rq.current_nr_sectors = blocks;
/* Grow the pipeline with read-ahead stages until max_stages. */
2593 if (!test_bit(IDETAPE_PIPELINE_ERROR, &tape->flags) &&
2594 tape->nr_stages < max_stages) {
2595 new_stage = idetape_kmalloc_stage(tape);
2596 while (new_stage != NULL) {
2598 idetape_add_stage_tail(drive, new_stage);
2599 if (tape->nr_stages >= max_stages)
2601 new_stage = idetape_kmalloc_stage(tape);
/* Kick the pipeline once it is at least 3/4 full. */
2604 if (!idetape_pipeline_active(tape)) {
2605 if (tape->nr_pending_stages >= 3 * max_stages / 4) {
2606 tape->measure_insert_time = 1;
2607 tape->insert_time = jiffies;
2608 tape->insert_size = 0;
2609 tape->insert_speed = 0;
2610 idetape_insert_pipeline_into_queue(drive);
2617 * idetape_add_chrdev_read_request is called from idetape_chrdev_read
2618 * to service a character device read request and add read-ahead
2619 * requests to our pipeline.
/*
 * Service one chardev read of @blocks tape blocks.  Returns 0 at a
 * filemark, the number of bytes obtained from the pipeline's head
 * stage otherwise (clamped to the requested size).  Falls back to
 * a synchronous non-pipelined read when the pipeline is empty.
 */
2621 static int idetape_add_chrdev_read_request (ide_drive_t *drive,int blocks)
2623 idetape_tape_t *tape = drive->driver_data;
2624 unsigned long flags;
2625 struct request *rq_ptr;
2628 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
2631 * If we are at a filemark, return a read length of 0
2633 if (test_bit(IDETAPE_FILEMARK, &tape->flags))
2637 * Wait for the next block to be available at the head
2640 idetape_initiate_read(drive, tape->max_stages);
2641 if (tape->first_stage == NULL) {
2642 if (test_bit(IDETAPE_PIPELINE_ERROR, &tape->flags))
/* Pipeline empty: do a blocking read instead. */
2644 return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks, tape->merge_stage->bh);
2646 idetape_wait_first_stage(drive);
2647 rq_ptr = &tape->first_stage->rq;
/* Bytes actually read = (requested - residual) * block size. */
2648 bytes_read = tape->tape_block_size * (rq_ptr->nr_sectors - rq_ptr->current_nr_sectors);
2649 rq_ptr->nr_sectors = rq_ptr->current_nr_sectors = 0;
2652 if (rq_ptr->errors == IDETAPE_ERROR_EOD)
2655 idetape_switch_buffers(tape, tape->first_stage);
2656 if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
2657 set_bit(IDETAPE_FILEMARK, &tape->flags);
2658 spin_lock_irqsave(&tape->spinlock, flags);
2659 idetape_remove_stage_head(drive);
2660 spin_unlock_irqrestore(&tape->spinlock, flags);
2661 tape->pipeline_head++;
2662 calculate_speeds(drive);
2664 if (bytes_read > blocks * tape->tape_block_size) {
2665 printk(KERN_ERR "ide-tape: bug: trying to return more bytes than requested\n");
2666 bytes_read = blocks * tape->tape_block_size;
2668 return (bytes_read);
/*
 * Write @bcount zero bytes to the tape using the merge stage as a
 * scratch buffer, one stage-sized chunk at a time.  The inner
 * per-bh loop framing (and the outer loop over bcount) are partly
 * missing from this extract.
 */
2671 static void idetape_pad_zeros (ide_drive_t *drive, int bcount)
2673 idetape_tape_t *tape = drive->driver_data;
2674 struct idetape_bh *bh;
2680 bh = tape->merge_stage->bh;
2681 count = min(tape->stage_size, bcount);
2683 blocks = count / tape->tape_block_size;
/* Fill each bh with zeros up to its capacity. */
2685 atomic_set(&bh->b_count, min(count, (unsigned int)bh->b_size));
2686 memset(bh->b_data, 0, atomic_read(&bh->b_count));
2687 count -= atomic_read(&bh->b_count);
2690 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks, tape->merge_stage->bh);
/*
 * Return the total number of bytes currently buffered in the
 * pipeline (per-stage residuals, one block per crossed filemark,
 * plus the merge stage).  Drains in-flight requests first so the
 * count is stable.
 */
2694 static int idetape_pipeline_size (ide_drive_t *drive)
2696 idetape_tape_t *tape = drive->driver_data;
2697 idetape_stage_t *stage;
2701 idetape_wait_for_pipeline(drive);
2702 stage = tape->first_stage;
2703 while (stage != NULL) {
2705 size += tape->tape_block_size * (rq->nr_sectors-rq->current_nr_sectors);
2706 if (rq->errors == IDETAPE_ERROR_FILEMARK)
2707 size += tape->tape_block_size;
2708 stage = stage->next;
2710 size += tape->merge_stage_size;
2715 * Rewinds the tape to the Beginning Of the current Partition (BOP).
2717 * We currently support only one partition.
/*
 * Rewind to the beginning of the current partition (BOP) via
 * REZERO UNIT, then issue READ POSITION to resynchronize our idea
 * of the tape position.  Only one partition is supported.
 */
2719 static int idetape_rewind_tape (ide_drive_t *drive)
2723 idetape_tape_t *tape;
2724 tape = drive->driver_data;
2726 debug_log(DBG_SENSE, "Enter %s\n", __func__);
2728 idetape_create_rewind_cmd(drive, &pc);
2729 retval = idetape_queue_pc_tail(drive, &pc);
2733 idetape_create_read_position_cmd(&pc);
2734 retval = idetape_queue_pc_tail(drive, &pc);
2741 * Our special ide-tape ioctl's.
2743 * Currently there aren't any ioctl's.
2744 * mtio.h compatible commands should be issued to the character device
/*
 * Block-device ioctl handler for the driver's private config
 * ioctls: one direction copies a struct idetape_config from
 * userspace (setting the DSC polling frequency and pipeline size),
 * the other copies the current values back.  The switch/case
 * labels selecting between them are on lines missing from this
 * extract.
 */
2747 static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd, unsigned long arg)
2749 idetape_tape_t *tape = drive->driver_data;
2750 void __user *argp = (void __user *)arg;
2752 struct idetape_config {
2753 int dsc_rw_frequency;
2754 int dsc_media_access_frequency;
2758 debug_log(DBG_PROCS, "Enter %s\n", __func__);
/* Set-config direction: read new parameters from userspace. */
2762 if (copy_from_user(&config, argp, sizeof(config)))
2764 tape->best_dsc_rw_frequency = config.dsc_rw_frequency;
2765 tape->max_stages = config.nr_stages;
/* Get-config direction: report current parameters. */
2768 config.dsc_rw_frequency = (int) tape->best_dsc_rw_frequency;
2769 config.nr_stages = tape->max_stages;
2770 if (copy_to_user(argp, &config, sizeof(config)))
2780 * idetape_space_over_filemarks is now a bit more complicated than just
2781 * passing the command to the tape since we may have crossed some
2782 * filemarks during our pipelined read-ahead mode.
2784 * As a minor side effect, the pipeline enables us to support MTFSFM when
2785 * the filemark is in our internal pipeline even if the tape doesn't
2786 * support spacing over filemarks in the reverse direction.
/*
 * Implement MTFSF/MTFSFM/MTBSF/MTBSFM: space over @mt_count
 * filemarks.  Backward ops are converted to a negative count
 * (requires the drive's reverse-space capability, caps[4] bit 5).
 * If a read pipeline is active, filemarks already sitting in the
 * read-ahead buffer are consumed first; only the remainder is sent
 * to the drive as a SPACE command.  MTBSFM/MTFSFM adjust by one
 * extra forward space so the tape ends up on the correct side of
 * the filemark.
 */
2788 static int idetape_space_over_filemarks (ide_drive_t *drive,short mt_op,int mt_count)
2790 idetape_tape_t *tape = drive->driver_data;
2792 unsigned long flags;
/* Nonzero if the drive can space in the reverse direction. */
2794 int sprev = !!(tape->caps[4] & 0x20);
2798 if (MTBSF == mt_op || MTBSFM == mt_op) {
2801 mt_count = - mt_count;
2804 if (tape->chrdev_direction == idetape_direction_read) {
2806 * We have a read-ahead buffer. Scan it for crossed
2809 tape->merge_stage_size = 0;
2810 if (test_and_clear_bit(IDETAPE_FILEMARK, &tape->flags))
2812 while (tape->first_stage != NULL) {
2813 if (count == mt_count) {
2814 if (mt_op == MTFSFM)
2815 set_bit(IDETAPE_FILEMARK, &tape->flags);
2818 spin_lock_irqsave(&tape->spinlock, flags);
2819 if (tape->first_stage == tape->active_stage) {
2821 * We have reached the active stage in the read pipeline.
2822 * There is no point in allowing the drive to continue
2823 * reading any farther, so we stop the pipeline.
2825 * This section should be moved to a separate subroutine,
2826 * because a similar function is performed in
2827 * __idetape_discard_read_pipeline(), for example.
2829 tape->next_stage = NULL;
2830 spin_unlock_irqrestore(&tape->spinlock, flags);
2831 idetape_wait_first_stage(drive);
2832 tape->next_stage = tape->first_stage->next;
2834 spin_unlock_irqrestore(&tape->spinlock, flags);
2835 if (tape->first_stage->rq.errors == IDETAPE_ERROR_FILEMARK)
2837 idetape_remove_stage_head(drive);
2839 idetape_discard_read_pipeline(drive, 0);
2843 * The filemark was not found in our internal pipeline.
2844 * Now we can issue the space command.
/* Remaining filemarks are spaced over by the drive itself. */
2849 idetape_create_space_cmd(&pc,mt_count-count,IDETAPE_SPACE_OVER_FILEMARK);
2850 return (idetape_queue_pc_tail(drive, &pc));
2855 retval = idetape_space_over_filemarks(drive, MTFSF, mt_count-count);
2856 if (retval) return (retval);
2857 count = (MTBSFM == mt_op ? 1 : -1);
2858 return (idetape_space_over_filemarks(drive, MTFSF, count));
2860 printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",mt_op);
2867 * Our character device read / write functions.
2869 * The tape is optimized to maximize throughput when it is transferring
2870 * an integral number of the "continuous transfer limit", which is
2871 * a parameter of the specific tape (26 KB on my particular tape).
2872 * (32 kB for Onstream)
2874 * As of version 1.3 of the driver, the character device provides an
2875 * abstract continuous view of the media - any mix of block sizes (even 1
2876 * byte) on the same backup/restore procedure is supported. The driver
2877 * will internally convert the requests to the recommended transfer unit,
2878 * so that an unmatch between the user's block size to the recommended
2879 * size will only result in a (slightly) increased driver overhead, but
2880 * will no longer hit performance.
2881 * This is not applicable to Onstream.
/*
 * Character-device read.  Presents a byte-stream view over the
 * fixed-block tape: first drain any leftover bytes in the merge
 * stage, then satisfy whole stage-sized chunks directly from the
 * pipeline, and finally read one more chunk to cover the tail,
 * keeping the surplus in the merge stage for the next call.  When
 * nothing was read and a filemark is pending, space over it so the
 * next read starts past the mark.
 */
2883 static ssize_t idetape_chrdev_read (struct file *file, char __user *buf,
2884 size_t count, loff_t *ppos)
2886 struct ide_tape_obj *tape = ide_tape_f(file);
2887 ide_drive_t *drive = tape->drive;
2888 ssize_t bytes_read,temp, actually_read = 0, rc;
2890 u16 ctl = *(u16 *)&tape->caps[12];
2892 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
2894 if (tape->chrdev_direction != idetape_direction_read) {
/* Auto-detect the user's block-size multiple on first read. */
2895 if (test_bit(IDETAPE_DETECT_BS, &tape->flags))
2896 if (count > tape->tape_block_size &&
2897 (count % tape->tape_block_size) == 0)
2898 tape->user_bs_factor = count / tape->tape_block_size;
2900 if ((rc = idetape_initiate_read(drive, tape->max_stages)) < 0)
/* Serve leftover bytes buffered from a previous read first. */
2904 if (tape->merge_stage_size) {
2905 actually_read = min((unsigned int)(tape->merge_stage_size), (unsigned int)count);
2906 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, actually_read))
2908 buf += actually_read;
2909 tape->merge_stage_size -= actually_read;
2910 count -= actually_read;
2912 while (count >= tape->stage_size) {
2913 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2914 if (bytes_read <= 0)
2916 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, bytes_read))
2919 count -= bytes_read;
2920 actually_read += bytes_read;
/* Tail smaller than a stage: read one chunk, keep the surplus. */
2923 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2924 if (bytes_read <= 0)
2926 temp = min((unsigned long)count, (unsigned long)bytes_read);
2927 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, temp))
2929 actually_read += temp;
2930 tape->merge_stage_size = bytes_read-temp;
2933 if (!actually_read && test_bit(IDETAPE_FILEMARK, &tape->flags)) {
2934 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
2936 idetape_space_over_filemarks(drive, MTFSF, 1);
2940 return (ret) ? ret : actually_read;
/*
 * Character-device write, mirror of idetape_chrdev_read: fill the
 * partial merge stage first (queuing it once full), then write
 * whole stage-sized chunks through the pipeline, and buffer any
 * tail bytes in the merge stage for the next call.  Initializes
 * the write direction on first use, including the DSC-handshake
 * probe write of 0 blocks.  Rejects write-protected media.
 */
2943 static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
2944 size_t count, loff_t *ppos)
2946 struct ide_tape_obj *tape = ide_tape_f(file);
2947 ide_drive_t *drive = tape->drive;
2948 ssize_t actually_written = 0;
2950 u16 ctl = *(u16 *)&tape->caps[12];
2952 /* The drive is write protected. */
2953 if (tape->write_prot)
2956 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
2958 /* Initialize write operation */
2959 if (tape->chrdev_direction != idetape_direction_write) {
2960 if (tape->chrdev_direction == idetape_direction_read)
2961 idetape_discard_read_pipeline(drive, 1);
2962 if (tape->merge_stage || tape->merge_stage_size) {
2963 printk(KERN_ERR "ide-tape: merge_stage_size "
2964 "should be 0 now\n");
2965 tape->merge_stage_size = 0;
2967 if ((tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0)) == NULL)
2969 tape->chrdev_direction = idetape_direction_write;
2970 idetape_init_merge_stage(tape);
2973 * Issue a write 0 command to ensure that DSC handshake
2974 * is switched from completion mode to buffer available
2976 * No point in issuing this if DSC overlap isn't supported,
2977 * some drives (Seagate STT3401A) will return an error.
2979 if (drive->dsc_overlap) {
2980 ssize_t retval = idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, 0, tape->merge_stage->bh);
/* Probe failed: undo the write-direction setup. */
2982 __idetape_kfree_stage(tape->merge_stage);
2983 tape->merge_stage = NULL;
2984 tape->chrdev_direction = idetape_direction_none;
2991 if (tape->restart_speed_control_req)
2992 idetape_restart_speed_control(drive);
/* Top up a partially filled merge stage before anything else. */
2993 if (tape->merge_stage_size) {
2994 if (tape->merge_stage_size >= tape->stage_size) {
2995 printk(KERN_ERR "ide-tape: bug: merge buffer too big\n");
2996 tape->merge_stage_size = 0;
2998 actually_written = min((unsigned int)(tape->stage_size - tape->merge_stage_size), (unsigned int)count);
2999 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, actually_written))
3001 buf += actually_written;
3002 tape->merge_stage_size += actually_written;
3003 count -= actually_written;
3005 if (tape->merge_stage_size == tape->stage_size) {
3007 tape->merge_stage_size = 0;
3008 retval = idetape_add_chrdev_write_request(drive, ctl);
/* Write whole stage-sized chunks straight through. */
3013 while (count >= tape->stage_size) {
3015 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, tape->stage_size))
3017 buf += tape->stage_size;
3018 count -= tape->stage_size;
3019 retval = idetape_add_chrdev_write_request(drive, ctl);
3020 actually_written += tape->stage_size;
/* Buffer the remaining tail bytes for the next write call. */
3025 actually_written += count;
3026 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, count))
3028 tape->merge_stage_size += count;
3030 return (ret) ? ret : actually_written;
/*
 * Write a single filemark to the tape; logs and reports failure of
 * the queued packet command (return values are on lines missing
 * from this extract).
 */
3033 static int idetape_write_filemark (ide_drive_t *drive)
3037 /* Write a filemark */
3038 idetape_create_write_filemark_cmd(drive, &pc, 1);
3039 if (idetape_queue_pc_tail(drive, &pc)) {
3040 printk(KERN_ERR "ide-tape: Couldn't write a filemark\n");
3047 * Called from idetape_chrdev_ioctl when the general mtio MTIOCTOP ioctl is
3050 * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
3051 * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
3052 * usually not supported (it is supported in the rare case in which we crossed
3053 * the filemark during our read-ahead pipelined operation mode).
3055 * The following commands are currently not supported:
3057 * MTFSS, MTBSS, MTWSM, MTSETDENSITY, MTSETDRVBUFFER, MT_ST_BOOLEANS,
3058 * MT_ST_WRITE_THRESHOLD.
/*
 * Dispatch one MTIOCTOP magnetic-tape operation (@mt_op with
 * repeat/argument @mt_count).  Filemark spacing is delegated to
 * idetape_space_over_filemarks(); most other ops discard the read
 * pipeline first, then issue the corresponding packet command
 * (rewind, load/unload, retension, erase, locate, lock/unlock
 * door, set block size, ...).  The case labels themselves are on
 * lines missing from this extract.
 */
3060 static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
3062 idetape_tape_t *tape = drive->driver_data;
3066 debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
3069 * Commands which need our pipelined read-ahead stages.
3078 return (idetape_space_over_filemarks(drive,mt_op,mt_count));
/* Write mt_count filemarks (refused on write-protected media). */
3084 if (tape->write_prot)
3086 idetape_discard_read_pipeline(drive, 1);
3087 for (i = 0; i < mt_count; i++) {
3088 retval = idetape_write_filemark(drive);
/* Rewind to BOP. */
3094 idetape_discard_read_pipeline(drive, 0);
3095 if (idetape_rewind_tape(drive))
/* Load the medium. */
3099 idetape_discard_read_pipeline(drive, 0);
3100 idetape_create_load_unload_cmd(drive, &pc, IDETAPE_LU_LOAD_MASK);
3101 return (idetape_queue_pc_tail(drive, &pc));
3105 * If door is locked, attempt to unlock before
3106 * attempting to eject.
3108 if (tape->door_locked) {
3109 if (idetape_create_prevent_cmd(drive, &pc, 0))
3110 if (!idetape_queue_pc_tail(drive, &pc))
3111 tape->door_locked = DOOR_UNLOCKED;
3113 idetape_discard_read_pipeline(drive, 0);
3114 idetape_create_load_unload_cmd(drive, &pc,!IDETAPE_LU_LOAD_MASK);
3115 retval = idetape_queue_pc_tail(drive, &pc);
3117 clear_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags);
/* Flush the drive's internal buffers. */
3120 idetape_discard_read_pipeline(drive, 0);
3121 return (idetape_flush_tape_buffers(drive));
/* Retension: unload then reload the medium. */
3123 idetape_discard_read_pipeline(drive, 0);
3124 idetape_create_load_unload_cmd(drive, &pc,IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
3125 return (idetape_queue_pc_tail(drive, &pc));
/* Space to end of recorded data. */
3127 idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
3128 return (idetape_queue_pc_tail(drive, &pc));
/* Erase: rewind first, then erase from BOP. */
3130 (void) idetape_rewind_tape(drive);
3131 idetape_create_erase_cmd(&pc);
3132 return (idetape_queue_pc_tail(drive, &pc));
/* Set user block size (0 re-enables auto-detection). */
3135 if (mt_count < tape->tape_block_size || mt_count % tape->tape_block_size)
3137 tape->user_bs_factor = mt_count / tape->tape_block_size;
3138 clear_bit(IDETAPE_DETECT_BS, &tape->flags);
3140 set_bit(IDETAPE_DETECT_BS, &tape->flags);
/* Seek to an absolute block / switch partition. */
3143 idetape_discard_read_pipeline(drive, 0);
3144 return idetape_position_tape(drive, mt_count * tape->user_bs_factor, tape->partition, 0);
3146 idetape_discard_read_pipeline(drive, 0);
3147 return (idetape_position_tape(drive, 0, mt_count, 0));
/* Explicitly lock the door. */
3151 if (!idetape_create_prevent_cmd(drive, &pc, 1))
3153 retval = idetape_queue_pc_tail(drive, &pc);
3154 if (retval) return retval;
3155 tape->door_locked = DOOR_EXPLICITLY_LOCKED;
/* Unlock the door. */
3158 if (!idetape_create_prevent_cmd(drive, &pc, 0))
3160 retval = idetape_queue_pc_tail(drive, &pc);
3161 if (retval) return retval;
3162 tape->door_locked = DOOR_UNLOCKED;
3165 printk(KERN_ERR "ide-tape: MTIO operation %d not "
3166 "supported\n", mt_op);
3172 * Our character device ioctls. General mtio.h magnetic io commands are
3173 * supported here, and not in the corresponding block interface. Our own
3174 * ide-tape ioctls are supported on both interfaces.
/*
 * Character-device ioctl entry: handles the mtio.h commands
 * (MTIOCTOP dispatching to idetape_mtioctop, MTIOCGET/MTIOCPOS
 * reporting a position corrected for buffered pipeline data) and
 * forwards anything else to idetape_blkdev_ioctl().  A write
 * pipeline is flushed first so reported state is current.
 */
3176 static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
3177 unsigned int cmd, unsigned long arg)
3179 struct ide_tape_obj *tape = ide_tape_f(file);
3180 ide_drive_t *drive = tape->drive;
3184 int block_offset = 0, position = tape->first_frame_position;
3185 void __user *argp = (void __user *)arg;
3187 debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
3189 tape->restart_speed_control_req = 1;
3190 if (tape->chrdev_direction == idetape_direction_write) {
3191 idetape_empty_write_pipeline(drive);
3192 idetape_flush_tape_buffers(drive);
/* Correct the reported position for unread pipeline data. */
3194 if (cmd == MTIOCGET || cmd == MTIOCPOS) {
3195 block_offset = idetape_pipeline_size(drive) / (tape->tape_block_size * tape->user_bs_factor);
3196 if ((position = idetape_read_position(drive)) < 0)
3201 if (copy_from_user(&mtop, argp, sizeof (struct mtop)))
3203 return (idetape_mtioctop(drive,mtop.mt_op,mtop.mt_count));
3205 memset(&mtget, 0, sizeof (struct mtget));
3206 mtget.mt_type = MT_ISSCSI2;
3207 mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
3208 mtget.mt_dsreg = ((tape->tape_block_size * tape->user_bs_factor) << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
3209 if (tape->drv_write_prot) {
3210 mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
3212 if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
3216 mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
3217 if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
/* Unknown cmd: drop the read pipeline and defer to blkdev ioctl. */
3221 if (tape->chrdev_direction == idetape_direction_read)
3222 idetape_discard_read_pipeline(drive, 1);
3223 return idetape_blkdev_ioctl(drive, cmd, arg);
3228 * Do a mode sense page 0 with block descriptor and if it succeeds set the tape
3229 * block size with the reported value.
/*
 * MODE SENSE page 0 with block descriptor: extract the 24-bit
 * block length from descriptor bytes 5-7 (offset by the 4-byte
 * mode header) and the write-protect bit from header byte 2.
 * Falls back to a 32k block size if the command fails and no size
 * is known yet.
 */
3231 static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
3233 idetape_tape_t *tape = drive->driver_data;
3236 idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
3237 if (idetape_queue_pc_tail(drive, &pc)) {
3238 printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
3239 if (tape->tape_block_size == 0) {
3240 printk(KERN_WARNING "ide-tape: Cannot deal with zero "
3241 "block size, assuming 32k\n");
3242 tape->tape_block_size = 32768;
/* Big-endian 24-bit block length from the block descriptor. */
3246 tape->tape_block_size = (pc.buffer[4 + 5] << 16) +
3247 (pc.buffer[4 + 6] << 8) +
3249 tape->drv_write_prot = (pc.buffer[2] & 0x80) >> 7;
3253 * Our character device open function.
/*
 * Character-device open: resolve the drive from the minor number,
 * enforce single-opener via the IDETAPE_BUSY bit, wait for the
 * drive to become ready, resynchronize position, read the block
 * size and write-protect status, and lock the door.  Minor bits
 * 0x40/0x80 select device variants (hence the ~0xc0 mask).
 */
3255 static int idetape_chrdev_open (struct inode *inode, struct file *filp)
3257 unsigned int minor = iminor(inode), i = minor & ~0xc0;
3259 idetape_tape_t *tape;
3263 if (i >= MAX_HWIFS * MAX_DRIVES)
3266 tape = ide_tape_chrdev_get(i);
3270 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
3273 * We really want to do nonseekable_open(inode, filp); here, but some
3274 * versions of tar incorrectly call lseek on tapes and bail out if that
3275 * fails. So we disallow pread() and pwrite(), but permit lseeks.
3277 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
3279 drive = tape->drive;
3281 filp->private_data = tape;
/* Only one opener at a time. */
3283 if (test_and_set_bit(IDETAPE_BUSY, &tape->flags)) {
3288 retval = idetape_wait_ready(drive, 60 * HZ);
3290 clear_bit(IDETAPE_BUSY, &tape->flags);
3291 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
3295 idetape_read_position(drive);
3296 if (!test_bit(IDETAPE_ADDRESS_VALID, &tape->flags))
3297 (void)idetape_rewind_tape(drive);
3299 if (tape->chrdev_direction != idetape_direction_read)
3300 clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
3302 /* Read block size and write protect status from drive. */
3303 ide_tape_get_bsize_from_bdesc(drive);
3305 /* Set write protect flag if device is opened as read-only. */
3306 if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
3307 tape->write_prot = 1;
3309 tape->write_prot = tape->drv_write_prot;
3311 /* Make sure drive isn't write protected if user wants to write. */
3312 if (tape->write_prot) {
3313 if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
3314 (filp->f_flags & O_ACCMODE) == O_RDWR) {
3315 clear_bit(IDETAPE_BUSY, &tape->flags);
3322 * Lock the tape drive door so user can't eject.
3324 if (tape->chrdev_direction == idetape_direction_none) {
3325 if (idetape_create_prevent_cmd(drive, &pc, 1)) {
3326 if (!idetape_queue_pc_tail(drive, &pc)) {
3327 if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
3328 tape->door_locked = DOOR_LOCKED;
3332 idetape_restart_speed_control(drive);
3333 tape->restart_speed_control_req = 0;
/*
 * Finish a write stream on release: flush the write pipeline, pad
 * the final user block with zeros so it fills a whole user-block
 * multiple, write a trailing filemark and flush the drive buffers.
 * (The double flush is in the original; its rationale is not
 * visible here.)
 */
3341 static void idetape_write_release (ide_drive_t *drive, unsigned int minor)
3343 idetape_tape_t *tape = drive->driver_data;
3345 idetape_empty_write_pipeline(drive);
/* Temporary merge stage just for the zero padding. */
3346 tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
3347 if (tape->merge_stage != NULL) {
3348 idetape_pad_zeros(drive, tape->tape_block_size * (tape->user_bs_factor - 1));
3349 __idetape_kfree_stage(tape->merge_stage);
3350 tape->merge_stage = NULL;
3352 idetape_write_filemark(drive);
3353 idetape_flush_tape_buffers(drive);
3354 idetape_flush_tape_buffers(drive);
3358 * Our character device release function.
/*
 * Character-device release: finalize any write stream, discard a
 * read stream, drain the pipeline, free the cache stage, rewind on
 * the auto-rewind minors (< 128) when media is present, unlock the
 * door if we locked it, and clear the busy bit.
 */
3360 static int idetape_chrdev_release (struct inode *inode, struct file *filp)
3362 struct ide_tape_obj *tape = ide_tape_f(filp);
3363 ide_drive_t *drive = tape->drive;
3365 unsigned int minor = iminor(inode);
3368 tape = drive->driver_data;
3370 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
3372 if (tape->chrdev_direction == idetape_direction_write)
3373 idetape_write_release(drive, minor);
3374 if (tape->chrdev_direction == idetape_direction_read) {
3376 idetape_discard_read_pipeline(drive, 1);
3378 idetape_wait_for_pipeline(drive);
3380 if (tape->cache_stage != NULL) {
3381 __idetape_kfree_stage(tape->cache_stage);
3382 tape->cache_stage = NULL;
/* Minors below 128 are the auto-rewind device nodes. */
3384 if (minor < 128 && test_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags))
3385 (void) idetape_rewind_tape(drive);
3386 if (tape->chrdev_direction == idetape_direction_none) {
3387 if (tape->door_locked == DOOR_LOCKED) {
3388 if (idetape_create_prevent_cmd(drive, &pc, 0)) {
3389 if (!idetape_queue_pc_tail(drive, &pc))
3390 tape->door_locked = DOOR_UNLOCKED;
3394 clear_bit(IDETAPE_BUSY, &tape->flags);
3401 * idetape_identify_device is called to check the contents of the
3402 * ATAPI IDENTIFY command results. We return:
3404 * 1 If the tape can be supported by us, based on the information
3407 * 0 If this tape driver is not currently supported by us.
/*
 * Validate the ATAPI IDENTIFY results: we support the drive only if it
 * speaks ATAPI (protocol 2), reports device type "tape" (1), has the
 * removable-medium flag set, and uses 12-byte command packets.
 * Returns nonzero if supported, 0 otherwise.
 */
static int idetape_identify_device (ide_drive_t *drive)
	struct idetape_id_gcw gcw;
	struct hd_driveid *id = drive->id;

	/* No IDENTIFY data at all: assume supported rather than reject. */
	if (drive->id_read == 0)

	/* General-configuration word carries the ATAPI signature bits. */
	*((unsigned short *) &gcw) = id->config;

	/* Check that we can support this device */

	if (gcw.protocol != 2)
		printk(KERN_ERR "ide-tape: Protocol (0x%02x) is not ATAPI\n",
	else if (gcw.device_type != 1)
		printk(KERN_ERR "ide-tape: Device type (0x%02x) is not set "
				"to tape\n", gcw.device_type);
	else if (!gcw.removable)
		printk(KERN_ERR "ide-tape: The removable flag is not set\n");
	else if (gcw.packet_size != 0) {
		/* packet_size == 0 encodes 12-byte packets; anything else is unsupported. */
		printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12 "
				"bytes long\n", gcw.packet_size);
/*
 * Issue a SCSI INQUIRY and cache the vendor / product / firmware-revision
 * strings in the tape structure; also parse a numeric firmware revision
 * of the form "X.YZ" into firmware_revision_num.
 */
static void idetape_get_inquiry_results(ide_drive_t *drive)
	idetape_tape_t *tape = drive->driver_data;

	idetape_create_inquiry_cmd(&pc);
	if (idetape_queue_pc_tail(drive, &pc)) {
		printk(KERN_ERR "ide-tape: %s: can't get INQUIRY results\n",
	/* Standard INQUIRY layout: vendor @8(8), product @16(16), rev @32(4). */
	memcpy(tape->vendor_id, &pc.buffer[8], 8);
	memcpy(tape->product_id, &pc.buffer[16], 16);
	memcpy(tape->firmware_revision, &pc.buffer[32], 4);

	/* Trim trailing padding and NUL-terminate the copied strings. */
	ide_fixstring(tape->vendor_id, 10, 0);
	ide_fixstring(tape->product_id, 18, 0);
	ide_fixstring(tape->firmware_revision, 6, 0);
	r = tape->firmware_revision;
	/* "X.YZ" -> X*100 + Y*10 + Z; only attempted when the dot is in place. */
	if (*(r + 1) == '.')
		tape->firmware_revision_num = (*r - '0') * 100 +
			(*(r + 2) - '0') * 10 + *(r + 3) - '0';
	printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n",
			drive->name, tape->name, tape->vendor_id,
			tape->product_id, tape->firmware_revision);
3466 * Ask the tape about its various parameters. In particular, we will adjust our
3467 * data transfer buffer size to the recommended value as returned by the tape.
3469 static void idetape_get_mode_sense_results (ide_drive_t *drive)
3471 idetape_tape_t *tape = drive->driver_data;
3474 u8 speed, max_speed;
3476 idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
3477 if (idetape_queue_pc_tail(drive, &pc)) {
3478 printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
3479 " some default values\n");
3480 tape->tape_block_size = 512;
3481 put_unaligned(52, (u16 *)&tape->caps[12]);
3482 put_unaligned(540, (u16 *)&tape->caps[14]);
3483 put_unaligned(6*52, (u16 *)&tape->caps[16]);
3486 caps = pc.buffer + 4 + pc.buffer[3];
3488 /* convert to host order and save for later use */
3489 speed = be16_to_cpu(*(u16 *)&caps[14]);
3490 max_speed = be16_to_cpu(*(u16 *)&caps[8]);
3492 put_unaligned(max_speed, (u16 *)&caps[8]);
3493 put_unaligned(be16_to_cpu(*(u16 *)&caps[12]), (u16 *)&caps[12]);
3494 put_unaligned(speed, (u16 *)&caps[14]);
3495 put_unaligned(be16_to_cpu(*(u16 *)&caps[16]), (u16 *)&caps[16]);
3498 printk(KERN_INFO "ide-tape: %s: invalid tape speed "
3499 "(assuming 650KB/sec)\n", drive->name);
3500 put_unaligned(650, (u16 *)&caps[14]);
3503 printk(KERN_INFO "ide-tape: %s: invalid max_speed "
3504 "(assuming 650KB/sec)\n", drive->name);
3505 put_unaligned(650, (u16 *)&caps[8]);
3508 memcpy(&tape->caps, caps, 20);
3510 tape->tape_block_size = 512;
3511 else if (caps[7] & 0x04)
3512 tape->tape_block_size = 1024;
3515 #ifdef CONFIG_IDE_PROC_FS
/*
 * Export the driver's tunables and statistics through the /proc ide
 * settings interface (CONFIG_IDE_PROC_FS builds only).
 */
static void idetape_add_settings (ide_drive_t *drive)
	idetape_tape_t *tape = drive->driver_data;

/*
 * drive setting name read/write data type min max mul_factor div_factor data pointer set function
 */
	ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
			1, 2, (u16 *)&tape->caps[16], NULL);
	/* Pipeline sizing knobs are reported in kB (mul by stage_size/1024). */
	ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
	ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_stages, NULL);
	ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
	ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages, NULL);
	ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_pending_stages, NULL);
	ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
			1, 1, (u16 *)&tape->caps[14], NULL);
	ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1024, &tape->stage_size, NULL);
	/* DSC polling frequency, shown in milliseconds (1000/HZ scaling). */
	ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_frequency, NULL);
	ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
	ide_add_setting(drive, "pipeline_head_speed_c",SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed, NULL);
	ide_add_setting(drive, "pipeline_head_speed_u",SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->uncontrolled_pipeline_head_speed,NULL);
	ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->avg_speed, NULL);
	ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
			1, &tape->debug_mask, NULL);
3542 static inline void idetape_add_settings(ide_drive_t *drive) { ; }
3546 * ide_setup is called to:
3548 * 1. Initialize our various state variables.
3549 * 2. Ask the tape for its capabilities.
3550 * 3. Allocate a buffer which will be used for data
3551 * transfer. The buffer size is chosen based on
3552 * the recommendation which we received in step (2).
3554 * Note that at this point ide.c already assigned us an irq, so that
3555 * we can queue requests here and wait for their completion.
/*
 * One-time per-drive initialization: set up state variables, query the
 * drive's capabilities, size the pipeline from the recommended transfer
 * size and available memory, and pick the best DSC polling frequency.
 */
static void idetape_setup (ide_drive_t *drive, idetape_tape_t *tape, int minor)
	unsigned long t1, tmid, tn, t;
	struct idetape_id_gcw gcw;
	/* caps[12..13] holds the drive's recommended transfer size in blocks. */
	u16 *ctl = (u16 *)&tape->caps[12];

	spin_lock_init(&tape->spinlock);
	drive->dsc_overlap = 1;
	if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
		printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
		drive->dsc_overlap = 0;

	/* Seagate Travan drives do not support DSC overlap. */
	if (strstr(drive->id->model, "Seagate STT3401"))
		drive->dsc_overlap = 0;
	tape->minor = minor;
	/* Device name: "ht0", "ht1", ... */
	tape->name[0] = 'h';
	tape->name[1] = 't';
	tape->name[2] = '0' + minor;
	tape->chrdev_direction = idetape_direction_none;
	tape->pc = tape->pc_stack;
	tape->max_insert_speed = 10000;
	tape->speed_control = 1;
	*((unsigned short *) &gcw) = drive->id->config;
	/* drq_type 1 means the drive raises an interrupt before DRQ. */
	if (gcw.drq_type == 1)
		set_bit(IDETAPE_DRQ_INTERRUPT, &tape->flags);

	tape->min_pipeline = tape->max_pipeline = tape->max_stages = 10;

	idetape_get_inquiry_results(drive);
	idetape_get_mode_sense_results(drive);
	ide_tape_get_bsize_from_bdesc(drive);
	tape->user_bs_factor = 1;
	tape->stage_size = *ctl * tape->tape_block_size;
	/* Halve the recommended transfer size until a stage fits in 64 kB. */
	while (tape->stage_size > 0xffff) {
		printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
		tape->stage_size = *ctl * tape->tape_block_size;
	stage_size = tape->stage_size;
	tape->pages_per_stage = stage_size / PAGE_SIZE;
	if (stage_size % PAGE_SIZE) {
		tape->pages_per_stage++;
		/* Unused tail of the last page of each stage. */
		tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE;

	/* Select the "best" DSC read/write polling freq and pipeline size. */
	speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);

	/* Enough stages to buffer ~10 seconds of streaming at 'speed' KB/s. */
	tape->max_stages = speed * 1000 * 10 / tape->stage_size;

	/*
	 * Limit memory use for pipeline to 10% of physical memory
	 */
	if (tape->max_stages * tape->stage_size > si.totalram * si.mem_unit / 10)
		tape->max_stages = si.totalram * si.mem_unit / (10 * tape->stage_size);
	tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
	tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
	tape->max_pipeline = min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
	if (tape->max_stages == 0)
		tape->max_stages = tape->min_pipeline = tape->max_pipeline = 1;

	/* Candidate DSC intervals: per-stage, mid-buffer, and FIFO-threshold times. */
	t1 = (tape->stage_size * HZ) / (speed * 1000);
	tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
	tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);

	if (tape->max_stages)

	/*
	 * Ensure that the number we got makes sense; limit
	 * it within IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
	 */
	tape->best_dsc_rw_frequency = max_t(unsigned long, min_t(unsigned long, t, IDETAPE_DSC_RW_MAX), IDETAPE_DSC_RW_MIN);
	printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
		"%dkB pipeline, %lums tDSC%s\n",
		drive->name, tape->name, *(u16 *)&tape->caps[14],
		(*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
		tape->stage_size / 1024,
		tape->max_stages * tape->stage_size / 1024,
		tape->best_dsc_rw_frequency * 1000 / HZ,
		drive->using_dma ? ", DMA":"");

	idetape_add_settings(drive);
/*
 * Driver-model remove hook: detach the driver's proc entries and the
 * block-device region; final teardown happens in ide_tape_release()
 * once the last reference is dropped.
 */
static void ide_tape_remove(ide_drive_t *drive)
	idetape_tape_t *tape = drive->driver_data;

	ide_proc_unregister_driver(drive, tape->driver);

	ide_unregister_region(tape->disk);
/*
 * kref release callback: called when the last reference to the tape
 * object is dropped. Destroys the sysfs device nodes (rewind and
 * no-rewind minors), clears the driver/device back-pointers and frees
 * the per-drive state.
 */
static void ide_tape_release(struct kref *kref)
	struct ide_tape_obj *tape = to_ide_tape(kref);
	ide_drive_t *drive = tape->drive;
	struct gendisk *g = tape->disk;

	/* Must never be released with pipeline stages or merge data pending. */
	BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);

	drive->dsc_overlap = 0;
	drive->driver_data = NULL;
	/* Two chardevs per unit: minor (rewind) and minor+128 (no-rewind). */
	device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
	device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor + 128));
	idetape_devs[tape->minor] = NULL;
	g->private_data = NULL;
3679 #ifdef CONFIG_IDE_PROC_FS
/*
 * /proc read handler: report the tape's device name ("htN").
 */
static int proc_idetape_read_name
	(char *page, char **start, off_t off, int count, int *eof, void *data)
	ide_drive_t *drive = (ide_drive_t *) data;
	idetape_tape_t *tape = drive->driver_data;

	len = sprintf(out, "%s\n", tape->name);
	PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
/* /proc/ide/*/ entries exposed by this driver (name, mode, read, write). */
static ide_proc_entry_t idetape_proc[] = {
	{ "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
	{ "name", S_IFREG|S_IRUGO, proc_idetape_read_name, NULL },
	{ NULL, 0, NULL, NULL }   /* sentinel */
3699 static int ide_tape_probe(ide_drive_t *);
/* IDE subsystem driver descriptor: probe/remove hooks, request handlers
 * and error/abort callbacks for the tape media type. */
static ide_driver_t idetape_driver = {
		.owner = THIS_MODULE,
		.bus = &ide_bus_type,
	.probe = ide_tape_probe,
	.remove = ide_tape_remove,
	.version = IDETAPE_VERSION,
	.supports_dsc_overlap = 1,
	.do_request = idetape_do_request,
	.end_request = idetape_end_request,
	/* Use the generic IDE error/abort paths. */
	.error = __ide_error,
	.abort = __ide_abort,
#ifdef CONFIG_IDE_PROC_FS
	.proc = idetape_proc,
3722 * Our character device supporting functions, passed to register_chrdev.
/*
 * Our character device supporting functions, passed to register_chrdev.
 */
static const struct file_operations idetape_fops = {
	.owner = THIS_MODULE,
	.read = idetape_chrdev_read,
	.write = idetape_chrdev_write,
	.ioctl = idetape_chrdev_ioctl,
	.open = idetape_chrdev_open,
	.release = idetape_chrdev_release,
/*
 * Block-device open: take a reference on the tape object attached to
 * this gendisk.
 */
static int idetape_open(struct inode *inode, struct file *filp)
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ide_tape_obj *tape;

	/* Fails when the device is going away and no reference can be taken. */
	if (!(tape = ide_tape_get(disk)))
/*
 * Block-device release: drop the reference taken in idetape_open().
 */
static int idetape_release(struct inode *inode, struct file *filp)
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ide_tape_obj *tape = ide_tape_g(disk);
/*
 * Block-device ioctl: try the generic IDE ioctls first, and fall back to
 * the tape-specific blkdev ioctls for commands the generic layer rejects.
 */
static int idetape_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
	struct block_device *bdev = inode->i_bdev;
	struct ide_tape_obj *tape = ide_tape_g(bdev->bd_disk);
	ide_drive_t *drive = tape->drive;
	int err = generic_ide_ioctl(drive, file, bdev, cmd, arg);
	/* Generic layer didn't recognize the command: try the tape ioctls. */
		err = idetape_blkdev_ioctl(drive, cmd, arg);
/* Block-device operations for the tape's gendisk. */
static struct block_device_operations idetape_block_ops = {
	.owner = THIS_MODULE,
	.open = idetape_open,
	.release = idetape_release,
	.ioctl = idetape_ioctl,
/*
 * Driver-model probe: claim a drive if it is a supported ATAPI tape,
 * allocate the per-drive state and gendisk, assign a free minor, run
 * idetape_setup() and create the chardev sysfs nodes.
 */
static int ide_tape_probe(ide_drive_t *drive)
	idetape_tape_t *tape;

	/*
	 * NOTE(review): haystack/needle look swapped, but this accepts an
	 * empty driver_req as well as "ide-tape" — confirm intent before
	 * "fixing" the argument order.
	 */
	if (!strstr("ide-tape", drive->driver_req))
	if (!drive->present)
	if (drive->media != ide_tape)
	if (!idetape_identify_device (drive)) {
		printk(KERN_ERR "ide-tape: %s: not supported by this version of ide-tape\n", drive->name);
	/* User configured this drive for SCSI emulation instead. */
	printk("ide-tape: passing drive %s to ide-scsi emulation.\n", drive->name);
	/* OnStream drives need the osst driver via ide-scsi; warn only. */
	if (strstr(drive->id->model, "OnStream DI-")) {
		printk(KERN_WARNING "ide-tape: Use drive %s with ide-scsi emulation and osst.\n", drive->name);
		printk(KERN_WARNING "ide-tape: OnStream support will be removed soon from ide-tape!\n");
	tape = kzalloc(sizeof (idetape_tape_t), GFP_KERNEL);
		printk(KERN_ERR "ide-tape: %s: Can't allocate a tape structure\n", drive->name);

	g = alloc_disk(1 << PARTN_BITS);

	ide_init_disk(g, drive);

	ide_proc_register_driver(drive, &idetape_driver);

	kref_init(&tape->kref);

	tape->drive = drive;
	tape->driver = &idetape_driver;

	g->private_data = &tape->driver;

	drive->driver_data = tape;

	/* Pick the first free minor under the global table lock. */
	mutex_lock(&idetape_ref_mutex);
	for (minor = 0; idetape_devs[minor]; minor++)
	idetape_devs[minor] = tape;
	mutex_unlock(&idetape_ref_mutex);

	idetape_setup(drive, tape, minor);

	/* Rewind ("htN") and no-rewind ("nhtN") device nodes. */
	device_create(idetape_sysfs_class, &drive->gendev,
			MKDEV(IDETAPE_MAJOR, minor), "%s", tape->name);
	device_create(idetape_sysfs_class, &drive->gendev,
			MKDEV(IDETAPE_MAJOR, minor + 128), "n%s", tape->name);

	g->fops = &idetape_block_ops;
	ide_register_region(g);
3845 MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
3846 MODULE_LICENSE("GPL");
3848 static void __exit idetape_exit (void)
3850 driver_unregister(&idetape_driver.gen_driver);
3851 class_destroy(idetape_sysfs_class);
3852 unregister_chrdev(IDETAPE_MAJOR, "ht");
/*
 * Module init: create the sysfs class, register the "ht" chardev major
 * and the IDE driver, unwinding each step on failure.
 */
static int __init idetape_init(void)
	idetape_sysfs_class = class_create(THIS_MODULE, "ide_tape");
	if (IS_ERR(idetape_sysfs_class)) {
		idetape_sysfs_class = NULL;
		printk(KERN_ERR "Unable to create sysfs class for ide tapes\n");

	if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
		printk(KERN_ERR "ide-tape: Failed to register character device interface\n");
		goto out_free_class;

	error = driver_register(&idetape_driver.gen_driver);
		goto out_free_driver;

	/*
	 * NOTE(review): the error path below unregisters the driver after
	 * driver_register() failed, and the chrdev major does not appear to
	 * be unregistered on this path — verify the unwind ordering.
	 */
	driver_unregister(&idetape_driver.gen_driver);
	class_destroy(idetape_sysfs_class);
3886 MODULE_ALIAS("ide:*m-tape*");
3887 module_init(idetape_init);
3888 module_exit(idetape_exit);
3889 MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);