/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom make_request_fn function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/
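/*
 * Illustrative summary of the layering described above (a sketch drawn
 * from the comment text, not extra functionality):
 *
 *      file system / buffered I/O
 *              |
 *      pkt_make_request()          - top layer: reads are cloned and sent
 *              |                     straight to the iosched; writes go to
 *              |                     the rb-tree work queue
 *      kcdrwd kernel thread        - read gathering: turns unaligned writes
 *              |                     into full, aligned packets
 *      pkt_iosched_process_queue() - packet I/O scheduler: ordering, flush
 *              |                     cache insertion, speed switching
 *      ide-cd.c / sr.c             - standard CD/DVD device driver
 */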
#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>

#include <asm/uaccess.h>

#define DRIVER_NAME	"pktcdvd"
#if PACKET_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

#if PACKET_DEBUG > 1
#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define VPRINTK(fmt, args...)
#endif
#define MAX_SPEED	0xffff

#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
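/*
 * Worked example for ZONE() (illustrative numbers): settings.size is the
 * packet size in 512-byte sectors and is a power of two here. With
 * size = 64 (a 32kB packet) and offset = 0, a bio starting at sector 100
 * maps to zone 100 & ~63 = 64, i.e. the packet spanning sectors 64..127.
 * A non-zero pd->offset shifts the grid for discs whose first track does
 * not start on a packet boundary.
 */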
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;
static void pkt_bio_finished(struct pktcdvd_device *pd)
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		VPRINTK(DRIVER_NAME": queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}

static void pkt_bio_destructor(struct bio *bio)
	kfree(bio->bi_io_vec);
	kfree(bio);

static struct bio *pkt_bio_alloc(int nr_iovecs)
	struct bio_vec *bvl = NULL;
	struct bio *bio;

	bio = kmalloc(sizeof(struct bio), GFP_KERNEL);
	bvl = kcalloc(nr_iovecs, sizeof(struct bio_vec), GFP_KERNEL);

	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	bio->bi_destructor = pkt_bio_destructor;
/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);

	pkt->frames = frames;
	pkt->w_bio = pkt_bio_alloc(frames);

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
	}

	spin_lock_init(&pkt->lock);

	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt_bio_alloc(1);
		pkt->r_bios[i] = bio;
	}

	/* error path: undo the partial allocations */
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_sector);
	return tmp;
/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
/*
 * Add a bio to a single linked list defined by its head and tail pointers.
 */
static void pkt_add_list_last(struct bio *bio, struct bio **list_head, struct bio **list_tail)
	bio->bi_next = NULL;
	if (*list_tail) {
		BUG_ON((*list_head) == NULL);
		(*list_tail)->bi_next = bio;
	} else {
		BUG_ON((*list_head) != NULL);
		*list_head = bio;
	}
	*list_tail = bio;

/*
 * Remove and return the first bio from a single linked list defined by its
 * head and tail pointers.
 */
static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio **list_tail)
	struct bio *bio;

	if (*list_head == NULL)
		return NULL;

	bio = *list_head;
	*list_head = bio->bi_next;
	if (*list_head == NULL)
		*list_tail = NULL;

	bio->bi_next = NULL;
	return bio;
/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
	char sense[SCSI_SENSE_BUFFERSIZE];
	request_queue_t *q;
	struct request *rq;
	DECLARE_COMPLETION_ONSTACK(wait);

	q = bdev_get_queue(pd->bdev);

	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? WRITE : READ,
			     __GFP_WAIT);
	rq->rq_disk = pd->bdev->bd_disk;

	rq->data = cgc->buffer;
	rq->data_len = cgc->buflen;
	memset(sense, 0, sizeof(sense));

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_HARDBARRIER;
	if (cgc->quiet)
		rq->cmd_flags |= REQ_QUIET;
	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
	if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
		memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	rq->end_io_data = &wait;
	rq->end_io = blk_end_sync_rq;
	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
	generic_unplug_device(q);
	wait_for_completion(&wait);
/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct packet_command *cgc)
	static char *info[9] = { "No sense", "Recovered error", "Not ready",
				 "Medium error", "Hardware error", "Illegal request",
				 "Unit attention", "Data protect", "Blank check" };
	int i;
	struct request_sense *sense = cgc->sense;

	printk(DRIVER_NAME":");
	for (i = 0; i < CDROM_PACKET_SIZE; i++)
		printk(" %02x", cgc->cmd[i]);

	if (sense == NULL) {
		printk("no sense\n");
		return;
	}

	printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);

	if (sense->sense_key > 8) {
		printk(" (INVALID)\n");
		return;
	}

	printk(" (%s)\n", info[sense->sense_key]);
/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although setting
	 * it would allow a much faster close; not setting it is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
/*
 * speed is given in kB/s, as the GPCMD_SET_SPEED command expects (1x CD
 * is roughly 176kB/s); MAX_SPEED (0xffff) requests the drive's maximum
 */
static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed)
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(&cgc);

	return ret;
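/*
 * Illustrative call (assumed values): a 4x CD write speed is requested
 * as roughly 4 * 177 kB/s, matching the 1x ~= 177kB/s convention used
 * by pkt_open_write() below, while letting the drive pick its own read
 * maximum:
 *
 *	pkt_set_speed(pd, 4 * 177, MAX_SPEED);
 */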
/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ) {
		pkt_add_list_last(bio, &pd->iosched.read_queue,
				  &pd->iosched.read_queue_tail);
	} else {
		pkt_add_list_last(bio, &pd->iosched.write_queue,
				  &pd->iosched.write_queue_tail);
	}
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
	struct bio *bio;

	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = (pd->iosched.read_queue != NULL);
		writes_queued = (pd->iosched.write_queue != NULL);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			return;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = pd->iosched.write_queue;
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_sector == pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK(DRIVER_NAME": write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK(DRIVER_NAME": read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing) {
			bio = pkt_get_list_first(&pd->iosched.write_queue,
						 &pd->iosched.write_queue_tail);
		} else {
			bio = pkt_get_list_first(&pd->iosched.read_queue,
						 &pd->iosched.read_queue_tail);
		}
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads += bio->bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
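/*
 * Illustrative trace of the policy above (assumed numbers): suppose
 * iosched.last_write = 1000 and the write queue head also starts at
 * sector 1000 -- no seek is needed, so writing continues uninterrupted.
 * If instead the next write starts at sector 5000 while reads are
 * queued, need_write_seek is true: the scheduler lets pending bios
 * drain, issues the cache flush, and switches to servicing reads first.
 */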
/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q)
	if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		printk(DRIVER_NAME": cdrom max_phys_segments too small\n");
		return -EIO;
	}
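/*
 * Worked example (illustrative): a 64kB packet has settings.size = 128
 * (512-byte sectors), i.e. (128 << 9) / CD_FRAMESIZE = 32 frames of 2kB.
 * A controller with max_phys_segments >= 32 can take one segment per
 * frame; with 4kB pages, (128 << 9) / PAGE_SIZE = 16 segments suffice
 * if the frames are first copied into pkt->pages[] (PACKET_MERGE_SEGS).
 */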
/*
 * Copy CD_FRAMESIZE bytes from src_bio into a destination page
 */
static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct page *dst_page, int dst_offs)
	unsigned int copy_size = CD_FRAMESIZE;

	while (copy_size > 0) {
		struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
		void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) +
			src_bvl->bv_offset + offs;
		void *vto = page_address(dst_page) + dst_offs;
		int len = min_t(int, copy_size, src_bvl->bv_len - offs);

		memcpy(vto, vfrom, len);
		kunmap_atomic(vfrom, KM_USER0);

		seg++;
		offs = 0;
		dst_offs += len;
		copy_size -= len;
	}
/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which
 *    is necessary for some scsi controllers.
 * b) The data can be used as cache to avoid read requests if we receive a
 *    new write request for the same zone.
 */
static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
	int f, p, offs;

	/* Copy all data to pkt->pages[] */
	p = 0;
	offs = 0;
	for (f = 0; f < pkt->frames; f++) {
		if (bvec[f].bv_page != pkt->pages[p]) {
			void *vfrom = kmap_atomic(bvec[f].bv_page, KM_USER0) + bvec[f].bv_offset;
			void *vto = page_address(pkt->pages[p]) + offs;
			memcpy(vto, vfrom, CD_FRAMESIZE);
			kunmap_atomic(vfrom, KM_USER0);
			bvec[f].bv_page = pkt->pages[p];
			bvec[f].bv_offset = offs;
		} else {
			BUG_ON(bvec[f].bv_offset != offs);
		}
		offs += CD_FRAMESIZE;
		if (offs >= PAGE_SIZE) {
			offs = 0;
			p++;
		}
	}
static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;

	if (bio->bi_size)
		return 1;

	VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
		(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);

	if (err)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
	return 0;

static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int err)
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;

	if (bio->bi_size)
		return 1;

	VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
	return 0;
/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(!pkt->orig_bios);

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		VPRINTK("pkt_gather_data: zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;
		if (written[f])
			continue;
		bio = pkt->r_bios[f];
		bio_init(bio);
		bio->bi_max_vecs = 1;
		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio->bi_rw = READ;
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
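/*
 * Illustrative example (assumed numbers): for a 32-frame packet whose
 * queued writes cover frames 4..11 and 20..27, written[] marks those
 * frames and read bios are scheduled only for frames 0..3, 12..19 and
 * 28..31; io_wait then counts the 16 outstanding reads before the
 * packet can be written.
 */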
/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
/*
 * recover a failed write, query for relocation if possible
 *
 * returns 1 if recovery is possible, or 0 if not
 *
 */
static int pkt_start_recovery(struct packet_data *pkt)
	/*
	 * FIXME. We need help from the file system to implement
	 * recovery handling.
	 */
	return 0;
#if 0
	struct request *rq = pkt->rq;
	struct pktcdvd_device *pd = rq->rq_disk->private_data;
	struct block_device *pkt_bdev;
	struct super_block *sb = NULL;
	unsigned long old_block, new_block;
	sector_t new_sector;

	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
	if (pkt_bdev) {
		sb = get_super(pkt_bdev);
		bdput(pkt_bdev);
	}

	if (!sb)
		return 0;

	if (!sb->s_op || !sb->s_op->relocate_blocks)
		goto out;

	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
		goto out;

	new_sector = new_block * (CD_FRAMESIZE >> 9);
	pkt->sector = new_sector;

	pkt->bio->bi_sector = new_sector;
	pkt->bio->bi_next = NULL;
	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
	pkt->bio->bi_idx = 0;

	BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
	BUG_ON(pkt->bio->bi_private != pkt);

	drop_super(sb);
	return 1;

out:
	drop_super(sb);
	return 0;
#endif
static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	VPRINTK("handle_queue\n");

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		VPRINTK("handle_queue: no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = ZONE(bio->bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		VPRINTK("handle_queue: no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		VPRINTK("pkt_handle_queue: found zone=%llx\n",
			(unsigned long long)ZONE(bio->bi_sector, pd));
		if (ZONE(bio->bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		pkt_add_list_last(bio, &pkt->orig_bios, &pkt->orig_bios_tail);
		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	   below, wake up any waiters */
	wakeup = (pd->write_congestion_on > 0
			&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup)
		blk_clear_queue_congested(pd->disk->queue, WRITE);

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
	struct bio *bio;
	int f;
	int frames_write;
	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;

	for (f = 0; f < pkt->frames; f++) {
		bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
	}

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	frames_write = 0;
	spin_lock(&pkt->lock);
	for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
		int segment = bio->bi_idx;
		int src_offs = 0;
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++) {
			struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);

			while (src_offs >= src_bvl->bv_len) {
				src_offs -= src_bvl->bv_len;
				segment++;
				BUG_ON(segment >= bio->bi_vcnt);
				src_bvl = bio_iovec_idx(bio, segment);
			}

			if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
				bvec[f].bv_page = src_bvl->bv_page;
				bvec[f].bv_offset = src_bvl->bv_offset + src_offs;
			} else {
				pkt_copy_bio_data(bio, segment, src_offs,
						  bvec[f].bv_page, bvec[f].bv_offset);
			}
			src_offs += CD_FRAMESIZE;
			frames_write++;
		}
	}
	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
		frames_write, (unsigned long long)pkt->sector);
	BUG_ON(frames_write != pkt->write_size);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
		pkt_make_local_copy(pkt, bvec);
		pkt->cache_valid = 1;
	} else {
		pkt->cache_valid = 0;
	}

	/* Start the write request */
	bio_init(pkt->w_bio);
	pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE;
	pkt->w_bio->bi_sector = pkt->sector;
	pkt->w_bio->bi_bdev = pd->bdev;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;
	for (f = 0; f < pkt->frames; f++)
		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
			BUG();
	VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);

	atomic_set(&pkt->io_wait, 1);
	pkt->w_bio->bi_rw = WRITE;
	pkt_queue_bio(pd, pkt->w_bio);
static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
	struct bio *bio, *next;

	if (!uptodate)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	bio = pkt->orig_bios;
	while (bio) {
		next = bio->bi_next;
		bio->bi_next = NULL;
		bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
		bio = next;
	}
	pkt->orig_bios = pkt->orig_bios_tail = NULL;
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
	int uptodate;

	VPRINTK("run_state_machine: pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			if (pkt_start_recovery(pkt)) {
				pkt_start_write(pd, pkt);
			} else {
				VPRINTK("No recovery possible\n");
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			}
			break;

		case PACKET_FINISHED_STATE:
			uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
			pkt_finish_packet(pkt, uptodate);
			return;
		}
	}
static void pkt_handle_packets(struct pktcdvd_device *pd)
	struct packet_data *pkt, *next;

	VPRINTK("pkt_handle_packets\n");

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
static void pkt_count_states(struct pktcdvd_device *pd, int *states)
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, -20);

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2], states[3],
					states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			generic_unplug_device(bdev_get_queue(pd->bdev));

			VPRINTK("kcdrwd: sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			VPRINTK("kcdrwd: wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (signal_pending(current)) {
				flush_signals(current);
			}
			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another packet.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}
static void pkt_print_settings(struct pktcdvd_device *pd)
	printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
	printk("%u blocks, ", pd->settings.size >> 2);
	printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);

static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);

static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written)
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret;

	if ((ret = pkt_get_disc_info(pd, &di)))
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
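/*
 * Worked example for the fallback above (assumed values): with
 * track_start = 0, track_size = 10000 and free_blocks = 2000, the
 * estimate is 0 + 10000 - (2000 + 7) = 7993 blocks. The extra 7 matches
 * the link-loss blocks used elsewhere in this driver
 * (settings.link_loss = 7).
 */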
/*
 * write mode select package based on pd->settings
 */
static int pkt_set_write_settings(struct pktcdvd_device *pd)
	struct packet_command cgc;
	struct request_sense sense;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	if ((ret = pkt_mode_select(pd, &cgc))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
	switch (pd->mmc3_profile) {
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
	switch (pd->mmc3_profile) {
		case 0x0a: /* CD-RW */
		case 0xffff: /* MMC3 not supported */
			break;
		case 0x1a: /* DVD+RW */
		case 0x13: /* DVD-RW */
		case 0x12: /* DVD-RAM */
			/* writable, no CD-specific checks needed */
			return 1;
		default:
			VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile);
			return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		printk(DRIVER_NAME": Unknown disc. No track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		printk(DRIVER_NAME": Disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		printk(DRIVER_NAME": Can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
static int pkt_probe_settings(struct pktcdvd_device *pd)
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	if ((ret = pkt_get_disc_info(pd, &di))) {
		printk(DRIVER_NAME": failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
		printk(DRIVER_NAME": failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		printk(DRIVER_NAME": can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		printk(DRIVER_NAME": detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		printk(DRIVER_NAME": packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
		case PACKET_MODE1:
			pd->settings.block_mode = PACKET_BLOCK_MODE1;
			break;
		case PACKET_MODE2:
			pd->settings.block_mode = PACKET_BLOCK_MODE2;
			break;
		default:
			printk(DRIVER_NAME": unknown data mode\n");
			return -EROFS;
	}
	return 0;
/*
 * enable/disable write caching on drive
 */
static int pkt_write_caching(struct pktcdvd_device *pd, int set)
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	int ret;

	memset(buf, 0, sizeof(buf));
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
		return ret;

	buf[pd->mode_offset + 10] |= (!!set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		printk(DRIVER_NAME": write caching control failed\n");
		pkt_dump_sense(&cgc);
	} else if (!ret && set)
		printk(DRIVER_NAME": enabled write caching on %s\n", pd->name);
	return ret;
static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = lockflag ? 1 : 0;
	return pkt_generic_packet(pd, &cgc);
/*
 * Returns drive maximum write speed
 */
static int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned *write_speed)
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	memset(buf, 0, sizeof(buf));
	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sense = &sense;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(&cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
/* These tables from cdrecord - I don't have orange book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
};
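/*
 * Example lookup (illustrative): an ATIP whose disc sub-type field is 1
 * (high speed CD-RW) and whose A1 speed code is 4 yields
 * hs_clv_to_speed[4] = 10, i.e. a 10x maximum media speed;
 * pkt_media_speed() below performs exactly this indexing.
 */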
/*
 * reads the maximum media speed from ATIP
 */
static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed)
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	unsigned int size, st, sp;
	int ret;

	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[2] = 4; /* READ ATIP */
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(&cgc);
		return ret;
	}
	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
	if (size > sizeof(buf))
		size = sizeof(buf);

	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	if (!(buf[6] & 0x40)) {
		printk(DRIVER_NAME": Disc type is not CD-RW\n");
		return 1;
	}
	if (!(buf[6] & 0x4)) {
		printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n");
		return 1;
	}

	st = (buf[6] >> 3) & 0x7; /* disc sub-type */

	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */

	/* Info from cdrecord */
	switch (st) {
		case 0: /* standard speed */
			*speed = clv_to_speed[sp];
			break;
		case 1: /* high speed */
			*speed = hs_clv_to_speed[sp];
			break;
		case 2: /* ultra high speed */
			*speed = us_clv_to_speed[sp];
			break;
		default:
			printk(DRIVER_NAME": Unknown disc sub-type %d\n",st);
			return 1;
	}
	if (*speed > 0) {
		printk(DRIVER_NAME": Max. media speed: %d\n",*speed);
		return 0;
	} else {
		printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n",sp,st);
		return 1;
	}
static int pkt_perform_opc(struct pktcdvd_device *pd)
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	VPRINTK(DRIVER_NAME": Performing OPC\n");

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.timeout = 60*HZ;
	cgc.cmd[0] = GPCMD_SEND_OPC;
	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(&cgc);
	return ret;
static int pkt_open_write(struct pktcdvd_device *pd)
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	if ((ret = pkt_probe_settings(pd))) {
		VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name);
		return ret;
	}

	if ((ret = pkt_set_write_settings(pd))) {
		DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name);
		return -EIO;
	}

	pkt_write_caching(pd, USE_WCACHING);

	if ((ret = pkt_get_max_speed(pd, &write_speed)))
		write_speed = 16 * 177;
	switch (pd->mmc3_profile) {
		case 0x13: /* DVD-RW */
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed);
			break;
		default:
			if ((ret = pkt_media_speed(pd, &media_write_speed)))
				media_write_speed = 16;
			write_speed = min(write_speed, media_write_speed * 177);
			DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176);
			break;
	}
	read_speed = write_speed;

	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
		DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name);
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	if ((ret = pkt_perform_opc(pd))) {
		DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name);
	}

	return 0;
/*
 * called at open time.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, int write)
	int ret;
	long lba;
	request_queue_t *q;

	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
	 * so bdget() can't fail.
	 */
	bdget(pd->bdev->bd_dev);
	if ((ret = blkdev_get(pd->bdev, FMODE_READ, O_RDONLY)))
		goto out;

	if ((ret = bd_claim(pd->bdev, pd)))
		goto out_putdev;

	if ((ret = pkt_get_last_written(pd, &lba))) {
		printk(DRIVER_NAME": pkt_get_last_written failed\n");
		goto out_unclaim;
	}

	set_capacity(pd->disk, lba << 2);
	set_capacity(pd->bdev->bd_disk, lba << 2);
	bd_set_size(pd->bdev, (loff_t)lba << 11);

	q = bdev_get_queue(pd->bdev);
	if (write) {
		if ((ret = pkt_open_write(pd)))
			goto out_unclaim;
		/*
		 * Some CDRW drives can not handle writes larger than one packet,
		 * even if the size is a multiple of the packet size.
		 */
		spin_lock_irq(q->queue_lock);
		blk_queue_max_sectors(q, pd->settings.size);
		spin_unlock_irq(q->queue_lock);
		set_bit(PACKET_WRITABLE, &pd->flags);
	} else {
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	}

	if ((ret = pkt_set_segment_merging(pd, q)))
		goto out_unclaim;

	if (write) {
		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
			printk(DRIVER_NAME": not enough memory for buffers\n");
			ret = -ENOMEM;
			goto out_unclaim;
		}
		printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
	}

	return 0;

out_unclaim:
	bd_release(pd->bdev);
out_putdev:
	blkdev_put(pd->bdev);
out:
	return ret;
/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
	if (flush && pkt_flush_cache(pd))
		DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name);

	pkt_lock_door(pd, 0);

	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	bd_release(pd->bdev);
	blkdev_put(pd->bdev);

	pkt_shrink_pktlist(pd);
static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
	if (dev_minor >= MAX_WRITERS)
		return NULL;
	return pkt_devs[dev_minor];

static int pkt_open(struct inode *inode, struct file *file)
	struct pktcdvd_device *pd = NULL;
	int ret;

	VPRINTK(DRIVER_NAME": entering open\n");

	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(iminor(inode));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		if ((file->f_mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
			goto out_dec;
		}
	} else {
		ret = pkt_open_dev(pd, file->f_mode & FMODE_WRITE);
		if (ret)
			goto out_dec;
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(inode->i_bdev, CD_FRAMESIZE);
	}

	mutex_unlock(&ctl_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
	mutex_unlock(&ctl_mutex);
	return ret;
static int pkt_close(struct inode *inode, struct file *file)
	struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	return ret;
static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	if (bio->bi_size)
		return 1;

	bio_put(bio);
	bio_endio(psd->bio, psd->bio->bi_size, err);
	mempool_free(psd, psd_pool);
	pkt_bio_finished(pd);
	return 0;
static int pkt_make_request(request_queue_t *q, struct bio *bio)
	struct pktcdvd_device *pd;
	char b[BDEVNAME_SIZE];
	sector_t zone;
	struct packet_data *pkt;
	int was_empty, blocked_bio;
	struct pkt_rb_node *node;

	pd = q->queuedata;
	if (!pd) {
		printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
		goto end_io;
	}

	/*
	 * Clone READ bios so we can have our own bi_end_io callback.
	 */
	if (bio_data_dir(bio) == READ) {
		struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
		struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);

		psd->pd = pd;
		psd->bio = bio;
		cloned_bio->bi_bdev = pd->bdev;
		cloned_bio->bi_private = psd;
		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
		pd->stats.secs_r += bio->bi_size >> 9;
		pkt_queue_bio(pd, cloned_bio);
		return 0;
	}

	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
		printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n",
			pd->name, (unsigned long long)bio->bi_sector);
		goto end_io;
	}

	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
		printk(DRIVER_NAME": wrong bio size\n");
		goto end_io;
	}

	blk_queue_bounce(q, &bio);

	zone = ZONE(bio->bi_sector, pd);
	VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_sector,
		(unsigned long long)(bio->bi_sector + bio_sectors(bio)));

	/* Check if we have to split the bio */
	{
		struct bio_pair *bp;
		sector_t last_zone;
		int first_sectors;

		last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
		if (last_zone != zone) {
			BUG_ON(last_zone != zone + pd->settings.size);
			first_sectors = last_zone - bio->bi_sector;
			bp = bio_split(bio, bio_split_pool, first_sectors);
			BUG_ON(!bp);
			pkt_make_request(q, &bp->bio1);
			pkt_make_request(q, &bp->bio2);
			bio_pair_release(bp);
			return 0;
		}
	}

	/*
	 * If we find a matching packet in state WAITING or READ_WAIT, we can
	 * just append this bio to that packet.
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	blocked_bio = 0;
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (pkt->sector == zone) {
			spin_lock(&pkt->lock);
			if ((pkt->state == PACKET_WAITING_STATE) ||
			    (pkt->state == PACKET_READ_WAIT_STATE)) {
				pkt_add_list_last(bio, &pkt->orig_bios,
						  &pkt->orig_bios_tail);
				pkt->write_size += bio->bi_size / CD_FRAMESIZE;
				if ((pkt->write_size >= pkt->frames) &&
				    (pkt->state == PACKET_WAITING_STATE)) {
					atomic_inc(&pkt->run_sm);
					wake_up(&pd->wqueue);
				}
				spin_unlock(&pkt->lock);
				spin_unlock(&pd->cdrw.active_list_lock);
				return 0;
			} else {
				blocked_bio = 1;
			}
			spin_unlock(&pkt->lock);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);

	/*
	 * Test if there is enough room left in the bio work queue
	 * (queue size >= congestion on mark).
	 * If not, wait till the work queue size is below the congestion off mark.
	 */
	spin_lock(&pd->lock);
	if (pd->write_congestion_on > 0
	    && pd->bio_queue_size >= pd->write_congestion_on) {
		blk_set_queue_congested(q, WRITE);
		do {
			spin_unlock(&pd->lock);
			congestion_wait(WRITE, HZ);
			spin_lock(&pd->lock);
		} while(pd->bio_queue_size > pd->write_congestion_off);
	}
	spin_unlock(&pd->lock);

	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(pd->rb_pool, GFP_NOIO);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);

	/*
	 * Wake up the worker thread.
	 */
	atomic_set(&pd->scan_queue, 1);
	if (was_empty) {
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
	return 0;
end_io:
	bio_io_error(bio, bio->bi_size);
	return 0;
static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec)
	struct pktcdvd_device *pd = q->queuedata;
	sector_t zone = ZONE(bio->bi_sector, pd);
	int used = ((bio->bi_sector - zone) << 9) + bio->bi_size;
	int remaining = (pd->settings.size << 9) - used;
	int remaining2;

	/*
	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
	 * boundary, pkt_make_request() will split the bio.
	 */
	remaining2 = PAGE_SIZE - bio->bi_size;
	remaining = max(remaining, remaining2);

	BUG_ON(remaining < 0);
	return remaining;
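/*
 * Worked example (assumed numbers): for a 64kB packet (settings.size =
 * 128 sectors), a write bio starting 120 sectors into its zone with
 * bi_size = 2048 gives used = (120 << 9) + 2048 = 63488, so remaining =
 * 65536 - 63488 = 2048. remaining2 = 4096 - 2048 = 2048 as well, so up
 * to 2kB more may be merged before the packet boundary forces a split.
 */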
static void pkt_init_queue(struct pktcdvd_device *pd)
	request_queue_t *q = pd->disk->queue;

	blk_queue_make_request(q, pkt_make_request);
	blk_queue_hardsect_size(q, CD_FRAMESIZE);
	blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
	blk_queue_merge_bvec(q, pkt_merge_bvec);
	q->queuedata = pd;
static int pkt_seq_show(struct seq_file *m, void *p)
	struct pktcdvd_device *pd = m->private;
	char *msg;
	char bdev_buf[BDEVNAME_SIZE];
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
		   bdevname(pd->bdev, bdev_buf));

	seq_printf(m, "\nSettings:\n");
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
		   pd->write_congestion_off,
		   pd->write_congestion_on);
	return 0;
static int pkt_seq_open(struct inode *inode, struct file *file)
	return single_open(file, pkt_seq_show, PDE(inode)->data);

static struct file_operations pkt_proc_fops = {
	.open	 = pkt_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release
};
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
	int i;
	int ret = 0;
	char b[BDEVNAME_SIZE];
	struct proc_dir_entry *proc;
	struct block_device *bdev;

	if (pd->pkt_dev == dev) {
		printk(DRIVER_NAME": Recursive setup not allowed\n");
		return -EBUSY;
	}
	for (i = 0; i < MAX_WRITERS; i++) {
		struct pktcdvd_device *pd2 = pkt_devs[i];
		if (!pd2)
			continue;
		if (pd2->bdev->bd_dev == dev) {
			printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b));
			return -EBUSY;
		}
		if (pd2->pkt_dev == dev) {
			printk(DRIVER_NAME": Can't chain pktcdvd devices\n");
			return -EBUSY;
		}
	}

	bdev = bdget(dev);
	if (!bdev)
		return -ENOMEM;
	ret = blkdev_get(bdev, FMODE_READ, O_RDONLY | O_NONBLOCK);
	if (ret)
		return ret;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	pd->bdev = bdev;
	set_blocksize(bdev, CD_FRAMESIZE);

	pkt_init_queue(pd);

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
	if (IS_ERR(pd->cdrw.thread)) {
		printk(DRIVER_NAME": can't start kernel thread\n");
		ret = -ENOMEM;
		goto out_mem;
	}

	proc = create_proc_entry(pd->name, 0, pkt_proc);
	if (proc) {
		proc->data = pd;
		proc->proc_fops = &pkt_proc_fops;
	}
	DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
	return 0;

out_mem:
	blkdev_put(bdev);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return ret;
static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
	struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;

	VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));

	switch (cmd) {
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);

	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
		return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);

	default:
		VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
		return -ENOTTY;
	}
static int pkt_media_changed(struct gendisk *disk)
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *attached_disk;

	if (!pd)
		return 0;
	if (!pd->bdev)
		return 0;
	attached_disk = pd->bdev->bd_disk;
	if (!attached_disk)
		return 0;
	return attached_disk->fops->media_changed(attached_disk);

static struct block_device_operations pktcdvd_ops = {
	.owner =		THIS_MODULE,
	.open =			pkt_open,
	.release =		pkt_close,
	.ioctl =		pkt_ioctl,
	.media_changed =	pkt_media_changed,
};
/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
	int idx;
	int ret = -ENOMEM;
	struct pktcdvd_device *pd;
	struct gendisk *disk;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++)
		if (!pkt_devs[idx])
			break;
	if (idx == MAX_WRITERS) {
		printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS);
		ret = -EBUSY;
		goto out_mutex;
	}

	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
	if (!pd)
		goto out_mutex;

	pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
						  sizeof(struct pkt_rb_node));
	if (!pd->rb_pool)
		goto out_mem;

	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
	spin_lock_init(&pd->cdrw.active_list_lock);

	spin_lock_init(&pd->lock);
	spin_lock_init(&pd->iosched.lock);
	sprintf(pd->name, DRIVER_NAME"%d", idx);
	init_waitqueue_head(&pd->wqueue);
	pd->bio_queue = RB_ROOT;

	pd->write_congestion_on  = write_congestion_on;
	pd->write_congestion_off = write_congestion_off;

	disk = alloc_disk(1);
	if (!disk)
		goto out_mem;
	pd->disk = disk;
	disk->major = pktdev_major;
	disk->first_minor = idx;
	disk->fops = &pktcdvd_ops;
	disk->flags = GENHD_FL_REMOVABLE;
	strcpy(disk->disk_name, pd->name);
	disk->private_data = pd;
	disk->queue = blk_alloc_queue(GFP_KERNEL);
	if (!disk->queue)
		goto out_mem2;

	pd->pkt_dev = MKDEV(disk->major, disk->first_minor);
	ret = pkt_new_dev(pd, dev);
	if (ret)
		goto out_new_dev;

	add_disk(disk);

	pkt_devs[idx] = pd;
	*pkt_dev = pd->pkt_dev;

	mutex_unlock(&ctl_mutex);
	return 0;

out_new_dev:
	blk_cleanup_queue(disk->queue);
out_mem2:
	put_disk(disk);
out_mem:
	if (pd->rb_pool)
		mempool_destroy(pd->rb_pool);
	kfree(pd);
out_mutex:
	mutex_unlock(&ctl_mutex);
	printk(DRIVER_NAME": setup of pktcdvd device failed\n");
	return ret;
/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_remove_dev(dev_t pkt_dev)
	struct pktcdvd_device *pd;
	int idx;
	int ret = 0;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++) {
		pd = pkt_devs[idx];
		if (pd && (pd->pkt_dev == pkt_dev))
			break;
	}
	if (idx == MAX_WRITERS) {
		DPRINTK(DRIVER_NAME": dev not setup\n");
		ret = -ENXIO;
		goto out;
	}

	if (pd->refcnt > 0) {
		ret = -EBUSY;
		goto out;
	}
	if (!IS_ERR(pd->cdrw.thread))
		kthread_stop(pd->cdrw.thread);

	blkdev_put(pd->bdev);

	remove_proc_entry(pd->name, pkt_proc);
	DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);

	del_gendisk(pd->disk);
	blk_cleanup_queue(pd->disk->queue);
	put_disk(pd->disk);

	pkt_devs[idx] = NULL;
	mempool_destroy(pd->rb_pool);
	kfree(pd);

	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);

out:
	mutex_unlock(&ctl_mutex);
	return ret;
static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;
	dev_t pkt_dev = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
		break;
	case PKT_CTRL_CMD_STATUS:
		pkt_get_status(&ctrl_cmd);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
static struct file_operations pkt_ctl_fops = {
	.ioctl	 = pkt_ctl_ioctl,
	.owner	 = THIS_MODULE,
};

static struct miscdevice pkt_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= DRIVER_NAME,
	.fops		= &pkt_ctl_fops
};
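/*
 * Minimal user-space sketch of driving this control device (illustrative
 * only; it assumes the misc node appears as /dev/pktcdvd/control and that
 * <linux/pktcdvd.h> provides PACKET_CTRL_CMD and struct pkt_ctrl_command,
 * as used by the pktsetup(8) tool; the caller needs CAP_SYS_ADMIN):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/pktcdvd.h>
 *
 *	int pkt_setup_writer(unsigned cd_major, unsigned cd_minor)
 *	{
 *		struct pkt_ctrl_command c = { 0 };
 *		int fd = open("/dev/pktcdvd/control", O_RDONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		c.command = PKT_CTRL_CMD_SETUP;
 *		// new_encode_dev() format; (major << 8) | minor for minors < 256
 *		c.dev = (cd_major << 8) | cd_minor;
 *		if (ioctl(fd, PACKET_CTRL_CMD, &c) < 0)
 *			return -1;
 *		// c.pkt_dev now holds the new writer's device number
 *		return 0;
 *	}
 */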
static int __init pkt_init(void)
	int ret;

	psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
					       sizeof(struct packet_stacked_data));
	if (!psd_pool)
		return -ENOMEM;

	ret = register_blkdev(pktdev_major, DRIVER_NAME);
	if (ret < 0) {
		printk(DRIVER_NAME": Unable to register block device\n");
		goto out2;
	}
	if (!pktdev_major)
		pktdev_major = ret;

	ret = misc_register(&pkt_misc);
	if (ret) {
		printk(DRIVER_NAME": Unable to register misc device\n");
		goto out;
	}

	mutex_init(&ctl_mutex);

	pkt_proc = proc_mkdir(DRIVER_NAME, proc_root_driver);

	return 0;

out:
	unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
	mempool_destroy(psd_pool);
	return ret;
static void __exit pkt_exit(void)
	remove_proc_entry(DRIVER_NAME, proc_root_driver);
	misc_deregister(&pkt_misc);
	unregister_blkdev(pktdev_major, DRIVER_NAME);
	mempool_destroy(psd_pool);
MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);