/*
 * Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 * to allow user process control of SCSI devices.
 * Development Sponsored by Killy Corp. NY NY
 *
 * Original driver (sg.c):
 * Copyright (C) 1992 Lawrence Foard
 * Version 2 and 3 extensions to driver:
 * Copyright (C) 1998 - 2005 Douglas Gilbert
 *
 * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 */

static int sg_version_num = 30534;	/* 2 digits for each component */
#define SG_VERSION_STR "3.5.34"

/*
 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
 * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
 *   the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *   (otherwise the macros compile to empty statements).
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>

#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>

#include "scsi_logging.h"

#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20061027";

static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif

#define SG_ALLOW_DIO_DEF 0

#define SG_MAX_DEVS 32768

/*
 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d).
 * Then when using 32 bit integers, x * m may overflow during the calculation.
 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
 * calculates the same, but prevents the overflow when both m and d
 * are "small" numbers (like HZ and USER_HZ).
 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
 * in 32 bits.
 */
#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))

#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
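/*
 * Worked example (illustrative, assuming 32-bit int, HZ=1000, USER_HZ=100):
 * converting x = 1500000000 jiffies to user ticks needs x * 100 / 1000.
 * The naive product 1500000000 * 100 overflows, but
 *   MULDIV(1500000000, 100, 1000)
 *     = ((1500000000 % 1000) * 100) / 1000 + (1500000000 / 1000) * 100
 *     = 0 + 150000000
 * which is the exact result. Writing X = q*DIV + r shows the identity:
 * (X * MUL) / DIV = q*MUL + (r * MUL) / DIV for non-negative integers.
 */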
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
   /proc/scsi/sg/def_reserved_size. Each time sg_open() is called a buffer
   of this size (or less if there is not enough memory) will be reserved
   for use by this file descriptor. [Deprecated usage: this variable is also
   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
   the kernel (i.e. it is not a module).] */

static int def_reserved_size = -1;	/* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;
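/*
 * Illustrative user-space sketch (not part of this driver): a process can
 * query and resize the reserve buffer attached to its file descriptor with
 * the SG_GET_RESERVED_SIZE/SG_SET_RESERVED_SIZE ioctls handled in sg_ioctl()
 * below. Error handling and the device path are assumptions.
 *
 *	int fd = open("/dev/sg0", O_RDWR);
 *	int sz;
 *
 *	ioctl(fd, SG_GET_RESERVED_SIZE, &sz);	// current reserve size
 *	sz = 131072;				// ask for a 128 KiB reserve
 *	ioctl(fd, SG_SET_RESERVED_SIZE, &sz);
 */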
static int scatter_elem_sz = SG_SCATTER_SZ;
static int scatter_elem_sz_prev = SG_SCATTER_SZ;

#define SG_SECTOR_SZ 512

static int sg_add(struct device *, struct class_interface *);
static void sg_remove(struct device *, struct class_interface *);

static DEFINE_MUTEX(sg_mutex);

static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock);	/* Also used to lock
					   file descriptor list for device */

static struct class_interface sg_interface = {
	.remove_dev = sg_remove,
};

typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
	unsigned short k_use_sg;	/* Count of kernel scatter-gather pieces */
	unsigned sglist_len;	/* size of malloc'd scatter-gather list ++ */
	unsigned bufflen;	/* Size of (aggregate) data buffer */
	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
	unsigned char cmd_opcode;	/* first byte of command */
} Sg_scatter_hold;

struct sg_device;		/* forward declarations */

typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
	struct sg_request *nextrp;	/* NULL -> tail request (slist) */
	struct sg_fd *parentfp;	/* NULL -> not in use */
	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
	unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
	char orphan;		/* 1 -> drop on sight, 0 -> normal */
	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
	volatile char done;	/* 0->before bh, 1->before read, 2->read */
	struct execute_work ew;
} Sg_request;

typedef struct sg_fd {		/* holds the state of a file descriptor */
	struct list_head sfd_siblings;
	struct sg_device *parentdp;	/* owning device */
	wait_queue_head_t read_wait;	/* queue read until command done */
	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT */
	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
	unsigned save_scat_len;	/* original length of trunc. scat. element */
	Sg_request *headrp;	/* head of request slist, NULL->empty */
	struct fasync_struct *async_qp;	/* used by asynchronous notification */
	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
	char low_dma;		/* as in parent but possibly overridden to 1 */
	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
	volatile char closed;	/* 1 -> fd closed but request(s) outstanding */
	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
	char next_cmd_len;	/* 0 -> automatic (def), >0 -> use on next write() */
	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
	char mmap_called;	/* 0 -> mmap() never called on this fd */
	struct execute_work ew;
} Sg_fd;

typedef struct sg_device {	/* holds the state of each scsi generic device */
	struct scsi_device *device;
	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */
	int sg_tablesize;	/* adapter's max scatter-gather table size */
	u32 index;		/* device index number */
	struct list_head sfds;
	volatile char detached;	/* 0->attached, 1->detached pending removal */
	volatile char exclude;	/* opened for exclusive access */
	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
	struct gendisk *disk;
	struct cdev *cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
} Sg_device;

/* tasklet or soft irq callback */
static void sg_rq_end_io(struct request *rq, int uptodate);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
			   Sg_request * srp);
static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
			    const char __user *buf, size_t count, int blocking,
			    int read_only, int sg_io_owned, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
			   unsigned char *cmnd, int timeout, int blocking);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
static void sg_put_dev(Sg_device *sdp);

#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)

static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
	struct sg_fd *sfp = filp->private_data;

	if (sfp->parentdp->device->type == TYPE_SCANNER)
		return 0;

	return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}

sg_open(struct inode *inode, struct file *filp)
{
	int dev = iminor(inode);
	int flags = filp->f_flags;
	struct request_queue *q;

	mutex_lock(&sg_mutex);
	nonseekable_open(inode, filp);
	SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
	sdp = sg_get_dev(dev);
		retval = PTR_ERR(sdp);

	/* This driver's module count bumped by fops_get in <linux/fs.h> */
	/* Prevent the device driver from vanishing while we sleep */
	retval = scsi_device_get(sdp->device);

	retval = scsi_autopm_get_device(sdp->device);

	if (!((flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device))) {
		/* we are in error recovery for this device */
	}

	if (flags & O_EXCL) {
		if (O_RDONLY == (flags & O_ACCMODE)) {
			retval = -EPERM; /* Can't lock it with read only access */
		}
		if (!list_empty(&sdp->sfds) && (flags & O_NONBLOCK)) {
		}
		__wait_event_interruptible(sdp->o_excl_wait,
			((!list_empty(&sdp->sfds) || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
			retval = res;	/* -ERESTARTSYS because signal hit process */
	} else if (sdp->exclude) {	/* some other fd has an exclusive lock on dev */
		if (flags & O_NONBLOCK) {
		}
		__wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
					   res);
			retval = res;	/* -ERESTARTSYS because signal hit process */
	}

	if (list_empty(&sdp->sfds)) {	/* no existing opens on this device */
		q = sdp->device->request_queue;
		sdp->sg_tablesize = queue_max_segments(q);
	}
	if ((sfp = sg_add_sfp(sdp, dev)))
		filp->private_data = sfp;
		if (flags & O_EXCL) {
			sdp->exclude = 0;	/* undo if error */
			wake_up_interruptible(&sdp->o_excl_wait);
		}

	scsi_autopm_put_device(sdp->device);
	scsi_device_put(sdp->device);
	mutex_unlock(&sg_mutex);
}

/* Following function was formerly called 'sg_close' */
sg_release(struct inode *inode, struct file *filp)
{
	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
	SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));

	wake_up_interruptible(&sdp->o_excl_wait);

	scsi_autopm_put_device(sdp->device);
	kref_put(&sfp->f_ref, sg_remove_sfp);
}

sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
	int req_pack_id = -1;
	struct sg_header *old_hdr = NULL;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
	SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));

	if (!access_ok(VERIFY_WRITE, buf, count))
	if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
		if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
		if (old_hdr->reply_len < 0) {
			if (count >= SZ_SG_IO_HDR) {
				sg_io_hdr_t *new_hdr;
				new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
				retval = __copy_from_user(new_hdr, buf, SZ_SG_IO_HDR);
				req_pack_id = new_hdr->pack_id;
			}
		} else
			req_pack_id = old_hdr->pack_id;
	}
	srp = sg_get_rq_mark(sfp, req_pack_id);
	if (!srp) {		/* now wait on packet to arrive */
		if (filp->f_flags & O_NONBLOCK) {
		}
		retval = 0;	/* following macro beats race condition */
		__wait_event_interruptible(sfp->read_wait,
			(srp = sg_get_rq_mark(sfp, req_pack_id))),
			/* -ERESTARTSYS as signal hit process */
	}
	if (srp->header.interface_id != '\0') {
		retval = sg_new_read(sfp, buf, count, srp);
	}

	if (old_hdr == NULL) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
	}
	memset(old_hdr, 0, SZ_SG_HEADER);
	old_hdr->reply_len = (int) hp->timeout;
	old_hdr->pack_len = old_hdr->reply_len;	/* old, strange behaviour */
	old_hdr->pack_id = hp->pack_id;
	old_hdr->twelve_byte =
	    ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
	old_hdr->target_status = hp->masked_status;
	old_hdr->host_status = hp->host_status;
	old_hdr->driver_status = hp->driver_status;
	if ((CHECK_CONDITION & hp->masked_status) ||
	    (DRIVER_SENSE & hp->driver_status))
		memcpy(old_hdr->sense_buffer, srp->sense_b,
		       sizeof (old_hdr->sense_buffer));
	switch (hp->host_status) {
	/* This setup of 'result' is for backward compatibility and is best
	   ignored by the user who should use target, host + driver status */
	case DID_PASSTHROUGH:
		old_hdr->result = EBUSY;
		old_hdr->result = EIO;
		old_hdr->result = (srp->sense_b[0] == 0 &&
				   hp->masked_status == GOOD) ? 0 : EIO;
		old_hdr->result = EIO;

	/* Now copy the result back to the user buffer. */
	if (count >= SZ_SG_HEADER) {
		if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
		if (count > old_hdr->reply_len)
			count = old_hdr->reply_len;
		if (count > SZ_SG_HEADER) {
			if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {

		count = (old_hdr->result == 0) ? 0 : -EIO;
	sg_finish_rem_req(srp);

sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
	sg_io_hdr_t *hp = &srp->header;

	if (count < SZ_SG_IO_HDR) {
	}
	if ((hp->mx_sb_len > 0) && hp->sbp) {
		if ((CHECK_CONDITION & hp->masked_status) ||
		    (DRIVER_SENSE & hp->driver_status)) {
			int sb_len = SCSI_SENSE_BUFFERSIZE;
			sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
			len = 8 + (int) srp->sense_b[7];	/* Additional sense length field */
			len = (len > sb_len) ? sb_len : len;
			if (copy_to_user(hp->sbp, srp->sense_b, len)) {

	if (hp->masked_status || hp->host_status || hp->driver_status)
		hp->info |= SG_INFO_CHECK;
	if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
	}
	err2 = sg_finish_rem_req(srp);
	return err ? : err2 ? : count;
}

sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
	int mxsize, cmd_size, k;
	int input_size, blocking;
	unsigned char opcode;
	struct sg_header old_hdr;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
	SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));

	if (!((filp->f_flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device)))

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	if (count < SZ_SG_HEADER)
	if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
	blocking = !(filp->f_flags & O_NONBLOCK);
	if (old_hdr.reply_len < 0)
		return sg_new_write(sfp, filp, buf, count,
				    blocking, 0, 0, NULL);
	if (count < (SZ_SG_HEADER + 6))
		return -EIO;	/* The minimum scsi command length is 6 bytes. */

	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
	}
	__get_user(opcode, buf);
	if (sfp->next_cmd_len > 0) {
		if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
			SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
			sfp->next_cmd_len = 0;
			sg_remove_request(sfp, srp);
		}
		cmd_size = sfp->next_cmd_len;
		sfp->next_cmd_len = 0;	/* reset so only this write() is affected */
	} else {
		cmd_size = COMMAND_SIZE(opcode);	/* based on SCSI command group */
		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
	}
	SCSI_LOG_TIMEOUT(4, printk(
		"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
	/* Determine buffer size. */
	input_size = count - cmd_size;
	mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
	mxsize -= SZ_SG_HEADER;
	input_size -= SZ_SG_HEADER;
	if (input_size < 0) {
		sg_remove_request(sfp, srp);
		return -EIO;	/* User did not pass enough bytes for this command. */
	}
	hp->interface_id = '\0';	/* indicator of old interface tunnelled */
	hp->cmd_len = (unsigned char) cmd_size;
		hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
		    SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
	hp->dxfer_len = mxsize;
	if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
	    (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
		hp->dxferp = (char __user *)buf + cmd_size;

	hp->timeout = old_hdr.reply_len;	/* structure abuse ... */
	hp->flags = input_size;	/* structure abuse ... */
	hp->pack_id = old_hdr.pack_id;

	if (__copy_from_user(cmnd, buf, cmd_size))

	/*
	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
	 * but it is possible that the app intended SG_DXFER_TO_DEV, because
	 * there is a non-zero input_size, so emit a warning.
	 */
	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
		static char cmd[TASK_COMM_LEN];
		if (strcmp(current->comm, cmd)) {
			printk_ratelimited(KERN_WARNING
					   "sg_write: data in/out %d/%d bytes "
					   "for SCSI command 0x%x-- guessing "
					   "data in;\n program %s not setting "
					   "count and/or reply_len properly\n",
					   old_hdr.reply_len - (int)SZ_SG_HEADER,
					   input_size, (unsigned int) cmnd[0],
					   current->comm);
			strcpy(cmd, current->comm);
		}
	}
	k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
	return (k < 0) ? k : count;
}

sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
	     size_t count, int blocking, int read_only, int sg_io_owned,
	     Sg_request **o_srp)
{
	unsigned char cmnd[MAX_COMMAND_SIZE];
	unsigned long ul_timeout;

	if (count < SZ_SG_IO_HDR)
	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */

	sfp->cmd_q = 1;	/* when sg_io_hdr seen, set command queuing on */
	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
	}
	srp->sg_io_owned = sg_io_owned;
	if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
		sg_remove_request(sfp, srp);
	}
	if (hp->interface_id != 'S') {
		sg_remove_request(sfp, srp);
	}
	if (hp->flags & SG_FLAG_MMAP_IO) {
		if (hp->dxfer_len > sfp->reserve.bufflen) {
			sg_remove_request(sfp, srp);
			return -ENOMEM;	/* MMAP_IO size must fit in reserve buffer */
		}
		if (hp->flags & SG_FLAG_DIRECT_IO) {
			sg_remove_request(sfp, srp);
			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
		}
		if (sg_res_in_use(sfp)) {
			sg_remove_request(sfp, srp);
			return -EBUSY;	/* reserve buffer already being used */
		}
	}
	ul_timeout = msecs_to_jiffies(srp->header.timeout);
	timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
	if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
		sg_remove_request(sfp, srp);
	}
	if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	}
	if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
	}
	if (read_only && sg_allow_access(file, cmnd)) {
		sg_remove_request(sfp, srp);
	}
	k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
}

sg_common_write(Sg_fd * sfp, Sg_request * srp,
		unsigned char *cmnd, int timeout, int blocking)
{
	Sg_device *sdp = sfp->parentdp;
	sg_io_hdr_t *hp = &srp->header;

	srp->data.cmd_opcode = cmnd[0];	/* hold opcode of command */
	hp->masked_status = 0;
	hp->driver_status = 0;
	SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
				   (int) cmnd[0], (int) hp->cmd_len));

	k = sg_start_req(srp, cmnd);
		SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
		sg_finish_rem_req(srp);
		return k;	/* probably out of space --> ENOMEM */

		blk_end_request_all(srp->rq, -EIO);
		sg_finish_rem_req(srp);

	switch (hp->dxfer_direction) {
	case SG_DXFER_TO_FROM_DEV:
	case SG_DXFER_FROM_DEV:
		data_dir = DMA_FROM_DEVICE;
	case SG_DXFER_TO_DEV:
		data_dir = DMA_TO_DEVICE;
	case SG_DXFER_UNKNOWN:
		data_dir = DMA_BIDIRECTIONAL;
	}

	hp->duration = jiffies_to_msecs(jiffies);

	srp->rq->timeout = timeout;
	kref_get(&sfp->f_ref);	/* sg_rq_end_io() does kref_put(). */
	blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
			      srp->rq, 1, sg_rq_end_io);
}

sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int result, val, read_only;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))

	SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
				   sdp->disk->disk_name, (int) cmd_in));
	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));

		int blocking = 1;	/* ignore O_NONBLOCK flag */

		if (!scsi_block_when_processing_errors(sdp->device))
		if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
		sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
			     blocking, read_only, 1, &srp);

		result = 0;	/* following macro to beat race condition */
		__wait_event_interruptible(sfp->read_wait,
			(srp->done || sdp->detached),
		write_lock_irq(&sfp->rq_list_lock);
			write_unlock_irq(&sfp->rq_list_lock);
		write_unlock_irq(&sfp->rq_list_lock);
		return result;	/* -ERESTARTSYS because signal hit process */
		result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
		return (result < 0) ? result : 0;

		result = get_user(val, ip);
		if (val >= MULDIV(INT_MAX, USER_HZ, HZ))
			val = MULDIV(INT_MAX, USER_HZ, HZ);
		sfp->timeout_user = val;
		sfp->timeout = MULDIV(val, HZ, USER_HZ);
	case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
		/* strange ..., for backward compatibility */
		return sfp->timeout_user;
	case SG_SET_FORCE_LOW_DMA:
		result = get_user(val, ip);
			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
				val = (int) sfp->reserve.bufflen;
				sg_remove_scat(&sfp->reserve);
				sg_build_reserve(sfp, val);
			}
			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
		return put_user((int) sfp->low_dma, ip);
		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
			sg_scsi_id_t __user *sg_idp = p;

			__put_user((int) sdp->device->host->host_no,
				   &sg_idp->host_no);
			__put_user((int) sdp->device->channel,
				   &sg_idp->channel);
			__put_user((int) sdp->device->id, &sg_idp->scsi_id);
			__put_user((int) sdp->device->lun, &sg_idp->lun);
			__put_user((int) sdp->device->type, &sg_idp->scsi_type);
			__put_user((short) sdp->device->host->cmd_per_lun,
				   &sg_idp->h_cmd_per_lun);
			__put_user((short) sdp->device->queue_depth,
				   &sg_idp->d_queue_depth);
			__put_user(0, &sg_idp->unused[0]);
			__put_user(0, &sg_idp->unused[1]);
	case SG_SET_FORCE_PACK_ID:
		result = get_user(val, ip);
		sfp->force_packid = val ? 1 : 0;
		if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned)) {
				read_unlock_irqrestore(&sfp->rq_list_lock,
						       iflags);
				__put_user(srp->header.pack_id, ip);
			}
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	case SG_GET_NUM_WAITING:
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned))
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		return put_user(val, ip);
	case SG_GET_SG_TABLESIZE:
		return put_user(sdp->sg_tablesize, ip);
	case SG_SET_RESERVED_SIZE:
		result = get_user(val, ip);
		val = min_t(int, val,
			    queue_max_sectors(sdp->device->request_queue) * 512);
		if (val != sfp->reserve.bufflen) {
			if (sg_res_in_use(sfp) || sfp->mmap_called)
			sg_remove_scat(&sfp->reserve);
			sg_build_reserve(sfp, val);
		}
	case SG_GET_RESERVED_SIZE:
		val = min_t(int, sfp->reserve.bufflen,
			    queue_max_sectors(sdp->device->request_queue) * 512);
		return put_user(val, ip);
	case SG_SET_COMMAND_Q:
		result = get_user(val, ip);
		sfp->cmd_q = val ? 1 : 0;
	case SG_GET_COMMAND_Q:
		return put_user((int) sfp->cmd_q, ip);
	case SG_SET_KEEP_ORPHAN:
		result = get_user(val, ip);
		sfp->keep_orphan = val;
	case SG_GET_KEEP_ORPHAN:
		return put_user((int) sfp->keep_orphan, ip);
	case SG_NEXT_CMD_LEN:
		result = get_user(val, ip);
		sfp->next_cmd_len = (val > 0) ? val : 0;
	case SG_GET_VERSION_NUM:
		return put_user(sg_version_num, ip);
	case SG_GET_ACCESS_COUNT:
		/* faked - we don't have a real access count anymore */
		val = (sdp->device ? 1 : 0);
		return put_user(val, ip);
	case SG_GET_REQUEST_TABLE:
		if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
			sg_req_info_t *rinfo;

			rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
					GFP_KERNEL);
			read_lock_irqsave(&sfp->rq_list_lock, iflags);
			for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
			     ++val, srp = srp ? srp->nextrp : srp) {
				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
					rinfo[val].req_state = srp->done + 1;
					    srp->header.masked_status &
					    srp->header.host_status &
					    srp->header.driver_status;
						rinfo[val].duration =
						    srp->header.duration;
						ms = jiffies_to_msecs(jiffies);
						rinfo[val].duration =
						    (ms > srp->header.duration) ?
						    (ms - srp->header.duration) : 0;
					rinfo[val].orphan = srp->orphan;
					rinfo[val].sg_io_owned =
					rinfo[val].pack_id =
					    srp->header.pack_id;
					rinfo[val].usr_ptr =
					    srp->header.usr_ptr;
			}
			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			result = __copy_to_user(p, rinfo,
						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
			result = result ? -EFAULT : 0;

	case SG_EMULATED_HOST:
		return put_user(sdp->device->host->hostt->emulated, ip);

		if (filp->f_flags & O_NONBLOCK) {
			if (scsi_host_in_recovery(sdp->device->host))
		} else if (!scsi_block_when_processing_errors(sdp->device))
		result = get_user(val, ip);
		if (SG_SCSI_RESET_NOTHING == val)
		switch (val) {
		case SG_SCSI_RESET_DEVICE:
			val = SCSI_TRY_RESET_DEVICE;
		case SG_SCSI_RESET_TARGET:
			val = SCSI_TRY_RESET_TARGET;
		case SG_SCSI_RESET_BUS:
			val = SCSI_TRY_RESET_BUS;
		case SG_SCSI_RESET_HOST:
			val = SCSI_TRY_RESET_HOST;
		}
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return (scsi_reset_provider(sdp->device, val) ==
			SUCCESS) ? 0 : -EIO;
	case SCSI_IOCTL_SEND_COMMAND:
			unsigned char opcode = WRITE_6;
			Scsi_Ioctl_Command __user *siocp = p;

			if (copy_from_user(&opcode, siocp->data, 1))
			if (sg_allow_access(filp, &opcode))
		return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
		result = get_user(val, ip);
		sdp->sgdebug = (char) val;
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_TRANSFORM:
		return scsi_ioctl(sdp->device, cmd_in, p);
		return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
				ip);
		return blk_trace_setup(sdp->device->request_queue,
				       sdp->disk->disk_name,
				       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
				       NULL,
				       (char *)arg);
		return blk_trace_startstop(sdp->device->request_queue, 1);
		return blk_trace_startstop(sdp->device->request_queue, 0);
	case BLKTRACETEARDOWN:
		return blk_trace_remove(sdp->device->request_queue);
			return -EPERM;	/* don't know so take safe approach */
		return scsi_ioctl(sdp->device, cmd_in, p);
}
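/*
 * Illustrative user-space sketch (not part of this driver): issuing a
 * 6-byte INQUIRY through the SG_IO ioctl serviced above. The sg_io_hdr_t
 * fields mirror what sg_new_write()/sg_new_read() expect; the descriptor
 * fd and all error handling are assumptions.
 *
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	// INQUIRY
 *	unsigned char resp[96], sense[32];
 *	sg_io_hdr_t io_hdr;
 *
 *	memset(&io_hdr, 0, sizeof(io_hdr));
 *	io_hdr.interface_id = 'S';		// mandatory, checked above
 *	io_hdr.cmd_len = sizeof(cdb);
 *	io_hdr.cmdp = cdb;
 *	io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
 *	io_hdr.dxferp = resp;
 *	io_hdr.dxfer_len = sizeof(resp);
 *	io_hdr.mx_sb_len = sizeof(sense);
 *	io_hdr.sbp = sense;
 *	io_hdr.timeout = 20000;			// milliseconds
 *	if (ioctl(fd, SG_IO, &io_hdr) < 0 || (io_hdr.info & SG_INFO_CHECK))
 *		;				// failed; inspect sense data
 */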
sg_unlocked_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
	mutex_lock(&sg_mutex);
	ret = sg_ioctl(filp, cmd_in, arg);
	mutex_unlock(&sg_mutex);
}

#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
	struct scsi_device *sdev;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))

	if (sdev->host->hostt->compat_ioctl) {
		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
	}
	return -ENOIOCTLCMD;
}
#endif

sg_poll(struct file *filp, poll_table * wait)
{
	unsigned int res = 0;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))

	poll_wait(filp, &sfp->read_wait, wait);
	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
		/* if any read waiting, flag it */
		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
			res = POLLIN | POLLRDNORM;
	}
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

	else if (!sfp->cmd_q) {
			res |= POLLOUT | POLLWRNORM;
	} else if (count < SG_MAX_QUEUE)
		res |= POLLOUT | POLLWRNORM;
	SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
				   sdp->disk->disk_name, (int) res));
}

sg_fasync(int fd, struct file *filp, int mode)
{
	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
	SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
				   sdp->disk->disk_name, mode));

	return fasync_helper(fd, filp, mode, &sfp->async_qp);
}

sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long offset, len, sa;
	Sg_scatter_hold *rsv_schp;

	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
		return VM_FAULT_SIGBUS;
	rsv_schp = &sfp->reserve;
	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rsv_schp->bufflen)
		return VM_FAULT_SIGBUS;
	SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
				   offset, rsv_schp->k_use_sg));
	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
		len = vma->vm_end - sa;
		len = (len < length) ? len : length;
			struct page *page = nth_page(rsv_schp->pages[k],
						     offset >> PAGE_SHIFT);
			get_page(page);	/* increment page count */
			return 0;	/* success */
	}
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct sg_mmap_vm_ops = {
	.fault = sg_vma_fault,
};

sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long req_sz, len, sa;
	Sg_scatter_hold *rsv_schp;

	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
	req_sz = vma->vm_end - vma->vm_start;
	SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
				   (void *) vma->vm_start, (int) req_sz));
		return -EINVAL;	/* want no offset */
	rsv_schp = &sfp->reserve;
	if (req_sz > rsv_schp->bufflen)
		return -ENOMEM;	/* cannot map more than reserved buffer */

	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
		len = vma->vm_end - sa;
		len = (len < length) ? len : length;
	}
	sfp->mmap_called = 1;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = sfp;
	vma->vm_ops = &sg_mmap_vm_ops;
}

static void sg_rq_end_io_usercontext(struct work_struct *work)
{
	struct sg_request *srp = container_of(work, struct sg_request, ew.work);
	struct sg_fd *sfp = srp->parentfp;

	sg_finish_rem_req(srp);
	kref_put(&sfp->f_ref, sg_remove_sfp);
}

/*
 * This function is a "bottom half" handler that is called by the mid
 * level when a command is completed (or has failed).
 */
static void sg_rq_end_io(struct request *rq, int uptodate)
{
	struct sg_request *srp = rq->end_io_data;
	unsigned long iflags;
	int result, resid, done = 1;

	if (WARN_ON(srp->done != 0))

	sfp = srp->parentfp;
	if (WARN_ON(sfp == NULL))

	sdp = sfp->parentdp;
	if (unlikely(sdp->detached))
		printk(KERN_INFO "sg_rq_end_io: device detached\n");

	result = rq->errors;
	resid = rq->resid_len;

	SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
				   sdp->disk->disk_name, srp->header.pack_id, result));
	srp->header.resid = resid;
	ms = jiffies_to_msecs(jiffies);
	srp->header.duration = (ms > srp->header.duration) ?
			       (ms - srp->header.duration) : 0;
		struct scsi_sense_hdr sshdr;

		srp->header.status = 0xff & result;
		srp->header.masked_status = status_byte(result);
		srp->header.msg_status = msg_byte(result);
		srp->header.host_status = host_byte(result);
		srp->header.driver_status = driver_byte(result);
		if ((sdp->sgdebug > 0) &&
		    ((CHECK_CONDITION == srp->header.masked_status) ||
		     (COMMAND_TERMINATED == srp->header.masked_status)))
			__scsi_print_sense("sg_cmd_done", sense,
					   SCSI_SENSE_BUFFERSIZE);

		/* Following if statement is a patch supplied by Eric Youngdale */
		if (driver_byte(result) != 0
		    && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
		    && !scsi_sense_is_deferred(&sshdr)
		    && sshdr.sense_key == UNIT_ATTENTION
		    && sdp->device->removable) {
			/* Detected possible disc change. Set the bit - this */
			/* may be used if there are filesystems using this device */
			sdp->device->changed = 1;
		}
	/* Rely on write phase to clean out srp status values, so no "else" */

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	if (unlikely(srp->orphan)) {
		if (sfp->keep_orphan)
			srp->sg_io_owned = 0;
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);

		/* Now wake up any sg_read() that is waiting for this
		 * packet.
		 */
		wake_up_interruptible(&sfp->read_wait);
		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
		kref_put(&sfp->f_ref, sg_remove_sfp);
		INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
		schedule_work(&srp->ew.work);
}

static const struct file_operations sg_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = sg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = sg_compat_ioctl,
#endif
	.release = sg_release,
	.fasync = sg_fasync,
	.llseek = no_llseek,
};

static struct class *sg_sysfs_class;

static int sg_sysfs_valid = 0;

static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
	struct request_queue *q = scsidp->request_queue;
	unsigned long iflags;

	sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
		printk(KERN_WARNING "kmalloc Sg_device failure\n");
		return ERR_PTR(-ENOMEM);

	if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
		printk(KERN_WARNING "idr expansion Sg_device failure\n");
	}
	write_lock_irqsave(&sg_index_lock, iflags);

	error = idr_get_new(&sg_index_idr, sdp, &k);
		write_unlock_irqrestore(&sg_index_lock, iflags);
		printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
		       error);

	if (unlikely(k >= SG_MAX_DEVS))

	SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
	sprintf(disk->disk_name, "sg%d", k);
	disk->first_minor = k;
	sdp->device = scsidp;
	INIT_LIST_HEAD(&sdp->sfds);
	init_waitqueue_head(&sdp->o_excl_wait);
	sdp->sg_tablesize = queue_max_segments(q);
	kref_init(&sdp->d_ref);

	write_unlock_irqrestore(&sg_index_lock, iflags);

		return ERR_PTR(error);

	idr_remove(&sg_index_idr, k);
	write_unlock_irqrestore(&sg_index_lock, iflags);
	sdev_printk(KERN_WARNING, scsidp,
		    "Unable to attach sg device type=%d, minor "
		    "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
}

sg_add(struct device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
	struct gendisk *disk;
	Sg_device *sdp = NULL;
	struct cdev * cdev = NULL;
	unsigned long iflags;

	disk = alloc_disk(1);
		printk(KERN_WARNING "alloc_disk failed\n");
	disk->major = SCSI_GENERIC_MAJOR;

	cdev = cdev_alloc();
		printk(KERN_WARNING "cdev_alloc failed\n");
	cdev->owner = THIS_MODULE;
	cdev->ops = &sg_fops;

	sdp = sg_alloc(disk, scsidp);
		printk(KERN_WARNING "sg_alloc failed\n");
		error = PTR_ERR(sdp);

	error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);

	if (sg_sysfs_valid) {
		struct device *sg_class_member;

		sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
						MKDEV(SCSI_GENERIC_MAJOR,
						      sdp->index),
						sdp, "%s", disk->disk_name);
		if (IS_ERR(sg_class_member)) {
			printk(KERN_ERR "sg_add: "
			       "device_create failed\n");
			error = PTR_ERR(sg_class_member);
		}
		error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
					  &sg_class_member->kobj, "generic");
			printk(KERN_ERR "sg_add: unable to make symlink "
			       "'generic' back to sg%d\n", sdp->index);
	} else
		printk(KERN_WARNING "sg_add: sg_sys Invalid\n");

	sdev_printk(KERN_NOTICE, scsidp,
		    "Attached scsi generic sg%d type %d\n", sdp->index,
		    scsidp->type);

	dev_set_drvdata(cl_dev, sdp);

	write_lock_irqsave(&sg_index_lock, iflags);
	idr_remove(&sg_index_idr, sdp->index);
	write_unlock_irqrestore(&sg_index_lock, iflags);
}

static void sg_device_destroy(struct kref *kref)
{
	struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
	unsigned long flags;

	/* CAUTION! Note that the device can still be found via idr_find()
	 * even though the refcount is 0. Therefore, do idr_remove() BEFORE
	 * any other cleanup.
	 */
	write_lock_irqsave(&sg_index_lock, flags);
	idr_remove(&sg_index_idr, sdp->index);
	write_unlock_irqrestore(&sg_index_lock, flags);

		printk("sg_device_destroy: %s\n",
		       sdp->disk->disk_name));

	put_disk(sdp->disk);
}

static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
	Sg_device *sdp = dev_get_drvdata(cl_dev);
	unsigned long iflags;

	if (!sdp || sdp->detached)

	SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name));

	/* Need a write lock to set sdp->detached. */
	write_lock_irqsave(&sg_index_lock, iflags);
	list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
		wake_up_interruptible(&sfp->read_wait);
		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
	}
	write_unlock_irqrestore(&sg_index_lock, iflags);

	sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
	device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
	cdev_del(sdp->cdev);
}

module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
module_param_named(def_reserved_size, def_reserved_size, int,
		   S_IRUGO | S_IWUSR);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);

MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
		 "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
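/*
 * Illustrative usage (assuming the driver is built as a module):
 *
 *	modprobe sg def_reserved_size=65536 allow_dio=1
 *
 * Because the parameters above are registered with S_IWUSR, root can
 * normally also adjust them at runtime under /sys/module/sg/parameters/.
 */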
	if (scatter_elem_sz < PAGE_SIZE) {
		scatter_elem_sz = PAGE_SIZE;
		scatter_elem_sz_prev = scatter_elem_sz;
	}
	if (def_reserved_size >= 0)
		sg_big_buff = def_reserved_size;
		def_reserved_size = sg_big_buff;

	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				    SG_MAX_DEVS, "sg");
	sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
	if (IS_ERR(sg_sysfs_class)) {
		rc = PTR_ERR(sg_sysfs_class);
	}
	rc = scsi_register_interface(&sg_interface);

#ifdef CONFIG_SCSI_PROC_FS
#endif				/* CONFIG_SCSI_PROC_FS */

	class_destroy(sg_sysfs_class);
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);

#ifdef CONFIG_SCSI_PROC_FS
#endif				/* CONFIG_SCSI_PROC_FS */
	scsi_unregister_interface(&sg_interface);
	class_destroy(sg_sysfs_class);
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				 SG_MAX_DEVS);
	idr_destroy(&sg_index_idr);

static int sg_start_req(Sg_request *srp, unsigned char *cmd)
{
	Sg_fd *sfp = srp->parentfp;
	sg_io_hdr_t *hp = &srp->header;
	int dxfer_len = (int) hp->dxfer_len;
	int dxfer_dir = hp->dxfer_direction;
	unsigned int iov_count = hp->iovec_count;
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;
	struct request_queue *q = sfp->parentdp->device->request_queue;
	struct rq_map_data *md, map_data;
	int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;

	SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
				   dxfer_len));

	rq = blk_get_request(q, rw, GFP_ATOMIC);

	memcpy(rq->cmd, cmd, hp->cmd_len);

	rq->cmd_len = hp->cmd_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->end_io_data = srp;
	rq->sense = srp->sense_b;
	rq->retries = SG_DEFAULT_RETRIES;

	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))

	if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
	    dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
	    !sfp->parentdp->device->host->unchecked_isa_dma &&
	    blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))

		if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
			sg_link_reserve(sfp, srp, dxfer_len);
			res = sg_build_indirect(req_schp, sfp, dxfer_len);

		md->pages = req_schp->pages;
		md->page_order = req_schp->page_order;
		md->nr_entries = req_schp->k_use_sg;
		md->null_mapped = hp->dxferp ? 0 : 1;
		if (dxfer_dir == SG_DXFER_TO_FROM_DEV)

	if (unlikely(iov_count > UIO_MAXIOV))

		int len, size = sizeof(struct sg_iovec) * iov_count;

		iov = memdup_user(hp->dxferp, size);
			return PTR_ERR(iov);

		len = iov_length(iov, iov_count);
		if (hp->dxfer_len < len) {
			iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
			len = hp->dxfer_len;
		}

		res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov,
					  iov_count, len, GFP_ATOMIC);
		res = blk_rq_map_user(q, rq, md, hp->dxferp,
				      hp->dxfer_len, GFP_ATOMIC);

		req_schp->dio_in_use = 1;
		hp->info |= SG_INFO_DIRECT_IO;
}

static int sg_finish_rem_req(Sg_request * srp)
{
	Sg_fd *sfp = srp->parentfp;
	Sg_scatter_hold *req_schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));

		ret = blk_rq_unmap_user(srp->bio);

		blk_put_request(srp->rq);

		sg_unlink_reserve(sfp, srp);
		sg_remove_scat(req_schp);

	sg_remove_request(sfp, srp);
}

sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
	int sg_bufflen = tablesize * sizeof(struct page *);
	gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;

	schp->pages = kzalloc(sg_bufflen, gfp_flags);
	schp->sglist_len = sg_bufflen;
	return tablesize;	/* number of scat_gath elements allocated */
}

sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
	int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
	int sg_tablesize = sfp->parentdp->sg_tablesize;
	int blk_size = buff_size, order;
	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;

		++blk_size;	/* don't know why */
	/* round request up to next highest SG_SECTOR_SZ byte boundary */
	blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
	SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
				   buff_size, blk_size));

	/* N.B. ret_sz carried into this block ... */
	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
	if (mx_sc_elems < 0)
		return mx_sc_elems;	/* most likely -ENOMEM */

	num = scatter_elem_sz;
	if (unlikely(num != scatter_elem_sz_prev)) {
		if (num < PAGE_SIZE) {
			scatter_elem_sz = PAGE_SIZE;
			scatter_elem_sz_prev = PAGE_SIZE;
		} else
			scatter_elem_sz_prev = num;
	}
		gfp_mask |= GFP_DMA;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		gfp_mask |= __GFP_ZERO;

	order = get_order(num);

	ret_sz = 1 << (PAGE_SHIFT + order);

	for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
	     k++, rem_sz -= ret_sz) {

		num = (rem_sz > scatter_elem_sz_prev) ?
		      scatter_elem_sz_prev : rem_sz;

		schp->pages[k] = alloc_pages(gfp_mask, order);
		if (!schp->pages[k])

		if (num == scatter_elem_sz_prev) {
			if (unlikely(ret_sz > scatter_elem_sz_prev)) {
				scatter_elem_sz = ret_sz;
				scatter_elem_sz_prev = ret_sz;
			}
		}
		SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
					   "ret_sz=%d\n", k, num, ret_sz));
	}			/* end of for loop */

	schp->page_order = order;

	SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
				   "rem_sz=%d\n", k, rem_sz));

	schp->bufflen = blk_size;
	if (rem_sz > 0)		/* must have failed */

	for (i = 0; i < k; i++)
		__free_pages(schp->pages[i], order);
}

sg_remove_scat(Sg_scatter_hold * schp)
{
	SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
	if (schp->pages && schp->sglist_len > 0) {
		if (!schp->dio_in_use) {

			for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
				SCSI_LOG_TIMEOUT(5, printk(
					"sg_remove_scat: k=%d, pg=0x%p\n",
					k, schp->pages[k]));
				__free_pages(schp->pages[k], schp->page_order);
			}
		}
	}
	memset(schp, 0, sizeof (*schp));
}

sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
{
	Sg_scatter_hold *schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
	if ((!outp) || (num_read_xfer <= 0))

	num = 1 << (PAGE_SHIFT + schp->page_order);
	for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
		if (num > num_read_xfer) {
			if (__copy_to_user(outp, page_address(schp->pages[k]),
			if (__copy_to_user(outp, page_address(schp->pages[k]),
			num_read_xfer -= num;
			if (num_read_xfer <= 0)
	}
}

sg_build_reserve(Sg_fd * sfp, int req_size)
{
	Sg_scatter_hold *schp = &sfp->reserve;

	SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
		if (req_size < PAGE_SIZE)
			req_size = PAGE_SIZE;
		if (0 == sg_build_indirect(schp, sfp, req_size))

		sg_remove_scat(schp);
		req_size >>= 1;	/* divide by 2 */
	} while (req_size > (PAGE_SIZE / 2));
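/*
 * Illustrative fallback sequence (assuming PAGE_SIZE = 4096): a failed
 * 262144-byte reservation is retried at 131072, 65536, ..., down to 4096
 * bytes before sg_build_reserve() stops, so the fd ends up with the
 * largest reserve buffer the allocator could satisfy.
 */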
sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;

	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));

	num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
	for (k = 0; k < rsv_schp->k_use_sg; k++) {
			req_schp->k_use_sg = k + 1;
			req_schp->sglist_len = rsv_schp->sglist_len;
			req_schp->pages = rsv_schp->pages;

			req_schp->bufflen = size;
			req_schp->page_order = rsv_schp->page_order;
	}
	if (k >= rsv_schp->k_use_sg)
		SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
}

sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
	Sg_scatter_hold *req_schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
				   (int) req_schp->k_use_sg));
	req_schp->k_use_sg = 0;
	req_schp->bufflen = 0;
	req_schp->pages = NULL;
	req_schp->page_order = 0;
	req_schp->sglist_len = 0;
	sfp->save_scat_len = 0;
}

sg_get_rq_mark(Sg_fd * sfp, int pack_id)
{
	unsigned long iflags;

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (resp = sfp->headrp; resp; resp = resp->nextrp) {
		/* look for requests that are ready + not SG_IO owned */
		if ((1 == resp->done) && (!resp->sg_io_owned) &&
		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
			resp->done = 2;	/* guard against other readers */
		}
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
}

/* always adds to end of list */
sg_add_request(Sg_fd * sfp)
{
	unsigned long iflags;
	Sg_request *rp = sfp->req_arr;

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
		memset(rp, 0, sizeof (Sg_request));
		if (0 == sfp->cmd_q)
			resp = NULL;	/* command queuing disallowed */
			for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
			if (k < SG_MAX_QUEUE) {
				memset(rp, 0, sizeof (Sg_request));
				while (resp->nextrp)
					resp = resp->nextrp;
			}
		resp->nextrp = NULL;
		resp->header.duration = jiffies_to_msecs(jiffies);
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
}

/* Return of 1 for found; 0 for not found */
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
	Sg_request *prev_rp;
	unsigned long iflags;

	if ((!sfp) || (!srp) || (!sfp->headrp))
	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	prev_rp = sfp->headrp;
	if (srp == prev_rp) {
		sfp->headrp = prev_rp->nextrp;
		prev_rp->parentfp = NULL;
		while ((rp = prev_rp->nextrp)) {
			prev_rp->nextrp = rp->nextrp;
			rp->parentfp = NULL;
		}
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
}

sg_add_sfp(Sg_device * sdp, int dev)
{
	unsigned long iflags;

	sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);

	init_waitqueue_head(&sfp->read_wait);
	rwlock_init(&sfp->rq_list_lock);

	kref_init(&sfp->f_ref);
	sfp->timeout = SG_DEFAULT_TIMEOUT;
	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
		       sdp->device->host->unchecked_isa_dma : 1;
	sfp->cmd_q = SG_DEF_COMMAND_Q;
	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
	sfp->parentdp = sdp;
	write_lock_irqsave(&sg_index_lock, iflags);
	list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
	write_unlock_irqrestore(&sg_index_lock, iflags);
	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
	if (unlikely(sg_big_buff != def_reserved_size))
		sg_big_buff = def_reserved_size;

	bufflen = min_t(int, sg_big_buff,
			queue_max_sectors(sdp->device->request_queue) * 512);
	sg_build_reserve(sfp, bufflen);
	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
				   sfp->reserve.bufflen, sfp->reserve.k_use_sg));

	kref_get(&sdp->d_ref);
	__module_get(THIS_MODULE);
}

static void sg_remove_sfp_usercontext(struct work_struct *work)
{
	struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
	struct sg_device *sdp = sfp->parentdp;

	/* Cleanup any responses which were never read(). */
		sg_finish_rem_req(sfp->headrp);

	if (sfp->reserve.bufflen > 0) {
		printk("sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
		       (int) sfp->reserve.bufflen,
		       (int) sfp->reserve.k_use_sg));
		sg_remove_scat(&sfp->reserve);
	}

	printk("sg_remove_sfp: %s, sfp=0x%p\n",
	       sdp->disk->disk_name,

	scsi_device_put(sdp->device);

	module_put(THIS_MODULE);
}

static void sg_remove_sfp(struct kref *kref)
{
	struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
	struct sg_device *sdp = sfp->parentdp;
	unsigned long iflags;

	write_lock_irqsave(&sg_index_lock, iflags);
	list_del(&sfp->sfd_siblings);
	write_unlock_irqrestore(&sg_index_lock, iflags);
	wake_up_interruptible(&sdp->o_excl_wait);

	INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
	schedule_work(&sfp->ew.work);
}

sg_res_in_use(Sg_fd * sfp)
{
	const Sg_request *srp;
	unsigned long iflags;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp)
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
}

#ifdef CONFIG_SCSI_PROC_FS
sg_idr_max_id(int id, void *p, void *data)

	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return k + 1;		/* origin 1 */
#endif

/* must be called with sg_index_lock held */
static Sg_device *sg_lookup_dev(int dev)
{
	return idr_find(&sg_index_idr, dev);
}

static Sg_device *sg_get_dev(int dev)
{
	struct sg_device *sdp;
	unsigned long flags;

	read_lock_irqsave(&sg_index_lock, flags);
	sdp = sg_lookup_dev(dev);
		sdp = ERR_PTR(-ENXIO);
	else if (sdp->detached) {
		/* If sdp->detached, then the refcount may already be 0, in
		 * which case it would be a bug to do kref_get().
		 */
		sdp = ERR_PTR(-ENODEV);
	} else
		kref_get(&sdp->d_ref);
	read_unlock_irqrestore(&sg_index_lock, flags);
}

static void sg_put_dev(struct sg_device *sdp)
{
	kref_put(&sdp->d_ref, sg_device_destroy);
}

#ifdef CONFIG_SCSI_PROC_FS

static struct proc_dir_entry *sg_proc_sgp = NULL;

static char sg_proc_sg_dirname[] = "scsi/sg";

static int sg_proc_seq_show_int(struct seq_file *s, void *v);

static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
				  size_t count, loff_t *off);
static const struct file_operations adio_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_adio,
	.llseek = seq_lseek,
	.write = sg_proc_write_adio,
	.release = single_release,
};

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
				    const char __user *buffer, size_t count, loff_t *off);
static const struct file_operations dressz_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_dressz,
	.llseek = seq_lseek,
	.write = sg_proc_write_dressz,
	.release = single_release,
};

static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static const struct file_operations version_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_version,
	.llseek = seq_lseek,
	.release = single_release,
};

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static const struct file_operations devhdr_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_devhdr,
	.llseek = seq_lseek,
	.release = single_release,
};

static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void * dev_seq_start(struct seq_file *s, loff_t *pos);
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static const struct file_operations dev_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_dev,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_dev,
};

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static const struct file_operations devstrs_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_devstrs,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations devstrs_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_devstrs,
};

static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static const struct file_operations debug_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_debug,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations debug_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_debug,
};

struct sg_proc_leaf {
	const char * name;
	const struct file_operations * fops;
};

static struct sg_proc_leaf sg_proc_leaf_arr[] = {
	{"allow_dio", &adio_fops},
	{"debug", &debug_fops},
	{"def_reserved_size", &dressz_fops},
	{"device_hdr", &devhdr_fops},
	{"devices", &dev_fops},
	{"device_strs", &devstrs_fops},
	{"version", &version_fops}
};

	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
	struct sg_proc_leaf * leaf;

	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);

	for (k = 0; k < num_leaves; ++k) {
		leaf = &sg_proc_leaf_arr[k];
		mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
		proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
	}

sg_proc_cleanup(void)
{
	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);

	for (k = 0; k < num_leaves; ++k)
		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
	remove_proc_entry(sg_proc_sg_dirname, NULL);
}

static int sg_proc_seq_show_int(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *((int *)s->private));
}

static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}

sg_proc_write_adio(struct file *filp, const char __user *buffer,
		   size_t count, loff_t *off)
{
	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
	num = (count < 10) ? count : 10;
	if (copy_from_user(buff, buffer, num))
	sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
}

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}

sg_proc_write_dressz(struct file *filp, const char __user *buffer,
		     size_t count, loff_t *off)
{
	unsigned long k = ULONG_MAX;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
	num = (count < 10) ? count : 10;
	if (copy_from_user(buff, buffer, num))
	k = simple_strtoul(buff, NULL, 10);
	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
	}
}

static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
		   sg_version_date);
}

static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_version, NULL);
}

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
	seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
		   "online\n");
}

static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_devhdr, NULL);
}

struct sg_proc_deviter {
	loff_t index;
	size_t max;
};

static void * dev_seq_start(struct seq_file *s, loff_t *pos)
{
	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);

	it->max = sg_last_dev();
	if (it->index >= it->max)
}

static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct sg_proc_deviter * it = s->private;

	return (it->index < it->max) ? it : NULL;
}

static void dev_seq_stop(struct seq_file *s, void *v)

static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}

static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
		seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
			   scsidp->host->host_no, scsidp->channel,
			   scsidp->id, scsidp->lun, (int) scsidp->type,
			   (int) scsidp->queue_depth,
			   (int) scsidp->device_busy,
			   (int) scsi_device_online(scsidp));
		seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
	read_unlock_irqrestore(&sg_index_lock, iflags);
}

static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
	return seq_open(file, &devstrs_seq_ops);
}

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
			   scsidp->vendor, scsidp->model, scsidp->rev);
		seq_printf(s, "<no active device>\n");
	read_unlock_irqrestore(&sg_index_lock, iflags);
}

/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
	int k, m, new_interface, blen, usg;
	const sg_io_hdr_t *hp;

	list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
		read_lock(&fp->rq_list_lock);	/* irqs already disabled */
		seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
			   "(res)sgat=%d low_dma=%d\n", k,
			   jiffies_to_msecs(fp->timeout),
			   fp->reserve.bufflen,
			   (int) fp->reserve.k_use_sg,
		seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
			   (int) fp->cmd_q, (int) fp->force_packid,
			   (int) fp->keep_orphan, (int) fp->closed);
		for (m = 0, srp = fp->headrp;
		     ++m, srp = srp->nextrp) {
			new_interface = (hp->interface_id == '\0') ? 0 : 1;
			if (srp->res_used) {
				if (new_interface &&
				    (SG_FLAG_MMAP_IO & hp->flags))
				if (SG_INFO_DIRECT_IO_MASK & hp->info)
			}
			blen = srp->data.bufflen;
			usg = srp->data.k_use_sg;
			seq_printf(s, srp->done ?
				   ((1 == srp->done) ? "rcv:" : "fin:")
			seq_printf(s, " id=%d blen=%d",
				   srp->header.pack_id, blen);
				seq_printf(s, " dur=%d", hp->duration);
				ms = jiffies_to_msecs(jiffies);
				seq_printf(s, " t_o/elap=%d/%d",
					   (new_interface ? hp->timeout :
					    jiffies_to_msecs(fp->timeout)),
					   (ms > hp->duration ? ms - hp->duration : 0));
			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
				   (int) srp->data.cmd_opcode);
		}
			seq_printf(s, " No requests active\n");
		read_unlock(&fp->rq_list_lock);
	}
}

static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
	return seq_open(file, &debug_seq_ops);
}

static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	unsigned long iflags;

	if (it && (0 == it->index)) {
		seq_printf(s, "max_active_device=%d(origin 1)\n",
		seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
	}
	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if (sdp && !list_empty(&sdp->sfds)) {
		struct scsi_device *scsidp = sdp->device;

		seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
			seq_printf(s, "detached pending close ");
			seq_printf(s, "scsi%d chan=%d id=%d lun=%d em=%d",
				   scsidp->host->host_no,
				   scsidp->channel, scsidp->id,
				   scsidp->host->hostt->emulated);
		seq_printf(s, " sg_tablesize=%d excl=%d\n",
			   sdp->sg_tablesize, sdp->exclude);
		sg_proc_debug_helper(s, sdp);
	}
	read_unlock_irqrestore(&sg_index_lock, iflags);
}

#endif				/* CONFIG_SCSI_PROC_FS */

module_init(init_sg);
module_exit(exit_sg);