/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"
#define SG_MEMPOOL_NR	(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE	32

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	kmem_cache_t	*slab;
	mempool_t	*pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP
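/*
 * Illustrative note (not part of the original file): SP(32) expands to
 * the initializer { 32, "sgpool-32" }, so the table above declares one
 * slab cache and mempool per segment-count bucket from 8 up to
 * SCSI_MAX_PHYS_SEGMENTS:
 *
 *	static struct scsi_host_sg_pool p = SP(32);
 *	p.size == 32, p.name == "sgpool-32"
 *
 * scsi_init_queue() near the end of this file fills in the slab and
 * pool members at boot time.
 */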
/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq	- request that is ready to be queued.
 *              at_head	- boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.  The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
	/*
	 * Because users of this function are apt to reuse requests with no
	 * modification, we have to sanitise the request flags here
	 */
	sreq->sr_request->flags &= ~REQ_DONTPREP;
	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
			   at_head, sreq);
	return 0;
}
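/*
 * Illustrative sketch of a caller (hypothetical; the ioctl paths are the
 * real users, and scsi_do_req() below is the in-file consumer):
 *
 *	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *
 *	fill in sreq->sr_cmnd, sreq->sr_data_direction, etc., then:
 *	scsi_insert_special_req(sreq, 1);	1 = insert at queue head
 */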
static void scsi_run_queue(struct request_queue *q);

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
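/*
 * Illustrative sketch (hypothetical driver, not from this file): a LLD's
 * queuecommand() returning SCSI_MLQUEUE_HOST_BUSY is what routes a
 * command back through scsi_queue_insert() above:
 *
 *	static int mydrv_queuecommand(struct scsi_cmnd *cmd,
 *				      void (*done)(struct scsi_cmnd *))
 *	{
 *		if (mydrv_fifo_full())			hypothetical check
 *			return SCSI_MLQUEUE_HOST_BUSY;	midlayer requeues
 *		...
 *		return 0;
 *	}
 */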
/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq	  - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Notes:       This function is only used for queueing requests for things
 *              like ioctls and character device requests - this is because
 *              we essentially just inject a request into the queue for the
 *              device.
 *
 *              In order to support the scsi_device_quiesce function, we
 *              now inject requests on the *head* of the device queue
 *              rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
		 void *buffer, unsigned bufflen,
		 void (*done)(struct scsi_cmnd *),
		 int timeout, int retries)
{
	/*
	 * If the upper level driver is reusing these things, then
	 * we should release the low-level block now.  Another one will
	 * be allocated later when this request is getting queued.
	 */
	__scsi_release_request(sreq);

	/*
	 * Our own function scsi_done (which marks the host as not busy,
	 * disables the timeout counter, etc) will be called by us or by the
	 * low-level driver's queuecommand() routine; it in turn calls the
	 * completion function for the high level driver.
	 */
	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
	sreq->sr_bufflen = bufflen;
	sreq->sr_buffer = buffer;
	sreq->sr_allowed = retries;
	sreq->sr_done = done;
	sreq->sr_timeout_per_command = timeout;

	if (sreq->sr_cmd_len == 0)
		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);
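/*
 * Illustrative sketch of an upper-level caller (hypothetical names; the
 * real users are the ioctl paths of drivers such as sd, sr and st):
 *
 *	static void my_done(struct scsi_cmnd *cmd) { ... }
 *
 *	sreq->sr_data_direction = DMA_FROM_DEVICE;
 *	scsi_do_req(sreq, cdb, buf, buflen, my_done, 30 * HZ, 3);
 *
 * Because of the head injection, the request runs even while the device
 * is quiesced.
 */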
/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
	BUG_ON(!req->waiting);

	complete(req->waiting);
}
void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
		   unsigned bufflen, int timeout, int retries)
{
	DECLARE_COMPLETION(wait);
	int write = (sreq->sr_data_direction == DMA_TO_DEVICE);
	struct request *req;

	req = blk_get_request(sreq->sr_device->request_queue, write,
			      __GFP_WAIT);
	if (bufflen && blk_rq_map_kern(sreq->sr_device->request_queue, req,
				       buffer, bufflen, __GFP_WAIT)) {
		sreq->sr_result = DRIVER_ERROR << 24;
		blk_put_request(req);
		return;
	}

	req->flags |= REQ_NOMERGE;
	req->waiting = &wait;
	req->end_io = scsi_wait_req_end_io;
	req->cmd_len = COMMAND_SIZE(((u8 *)cmnd)[0]);
	req->sense = sreq->sr_sense_buffer;
	req->sense_len = 0;
	memcpy(req->cmd, cmnd, req->cmd_len);
	req->timeout = timeout;
	req->flags |= REQ_BLOCK_PC;

	blk_insert_request(sreq->sr_device->request_queue, req,
			   sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
	wait_for_completion(&wait);
	sreq->sr_request->waiting = NULL;
	sreq->sr_result = req->errors;
	if (req->errors)
		sreq->sr_result |= (DRIVER_ERROR << 24);

	blk_put_request(req);
}
EXPORT_SYMBOL(scsi_wait_req);
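/*
 * Illustrative sketch (hypothetical caller): issue an INQUIRY and sleep
 * until scsi_wait_req_end_io() fires the completion:
 *
 *	unsigned char cdb[6] = { INQUIRY, 0, 0, 0, 36, 0 };
 *
 *	sreq->sr_data_direction = DMA_FROM_DEVICE;
 *	scsi_wait_req(sreq, cdb, buf, 36, 10 * HZ, 3);
 *	if (sreq->sr_result)
 *		handle the error
 */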
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags to or into the request flags
 *
 * scsi_execute returns the req->errors value, which is the
 * scsi_cmnd result field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
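/*
 * Illustrative sketch (hypothetical caller): a synchronous TEST UNIT
 * READY via scsi_execute(); compare scsi_test_unit_ready() later in
 * this file:
 *
 *	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
 *	int result;
 *
 *	result = scsi_execute(sdev, cdb, DMA_NONE, NULL, 0, sense,
 *			      10 * HZ, 3, 0);
 *	if (driver_byte(result) & DRIVER_SENSE)
 *		inspect the sense buffer
 */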
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;

	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	cmd->old_use_sg = cmd->use_sg;
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;
	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
	cmd->buffer = cmd->request_buffer;
	cmd->bufflen = cmd->request_bufflen;

	return 1;
}
/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd	- command to be restored
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
	cmd->request_buffer = cmd->buffer;
	cmd->request_bufflen = cmd->bufflen;
	cmd->use_sg = cmd->old_use_sg;
	cmd->cmd_len = cmd->old_cmd_len;
	cmd->sc_data_direction = cmd->sc_old_data_direction;
	cmd->underflow = cmd->old_underflow;
}
void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely((shost->shost_state == SHOST_RECOVERY) &&
		     shost->host_failed))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}
/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}
/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can happen for a number of reasons - the main one
 *		being I/O errors in the middle of the request, in which
 *		case we need to request the blocks that come after the
 *		bad sector.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	unsigned long flags;

	cmd->request->flags &= ~REQ_DONTPREP;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}
void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct request_queue *q = cmd->device->request_queue;

	scsi_put_command(cmd);
	scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}
/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue done or required, NULL otherwise
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}
static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	switch (cmd->use_sg) {
	case 1 ... 8:		cmd->sglist_len = 0; break;
	case 9 ... 16:		cmd->sglist_len = 1; break;
	case 17 ... 32:		cmd->sglist_len = 2; break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:		cmd->sglist_len = 3; break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:	cmd->sglist_len = 4; break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	case 129 ... 256:	cmd->sglist_len = 5; break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	return sgl;
}
static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index >= SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}
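/*
 * Illustrative pairing of the two helpers above (a sketch, not a real
 * call site): a command with use_sg == 20 draws from "sgpool-32", and
 * the table index recorded in cmd->sglist_len must be handed back on
 * the free side:
 *
 *	cmd->use_sg = 20;
 *	sgl = scsi_alloc_sgtable(cmd, GFP_ATOMIC);	sglist_len becomes 2
 *	if (!sgl)
 *		return BLKPREP_DEFER;
 *	...
 *	scsi_free_sgtable(sgl, cmd->sglist_len);
 */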
/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	else if (cmd->request_buffer != req->buffer)
		kfree(cmd->request_buffer);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}
/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
			unsigned int block_bytes)
{
	int result = cmd->result;
	int this_count = cmd->bufflen;
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
		return;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 * For the case of a READ, we need to copy the data out of the
	 * bounce buffer and into the real buffer.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
	else if (cmd->buffer != req->buffer) {
		if (rq_data_dir(req) == READ) {
			unsigned long flags;
			char *to = bio_kmap_irq(req->bio, &flags);
			memcpy(to, cmd->buffer, cmd->bufflen);
			bio_kunmap_irq(to, &flags);
		}
		kfree(cmd->buffer);
	}

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}
	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		} else
			req->data_len = cmd->resid;
	}

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	if (good_bytes >= 0) {
		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
					      req->nr_sectors, good_bytes));
		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

		if (clear_errors)
			req->errors = 0;
		/*
		 * If multiple sectors are requested in one buffer, then
		 * they will have been finished off by the first command.
		 * If not, then we have a multi-buffer command.
		 *
		 * If block_bytes != 0, it means we had a medium error
		 * of some sort, and that we want to mark some number of
		 * sectors as not uptodate.  Thus we want to inhibit
		 * requeueing right here - we will requeue down below
		 * when we handle the bad sectors.
		 */
		cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);

		/*
		 * If the command completed without error, then either finish off
		 * the rest of the command, or start a new one.
		 */
		if (result == 0 || cmd == NULL) {
			return;
		}
	}
	/*
	 * Now, if we were good little boys and girls, Santa left us a request
	 * sense buffer.  We can extract information from this, so we
	 * can choose a block to remap, etc.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* detected disc change.  set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				cmd = scsi_end_request(cmd, 0,
						       this_count, 1);
				return;
			} else {
				/*
				 * Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
		case ILLEGAL_REQUEST:
			/*
			 * If we had an ILLEGAL REQUEST returned, then we may
			 * have performed an unsupported command.  The only
			 * thing this should be would be a ten byte read where
			 * only a six byte read was supported.  Also, on a
			 * system where READ CAPACITY failed, we may have read
			 * past the end of the disk.
			 */
			if (cmd->device->use_10_for_rw &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/*
				 * This will cause a retry with a 6-byte
				 * command.
				 */
				scsi_requeue_command(q, cmd);
				result = 0;
			} else {
				cmd = scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/*
			 * If the device is in the process of becoming ready,
			 * retry.
			 */
			if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
				scsi_requeue_command(q, cmd);
				return;
			}
			printk(KERN_INFO "Device %s not ready.\n",
			       req->rq_disk ? req->rq_disk->disk_name : "");
			cmd = scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ",
			       cmd->device->host->host_no,
			       (int)cmd->device->channel,
			       (int)cmd->device->id, (int)cmd->device->lun);
			__scsi_print_command(cmd->data_cmnd);
			scsi_print_sense("", cmd);
			cmd = scsi_end_request(cmd, 0, block_bytes, 1);
			return;
		default:
			break;
		}
	}			/* driver byte != 0 */
	if (host_byte(result) == DID_RESET) {
		/*
		 * Third party bus reset or reset for error
		 * recovery reasons.  Just retry the request
		 * and see what happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->flags & REQ_SPECIAL))
			printk(KERN_INFO "SCSI error : <%d %d %d %d> return code "
			       "= 0x%x\n", cmd->device->host->host_no,
			       cmd->device->channel,
			       cmd->device->id,
			       cmd->device->lun, result);

		if (driver_byte(result) & DRIVER_SENSE)
			scsi_print_sense("", cmd);
		/*
		 * Mark a single buffer as not uptodate.  Queue the remainder.
		 * We sometimes get this cruft in the event that a medium error
		 * isn't properly reported.
		 */
		block_bytes = req->hard_cur_sectors << 9;
		if (!block_bytes)
			block_bytes = req->data_len;
		cmd = scsi_end_request(cmd, 0, block_bytes, 1);
	}
}
EXPORT_SYMBOL(scsi_io_completion);
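/*
 * Illustrative sketch of how an upper-level driver's interrupt routine
 * funnels into scsi_io_completion() (hypothetical ULD; sd's rw_intr is
 * the canonical in-tree example):
 *
 *	static void mydisk_rw_intr(struct scsi_cmnd *cmd)
 *	{
 *		int good_bytes = scsi_status_is_good(cmd->result)
 *					? cmd->bufflen : 0;
 *
 *		scsi_io_completion(cmd, good_bytes, 512);
 *	}
 *
 * where 512 (one sector) is the hypothetical block_bytes granularity
 * used when marking bad sectors.
 */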
/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request     *req = cmd->request;
	struct scatterlist *sgpnt;
	int		   count;

	/*
	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
	 */
	if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
		cmd->request_bufflen = req->data_len;
		cmd->request_buffer = req->data;
		req->buffer = req->data;
		cmd->use_sg = 0;
		return 0;
	}

	/*
	 * we used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * if sg table allocation fails, requeue request later.
	 */
	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!sgpnt))
		return BLKPREP_DEFER;

	cmd->request_buffer = (char *) sgpnt;
	cmd->request_bufflen = req->nr_sectors << 9;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

	/*
	 * mapped well, send it off
	 */
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return 0;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
	       req->current_nr_sectors);

	/* release the command and kill it */
	scsi_release_buffers(cmd);
	scsi_put_command(cmd);
	return BLKPREP_KILL;
}
static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;

		if (drv->prepare_flush)
			return drv->prepare_flush(q, rq);
	}

	return 0;
}
static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct request *flush_rq = rq->end_io_data;
	struct scsi_driver *drv;

	if (flush_rq->errors) {
		printk("scsi: barrier error, disabling flush support\n");
		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
	}

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
		drv->end_flush(q, rq);
	}
}
static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state != SDEV_RUNNING)
		return -ENXIO;

	drv = *(struct scsi_driver **) disk->private_data;
	if (drv->issue_flush)
		return drv->issue_flush(&sdev->sdev_gendev, error_sector);

	return -EOPNOTSUPP;
}
static void scsi_generic_done(struct scsi_cmnd *cmd)
{
	BUG_ON(!blk_pc_request(cmd->request));
	scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
}
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int specials_only = 0;

	/*
	 * Just check to see if the device is online.  If it isn't, we
	 * refuse to process any commands.  The device must be brought
	 * online before trying any recovery commands
	 */
	if (unlikely(!scsi_device_online(sdev))) {
		printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
		       sdev->host->host_no, sdev->id, sdev->lun);
		return BLKPREP_KILL;
	}
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		/* OK, we're not in a running state don't prep
		 * user commands */
		if (sdev->sdev_state == SDEV_DEL) {
			/* Device is fully deleted, no commands
			 * at all allowed down */
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}
		/* OK, we only allow special commands (i.e. not
		 * user initiated ones) */
		specials_only = sdev->sdev_state;
	}

	/*
	 * Find the actual device driver associated with this command.
	 * The SPECIAL requests are things like character device or
	 * ioctls, which did not originate from ll_rw_blk.  Note that
	 * the special field is also used to indicate the cmd for
	 * the remainder of a partially fulfilled request that can
	 * come up when there is a medium error.  We have to treat
	 * these two cases differently.  We differentiate by looking
	 * at request->cmd, as this tells us the real story.
	 */
	if (req->flags & REQ_SPECIAL && req->special) {
		struct scsi_request *sreq = req->special;

		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
			scsi_init_cmd_from_req(cmd, sreq);
		} else
			cmd = req->special;
	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

		if (unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
			if (specials_only == SDEV_QUIESCE ||
			    specials_only == SDEV_BLOCK)
				return BLKPREP_DEFER;

			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}

		/*
		 * Now try and find a command block that we can use.
		 */
		if (!req->special) {
			cmd = scsi_get_command(sdev, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
		} else
			cmd = req->special;

		/* pull a tag out of the request if we have one */
		cmd->tag = req->tag;
	} else {
		blk_dump_rq_flags(req, "SCSI bad req");
		return BLKPREP_KILL;
	}
	/* note the overloading of req->special.  When the tag
	 * is active it always means cmd.  If the tag goes
	 * back for re-queueing, it may be reset */
	req->special = cmd;
	cmd->request = req;

	/*
	 * FIXME: drop the lock here because the functions below
	 * expect to be called without the queue lock held.  Also,
	 * previously, we dequeued the request before dropping the
	 * lock.  We hope REQ_STARTED prevents anything untoward from
	 * happening now.
	 */
	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
		struct scsi_driver *drv;
		int ret;

		/*
		 * This will do a couple of things:
		 *  1) Fill in the actual SCSI command.
		 *  2) Fill in any other upper-level specific fields
		 *     (timeout).
		 *
		 * If this returns 0, it means that the request failed
		 * (reading past end of disk, reading offline device,
		 * etc).   This won't actually talk to the device, but
		 * some kinds of consistency checking may cause the
		 * request to be rejected immediately.
		 */

		/*
		 * This sets up the scatter-gather table (allocating if
		 * required).
		 */
		ret = scsi_init_io(cmd);
		if (ret)	/* BLKPREP_KILL return also releases the command */
			return ret;

		/*
		 * Initialize the actual SCSI command for this request.
		 */
		if (req->rq_disk) {
			drv = *(struct scsi_driver **)req->rq_disk->private_data;
			if (unlikely(!drv->init_command(cmd))) {
				scsi_release_buffers(cmd);
				scsi_put_command(cmd);
				return BLKPREP_KILL;
			}
		} else {
			memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
			if (rq_data_dir(req) == WRITE)
				cmd->sc_data_direction = DMA_TO_DEVICE;
			else if (req->data_len)
				cmd->sc_data_direction = DMA_FROM_DEVICE;
			else
				cmd->sc_data_direction = DMA_NONE;

			cmd->transfersize = req->data_len;
			cmd->allowed = 3;
			cmd->timeout_per_command = req->timeout;
			cmd->done = scsi_generic_done;
		}
	}

	/*
	 * The request is now prepped, no need to come back here
	 */
	req->flags |= REQ_DONTPREP;
	return BLKPREP_OK;

 defer:
	/* If we defer, the elv_next_request() returns NULL, but the
	 * queue must be restarted, so we plug here if no returning
	 * command will automatically do that. */
	if (sdev->device_busy == 0)
		blk_plug_device(q);
	return BLKPREP_DEFER;
}
/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				       struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d (%d:%d) unblocking device at"
				       " zero depth\n", sdev->host->host_no,
				       sdev->id, sdev->lun));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}
/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0.  We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
					struct Scsi_Host *shost,
					struct scsi_device *sdev)
{
	if (shost->shost_state == SHOST_RECOVERY)
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
				       shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}
/*
 * Kill requests for a dead device
 */
static void scsi_kill_requests(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(req);
		req->flags |= REQ_QUIET;
		while (end_that_request_first(req, 0, req->nr_sectors))
			;
		end_that_request_last(req);
	}
}
/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		scsi_kill_requests(q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			blkdev_dequeue_request(req);
			req->flags |= REQ_QUIET;
			while (end_that_request_first(req, 0, req->nr_sectors))
				;
			end_that_request_last(req);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (sdev->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org",
					 __FUNCTION__);
			BUG();
		}

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;
 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);
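/*
 * Illustrative note: for a PCI HBA whose LLD has done
 *
 *	pci_set_dma_mask(pdev, 0xffffffffULL);		32-bit DMA only
 *
 * this helper returns that mask, and scsi_alloc_queue() below feeds it
 * to blk_queue_bounce_limit(), so pages above 4GB are bounced before
 * the LLD sees them.  (A sketch; the outcome also depends on the
 * platform's PCI_DMA_BUS_IS_PHYS and unchecked_isa_dma.)
 */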
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, NULL);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);

	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);

	/*
	 * ordered tags are superior to flush ordering
	 */
	if (shost->ordered_tag)
		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
	else if (shost->ordered_flush) {
		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
		q->prepare_flush_fn = scsi_prepare_flush_fn;
		q->end_flush_fn = scsi_end_flush_fn;
	}

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}
/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);
/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
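/*
 * Illustrative sketch (hypothetical LLD): the block/unblock pair is
 * meant to bracket operations such as a host adapter reset:
 *
 *	scsi_block_requests(shost);
 *	mydrv_reset_adapter(shost);	hypothetical helper, may sleep
 *	scsi_unblock_requests(shost);	also reruns the queues
 *
 * As the notes above say, nothing unblocks the host automatically;
 * forgetting the second call stalls every device on the host.
 */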
int __init scsi_init_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
			       sgp->name);
		}

		sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
				mempool_alloc_slab, mempool_free_slab,
				sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
			       sgp->name);
		}
	}

	return 0;
}
void scsi_exit_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}
/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to
 *	six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sense: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if unsuccessful, or the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was
 *	issued) if successful.
 **/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, char *sense)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	char *sense_buffer = NULL;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	if (!sense) {
		sense_buffer = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
		if (!sense_buffer) {
			dev_printk(KERN_ERR, &sdev->sdev_gendev, "failed to allocate sense buffer\n");
			return 0;
		}
		sense = sense_buffer;
	}
 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(sense, 0, SCSI_SENSE_BUFFERSIZE);

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sense, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		struct scsi_sense_hdr sshdr;

		if (scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
			if ((sshdr.sense_key == ILLEGAL_REQUEST) &&
			    (sshdr.asc == 0x20) && (sshdr.ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		data->header_length = header_length;
		if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
	}

	kfree(sense_buffer);
	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
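/*
 * Illustrative sketch (hypothetical caller): fetch the caching mode page
 * (0x08), letting the helper fall back from MODE SENSE(10) to
 * MODE SENSE(6) on its own:
 *
 *	struct scsi_mode_data data;
 *	unsigned char buf[128];
 *	int res;
 *
 *	res = scsi_mode_sense(sdev, 0, 0x08, buf, sizeof(buf),
 *			      10 * HZ, 3, &data, NULL);
 *	if (scsi_status_is_good(res))
 *		the page starts at buf + data.header_length +
 *		data.block_descriptor_length
 */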
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	char sense[SCSI_SENSE_BUFFERSIZE];
	int result;

	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sense,
				  timeout, retries);

	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
		struct scsi_sense_hdr sshdr;

		if ((scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE,
					  &sshdr)) &&
		    ((sshdr.sense_key == UNIT_ATTENTION) ||
		     (sshdr.sense_key == NOT_READY))) {
			sdev->changed = 1;
			result = 0;
		}
	}

	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;
	default:
		break;
	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				dev_printk(KERN_ERR, &sdev->sdev_gendev,
					   "Illegal state transition %s->%s\n",
					   scsi_device_state_name(oldstate),
					   scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful, or an error if not.
 **/
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);
/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
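/*
 * Illustrative sketch of the quiesce/resume pair (hypothetical caller in
 * process context, e.g. domain validation or a suspend path):
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		only special (head-injected) requests reach the
 *		device in this window
 *		scsi_device_resume(sdev);
 *	}
 */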
static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
/**
 * scsi_internal_device_block - internal function to put a device
 *				temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 *	This routine assumes the host_lock is held on entry.
 **/
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	unsigned long flags;
	int err;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err)
		return err;

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);
/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 **/
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	unsigned long flags;
	int err;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	err = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (err)
		return err;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
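/*
 * Illustrative sketch (hypothetical LLD, holding host_lock as the notes
 * above require): pause a single device while its unit recovers, then
 * release it:
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_block(sdev);	all commands now deferred
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 *	...
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_unblock(sdev);	restarts the queue
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 */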
static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);
static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);