1 /*
2  *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
3  *
4  *  SCSI queueing library.
5  *      Initial versions: Eric Youngdale (eric@andante.org).
6  *                        Based upon conversations with large numbers
7  *                        of people at Linux Expo.
8  */
9
10 #include <linux/bio.h>
11 #include <linux/bitops.h>
12 #include <linux/blkdev.h>
13 #include <linux/completion.h>
14 #include <linux/kernel.h>
15 #include <linux/mempool.h>
16 #include <linux/slab.h>
17 #include <linux/init.h>
18 #include <linux/pci.h>
19 #include <linux/delay.h>
20 #include <linux/hardirq.h>
21 #include <linux/scatterlist.h>
22
23 #include <scsi/scsi.h>
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_dbg.h>
26 #include <scsi/scsi_device.h>
27 #include <scsi/scsi_driver.h>
28 #include <scsi/scsi_eh.h>
29 #include <scsi/scsi_host.h>
30
31 #include "scsi_priv.h"
32 #include "scsi_logging.h"
33
34
35 #define SG_MEMPOOL_NR           ARRAY_SIZE(scsi_sg_pools)
36 #define SG_MEMPOOL_SIZE         2
37
38 struct scsi_host_sg_pool {
39         size_t          size;
40         char            *name;
41         struct kmem_cache       *slab;
42         mempool_t       *pool;
43 };
44
45 #define SP(x) { x, "sgpool-" __stringify(x) }
46 #if (SCSI_MAX_SG_SEGMENTS < 32)
47 #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
48 #endif
49 static struct scsi_host_sg_pool scsi_sg_pools[] = {
50         SP(8),
51         SP(16),
52 #if (SCSI_MAX_SG_SEGMENTS > 32)
53         SP(32),
54 #if (SCSI_MAX_SG_SEGMENTS > 64)
55         SP(64),
56 #if (SCSI_MAX_SG_SEGMENTS > 128)
57         SP(128),
58 #if (SCSI_MAX_SG_SEGMENTS > 256)
59 #error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
60 #endif
61 #endif
62 #endif
63 #endif
64         SP(SCSI_MAX_SG_SEGMENTS)
65 };
66 #undef SP
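/*
 * Editor's illustration (not compiled): SP(8) expands to
 * { 8, "sgpool-8" }, so each table entry pairs a pool size with a
 * human-readable slab name.  The slab and pool members are left
 * unset here and are presumably filled in when the pools are
 * created at init time, outside this excerpt.
 */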
67
68 struct kmem_cache *scsi_sdb_cache;
69
70 static void scsi_run_queue(struct request_queue *q);
71
72 /*
73  * Function:    scsi_unprep_request()
74  *
75  * Purpose:     Remove all preparation done for a request, including its
76  *              associated scsi_cmnd, so that it can be requeued.
77  *
78  * Arguments:   req     - request to unprepare
79  *
80  * Lock status: Assumed that no locks are held upon entry.
81  *
82  * Returns:     Nothing.
83  */
84 static void scsi_unprep_request(struct request *req)
85 {
86         struct scsi_cmnd *cmd = req->special;
87
88         blk_unprep_request(req);
89         req->special = NULL;
90
91         scsi_put_command(cmd);
92 }
93
94 /**
95  * __scsi_queue_insert - private queue insertion
96  * @cmd: The SCSI command being requeued
97  * @reason:  The reason for the requeue
98  * @unbusy: Whether the queue should be unbusied
99  *
100  * This is a private queue insertion.  The public interface
101  * scsi_queue_insert() always assumes the queue should be unbusied
102  * because it's always called before the completion.  This function is
103  * for a requeue after completion, which should only occur in this
104  * file.
105  */
106 static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
107 {
108         struct Scsi_Host *host = cmd->device->host;
109         struct scsi_device *device = cmd->device;
110         struct scsi_target *starget = scsi_target(device);
111         struct request_queue *q = device->request_queue;
112         unsigned long flags;
113
114         SCSI_LOG_MLQUEUE(1,
115                  printk("Inserting command %p into mlqueue\n", cmd));
116
117         /*
118          * Set the appropriate busy bit for the device/host.
119          *
120          * If the host/device isn't busy, assume that something actually
121          * completed, and that we should be able to queue a command now.
122          *
123          * Note that the prior mid-layer assumption that any host could
124          * always queue at least one command is now broken.  The mid-layer
125          * will implement a user specifiable stall (see
126          * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
127          * if a command is requeued with no other commands outstanding
128          * either for the device or for the host.
129          */
130         switch (reason) {
131         case SCSI_MLQUEUE_HOST_BUSY:
132                 host->host_blocked = host->max_host_blocked;
133                 break;
134         case SCSI_MLQUEUE_DEVICE_BUSY:
135                 device->device_blocked = device->max_device_blocked;
136                 break;
137         case SCSI_MLQUEUE_TARGET_BUSY:
138                 starget->target_blocked = starget->max_target_blocked;
139                 break;
140         }
141
142         /*
143          * Decrement the counters, since these commands are no longer
144          * active on the host/device.
145          */
146         if (unbusy)
147                 scsi_device_unbusy(device);
148
149         /*
150          * Requeue this command.  It will go before all other commands
151          * that are already in the queue.
152          *
153          * NOTE: there is magic here about the way the queue is plugged if
154          * we have no outstanding commands.
155          * 
156          * Although we *don't* plug the queue, we call the request
157          * function.  The SCSI request function detects the blocked condition
158          * and plugs the queue appropriately.
159          */
160         spin_lock_irqsave(q->queue_lock, flags);
161         blk_requeue_request(q, cmd->request);
162         spin_unlock_irqrestore(q->queue_lock, flags);
163
164         scsi_run_queue(q);
165
166         return 0;
167 }
168
169 /*
170  * Function:    scsi_queue_insert()
171  *
172  * Purpose:     Insert a command in the midlevel queue.
173  *
174  * Arguments:   cmd    - command that we are adding to queue.
175  *              reason - why we are inserting command to queue.
176  *
177  * Lock status: Assumed that lock is not held upon entry.
178  *
179  * Returns:     Nothing.
180  *
181  * Notes:       We do this for one of two cases.  Either the host is busy
182  *              and it cannot accept any more commands for the time being,
183  *              or the device returned QUEUE_FULL and can accept no more
184  *              commands.
185  * Notes:       This could be called either from an interrupt context or a
186  *              normal process context.
187  */
188 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
189 {
190         return __scsi_queue_insert(cmd, reason, 1);
191 }
192 /**
193  * scsi_execute - insert request and wait for the result
194  * @sdev:       scsi device
195  * @cmd:        scsi command
196  * @data_direction: data direction
197  * @buffer:     data buffer
198  * @bufflen:    length of the data buffer
199  * @sense:      optional sense buffer
200  * @timeout:    request timeout in jiffies
201  * @retries:    number of times to retry request
202  * @flags:      flags to OR into the request's cmd_flags
203  * @resid:      optional residual length
204  *
205  * returns the req->errors value which is the scsi_cmnd result
206  * field.
207  */
208 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
209                  int data_direction, void *buffer, unsigned bufflen,
210                  unsigned char *sense, int timeout, int retries, int flags,
211                  int *resid)
212 {
213         struct request *req;
214         int write = (data_direction == DMA_TO_DEVICE);
215         int ret = DRIVER_ERROR << 24;
216
217         req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
218
219         if (bufflen &&  blk_rq_map_kern(sdev->request_queue, req,
220                                         buffer, bufflen, __GFP_WAIT))
221                 goto out;
222
223         req->cmd_len = COMMAND_SIZE(cmd[0]);
224         memcpy(req->cmd, cmd, req->cmd_len);
225         req->sense = sense;
226         req->sense_len = 0;
227         req->retries = retries;
228         req->timeout = timeout;
229         req->cmd_type = REQ_TYPE_BLOCK_PC;
230         req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
231
232         /*
233          * head injection *required* here otherwise quiesce won't work
234          */
235         blk_execute_rq(req->q, NULL, req, 1);
236
237         /*
238          * Some devices (USB mass-storage in particular) may transfer
239          * garbage data together with a residue indicating that the data
240          * is invalid.  Prevent the garbage from being misinterpreted
241          * and prevent security leaks by zeroing out the excess data.
242          */
243         if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
244                 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
245
246         if (resid)
247                 *resid = req->resid_len;
248         ret = req->errors;
249  out:
250         blk_put_request(req);
251
252         return ret;
253 }
254 EXPORT_SYMBOL(scsi_execute);
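/*
 * Usage sketch (hypothetical caller, not part of this file): issuing
 * a TEST UNIT READY with no data transfer.  COMMAND_SIZE() derives
 * the CDB length from the opcode, so only byte 0 needs to be set:
 *
 *	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	int result = scsi_execute(sdev, cdb, DMA_NONE, NULL, 0,
 *				  NULL, 10 * HZ, 3, 0, NULL);
 *
 * A zero result indicates success; otherwise host_byte(result) and
 * driver_byte(result) pick the status apart.
 */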
255
256
257 int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
258                      int data_direction, void *buffer, unsigned bufflen,
259                      struct scsi_sense_hdr *sshdr, int timeout, int retries,
260                      int *resid)
261 {
262         char *sense = NULL;
263         int result;
264         
265         if (sshdr) {
266                 sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
267                 if (!sense)
268                         return DRIVER_ERROR << 24;
269         }
270         result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
271                               sense, timeout, retries, 0, resid);
272         if (sshdr)
273                 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
274
275         kfree(sense);
276         return result;
277 }
278 EXPORT_SYMBOL(scsi_execute_req);
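/*
 * Usage sketch (hypothetical caller): the _req variant decodes the
 * sense data into a scsi_sense_hdr, so callers need not parse the
 * raw sense buffer themselves:
 *
 *	struct scsi_sense_hdr sshdr;
 *	int result = scsi_execute_req(sdev, cdb, DMA_NONE, NULL, 0,
 *				      &sshdr, 10 * HZ, 3, NULL);
 *	if (result && scsi_sense_valid(&sshdr) &&
 *	    sshdr.sense_key == UNIT_ATTENTION)
 *		... handle media change, reset, etc ...
 */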
279
280 /*
281  * Function:    scsi_init_cmd_errh()
282  *
283  * Purpose:     Initialize cmd fields related to error handling.
284  *
285  * Arguments:   cmd     - command that is ready to be queued.
286  *
287  * Notes:       This function has the job of initializing a number of
288  *              fields related to error handling.   Typically this will
289  *              be called once for each command, as required.
290  */
291 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
292 {
293         cmd->serial_number = 0;
294         scsi_set_resid(cmd, 0);
295         memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
296         if (cmd->cmd_len == 0)
297                 cmd->cmd_len = scsi_command_size(cmd->cmnd);
298 }
299
300 void scsi_device_unbusy(struct scsi_device *sdev)
301 {
302         struct Scsi_Host *shost = sdev->host;
303         struct scsi_target *starget = scsi_target(sdev);
304         unsigned long flags;
305
306         spin_lock_irqsave(shost->host_lock, flags);
307         shost->host_busy--;
308         starget->target_busy--;
309         if (unlikely(scsi_host_in_recovery(shost) &&
310                      (shost->host_failed || shost->host_eh_scheduled)))
311                 scsi_eh_wakeup(shost);
312         spin_unlock(shost->host_lock);
313         spin_lock(sdev->request_queue->queue_lock);
314         sdev->device_busy--;
315         spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
316 }
317
318 /*
319  * Called for single_lun devices on IO completion. Clear starget_sdev_user,
320  * and call blk_run_queue for all the scsi_devices on the target -
321  * including current_sdev first.
322  *
323  * Called with *no* scsi locks held.
324  */
325 static void scsi_single_lun_run(struct scsi_device *current_sdev)
326 {
327         struct Scsi_Host *shost = current_sdev->host;
328         struct scsi_device *sdev, *tmp;
329         struct scsi_target *starget = scsi_target(current_sdev);
330         unsigned long flags;
331
332         spin_lock_irqsave(shost->host_lock, flags);
333         starget->starget_sdev_user = NULL;
334         spin_unlock_irqrestore(shost->host_lock, flags);
335
336         /*
337          * Call blk_run_queue for all LUNs on the target, starting with
338          * current_sdev. We race with others (to set starget_sdev_user),
339          * but in most cases, we will be first. Ideally, each LU on the
340          * target would get some limited time or requests on the target.
341          */
342         blk_run_queue(current_sdev->request_queue);
343
344         spin_lock_irqsave(shost->host_lock, flags);
345         if (starget->starget_sdev_user)
346                 goto out;
347         list_for_each_entry_safe(sdev, tmp, &starget->devices,
348                         same_target_siblings) {
349                 if (sdev == current_sdev)
350                         continue;
351                 if (scsi_device_get(sdev))
352                         continue;
353
354                 spin_unlock_irqrestore(shost->host_lock, flags);
355                 blk_run_queue(sdev->request_queue);
356                 spin_lock_irqsave(shost->host_lock, flags);
357         
358                 scsi_device_put(sdev);
359         }
360  out:
361         spin_unlock_irqrestore(shost->host_lock, flags);
362 }
363
364 static inline int scsi_device_is_busy(struct scsi_device *sdev)
365 {
366         if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
367                 return 1;
368
369         return 0;
370 }
371
372 static inline int scsi_target_is_busy(struct scsi_target *starget)
373 {
374         return ((starget->can_queue > 0 &&
375                  starget->target_busy >= starget->can_queue) ||
376                  starget->target_blocked);
377 }
378
379 static inline int scsi_host_is_busy(struct Scsi_Host *shost)
380 {
381         if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
382             shost->host_blocked || shost->host_self_blocked)
383                 return 1;
384
385         return 0;
386 }
387
388 /*
389  * Function:    scsi_run_queue()
390  *
391  * Purpose:     Select a proper request queue to serve next
392  *
393  * Arguments:   q       - last request's queue
394  *
395  * Returns:     Nothing
396  *
397  * Notes:       The previous command was completely finished, start
398  *              a new one if possible.
399  */
400 static void scsi_run_queue(struct request_queue *q)
401 {
402         struct scsi_device *sdev = q->queuedata;
403         struct Scsi_Host *shost = sdev->host;
404         LIST_HEAD(starved_list);
405         unsigned long flags;
406
407         if (scsi_target(sdev)->single_lun)
408                 scsi_single_lun_run(sdev);
409
410         spin_lock_irqsave(shost->host_lock, flags);
411         list_splice_init(&shost->starved_list, &starved_list);
412
413         while (!list_empty(&starved_list)) {
414                 int flagset;
415
416                 /*
417                  * As long as shost is accepting commands and we have
418                  * starved queues, call blk_run_queue. scsi_request_fn
419                  * drops the queue_lock and can add us back to the
420                  * starved_list.
421                  *
422                  * host_lock protects the starved_list and starved_entry.
423                  * scsi_request_fn must get the host_lock before checking
424                  * or modifying starved_list or starved_entry.
425                  */
426                 if (scsi_host_is_busy(shost))
427                         break;
428
429                 sdev = list_entry(starved_list.next,
430                                   struct scsi_device, starved_entry);
431                 list_del_init(&sdev->starved_entry);
432                 if (scsi_target_is_busy(scsi_target(sdev))) {
433                         list_move_tail(&sdev->starved_entry,
434                                        &shost->starved_list);
435                         continue;
436                 }
437
438                 spin_unlock(shost->host_lock);
439
440                 spin_lock(sdev->request_queue->queue_lock);
441                 flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
442                                 !test_bit(QUEUE_FLAG_REENTER,
443                                         &sdev->request_queue->queue_flags);
444                 if (flagset)
445                         queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
446                 __blk_run_queue(sdev->request_queue);
447                 if (flagset)
448                         queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
449                 spin_unlock(sdev->request_queue->queue_lock);
450
451                 spin_lock(shost->host_lock);
452         }
453         /* put any unprocessed entries back */
454         list_splice(&starved_list, &shost->starved_list);
455         spin_unlock_irqrestore(shost->host_lock, flags);
456
457         blk_run_queue(q);
458 }
459
460 /*
461  * Function:    scsi_requeue_command()
462  *
463  * Purpose:     Handle post-processing of completed commands.
464  *
465  * Arguments:   q       - queue to operate on
466  *              cmd     - command that may need to be requeued.
467  *
468  * Returns:     Nothing
469  *
470  * Notes:       After command completion, there may be blocks left
471  *              over which weren't finished by the previous command;
472  *              this can be for a number of reasons - the main one is
473  *              I/O errors in the middle of the request, in which case
474  *              we need to request the blocks that come after the bad
475  *              sector.
476  * Notes:       Upon return, cmd is a stale pointer.
477  */
478 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
479 {
480         struct request *req = cmd->request;
481         unsigned long flags;
482
483         spin_lock_irqsave(q->queue_lock, flags);
484         scsi_unprep_request(req);
485         blk_requeue_request(q, req);
486         spin_unlock_irqrestore(q->queue_lock, flags);
487
488         scsi_run_queue(q);
489 }
490
491 void scsi_next_command(struct scsi_cmnd *cmd)
492 {
493         struct scsi_device *sdev = cmd->device;
494         struct request_queue *q = sdev->request_queue;
495
496         /* need to hold a reference on the device before we let go of the cmd */
497         get_device(&sdev->sdev_gendev);
498
499         scsi_put_command(cmd);
500         scsi_run_queue(q);
501
502         /* ok to remove device now */
503         put_device(&sdev->sdev_gendev);
504 }
505
506 void scsi_run_host_queues(struct Scsi_Host *shost)
507 {
508         struct scsi_device *sdev;
509
510         shost_for_each_device(sdev, shost)
511                 scsi_run_queue(sdev->request_queue);
512 }
513
514 static void __scsi_release_buffers(struct scsi_cmnd *, int);
515
516 /*
517  * Function:    scsi_end_request()
518  *
519  * Purpose:     Post-processing of completed commands (usually invoked at end
520  *              of upper level post-processing and scsi_io_completion).
521  *
522  * Arguments:   cmd      - command that is complete.
523  *              error    - 0 if I/O indicates success, < 0 for I/O error.
524  *              bytes    - number of bytes of completed I/O
525  *              requeue  - indicates whether we should requeue leftovers.
526  *
527  * Lock status: Assumed that lock is not held upon entry.
528  *
529  * Returns:     cmd if requeue required, NULL otherwise.
530  *
531  * Notes:       This is called for block device requests in order to
532  *              mark some number of sectors as complete.
533  * 
534  *              We are guaranteeing that the request queue will be goosed
535  *              at some point during this call.
536  * Notes:       If cmd was requeued, upon return it will be a stale pointer.
537  */
538 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
539                                           int bytes, int requeue)
540 {
541         struct request_queue *q = cmd->device->request_queue;
542         struct request *req = cmd->request;
543
544         /*
545          * If there are blocks left over at the end, set up the command
546          * to queue the remainder of them.
547          */
548         if (blk_end_request(req, error, bytes)) {
549                 /* kill remainder if no retries */
550                 if (error && scsi_noretry_cmd(cmd))
551                         blk_end_request_all(req, error);
552                 else {
553                         if (requeue) {
554                                 /*
555                                  * Bleah.  Leftovers again.  Stick the
556                                  * leftovers in the front of the
557                                  * queue, and goose the queue again.
558                                  */
559                                 scsi_release_buffers(cmd);
560                                 scsi_requeue_command(q, cmd);
561                                 cmd = NULL;
562                         }
563                         return cmd;
564                 }
565         }
566
567         /*
568          * This will goose the queue request function at the end, so we don't
569          * need to worry about launching another command.
570          */
571         __scsi_release_buffers(cmd, 0);
572         scsi_next_command(cmd);
573         return NULL;
574 }
575
576 static inline unsigned int scsi_sgtable_index(unsigned short nents)
577 {
578         unsigned int index;
579
580         BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
581
582         if (nents <= 8)
583                 index = 0;
584         else
585                 index = get_count_order(nents) - 3;
586
587         return index;
588 }
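/*
 * Worked example: nents = 24 gives get_count_order(24) = 5 (the next
 * power of two is 32), so index = 5 - 3 = 2, selecting
 * scsi_sg_pools[2] ("sgpool-32" in the table above, assuming
 * SCSI_MAX_SG_SEGMENTS > 32 so that entry is compiled in).
 */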
589
590 static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
591 {
592         struct scsi_host_sg_pool *sgp;
593
594         sgp = scsi_sg_pools + scsi_sgtable_index(nents);
595         mempool_free(sgl, sgp->pool);
596 }
597
598 static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
599 {
600         struct scsi_host_sg_pool *sgp;
601
602         sgp = scsi_sg_pools + scsi_sgtable_index(nents);
603         return mempool_alloc(sgp->pool, gfp_mask);
604 }
605
606 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
607                               gfp_t gfp_mask)
608 {
609         int ret;
610
611         BUG_ON(!nents);
612
613         ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
614                                gfp_mask, scsi_sg_alloc);
615         if (unlikely(ret))
616                 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
617                                 scsi_sg_free);
618
619         return ret;
620 }
621
622 static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
623 {
624         __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
625 }
626
627 static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
628 {
629
630         if (cmd->sdb.table.nents)
631                 scsi_free_sgtable(&cmd->sdb);
632
633         memset(&cmd->sdb, 0, sizeof(cmd->sdb));
634
635         if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
636                 struct scsi_data_buffer *bidi_sdb =
637                         cmd->request->next_rq->special;
638                 scsi_free_sgtable(bidi_sdb);
639                 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
640                 cmd->request->next_rq->special = NULL;
641         }
642
643         if (scsi_prot_sg_count(cmd))
644                 scsi_free_sgtable(cmd->prot_sdb);
645 }
646
647 /*
648  * Function:    scsi_release_buffers()
649  *
650  * Purpose:     Completion processing for block device I/O requests.
651  *
652  * Arguments:   cmd     - command that we are bailing.
653  *
654  * Lock status: Assumed that no lock is held upon entry.
655  *
656  * Returns:     Nothing
657  *
658  * Notes:       In the event that an upper level driver rejects a
659  *              command, we must release resources allocated during
660  *              scsi_init_io().  Primarily this would involve
661  *              the scatter-gather table, and potentially any bounce
662  *              buffers.
663  */
664 void scsi_release_buffers(struct scsi_cmnd *cmd)
665 {
666         __scsi_release_buffers(cmd, 1);
667 }
668 EXPORT_SYMBOL(scsi_release_buffers);
669
670 /*
671  * Function:    scsi_io_completion()
672  *
673  * Purpose:     Completion processing for block device I/O requests.
674  *
675  * Arguments:   cmd   - command that is finished.
676  *
677  * Lock status: Assumed that no lock is held upon entry.
678  *
679  * Returns:     Nothing
680  *
681  * Notes:       This function is matched in terms of capabilities to
682  *              the function that created the scatter-gather list.
683  *              In other words, if there are no bounce buffers
684  *              (the normal case for most drivers), we don't need
685  *              the logic to deal with cleaning up afterwards.
686  *
687  *              We must call scsi_end_request().  This will finish off
688  *              the specified number of sectors.  If we are done, the
689  *              command block will be released and the queue function
690  *              will be goosed.  If we are not done then we have to
691  *              figure out what to do next:
692  *
693  *              a) We can call scsi_requeue_command().  The request
694  *                 will be unprepared and put back on the queue.  Then
695  *                 a new command will be created for it.  This should
696  *                 be used if we made forward progress, or if we want
697  *                 to switch from READ(10) to READ(6) for example.
698  *
699  *              b) We can call scsi_queue_insert().  The request will
700  *                 be put back on the queue and retried using the same
701  *                 command as before, possibly after a delay.
702  *
703  *              c) We can call blk_end_request() with -EIO to fail
704  *                 the remainder of the request.
705  */
706 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
707 {
708         int result = cmd->result;
709         struct request_queue *q = cmd->device->request_queue;
710         struct request *req = cmd->request;
711         int error = 0;
712         struct scsi_sense_hdr sshdr;
713         int sense_valid = 0;
714         int sense_deferred = 0;
715         enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
716               ACTION_DELAYED_RETRY} action;
717         char *description = NULL;
718
719         if (result) {
720                 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
721                 if (sense_valid)
722                         sense_deferred = scsi_sense_is_deferred(&sshdr);
723         }
724
725         if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
726                 req->errors = result;
727                 if (result) {
728                         if (sense_valid && req->sense) {
729                                 /*
730                                  * SG_IO wants current and deferred errors
731                                  */
732                                 int len = 8 + cmd->sense_buffer[7];
733
734                                 if (len > SCSI_SENSE_BUFFERSIZE)
735                                         len = SCSI_SENSE_BUFFERSIZE;
736                                 memcpy(req->sense, cmd->sense_buffer,  len);
737                                 req->sense_len = len;
738                         }
739                         if (!sense_deferred)
740                                 error = -EIO;
741                 }
742
743                 req->resid_len = scsi_get_resid(cmd);
744
745                 if (scsi_bidi_cmnd(cmd)) {
746                         /*
747                          * Bidi commands must be completed as a whole,
748                          * both sides at once.
749                          */
750                         req->next_rq->resid_len = scsi_in(cmd)->resid;
751
752                         scsi_release_buffers(cmd);
753                         blk_end_request_all(req, 0);
754
755                         scsi_next_command(cmd);
756                         return;
757                 }
758         }
759
760         /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
761         BUG_ON(blk_bidi_rq(req));
762
763         /*
764          * Next deal with any sectors which we were able to correctly
765          * handle.
766          */
767         SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
768                                       "%d bytes done.\n",
769                                       blk_rq_sectors(req), good_bytes));
770
771         /*
772          * Recovered errors need reporting, but they're always treated
773          * as success, so fiddle the result code here.  For BLOCK_PC
774          * we already took a copy of the original into rq->errors which
775          * is what gets returned to the user
776          */
777         if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
778                 /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
779                  * print since caller wants ATA registers. Only occurs on
780                  * SCSI ATA PASS_THROUGH commands when CK_COND=1
781                  */
782                 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
783                         ;
784                 else if (!(req->cmd_flags & REQ_QUIET))
785                         scsi_print_sense("", cmd);
786                 result = 0;
787                 /* BLOCK_PC may have set error */
788                 error = 0;
789         }
790
791         /*
792          * A number of bytes were successfully read.  If there
793          * are leftovers and there is some kind of error
794          * (result != 0), retry the rest.
795          */
796         if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
797                 return;
798
799         error = -EIO;
800
801         if (host_byte(result) == DID_RESET) {
802                 /* Third party bus reset or reset for error recovery
803                  * reasons.  Just retry the command and see what
804                  * happens.
805                  */
806                 action = ACTION_RETRY;
807         } else if (sense_valid && !sense_deferred) {
808                 switch (sshdr.sense_key) {
809                 case UNIT_ATTENTION:
810                         if (cmd->device->removable) {
811                                 /* Detected disc change.  Set a bit
812                                  * and quietly refuse further access.
813                                  */
814                                 cmd->device->changed = 1;
815                                 description = "Media Changed";
816                                 action = ACTION_FAIL;
817                         } else {
818                                 /* Must have been a power glitch, or a
819                                  * bus reset.  Could not have been a
820                                  * media change, so we just retry the
821                                  * command and see what happens.
822                                  */
823                                 action = ACTION_RETRY;
824                         }
825                         break;
826                 case ILLEGAL_REQUEST:
827                         /* If we had an ILLEGAL REQUEST returned, then
828                          * we may have performed an unsupported
829                          * command.  The only thing this should be
830                          * would be a ten byte read where only a six
831                          * byte read was supported.  Also, on a system
832                          * where READ CAPACITY failed, we may have
833                          * read past the end of the disk.
834                          */
835                         if ((cmd->device->use_10_for_rw &&
836                             sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
837                             (cmd->cmnd[0] == READ_10 ||
838                              cmd->cmnd[0] == WRITE_10)) {
839                                 /* This will issue a new 6-byte command. */
840                                 cmd->device->use_10_for_rw = 0;
841                                 action = ACTION_REPREP;
842                         } else if (sshdr.asc == 0x10) /* DIX */ {
843                                 description = "Host Data Integrity Failure";
844                                 action = ACTION_FAIL;
845                                 error = -EILSEQ;
846                         } else
847                                 action = ACTION_FAIL;
848                         break;
849                 case ABORTED_COMMAND:
850                         action = ACTION_FAIL;
851                         if (sshdr.asc == 0x10) { /* DIF */
852                                 description = "Target Data Integrity Failure";
853                                 error = -EILSEQ;
854                         }
855                         break;
856                 case NOT_READY:
857                         /* If the device is in the process of becoming
858                          * ready, or has a temporary blockage, retry.
859                          */
860                         if (sshdr.asc == 0x04) {
861                                 switch (sshdr.ascq) {
862                                 case 0x01: /* becoming ready */
863                                 case 0x04: /* format in progress */
864                                 case 0x05: /* rebuild in progress */
865                                 case 0x06: /* recalculation in progress */
866                                 case 0x07: /* operation in progress */
867                                 case 0x08: /* Long write in progress */
868                                 case 0x09: /* self test in progress */
869                                 case 0x14: /* space allocation in progress */
870                                         action = ACTION_DELAYED_RETRY;
871                                         break;
872                                 default:
873                                         description = "Device not ready";
874                                         action = ACTION_FAIL;
875                                         break;
876                                 }
877                         } else {
878                                 description = "Device not ready";
879                                 action = ACTION_FAIL;
880                         }
881                         break;
882                 case VOLUME_OVERFLOW:
883                         /* See SSC3rXX or current. */
884                         action = ACTION_FAIL;
885                         break;
886                 default:
887                         description = "Unhandled sense code";
888                         action = ACTION_FAIL;
889                         break;
890                 }
891         } else {
892                 description = "Unhandled error code";
893                 action = ACTION_FAIL;
894         }
895
896         switch (action) {
897         case ACTION_FAIL:
898                 /* Give up and fail the remainder of the request */
899                 scsi_release_buffers(cmd);
900                 if (!(req->cmd_flags & REQ_QUIET)) {
901                         if (description)
902                                 scmd_printk(KERN_INFO, cmd, "%s\n",
903                                             description);
904                         scsi_print_result(cmd);
905                         if (driver_byte(result) & DRIVER_SENSE)
906                                 scsi_print_sense("", cmd);
907                         scsi_print_command(cmd);
908                 }
909                 if (blk_end_request_err(req, error))
910                         scsi_requeue_command(q, cmd);
911                 else
912                         scsi_next_command(cmd);
913                 break;
914         case ACTION_REPREP:
915                 /* Unprep the request and put it back at the head of the queue.
916                  * A new command will be prepared and issued.
917                  */
918                 scsi_release_buffers(cmd);
919                 scsi_requeue_command(q, cmd);
920                 break;
921         case ACTION_RETRY:
922                 /* Retry the same command immediately */
923                 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
924                 break;
925         case ACTION_DELAYED_RETRY:
926                 /* Retry the same command after a delay */
927                 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
928                 break;
929         }
930 }
931
932 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
933                              gfp_t gfp_mask)
934 {
935         int count;
936
937         /*
938          * If sg table allocation fails, requeue request later.
939          */
940         if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
941                                         gfp_mask))) {
942                 return BLKPREP_DEFER;
943         }
944
945         req->buffer = NULL;
946
947         /* 
948          * Next, walk the list, and fill in the addresses and sizes of
949          * each segment.
950          */
951         count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
952         BUG_ON(count > sdb->table.nents);
953         sdb->table.nents = count;
954         sdb->length = blk_rq_bytes(req);
955         return BLKPREP_OK;
956 }
957
958 /*
959  * Function:    scsi_init_io()
960  *
961  * Purpose:     SCSI I/O initialize function.
962  *
963  * Arguments:   cmd   - Command descriptor we wish to initialize
964  *
965  * Returns:     0 on success
966  *              BLKPREP_DEFER if the failure is retryable
967  *              BLKPREP_KILL if the failure is fatal
968  */
969 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
970 {
971         int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
972         if (error)
973                 goto err_exit;
974
975         if (blk_bidi_rq(cmd->request)) {
976                 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
977                         scsi_sdb_cache, GFP_ATOMIC);
978                 if (!bidi_sdb) {
979                         error = BLKPREP_DEFER;
980                         goto err_exit;
981                 }
982
983                 cmd->request->next_rq->special = bidi_sdb;
984                 error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
985                                                                     GFP_ATOMIC);
986                 if (error)
987                         goto err_exit;
988         }
989
990         if (blk_integrity_rq(cmd->request)) {
991                 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
992                 int ivecs, count;
993
994                 BUG_ON(prot_sdb == NULL);
995                 ivecs = blk_rq_count_integrity_sg(cmd->request);
996
997                 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
998                         error = BLKPREP_DEFER;
999                         goto err_exit;
1000                 }
1001
1002                 count = blk_rq_map_integrity_sg(cmd->request,
1003                                                 prot_sdb->table.sgl);
1004                 BUG_ON(unlikely(count > ivecs));
1005
1006                 cmd->prot_sdb = prot_sdb;
1007                 cmd->prot_sdb->table.nents = count;
1008         }
1009
1010         return BLKPREP_OK;
1011
1012 err_exit:
1013         scsi_release_buffers(cmd);
1014         cmd->request->special = NULL;
1015         scsi_put_command(cmd);
1016         return error;
1017 }
1018 EXPORT_SYMBOL(scsi_init_io);
1019
1020 static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1021                 struct request *req)
1022 {
1023         struct scsi_cmnd *cmd;
1024
1025         if (!req->special) {
1026                 cmd = scsi_get_command(sdev, GFP_ATOMIC);
1027                 if (unlikely(!cmd))
1028                         return NULL;
1029                 req->special = cmd;
1030         } else {
1031                 cmd = req->special;
1032         }
1033
1034         /* pull a tag out of the request if we have one */
1035         cmd->tag = req->tag;
1036         cmd->request = req;
1037
1038         cmd->cmnd = req->cmd;
1039
1040         return cmd;
1041 }
1042
1043 int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1044 {
1045         struct scsi_cmnd *cmd;
1046         int ret = scsi_prep_state_check(sdev, req);
1047
1048         if (ret != BLKPREP_OK)
1049                 return ret;
1050
1051         cmd = scsi_get_cmd_from_req(sdev, req);
1052         if (unlikely(!cmd))
1053                 return BLKPREP_DEFER;
1054
1055         /*
1056          * BLOCK_PC requests may transfer data, in which case they must
1057          * have a bio attached to them.  Or they might contain a SCSI command
1058          * that does not transfer data, in which case they may optionally
1059          * submit a request without an attached bio.
1060          */
1061         if (req->bio) {
1062                 int ret;
1063
1064                 BUG_ON(!req->nr_phys_segments);
1065
1066                 ret = scsi_init_io(cmd, GFP_ATOMIC);
1067                 if (unlikely(ret))
1068                         return ret;
1069         } else {
1070                 BUG_ON(blk_rq_bytes(req));
1071
1072                 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1073                 req->buffer = NULL;
1074         }
1075
1076         cmd->cmd_len = req->cmd_len;
1077         if (!blk_rq_bytes(req))
1078                 cmd->sc_data_direction = DMA_NONE;
1079         else if (rq_data_dir(req) == WRITE)
1080                 cmd->sc_data_direction = DMA_TO_DEVICE;
1081         else
1082                 cmd->sc_data_direction = DMA_FROM_DEVICE;
1083         
1084         cmd->transfersize = blk_rq_bytes(req);
1085         cmd->allowed = req->retries;
1086         return BLKPREP_OK;
1087 }
1088 EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
1089
1090 /*
1091  * Setup a REQ_TYPE_FS command.  These are simple read/write requests
1092  * from filesystems that still need to be translated to SCSI CDBs by
1093  * the ULD.
1094  */
1095 int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1096 {
1097         struct scsi_cmnd *cmd;
1098         int ret = scsi_prep_state_check(sdev, req);
1099
1100         if (ret != BLKPREP_OK)
1101                 return ret;
1102
1103         if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1104                          && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1105                 ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1106                 if (ret != BLKPREP_OK)
1107                         return ret;
1108         }
1109
1110         /*
1111          * Filesystem requests must transfer data.
1112          */
1113         BUG_ON(!req->nr_phys_segments);
1114
1115         cmd = scsi_get_cmd_from_req(sdev, req);
1116         if (unlikely(!cmd))
1117                 return BLKPREP_DEFER;
1118
1119         memset(cmd->cmnd, 0, BLK_MAX_CDB);
1120         return scsi_init_io(cmd, GFP_ATOMIC);
1121 }
1122 EXPORT_SYMBOL(scsi_setup_fs_cmnd);
1123
1124 int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1125 {
1126         int ret = BLKPREP_OK;
1127
1128         /*
1129          * If the device is not in running state we will reject some
1130          * or all commands.
1131          */
1132         if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1133                 switch (sdev->sdev_state) {
1134                 case SDEV_OFFLINE:
1135                         /*
1136                          * If the device is offline we refuse to process any
1137                          * commands.  The device must be brought online
1138                          * before trying any recovery commands.
1139                          */
1140                         sdev_printk(KERN_ERR, sdev,
1141                                     "rejecting I/O to offline device\n");
1142                         ret = BLKPREP_KILL;
1143                         break;
1144                 case SDEV_DEL:
1145                         /*
1146                          * If the device is fully deleted, we refuse to
1147                          * process any commands as well.
1148                          */
1149                         sdev_printk(KERN_ERR, sdev,
1150                                     "rejecting I/O to dead device\n");
1151                         ret = BLKPREP_KILL;
1152                         break;
1153                 case SDEV_QUIESCE:
1154                 case SDEV_BLOCK:
1155                 case SDEV_CREATED_BLOCK:
1156                         /*
1157                          * If the device is blocked we defer normal commands.
1158                          */
1159                         if (!(req->cmd_flags & REQ_PREEMPT))
1160                                 ret = BLKPREP_DEFER;
1161                         break;
1162                 default:
1163                         /*
1164                          * For any other not fully online state we only allow
1165                          * special commands.  In particular any user initiated
1166                          * command is not allowed.
1167                          */
1168                         if (!(req->cmd_flags & REQ_PREEMPT))
1169                                 ret = BLKPREP_KILL;
1170                         break;
1171                 }
1172         }
1173         return ret;
1174 }
1175 EXPORT_SYMBOL(scsi_prep_state_check);
1176
1177 int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1178 {
1179         struct scsi_device *sdev = q->queuedata;
1180
1181         switch (ret) {
1182         case BLKPREP_KILL:
1183                 req->errors = DID_NO_CONNECT << 16;
1184                 /* release the command and kill it */
1185                 if (req->special) {
1186                         struct scsi_cmnd *cmd = req->special;
1187                         scsi_release_buffers(cmd);
1188                         scsi_put_command(cmd);
1189                         req->special = NULL;
1190                 }
1191                 break;
1192         case BLKPREP_DEFER:
1193                 /*
1194                  * If we defer, blk_peek_request() returns NULL, but the
1195                  * queue must be restarted, so we plug here if no returning
1196                  * command will automatically do that.
1197                  */
1198                 if (sdev->device_busy == 0)
1199                         blk_plug_device(q);
1200                 break;
1201         default:
1202                 req->cmd_flags |= REQ_DONTPREP;
1203         }
1204
1205         return ret;
1206 }
1207 EXPORT_SYMBOL(scsi_prep_return);
1208
1209 int scsi_prep_fn(struct request_queue *q, struct request *req)
1210 {
1211         struct scsi_device *sdev = q->queuedata;
1212         int ret = BLKPREP_KILL;
1213
1214         if (req->cmd_type == REQ_TYPE_BLOCK_PC)
1215                 ret = scsi_setup_blk_pc_cmnd(sdev, req);
1216         return scsi_prep_return(q, req, ret);
1217 }
1218 EXPORT_SYMBOL(scsi_prep_fn);
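/*
 * Sketch of how an upper-level driver is expected to combine these
 * helpers in its own prep_fn (simplified and hypothetical; see the
 * real sd/sr prep functions for the full treatment):
 *
 *	static int uld_prep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		struct scsi_device *sdev = q->queuedata;
 *		int ret = scsi_setup_fs_cmnd(sdev, rq);
 *
 *		if (ret == BLKPREP_OK) {
 *			struct scsi_cmnd *cmd = rq->special;
 *			... build the CDB in cmd->cmnd ...
 *		}
 *		return scsi_prep_return(q, rq, ret);
 *	}
 */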
1219
1220 /*
1221  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1222  * return 0.
1223  *
1224  * Called with the queue_lock held.
1225  */
1226 static inline int scsi_dev_queue_ready(struct request_queue *q,
1227                                   struct scsi_device *sdev)
1228 {
1229         if (sdev->device_busy == 0 && sdev->device_blocked) {
1230                 /*
1231                  * unblock after device_blocked iterates to zero
1232                  */
1233                 if (--sdev->device_blocked == 0) {
1234                         SCSI_LOG_MLQUEUE(3,
1235                                    sdev_printk(KERN_INFO, sdev,
1236                                    "unblocking device at zero depth\n"));
1237                 } else {
1238                         blk_plug_device(q);
1239                         return 0;
1240                 }
1241         }
1242         if (scsi_device_is_busy(sdev))
1243                 return 0;
1244
1245         return 1;
1246 }
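/*
 * Worked example (editor's note): with max_device_blocked = 3, a
 * device that just reported QUEUE_FULL is skipped on the next two
 * ready checks once it goes idle (--device_blocked yields 2, then 1,
 * plugging the queue each time) and is unblocked on the third check,
 * when the count finally reaches zero.
 */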
1247
1248
1249 /*
1250  * scsi_target_queue_ready: checks whether we can send commands to the target
1251  * @sdev: scsi device on starget to check.
1252  *
1253  * Called with the host lock held.
1254  */
1255 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1256                                            struct scsi_device *sdev)
1257 {
1258         struct scsi_target *starget = scsi_target(sdev);
1259
1260         if (starget->single_lun) {
1261                 if (starget->starget_sdev_user &&
1262                     starget->starget_sdev_user != sdev)
1263                         return 0;
1264                 starget->starget_sdev_user = sdev;
1265         }
1266
1267         if (starget->target_busy == 0 && starget->target_blocked) {
1268                 /*
1269                  * unblock after target_blocked iterates to zero
1270                  */
1271                 if (--starget->target_blocked == 0) {
1272                         SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1273                                          "unblocking target at zero depth\n"));
1274                 } else
1275                         return 0;
1276         }
1277
1278         if (scsi_target_is_busy(starget)) {
1279                 if (list_empty(&sdev->starved_entry)) {
1280                         list_add_tail(&sdev->starved_entry,
1281                                       &shost->starved_list);
1282                         return 0;
1283                 }
1284         }
1285
1286         /* We're OK to process the command, so we can't be starved */
1287         if (!list_empty(&sdev->starved_entry))
1288                 list_del_init(&sdev->starved_entry);
1289         return 1;
1290 }
1291
1292 /*
1293  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1294  * return 0. We must end up running the queue again whenever 0 is
1295  * returned, else IO can hang.
1296  *
1297  * Called with host_lock held.
1298  */
1299 static inline int scsi_host_queue_ready(struct request_queue *q,
1300                                    struct Scsi_Host *shost,
1301                                    struct scsi_device *sdev)
1302 {
1303         if (scsi_host_in_recovery(shost))
1304                 return 0;
1305         if (shost->host_busy == 0 && shost->host_blocked) {
1306                 /*
1307                  * unblock after host_blocked iterates to zero
1308                  */
1309                 if (--shost->host_blocked == 0) {
1310                         SCSI_LOG_MLQUEUE(3,
1311                                 printk("scsi%d unblocking host at zero depth\n",
1312                                         shost->host_no));
1313                 } else {
1314                         return 0;
1315                 }
1316         }
1317         if (scsi_host_is_busy(shost)) {
1318                 if (list_empty(&sdev->starved_entry))
1319                         list_add_tail(&sdev->starved_entry, &shost->starved_list);
1320                 return 0;
1321         }
1322
1323         /* We're OK to process the command, so we can't be starved */
1324         if (!list_empty(&sdev->starved_entry))
1325                 list_del_init(&sdev->starved_entry);
1326
1327         return 1;
1328 }
1329
1330 /*
1331  * Busy state exporting function for request stacking drivers.
1332  *
1333  * For efficiency, no lock is taken to check the busy state of
1334  * shost/starget/sdev, since the returned value is not guaranteed and
1335  * may change after request stacking drivers call the function,
1336  * whether or not a lock is taken.
1337  *
1338  * When scsi can't dispatch I/Os anymore and needs to kill I/Os
1339  * (e.g. !sdev), scsi needs to return 'not busy'.
1340  * Otherwise, request stacking drivers may hold requests forever.
1341  */
1342 static int scsi_lld_busy(struct request_queue *q)
1343 {
1344         struct scsi_device *sdev = q->queuedata;
1345         struct Scsi_Host *shost;
1346         struct scsi_target *starget;
1347
1348         if (!sdev)
1349                 return 0;
1350
1351         shost = sdev->host;
1352         starget = scsi_target(sdev);
1353
1354         if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
1355             scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
1356                 return 1;
1357
1358         return 0;
1359 }
1360
1361 /*
1362  * Kill a request for a dead device
1363  */
1364 static void scsi_kill_request(struct request *req, struct request_queue *q)
1365 {
1366         struct scsi_cmnd *cmd = req->special;
1367         struct scsi_device *sdev;
1368         struct scsi_target *starget;
1369         struct Scsi_Host *shost;
1370
1371         blk_start_request(req);
1372
1373         sdev = cmd->device;
1374         starget = scsi_target(sdev);
1375         shost = sdev->host;
1376         scsi_init_cmd_errh(cmd);
1377         cmd->result = DID_NO_CONNECT << 16;
1378         atomic_inc(&cmd->device->iorequest_cnt);
1379
1380         /*
1381          * The SCSI completion path will do scsi_device_unbusy(), which
1382          * decrements the busy counts, so we bump them up here first,
1383          * dancing with the locks as the normal issue path does.
1384          */
1385         sdev->device_busy++;
1386         spin_unlock(sdev->request_queue->queue_lock);
1387         spin_lock(shost->host_lock);
1388         shost->host_busy++;
1389         starget->target_busy++;
1390         spin_unlock(shost->host_lock);
1391         spin_lock(sdev->request_queue->queue_lock);
1392
1393         blk_complete_request(req);
1394 }
1395
1396 static void scsi_softirq_done(struct request *rq)
1397 {
1398         struct scsi_cmnd *cmd = rq->special;
1399         unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1400         int disposition;
1401
1402         INIT_LIST_HEAD(&cmd->eh_entry);
1403
1404         /*
1405          * Set the serial numbers back to zero
1406          */
1407         cmd->serial_number = 0;
1408
1409         atomic_inc(&cmd->device->iodone_cnt);
1410         if (cmd->result)
1411                 atomic_inc(&cmd->device->ioerr_cnt);
1412
1413         disposition = scsi_decide_disposition(cmd);
1414         if (disposition != SUCCESS &&
1415             time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1416                 sdev_printk(KERN_ERR, cmd->device,
1417                             "timing out command, waited %lus\n",
1418                             wait_for/HZ);
1419                 disposition = SUCCESS;
1420         }
1421                         
1422         scsi_log_completion(cmd, disposition);
1423
1424         switch (disposition) {
1425                 case SUCCESS:
1426                         scsi_finish_command(cmd);
1427                         break;
1428                 case NEEDS_RETRY:
1429                         scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1430                         break;
1431                 case ADD_TO_MLQUEUE:
1432                         scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1433                         break;
1434                 default:
1435                         if (!scsi_eh_scmd_add(cmd, 0))
1436                                 scsi_finish_command(cmd);
1437         }
1438 }
1439
1440 /*
1441  * Function:    scsi_request_fn()
1442  *
1443  * Purpose:     Main strategy routine for SCSI.
1444  *
1445  * Arguments:   q       - Pointer to actual queue.
1446  *
1447  * Returns:     Nothing
1448  *
1449  * Lock status: IO request lock assumed to be held when called.
1450  */
1451 static void scsi_request_fn(struct request_queue *q)
1452 {
1453         struct scsi_device *sdev = q->queuedata;
1454         struct Scsi_Host *shost;
1455         struct scsi_cmnd *cmd;
1456         struct request *req;
1457
1458         if (!sdev) {
1459                 printk("scsi: killing requests for dead queue\n");
1460                 while ((req = blk_peek_request(q)) != NULL)
1461                         scsi_kill_request(req, q);
1462                 return;
1463         }
1464
1465         if (!get_device(&sdev->sdev_gendev))
1466                 /* We must be tearing the block queue down already */
1467                 return;
1468
1469         /*
1470          * To start with, we keep looping until the queue is empty, or until
1471          * the host is no longer able to accept any more requests.
1472          */
1473         shost = sdev->host;
1474         while (!blk_queue_plugged(q)) {
1475                 int rtn;
1476                 /*
1477                  * get next queueable request.  We do this early to make sure
1478                  * that the request is fully prepared even if we cannot 
1479                  * accept it.
1480                  */
1481                 req = blk_peek_request(q);
1482                 if (!req || !scsi_dev_queue_ready(q, sdev))
1483                         break;
1484
1485                 if (unlikely(!scsi_device_online(sdev))) {
1486                         sdev_printk(KERN_ERR, sdev,
1487                                     "rejecting I/O to offline device\n");
1488                         scsi_kill_request(req, q);
1489                         continue;
1490                 }
1491
1493                 /*
1494                  * Remove the request from the request list.
1495                  */
1496                 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1497                         blk_start_request(req);
1498                 sdev->device_busy++;
1499
1500                 spin_unlock(q->queue_lock);
1501                 cmd = req->special;
1502                 if (unlikely(cmd == NULL)) {
1503                         printk(KERN_CRIT "impossible request in %s.\n"
1504                                          "please mail a stack trace to "
1505                                          "linux-scsi@vger.kernel.org\n",
1506                                          __func__);
1507                         blk_dump_rq_flags(req, "foo");
1508                         BUG();
1509                 }
1510                 spin_lock(shost->host_lock);
1511
1512                 /*
1513                  * We hit this when the driver is using a host wide
1514                  * tag map. For device level tag maps the queue_depth check
1515                  * in the device ready fn would prevent us from trying
1516                  * to allocate a tag. Since the map is a shared host resource
1517                  * we add the dev to the starved list so it eventually gets
1518                  * a run when a tag is freed.
1519                  */
1520                 if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1521                         if (list_empty(&sdev->starved_entry))
1522                                 list_add_tail(&sdev->starved_entry,
1523                                               &shost->starved_list);
1524                         goto not_ready;
1525                 }
1526
1527                 if (!scsi_target_queue_ready(shost, sdev))
1528                         goto not_ready;
1529
1530                 if (!scsi_host_queue_ready(q, shost, sdev))
1531                         goto not_ready;
1532
1533                 scsi_target(sdev)->target_busy++;
1534                 shost->host_busy++;
1535
1536                 /*
1537                  * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1538                  *              take the lock again.
1539                  */
1540                 spin_unlock_irq(shost->host_lock);
1541
1542                 /*
1543                  * Finally, initialize any error handling parameters, and set up
1544                  * the timers for timeouts.
1545                  */
1546                 scsi_init_cmd_errh(cmd);
1547
1548                 /*
1549                  * Dispatch the command to the low-level driver.
1550                  */
1551                 rtn = scsi_dispatch_cmd(cmd);
1552                 spin_lock_irq(q->queue_lock);
1553                 if (rtn) {
1554                         /* We're refusing the command; because of
1555                          * the way locks get dropped, we need to
1556                          * check here if plugging is required. */
1557                         if (sdev->device_busy == 0)
1558                                 blk_plug_device(q);
1559
1560                         break;
1561                 }
1562         }
1563
1564         goto out;
1565
1566  not_ready:
1567         spin_unlock_irq(shost->host_lock);
1568
1569         /*
1570          * lock q, handle tag, requeue req, and decrement device_busy. We
1571          * must return with queue_lock held.
1572          *
1573          * Decrementing device_busy without checking it is OK, as all such
1574          * cases (host limits or settings) should run the queue at some
1575          * later time.
1576          */
1577         spin_lock_irq(q->queue_lock);
1578         blk_requeue_request(q, req);
1579         sdev->device_busy--;
1580         if (sdev->device_busy == 0)
1581                 blk_plug_device(q);
1582  out:
1583         /* must be careful here...if we trigger the ->remove() function
1584          * we cannot be holding the q lock */
1585         spin_unlock_irq(q->queue_lock);
1586         put_device(&sdev->sdev_gendev);
1587         spin_lock_irq(q->queue_lock);
1588 }
1589
1590 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1591 {
1592         struct device *host_dev;
1593         u64 bounce_limit = 0xffffffff;
1594
1595         if (shost->unchecked_isa_dma)
1596                 return BLK_BOUNCE_ISA;
1597         /*
1598          * Platforms with virtual-DMA translation
1599          * hardware have no practical limit.
1600          */
1601         if (!PCI_DMA_BUS_IS_PHYS)
1602                 return BLK_BOUNCE_ANY;
1603
1604         host_dev = scsi_get_device(shost);
1605         if (host_dev && host_dev->dma_mask)
1606                 bounce_limit = *host_dev->dma_mask;
1607
1608         return bounce_limit;
1609 }
1610 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
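
/*
 * Illustrative sketch (hypothetical "my_probe"): the limit above simply
 * follows the DMA mask of the host's parent device, so an LLD that sets
 * a wider mask before scsi_add_host() gets a wider bounce limit for
 * free:
 *
 *	static int my_probe(struct pci_dev *pdev)
 *	{
 *		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
 *		    pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
 *			return -ENODEV;
 *		...
 *	}
 */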
1611
1612 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1613                                          request_fn_proc *request_fn)
1614 {
1615         struct request_queue *q;
1616         struct device *dev = shost->shost_gendev.parent;
1617
1618         q = blk_init_queue(request_fn, NULL);
1619         if (!q)
1620                 return NULL;
1621
1622         /*
1623          * this limit is imposed by hardware restrictions
1624          */
1625         blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1626                                         SCSI_MAX_SG_CHAIN_SEGMENTS));
1627
1628         blk_queue_max_hw_sectors(q, shost->max_sectors);
1629         blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1630         blk_queue_segment_boundary(q, shost->dma_boundary);
1631         dma_set_seg_boundary(dev, shost->dma_boundary);
1632
1633         blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1634
1635         /* New queue, no concurrency on queue_flags */
1636         if (!shost->use_clustering)
1637                 queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
1638
1639         /*
1640          * set a reasonable default alignment on word boundaries: the
1641          * host and device may alter it using
1642          * blk_queue_update_dma_alignment() later.
1643          */
1644         blk_queue_dma_alignment(q, 0x03);
1645
1646         return q;
1647 }
1648 EXPORT_SYMBOL(__scsi_alloc_queue);
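
/*
 * Illustrative usage (hedged; "my_request_fn" is hypothetical): a
 * transport class that needs an extra request queue inheriting the
 * host's DMA and segment limits can allocate one here instead of
 * calling blk_init_queue() directly:
 *
 *	struct request_queue *q = __scsi_alloc_queue(shost, my_request_fn);
 *
 *	if (!q)
 *		return -ENOMEM;
 */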
1649
1650 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1651 {
1652         struct request_queue *q;
1653
1654         q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1655         if (!q)
1656                 return NULL;
1657
1658         blk_queue_prep_rq(q, scsi_prep_fn);
1659         blk_queue_softirq_done(q, scsi_softirq_done);
1660         blk_queue_rq_timed_out(q, scsi_times_out);
1661         blk_queue_lld_busy(q, scsi_lld_busy);
1662         return q;
1663 }
1664
1665 void scsi_free_queue(struct request_queue *q)
1666 {
1667         blk_cleanup_queue(q);
1668 }
1669
1670 /*
1671  * Function:    scsi_block_requests()
1672  *
1673  * Purpose:     Utility function used by low-level drivers to prevent further
1674  *              commands from being queued to the device.
1675  *
1676  * Arguments:   shost       - Host in question
1677  *
1678  * Returns:     Nothing
1679  *
1680  * Lock status: No locks are assumed held.
1681  *
1682  * Notes:       There is no timer nor any other means by which the requests
1683  *              get unblocked other than the low-level driver calling
1684  *              scsi_unblock_requests().
1685  */
1686 void scsi_block_requests(struct Scsi_Host *shost)
1687 {
1688         shost->host_self_blocked = 1;
1689 }
1690 EXPORT_SYMBOL(scsi_block_requests);
1691
1692 /*
1693  * Function:    scsi_unblock_requests()
1694  *
1695  * Purpose:     Utility function used by low-level drivers to allow further
1696  *              commands to be queued to the device.
1697  *
1698  * Arguments:   shost       - Host in question
1699  *
1700  * Returns:     Nothing
1701  *
1702  * Lock status: No locks are assumed held.
1703  *
1704  * Notes:       There is no timer nor any other means by which the requests
1705  *              get unblocked other than the low-level driver calling
1706  *              scsi_unblock_requests().
1707  *
1708  *              This is done as an API function so that changes to the
1709  *              internals of the scsi mid-layer won't require wholesale
1710  *              changes to drivers that use this feature.
1711  */
1712 void scsi_unblock_requests(struct Scsi_Host *shost)
1713 {
1714         shost->host_self_blocked = 0;
1715         scsi_run_host_queues(shost);
1716 }
1717 EXPORT_SYMBOL(scsi_unblock_requests);
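
/*
 * Illustrative pairing (hypothetical "my_reset_hardware"): an LLD that
 * must keep the midlayer off the wire while it reinitializes can
 * bracket the operation with these two calls.  Since nothing else ever
 * unblocks the host, every error path must still reach
 * scsi_unblock_requests():
 *
 *	scsi_block_requests(shost);
 *	my_reset_hardware(shost);
 *	scsi_unblock_requests(shost);
 */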
1718
1719 int __init scsi_init_queue(void)
1720 {
1721         int i;
1722
1723         scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1724                                            sizeof(struct scsi_data_buffer),
1725                                            0, 0, NULL);
1726         if (!scsi_sdb_cache) {
1727                 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1728                 return -ENOMEM;
1729         }
1730
1731         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1732                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1733                 int size = sgp->size * sizeof(struct scatterlist);
1734
1735                 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1736                                 SLAB_HWCACHE_ALIGN, NULL);
1737                 if (!sgp->slab) {
1738                         printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1739                                         sgp->name);
1740                         goto cleanup_sdb;
1741                 }
1742
1743                 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1744                                                      sgp->slab);
1745                 if (!sgp->pool) {
1746                         printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1747                                         sgp->name);
1748                         goto cleanup_sdb;
1749                 }
1750         }
1751
1752         return 0;
1753
1754 cleanup_sdb:
1755         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1756                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1757                 if (sgp->pool)
1758                         mempool_destroy(sgp->pool);
1759                 if (sgp->slab)
1760                         kmem_cache_destroy(sgp->slab);
1761         }
1762         kmem_cache_destroy(scsi_sdb_cache);
1763
1764         return -ENOMEM;
1765 }
1766
1767 void scsi_exit_queue(void)
1768 {
1769         int i;
1770
1771         kmem_cache_destroy(scsi_sdb_cache);
1772
1773         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1774                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1775                 mempool_destroy(sgp->pool);
1776                 kmem_cache_destroy(sgp->slab);
1777         }
1778 }
1779
1780 /**
1781  *      scsi_mode_select - issue a mode select
1782  *      @sdev:  SCSI device to be queried
1783  *      @pf:    Page format bit (1 == standard, 0 == vendor specific)
1784  *      @sp:    Save page bit (0 == don't save, 1 == save)
1785  *      @modepage: mode page being requested
1786  *      @buffer: request buffer (may not be smaller than eight bytes)
1787  *      @len:   length of request buffer.
1788  *      @timeout: command timeout
1789  *      @retries: number of retries before failing
1790  *      @data: returns a structure abstracting the mode header data
1791  *      @sshdr: place to put decoded sense data (or NULL if no sense is
1792  *              to be collected).
1793  *
1794  *      Returns zero if successful; negative error number or scsi
1795  *      status on error
1796  *
1797  */
1798 int
1799 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1800                  unsigned char *buffer, int len, int timeout, int retries,
1801                  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1802 {
1803         unsigned char cmd[10];
1804         unsigned char *real_buffer;
1805         int ret;
1806
1807         memset(cmd, 0, sizeof(cmd));
1808         cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
1809
1810         if (sdev->use_10_for_ms) {
1811                 if (len > 65535)
1812                         return -EINVAL;
1813                 real_buffer = kmalloc(8 + len, GFP_KERNEL);
1814                 if (!real_buffer)
1815                         return -ENOMEM;
1816                 memcpy(real_buffer + 8, buffer, len);
1817                 len += 8;
1818                 real_buffer[0] = 0;
1819                 real_buffer[1] = 0;
1820                 real_buffer[2] = data->medium_type;
1821                 real_buffer[3] = data->device_specific;
1822                 real_buffer[4] = data->longlba ? 0x01 : 0;
1823                 real_buffer[5] = 0;
1824                 real_buffer[6] = data->block_descriptor_length >> 8;
1825                 real_buffer[7] = data->block_descriptor_length;
1826
1827                 cmd[0] = MODE_SELECT_10;
1828                 cmd[7] = len >> 8;
1829                 cmd[8] = len;
1830         } else {
1831                 if (len > 255 || data->block_descriptor_length > 255 ||
1832                     data->longlba)
1833                         return -EINVAL;
1834
1835                 real_buffer = kmalloc(4 + len, GFP_KERNEL);
1836                 if (!real_buffer)
1837                         return -ENOMEM;
1838                 memcpy(real_buffer + 4, buffer, len);
1839                 len += 4;
1840                 real_buffer[0] = 0;
1841                 real_buffer[1] = data->medium_type;
1842                 real_buffer[2] = data->device_specific;
1843                 real_buffer[3] = data->block_descriptor_length;
1844
1846                 cmd[0] = MODE_SELECT;
1847                 cmd[4] = len;
1848         }
1849
1850         ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
1851                                sshdr, timeout, retries, NULL);
1852         kfree(real_buffer);
1853         return ret;
1854 }
1855 EXPORT_SYMBOL_GPL(scsi_mode_select);
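
/*
 * Illustrative usage (a hedged sketch, not taken from a real driver;
 * timeout/retry values are arbitrary): flip the WCE bit (0x04 in byte 2
 * of the caching page) in mode data previously fetched with
 * scsi_mode_sense(), then write it back.  "data" and "sshdr" are the
 * structures filled in by the earlier sense call:
 *
 *	unsigned char *page = buffer + data.header_length +
 *			      data.block_descriptor_length;
 *	int len = data.length - data.header_length -
 *		  data.block_descriptor_length;
 *
 *	page[2] |= 0x04;
 *	scsi_mode_select(sdev, 1, 0, 8, page, len,
 *			 30 * HZ, 3, &data, &sshdr);
 */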
1856
1857 /**
1858  *      scsi_mode_sense - issue a mode sense, falling back from ten to six bytes if necessary.
1859  *      @sdev:  SCSI device to be queried
1860  *      @dbd:   set if mode sense will allow block descriptors to be returned
1861  *      @modepage: mode page being requested
1862  *      @buffer: request buffer (may not be smaller than eight bytes)
1863  *      @len:   length of request buffer.
1864  *      @timeout: command timeout
1865  *      @retries: number of retries before failing
1866  *      @data: returns a structure abstracting the mode header data
1867  *      @sshdr: place to put decoded sense data (or NULL if no sense is
1868  *              to be collected).
1869  *
1870  *      Returns the SCSI result (zero if successful).  On success the
1871  *      header offset (either 4 or 8, depending on whether a six- or
1872  *      ten-byte command was issued) is stored in @data->header_length.
1873  */
1874 int
1875 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1876                   unsigned char *buffer, int len, int timeout, int retries,
1877                   struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1878 {
1879         unsigned char cmd[12];
1880         int use_10_for_ms;
1881         int header_length;
1882         int result;
1883         struct scsi_sense_hdr my_sshdr;
1884
1885         memset(data, 0, sizeof(*data));
1886         memset(cmd, 0, sizeof(cmd));
1887         cmd[1] = dbd & 0x18;    /* allows DBD and LLBA bits */
1888         cmd[2] = modepage;
1889
1890         /* caller might not be interested in sense, but we need it */
1891         if (!sshdr)
1892                 sshdr = &my_sshdr;
1893
1894  retry:
1895         use_10_for_ms = sdev->use_10_for_ms;
1896
1897         if (use_10_for_ms) {
1898                 if (len < 8)
1899                         len = 8;
1900
1901                 cmd[0] = MODE_SENSE_10;
1902                 cmd[8] = len;
1903                 header_length = 8;
1904         } else {
1905                 if (len < 4)
1906                         len = 4;
1907
1908                 cmd[0] = MODE_SENSE;
1909                 cmd[4] = len;
1910                 header_length = 4;
1911         }
1912
1913         memset(buffer, 0, len);
1914
1915         result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1916                                   sshdr, timeout, retries, NULL);
1917
1918         /* This code looks awful: what it's doing is making sure an
1919          * ILLEGAL REQUEST sense return identifies the actual command
1920          * byte as the problem.  MODE_SENSE commands can return
1921          * ILLEGAL REQUEST if the mode page isn't supported. */
1922
1923         if (use_10_for_ms && !scsi_status_is_good(result) &&
1924             (driver_byte(result) & DRIVER_SENSE)) {
1925                 if (scsi_sense_valid(sshdr)) {
1926                         if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1927                             (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1928                                 /* 
1929                                  * Invalid command operation code
1930                                  */
1931                                 sdev->use_10_for_ms = 0;
1932                                 goto retry;
1933                         }
1934                 }
1935         }
1936
1937         if (scsi_status_is_good(result)) {
1938                 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
1939                              (modepage == 6 || modepage == 8))) {
1940                         /* Initio breakage? */
1941                         header_length = 0;
1942                         data->length = 13;
1943                         data->medium_type = 0;
1944                         data->device_specific = 0;
1945                         data->longlba = 0;
1946                         data->block_descriptor_length = 0;
1947                 } else if (use_10_for_ms) {
1948                         data->length = buffer[0] * 256 + buffer[1] + 2;
1949                         data->medium_type = buffer[2];
1950                         data->device_specific = buffer[3];
1951                         data->longlba = buffer[4] & 0x01;
1952                         data->block_descriptor_length = buffer[6] * 256 +
1953                                 buffer[7];
1954                 } else {
1955                         data->length = buffer[0] + 1;
1956                         data->medium_type = buffer[1];
1957                         data->device_specific = buffer[2];
1958                         data->block_descriptor_length = buffer[3];
1959                 }
1960                 data->header_length = header_length;
1961         }
1962
1963         return result;
1964 }
1965 EXPORT_SYMBOL(scsi_mode_sense);
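
/*
 * Illustrative usage (hedged; buffer size and retry values are
 * arbitrary): fetch the caching mode page (0x08) and locate the page
 * data behind the header and any block descriptors.  Success is
 * checked with scsi_status_is_good(), matching the return convention
 * above:
 *
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	unsigned char buffer[512];
 *	int res;
 *
 *	res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
 *			      30 * HZ, 3, &data, &sshdr);
 *	if (scsi_status_is_good(res)) {
 *		unsigned char *page = buffer + data.header_length +
 *				      data.block_descriptor_length;
 *		...
 *	}
 */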
1966
1967 /**
1968  *      scsi_test_unit_ready - test if unit is ready
1969  *      @sdev:  scsi device to change the state of.
1970  *      @timeout: command timeout
1971  *      @retries: number of retries before failing
1972  *      @sshdr_external: Optional pointer to struct scsi_sense_hdr for
1973  *              returning sense. Make sure that this is cleared before passing
1974  *              in.
1975  *
1976  *      Returns zero if successful, or an error code if the TUR failed.
1977  *      For removable media, a sense of NOT_READY or UNIT_ATTENTION is
1978  *      translated to success, with the ->changed flag updated.
1979  **/
1980 int
1981 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
1982                      struct scsi_sense_hdr *sshdr_external)
1983 {
1984         char cmd[] = {
1985                 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1986         };
1987         struct scsi_sense_hdr *sshdr;
1988         int result;
1989
1990         if (!sshdr_external)
1991                 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
1992         else
1993                 sshdr = sshdr_external;
1994
1995         /* try to eat the UNIT_ATTENTION if there are enough retries */
1996         do {
1997                 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
1998                                           timeout, retries, NULL);
1999                 if (sdev->removable && scsi_sense_valid(sshdr) &&
2000                     sshdr->sense_key == UNIT_ATTENTION)
2001                         sdev->changed = 1;
2002         } while (scsi_sense_valid(sshdr) &&
2003                  sshdr->sense_key == UNIT_ATTENTION && --retries);
2004
2005         if (!sshdr)
2006                 /* could not allocate sense buffer, so can't process it */
2007                 return result;
2008
2009         if (sdev->removable && scsi_sense_valid(sshdr) &&
2010             (sshdr->sense_key == UNIT_ATTENTION ||
2011              sshdr->sense_key == NOT_READY)) {
2012                 sdev->changed = 1;
2013                 result = 0;
2014         }
2015         if (!sshdr_external)
2016                 kfree(sshdr);
2017         return result;
2018 }
2019 EXPORT_SYMBOL(scsi_test_unit_ready);
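
/*
 * Illustrative usage (hedged; timeout/retry values are arbitrary): poll
 * a removable device for media.  The sense header is cleared first, as
 * required above:
 *
 *	struct scsi_sense_hdr sshdr;
 *
 *	memset(&sshdr, 0, sizeof(sshdr));
 *	if (scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0 &&
 *	    !sdev->changed)
 *		...unit ready, media unchanged...
 */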
2020
2021 /**
2022  *      scsi_device_set_state - Take the given device through the device state model.
2023  *      @sdev:  scsi device to change the state of.
2024  *      @state: state to change to.
2025  *
2026  *      Returns zero if successful or an error if the requested
2027  *      transition is illegal.
2028  */
2029 int
2030 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2031 {
2032         enum scsi_device_state oldstate = sdev->sdev_state;
2033
2034         if (state == oldstate)
2035                 return 0;
2036
2037         switch (state) {
2038         case SDEV_CREATED:
2039                 switch (oldstate) {
2040                 case SDEV_CREATED_BLOCK:
2041                         break;
2042                 default:
2043                         goto illegal;
2044                 }
2045                 break;
2046
2047         case SDEV_RUNNING:
2048                 switch (oldstate) {
2049                 case SDEV_CREATED:
2050                 case SDEV_OFFLINE:
2051                 case SDEV_QUIESCE:
2052                 case SDEV_BLOCK:
2053                         break;
2054                 default:
2055                         goto illegal;
2056                 }
2057                 break;
2058
2059         case SDEV_QUIESCE:
2060                 switch (oldstate) {
2061                 case SDEV_RUNNING:
2062                 case SDEV_OFFLINE:
2063                         break;
2064                 default:
2065                         goto illegal;
2066                 }
2067                 break;
2068
2069         case SDEV_OFFLINE:
2070                 switch (oldstate) {
2071                 case SDEV_CREATED:
2072                 case SDEV_RUNNING:
2073                 case SDEV_QUIESCE:
2074                 case SDEV_BLOCK:
2075                         break;
2076                 default:
2077                         goto illegal;
2078                 }
2079                 break;
2080
2081         case SDEV_BLOCK:
2082                 switch (oldstate) {
2083                 case SDEV_RUNNING:
2084                 case SDEV_CREATED_BLOCK:
2085                         break;
2086                 default:
2087                         goto illegal;
2088                 }
2089                 break;
2090
2091         case SDEV_CREATED_BLOCK:
2092                 switch (oldstate) {
2093                 case SDEV_CREATED:
2094                         break;
2095                 default:
2096                         goto illegal;
2097                 }
2098                 break;
2099
2100         case SDEV_CANCEL:
2101                 switch (oldstate) {
2102                 case SDEV_CREATED:
2103                 case SDEV_RUNNING:
2104                 case SDEV_QUIESCE:
2105                 case SDEV_OFFLINE:
2106                 case SDEV_BLOCK:
2107                         break;
2108                 default:
2109                         goto illegal;
2110                 }
2111                 break;
2112
2113         case SDEV_DEL:
2114                 switch (oldstate) {
2115                 case SDEV_CREATED:
2116                 case SDEV_RUNNING:
2117                 case SDEV_OFFLINE:
2118                 case SDEV_CANCEL:
2119                         break;
2120                 default:
2121                         goto illegal;
2122                 }
2123                 break;
2124
2125         }
2126         sdev->sdev_state = state;
2127         return 0;
2128
2129  illegal:
2130         SCSI_LOG_ERROR_RECOVERY(1, 
2131                                 sdev_printk(KERN_ERR, sdev,
2132                                             "Illegal state transition %s->%s\n",
2133                                             scsi_device_state_name(oldstate),
2134                                             scsi_device_state_name(state))
2135                                 );
2136         return -EINVAL;
2137 }
2138 EXPORT_SYMBOL(scsi_device_set_state);
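
/*
 * Illustrative usage (hedged): per the table above, RUNNING -> OFFLINE
 * is a legal transition, after which scsi_request_fn() starts rejecting
 * I/O for the device; RUNNING -> CREATED is not and fails with -EINVAL:
 *
 *	if (scsi_device_set_state(sdev, SDEV_OFFLINE) == 0)
 *		sdev_printk(KERN_INFO, sdev, "taken offline\n");
 */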
2139
2140 /**
2141  *      scsi_evt_emit - emit a single SCSI device uevent
2142  *      @sdev: associated SCSI device
2143  *      @evt: event to emit
2144  *
2145  *      Send a single uevent (scsi_event) to the associated scsi_device.
2146  */
2147 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2148 {
2149         int idx = 0;
2150         char *envp[3];
2151
2152         switch (evt->evt_type) {
2153         case SDEV_EVT_MEDIA_CHANGE:
2154                 envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2155                 break;
2156
2157         default:
2158                 /* do nothing */
2159                 break;
2160         }
2161
2162         envp[idx++] = NULL;
2163
2164         kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2165 }
2166
2167 /**
2168  *      scsi_evt_thread - send a uevent for each queued scsi event
2169  *      @work: work struct for scsi_device
2170  *
2171  *      Dispatch queued events to their associated scsi_device kobjects
2172  *      as uevents.
2173  */
2174 void scsi_evt_thread(struct work_struct *work)
2175 {
2176         struct scsi_device *sdev;
2177         LIST_HEAD(event_list);
2178
2179         sdev = container_of(work, struct scsi_device, event_work);
2180
2181         while (1) {
2182                 struct scsi_event *evt;
2183                 struct list_head *this, *tmp;
2184                 unsigned long flags;
2185
2186                 spin_lock_irqsave(&sdev->list_lock, flags);
2187                 list_splice_init(&sdev->event_list, &event_list);
2188                 spin_unlock_irqrestore(&sdev->list_lock, flags);
2189
2190                 if (list_empty(&event_list))
2191                         break;
2192
2193                 list_for_each_safe(this, tmp, &event_list) {
2194                         evt = list_entry(this, struct scsi_event, node);
2195                         list_del(&evt->node);
2196                         scsi_evt_emit(sdev, evt);
2197                         kfree(evt);
2198                 }
2199         }
2200 }
2201
2202 /**
2203  *      sdev_evt_send - send asserted event to uevent thread
2204  *      @sdev: scsi_device event occurred on
2205  *      @evt: event to send
2206  *
2207  *      Assert scsi device event asynchronously.
2208  */
2209 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2210 {
2211         unsigned long flags;
2212
2213 #if 0
2214         /* FIXME: currently this check eliminates all media change events
2215          * for polled devices.  Need to update to discriminate between AN
2216          * and polled events */
2217         if (!test_bit(evt->evt_type, sdev->supported_events)) {
2218                 kfree(evt);
2219                 return;
2220         }
2221 #endif
2222
2223         spin_lock_irqsave(&sdev->list_lock, flags);
2224         list_add_tail(&evt->node, &sdev->event_list);
2225         schedule_work(&sdev->event_work);
2226         spin_unlock_irqrestore(&sdev->list_lock, flags);
2227 }
2228 EXPORT_SYMBOL_GPL(sdev_evt_send);
2229
2230 /**
2231  *      sdev_evt_alloc - allocate a new scsi event
2232  *      @evt_type: type of event to allocate
2233  *      @gfpflags: GFP flags for allocation
2234  *
2235  *      Allocates and returns a new scsi_event.
2236  */
2237 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2238                                   gfp_t gfpflags)
2239 {
2240         struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2241         if (!evt)
2242                 return NULL;
2243
2244         evt->evt_type = evt_type;
2245         INIT_LIST_HEAD(&evt->node);
2246
2247         /* evt_type-specific initialization, if any */
2248         switch (evt_type) {
2249         case SDEV_EVT_MEDIA_CHANGE:
2250         default:
2251                 /* do nothing */
2252                 break;
2253         }
2254
2255         return evt;
2256 }
2257 EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2258
2259 /**
2260  *      sdev_evt_send_simple - send asserted event to uevent thread
2261  *      @sdev: scsi_device event occurred on
2262  *      @evt_type: type of event to send
2263  *      @gfpflags: GFP flags for allocation
2264  *
2265  *      Assert scsi device event asynchronously, given an event type.
2266  */
2267 void sdev_evt_send_simple(struct scsi_device *sdev,
2268                           enum scsi_device_event evt_type, gfp_t gfpflags)
2269 {
2270         struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2271         if (!evt) {
2272                 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2273                             evt_type);
2274                 return;
2275         }
2276
2277         sdev_evt_send(sdev, evt);
2278 }
2279 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
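
/*
 * Illustrative usage (hedged): a driver that notices a media change in
 * interrupt context can assert the event with an atomic allocation; the
 * uevent itself is emitted later by scsi_evt_thread() in process
 * context:
 *
 *	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
 */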
2280
2281 /**
2282  *      scsi_device_quiesce - Block user issued commands.
2283  *      @sdev:  scsi device to quiesce.
2284  *
2285  *      This works by trying to transition to the SDEV_QUIESCE state
2286  *      (which must be a legal transition).  When the device is in this
2287  *      state, only special requests will be accepted, all others will
2288  *      be deferred.  Since special requests may also be requeued requests,
2289  *      a successful return doesn't guarantee the device will be 
2290  *      totally quiescent.
2291  *
2292  *      Must be called with user context, may sleep.
2293  *
2294  *      Returns zero if successful or an error if not.
2295  */
2296 int
2297 scsi_device_quiesce(struct scsi_device *sdev)
2298 {
2299         int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2300         if (err)
2301                 return err;
2302
2303         scsi_run_queue(sdev->request_queue);
2304         while (sdev->device_busy) {
2305                 msleep_interruptible(200);
2306                 scsi_run_queue(sdev->request_queue);
2307         }
2308         return 0;
2309 }
2310 EXPORT_SYMBOL(scsi_device_quiesce);
2311
2312 /**
2313  *      scsi_device_resume - Restart user issued commands to a quiesced device.
2314  *      @sdev:  scsi device to resume.
2315  *
2316  *      Moves the device from quiesced back to running and restarts the
2317  *      queues.
2318  *
2319  *      Must be called with user context, may sleep.
2320  */
2321 void
2322 scsi_device_resume(struct scsi_device *sdev)
2323 {
2324         if (scsi_device_set_state(sdev, SDEV_RUNNING))
2325                 return;
2326         scsi_run_queue(sdev->request_queue);
2327 }
2328 EXPORT_SYMBOL(scsi_device_resume);
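
/*
 * Illustrative pairing (hypothetical "my_download_firmware"): quiesce a
 * device around an operation that must not compete with user I/O, then
 * resume it.  Both calls may sleep, so this only works in process
 * context:
 *
 *	int err = scsi_device_quiesce(sdev);
 *
 *	if (err)
 *		return err;
 *	my_download_firmware(sdev);
 *	scsi_device_resume(sdev);
 */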
2329
2330 static void
2331 device_quiesce_fn(struct scsi_device *sdev, void *data)
2332 {
2333         scsi_device_quiesce(sdev);
2334 }
2335
2336 void
2337 scsi_target_quiesce(struct scsi_target *starget)
2338 {
2339         starget_for_each_device(starget, NULL, device_quiesce_fn);
2340 }
2341 EXPORT_SYMBOL(scsi_target_quiesce);
2342
2343 static void
2344 device_resume_fn(struct scsi_device *sdev, void *data)
2345 {
2346         scsi_device_resume(sdev);
2347 }
2348
2349 void
2350 scsi_target_resume(struct scsi_target *starget)
2351 {
2352         starget_for_each_device(starget, NULL, device_resume_fn);
2353 }
2354 EXPORT_SYMBOL(scsi_target_resume);
2355
2356 /**
2357  * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2358  * @sdev:       device to block
2359  *
2360  * Block request made by scsi lld's to temporarily stop all
2361  * scsi commands on the specified device.  Called from interrupt
2362  * or normal process context.
2363  *
2364  * Returns zero if successful or error if not
2365  *
2366  * Notes:       
2367  *      This routine transitions the device to the SDEV_BLOCK state
2368  *      (which must be a legal transition).  When the device is in this
2369  *      state, all commands are deferred until the scsi lld reenables
2370  *      the device with scsi_internal_device_unblock() or a timeout fires.
2371  *      This routine assumes the host_lock is held on entry.
2372  */
2373 int
2374 scsi_internal_device_block(struct scsi_device *sdev)
2375 {
2376         struct request_queue *q = sdev->request_queue;
2377         unsigned long flags;
2378         int err = 0;
2379
2380         err = scsi_device_set_state(sdev, SDEV_BLOCK);
2381         if (err) {
2382                 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2383
2384                 if (err)
2385                         return err;
2386         }
2387
2388         /* 
2389          * The device has transitioned to SDEV_BLOCK.  Stop the
2390          * block layer from calling the midlayer with this device's
2391          * request queue. 
2392          */
2393         spin_lock_irqsave(q->queue_lock, flags);
2394         blk_stop_queue(q);
2395         spin_unlock_irqrestore(q->queue_lock, flags);
2396
2397         return 0;
2398 }
2399 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2400  
2401 /**
2402  * scsi_internal_device_unblock - resume a device after a block request
2403  * @sdev:       device to resume
2404  *
2405  * Called by scsi lld's or the midlayer to restart the device queue
2406  * for the previously suspended scsi device.  Called from interrupt or
2407  * normal process context.
2408  *
2409  * Returns zero if successful or error if not.
2410  *
2411  * Notes:       
2412  *      This routine transitions the device to the SDEV_RUNNING state
2413  *      (which must be a legal transition) allowing the midlayer to
2414  *      goose the queue for this device.  This routine assumes the 
2415  *      host_lock is held upon entry.
2416  */
2417 int
2418 scsi_internal_device_unblock(struct scsi_device *sdev)
2419 {
2420         struct request_queue *q = sdev->request_queue; 
2421         unsigned long flags;
2422         
2423         /* 
2424          * Try to transition the scsi device to SDEV_RUNNING
2425          * and goose the device queue if successful.  
2426          */
2427         if (sdev->sdev_state == SDEV_BLOCK)
2428                 sdev->sdev_state = SDEV_RUNNING;
2429         else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2430                 sdev->sdev_state = SDEV_CREATED;
2431         else
2432                 return -EINVAL;
2433
2434         spin_lock_irqsave(q->queue_lock, flags);
2435         blk_start_queue(q);
2436         spin_unlock_irqrestore(q->queue_lock, flags);
2437
2438         return 0;
2439 }
2440 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2441
2442 static void
2443 device_block(struct scsi_device *sdev, void *data)
2444 {
2445         scsi_internal_device_block(sdev);
2446 }
2447
2448 static int
2449 target_block(struct device *dev, void *data)
2450 {
2451         if (scsi_is_target_device(dev))
2452                 starget_for_each_device(to_scsi_target(dev), NULL,
2453                                         device_block);
2454         return 0;
2455 }
2456
2457 void
2458 scsi_target_block(struct device *dev)
2459 {
2460         if (scsi_is_target_device(dev))
2461                 starget_for_each_device(to_scsi_target(dev), NULL,
2462                                         device_block);
2463         else
2464                 device_for_each_child(dev, NULL, target_block);
2465 }
2466 EXPORT_SYMBOL_GPL(scsi_target_block);
2467
2468 static void
2469 device_unblock(struct scsi_device *sdev, void *data)
2470 {
2471         scsi_internal_device_unblock(sdev);
2472 }
2473
2474 static int
2475 target_unblock(struct device *dev, void *data)
2476 {
2477         if (scsi_is_target_device(dev))
2478                 starget_for_each_device(to_scsi_target(dev), NULL,
2479                                         device_unblock);
2480         return 0;
2481 }
2482
2483 void
2484 scsi_target_unblock(struct device *dev)
2485 {
2486         if (scsi_is_target_device(dev))
2487                 starget_for_each_device(to_scsi_target(dev), NULL,
2488                                         device_unblock);
2489         else
2490                 device_for_each_child(dev, NULL, target_unblock);
2491 }
2492 EXPORT_SYMBOL_GPL(scsi_target_unblock);
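
/*
 * Illustrative usage (hedged sketch): a transport that detects a
 * temporary loss of connectivity can park every device on the target
 * and revive them when the link returns, much as the FC transport does
 * around remote-port loss:
 *
 *	scsi_target_block(&starget->dev);
 *	...wait for the link to return or a timeout to expire...
 *	scsi_target_unblock(&starget->dev);
 */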
2493
2494 /**
2495  * scsi_kmap_atomic_sg - find and atomically map an sg element
2496  * @sgl:        scatter-gather list
2497  * @sg_count:   number of segments in sg
2498  * @offset:     offset in bytes into sg, on return offset into the mapped area
2499  * @len:        bytes to map, on return number of bytes mapped
2500  *
2501  * Returns virtual address of the start of the mapped page
2502  */
2503 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2504                           size_t *offset, size_t *len)
2505 {
2506         int i;
2507         size_t sg_len = 0, len_complete = 0;
2508         struct scatterlist *sg;
2509         struct page *page;
2510
2511         WARN_ON(!irqs_disabled());
2512
2513         for_each_sg(sgl, sg, sg_count, i) {
2514                 len_complete = sg_len; /* Complete sg-entries */
2515                 sg_len += sg->length;
2516                 if (sg_len > *offset)
2517                         break;
2518         }
2519
2520         if (unlikely(i == sg_count)) {
2521                 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2522                         "elements %d\n",
2523                        __func__, sg_len, *offset, sg_count);
2524                 WARN_ON(1);
2525                 return NULL;
2526         }
2527
2528         /* Offset starting from the beginning of first page in this sg-entry */
2529         *offset = *offset - len_complete + sg->offset;
2530
2531         /* Assumption: contiguous pages can be accessed as "page + i" */
2532         page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2533         *offset &= ~PAGE_MASK;
2534
2535         /* Bytes in this sg-entry from *offset to the end of the page */
2536         sg_len = PAGE_SIZE - *offset;
2537         if (*len > sg_len)
2538                 *len = sg_len;
2539
2540         return kmap_atomic(page, KM_BIO_SRC_IRQ);
2541 }
2542 EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2543
2544 /**
2545  * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2546  * @virt:       virtual address to be unmapped
2547  */
2548 void scsi_kunmap_atomic_sg(void *virt)
2549 {
2550         kunmap_atomic(virt, KM_BIO_SRC_IRQ);
2551 }
2552 EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
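
/*
 * Illustrative usage (hedged; "fifo_read", "bytes_done" and "count" are
 * hypothetical): a PIO-style driver copying one mapping's worth of data
 * into a command's scatterlist.  Interrupts must be disabled across the
 * mapping, and on return "offset" is relative to the mapped page while
 * "len" is clamped to the bytes available there, so long transfers loop
 * over successive mappings:
 *
 *	size_t offset = bytes_done, len = count;
 *	unsigned long flags;
 *	char *vaddr;
 *
 *	local_irq_save(flags);
 *	vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
 *				    &offset, &len);
 *	if (vaddr) {
 *		fifo_read(vaddr + offset, len);
 *		scsi_kunmap_atomic_sg(vaddr);
 *	}
 *	local_irq_restore(flags);
 */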