[SCSI] remove scsi_cmnd->owner
[pandora-kernel.git] drivers/scsi/scsi_lib.c
/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR           (sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE         32

struct scsi_host_sg_pool {
        size_t          size;
        char            *name;
        kmem_cache_t    *slab;
        mempool_t       *pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
        SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP
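
/*
 * For reference: SP(8) above expands to { 8, "sgpool-8" }, so each table
 * entry pairs a pool's segment capacity with the slab name that
 * scsi_init_queue() below uses when creating the backing slab and mempool.
 */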


/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq    - request that is ready to be queued.
 *              at_head - boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.   The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
        /*
         * Because users of this function are apt to reuse requests with no
         * modification, we have to sanitise the request flags here
         */
        sreq->sr_request->flags &= ~REQ_DONTPREP;
        blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
                           at_head, sreq);
        return 0;
}

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct request_queue *q = device->request_queue;
        unsigned long flags;

        SCSI_LOG_MLQUEUE(1,
                 printk("Inserting command %p into mlqueue\n", cmd));

        /*
         * Set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        if (reason == SCSI_MLQUEUE_HOST_BUSY)
                host->host_blocked = host->max_host_blocked;
        else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
                device->device_blocked = device->max_device_blocked;

        /*
         * Register the fact that we own the thing for now.
         */
        cmd->state = SCSI_STATE_MLQUEUE;

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        scsi_device_unbusy(device);

        /*
         * Requeue this command.  It will go before all other commands
         * that are already in the queue.
         *
         * NOTE: there is magic here about the way the queue is plugged if
         * we have no outstanding commands.
         *
         * Although we *don't* plug the queue, we call the request
         * function.  The SCSI request function detects the blocked condition
         * and plugs the queue appropriately.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);

        return 0;
}
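
/*
 * For illustration (not part of the original file): scsi_queue_insert()
 * is typically reached when a low-level driver's queuecommand() returns
 * one of the SCSI_MLQUEUE_* codes because it is temporarily out of
 * resources.  A minimal sketch of that pattern; the example_* names are
 * hypothetical driver code, not a real API:
 */
#if 0
static int example_queuecommand(struct scsi_cmnd *cmd,
                                void (*done)(struct scsi_cmnd *))
{
        /* Adapter ring full: ask the midlayer to requeue.  The midlayer
         * then calls scsi_queue_insert(cmd, SCSI_MLQUEUE_HOST_BUSY),
         * which stalls the host via max_host_blocked and requeues cmd
         * at the head of the block queue. */
        if (example_adapter_busy(cmd->device->host))
                return SCSI_MLQUEUE_HOST_BUSY;

        cmd->scsi_done = done;
        example_start_io(cmd);
        return 0;
}
#endif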

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq      - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       This function is only used for queueing requests for things
 *              like ioctls and character device requests - this is because
 *              we essentially just inject a request into the queue for the
 *              device.
 *
 *              In order to support the scsi_device_quiesce function, we
 *              now inject requests on the *head* of the device queue
 *              rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
                 void *buffer, unsigned bufflen,
                 void (*done)(struct scsi_cmnd *),
                 int timeout, int retries)
{
        /*
         * If the upper level driver is reusing these things, then
         * we should release the low-level block now.  Another one will
         * be allocated later when this request is getting queued.
         */
        __scsi_release_request(sreq);

        /*
         * Our own function scsi_done (which marks the host as not busy,
         * disables the timeout counter, etc) will be called by us or by
         * the scsi_hosts[host].queuecommand() function; it in turn calls
         * the completion function for the high level driver.
         */
        memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
        sreq->sr_bufflen = bufflen;
        sreq->sr_buffer = buffer;
        sreq->sr_allowed = retries;
        sreq->sr_done = done;
        sreq->sr_timeout_per_command = timeout;

        if (sreq->sr_cmd_len == 0)
                sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);

static void scsi_wait_done(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;
        struct request_queue *q = cmd->device->request_queue;
        unsigned long flags;

        req->rq_status = RQ_SCSI_DONE;  /* Busy, but indicate request done */

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_rq_tagged(req))
                blk_queue_end_tag(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        if (req->waiting)
                complete(req->waiting);
}

/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
        BUG_ON(!req->waiting);

        complete(req->waiting);
}

void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
                   unsigned bufflen, int timeout, int retries)
{
        DECLARE_COMPLETION(wait);

        sreq->sr_request->waiting = &wait;
        sreq->sr_request->rq_status = RQ_SCSI_BUSY;
        sreq->sr_request->end_io = scsi_wait_req_end_io;
        scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done,
                        timeout, retries);
        wait_for_completion(&wait);
        sreq->sr_request->waiting = NULL;
        if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
                sreq->sr_result |= (DRIVER_ERROR << 24);

        __scsi_release_request(sreq);
}
EXPORT_SYMBOL(scsi_wait_req);
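
/*
 * For illustration (not part of the original file): a typical synchronous
 * user of scsi_wait_req() issues a simple CDB with no data transfer and
 * checks sr_result afterwards.  A minimal sketch, assuming the caller
 * obtained sreq from scsi_allocate_request(); the function name is
 * hypothetical:
 */
#if 0
static int example_test_unit_ready(struct scsi_request *sreq)
{
        unsigned char scsi_cmd[MAX_COMMAND_SIZE];

        memset(scsi_cmd, 0, sizeof(scsi_cmd));
        scsi_cmd[0] = TEST_UNIT_READY;

        /* Blocks until scsi_wait_done() completes the request. */
        scsi_wait_req(sreq, scsi_cmd, NULL, 0, 30 * HZ, 3);
        return sreq->sr_result;
}
#endif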

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd     - command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
        cmd->serial_number = 0;

        memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

        if (cmd->cmd_len == 0)
                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

        /*
         * We need saved copies of a number of fields - this is because
         * error handling may need to overwrite these with different values
         * to run different commands, and once error handling is complete,
         * we will need to restore these values prior to running the actual
         * command.
         */
        cmd->old_use_sg = cmd->use_sg;
        cmd->old_cmd_len = cmd->cmd_len;
        cmd->sc_old_data_direction = cmd->sc_data_direction;
        cmd->old_underflow = cmd->underflow;
        memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
        cmd->buffer = cmd->request_buffer;
        cmd->bufflen = cmd->request_bufflen;

        return 1;
}

/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd      - command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
        memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
        cmd->request_buffer = cmd->buffer;
        cmd->request_bufflen = cmd->bufflen;
        cmd->use_sg = cmd->old_use_sg;
        cmd->cmd_len = cmd->old_cmd_len;
        cmd->sc_data_direction = cmd->sc_old_data_direction;
        cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_busy--;
        if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
                     shost->host_failed))
                scsi_eh_wakeup(shost);
        spin_unlock(shost->host_lock);
        spin_lock(sdev->request_queue->queue_lock);
        sdev->device_busy--;
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
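
/*
 * Note the hand-over-hand locking above: interrupts are disabled once
 * (irqsave with host_lock), the plain unlock/lock swaps host_lock for the
 * queue_lock, and the final irqrestore re-enables interrupts only after
 * device_busy has been decremented.
 */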

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Call blk_run_queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        blk_run_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_run_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:    scsi_run_queue()
 *
 * Purpose:     Select a proper request queue to serve next
 *
 * Arguments:   q       - last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:       The previous command was completely finished, start
 *              a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        if (sdev->single_lun)
                scsi_single_lun_run(sdev);

        spin_lock_irqsave(shost->host_lock, flags);
        while (!list_empty(&shost->starved_list) &&
               !shost->host_blocked && !shost->host_self_blocked &&
                !((shost->can_queue > 0) &&
                  (shost->host_busy >= shost->can_queue))) {
                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
                 * drops the queue_lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
                sdev = list_entry(shost->starved_list.next,
                                          struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                spin_unlock_irqrestore(shost->host_lock, flags);

                blk_run_queue(sdev->request_queue);

                spin_lock_irqsave(shost->host_lock, flags);
                if (unlikely(!list_empty(&sdev->starved_entry)))
                        /*
                         * sdev lost a race, and was put back on the
                         * starved list. This is unlikely but without this
                         * in theory we could loop forever.
                         */
                        break;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        blk_run_queue(q);
}

/*
 * Function:    scsi_requeue_command()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - queue to operate on
 *              cmd     - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command;
 *              this can be for a number of reasons - the main one is
 *              I/O errors in the middle of the request, in which case
 *              we need to request the blocks that come after the bad
 *              sector.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        unsigned long flags;

        cmd->request->flags &= ~REQ_DONTPREP;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
        struct request_queue *q = cmd->device->request_queue;

        scsi_put_command(cmd);
        scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *              of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd      - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *              requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue done or required, NULL otherwise
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *              We are guaranteeing that the request queue will be goosed
 *              at some point during this call.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
                                          int bytes, int requeue)
{
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        unsigned long flags;

        /*
         * If there are blocks left over at the end, set up the command
         * to queue the remainder of them.
         */
        if (end_that_request_chunk(req, uptodate, bytes)) {
                int leftover = (req->hard_nr_sectors << 9);

                if (blk_pc_request(req))
                        leftover = req->data_len;

                /* kill remainder if no retries */
                if (!uptodate && blk_noretry_request(req))
                        end_that_request_chunk(req, 0, leftover);
                else {
                        if (requeue)
                                /*
                                 * Bleah.  Leftovers again.  Stick the
                                 * leftovers in the front of the
                                 * queue, and goose the queue again.
                                 */
                                scsi_requeue_command(q, cmd);

                        return cmd;
                }
        }

        add_disk_randomness(req->rq_disk);

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_rq_tagged(req))
                blk_queue_end_tag(q, req);
        end_that_request_last(req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /*
         * This will goose the queue request function at the end, so we don't
         * need to worry about launching another command.
         */
        scsi_next_command(cmd);
        return NULL;
}

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
        struct scsi_host_sg_pool *sgp;
        struct scatterlist *sgl;

        BUG_ON(!cmd->use_sg);

        switch (cmd->use_sg) {
        case 1 ... 8:
                cmd->sglist_len = 0;
                break;
        case 9 ... 16:
                cmd->sglist_len = 1;
                break;
        case 17 ... 32:
                cmd->sglist_len = 2;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        case 33 ... 64:
                cmd->sglist_len = 3;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        case 65 ... 128:
                cmd->sglist_len = 4;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS  > 128)
        case 129 ... 256:
                cmd->sglist_len = 5;
                break;
#endif
#endif
#endif
        default:
                return NULL;
        }

        sgp = scsi_sg_pools + cmd->sglist_len;
        sgl = mempool_alloc(sgp->pool, gfp_mask);
        if (sgl)
                memset(sgl, 0, sgp->size);
        return sgl;
}

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
        struct scsi_host_sg_pool *sgp;

        BUG_ON(index >= SG_MEMPOOL_NR);

        sgp = scsi_sg_pools + index;
        mempool_free(sgl, sgp->pool);
}
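
/*
 * Note that scsi_alloc_sgtable() records the pool it drew from in
 * cmd->sglist_len; callers must hand that same index back to
 * scsi_free_sgtable(), as scsi_release_buffers() and scsi_io_completion()
 * below do.
 */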

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd     - command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *              command, we must release resources allocated during
 *              the __init_io() function.  Primarily this would involve
 *              the scatter-gather table, and potentially any bounce
 *              buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
        else if (cmd->request_buffer != req->buffer)
                kfree(cmd->request_buffer);

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *              We must do one of several things here:
 *
 *              a) Call scsi_end_request.  This will finish off the
 *                 specified number of sectors.  If we are done, the
 *                 command block will be released, and the queue
 *                 function will be goosed.  If we are not done, then
 *                 scsi_end_request will directly goose the queue.
 *
 *              b) We can just use scsi_requeue_command() here.  This would
 *                 be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                        unsigned int block_bytes)
{
        int result = cmd->result;
        int this_count = cmd->bufflen;
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int clear_errors = 1;
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int sense_deferred = 0;

        if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
                return;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         * For the case of a READ, we need to copy the data out of the
         * bounce buffer and into the real buffer.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
        else if (cmd->buffer != req->buffer) {
                if (rq_data_dir(req) == READ) {
                        unsigned long flags;
                        char *to = bio_kmap_irq(req->bio, &flags);
                        memcpy(to, cmd->buffer, cmd->bufflen);
                        bio_kunmap_irq(to, &flags);
                }
                kfree(cmd->buffer);
        }

        if (result) {
                sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
                if (sense_valid)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }
        if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
                req->errors = result;
                if (result) {
                        clear_errors = 0;
                        if (sense_valid && req->sense) {
                                /*
                                 * SG_IO wants current and deferred errors
                                 */
                                int len = 8 + cmd->sense_buffer[7];

                                if (len > SCSI_SENSE_BUFFERSIZE)
                                        len = SCSI_SENSE_BUFFERSIZE;
                                memcpy(req->sense, cmd->sense_buffer,  len);
                                req->sense_len = len;
                        }
                } else
                        req->data_len = cmd->resid;
        }

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        if (good_bytes >= 0) {
                SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
                                              req->nr_sectors, good_bytes));
                SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

                if (clear_errors)
                        req->errors = 0;
                /*
                 * If multiple sectors are requested in one buffer, then
                 * they will have been finished off by the first command.
                 * If not, then we have a multi-buffer command.
                 *
                 * If block_bytes != 0, it means we had a medium error
                 * of some sort, and that we want to mark some number of
                 * sectors as not uptodate.  Thus we want to inhibit
                 * requeueing right here - we will requeue down below
                 * when we handle the bad sectors.
                 */
                cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);

                /*
                 * If the command completed without error, then either finish off the
                 * rest of the command, or start a new one.
                 */
                if (result == 0 || cmd == NULL) {
                        return;
                }
        }
        /*
         * Now, if we were good little boys and girls, Santa left us a request
         * sense buffer.  We can extract information from this, so we
         * can choose a block to remap, etc.
         */
        if (sense_valid && !sense_deferred) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* detected disc change.  set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                cmd = scsi_end_request(cmd, 0,
                                                this_count, 1);
                                return;
                        } else {
                                /*
                                 * Must have been a power glitch, or a
                                 * bus reset.  Could not have been a
                                 * media change, so we just retry the
                                 * request and see what happens.
                                 */
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /*
                         * If we had an ILLEGAL REQUEST returned, then we may
                         * have performed an unsupported command.  The only
                         * thing this should be would be a ten byte read where
                         * only a six byte read was supported.  Also, on a
                         * system where READ CAPACITY failed, we may have read
                         * past the end of the disk.
                         */
                        if (cmd->device->use_10_for_rw &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                cmd->device->use_10_for_rw = 0;
                                /*
                                 * This will cause a retry with a 6-byte
                                 * command.
                                 */
                                scsi_requeue_command(q, cmd);
                                result = 0;
                        } else {
                                cmd = scsi_end_request(cmd, 0, this_count, 1);
                                return;
                        }
                        break;
                case NOT_READY:
                        /*
                         * If the device is in the process of becoming ready,
                         * retry.
                         */
                        if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        printk(KERN_INFO "Device %s not ready.\n",
                               req->rq_disk ? req->rq_disk->disk_name : "");
                        cmd = scsi_end_request(cmd, 0, this_count, 1);
                        return;
                case VOLUME_OVERFLOW:
                        printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ",
                               cmd->device->host->host_no,
                               (int)cmd->device->channel,
                               (int)cmd->device->id, (int)cmd->device->lun);
                        __scsi_print_command(cmd->data_cmnd);
                        scsi_print_sense("", cmd);
                        cmd = scsi_end_request(cmd, 0, block_bytes, 1);
                        return;
                default:
                        break;
                }
        }                       /* driver byte != 0 */
        if (host_byte(result) == DID_RESET) {
                /*
                 * Third party bus reset or reset for error
                 * recovery reasons.  Just retry the request
                 * and see what happens.
                 */
                scsi_requeue_command(q, cmd);
                return;
        }
        if (result) {
                printk(KERN_INFO "SCSI error : <%d %d %d %d> return code "
                       "= 0x%x\n", cmd->device->host->host_no,
                       cmd->device->channel,
                       cmd->device->id,
                       cmd->device->lun, result);

                if (driver_byte(result) & DRIVER_SENSE)
                        scsi_print_sense("", cmd);
                /*
                 * Mark a single buffer as not uptodate.  Queue the remainder.
                 * We sometimes get this cruft in the event that a medium error
                 * isn't properly reported.
                 */
                block_bytes = req->hard_cur_sectors << 9;
                if (!block_bytes)
                        block_bytes = req->data_len;
                cmd = scsi_end_request(cmd, 0, block_bytes, 1);
        }
}
EXPORT_SYMBOL(scsi_io_completion);
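
/*
 * For illustration (not part of the original file): an upper-level
 * driver's completion routine normally ends by handing the command back
 * to scsi_io_completion() (cf. sd's rw_intr).  A much-simplified sketch;
 * the function name is hypothetical, and real drivers compute good_bytes
 * and block_bytes from the sense data:
 */
#if 0
static void example_done(struct scsi_cmnd *cmd)
{
        /* On success report the whole transfer done; on error report
         * nothing done and let scsi_io_completion() examine the sense
         * data and retry, requeue or fail the remainder. */
        unsigned int good_bytes = cmd->result ? 0 : cmd->bufflen;

        scsi_io_completion(cmd, good_bytes, 0);
}
#endif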

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *              BLKPREP_DEFER if the failure is retryable
 *              BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
        struct request     *req = cmd->request;
        struct scatterlist *sgpnt;
        int                count;

        /*
         * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
         */
        if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
                cmd->request_bufflen = req->data_len;
                cmd->request_buffer = req->data;
                req->buffer = req->data;
                cmd->use_sg = 0;
                return 0;
        }

        /*
         * we used to not use scatter-gather for single segment requests,
         * but now we do (it makes highmem I/O easier to support without
         * kmapping pages)
         */
        cmd->use_sg = req->nr_phys_segments;

        /*
         * if sg table allocation fails, requeue request later.
         */
        sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
        if (unlikely(!sgpnt))
                return BLKPREP_DEFER;

        cmd->request_buffer = (char *) sgpnt;
        cmd->request_bufflen = req->nr_sectors << 9;
        if (blk_pc_request(req))
                cmd->request_bufflen = req->data_len;
        req->buffer = NULL;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

        /*
         * mapped well, send it off
         */
        if (likely(count <= cmd->use_sg)) {
                cmd->use_sg = count;
                return 0;
        }

        printk(KERN_ERR "Incorrect number of segments after building list\n");
        printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
        printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
                        req->current_nr_sectors);

        /* release the command and kill it */
        scsi_release_buffers(cmd);
        scsi_put_command(cmd);
        return BLKPREP_KILL;
}
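
/*
 * On BLKPREP_DEFER the command is kept and the request will be retried
 * later; on BLKPREP_KILL the command and its buffers have already been
 * released here, which is why scsi_prep_fn() below can return the value
 * straight through without further cleanup.
 */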

static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;

                if (drv->prepare_flush)
                        return drv->prepare_flush(q, rq);
        }

        return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct request *flush_rq = rq->end_io_data;
        struct scsi_driver *drv;

        if (flush_rq->errors) {
                printk("scsi: barrier error, disabling flush support\n");
                blk_queue_ordered(q, QUEUE_ORDERED_NONE);
        }

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;
                drv->end_flush(q, rq);
        }
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
                               sector_t *error_sector)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state != SDEV_RUNNING)
                return -ENXIO;

        drv = *(struct scsi_driver **) disk->private_data;
        if (drv->issue_flush)
                return drv->issue_flush(&sdev->sdev_gendev, error_sector);

        return -EOPNOTSUPP;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_cmnd *cmd;
        int specials_only = 0;

        /*
         * Just check to see if the device is online.  If it isn't, we
         * refuse to process any commands.  The device must be brought
         * online before trying any recovery commands
         */
        if (unlikely(!scsi_device_online(sdev))) {
                printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
                       sdev->host->host_no, sdev->id, sdev->lun);
                return BLKPREP_KILL;
        }
        if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
                /* OK, we're not in a running state; don't prep
                 * user commands */
                if (sdev->sdev_state == SDEV_DEL) {
                        /* Device is fully deleted, no commands
                         * at all allowed down */
                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        return BLKPREP_KILL;
                }
                /* OK, we only allow special commands (i.e. not
                 * user initiated ones) */
                specials_only = sdev->sdev_state;
        }

        /*
         * Find the actual device driver associated with this command.
         * The SPECIAL requests are things like character device or
         * ioctls, which did not originate from ll_rw_blk.  Note that
         * the special field is also used to indicate the cmd for
         * the remainder of a partially fulfilled request that can
         * come up when there is a medium error.  We have to treat
         * these two cases differently.  We differentiate by looking
         * at request->cmd, as this tells us the real story.
         */
        if (req->flags & REQ_SPECIAL) {
                struct scsi_request *sreq = req->special;

                if (sreq->sr_magic == SCSI_REQ_MAGIC) {
                        cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                        scsi_init_cmd_from_req(cmd, sreq);
                } else
                        cmd = req->special;
        } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

                if(unlikely(specials_only)) {
                        if(specials_only == SDEV_QUIESCE ||
                                        specials_only == SDEV_BLOCK)
                                return BLKPREP_DEFER;

                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        return BLKPREP_KILL;
                }

                /*
                 * Now try and find a command block that we can use.
                 */
                if (!req->special) {
                        cmd = scsi_get_command(sdev, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                } else
                        cmd = req->special;

                /* pull a tag out of the request if we have one */
                cmd->tag = req->tag;
        } else {
                blk_dump_rq_flags(req, "SCSI bad req");
                return BLKPREP_KILL;
        }

        /* note the overloading of req->special.  When the tag
         * is active it always means cmd.  If the tag goes
         * back for re-queueing, it may be reset */
        req->special = cmd;
        cmd->request = req;

        /*
         * FIXME: drop the lock here because the functions below
         * expect to be called without the queue lock held.  Also,
         * previously, we dequeued the request before dropping the
         * lock.  We hope REQ_STARTED prevents anything untoward from
         * happening now.
         */
        if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
                struct scsi_driver *drv;
                int ret;

                /*
                 * This will do a couple of things:
                 *  1) Fill in the actual SCSI command.
                 *  2) Fill in any other upper-level specific fields
                 * (timeout).
                 *
                 * If this returns 0, it means that the request failed
                 * (reading past end of disk, reading offline device,
                 * etc).   This won't actually talk to the device, but
                 * some kinds of consistency checking may cause the
                 * request to be rejected immediately.
                 */

                /*
                 * This sets up the scatter-gather table (allocating if
                 * required).
                 */
                ret = scsi_init_io(cmd);
                if (ret)        /* BLKPREP_KILL return also releases the command */
                        return ret;

                /*
                 * Initialize the actual SCSI command for this request.
                 */
                drv = *(struct scsi_driver **)req->rq_disk->private_data;
                if (unlikely(!drv->init_command(cmd))) {
                        scsi_release_buffers(cmd);
                        scsi_put_command(cmd);
                        return BLKPREP_KILL;
                }
        }

        /*
         * The request is now prepped, no need to come back here
         */
        req->flags |= REQ_DONTPREP;
        return BLKPREP_OK;

 defer:
        /* If we defer, the elv_next_request() returns NULL, but the
         * queue must be restarted, so we plug here if no returning
         * command will automatically do that. */
        if (sdev->device_busy == 0)
                blk_plug_device(q);
        return BLKPREP_DEFER;
}
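
/*
 * For illustration (not part of the original file): the drv->init_command()
 * hook called from scsi_prep_fn() above is supplied by the upper-level
 * driver and turns the prepared block request into a CDB.  A heavily
 * simplified READ(10)/WRITE(10) sketch (cf. sd_init_command() in sd.c);
 * the example_* names are hypothetical:
 */
#if 0
static int example_init_command(struct scsi_cmnd *cmd)
{
        struct request *rq = cmd->request;
        sector_t block = rq->sector;
        unsigned int nr = rq->nr_sectors;

        cmd->cmnd[0] = (rq_data_dir(rq) == WRITE) ? WRITE_10 : READ_10;
        cmd->cmnd[2] = (block >> 24) & 0xff;
        cmd->cmnd[3] = (block >> 16) & 0xff;
        cmd->cmnd[4] = (block >> 8) & 0xff;
        cmd->cmnd[5] = block & 0xff;
        cmd->cmnd[7] = (nr >> 8) & 0xff;
        cmd->cmnd[8] = nr & 0xff;
        cmd->cmd_len = 10;

        cmd->sc_data_direction = (rq_data_dir(rq) == WRITE) ?
                        DMA_TO_DEVICE : DMA_FROM_DEVICE;
        cmd->transfersize = cmd->device->sector_size;
        cmd->timeout_per_command = 30 * HZ;
        cmd->done = example_done;       /* see the sketch above */
        return 1;                       /* nonzero means "prepared OK" */
}
#endif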
1174
1175 /*
1176  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1177  * return 0.
1178  *
1179  * Called with the queue_lock held.
1180  */
1181 static inline int scsi_dev_queue_ready(struct request_queue *q,
1182                                   struct scsi_device *sdev)
1183 {
1184         if (sdev->device_busy >= sdev->queue_depth)
1185                 return 0;
1186         if (sdev->device_busy == 0 && sdev->device_blocked) {
1187                 /*
1188                  * unblock after device_blocked iterates to zero
1189                  */
1190                 if (--sdev->device_blocked == 0) {
1191                         SCSI_LOG_MLQUEUE(3,
1192                                 printk("scsi%d (%d:%d) unblocking device at"
1193                                        " zero depth\n", sdev->host->host_no,
1194                                        sdev->id, sdev->lun));
1195                 } else {
1196                         blk_plug_device(q);
1197                         return 0;
1198                 }
1199         }
1200         if (sdev->device_blocked)
1201                 return 0;
1202
1203         return 1;
1204 }
1205
1206 /*
1207  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1208  * return 0. We must end up running the queue again whenever 0 is
1209  * returned, else IO can hang.
1210  *
1211  * Called with host_lock held.
1212  */
1213 static inline int scsi_host_queue_ready(struct request_queue *q,
1214                                    struct Scsi_Host *shost,
1215                                    struct scsi_device *sdev)
1216 {
1217         if (test_bit(SHOST_RECOVERY, &shost->shost_state))
1218                 return 0;
1219         if (shost->host_busy == 0 && shost->host_blocked) {
1220                 /*
1221                  * unblock after host_blocked iterates to zero
1222                  */
1223                 if (--shost->host_blocked == 0) {
1224                         SCSI_LOG_MLQUEUE(3,
1225                                 printk("scsi%d unblocking host at zero depth\n",
1226                                         shost->host_no));
1227                 } else {
1228                         blk_plug_device(q);
1229                         return 0;
1230                 }
1231         }
1232         if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
1233             shost->host_blocked || shost->host_self_blocked) {
1234                 if (list_empty(&sdev->starved_entry))
1235                         list_add_tail(&sdev->starved_entry, &shost->starved_list);
1236                 return 0;
1237         }
1238
1239         /* We're OK to process the command, so we can't be starved */
1240         if (!list_empty(&sdev->starved_entry))
1241                 list_del_init(&sdev->starved_entry);
1242
1243         return 1;
1244 }
1245
1246 /*
1247  * Kill requests for a dead device
1248  */
1249 static void scsi_kill_requests(request_queue_t *q)
1250 {
1251         struct request *req;
1252
1253         while ((req = elv_next_request(q)) != NULL) {
1254                 blkdev_dequeue_request(req);
1255                 req->flags |= REQ_QUIET;
1256                 while (end_that_request_first(req, 0, req->nr_sectors))
1257                         ;
1258                 end_that_request_last(req);
1259         }
1260 }
1261
1262 /*
1263  * Function:    scsi_request_fn()
1264  *
1265  * Purpose:     Main strategy routine for SCSI.
1266  *
1267  * Arguments:   q       - Pointer to actual queue.
1268  *
1269  * Returns:     Nothing
1270  *
1271  * Lock status: IO request lock assumed to be held when called.
1272  */
1273 static void scsi_request_fn(struct request_queue *q)
1274 {
1275         struct scsi_device *sdev = q->queuedata;
1276         struct Scsi_Host *shost;
1277         struct scsi_cmnd *cmd;
1278         struct request *req;
1279
1280         if (!sdev) {
1281                 printk("scsi: killing requests for dead queue\n");
1282                 scsi_kill_requests(q);
1283                 return;
1284         }
1285
1286         if(!get_device(&sdev->sdev_gendev))
1287                 /* We must be tearing the block queue down already */
1288                 return;
1289
1290         /*
1291          * To start with, we keep looping until the queue is empty, or until
1292          * the host is no longer able to accept any more requests.
1293          */
1294         shost = sdev->host;
1295         while (!blk_queue_plugged(q)) {
1296                 int rtn;
1297                 /*
1298                  * get next queueable request.  We do this early to make sure
1299                  * that the request is fully prepared even if we cannot 
1300                  * accept it.
1301                  */
1302                 req = elv_next_request(q);
1303                 if (!req || !scsi_dev_queue_ready(q, sdev))
1304                         break;
1305
1306                 if (unlikely(!scsi_device_online(sdev))) {
1307                         printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
1308                                sdev->host->host_no, sdev->id, sdev->lun);
1309                         blkdev_dequeue_request(req);
1310                         req->flags |= REQ_QUIET;
1311                         while (end_that_request_first(req, 0, req->nr_sectors))
1312                                 ;
1313                         end_that_request_last(req);
1314                         continue;
1315                 }
1316
1317
1318                 /*
1319                  * Remove the request from the request list.
1320                  */
1321                 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1322                         blkdev_dequeue_request(req);
1323                 sdev->device_busy++;
1324
1325                 spin_unlock(q->queue_lock);
1326                 spin_lock(shost->host_lock);
1327
1328                 if (!scsi_host_queue_ready(q, shost, sdev))
1329                         goto not_ready;
1330                 if (sdev->single_lun) {
1331                         if (scsi_target(sdev)->starget_sdev_user &&
1332                             scsi_target(sdev)->starget_sdev_user != sdev)
1333                                 goto not_ready;
1334                         scsi_target(sdev)->starget_sdev_user = sdev;
1335                 }
1336                 shost->host_busy++;
1337
1338                 /*
1339                  * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1340                  *              take the lock again.
1341                  */
1342                 spin_unlock_irq(shost->host_lock);
1343
1344                 cmd = req->special;
1345                 if (unlikely(cmd == NULL)) {
1346                         printk(KERN_CRIT "impossible request in %s.\n"
1347                                          "please mail a stack trace to "
1348                                          "linux-scsi@vger.kernel.org",
1349                                          __FUNCTION__);
1350                         BUG();
1351                 }
1352
1353                 /*
1354                  * Finally, initialize any error handling parameters, and set up
1355                  * the timers for timeouts.
1356                  */
1357                 scsi_init_cmd_errh(cmd);
1358
1359                 /*
1360                  * Dispatch the command to the low-level driver.
1361                  */
1362                 rtn = scsi_dispatch_cmd(cmd);
1363                 spin_lock_irq(q->queue_lock);
1364                 if(rtn) {
1365                         /* we're refusing the command; because of
1366                          * the way locks get dropped, we need to 
1367                          * check here if plugging is required */
1368                         if(sdev->device_busy == 0)
1369                                 blk_plug_device(q);
1370
1371                         break;
1372                 }
1373         }
1374
1375         goto out;
1376
1377  not_ready:
1378         spin_unlock_irq(shost->host_lock);
1379
1380         /*
1381          * lock q, handle tag, requeue req, and decrement device_busy. We
1382          * must return with queue_lock held.
1383          *
1384          * Decrementing device_busy without checking it is OK, as all such
1385          * cases (host limits or settings) should run the queue at some
1386          * later time.
1387          */
1388         spin_lock_irq(q->queue_lock);
1389         blk_requeue_request(q, req);
1390         sdev->device_busy--;
1391         if(sdev->device_busy == 0)
1392                 blk_plug_device(q);
1393  out:
1394         /* must be careful here...if we trigger the ->remove() function
1395          * we cannot be holding the q lock */
1396         spin_unlock_irq(q->queue_lock);
1397         put_device(&sdev->sdev_gendev);
1398         spin_lock_irq(q->queue_lock);
1399 }
1400
1401 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1402 {
1403         struct device *host_dev;
1404         u64 bounce_limit = 0xffffffff;
1405
1406         if (shost->unchecked_isa_dma)
1407                 return BLK_BOUNCE_ISA;
1408         /*
1409          * Platforms with virtual-DMA translation
1410          * hardware have no practical limit.
1411          */
1412         if (!PCI_DMA_BUS_IS_PHYS)
1413                 return BLK_BOUNCE_ANY;
1414
1415         host_dev = scsi_get_device(shost);
1416         if (host_dev && host_dev->dma_mask)
1417                 bounce_limit = *host_dev->dma_mask;
1418
1419         return bounce_limit;
1420 }
1421 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1422
1423 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1424 {
1425         struct Scsi_Host *shost = sdev->host;
1426         struct request_queue *q;
1427
1428         q = blk_init_queue(scsi_request_fn, NULL);
1429         if (!q)
1430                 return NULL;
1431
1432         blk_queue_prep_rq(q, scsi_prep_fn);
1433
1434         blk_queue_max_hw_segments(q, shost->sg_tablesize);
1435         blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1436         blk_queue_max_sectors(q, shost->max_sectors);
1437         blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1438         blk_queue_segment_boundary(q, shost->dma_boundary);
1439         blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1440
1441         /*
1442          * ordered tags are superior to flush ordering
1443          */
1444         if (shost->ordered_tag)
1445                 blk_queue_ordered(q, QUEUE_ORDERED_TAG);
1446         else if (shost->ordered_flush) {
1447                 blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
1448                 q->prepare_flush_fn = scsi_prepare_flush_fn;
1449                 q->end_flush_fn = scsi_end_flush_fn;
1450         }
1451
1452         if (!shost->use_clustering)
1453                 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1454         return q;
1455 }
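
/*
 * For reference, a hypothetical host template whose fields feed the
 * queue limits set above (all values illustrative only): up to 128
 * scatter-gather entries, 256-sector requests and a 4k DMA boundary.
 */
static struct scsi_host_template mydrv_template = {
        .name           = "mydrv",
        .sg_tablesize   = 128,          /* -> blk_queue_max_hw_segments() */
        .max_sectors    = 256,          /* -> blk_queue_max_sectors() */
        .dma_boundary   = 0xfff,        /* -> blk_queue_segment_boundary() */
        .use_clustering = ENABLE_CLUSTERING,
};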
1456
1457 void scsi_free_queue(struct request_queue *q)
1458 {
1459         blk_cleanup_queue(q);
1460 }
1461
1462 /*
1463  * Function:    scsi_block_requests()
1464  *
1465  * Purpose:     Utility function used by low-level drivers to prevent further
1466  *              commands from being queued to the device.
1467  *
1468  * Arguments:   shost       - Host in question
1469  *
1470  * Returns:     Nothing
1471  *
1472  * Lock status: No locks are assumed held.
1473  *
1474  * Notes:       There is no timer nor any other means by which the requests
1475  *              get unblocked other than the low-level driver calling
1476  *              scsi_unblock_requests().
1477  */
1478 void scsi_block_requests(struct Scsi_Host *shost)
1479 {
1480         shost->host_self_blocked = 1;
1481 }
1482 EXPORT_SYMBOL(scsi_block_requests);
1483
1484 /*
1485  * Function:    scsi_unblock_requests()
1486  *
1487  * Purpose:     Utility function used by low-level drivers to allow further
1488  *              commands to be queued to the device.
1489  *
1490  * Arguments:   shost       - Host in question
1491  *
1492  * Returns:     Nothing
1493  *
1494  * Lock status: No locks are assumed held.
1495  *
1496  * Notes:       There is no timer nor any other means by which the requests
1497  *              get unblocked other than the low-level driver calling
1498  *              scsi_unblock_requests().
1499  *
1500  *              This is done as an API function so that changes to the
1501  *              internals of the scsi mid-layer won't require wholesale
1502  *              changes to drivers that use this feature.
1503  */
1504 void scsi_unblock_requests(struct Scsi_Host *shost)
1505 {
1506         shost->host_self_blocked = 0;
1507         scsi_run_host_queues(shost);
1508 }
1509 EXPORT_SYMBOL(scsi_unblock_requests);
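
/*
 * Typical usage sketch (hypothetical LLD): fence off new commands
 * while the controller is reset, then let the stalled queues run.
 */
static void mydrv_reset_adapter(struct Scsi_Host *shost)
{
        scsi_block_requests(shost);     /* no new commands are queued */
        /* ... reset the hardware and wait for it to settle ... */
        scsi_unblock_requests(shost);   /* reruns all device queues */
}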
1510
1511 int __init scsi_init_queue(void)
1512 {
1513         int i;
1514
1515         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1516                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1517                 int size = sgp->size * sizeof(struct scatterlist);
1518
1519                 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1520                                 SLAB_HWCACHE_ALIGN, NULL, NULL);
1521                 if (!sgp->slab) {
1522                         printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1523                                         sgp->name);
1524                 }
1525
1526                 sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
1527                                 mempool_alloc_slab, mempool_free_slab,
1528                                 sgp->slab);
1529                 if (!sgp->pool) {
1530                         printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1531                                         sgp->name);
1532                 }
1533         }
1534
1535         return 0;
1536 }
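
/*
 * Sketch of how a request's segment count selects one of the pools
 * created above; the authoritative lookup is scsi_alloc_sgtable()
 * earlier in this file, which this merely mirrors for illustration.
 */
static inline unsigned int mydrv_sg_pool_index(unsigned int nents)
{
        if (nents <= 8)
                return 0;       /* sgpool-8 */
        if (nents <= 16)
                return 1;       /* sgpool-16 */
        if (nents <= 32)
                return 2;       /* sgpool-32 */
        return 3;               /* sgpool-64 and larger, when configured */
}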
1537
1538 void scsi_exit_queue(void)
1539 {
1540         int i;
1541
1542         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1543                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1544                 mempool_destroy(sgp->pool);
1545                 kmem_cache_destroy(sgp->slab);
1546         }
1547 }
1548 /**
1549  *      __scsi_mode_sense - issue a mode sense, falling back from 10 to 
1550  *              six bytes if necessary.
1551  *      @sreq:  SCSI request to fill in with the MODE_SENSE
1552  *      @dbd:   set if mode sense should disable block descriptors in the return
1553  *      @modepage: mode page being requested
1554  *      @buffer: request buffer (may not be smaller than eight bytes)
1555  *      @len:   length of request buffer.
1556  *      @timeout: command timeout
1557  *      @retries: number of retries before failing
1558  *      @data: returns a structure abstracting the mode header data
1559  *
1560  *      Returns the SCSI result of the command: zero on success, any
1561  *      other value on failure.  On success @data->header_length holds
1562  *      the header offset (4 for a six-byte, 8 for a ten-byte command).
1563  **/
1564 int
1565 __scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
1566                   unsigned char *buffer, int len, int timeout, int retries,
1567                   struct scsi_mode_data *data) {
1568         unsigned char cmd[12];
1569         int use_10_for_ms;
1570         int header_length;
1571
1572         memset(data, 0, sizeof(*data));
1573         memset(&cmd[0], 0, 12);
1574         cmd[1] = dbd & 0x18;    /* allow the DBD and LLBAA bits through */
1575         cmd[2] = modepage;
1576
1577  retry:
1578         use_10_for_ms = sreq->sr_device->use_10_for_ms;
1579
1580         if (use_10_for_ms) {
1581                 if (len < 8)
1582                         len = 8;
1583
1584                 cmd[0] = MODE_SENSE_10;
1585                 cmd[8] = len;
1586                 header_length = 8;
1587         } else {
1588                 if (len < 4)
1589                         len = 4;
1590
1591                 cmd[0] = MODE_SENSE;
1592                 cmd[4] = len;
1593                 header_length = 4;
1594         }
1595
1596         sreq->sr_cmd_len = 0;
1597         memset(sreq->sr_sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
1598         sreq->sr_data_direction = DMA_FROM_DEVICE;
1599
1600         memset(buffer, 0, len);
1601
1602         scsi_wait_req(sreq, cmd, buffer, len, timeout, retries);
1603
1604         /* This code looks awful: what it's doing is making sure an
1605          * ILLEGAL REQUEST sense return identifies the actual command
1606          * byte (asc 0x20, invalid opcode) as the problem, since an
1607          * unsupported mode page also reports ILLEGAL REQUEST */
1608
1609         if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) &&
1610             (driver_byte(sreq->sr_result) & DRIVER_SENSE)) {
1611                 struct scsi_sense_hdr sshdr;
1612
1613                 if (scsi_request_normalize_sense(sreq, &sshdr)) {
1614                         if ((sshdr.sense_key == ILLEGAL_REQUEST) &&
1615                             (sshdr.asc == 0x20) && (sshdr.ascq == 0)) {
1616                                 /* 
1617                                  * Invalid command operation code
1618                                  */
1619                                 sreq->sr_device->use_10_for_ms = 0;
1620                                 goto retry;
1621                         }
1622                 }
1623         }
1624
1625         if (scsi_status_is_good(sreq->sr_result)) {
1626                 data->header_length = header_length;
1627                 if (use_10_for_ms) {
1628                         data->length = buffer[0]*256 + buffer[1] + 2;
1629                         data->medium_type = buffer[2];
1630                         data->device_specific = buffer[3];
1631                         data->longlba = buffer[4] & 0x01;
1632                         data->block_descriptor_length = buffer[6]*256
1633                                 + buffer[7];
1634                 } else {
1635                         data->length = buffer[0] + 1;
1636                         data->medium_type = buffer[1];
1637                         data->device_specific = buffer[2];
1638                         data->block_descriptor_length = buffer[3];
1639                 }
1640         }
1641
1642         return sreq->sr_result;
1643 }
1644 EXPORT_SYMBOL(__scsi_mode_sense);
1645
1646 /**
1647  *      scsi_mode_sense - issue a mode sense, falling back from 10 to 
1648  *              six bytes if necessary.
1649  *      @sdev:  scsi device to send command to.
1650  *      @dbd:   set if mode sense will disable block descriptors in the return
1651  *      @modepage: mode page being requested
1652  *      @buffer: request buffer (may not be smaller than eight bytes)
1653  *      @len:   length of request buffer.
1654  *      @timeout: command timeout
1655  *      @retries: number of retries before failing
1656  *      @data: returns a structure abstracting the mode header data
1657  *
1658  *      Returns the SCSI result of the command: zero on success, -1 if
1659  *      no request could be allocated, any other value on failure.
1660  **/
1661 int
1662 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1663                 unsigned char *buffer, int len, int timeout, int retries,
1664                 struct scsi_mode_data *data)
1665 {
1666         struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
1667         int ret;
1668
1669         if (!sreq)
1670                 return -1;
1671
1672         ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
1673                                 timeout, retries, data);
1674
1675         scsi_release_request(sreq);
1676
1677         return ret;
1678 }
1679 EXPORT_SYMBOL(scsi_mode_sense);
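
/*
 * Hypothetical caller sketch: fetch the caching mode page (0x08) and
 * return its first byte, skipping the header and block descriptors
 * the way sd does.  Buffer size, timeout and retry count are made up.
 */
static int mydrv_read_caching_page(struct scsi_device *sdev)
{
        unsigned char buffer[128];
        struct scsi_mode_data data;
        int res;

        res = scsi_mode_sense(sdev, 0 /* dbd */, 0x08, buffer,
                              sizeof(buffer), 30 * HZ, 3, &data);
        if (!scsi_status_is_good(res))
                return -EIO;

        /* the page itself starts after the header and any descriptors */
        return buffer[data.header_length + data.block_descriptor_length];
}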
1680
1681 int
1682 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1683 {
1684         struct scsi_request *sreq;
1685         char cmd[] = {
1686                 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1687         };
1688         int result;
1689         
1690         sreq = scsi_allocate_request(sdev, GFP_KERNEL);
1691         if (!sreq)
1692                 return -ENOMEM;
1693
1694         sreq->sr_data_direction = DMA_NONE;
1695         scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);
1696
1697         if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && sdev->removable) {
1698                 struct scsi_sense_hdr sshdr;
1699
1700                 if ((scsi_request_normalize_sense(sreq, &sshdr)) &&
1701                     ((sshdr.sense_key == UNIT_ATTENTION) ||
1702                      (sshdr.sense_key == NOT_READY))) {
1703                         sdev->changed = 1;
1704                         sreq->sr_result = 0;
1705                 }
1706         }
1707         result = sreq->sr_result;
1708         scsi_release_request(sreq);
1709         return result;
1710 }
1711 EXPORT_SYMBOL(scsi_test_unit_ready);
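
/*
 * Illustrative poll loop: wait up to ~10 seconds for a device to
 * become ready.  The interval and budget are invented; spin-up paths
 * in the upper-level drivers use a similar pattern.
 */
static int mydrv_wait_until_ready(struct scsi_device *sdev)
{
        int tries;

        for (tries = 0; tries < 20; tries++) {
                if (scsi_test_unit_ready(sdev, 30 * HZ, 3) == 0)
                        return 0;       /* device answered TEST UNIT READY */
                msleep(500);
        }
        return -ENODEV;
}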
1712
1713 /**
1714  *      scsi_device_set_state - Take the given device through the device
1715  *              state model.
1716  *      @sdev:  scsi device to change the state of.
1717  *      @state: state to change to.
1718  *
1719  *      Returns zero if successful or an error if the requested
1720  *      transition is illegal.
1721  **/
1722 int
1723 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1724 {
1725         enum scsi_device_state oldstate = sdev->sdev_state;
1726
1727         if (state == oldstate)
1728                 return 0;
1729
1730         switch (state) {
1731         case SDEV_CREATED:
1732                 /* There are no legal states that come back to
1733                  * created.  This is the manually initialised start
1734                  * state */
1735                 goto illegal;
1736                         
1737         case SDEV_RUNNING:
1738                 switch (oldstate) {
1739                 case SDEV_CREATED:
1740                 case SDEV_OFFLINE:
1741                 case SDEV_QUIESCE:
1742                 case SDEV_BLOCK:
1743                         break;
1744                 default:
1745                         goto illegal;
1746                 }
1747                 break;
1748
1749         case SDEV_QUIESCE:
1750                 switch (oldstate) {
1751                 case SDEV_RUNNING:
1752                 case SDEV_OFFLINE:
1753                         break;
1754                 default:
1755                         goto illegal;
1756                 }
1757                 break;
1758
1759         case SDEV_OFFLINE:
1760                 switch (oldstate) {
1761                 case SDEV_CREATED:
1762                 case SDEV_RUNNING:
1763                 case SDEV_QUIESCE:
1764                 case SDEV_BLOCK:
1765                         break;
1766                 default:
1767                         goto illegal;
1768                 }
1769                 break;
1770
1771         case SDEV_BLOCK:
1772                 switch (oldstate) {
1773                 case SDEV_CREATED:
1774                 case SDEV_RUNNING:
1775                         break;
1776                 default:
1777                         goto illegal;
1778                 }
1779                 break;
1780
1781         case SDEV_CANCEL:
1782                 switch (oldstate) {
1783                 case SDEV_CREATED:
1784                 case SDEV_RUNNING:
1785                 case SDEV_OFFLINE:
1786                 case SDEV_BLOCK:
1787                         break;
1788                 default:
1789                         goto illegal;
1790                 }
1791                 break;
1792
1793         case SDEV_DEL:
1794                 switch (oldstate) {
1795                 case SDEV_CANCEL:
1796                         break;
1797                 default:
1798                         goto illegal;
1799                 }
1800                 break;
1801
1802         }
1803         sdev->sdev_state = state;
1804         return 0;
1805
1806  illegal:
1807         SCSI_LOG_ERROR_RECOVERY(1, 
1808                                 dev_printk(KERN_ERR, &sdev->sdev_gendev,
1809                                            "Illegal state transition %s->%s\n",
1810                                            scsi_device_state_name(oldstate),
1811                                            scsi_device_state_name(state))
1812                                 );
1813         return -EINVAL;
1814 }
1815 EXPORT_SYMBOL(scsi_device_set_state);
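
/*
 * For reference, the legal transitions encoded in the switch above:
 *
 *      to RUNNING:  from CREATED, OFFLINE, QUIESCE, BLOCK
 *      to QUIESCE:  from RUNNING, OFFLINE
 *      to OFFLINE:  from CREATED, RUNNING, QUIESCE, BLOCK
 *      to BLOCK:    from CREATED, RUNNING
 *      to CANCEL:   from CREATED, RUNNING, OFFLINE, BLOCK
 *      to DEL:      from CANCEL
 *      to CREATED:  never (it is the initial state only)
 *
 * A minimal (hypothetical) caller checks the return value, since an
 * illegal transition is refused with -EINVAL:
 */
static int mydrv_take_offline(struct scsi_device *sdev)
{
        /* legal from CREATED, RUNNING, QUIESCE or BLOCK per the table */
        return scsi_device_set_state(sdev, SDEV_OFFLINE);
}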
1816
1817 /**
1818  *      scsi_device_quiesce - Block user issued commands.
1819  *      @sdev:  scsi device to quiesce.
1820  *
1821  *      This works by trying to transition to the SDEV_QUIESCE state
1822  *      (which must be a legal transition).  When the device is in this
1823  *      state, only special requests will be accepted, all others will
1824  *      be deferred.  Since special requests may also be requeued requests,
1825  *      a successful return doesn't guarantee the device will be 
1826  *      totally quiescent.
1827  *
1828  *      Must be called with user context, may sleep.
1829  *
1830  *      Returns zero if successful or an error if not.
1831  **/
1832 int
1833 scsi_device_quiesce(struct scsi_device *sdev)
1834 {
1835         int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
1836         if (err)
1837                 return err;
1838
1839         scsi_run_queue(sdev->request_queue);
1840         while (sdev->device_busy) {
1841                 msleep_interruptible(200);
1842                 scsi_run_queue(sdev->request_queue);
1843         }
1844         return 0;
1845 }
1846 EXPORT_SYMBOL(scsi_device_quiesce);
1847
1848 /**
1849  *      scsi_device_resume - Restart user issued commands to a quiesced device.
1850  *      @sdev:  scsi device to resume.
1851  *
1852  *      Moves the device from quiesced back to running and restarts the
1853  *      queues.
1854  *
1855  *      Must be called with user context, may sleep.
1856  **/
1857 void
1858 scsi_device_resume(struct scsi_device *sdev)
1859 {
1860         if (scsi_device_set_state(sdev, SDEV_RUNNING))
1861                 return;
1862         scsi_run_queue(sdev->request_queue);
1863 }
1864 EXPORT_SYMBOL(scsi_device_resume);
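
/*
 * Hypothetical maintenance sequence: stop user I/O, poke the device
 * with special requests only, then restart I/O.
 */
static int mydrv_do_maintenance(struct scsi_device *sdev)
{
        int err = scsi_device_quiesce(sdev);
        if (err)
                return err;

        /* ... only special requests are processed at this point ... */

        scsi_device_resume(sdev);
        return 0;
}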
1865
1866 static void
1867 device_quiesce_fn(struct scsi_device *sdev, void *data)
1868 {
1869         scsi_device_quiesce(sdev);
1870 }
1871
1872 void
1873 scsi_target_quiesce(struct scsi_target *starget)
1874 {
1875         starget_for_each_device(starget, NULL, device_quiesce_fn);
1876 }
1877 EXPORT_SYMBOL(scsi_target_quiesce);
1878
1879 static void
1880 device_resume_fn(struct scsi_device *sdev, void *data)
1881 {
1882         scsi_device_resume(sdev);
1883 }
1884
1885 void
1886 scsi_target_resume(struct scsi_target *starget)
1887 {
1888         starget_for_each_device(starget, NULL, device_resume_fn);
1889 }
1890 EXPORT_SYMBOL(scsi_target_resume);
1891
1892 /**
1893  * scsi_internal_device_block - internal function to put a device
1894  *                              temporarily into the SDEV_BLOCK state
1895  * @sdev:       device to block
1896  *
1897  * Block request made by scsi lld's to temporarily stop all
1898  * scsi commands on the specified device.  Called from interrupt
1899  * or normal process context.
1900  *
1901  * Returns zero if successful or error if not
1902  *
1903  * Notes:       
1904  *      This routine transitions the device to the SDEV_BLOCK state
1905  *      (which must be a legal transition).  When the device is in this
1906  *      state, all commands are deferred until the scsi lld reenables
1907  *      the device with scsi_internal_device_unblock or device_block_tmo fires.
1908  *      This routine assumes the host_lock is held on entry.
1909  **/
1910 int
1911 scsi_internal_device_block(struct scsi_device *sdev)
1912 {
1913         request_queue_t *q = sdev->request_queue;
1914         unsigned long flags;
1915         int err = 0;
1916
1917         err = scsi_device_set_state(sdev, SDEV_BLOCK);
1918         if (err)
1919                 return err;
1920
1921         /* 
1922          * The device has transitioned to SDEV_BLOCK.  Stop the
1923          * block layer from calling the midlayer with this device's
1924          * request queue. 
1925          */
1926         spin_lock_irqsave(q->queue_lock, flags);
1927         blk_stop_queue(q);
1928         spin_unlock_irqrestore(q->queue_lock, flags);
1929
1930         return 0;
1931 }
1932 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
1933  
1934 /**
1935  * scsi_internal_device_unblock - resume a device after a block request
1936  * @sdev:       device to resume
1937  *
1938  * Called by scsi lld's or the midlayer to restart the device queue
1939  * for the previously suspended scsi device.  Called from interrupt or
1940  * normal process context.
1941  *
1942  * Returns zero if successful or error if not.
1943  *
1944  * Notes:       
1945  *      This routine transitions the device to the SDEV_RUNNING state
1946  *      (which must be a legal transition) allowing the midlayer to
1947  *      goose the queue for this device.  This routine assumes the 
1948  *      host_lock is held upon entry.
1949  **/
1950 int
1951 scsi_internal_device_unblock(struct scsi_device *sdev)
1952 {
1953         request_queue_t *q = sdev->request_queue; 
1954         int err;
1955         unsigned long flags;
1956         
1957         /* 
1958          * Try to transition the scsi device to SDEV_RUNNING
1959          * and goose the device queue if successful.  
1960          */
1961         err = scsi_device_set_state(sdev, SDEV_RUNNING);
1962         if (err)
1963                 return err;
1964
1965         spin_lock_irqsave(q->queue_lock, flags);
1966         blk_start_queue(q);
1967         spin_unlock_irqrestore(q->queue_lock, flags);
1968
1969         return 0;
1970 }
1971 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
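
/*
 * Sketch of LLD usage (hypothetical helper): temporarily fence a
 * single device, e.g. while its firmware is updated, then release it.
 * Per the notes above, both helpers expect the caller to hold the
 * host_lock.
 */
static int mydrv_fence_device(struct scsi_device *sdev, int block)
{
        return block ? scsi_internal_device_block(sdev)
                     : scsi_internal_device_unblock(sdev);
}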
1972
1973 static void
1974 device_block(struct scsi_device *sdev, void *data)
1975 {
1976         scsi_internal_device_block(sdev);
1977 }
1978
1979 static int
1980 target_block(struct device *dev, void *data)
1981 {
1982         if (scsi_is_target_device(dev))
1983                 starget_for_each_device(to_scsi_target(dev), NULL,
1984                                         device_block);
1985         return 0;
1986 }
1987
1988 void
1989 scsi_target_block(struct device *dev)
1990 {
1991         if (scsi_is_target_device(dev))
1992                 starget_for_each_device(to_scsi_target(dev), NULL,
1993                                         device_block);
1994         else
1995                 device_for_each_child(dev, NULL, target_block);
1996 }
1997 EXPORT_SYMBOL_GPL(scsi_target_block);
1998
1999 static void
2000 device_unblock(struct scsi_device *sdev, void *data)
2001 {
2002         scsi_internal_device_unblock(sdev);
2003 }
2004
2005 static int
2006 target_unblock(struct device *dev, void *data)
2007 {
2008         if (scsi_is_target_device(dev))
2009                 starget_for_each_device(to_scsi_target(dev), NULL,
2010                                         device_unblock);
2011         return 0;
2012 }
2013
2014 void
2015 scsi_target_unblock(struct device *dev)
2016 {
2017         if (scsi_is_target_device(dev))
2018                 starget_for_each_device(to_scsi_target(dev), NULL,
2019                                         device_unblock);
2020         else
2021                 device_for_each_child(dev, NULL, target_unblock);
2022 }
2023 EXPORT_SYMBOL_GPL(scsi_target_unblock);
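
/*
 * Illustrative transport-style usage (hypothetical handlers): fence a
 * whole target while its link is down and release it when the link
 * returns; the fibre channel transport follows this pattern.
 */
static void mydrv_link_down(struct scsi_target *starget)
{
        scsi_target_block(&starget->dev);
}

static void mydrv_link_up(struct scsi_target *starget)
{
        scsi_target_unblock(&starget->dev);
}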