[PATCH] USB: ub 01 remove first_open
drivers/block/ub.c
1 /*
2  * The low performance USB storage driver (ub).
3  *
4  * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
5  * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
6  *
7  * This work is a part of Linux kernel, is derived from it,
8  * and is not licensed separately. See file COPYING for details.
9  *
10  * TODO (sorted by decreasing priority)
11  *  -- set readonly flag for CDs, set removable flag for CF readers
12  *  -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
13  *  -- special case some senses, e.g. 3a/0 -> no media present, reduce retries
14  *  -- verify the 13 conditions and do bulk resets
15  *  -- kill last_pipe and simply do two-state clearing on both pipes
16  *  -- highmem
17  *  -- move top_sense and work_bcs into separate allocations (if they survive)
18  *     for cache purists and esoteric architectures.
19  *  -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL?
20  *  -- prune comments, they are too voluminous
21  *  -- Exterminate P3 printks
22  *  -- Resolve XXX's
23  *  -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=?
24  *  -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
25  */
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/usb.h>
29 #include <linux/usb_usual.h>
30 #include <linux/blkdev.h>
31 #include <linux/devfs_fs_kernel.h>
32 #include <linux/timer.h>
33 #include <scsi/scsi.h>
34
35 #define DRV_NAME "ub"
36 #define DEVFS_NAME DRV_NAME
37
38 #define UB_MAJOR 180
39
40 /*
41  * The command state machine is the key model for understanding this driver.
42  *
43  * The general rule is that all transitions are done towards the bottom
44  * of the diagram, thus preventing any loops.
45  *
46  * An exception to that is how the STAT state is handled. A counter allows it
47  * to be re-entered along the path marked with [C].
48  *
49  *       +--------+
50  *       ! INIT   !
51  *       +--------+
52  *           !
53  *        ub_scsi_cmd_start fails ->--------------------------------------\
54  *           !                                                            !
55  *           V                                                            !
56  *       +--------+                                                       !
57  *       ! CMD    !                                                       !
58  *       +--------+                                                       !
59  *           !                                            +--------+      !
60  *         was -EPIPE -->-------------------------------->! CLEAR  !      !
61  *           !                                            +--------+      !
62  *           !                                                !           !
63  *         was error -->------------------------------------- ! --------->\
64  *           !                                                !           !
65  *  /--<-- cmd->dir == NONE ?                                 !           !
66  *  !        !                                                !           !
67  *  !        V                                                !           !
68  *  !    +--------+                                           !           !
69  *  !    ! DATA   !                                           !           !
70  *  !    +--------+                                           !           !
71  *  !        !                           +---------+          !           !
72  *  !      was -EPIPE -->--------------->! CLR2STS !          !           !
73  *  !        !                           +---------+          !           !
74  *  !        !                                !               !           !
75  *  !        !                              was error -->---- ! --------->\
76  *  !      was error -->--------------------- ! ------------- ! --------->\
77  *  !        !                                !               !           !
78  *  !        V                                !               !           !
79  *  \--->+--------+                           !               !           !
80  *       ! STAT   !<--------------------------/               !           !
81  *  /--->+--------+                                           !           !
82  *  !        !                                                !           !
83  * [C]     was -EPIPE -->-----------\                         !           !
84  *  !        !                      !                         !           !
85  *  +<---- len == 0                 !                         !           !
86  *  !        !                      !                         !           !
87  *  !      was error -->--------------------------------------!---------->\
88  *  !        !                      !                         !           !
89  *  +<---- bad CSW                  !                         !           !
90  *  +<---- bad tag                  !                         !           !
91  *  !        !                      V                         !           !
92  *  !        !                 +--------+                     !           !
93  *  !        !                 ! CLRRS  !                     !           !
94  *  !        !                 +--------+                     !           !
95  *  !        !                      !                         !           !
96  *  \------- ! --------------------[C]--------\               !           !
97  *           !                                !               !           !
98  *         cmd->error---\                +--------+           !           !
99  *           !          +--------------->! SENSE  !<----------/           !
100  *         STAT_FAIL----/                +--------+                       !
101  *           !                                !                           V
102  *           !                                V                      +--------+
103  *           \--------------------------------\--------------------->! DONE   !
104  *                                                                   +--------+
105  */
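/*
 * Worked trace (a sketch of how to read the diagram): a successful READ
 * with data runs straight down, INIT -> CMD -> DATA -> STAT -> DONE.
 * A stall while fetching the CSW instead takes the [C] path,
 * STAT -> CLRRS -> STAT, until ub_state_stat_counted() gives up after
 * four attempts and drops into SENSE.
 */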
106
107 /*
108  * This many LUNs per USB device.
109  * Every one of them takes a host, see UB_MAX_HOSTS.
110  */
111 #define UB_MAX_LUNS   9
112
113 /*
114  */
115
116 #define UB_PARTS_PER_LUN      8
117
118 #define UB_MAX_CDB_SIZE      16         /* Corresponds to Bulk */
119
120 #define UB_SENSE_SIZE  18
121
122 /*
123  */
124
125 /* command block wrapper */
126 struct bulk_cb_wrap {
127         __le32  Signature;              /* contains 'USBC' */
128         u32     Tag;                    /* unique per command id */
129         __le32  DataTransferLength;     /* size of data */
130         u8      Flags;                  /* direction in bit 7 */
131         u8      Lun;                    /* LUN */
132         u8      Length;                 /* length of the CDB */
133         u8      CDB[UB_MAX_CDB_SIZE];   /* max command */
134 };
135
136 #define US_BULK_CB_WRAP_LEN     31
137 #define US_BULK_CB_SIGN         0x43425355      /* spells out 'USBC' */
138 #define US_BULK_FLAG_IN         0x80
139 #define US_BULK_FLAG_OUT        0
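/*
 * Wire-format sketch (an illustration, not normative): for a READ(10) of
 * 4096 bytes from LUN 0, the 31-byte CBW built in ub_scsi_cmd_start()
 * begins
 *
 *	55 53 42 43  <4 tag bytes>  00 10 00 00  80 00 0a
 *
 * that is, 'USBC' little-endian, the tag as written (the driver does not
 * byte-swap it; see the comment there), the transfer length little-endian,
 * Flags with bit 7 set for device-to-host, the LUN, and the CDB length,
 * followed by the CDB padded with zeroes to 16 bytes.
 */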
140
141 /* command status wrapper */
142 struct bulk_cs_wrap {
143         __le32  Signature;              /* should = 'USBS' */
144         u32     Tag;                    /* same as original command */
145         __le32  Residue;                /* amount not transferred */
146         u8      Status;                 /* see below */
147 };
148
149 #define US_BULK_CS_WRAP_LEN     13
150 #define US_BULK_CS_SIGN         0x53425355      /* spells out 'USBS' */
151 #define US_BULK_STAT_OK         0
152 #define US_BULK_STAT_FAIL       1
153 #define US_BULK_STAT_PHASE      2
154
155 /* bulk-only class specific requests */
156 #define US_BULK_RESET_REQUEST   0xff
157 #define US_BULK_GET_MAX_LUN     0xfe
158
159 /*
160  */
161 struct ub_dev;
162
163 #define UB_MAX_REQ_SG   9       /* cdrecord requires 32KB and maybe a header */
164 #define UB_MAX_SECTORS 64
165
166 /*
167  * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
168  * even if a webcam hogs the bus, but some devices need time to spin up.
169  */
170 #define UB_URB_TIMEOUT  (HZ*2)
171 #define UB_DATA_TIMEOUT (HZ*5)  /* ZIP does spin-ups in the data phase */
172 #define UB_STAT_TIMEOUT (HZ*5)  /* Same spinups and eject for a dataless cmd. */
173 #define UB_CTRL_TIMEOUT (HZ/2)  /* 500ms ought to be enough to clear a stall */
174
175 /*
176  * An instance of a SCSI command in transit.
177  */
178 #define UB_DIR_NONE     0
179 #define UB_DIR_READ     1
180 #define UB_DIR_ILLEGAL2 2
181 #define UB_DIR_WRITE    3
182
183 #define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
184                          (((c)==UB_DIR_READ)? 'r': 'n'))
185
186 enum ub_scsi_cmd_state {
187         UB_CMDST_INIT,                  /* Initial state */
188         UB_CMDST_CMD,                   /* Command submitted */
189         UB_CMDST_DATA,                  /* Data phase */
190         UB_CMDST_CLR2STS,               /* Clearing before requesting status */
191         UB_CMDST_STAT,                  /* Status phase */
192         UB_CMDST_CLEAR,                 /* Clearing a stall (halt, actually) */
193         UB_CMDST_CLRRS,                 /* Clearing before retrying status */
194         UB_CMDST_SENSE,                 /* Sending Request Sense */
195         UB_CMDST_DONE                   /* Final state */
196 };
197
198 static char *ub_scsi_cmd_stname[] = {
199         ".  ",
200         "Cmd",
201         "dat",
202         "c2s",
203         "sts",
204         "clr",
205         "crs",
206         "Sen",
207         "fin"
208 };
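/*
 * N.B.: ub_scsi_cmd_stname is indexed by enum ub_scsi_cmd_state above,
 * so e.g. UB_CMDST_STAT prints as "sts" in the diag output below. Keep
 * the two in sync.
 */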
209
210 struct ub_scsi_cmd {
211         unsigned char cdb[UB_MAX_CDB_SIZE];
212         unsigned char cdb_len;
213
214         unsigned char dir;              /* 0 - none, 1 - read, 3 - write. */
215         unsigned char trace_index;
216         enum ub_scsi_cmd_state state;
217         unsigned int tag;
218         struct ub_scsi_cmd *next;
219
220         int error;                      /* Return code - valid upon done */
221         unsigned int act_len;           /* Return size */
222         unsigned char key, asc, ascq;   /* May be valid if error==-EIO */
223
224         int stat_count;                 /* Retries getting status. */
225
226         unsigned int len;               /* Requested length */
227         unsigned int current_sg;
228         unsigned int nsg;               /* sgv[nsg] */
229         struct scatterlist sgv[UB_MAX_REQ_SG];
230
231         struct ub_lun *lun;
232         void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
233         void *back;
234 };
235
236 struct ub_request {
237         struct request *rq;
238         unsigned int current_try;
239         unsigned int nsg;               /* sgv[nsg] */
240         struct scatterlist sgv[UB_MAX_REQ_SG];
241 };
242
243 /*
244  */
245 struct ub_capacity {
246         unsigned long nsec;             /* Linux size - 512 byte sectors */
247         unsigned int bsize;             /* Linux hardsect_size */
248         unsigned int bshift;            /* Shift between 512 and hard sects */
249 };
250
251 /*
252  * The SCSI command tracing structure.
253  */
254
255 #define SCMD_ST_HIST_SZ   8
256 #define SCMD_TRACE_SZ    63             /* Less than 4KB of 61-byte lines */
257
258 struct ub_scsi_cmd_trace {
259         int hcur;
260         unsigned int tag;
261         unsigned int req_size, act_size;
262         unsigned char op;
263         unsigned char dir;
264         unsigned char key, asc, ascq;
265         char st_hst[SCMD_ST_HIST_SZ];
266 };
267
268 struct ub_scsi_trace {
269         int cur;
270         struct ub_scsi_cmd_trace vec[SCMD_TRACE_SZ];
271 };
272
273 /*
274  * This is a direct take-off from linux/include/completion.h
275  * The difference is that I do not wait on this thing, just poll.
276  * When I want to wait (ub_probe), I just use the stock completion.
277  *
278  * Note that INIT_COMPLETION takes no lock. It is correct. But why
279  * in the bloody hell that thing takes a struct instead of a pointer to one
280  * is quite beyond me. I just copied it from the stock completion.
281  */
282 struct ub_completion {
283         unsigned int done;
284         spinlock_t lock;
285 };
286
287 static inline void ub_init_completion(struct ub_completion *x)
288 {
289         x->done = 0;
290         spin_lock_init(&x->lock);
291 }
292
293 #define UB_INIT_COMPLETION(x)   ((x).done = 0)
294
295 static void ub_complete(struct ub_completion *x)
296 {
297         unsigned long flags;
298
299         spin_lock_irqsave(&x->lock, flags);
300         x->done++;
301         spin_unlock_irqrestore(&x->lock, flags);
302 }
303
304 static int ub_is_completed(struct ub_completion *x)
305 {
306         unsigned long flags;
307         int ret;
308
309         spin_lock_irqsave(&x->lock, flags);
310         ret = x->done;
311         spin_unlock_irqrestore(&x->lock, flags);
312         return ret;
313 }
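/*
 * Usage sketch (this mirrors what the driver itself does with
 * sc->work_done; nobody ever sleeps on a ub_completion):
 *
 *	UB_INIT_COMPLETION(sc->work_done);
 *	rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC);
 *	...
 *	if (!ub_is_completed(&sc->work_done))
 *		return;		(still in flight, the tasklet re-runs later)
 *	del_timer(&sc->work_timer);
 *
 * The URB completion handler calls ub_complete() and schedules the
 * tasklet, so the poll above eventually succeeds or the timer fires.
 */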
314
315 /*
316  */
317 struct ub_scsi_cmd_queue {
318         int qlen, qmax;
319         struct ub_scsi_cmd *head, *tail;
320 };
321
322 /*
323  * The block device instance (one per LUN).
324  */
325 struct ub_lun {
326         struct ub_dev *udev;
327         struct list_head link;
328         struct gendisk *disk;
329         int id;                         /* Host index */
330         int num;                        /* LUN number */
331         char name[16];
332
333         int changed;                    /* Media was changed */
334         int removable;
335         int readonly;
336
337         struct ub_request urq;
338
339         /* Use Ingo's mempool if or when we have more than one command. */
340         /*
341          * Currently we never need more than one command for the whole device.
342          * However, giving every LUN a command is a cheap and automatic way
343          * to enforce fairness between them.
344          */
345         int cmda[1];
346         struct ub_scsi_cmd cmdv[1];
347
348         struct ub_capacity capacity;
349 };
350
351 /*
352  * The USB device instance.
353  */
354 struct ub_dev {
355         spinlock_t *lock;
356         atomic_t poison;                /* The USB device is disconnected */
357         int openc;                      /* protected by ub_lock! */
358                                         /* kref is too implicit for our taste */
359         int reset;                      /* Reset is running */
360         unsigned int tagcnt;
361         char name[12];
362         struct usb_device *dev;
363         struct usb_interface *intf;
364
365         struct list_head luns;
366
367         unsigned int send_bulk_pipe;    /* cached pipe values */
368         unsigned int recv_bulk_pipe;
369         unsigned int send_ctrl_pipe;
370         unsigned int recv_ctrl_pipe;
371
372         struct tasklet_struct tasklet;
373
374         struct ub_scsi_cmd_queue cmd_queue;
375         struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
376         unsigned char top_sense[UB_SENSE_SIZE];
377
378         struct ub_completion work_done;
379         struct urb work_urb;
380         struct timer_list work_timer;
381         int last_pipe;                  /* What might need clearing */
382         __le32 signature;               /* Learned signature */
383         struct bulk_cb_wrap work_bcb;
384         struct bulk_cs_wrap work_bcs;
385         struct usb_ctrlrequest work_cr;
386
387         struct work_struct reset_work;
388         wait_queue_head_t reset_wait;
389
390         int sg_stat[6];
391         struct ub_scsi_trace tr;
392 };
393
394 /*
395  */
396 static void ub_cleanup(struct ub_dev *sc);
397 static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
398 static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
399     struct ub_scsi_cmd *cmd, struct ub_request *urq);
400 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
401     struct ub_scsi_cmd *cmd, struct ub_request *urq);
402 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
403 static void ub_end_rq(struct request *rq, int uptodate);
404 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
405     struct ub_request *urq, struct ub_scsi_cmd *cmd);
406 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
407 static void ub_urb_complete(struct urb *urb, struct pt_regs *pt);
408 static void ub_scsi_action(unsigned long _dev);
409 static void ub_scsi_dispatch(struct ub_dev *sc);
410 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
411 static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
412 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
413 static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
414 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
415 static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
416 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
417 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
418     int stalled_pipe);
419 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
420 static void ub_reset_enter(struct ub_dev *sc, int try);
421 static void ub_reset_task(void *arg);
422 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
423 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
424     struct ub_capacity *ret);
425 static int ub_sync_reset(struct ub_dev *sc);
426 static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
427 static int ub_probe_lun(struct ub_dev *sc, int lnum);
428
429 /*
430  */
431 #ifdef CONFIG_USB_LIBUSUAL
432
433 #define ub_usb_ids  storage_usb_ids
434 #else
435
436 static struct usb_device_id ub_usb_ids[] = {
437         { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
438         { }
439 };
440
441 MODULE_DEVICE_TABLE(usb, ub_usb_ids);
442 #endif /* CONFIG_USB_LIBUSUAL */
443
444 /*
445  * Find me a way to identify "next free minor" for add_disk(),
446  * and the array disappears the next day. However, the number of
447  * hosts has something to do with the naming and /proc/partitions.
448  * This has to be thought out in detail before changing.
449  * If UB_MAX_HOSTS were 1000, we'd use a bitmap. Or a better data structure.
450  */
451 #define UB_MAX_HOSTS  26
452 static char ub_hostv[UB_MAX_HOSTS];
453
454 #define UB_QLOCK_NUM 5
455 static spinlock_t ub_qlockv[UB_QLOCK_NUM];
456 static int ub_qlock_next = 0;
457
458 static DEFINE_SPINLOCK(ub_lock);        /* Locks globals and ->openc */
459
460 /*
461  * The SCSI command tracing procedures.
462  */
463
464 static void ub_cmdtr_new(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
465 {
466         int n;
467         struct ub_scsi_cmd_trace *t;
468
469         if ((n = sc->tr.cur + 1) == SCMD_TRACE_SZ) n = 0;
470         t = &sc->tr.vec[n];
471
472         memset(t, 0, sizeof(struct ub_scsi_cmd_trace));
473         t->tag = cmd->tag;
474         t->op = cmd->cdb[0];
475         t->dir = cmd->dir;
476         t->req_size = cmd->len;
477         t->st_hst[0] = cmd->state;
478
479         sc->tr.cur = n;
480         cmd->trace_index = n;
481 }
482
483 static void ub_cmdtr_state(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
484 {
485         int n;
486         struct ub_scsi_cmd_trace *t;
487
488         t = &sc->tr.vec[cmd->trace_index];
489         if (t->tag == cmd->tag) {
490                 if ((n = t->hcur + 1) == SCMD_ST_HIST_SZ) n = 0;
491                 t->st_hst[n] = cmd->state;
492                 t->hcur = n;
493         }
494 }
495
496 static void ub_cmdtr_act_len(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
497 {
498         struct ub_scsi_cmd_trace *t;
499
500         t = &sc->tr.vec[cmd->trace_index];
501         if (t->tag == cmd->tag)
502                 t->act_size = cmd->act_len;
503 }
504
505 static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
506     unsigned char *sense)
507 {
508         struct ub_scsi_cmd_trace *t;
509
510         t = &sc->tr.vec[cmd->trace_index];
511         if (t->tag == cmd->tag) {
512                 t->key = sense[2] & 0x0F;
513                 t->asc = sense[12];
514                 t->ascq = sense[13];
515         }
516 }
517
518 static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
519     char *page)
520 {
521         struct usb_interface *intf;
522         struct ub_dev *sc;
523         struct list_head *p;
524         struct ub_lun *lun;
525         int cnt;
526         unsigned long flags;
527         int nc, nh;
528         int i, j;
529         struct ub_scsi_cmd_trace *t;
530
531         intf = to_usb_interface(dev);
532         sc = usb_get_intfdata(intf);
533         if (sc == NULL)
534                 return 0;
535
536         cnt = 0;
537         spin_lock_irqsave(sc->lock, flags);
538
539         cnt += sprintf(page + cnt,
540             "poison %d reset %d\n",
541             atomic_read(&sc->poison), sc->reset);
542         cnt += sprintf(page + cnt,
543             "qlen %d qmax %d\n",
544             sc->cmd_queue.qlen, sc->cmd_queue.qmax);
545         cnt += sprintf(page + cnt,
546             "sg %d %d %d %d %d .. %d\n",
547             sc->sg_stat[0],
548             sc->sg_stat[1],
549             sc->sg_stat[2],
550             sc->sg_stat[3],
551             sc->sg_stat[4],
552             sc->sg_stat[5]);
553
554         list_for_each (p, &sc->luns) {
555                 lun = list_entry(p, struct ub_lun, link);
556                 cnt += sprintf(page + cnt,
557                     "lun %u changed %d removable %d readonly %d\n",
558                     lun->num, lun->changed, lun->removable, lun->readonly);
559         }
560
561         if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0;
562         for (j = 0; j < SCMD_TRACE_SZ; j++) {
563                 t = &sc->tr.vec[nc];
564
565                 cnt += sprintf(page + cnt, "%08x %02x", t->tag, t->op);
566                 if (t->op == REQUEST_SENSE) {
567                         cnt += sprintf(page + cnt, " [sense %x %02x %02x]",
568                                         t->key, t->asc, t->ascq);
569                 } else {
570                         cnt += sprintf(page + cnt, " %c", UB_DIR_CHAR(t->dir));
571                         cnt += sprintf(page + cnt, " [%5d %5d]",
572                                         t->req_size, t->act_size);
573                 }
574                 if ((nh = t->hcur + 1) == SCMD_ST_HIST_SZ) nh = 0;
575                 for (i = 0; i < SCMD_ST_HIST_SZ; i++) {
576                         cnt += sprintf(page + cnt, " %s",
577                                         ub_scsi_cmd_stname[(int)t->st_hst[nh]]);
578                         if (++nh == SCMD_ST_HIST_SZ) nh = 0;
579                 }
580                 cnt += sprintf(page + cnt, "\n");
581
582                 if (++nc == SCMD_TRACE_SZ) nc = 0;
583         }
584
585         spin_unlock_irqrestore(sc->lock, flags);
586         return cnt;
587 }
588
589 static DEVICE_ATTR(diag, S_IRUGO, ub_diag_show, NULL); /* N.B. World readable */
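/*
 * A read of the diag attribute yields output along these lines (the
 * values here are made up for illustration):
 *
 *	poison 0 reset 0
 *	qlen 0 qmax 1
 *	sg 0 31 2 0 0 .. 0
 *	lun 0 changed 0 removable 1 readonly 0
 *
 * followed by SCMD_TRACE_SZ lines of command history, oldest first.
 */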
590
591 /*
592  * The id allocator.
593  *
594  * This also stores the host for indexing by minor, which is somewhat dirty.
595  */
596 static int ub_id_get(void)
597 {
598         unsigned long flags;
599         int i;
600
601         spin_lock_irqsave(&ub_lock, flags);
602         for (i = 0; i < UB_MAX_HOSTS; i++) {
603                 if (ub_hostv[i] == 0) {
604                         ub_hostv[i] = 1;
605                         spin_unlock_irqrestore(&ub_lock, flags);
606                         return i;
607                 }
608         }
609         spin_unlock_irqrestore(&ub_lock, flags);
610         return -1;
611 }
612
613 static void ub_id_put(int id)
614 {
615         unsigned long flags;
616
617         if (id < 0 || id >= UB_MAX_HOSTS) {
618                 printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
619                 return;
620         }
621
622         spin_lock_irqsave(&ub_lock, flags);
623         if (ub_hostv[id] == 0) {
624                 spin_unlock_irqrestore(&ub_lock, flags);
625                 printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
626                 return;
627         }
628         ub_hostv[id] = 0;
629         spin_unlock_irqrestore(&ub_lock, flags);
630 }
631
632 /*
633  * This is necessitated by the fact that blk_cleanup_queue does not
634  * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
635  * Since our blk_init_queue() passes a spinlock common with ub_dev,
636  * we have lifetime issues when ub_cleanup frees ub_dev.
637  */
638 static spinlock_t *ub_next_lock(void)
639 {
640         unsigned long flags;
641         spinlock_t *ret;
642
643         spin_lock_irqsave(&ub_lock, flags);
644         ret = &ub_qlockv[ub_qlock_next];
645         ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
646         spin_unlock_irqrestore(&ub_lock, flags);
647         return ret;
648 }
649
650 /*
651  * Downcount for deallocation. This rides on two assumptions:
652  *  - once something is poisoned, its refcount cannot grow
653  *  - opens cannot happen at this time (del_gendisk was done)
654  * If the above is true, we can drop the lock, which we need for
655  * blk_cleanup_queue(): the silly thing may attempt to sleep.
656  * [Actually, it never needs to sleep for us, but it calls might_sleep()]
657  */
658 static void ub_put(struct ub_dev *sc)
659 {
660         unsigned long flags;
661
662         spin_lock_irqsave(&ub_lock, flags);
663         --sc->openc;
664         if (sc->openc == 0 && atomic_read(&sc->poison)) {
665                 spin_unlock_irqrestore(&ub_lock, flags);
666                 ub_cleanup(sc);
667         } else {
668                 spin_unlock_irqrestore(&ub_lock, flags);
669         }
670 }
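/*
 * The matching increment (presumably in the probe and open paths, which
 * are outside this excerpt) does sc->openc++ under ub_lock, so a
 * poisoned device is freed exactly once, by whoever drops the last
 * reference.
 */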
671
672 /*
673  * Final cleanup and deallocation.
674  */
675 static void ub_cleanup(struct ub_dev *sc)
676 {
677         struct list_head *p;
678         struct ub_lun *lun;
679         request_queue_t *q;
680
681         while (!list_empty(&sc->luns)) {
682                 p = sc->luns.next;
683                 lun = list_entry(p, struct ub_lun, link);
684                 list_del(p);
685
686                 /* I don't think queue can be NULL. But... Stolen from sx8.c */
687                 if ((q = lun->disk->queue) != NULL)
688                         blk_cleanup_queue(q);
689                 /*
690                  * If we zero disk->private_data BEFORE put_disk, we have
691                  * to check for NULL all over the place in open, release,
692                  * check_media and revalidate, because the block level
693                  * semaphore is well inside the put_disk.
694                  * But we cannot zero after the call, because *disk is gone.
695                  * The sd.c is blatantly racy in this area.
696                  */
697                 /* disk->private_data = NULL; */
698                 put_disk(lun->disk);
699                 lun->disk = NULL;
700
701                 ub_id_put(lun->id);
702                 kfree(lun);
703         }
704
705         kfree(sc);
706 }
707
708 /*
709  * The "command allocator".
710  */
711 static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
712 {
713         struct ub_scsi_cmd *ret;
714
715         if (lun->cmda[0])
716                 return NULL;
717         ret = &lun->cmdv[0];
718         lun->cmda[0] = 1;
719         return ret;
720 }
721
722 static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
723 {
724         if (cmd != &lun->cmdv[0]) {
725                 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
726                     lun->name, cmd);
727                 return;
728         }
729         if (!lun->cmda[0]) {
730                 printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
731                 return;
732         }
733         lun->cmda[0] = 0;
734 }
735
736 /*
737  * The command queue.
738  */
739 static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
740 {
741         struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
742
743         if (t->qlen++ == 0) {
744                 t->head = cmd;
745                 t->tail = cmd;
746         } else {
747                 t->tail->next = cmd;
748                 t->tail = cmd;
749         }
750
751         if (t->qlen > t->qmax)
752                 t->qmax = t->qlen;
753 }
754
755 static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
756 {
757         struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
758
759         if (t->qlen++ == 0) {
760                 t->head = cmd;
761                 t->tail = cmd;
762         } else {
763                 cmd->next = t->head;
764                 t->head = cmd;
765         }
766
767         if (t->qlen > t->qmax)
768                 t->qmax = t->qlen;
769 }
770
771 static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
772 {
773         struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
774         struct ub_scsi_cmd *cmd;
775
776         if (t->qlen == 0)
777                 return NULL;
778         if (--t->qlen == 0)
779                 t->tail = NULL;
780         cmd = t->head;
781         t->head = cmd->next;
782         cmd->next = NULL;
783         return cmd;
784 }
785
786 #define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)
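/*
 * Queue discipline in short: ub_cmdq_add() appends at the tail for
 * ordinary commands, while ub_cmdq_insert() pushes at the head so that
 * an urgent command (the auto-sense, for one) can jump the queue. The
 * dispatcher peeks at the head and pops a command only once it reaches
 * UB_CMDST_DONE.
 */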
787
788 /*
789  * The request function is our main entry point
790  */
791
792 static void ub_request_fn(request_queue_t *q)
793 {
794         struct ub_lun *lun = q->queuedata;
795         struct request *rq;
796
797         while ((rq = elv_next_request(q)) != NULL) {
798                 if (ub_request_fn_1(lun, rq) != 0) {
799                         blk_stop_queue(q);
800                         break;
801                 }
802         }
803 }
804
805 static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
806 {
807         struct ub_dev *sc = lun->udev;
808         struct ub_scsi_cmd *cmd;
809         struct ub_request *urq;
810         int n_elem;
811
812         if (atomic_read(&sc->poison) || lun->changed) {
813                 blkdev_dequeue_request(rq);
814                 ub_end_rq(rq, 0);
815                 return 0;
816         }
817
818         if (lun->urq.rq != NULL)
819                 return -1;
820         if ((cmd = ub_get_cmd(lun)) == NULL)
821                 return -1;
822         memset(cmd, 0, sizeof(struct ub_scsi_cmd));
823
824         blkdev_dequeue_request(rq);
825
826         urq = &lun->urq;
827         memset(urq, 0, sizeof(struct ub_request));
828         urq->rq = rq;
829
830         /*
831          * get scatterlist from block layer
832          */
833         n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
834         if (n_elem < 0) {
835                 printk(KERN_INFO "%s: failed request map (%d)\n",
836                     lun->name, n_elem); /* P3 */
837                 goto drop;
838         }
839         if (n_elem > UB_MAX_REQ_SG) {   /* Paranoia */
840                 printk(KERN_WARNING "%s: request with %d segments\n",
841                     lun->name, n_elem);
842                 goto drop;
843         }
844         urq->nsg = n_elem;
845         sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
846
847         if (blk_pc_request(rq)) {
848                 ub_cmd_build_packet(sc, lun, cmd, urq);
849         } else {
850                 ub_cmd_build_block(sc, lun, cmd, urq);
851         }
852         cmd->state = UB_CMDST_INIT;
853         cmd->lun = lun;
854         cmd->done = ub_rw_cmd_done;
855         cmd->back = urq;
856
857         cmd->tag = sc->tagcnt++;
858         if (ub_submit_scsi(sc, cmd) != 0)
859                 goto drop;
860
861         return 0;
862
863 drop:
864         ub_put_cmd(lun, cmd);
865         ub_end_rq(rq, 0);
866         return 0;
867 }
868
869 static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
870     struct ub_scsi_cmd *cmd, struct ub_request *urq)
871 {
872         struct request *rq = urq->rq;
873         unsigned int block, nblks;
874
875         if (rq_data_dir(rq) == WRITE)
876                 cmd->dir = UB_DIR_WRITE;
877         else
878                 cmd->dir = UB_DIR_READ;
879
880         cmd->nsg = urq->nsg;
881         memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
882
883         /*
884          * build the command
885          *
886  * The call to blk_queue_hardsect_size() guarantees that the request
887  * is aligned, but it is always given in 512-byte units.
888          */
889         block = rq->sector >> lun->capacity.bshift;
890         nblks = rq->nr_sectors >> lun->capacity.bshift;
891
892         cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
893         /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
894         cmd->cdb[2] = block >> 24;
895         cmd->cdb[3] = block >> 16;
896         cmd->cdb[4] = block >> 8;
897         cmd->cdb[5] = block;
898         cmd->cdb[7] = nblks >> 8;
899         cmd->cdb[8] = nblks;
900         cmd->cdb_len = 10;
901
902         cmd->len = rq->nr_sectors * 512;
903 }
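/*
 * Worked example: for a LUN with 2048-byte hard sectors, capacity.bshift
 * is 2, so a request for eight 512-byte sectors at sector 100 becomes
 * block 25, nblks 2 in the CDB above, while cmd->len stays 8 * 512 =
 * 4096 bytes.
 */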
904
905 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
906     struct ub_scsi_cmd *cmd, struct ub_request *urq)
907 {
908         struct request *rq = urq->rq;
909
910         if (rq->data_len == 0) {
911                 cmd->dir = UB_DIR_NONE;
912         } else {
913                 if (rq_data_dir(rq) == WRITE)
914                         cmd->dir = UB_DIR_WRITE;
915                 else
916                         cmd->dir = UB_DIR_READ;
917         }
918
919         cmd->nsg = urq->nsg;
920         memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
921
922         memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
923         cmd->cdb_len = rq->cmd_len;
924
925         cmd->len = rq->data_len;
926 }
927
928 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
929 {
930         struct ub_lun *lun = cmd->lun;
931         struct ub_request *urq = cmd->back;
932         struct request *rq;
933         int uptodate;
934
935         rq = urq->rq;
936
937         if (cmd->error == 0) {
938                 uptodate = 1;
939
940                 if (blk_pc_request(rq)) {
941                         if (cmd->act_len >= rq->data_len)
942                                 rq->data_len = 0;
943                         else
944                                 rq->data_len -= cmd->act_len;
945                 }
946         } else {
947                 uptodate = 0;
948
949                 if (blk_pc_request(rq)) {
950                         /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
951                         memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
952                         rq->sense_len = UB_SENSE_SIZE;
953                         if (sc->top_sense[0] != 0)
954                                 rq->errors = SAM_STAT_CHECK_CONDITION;
955                         else
956                                 rq->errors = DID_ERROR << 16;
957                 } else {
958                         if (cmd->error == -EIO) {
959                                 if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
960                                         return;
961                         }
962                 }
963         }
964
965         urq->rq = NULL;
966
967         ub_put_cmd(lun, cmd);
968         ub_end_rq(rq, uptodate);
969         blk_start_queue(lun->disk->queue);
970 }
971
972 static void ub_end_rq(struct request *rq, int uptodate)
973 {
974         end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
975         end_that_request_last(rq, uptodate);
976 }
977
978 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
979     struct ub_request *urq, struct ub_scsi_cmd *cmd)
980 {
981
982         if (atomic_read(&sc->poison))
983                 return -ENXIO;
984
985         ub_reset_enter(sc, urq->current_try);
986
987         if (urq->current_try >= 3)
988                 return -EIO;
989         urq->current_try++;
990         /* P3 */ printk("%s: dir %c len/act %d/%d "
991             "[sense %x %02x %02x] retry %d\n",
992             sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
993             cmd->key, cmd->asc, cmd->ascq, urq->current_try);
994
995         memset(cmd, 0, sizeof(struct ub_scsi_cmd));
996         ub_cmd_build_block(sc, lun, cmd, urq);
997
998         cmd->state = UB_CMDST_INIT;
999         cmd->lun = lun;
1000         cmd->done = ub_rw_cmd_done;
1001         cmd->back = urq;
1002
1003         cmd->tag = sc->tagcnt++;
1004
1005 #if 0 /* Wasteful */
1006         return ub_submit_scsi(sc, cmd);
1007 #else
1008         ub_cmdq_add(sc, cmd);
1009         return 0;
1010 #endif
1011 }
1012
1013 /*
1014  * Submit a regular SCSI operation (not an auto-sense).
1015  *
1016  * The Iron Law of Good Submit Routine is:
1017  * Zero return - callback is done, Nonzero return - callback is not done.
1018  * No exceptions.
1019  *
1020  * Host is assumed locked.
1021  */
1022 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1023 {
1024
1025         if (cmd->state != UB_CMDST_INIT ||
1026             (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
1027                 return -EINVAL;
1028         }
1029
1030         ub_cmdq_add(sc, cmd);
1031         /*
1032          * We can call ub_scsi_dispatch(sc) right away here, but it's a little
1033          * safer to jump to a tasklet, in case upper layers do something silly.
1034          */
1035         tasklet_schedule(&sc->tasklet);
1036         return 0;
1037 }
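/*
 * Caller-side sketch of the Iron Law: on a nonzero return the caller
 * still owns the command and must clean up itself, as the "goto drop"
 * path in ub_request_fn_1() does; on a zero return it may not touch the
 * command until ->done runs.
 */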
1038
1039 /*
1040  * Submit the first URB for the queued command.
1041  * This function does not deal with queueing in any way.
1042  */
1043 static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1044 {
1045         struct bulk_cb_wrap *bcb;
1046         int rc;
1047
1048         bcb = &sc->work_bcb;
1049
1050         /*
1051          * ``If the allocation length is eighteen or greater, and a device
1052  * server returns less than eighteen bytes of data, the application
1053          * client should assume that the bytes not transferred would have been
1054          * zeroes had the device server returned those bytes.''
1055          *
1056          * We zero sense for all commands so that when a packet request
1057          * fails it does not return a stale sense.
1058          */
1059         memset(&sc->top_sense, 0, UB_SENSE_SIZE);
1060
1061         /* set up the command wrapper */
1062         bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
1063         bcb->Tag = cmd->tag;            /* Endianness is not important */
1064         bcb->DataTransferLength = cpu_to_le32(cmd->len);
1065         bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
1066         bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
1067         bcb->Length = cmd->cdb_len;
1068
1069         /* copy the command payload */
1070         memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
1071
1072         UB_INIT_COMPLETION(sc->work_done);
1073
1074         sc->last_pipe = sc->send_bulk_pipe;
1075         usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
1076             bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
1077
1078         /* Fill what we shouldn't be filling, because usb-storage did so. */
1079         sc->work_urb.actual_length = 0;
1080         sc->work_urb.error_count = 0;
1081         sc->work_urb.status = 0;
1082
1083         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1084                 /* XXX Clear stalls */
1085                 ub_complete(&sc->work_done);
1086                 return rc;
1087         }
1088
1089         sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
1090         add_timer(&sc->work_timer);
1091
1092         cmd->state = UB_CMDST_CMD;
1093         ub_cmdtr_state(sc, cmd);
1094         return 0;
1095 }
1096
1097 /*
1098  * Timeout handler.
1099  */
1100 static void ub_urb_timeout(unsigned long arg)
1101 {
1102         struct ub_dev *sc = (struct ub_dev *) arg;
1103         unsigned long flags;
1104
1105         spin_lock_irqsave(sc->lock, flags);
1106         if (!ub_is_completed(&sc->work_done))
1107                 usb_unlink_urb(&sc->work_urb);
1108         spin_unlock_irqrestore(sc->lock, flags);
1109 }
1110
1111 /*
1112  * Completion routine for the work URB.
1113  *
1114  * This can be called directly from usb_submit_urb (while we have
1115  * the sc->lock taken) and from an interrupt (while we do NOT have
1116  * the sc->lock taken). Therefore, bounce this off to a tasklet.
1117  */
1118 static void ub_urb_complete(struct urb *urb, struct pt_regs *pt)
1119 {
1120         struct ub_dev *sc = urb->context;
1121
1122         ub_complete(&sc->work_done);
1123         tasklet_schedule(&sc->tasklet);
1124 }
1125
1126 static void ub_scsi_action(unsigned long _dev)
1127 {
1128         struct ub_dev *sc = (struct ub_dev *) _dev;
1129         unsigned long flags;
1130
1131         spin_lock_irqsave(sc->lock, flags);
1132         ub_scsi_dispatch(sc);
1133         spin_unlock_irqrestore(sc->lock, flags);
1134 }
1135
1136 static void ub_scsi_dispatch(struct ub_dev *sc)
1137 {
1138         struct ub_scsi_cmd *cmd;
1139         int rc;
1140
1141         while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
1142                 if (cmd->state == UB_CMDST_DONE) {
1143                         ub_cmdq_pop(sc);
1144                         (*cmd->done)(sc, cmd);
1145                 } else if (cmd->state == UB_CMDST_INIT) {
1146                         ub_cmdtr_new(sc, cmd);
1147                         if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
1148                                 break;
1149                         cmd->error = rc;
1150                         cmd->state = UB_CMDST_DONE;
1151                         ub_cmdtr_state(sc, cmd);
1152                 } else {
1153                         if (!ub_is_completed(&sc->work_done))
1154                                 break;
1155                         del_timer(&sc->work_timer);
1156                         ub_scsi_urb_compl(sc, cmd);
1157                 }
1158         }
1159 }
1160
1161 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1162 {
1163         struct urb *urb = &sc->work_urb;
1164         struct bulk_cs_wrap *bcs;
1165         int len;
1166         int rc;
1167
1168         if (atomic_read(&sc->poison)) {
1169                 ub_state_done(sc, cmd, -ENODEV);
1170                 return;
1171         }
1172
1173         if (cmd->state == UB_CMDST_CLEAR) {
1174                 if (urb->status == -EPIPE) {
1175                         /*
1176                          * STALL while clearing STALL.
1177                          * The control pipe clears itself - nothing to do.
1178                          */
1179                         printk(KERN_NOTICE "%s: stall on control pipe\n",
1180                             sc->name);
1181                         goto Bad_End;
1182                 }
1183
1184                 /*
1185                  * We ignore the result for the halt clear.
1186                  */
1187
1188                 /* reset the endpoint toggle */
1189                 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1190                         usb_pipeout(sc->last_pipe), 0);
1191
1192                 ub_state_sense(sc, cmd);
1193
1194         } else if (cmd->state == UB_CMDST_CLR2STS) {
1195                 if (urb->status == -EPIPE) {
1196                         printk(KERN_NOTICE "%s: stall on control pipe\n",
1197                             sc->name);
1198                         goto Bad_End;
1199                 }
1200
1201                 /*
1202                  * We ignore the result for the halt clear.
1203                  */
1204
1205                 /* reset the endpoint toggle */
1206                 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1207                         usb_pipeout(sc->last_pipe), 0);
1208
1209                 ub_state_stat(sc, cmd);
1210
1211         } else if (cmd->state == UB_CMDST_CLRRS) {
1212                 if (urb->status == -EPIPE) {
1213                         printk(KERN_NOTICE "%s: stall on control pipe\n",
1214                             sc->name);
1215                         goto Bad_End;
1216                 }
1217
1218                 /*
1219                  * We ignore the result for the halt clear.
1220                  */
1221
1222                 /* reset the endpoint toggle */
1223                 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1224                         usb_pipeout(sc->last_pipe), 0);
1225
1226                 ub_state_stat_counted(sc, cmd);
1227
1228         } else if (cmd->state == UB_CMDST_CMD) {
1229                 switch (urb->status) {
1230                 case 0:
1231                         break;
1232                 case -EOVERFLOW:
1233                         goto Bad_End;
1234                 case -EPIPE:
1235                         rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1236                         if (rc != 0) {
1237                                 printk(KERN_NOTICE "%s: "
1238                                     "unable to submit clear (%d)\n",
1239                                     sc->name, rc);
1240                                 /*
1241                                  * This is typically ENOMEM or some other such shit.
1242                                  * Retrying is pointless. Just do Bad End on it...
1243                                  */
1244                                 ub_state_done(sc, cmd, rc);
1245                                 return;
1246                         }
1247                         cmd->state = UB_CMDST_CLEAR;
1248                         ub_cmdtr_state(sc, cmd);
1249                         return;
1250                 case -ESHUTDOWN:        /* unplug */
1251                 case -EILSEQ:           /* unplug timeout on uhci */
1252                         ub_state_done(sc, cmd, -ENODEV);
1253                         return;
1254                 default:
1255                         goto Bad_End;
1256                 }
1257                 if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
1258                         goto Bad_End;
1259                 }
1260
1261                 if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
1262                         ub_state_stat(sc, cmd);
1263                         return;
1264                 }
1265
1266                 // udelay(125);         // usb-storage has this
1267                 ub_data_start(sc, cmd);
1268
1269         } else if (cmd->state == UB_CMDST_DATA) {
1270                 if (urb->status == -EPIPE) {
1271                         rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1272                         if (rc != 0) {
1273                                 printk(KERN_NOTICE "%s: "
1274                                     "unable to submit clear (%d)\n",
1275                                     sc->name, rc);
1276                                 ub_state_done(sc, cmd, rc);
1277                                 return;
1278                         }
1279                         cmd->state = UB_CMDST_CLR2STS;
1280                         ub_cmdtr_state(sc, cmd);
1281                         return;
1282                 }
1283                 if (urb->status == -EOVERFLOW) {
1284                         /*
1285                          * A babble? Failure, but we must transfer CSW now.
1286                          */
1287                         cmd->error = -EOVERFLOW;        /* A cheap trick... */
1288                         ub_state_stat(sc, cmd);
1289                         return;
1290                 }
1291
1292                 if (cmd->dir == UB_DIR_WRITE) {
1293                         /*
1294                          * Do not continue writes in case of a failure.
1295                          * Doing so would cause sectors to be mixed up,
1296                          * which is worse than sectors lost.
1297                          *
1298                          * We must try to read the CSW, or many devices
1299                          * get confused.
1300                          */
1301                         len = urb->actual_length;
1302                         if (urb->status != 0 ||
1303                             len != cmd->sgv[cmd->current_sg].length) {
1304                                 cmd->act_len += len;
1305                                 ub_cmdtr_act_len(sc, cmd);
1306
1307                                 cmd->error = -EIO;
1308                                 ub_state_stat(sc, cmd);
1309                                 return;
1310                         }
1311
1312                 } else {
1313                         /*
1314                          * If an error occurs on read, we record it, and
1315                          * continue to fetch data in order to avoid a bubble.
1316                          *
1317                          * As a small shortcut, we stop if we detect that
1318                          * a CSW is mixed into the data.
1319                          */
1320                         if (urb->status != 0)
1321                                 cmd->error = -EIO;
1322
1323                         len = urb->actual_length;
1324                         if (urb->status != 0 ||
1325                             len != cmd->sgv[cmd->current_sg].length) {
1326                                 if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
1327                                         goto Bad_End;
1328                         }
1329                 }
1330
1331                 cmd->act_len += urb->actual_length;
1332                 ub_cmdtr_act_len(sc, cmd);
1333
1334                 if (++cmd->current_sg < cmd->nsg) {
1335                         ub_data_start(sc, cmd);
1336                         return;
1337                 }
1338                 ub_state_stat(sc, cmd);
1339
1340         } else if (cmd->state == UB_CMDST_STAT) {
1341                 if (urb->status == -EPIPE) {
1342                         rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1343                         if (rc != 0) {
1344                                 printk(KERN_NOTICE "%s: "
1345                                     "unable to submit clear (%d)\n",
1346                                     sc->name, rc);
1347                                 ub_state_done(sc, cmd, rc);
1348                                 return;
1349                         }
1350
1351                         /*
1352                          * Having a stall when getting CSW is an error, so
1353                          * make sure upper levels are not oblivious to it.
1354                          */
1355                         cmd->error = -EIO;              /* A cheap trick... */
1356
1357                         cmd->state = UB_CMDST_CLRRS;
1358                         ub_cmdtr_state(sc, cmd);
1359                         return;
1360                 }
1361
1362                 /* Catch everything, including -EOVERFLOW and other nasties. */
1363                 if (urb->status != 0)
1364                         goto Bad_End;
1365
1366                 if (urb->actual_length == 0) {
1367                         ub_state_stat_counted(sc, cmd);
1368                         return;
1369                 }
1370
1371                 /*
1372                  * Check the returned Bulk protocol status.
1373                  * The status block has to be validated first.
1374                  */
1375
1376                 bcs = &sc->work_bcs;
1377
1378                 if (sc->signature == cpu_to_le32(0)) {
1379                         /*
1380                          * This is the first reply, so do not perform the check.
1381                          * Instead, remember the signature the device uses
1382                          * for future checks. But do not allow a nul.
1383                          */
1384                         sc->signature = bcs->Signature;
1385                         if (sc->signature == cpu_to_le32(0)) {
1386                                 ub_state_stat_counted(sc, cmd);
1387                                 return;
1388                         }
1389                 } else {
1390                         if (bcs->Signature != sc->signature) {
1391                                 ub_state_stat_counted(sc, cmd);
1392                                 return;
1393                         }
1394                 }
1395
1396                 if (bcs->Tag != cmd->tag) {
1397                         /*
1398                          * This usually happens when we disagree with the
1399                          * device's microcode about something. For instance,
1400                          * a few of them throw this after timeouts. They buffer
1401                          * commands and reply to commands that we timed out earlier.
1402                          * Without flushing these replies we loop forever.
1403                          */
1404                         ub_state_stat_counted(sc, cmd);
1405                         return;
1406                 }
1407
1408                 len = le32_to_cpu(bcs->Residue);
1409                 if (len != cmd->len - cmd->act_len) {
1410                         /*
1411                          * It is all right to transfer less, the caller has
1412                          * to check. But it's not all right if the device
1413                          * counts disagree with our counts.
1414                          */
1415                         /* P3 */ printk("%s: resid %d len %d act %d\n",
1416                             sc->name, len, cmd->len, cmd->act_len);
1417                         goto Bad_End;
1418                 }
1419
1420                 switch (bcs->Status) {
1421                 case US_BULK_STAT_OK:
1422                         break;
1423                 case US_BULK_STAT_FAIL:
1424                         ub_state_sense(sc, cmd);
1425                         return;
1426                 case US_BULK_STAT_PHASE:
1427                         /* P3 */ printk("%s: status PHASE\n", sc->name);
1428                         goto Bad_End;
1429                 default:
1430                         printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
1431                             sc->name, bcs->Status);
1432                         ub_state_done(sc, cmd, -EINVAL);
1433                         return;
1434                 }
1435
1436                 /* Not zeroing error to preserve a babble indicator */
1437                 if (cmd->error != 0) {
1438                         ub_state_sense(sc, cmd);
1439                         return;
1440                 }
1441                 cmd->state = UB_CMDST_DONE;
1442                 ub_cmdtr_state(sc, cmd);
1443                 ub_cmdq_pop(sc);
1444                 (*cmd->done)(sc, cmd);
1445
1446         } else if (cmd->state == UB_CMDST_SENSE) {
1447                 ub_state_done(sc, cmd, -EIO);
1448
1449         } else {
1450                 printk(KERN_WARNING "%s: "
1451                     "wrong command state %d\n",
1452                     sc->name, cmd->state);
1453                 ub_state_done(sc, cmd, -EINVAL);
1454                 return;
1455         }
1456         return;
1457
1458 Bad_End: /* Little Excel is dead */
1459         ub_state_done(sc, cmd, -EIO);
1460 }
1461
1462 /*
1463  * Factorization helper for the command state machine:
1464  * Initiate a data segment transfer.
1465  */
1466 static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1467 {
1468         struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
1469         int pipe;
1470         int rc;
1471
1472         UB_INIT_COMPLETION(sc->work_done);
1473
1474         if (cmd->dir == UB_DIR_READ)
1475                 pipe = sc->recv_bulk_pipe;
1476         else
1477                 pipe = sc->send_bulk_pipe;
1478         sc->last_pipe = pipe;
1479         usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
1480             page_address(sg->page) + sg->offset, sg->length,
1481             ub_urb_complete, sc);
1482         sc->work_urb.actual_length = 0;
1483         sc->work_urb.error_count = 0;
1484         sc->work_urb.status = 0;
1485
1486         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1487                 /* XXX Clear stalls */
1488                 ub_complete(&sc->work_done);
1489                 ub_state_done(sc, cmd, rc);
1490                 return;
1491         }
1492
1493         sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
1494         add_timer(&sc->work_timer);
1495
1496         cmd->state = UB_CMDST_DATA;
1497         ub_cmdtr_state(sc, cmd);
1498 }
1499
1500 /*
1501  * Factorization helper for the command state machine:
1502  * Finish the command.
1503  */
1504 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
1505 {
1506
1507         cmd->error = rc;
1508         cmd->state = UB_CMDST_DONE;
1509         ub_cmdtr_state(sc, cmd);
1510         ub_cmdq_pop(sc);
1511         (*cmd->done)(sc, cmd);
1512 }
1513
1514 /*
1515  * Factorization helper for the command state machine:
1516  * Submit a CSW read.
1517  */
1518 static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1519 {
1520         int rc;
1521
1522         UB_INIT_COMPLETION(sc->work_done);
1523
1524         sc->last_pipe = sc->recv_bulk_pipe;
1525         usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
1526             &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
1527         sc->work_urb.actual_length = 0;
1528         sc->work_urb.error_count = 0;
1529         sc->work_urb.status = 0;
1530
1531         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1532                 /* XXX Clear stalls */
1533                 ub_complete(&sc->work_done);
1534                 ub_state_done(sc, cmd, rc);
1535                 return -1;
1536         }
1537
1538         sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
1539         add_timer(&sc->work_timer);
1540         return 0;
1541 }
1542
1543 /*
1544  * Factorization helper for the command state machine:
1545  * Submit a CSW read and go to STAT state.
1546  */
1547 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1548 {
1549
1550         if (__ub_state_stat(sc, cmd) != 0)
1551                 return;
1552
1553         cmd->stat_count = 0;
1554         cmd->state = UB_CMDST_STAT;
1555         ub_cmdtr_state(sc, cmd);
1556 }
1557
1558 /*
1559  * Factorization helper for the command state machine:
1560  * Submit a CSW read and go to STAT state with counter (along [C] path).
1561  */
1562 static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1563 {
1564
1565         if (++cmd->stat_count >= 4) {
1566                 ub_state_sense(sc, cmd);
1567                 return;
1568         }
1569
1570         if (__ub_state_stat(sc, cmd) != 0)
1571                 return;
1572
1573         cmd->state = UB_CMDST_STAT;
1574         ub_cmdtr_state(sc, cmd);
1575 }
1576
1577 /*
1578  * Factorization helper for the command state machine:
1579  * Submit a REQUEST SENSE and go to SENSE state.
1580  */
1581 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1582 {
1583         struct ub_scsi_cmd *scmd;
1584         struct scatterlist *sg;
1585         int rc;
1586
1587         if (cmd->cdb[0] == REQUEST_SENSE) {
1588                 rc = -EPIPE;
1589                 goto error;
1590         }
1591
1592         scmd = &sc->top_rqs_cmd;
1593         memset(scmd, 0, sizeof(struct ub_scsi_cmd));
1594         scmd->cdb[0] = REQUEST_SENSE;
1595         scmd->cdb[4] = UB_SENSE_SIZE;
1596         scmd->cdb_len = 6;
1597         scmd->dir = UB_DIR_READ;
1598         scmd->state = UB_CMDST_INIT;
1599         scmd->nsg = 1;
1600         sg = &scmd->sgv[0];
1601         sg->page = virt_to_page(sc->top_sense);
1602         sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1);
1603         sg->length = UB_SENSE_SIZE;
1604         scmd->len = UB_SENSE_SIZE;
1605         scmd->lun = cmd->lun;
1606         scmd->done = ub_top_sense_done;
1607         scmd->back = cmd;
1608
1609         scmd->tag = sc->tagcnt++;
1610
1611         cmd->state = UB_CMDST_SENSE;
1612         ub_cmdtr_state(sc, cmd);
1613
1614         ub_cmdq_insert(sc, scmd);
1615         return;
1616
1617 error:
1618         ub_state_done(sc, cmd, rc);
1619 }
1620
1621 /*
1622  * A helper for the command's state machine:
1623  * Submit a stall clear.
1624  */
1625 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
1626     int stalled_pipe)
1627 {
1628         int endp;
1629         struct usb_ctrlrequest *cr;
1630         int rc;
1631
1632         endp = usb_pipeendpoint(stalled_pipe);
1633         if (usb_pipein(stalled_pipe))
1634                 endp |= USB_DIR_IN;
1635
1636         cr = &sc->work_cr;
1637         cr->bRequestType = USB_RECIP_ENDPOINT;
1638         cr->bRequest = USB_REQ_CLEAR_FEATURE;
1639         cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
1640         cr->wIndex = cpu_to_le16(endp);
1641         cr->wLength = cpu_to_le16(0);
1642
1643         UB_INIT_COMPLETION(sc->work_done);
1644
1645         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1646             (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
1647         sc->work_urb.actual_length = 0;
1648         sc->work_urb.error_count = 0;
1649         sc->work_urb.status = 0;
1650
1651         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1652                 ub_complete(&sc->work_done);
1653                 return rc;
1654         }
1655
1656         sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
1657         add_timer(&sc->work_timer);
1658         return 0;
1659 }
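
/*
 * Editor's illustration, not driver code: the SETUP packet assembled
 * above is the standard Clear Feature (ENDPOINT_HALT) request. For a
 * stalled bulk-in endpoint 1 it would go out on the wire as:
 *
 *      bmRequestType   0x02    (USB_RECIP_ENDPOINT)
 *      bRequest        0x01    (USB_REQ_CLEAR_FEATURE)
 *      wValue          0x0000  (USB_ENDPOINT_HALT)
 *      wIndex          0x0081  (endpoint 1 | USB_DIR_IN)
 *      wLength         0x0000
 */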
1660
1661 /* The ->done routine of the REQUEST SENSE issued by ub_state_sense. */
1663 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1664 {
1665         unsigned char *sense = sc->top_sense;
1666         struct ub_scsi_cmd *cmd;
1667
1668         /*
1669          * Ignoring scmd->act_len, because the buffer was pre-zeroed.
1670          */
1671         ub_cmdtr_sense(sc, scmd, sense);
1672
1673         /*
1674          * Find the command which triggered the unit attention or a check,
1675          * save the sense into it, and advance its state machine.
1676          */
1677         if ((cmd = ub_cmdq_peek(sc)) == NULL) {
1678                 printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
1679                 return;
1680         }
1681         if (cmd != scmd->back) {
1682                 printk(KERN_WARNING "%s: "
1683                     "sense done for wrong command 0x%x\n",
1684                     sc->name, cmd->tag);
1685                 return;
1686         }
1687         if (cmd->state != UB_CMDST_SENSE) {
1688                 printk(KERN_WARNING "%s: "
1689                     "sense done with bad cmd state %d\n",
1690                     sc->name, cmd->state);
1691                 return;
1692         }
1693
1694         cmd->key = sense[2] & 0x0F;
1695         cmd->asc = sense[12];
1696         cmd->ascq = sense[13];
1697
1698         ub_scsi_urb_compl(sc, cmd);
1699 }
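
/*
 * Editor's note, not driver code: the offsets picked out above follow
 * the SPC fixed-format sense data layout:
 *
 *      sense[0]        response code (0x70 current, 0x71 deferred)
 *      sense[2]        bits 3:0 hold the sense key (0x6 = UNIT ATTENTION)
 *      sense[12]       additional sense code (ASC)
 *      sense[13]       additional sense code qualifier (ASCQ)
 *
 * A freshly inserted medium typically reports key 0x6 with ASC/ASCQ
 * 28/00 ("not ready to ready change, medium may have changed").
 */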
1700
1701 /*
1702  * Reset management
1703  * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing.
1704  * XXX Make usb_sync_reset asynchronous.
1705  */
1706
1707 static void ub_reset_enter(struct ub_dev *sc, int try)
1708 {
1709
1710         if (sc->reset) {
1711                 /* This happens often on multi-LUN devices. */
1712                 return;
1713         }
1714         sc->reset = try + 1;
1715
1716 #if 0 /* Not needed because the disconnect waits for us. */
1717         unsigned long flags;
1718         spin_lock_irqsave(&ub_lock, flags);
1719         sc->openc++;
1720         spin_unlock_irqrestore(&ub_lock, flags);
1721 #endif
1722
1723 #if 0 /* We let them stop themselves. */
1724         struct list_head *p;
1725         struct ub_lun *lun;
1726         list_for_each(p, &sc->luns) {
1727                 lun = list_entry(p, struct ub_lun, link);
1728                 blk_stop_queue(lun->disk->queue);
1729         }
1730 #endif
1731
1732         schedule_work(&sc->reset_work);
1733 }
1734
1735 static void ub_reset_task(void *arg)
1736 {
1737         struct ub_dev *sc = arg;
1738         unsigned long flags;
1739         struct list_head *p;
1740         struct ub_lun *lun;
1741         int lkr, rc;
1742
1743         if (!sc->reset) {
1744                 printk(KERN_WARNING "%s: Running reset unrequested\n",
1745                     sc->name);
1746                 return;
1747         }
1748
1749         if (atomic_read(&sc->poison)) {
1750                 printk(KERN_NOTICE "%s: Not resetting disconnected device\n",
1751                     sc->name); /* P3 This floods. Remove soon. XXX */
1752         } else if ((sc->reset & 1) == 0) {
1753                 ub_sync_reset(sc);
1754                 msleep(700);    /* usb-storage sleeps 6s (!) */
1755                 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
1756                 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
1757         } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
1758                 printk(KERN_NOTICE "%s: Not resetting multi-interface device\n",
1759                     sc->name); /* P3 This floods. Remove soon. XXX */
1760         } else {
1761                 if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) {
1762                         printk(KERN_NOTICE
1763                             "%s: usb_lock_device_for_reset failed (%d)\n",
1764                             sc->name, lkr);
1765                 } else {
1766                         rc = usb_reset_device(sc->dev);
1767                         if (rc < 0) {
1768                                 printk(KERN_NOTICE "%s: "
1769                                     "usb_reset_device failed (%d)\n",
1770                                     sc->name, rc);
1771                         }
1772
1773                         if (lkr)
1774                                 usb_unlock_device(sc->dev);
1775                 }
1776         }
1777
1778         /*
1779          * In theory, no commands can be running while reset is active,
1780          * so nobody can ask for another reset, and so we do not need any
1781          * queues of resets or anything. We do need a spinlock though,
1782          * to interact with the block layer.
1783          */
1784         spin_lock_irqsave(sc->lock, flags);
1785         sc->reset = 0;
1786         tasklet_schedule(&sc->tasklet);
1787         list_for_each(p, &sc->luns) {
1788                 lun = list_entry(p, struct ub_lun, link);
1789                 blk_start_queue(lun->disk->queue);
1790         }
1791         wake_up(&sc->reset_wait);
1792         spin_unlock_irqrestore(sc->lock, flags);
1793 }
1794
1795 /*
1796  * This is called from a process context.
1797  */
1798 static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
1799 {
1800
1801         lun->readonly = 0;      /* XXX Query this from the device */
1802
1803         lun->capacity.nsec = 0;
1804         lun->capacity.bsize = 512;
1805         lun->capacity.bshift = 0;
1806
1807         if (ub_sync_tur(sc, lun) != 0)
1808                 return;                 /* Not ready */
1809         lun->changed = 0;
1810
1811         if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1812                 /*
1813                  * The retry here means something is wrong, either with the
1814                  * device, with the transport, or with our code.
1815                  * We keep this because sd.c has retries for capacity.
1816                  */
1817                 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1818                         lun->capacity.nsec = 0;
1819                         lun->capacity.bsize = 512;
1820                         lun->capacity.bshift = 0;
1821                 }
1822         }
1823 }
1824
1825 /*
1826  * The open function.
1827  * This is mostly needed to keep refcounting, but also to support
1828  * media checks on removable media drives.
1829  */
1830 static int ub_bd_open(struct inode *inode, struct file *filp)
1831 {
1832         struct gendisk *disk = inode->i_bdev->bd_disk;
1833         struct ub_lun *lun;
1834         struct ub_dev *sc;
1835         unsigned long flags;
1836         int rc;
1837
1838         if ((lun = disk->private_data) == NULL)
1839                 return -ENXIO;
1840         sc = lun->udev;
1841
1842         spin_lock_irqsave(&ub_lock, flags);
1843         if (atomic_read(&sc->poison)) {
1844                 spin_unlock_irqrestore(&ub_lock, flags);
1845                 return -ENXIO;
1846         }
1847         sc->openc++;
1848         spin_unlock_irqrestore(&ub_lock, flags);
1849
1850         if (lun->removable || lun->readonly)
1851                 check_disk_change(inode->i_bdev);
1852
1853         /*
1854          * sd.c considers ->media_present and ->changed non-equivalent
1855          * under some pretty murky conditions (a failure of READ CAPACITY).
1856          * We may need that distinction one day.
1857          */
1858         if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) {
1859                 rc = -ENOMEDIUM;
1860                 goto err_open;
1861         }
1862
1863         if (lun->readonly && (filp->f_mode & FMODE_WRITE)) {
1864                 rc = -EROFS;
1865                 goto err_open;
1866         }
1867
1868         return 0;
1869
1870 err_open:
1871         ub_put(sc);
1872         return rc;
1873 }
1874
1875 /* The release function: drop the reference taken by ub_bd_open. */
1877 static int ub_bd_release(struct inode *inode, struct file *filp)
1878 {
1879         struct gendisk *disk = inode->i_bdev->bd_disk;
1880         struct ub_lun *lun = disk->private_data;
1881         struct ub_dev *sc = lun->udev;
1882
1883         ub_put(sc);
1884         return 0;
1885 }
1886
1887 /*
1888  * The ioctl interface.
1889  */
1890 static int ub_bd_ioctl(struct inode *inode, struct file *filp,
1891     unsigned int cmd, unsigned long arg)
1892 {
1893         struct gendisk *disk = inode->i_bdev->bd_disk;
1894         void __user *usermem = (void __user *) arg;
1895
1896         return scsi_cmd_ioctl(filp, disk, cmd, usermem);
1897 }
1898
1899 /*
1900  * This is called once a new disk has been seen by the block layer or by ub_probe().
1901  * The main objective here is to discover the features of the media, such as
1902  * the capacity, read-only status, etc. USB storage generally does not
1903  * need to be spun up, but if we needed it, this would be the place.
1904  *
1905  * This call can sleep.
1906  *
1907  * The return code is not used.
1908  */
1909 static int ub_bd_revalidate(struct gendisk *disk)
1910 {
1911         struct ub_lun *lun = disk->private_data;
1912
1913         ub_revalidate(lun->udev, lun);
1914
1915         /* XXX Support sector size switching like in sr.c */
1916         blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
1917         set_capacity(disk, lun->capacity.nsec);
1918         // set_disk_ro(sdkp->disk, lun->readonly);
1919
1920         return 0;
1921 }
1922
1923 /*
1924  * This check is called by the block layer to verify whether the media
1925  * is still available. It is supposed to be harmless, lightweight, and
1926  * non-intrusive when the media has not changed.
1927  *
1928  * This call can sleep.
1929  *
1930  * The return code is bool!
1931  */
1932 static int ub_bd_media_changed(struct gendisk *disk)
1933 {
1934         struct ub_lun *lun = disk->private_data;
1935
1936         if (!lun->removable)
1937                 return 0;
1938
1939         /*
1940          * We always clear checks after every command, so this is not
1941          * as dangerous as it looks. If TEST_UNIT_READY fails here,
1942          * the device really is not ready, and operator or software
1943          * intervention is required. One dangerous case is a drive which
1944          * spins itself down: come the time to write dirty pages, the
1945          * writes fail and the block layer discards the data. Since we
1946          * never spin drives up, such devices cannot be used with ub anyway.
1947          */
1948         if (ub_sync_tur(lun->udev, lun) != 0) {
1949                 lun->changed = 1;
1950                 return 1;
1951         }
1952
1953         return lun->changed;
1954 }
1955
1956 static struct block_device_operations ub_bd_fops = {
1957         .owner          = THIS_MODULE,
1958         .open           = ub_bd_open,
1959         .release        = ub_bd_release,
1960         .ioctl          = ub_bd_ioctl,
1961         .media_changed  = ub_bd_media_changed,
1962         .revalidate_disk = ub_bd_revalidate,
1963 };
1964
1965 /*
1966  * Common ->done routine for commands executed synchronously.
1967  */
1968 static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1969 {
1970         struct completion *cop = cmd->back;
1971         complete(cop);
1972 }
1973
1974 /*
1975  * Test if the device has a check condition on it, synchronously.
1976  */
1977 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
1978 {
1979         struct ub_scsi_cmd *cmd;
1980         enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
1981         unsigned long flags;
1982         struct completion compl;
1983         int rc;
1984
1985         init_completion(&compl);
1986
1987         rc = -ENOMEM;
1988         if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1989                 goto err_alloc;
1990
1991         cmd->cdb[0] = TEST_UNIT_READY;
1992         cmd->cdb_len = 6;
1993         cmd->dir = UB_DIR_NONE;
1994         cmd->state = UB_CMDST_INIT;
1995         cmd->lun = lun;                 /* This may be NULL, but that's ok */
1996         cmd->done = ub_probe_done;
1997         cmd->back = &compl;
1998
1999         spin_lock_irqsave(sc->lock, flags);
2000         cmd->tag = sc->tagcnt++;
2001
2002         rc = ub_submit_scsi(sc, cmd);
2003         spin_unlock_irqrestore(sc->lock, flags);
2004
2005         if (rc != 0) {
2006                 printk("ub: testing ready: submit error (%d)\n", rc); /* P3 */
2007                 goto err_submit;
2008         }
2009
2010         wait_for_completion(&compl);
2011
2012         rc = cmd->error;
2013
2014         if (rc == -EIO && cmd->key != 0)        /* Retries for benh's key */
2015                 rc = cmd->key;
2016
2017 err_submit:
2018         kfree(cmd);
2019 err_alloc:
2020         return rc;
2021 }
2022
2023 /*
2024  * Read the SCSI capacity synchronously (for probing).
2025  */
2026 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
2027     struct ub_capacity *ret)
2028 {
2029         struct ub_scsi_cmd *cmd;
2030         struct scatterlist *sg;
2031         char *p;
2032         enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
2033         unsigned long flags;
2034         unsigned int bsize, shift;
2035         unsigned long nsec;
2036         struct completion compl;
2037         int rc;
2038
2039         init_completion(&compl);
2040
2041         rc = -ENOMEM;
2042         if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
2043                 goto err_alloc;
2044         p = (char *)cmd + sizeof(struct ub_scsi_cmd);
2045
2046         cmd->cdb[0] = 0x25;             /* READ CAPACITY(10) */
2047         cmd->cdb_len = 10;
2048         cmd->dir = UB_DIR_READ;
2049         cmd->state = UB_CMDST_INIT;
2050         cmd->nsg = 1;
2051         sg = &cmd->sgv[0];
2052         sg->page = virt_to_page(p);
2053         sg->offset = (unsigned long)p & (PAGE_SIZE-1);
2054         sg->length = 8;
2055         cmd->len = 8;
2056         cmd->lun = lun;
2057         cmd->done = ub_probe_done;
2058         cmd->back = &compl;
2059
2060         spin_lock_irqsave(sc->lock, flags);
2061         cmd->tag = sc->tagcnt++;
2062
2063         rc = ub_submit_scsi(sc, cmd);
2064         spin_unlock_irqrestore(sc->lock, flags);
2065
2066         if (rc != 0) {
2067                 printk("ub: reading capacity: submit error (%d)\n", rc); /* P3 */
2068                 goto err_submit;
2069         }
2070
2071         wait_for_completion(&compl);
2072
2073         if (cmd->error != 0) {
2074                 printk("ub: reading capacity: error %d\n", cmd->error); /* P3 */
2075                 rc = -EIO;
2076                 goto err_read;
2077         }
2078         if (cmd->act_len != 8) {
2079                 printk("ub: reading capacity: size %d\n", cmd->act_len); /* P3 */
2080                 rc = -EIO;
2081                 goto err_read;
2082         }
2083
2084         /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
2085         nsec = be32_to_cpu(*(__be32 *)p) + 1;
2086         bsize = be32_to_cpu(*(__be32 *)(p + 4));
2087         switch (bsize) {
2088         case 512:       shift = 0;      break;
2089         case 1024:      shift = 1;      break;
2090         case 2048:      shift = 2;      break;
2091         case 4096:      shift = 3;      break;
2092         default:
2093                 printk("ub: Bad sector size %u\n", bsize); /* P3 */
2094                 rc = -EDOM;
2095                 goto err_inv_bsize;
2096         }
2097
2098         ret->bsize = bsize;
2099         ret->bshift = shift;
2100         ret->nsec = nsec << shift;
2101         rc = 0;
2102
2103 err_inv_bsize:
2104 err_read:
2105 err_submit:
2106         kfree(cmd);
2107 err_alloc:
2108         return rc;
2109 }
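
/*
 * Editor's note, not driver code: the 8-byte READ CAPACITY(10) reply
 * decoded above consists of two big-endian words:
 *
 *      bytes 0..3      LBA of the last block (hence the "+ 1" above)
 *      bytes 4..7      block length in bytes
 *
 * ub_capacity.nsec counts 512-byte sectors, so a medium with 1000
 * blocks of 2048 bytes is reported as nsec = 1000 << 2 = 4000.
 */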
2110
2111 /* Completion callback for URBs submitted synchronously at probe time. */
2113 static void ub_probe_urb_complete(struct urb *urb, struct pt_regs *pt)
2114 {
2115         struct completion *cop = urb->context;
2116         complete(cop);
2117 }
2118
2119 static void ub_probe_timeout(unsigned long arg)
2120 {
2121         struct completion *cop = (struct completion *) arg;
2122         complete(cop);
2123 }
2124
2125 /*
2126  * Reset with a Bulk reset.
2127  */
2128 static int ub_sync_reset(struct ub_dev *sc)
2129 {
2130         int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
2131         struct usb_ctrlrequest *cr;
2132         struct completion compl;
2133         struct timer_list timer;
2134         int rc;
2135
2136         init_completion(&compl);
2137
2138         cr = &sc->work_cr;
2139         cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
2140         cr->bRequest = US_BULK_RESET_REQUEST;
2141         cr->wValue = cpu_to_le16(0);
2142         cr->wIndex = cpu_to_le16(ifnum);
2143         cr->wLength = cpu_to_le16(0);
2144
2145         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2146             (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2147         sc->work_urb.actual_length = 0;
2148         sc->work_urb.error_count = 0;
2149         sc->work_urb.status = 0;
2150
2151         if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2152                 printk(KERN_WARNING
2153                      "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
2154                 return rc;
2155         }
2156
2157         init_timer(&timer);
2158         timer.function = ub_probe_timeout;
2159         timer.data = (unsigned long) &compl;
2160         timer.expires = jiffies + UB_CTRL_TIMEOUT;
2161         add_timer(&timer);
2162
2163         wait_for_completion(&compl);
2164
2165         del_timer_sync(&timer);
2166         usb_kill_urb(&sc->work_urb);
2167
2168         return sc->work_urb.status;
2169 }
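
/*
 * Editor's illustration, not driver code: the request built above is
 * the Bulk-Only Mass Storage Reset from the USB mass storage class
 * (Bulk-Only transport) specification:
 *
 *      bmRequestType   0x21    (USB_TYPE_CLASS | USB_RECIP_INTERFACE)
 *      bRequest        0xff    (US_BULK_RESET_REQUEST)
 *      wValue          0x0000
 *      wIndex          bInterfaceNumber
 *      wLength         0x0000
 */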
2170
2171 /*
2172  * Get number of LUNs by the way of Bulk GetMaxLUN command.
2173  */
2174 static int ub_sync_getmaxlun(struct ub_dev *sc)
2175 {
2176         int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
2177         unsigned char *p;
2178         enum { ALLOC_SIZE = 1 };
2179         struct usb_ctrlrequest *cr;
2180         struct completion compl;
2181         struct timer_list timer;
2182         int nluns;
2183         int rc;
2184
2185         init_completion(&compl);
2186
2187         rc = -ENOMEM;
2188         if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
2189                 goto err_alloc;
2190         *p = 55;                /* preset so an unwritten reply is detectable */
2191
2192         cr = &sc->work_cr;
2193         cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
2194         cr->bRequest = US_BULK_GET_MAX_LUN;
2195         cr->wValue = cpu_to_le16(0);
2196         cr->wIndex = cpu_to_le16(ifnum);
2197         cr->wLength = cpu_to_le16(1);
2198
2199         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
2200             (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
2201         sc->work_urb.actual_length = 0;
2202         sc->work_urb.error_count = 0;
2203         sc->work_urb.status = 0;
2204
2205         if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2206                 if (rc == -EPIPE) {
2207                         printk("%s: Stall submitting GetMaxLUN, using 1 LUN\n",
2208                              sc->name); /* P3 */
2209                 } else {
2210                         printk(KERN_NOTICE
2211                              "%s: Unable to submit GetMaxLUN (%d)\n",
2212                              sc->name, rc);
2213                 }
2214                 goto err_submit;
2215         }
2216
2217         init_timer(&timer);
2218         timer.function = ub_probe_timeout;
2219         timer.data = (unsigned long) &compl;
2220         timer.expires = jiffies + UB_CTRL_TIMEOUT;
2221         add_timer(&timer);
2222
2223         wait_for_completion(&compl);
2224
2225         del_timer_sync(&timer);
2226         usb_kill_urb(&sc->work_urb);
2227
2228         if ((rc = sc->work_urb.status) < 0) {
2229                 if (rc == -EPIPE) {
2230                         printk("%s: Stall at GetMaxLUN, using 1 LUN\n",
2231                              sc->name); /* P3 */
2232                 } else {
2233                         printk(KERN_NOTICE
2234                              "%s: Error at GetMaxLUN (%d)\n",
2235                              sc->name, rc);
2236                 }
2237                 goto err_io;
2238         }
2239
2240         if (sc->work_urb.actual_length != 1) {
2241                 printk("%s: GetMaxLUN returned %d bytes\n", sc->name,
2242                     sc->work_urb.actual_length); /* P3 */
2243                 nluns = 0;
2244         } else {
2245                 if ((nluns = *p) == 55) {
2246                         nluns = 0;
2247                 } else {
2248                         /* GetMaxLUN returns the maximum LUN number */
2249                         nluns += 1;
2250                         if (nluns > UB_MAX_LUNS)
2251                                 nluns = UB_MAX_LUNS;
2252                 }
2253                 printk("%s: GetMaxLUN returned %d, using %d LUNs\n", sc->name,
2254                     *p, nluns); /* P3 */
2255         }
2256
2257         kfree(p);
2258         return nluns;
2259
2260 err_io:
2261 err_submit:
2262         kfree(p);
2263 err_alloc:
2264         return rc;
2265 }
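
/*
 * Editor's sketch, not part of the driver: the reply handling above,
 * reduced to its core. GetMaxLUN returns the highest LUN index, so a
 * reply of 0 still means one usable LUN. The helper name is ours.
 */
static inline int ub_maxlun_to_nluns(unsigned char reply)
{
        int nluns = reply + 1;          /* last LUN index -> LUN count */

        if (nluns > UB_MAX_LUNS)        /* clamp to what ub supports */
                nluns = UB_MAX_LUNS;
        return nluns;
}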
2266
2267 /*
2268  * Clear initial stalls.
2269  */
2270 static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
2271 {
2272         int endp;
2273         struct usb_ctrlrequest *cr;
2274         struct completion compl;
2275         struct timer_list timer;
2276         int rc;
2277
2278         init_completion(&compl);
2279
2280         endp = usb_pipeendpoint(stalled_pipe);
2281         if (usb_pipein(stalled_pipe))
2282                 endp |= USB_DIR_IN;
2283
2284         cr = &sc->work_cr;
2285         cr->bRequestType = USB_RECIP_ENDPOINT;
2286         cr->bRequest = USB_REQ_CLEAR_FEATURE;
2287         cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
2288         cr->wIndex = cpu_to_le16(endp);
2289         cr->wLength = cpu_to_le16(0);
2290
2291         usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2292             (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2293         sc->work_urb.actual_length = 0;
2294         sc->work_urb.error_count = 0;
2295         sc->work_urb.status = 0;
2296
2297         if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2298                 printk(KERN_WARNING
2299                      "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
2300                 return rc;
2301         }
2302
2303         init_timer(&timer);
2304         timer.function = ub_probe_timeout;
2305         timer.data = (unsigned long) &compl;
2306         timer.expires = jiffies + UB_CTRL_TIMEOUT;
2307         add_timer(&timer);
2308
2309         wait_for_completion(&compl);
2310
2311         del_timer_sync(&timer);
2312         usb_kill_urb(&sc->work_urb);
2313
2314         /* reset the endpoint toggle */
2315         usb_settoggle(sc->dev, usb_pipeendpoint(stalled_pipe), usb_pipeout(stalled_pipe), 0);
2316
2317         return 0;
2318 }
2319
2320 /*
2321  * Get the pipe settings.
2322  */
2323 static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2324     struct usb_interface *intf)
2325 {
2326         struct usb_host_interface *altsetting = intf->cur_altsetting;
2327         struct usb_endpoint_descriptor *ep_in = NULL;
2328         struct usb_endpoint_descriptor *ep_out = NULL;
2329         struct usb_endpoint_descriptor *ep;
2330         int i;
2331
2332         /*
2333          * Find the endpoints we need.
2334          * We are expecting a minimum of 2 endpoints - in and out (bulk).
2335          * We will ignore any others.
2336          */
2337         for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
2338                 ep = &altsetting->endpoint[i].desc;
2339
2340                 /* Is it a BULK endpoint? */
2341                 if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
2342                                 == USB_ENDPOINT_XFER_BULK) {
2343                         /* BULK in or out? */
2344                         if (ep->bEndpointAddress & USB_DIR_IN)
2345                                 ep_in = ep;
2346                         else
2347                                 ep_out = ep;
2348                 }
2349         }
2350
2351         if (ep_in == NULL || ep_out == NULL) {
2352                 printk(KERN_NOTICE "%s: failed endpoint check\n",
2353                     sc->name);
2354                 return -ENODEV;
2355         }
2356
2357         /* Calculate and store the pipe values */
2358         sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
2359         sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
2360         sc->send_bulk_pipe = usb_sndbulkpipe(dev,
2361                 ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2362         sc->recv_bulk_pipe = usb_rcvbulkpipe(dev, 
2363                 ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2364
2365         return 0;
2366 }
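
/*
 * Editor's sketch, not part of the driver: the attribute test above as
 * a predicate. Later kernels provide usb_endpoint_is_bulk_in() and
 * usb_endpoint_is_bulk_out() for this; this tree predates them, hence
 * the open-coded check.
 */
static inline int ub_ep_is_bulk(const struct usb_endpoint_descriptor *ep)
{
        return (ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
                        == USB_ENDPOINT_XFER_BULK;
}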
2367
2368 /*
2369  * Probing is done in the process context, which allows us to cheat
2370  * and not build a state machine for the discovery.
2371  */
2372 static int ub_probe(struct usb_interface *intf,
2373     const struct usb_device_id *dev_id)
2374 {
2375         struct ub_dev *sc;
2376         int nluns;
2377         int rc;
2378         int i;
2379
2380         if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
2381                 return -ENXIO;
2382
2383         rc = -ENOMEM;
2384         if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
2385                 goto err_core;
2386         sc->lock = ub_next_lock();
2387         INIT_LIST_HEAD(&sc->luns);
2388         usb_init_urb(&sc->work_urb);
2389         tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
2390         atomic_set(&sc->poison, 0);
2391         INIT_WORK(&sc->reset_work, ub_reset_task, sc);
2392         init_waitqueue_head(&sc->reset_wait);
2393
2394         init_timer(&sc->work_timer);
2395         sc->work_timer.data = (unsigned long) sc;
2396         sc->work_timer.function = ub_urb_timeout;
2397
2398         ub_init_completion(&sc->work_done);
2399         sc->work_done.done = 1;         /* A little yuk, but oh well... */
2400
2401         sc->dev = interface_to_usbdev(intf);
2402         sc->intf = intf;
2403         // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
2404         usb_set_intfdata(intf, sc);
2405         usb_get_dev(sc->dev);
2406         // usb_get_intf(sc->intf);      /* Do we need this? */
2407
2408         snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
2409             sc->dev->bus->busnum, sc->dev->devnum);
2410
2411         /* XXX Verify that we can handle the device (from descriptors) */
2412
2413         if (ub_get_pipes(sc, sc->dev, intf) != 0)
2414                 goto err_dev_desc;
2415
2416         if (device_create_file(&sc->intf->dev, &dev_attr_diag) != 0)
2417                 goto err_diag;
2418
2419         /*
2420          * At this point, all USB initialization is done, do upper layer.
2421          * We really hate halfway initialized structures, so from the
2422          * invariants perspective, this ub_dev is fully constructed at
2423          * this point.
2424          */
2425
2426         /*
2427          * This is needed to clear toggles. It is a problem only if we do
2428          * `rmmod ub && modprobe ub` without disconnects, but we like that.
2429          */
2430 #if 0 /* iPod Mini fails if we do this (big white iPod works) */
2431         ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2432         ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2433 #endif
2434
2435         /*
2436          * The way this is used by the startup code is a little specific.
2437          * A SCSI check causes a USB stall. Our common case code sees it
2438          * and clears the check, after which the device is ready for use.
2439          * But if a check was not present, any command other than
2440          * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
2441          *
2442          * If we neglect to clear the SCSI check, the first real command fails
2443          * (which is the capacity readout). We would clear that and retry,
2444          * but why cause spurious retries for no reason?
2445          *
2446          * Revalidation may start with its own TEST_UNIT_READY, but that one
2447          * has to succeed, so we clear checks with an additional one here.
2448          * In any case it's not our business how revalidation is implemented.
2449          */
2450         for (i = 0; i < 3; i++) {       /* Retries for benh's key */
2451                 if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
2452                 if (rc != 0x6) break;   /* 0x6: UNIT ATTENTION */
2453                 msleep(10);
2454         }
2455
2456         nluns = 1;
2457         for (i = 0; i < 3; i++) {
2458                 if ((rc = ub_sync_getmaxlun(sc)) < 0) {
2459                         /* 
2460                          * This segment is taken from usb-storage. They say
2461                          * that ZIP-100 needs this, but my own ZIP-100 works
2462                          * fine without this.
2463                          * Still, it does not seem to hurt anything.
2464                          */
2465                         if (rc == -EPIPE) {
2466                                 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2467                                 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2468                         }
2469                         break;
2470                 }
2471                 if (rc != 0) {
2472                         nluns = rc;
2473                         break;
2474                 }
2475                 msleep(100);
2476         }
2477
2478         for (i = 0; i < nluns; i++) {
2479                 ub_probe_lun(sc, i);
2480         }
2481         return 0;
2482
2483         /* device_remove_file(&sc->intf->dev, &dev_attr_diag); */
2484 err_diag:
2485 err_dev_desc:
2486         usb_set_intfdata(intf, NULL);
2487         // usb_put_intf(sc->intf);
2488         usb_put_dev(sc->dev);
2489         kfree(sc);
2490 err_core:
2491         return rc;
2492 }
2493
2494 static int ub_probe_lun(struct ub_dev *sc, int lnum)
2495 {
2496         struct ub_lun *lun;
2497         request_queue_t *q;
2498         struct gendisk *disk;
2499         int rc;
2500
2501         rc = -ENOMEM;
2502         if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
2503                 goto err_alloc;
2504         lun->num = lnum;
2505
2506         rc = -ENOSR;
2507         if ((lun->id = ub_id_get()) == -1)
2508                 goto err_id;
2509
2510         lun->udev = sc;
2511         list_add(&lun->link, &sc->luns);
2512
2513         snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
2514             lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
2515
2516         lun->removable = 1;             /* XXX Query this from the device */
2517         lun->changed = 1;               /* ub_revalidate clears only */
2518         ub_revalidate(sc, lun);
2519
2520         rc = -ENOMEM;
2521         if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
2522                 goto err_diskalloc;
2523
2524         lun->disk = disk;
2525         sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
2526         sprintf(disk->devfs_name, DEVFS_NAME "/%c", lun->id + 'a');
2527         disk->major = UB_MAJOR;
2528         disk->first_minor = lun->id * UB_PARTS_PER_LUN;
2529         disk->fops = &ub_bd_fops;
2530         disk->private_data = lun;
2531         disk->driverfs_dev = &sc->intf->dev;
2532
2533         rc = -ENOMEM;
2534         if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
2535                 goto err_blkqinit;
2536
2537         disk->queue = q;
2538
2539         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
2540         blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
2541         blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
2542         blk_queue_segment_boundary(q, 0xffffffff);      /* Dubious. */
2543         blk_queue_max_sectors(q, UB_MAX_SECTORS);
2544         blk_queue_hardsect_size(q, lun->capacity.bsize);
2545
2546         q->queuedata = lun;
2547
2548         set_capacity(disk, lun->capacity.nsec);
2549         if (lun->removable)
2550                 disk->flags |= GENHD_FL_REMOVABLE;
2551
2552         add_disk(disk);
2553
2554         return 0;
2555
2556 err_blkqinit:
2557         put_disk(disk);
2558 err_diskalloc:
2559         list_del(&lun->link);
2560         ub_id_put(lun->id);
2561 err_id:
2562         kfree(lun);
2563 err_alloc:
2564         return rc;
2565 }
2566
2567 static void ub_disconnect(struct usb_interface *intf)
2568 {
2569         struct ub_dev *sc = usb_get_intfdata(intf);
2570         struct list_head *p;
2571         struct ub_lun *lun;
2572         struct gendisk *disk;
2573         unsigned long flags;
2574
2575         /*
2576          * Prevent ub_bd_release from pulling the rug from under us.
2577          * XXX This is starting to look like a kref.
2578          * XXX Why not to take this ref at probe time?
2579          */
2580         spin_lock_irqsave(&ub_lock, flags);
2581         sc->openc++;
2582         spin_unlock_irqrestore(&ub_lock, flags);
2583
2584         /*
2585          * Fence stall clearings, operations triggered by unlinks, and so on.
2586          * We do not attempt to unlink any URBs, because we do not trust the
2587          * unlink paths in HC drivers. Also, we get -84 (-EILSEQ) upon disconnect anyway.
2588          */
2589         atomic_set(&sc->poison, 1);
2590
2591         /*
2592          * Wait for reset to end, if any.
2593          */
2594         wait_event(sc->reset_wait, !sc->reset);
2595
2596         /*
2597          * Blow away queued commands.
2598          *
2599          * Actually, this never works, because before we get here
2600          * the HCD terminates outstanding URB(s). That causes our
2601          * SCSI command queue to advance: commands fail to submit
2602          * and the whole queue drains. So, we just use this code to
2603          * print warnings.
2604          */
2605         spin_lock_irqsave(sc->lock, flags);
2606         {
2607                 struct ub_scsi_cmd *cmd;
2608                 int cnt = 0;
2609                 while ((cmd = ub_cmdq_peek(sc)) != NULL) {
2610                         cmd->error = -ENOTCONN;
2611                         cmd->state = UB_CMDST_DONE;
2612                         ub_cmdtr_state(sc, cmd);
2613                         ub_cmdq_pop(sc);
2614                         (*cmd->done)(sc, cmd);
2615                         cnt++;
2616                 }
2617                 if (cnt != 0) {
2618                         printk(KERN_WARNING "%s: "
2619                             "%d commands were queued after shutdown\n", sc->name, cnt);
2620                 }
2621         }
2622         spin_unlock_irqrestore(sc->lock, flags);
2623
2624         /*
2625          * Unregister the upper layer.
2626          */
2627         list_for_each (p, &sc->luns) {
2628                 lun = list_entry(p, struct ub_lun, link);
2629                 disk = lun->disk;
2630                 if (disk->flags & GENHD_FL_UP)
2631                         del_gendisk(disk);
2632                 /*
2633                  * I wish I could do:
2634                  *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
2635                  * As it is, we rely on our internal poisoning and let
2636          * the upper levels spin furiously, failing all the I/O.
2637                  */
2638         }
2639
2640         /*
2641          * Testing for -EINPROGRESS is always a bug, so we are bending
2642          * the rules a little.
2643          */
2644         spin_lock_irqsave(sc->lock, flags);
2645         if (sc->work_urb.status == -EINPROGRESS) {      /* janitors: ignore */
2646                 printk(KERN_WARNING "%s: "
2647                     "URB is active after disconnect\n", sc->name);
2648         }
2649         spin_unlock_irqrestore(sc->lock, flags);
2650
2651         /*
2652          * There is virtually no chance that another CPU is still running the
2653          * timeout this long after ub_urb_complete should have called del_timer,
2654          * provided the HCD did not forget to deliver a callback on unlink.
2655          */
2656         del_timer_sync(&sc->work_timer);
2657
2658         /*
2659          * At this point there must be no commands coming from anyone
2660          * and no URBs left in transit.
2661          */
2662
2663         device_remove_file(&sc->intf->dev, &dev_attr_diag);
2664         usb_set_intfdata(intf, NULL);
2665         // usb_put_intf(sc->intf);
2666         sc->intf = NULL;
2667         usb_put_dev(sc->dev);
2668         sc->dev = NULL;
2669
2670         ub_put(sc);
2671 }
2672
2673 static struct usb_driver ub_driver = {
2674         .name =         "ub",
2675         .probe =        ub_probe,
2676         .disconnect =   ub_disconnect,
2677         .id_table =     ub_usb_ids,
2678 };
2679
2680 static int __init ub_init(void)
2681 {
2682         int rc;
2683         int i;
2684
2685         for (i = 0; i < UB_QLOCK_NUM; i++)
2686                 spin_lock_init(&ub_qlockv[i]);
2687
2688         if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2689                 goto err_regblkdev;
2690         devfs_mk_dir(DEVFS_NAME);
2691
2692         if ((rc = usb_register(&ub_driver)) != 0)
2693                 goto err_register;
2694
2695         usb_usual_set_present(USB_US_TYPE_UB);
2696         return 0;
2697
2698 err_register:
2699         devfs_remove(DEVFS_NAME);
2700         unregister_blkdev(UB_MAJOR, DRV_NAME);
2701 err_regblkdev:
2702         return rc;
2703 }
2704
2705 static void __exit ub_exit(void)
2706 {
2707         usb_deregister(&ub_driver);
2708
2709         devfs_remove(DEVFS_NAME);
2710         unregister_blkdev(UB_MAJOR, DRV_NAME);
2711         usb_usual_clear_present(USB_US_TYPE_UB);
2712 }
2713
2714 module_init(ub_init);
2715 module_exit(ub_exit);
2716
2717 MODULE_LICENSE("GPL");