path: root/drivers/block
author	Cong Wang <xiyou.wangcong@gmail.com>	2012-08-26 02:40:07 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2012-09-05 20:18:53 -0400
commit	68a5059ecf82cc9d52a86fb523584b4d485f1bbe (patch)
tree	445fb388ac37a181c84b13aafbddc774ac246026 /drivers/block
parent	89bb957ec8b054ea9994974f03848cb2f129f50c (diff)
block: remove the deprecated ub driver
It was scheduled to be removed in 3.6.

Acked-by: Pete Zaitcev <zaitcev@redhat.com>
Cc: Jens Axboe <jaxboe@fusionio.com>
Cc: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/Kconfig   |   12 -
-rw-r--r--  drivers/block/Makefile  |    1 -
-rw-r--r--  drivers/block/ub.c      | 2474 -
3 files changed, 0 insertions, 2487 deletions
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index a796407123c7..f529407db93f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -353,18 +353,6 @@ config BLK_DEV_SX8
353 353
354 354	  Use devices /dev/sx8/$N and /dev/sx8/$Np$M.
355 355
356config BLK_DEV_UB
357 tristate "Low Performance USB Block driver (deprecated)"
358 depends on USB
359 help
360 This driver supports certain USB attached storage devices
361 such as flash keys.
362
363 If you enable this driver, it is recommended to avoid conflicts
364 with usb-storage by enabling USB_LIBUSUAL.
365
366 If unsure, say N.
367
368 356 config BLK_DEV_RAM
369 357 	tristate "RAM block device support"
370 358 	---help---
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 5b795059f8fb..17e82df3df74 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
33 33
34 34 obj-$(CONFIG_VIODASD) += viodasd.o
35 35 obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
36obj-$(CONFIG_BLK_DEV_UB) += ub.o
37 36 obj-$(CONFIG_BLK_DEV_HD) += hd.o
38 37
39 38 obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
deleted file mode 100644
index fcec0225ac76..000000000000
--- a/drivers/block/ub.c
+++ /dev/null
@@ -1,2474 +0,0 @@
1/*
2 * The low performance USB storage driver (ub).
3 *
4 * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
5 * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
6 *
7 * This work is a part of Linux kernel, is derived from it,
8 * and is not licensed separately. See file COPYING for details.
9 *
10 * TODO (sorted by decreasing priority)
11 * -- Return sense now that rq allows it (we always auto-sense anyway).
12 * -- set readonly flag for CDs, set removable flag for CF readers
13 * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
14 * -- verify the 13 conditions and do bulk resets
15 * -- highmem
16 * -- move top_sense and work_bcs into separate allocations (if they survive)
17 * for cache purists and esoteric architectures.
18 * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
19 * -- prune comments, they are too voluminous
20 * -- Resolve XXX's
21 * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
22 */
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/usb.h>
26#include <linux/usb_usual.h>
27#include <linux/blkdev.h>
28#include <linux/timer.h>
29#include <linux/scatterlist.h>
30#include <linux/slab.h>
31#include <linux/mutex.h>
32#include <scsi/scsi.h>
33
34#define DRV_NAME "ub"
35
36#define UB_MAJOR 180
37
38/*
39 * The command state machine is the key model for understanding of this driver.
40 *
41 * The general rule is that all transitions are done towards the bottom
42 * of the diagram, thus preventing any loops.
43 *
44 * An exception to that is how the STAT state is handled. A counter allows it
45 * to be re-entered along the path marked with [C].
46 *
47 * +--------+
48 * ! INIT !
49 * +--------+
50 * !
51 * ub_scsi_cmd_start fails ->--------------------------------------\
52 * ! !
53 * V !
54 * +--------+ !
55 * ! CMD ! !
56 * +--------+ !
57 * ! +--------+ !
58 * was -EPIPE -->-------------------------------->! CLEAR ! !
59 * ! +--------+ !
60 * ! ! !
61 * was error -->------------------------------------- ! --------->\
62 * ! ! !
63 * /--<-- cmd->dir == NONE ? ! !
64 * ! ! ! !
65 * ! V ! !
66 * ! +--------+ ! !
67 * ! ! DATA ! ! !
68 * ! +--------+ ! !
69 * ! ! +---------+ ! !
70 * ! was -EPIPE -->--------------->! CLR2STS ! ! !
71 * ! ! +---------+ ! !
72 * ! ! ! ! !
73 * ! ! was error -->---- ! --------->\
74 * ! was error -->--------------------- ! ------------- ! --------->\
75 * ! ! ! ! !
76 * ! V ! ! !
77 * \--->+--------+ ! ! !
78 * ! STAT !<--------------------------/ ! !
79 * /--->+--------+ ! !
80 * ! ! ! !
81 * [C] was -EPIPE -->-----------\ ! !
82 * ! ! ! ! !
83 * +<---- len == 0 ! ! !
84 * ! ! ! ! !
85 * ! was error -->--------------------------------------!---------->\
86 * ! ! ! ! !
87 * +<---- bad CSW ! ! !
88 * +<---- bad tag ! ! !
89 * ! ! V ! !
90 * ! ! +--------+ ! !
91 * ! ! ! CLRRS ! ! !
92 * ! ! +--------+ ! !
93 * ! ! ! ! !
94 * \------- ! --------------------[C]--------\ ! !
95 * ! ! ! !
96 * cmd->error---\ +--------+ ! !
97 * ! +--------------->! SENSE !<----------/ !
98 * STAT_FAIL----/ +--------+ !
99 * ! ! V
100 * ! V +--------+
101 * \--------------------------------\--------------------->! DONE !
102 * +--------+
103 */
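As a reading aid: the enum ub_scsi_cmd_state defined below is laid out so that, with the single exception of the CLRRS -> STAT re-entry along the [C] path, every transition in the diagram moves to a numerically larger state. A hypothetical helper (not part of the driver) makes the rule concrete:

	/* Sketch only: encodes the "transitions go downward" invariant above. */
	static int ub_transition_is_forward(enum ub_scsi_cmd_state from,
					    enum ub_scsi_cmd_state to)
	{
		if (from == UB_CMDST_CLRRS && to == UB_CMDST_STAT)
			return 1;	/* the [C] re-entry */
		return to > from;
	}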
104
105/*
106 * This many LUNs per USB device.
107 * Every one of them takes a host, see UB_MAX_HOSTS.
108 */
109#define UB_MAX_LUNS 9
110
111/*
112 */
113
114#define UB_PARTS_PER_LUN 8
115
116#define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */
117
118#define UB_SENSE_SIZE 18
119
120/*
121 */
122struct ub_dev;
123
124#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */
125#define UB_MAX_SECTORS 64
126
127/*
128 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
129 * even if a webcam hogs the bus, but some devices need time to spin up.
130 */
131#define UB_URB_TIMEOUT (HZ*2)
132#define UB_DATA_TIMEOUT (HZ*5) /* ZIP does spin-ups in the data phase */
133#define UB_STAT_TIMEOUT (HZ*5) /* Same spinups and eject for a dataless cmd. */
134#define UB_CTRL_TIMEOUT (HZ/2) /* 500ms ought to be enough to clear a stall */
135
136/*
137 * An instance of a SCSI command in transit.
138 */
139#define UB_DIR_NONE 0
140#define UB_DIR_READ 1
141#define UB_DIR_ILLEGAL2 2
142#define UB_DIR_WRITE 3
143
144#define UB_DIR_CHAR(c) (((c)==UB_DIR_WRITE)? 'w': \
145 (((c)==UB_DIR_READ)? 'r': 'n'))
146
147enum ub_scsi_cmd_state {
148 UB_CMDST_INIT, /* Initial state */
149 UB_CMDST_CMD, /* Command submitted */
150 UB_CMDST_DATA, /* Data phase */
151 UB_CMDST_CLR2STS, /* Clearing before requesting status */
152 UB_CMDST_STAT, /* Status phase */
153 UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */
154 UB_CMDST_CLRRS, /* Clearing before retrying status */
155 UB_CMDST_SENSE, /* Sending Request Sense */
156 UB_CMDST_DONE /* Final state */
157};
158
159struct ub_scsi_cmd {
160 unsigned char cdb[UB_MAX_CDB_SIZE];
161 unsigned char cdb_len;
162
163 unsigned char dir; /* 0 - none, 1 - read, 3 - write. */
164 enum ub_scsi_cmd_state state;
165 unsigned int tag;
166 struct ub_scsi_cmd *next;
167
168 int error; /* Return code - valid upon done */
169 unsigned int act_len; /* Return size */
170 unsigned char key, asc, ascq; /* May be valid if error==-EIO */
171
172 int stat_count; /* Retries getting status. */
173 unsigned int timeo; /* jiffies until rq->timeout changes */
174
175 unsigned int len; /* Requested length */
176 unsigned int current_sg;
177 unsigned int nsg; /* sgv[nsg] */
178 struct scatterlist sgv[UB_MAX_REQ_SG];
179
180 struct ub_lun *lun;
181 void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
182 void *back;
183};
184
185struct ub_request {
186 struct request *rq;
187 unsigned int current_try;
188 unsigned int nsg; /* sgv[nsg] */
189 struct scatterlist sgv[UB_MAX_REQ_SG];
190};
191
192/*
193 */
194struct ub_capacity {
195 unsigned long nsec; /* Linux size - 512 byte sectors */
196 unsigned int bsize; /* Linux hardsect_size */
197 unsigned int bshift; /* Shift between 512 and hard sects */
198};
199
200/*
201 * This is a direct take-off from linux/include/completion.h
202 * The difference is that I do not wait on this thing, just poll.
203 * When I want to wait (ub_probe), I just use the stock completion.
204 *
205 * Note that INIT_COMPLETION takes no lock. It is correct. But why
206 * in the bloody hell that thing takes struct instead of pointer to struct
207 * is quite beyond me. I just copied it from the stock completion.
208 */
209struct ub_completion {
210 unsigned int done;
211 spinlock_t lock;
212};
213
214static DEFINE_MUTEX(ub_mutex);
215static inline void ub_init_completion(struct ub_completion *x)
216{
217 x->done = 0;
218 spin_lock_init(&x->lock);
219}
220
221#define UB_INIT_COMPLETION(x) ((x).done = 0)
222
223static void ub_complete(struct ub_completion *x)
224{
225 unsigned long flags;
226
227 spin_lock_irqsave(&x->lock, flags);
228 x->done++;
229 spin_unlock_irqrestore(&x->lock, flags);
230}
231
232static int ub_is_completed(struct ub_completion *x)
233{
234 unsigned long flags;
235 int ret;
236
237 spin_lock_irqsave(&x->lock, flags);
238 ret = x->done;
239 spin_unlock_irqrestore(&x->lock, flags);
240 return ret;
241}
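The polling half is what lets the tasklet below avoid sleeping. As a sketch, the two idioms this structure supports look like this (fragments; both call sites appear later in this file):

	/* Tasklet context: poll and bail out, never sleep (ub_scsi_dispatch). */
	if (!ub_is_completed(&sc->work_done))
		break;		/* URB still in flight; revisit on next completion */

	/* Process context (probe-time paths): use a stock completion and sleep. */
	wait_for_completion(&compl);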
242
243/*
244 */
245struct ub_scsi_cmd_queue {
246 int qlen, qmax;
247 struct ub_scsi_cmd *head, *tail;
248};
249
250/*
251 * The block device instance (one per LUN).
252 */
253struct ub_lun {
254 struct ub_dev *udev;
255 struct list_head link;
256 struct gendisk *disk;
257 int id; /* Host index */
258 int num; /* LUN number */
259 char name[16];
260
261 int changed; /* Media was changed */
262 int removable;
263 int readonly;
264
265 struct ub_request urq;
266
267 /* Use Ingo's mempool if or when we have more than one command. */
268 /*
269 * Currently we never need more than one command for the whole device.
270 * However, giving every LUN a command is a cheap and automatic way
271 * to enforce fairness between them.
272 */
273 int cmda[1];
274 struct ub_scsi_cmd cmdv[1];
275
276 struct ub_capacity capacity;
277};
278
279/*
280 * The USB device instance.
281 */
282struct ub_dev {
283 spinlock_t *lock;
284 atomic_t poison; /* The USB device is disconnected */
285 int openc; /* protected by ub_lock! */
286 /* kref is too implicit for our taste */
287 int reset; /* Reset is running */
288 int bad_resid;
289 unsigned int tagcnt;
290 char name[12];
291 struct usb_device *dev;
292 struct usb_interface *intf;
293
294 struct list_head luns;
295
296 unsigned int send_bulk_pipe; /* cached pipe values */
297 unsigned int recv_bulk_pipe;
298 unsigned int send_ctrl_pipe;
299 unsigned int recv_ctrl_pipe;
300
301 struct tasklet_struct tasklet;
302
303 struct ub_scsi_cmd_queue cmd_queue;
304 struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
305 unsigned char top_sense[UB_SENSE_SIZE];
306
307 struct ub_completion work_done;
308 struct urb work_urb;
309 struct timer_list work_timer;
310 int last_pipe; /* What might need clearing */
311 __le32 signature; /* Learned signature */
312 struct bulk_cb_wrap work_bcb;
313 struct bulk_cs_wrap work_bcs;
314 struct usb_ctrlrequest work_cr;
315
316 struct work_struct reset_work;
317 wait_queue_head_t reset_wait;
318};
319
320/*
321 */
322static void ub_cleanup(struct ub_dev *sc);
323static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
324static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
325 struct ub_scsi_cmd *cmd, struct ub_request *urq);
326static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
327 struct ub_scsi_cmd *cmd, struct ub_request *urq);
328static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
329static void ub_end_rq(struct request *rq, unsigned int status);
330static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
331 struct ub_request *urq, struct ub_scsi_cmd *cmd);
332static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
333static void ub_urb_complete(struct urb *urb);
334static void ub_scsi_action(unsigned long _dev);
335static void ub_scsi_dispatch(struct ub_dev *sc);
336static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
337static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
338static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
339static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
340static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
341static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
342static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
343static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
344 int stalled_pipe);
345static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
346static void ub_reset_enter(struct ub_dev *sc, int try);
347static void ub_reset_task(struct work_struct *work);
348static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
349static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
350 struct ub_capacity *ret);
351static int ub_sync_reset(struct ub_dev *sc);
352static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
353static int ub_probe_lun(struct ub_dev *sc, int lnum);
354
355/*
356 */
357#ifdef CONFIG_USB_LIBUSUAL
358
359#define ub_usb_ids usb_storage_usb_ids
360#else
361
362static const struct usb_device_id ub_usb_ids[] = {
363 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
364 { }
365};
366
367MODULE_DEVICE_TABLE(usb, ub_usb_ids);
368#endif /* CONFIG_USB_LIBUSUAL */
369
370/*
371 * Find me a way to identify "next free minor" for add_disk(),
372 * and the array disappears the next day. However, the number of
373 * hosts has something to do with the naming and /proc/partitions.
374 * This has to be thought out in detail before changing.
375 * If UB_MAX_HOST was 1000, we'd use a bitmap. Or a better data structure.
376 */
377#define UB_MAX_HOSTS 26
378static char ub_hostv[UB_MAX_HOSTS];
379
380#define UB_QLOCK_NUM 5
381static spinlock_t ub_qlockv[UB_QLOCK_NUM];
382static int ub_qlock_next = 0;
383
384static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */
385
386/*
387 * The id allocator.
388 *
389 * This also stores the host for indexing by minor, which is somewhat dirty.
390 */
391static int ub_id_get(void)
392{
393 unsigned long flags;
394 int i;
395
396 spin_lock_irqsave(&ub_lock, flags);
397 for (i = 0; i < UB_MAX_HOSTS; i++) {
398 if (ub_hostv[i] == 0) {
399 ub_hostv[i] = 1;
400 spin_unlock_irqrestore(&ub_lock, flags);
401 return i;
402 }
403 }
404 spin_unlock_irqrestore(&ub_lock, flags);
405 return -1;
406}
407
408static void ub_id_put(int id)
409{
410 unsigned long flags;
411
412 if (id < 0 || id >= UB_MAX_HOSTS) {
413 printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
414 return;
415 }
416
417 spin_lock_irqsave(&ub_lock, flags);
418 if (ub_hostv[id] == 0) {
419 spin_unlock_irqrestore(&ub_lock, flags);
420 printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
421 return;
422 }
423 ub_hostv[id] = 0;
424 spin_unlock_irqrestore(&ub_lock, flags);
425}
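For comparison, the bitmap variant that the comment above contemplates for a much larger UB_MAX_HOSTS would look roughly like this with the kernel bitmap helpers from linux/bitmap.h (a sketch, not driver code):

	static DECLARE_BITMAP(ub_host_map, UB_MAX_HOSTS);

	static int ub_id_get_bitmap(void)
	{
		unsigned long flags;
		int i;

		spin_lock_irqsave(&ub_lock, flags);
		i = find_first_zero_bit(ub_host_map, UB_MAX_HOSTS);
		if (i < UB_MAX_HOSTS)
			__set_bit(i, ub_host_map);
		spin_unlock_irqrestore(&ub_lock, flags);
		return i < UB_MAX_HOSTS ? i : -1;
	}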
426
427/*
428 * This is necessitated by the fact that blk_cleanup_queue does not
429 * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
430 * Since our blk_init_queue() passes a spinlock common with ub_dev,
431 * we have life time issues when ub_cleanup frees ub_dev.
432 */
433static spinlock_t *ub_next_lock(void)
434{
435 unsigned long flags;
436 spinlock_t *ret;
437
438 spin_lock_irqsave(&ub_lock, flags);
439 ret = &ub_qlockv[ub_qlock_next];
440 ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
441 spin_unlock_irqrestore(&ub_lock, flags);
442 return ret;
443}
444
445/*
446 * Downcount for deallocation. This rides on two assumptions:
447 * - once something is poisoned, its refcount cannot grow
448 * - opens cannot happen at this time (del_gendisk was done)
449 * If the above is true, we can drop the lock, which we need for
450 * blk_cleanup_queue(): the silly thing may attempt to sleep.
451 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
452 */
453static void ub_put(struct ub_dev *sc)
454{
455 unsigned long flags;
456
457 spin_lock_irqsave(&ub_lock, flags);
458 --sc->openc;
459 if (sc->openc == 0 && atomic_read(&sc->poison)) {
460 spin_unlock_irqrestore(&ub_lock, flags);
461 ub_cleanup(sc);
462 } else {
463 spin_unlock_irqrestore(&ub_lock, flags);
464 }
465}
466
467/*
468 * Final cleanup and deallocation.
469 */
470static void ub_cleanup(struct ub_dev *sc)
471{
472 struct list_head *p;
473 struct ub_lun *lun;
474 struct request_queue *q;
475
476 while (!list_empty(&sc->luns)) {
477 p = sc->luns.next;
478 lun = list_entry(p, struct ub_lun, link);
479 list_del(p);
480
481 /* I don't think queue can be NULL. But... Stolen from sx8.c */
482 if ((q = lun->disk->queue) != NULL)
483 blk_cleanup_queue(q);
484 /*
485 * If we zero disk->private_data BEFORE put_disk, we have
486 * to check for NULL all over the place in open, release,
487 * check_media and revalidate, because the block level
488 * semaphore is well inside the put_disk.
489 * But we cannot zero after the call, because *disk is gone.
490 * The sd.c is blatantly racy in this area.
491 */
492 /* disk->private_data = NULL; */
493 put_disk(lun->disk);
494 lun->disk = NULL;
495
496 ub_id_put(lun->id);
497 kfree(lun);
498 }
499
500 usb_set_intfdata(sc->intf, NULL);
501 usb_put_intf(sc->intf);
502 usb_put_dev(sc->dev);
503 kfree(sc);
504}
505
506/*
507 * The "command allocator".
508 */
509static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
510{
511 struct ub_scsi_cmd *ret;
512
513 if (lun->cmda[0])
514 return NULL;
515 ret = &lun->cmdv[0];
516 lun->cmda[0] = 1;
517 return ret;
518}
519
520static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
521{
522 if (cmd != &lun->cmdv[0]) {
523 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
524 lun->name, cmd);
525 return;
526 }
527 if (!lun->cmda[0]) {
528 printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
529 return;
530 }
531 lun->cmda[0] = 0;
532}
533
534/*
535 * The command queue.
536 */
537static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
538{
539 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
540
541 if (t->qlen++ == 0) {
542 t->head = cmd;
543 t->tail = cmd;
544 } else {
545 t->tail->next = cmd;
546 t->tail = cmd;
547 }
548
549 if (t->qlen > t->qmax)
550 t->qmax = t->qlen;
551}
552
553static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
554{
555 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
556
557 if (t->qlen++ == 0) {
558 t->head = cmd;
559 t->tail = cmd;
560 } else {
561 cmd->next = t->head;
562 t->head = cmd;
563 }
564
565 if (t->qlen > t->qmax)
566 t->qmax = t->qlen;
567}
568
569static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
570{
571 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
572 struct ub_scsi_cmd *cmd;
573
574 if (t->qlen == 0)
575 return NULL;
576 if (--t->qlen == 0)
577 t->tail = NULL;
578 cmd = t->head;
579 t->head = cmd->next;
580 cmd->next = NULL;
581 return cmd;
582}
583
584#define ub_cmdq_peek(sc) ((sc)->cmd_queue.head)
585
586/*
587 * The request function is our main entry point
588 */
589
590static void ub_request_fn(struct request_queue *q)
591{
592 struct ub_lun *lun = q->queuedata;
593 struct request *rq;
594
595 while ((rq = blk_peek_request(q)) != NULL) {
596 if (ub_request_fn_1(lun, rq) != 0) {
597 blk_stop_queue(q);
598 break;
599 }
600 }
601}
602
603static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
604{
605 struct ub_dev *sc = lun->udev;
606 struct ub_scsi_cmd *cmd;
607 struct ub_request *urq;
608 int n_elem;
609
610 if (atomic_read(&sc->poison)) {
611 blk_start_request(rq);
612 ub_end_rq(rq, DID_NO_CONNECT << 16);
613 return 0;
614 }
615
616 if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC) {
617 blk_start_request(rq);
618 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
619 return 0;
620 }
621
622 if (lun->urq.rq != NULL)
623 return -1;
624 if ((cmd = ub_get_cmd(lun)) == NULL)
625 return -1;
626 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
627
628 blk_start_request(rq);
629
630 urq = &lun->urq;
631 memset(urq, 0, sizeof(struct ub_request));
632 urq->rq = rq;
633
634 /*
635 * get scatterlist from block layer
636 */
637 sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
638 n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
639 if (n_elem < 0) {
640 /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
641 printk(KERN_INFO "%s: failed request map (%d)\n",
642 lun->name, n_elem);
643 goto drop;
644 }
645 if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */
646 printk(KERN_WARNING "%s: request with %d segments\n",
647 lun->name, n_elem);
648 goto drop;
649 }
650 urq->nsg = n_elem;
651
652 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
653 ub_cmd_build_packet(sc, lun, cmd, urq);
654 } else {
655 ub_cmd_build_block(sc, lun, cmd, urq);
656 }
657 cmd->state = UB_CMDST_INIT;
658 cmd->lun = lun;
659 cmd->done = ub_rw_cmd_done;
660 cmd->back = urq;
661
662 cmd->tag = sc->tagcnt++;
663 if (ub_submit_scsi(sc, cmd) != 0)
664 goto drop;
665
666 return 0;
667
668drop:
669 ub_put_cmd(lun, cmd);
670 ub_end_rq(rq, DID_ERROR << 16);
671 return 0;
672}
673
674static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
675 struct ub_scsi_cmd *cmd, struct ub_request *urq)
676{
677 struct request *rq = urq->rq;
678 unsigned int block, nblks;
679
680 if (rq_data_dir(rq) == WRITE)
681 cmd->dir = UB_DIR_WRITE;
682 else
683 cmd->dir = UB_DIR_READ;
684
685 cmd->nsg = urq->nsg;
686 memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
687
688 /*
689 * build the command
690 *
691 * The call to blk_queue_logical_block_size() guarantees that request
692 * is aligned, but it is given in terms of 512 byte units, always.
693 */
694 block = blk_rq_pos(rq) >> lun->capacity.bshift;
695 nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
696
697 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
698 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
699 cmd->cdb[2] = block >> 24;
700 cmd->cdb[3] = block >> 16;
701 cmd->cdb[4] = block >> 8;
702 cmd->cdb[5] = block;
703 cmd->cdb[7] = nblks >> 8;
704 cmd->cdb[8] = nblks;
705 cmd->cdb_len = 10;
706
707 cmd->len = blk_rq_bytes(rq);
708}
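A worked example of the packing above (reader's arithmetic, not driver output): on a device with 2048-byte blocks (bshift = 2), a read at 512-byte sector 2048 for 32 sectors gives block = 2048 >> 2 = 512 and nblks = 32 >> 2 = 8, so the CDB ends up as:

	cmd->cdb[0] = READ_10;			/* 0x28 */
	cmd->cdb[2] = 0x00; cmd->cdb[3] = 0x00;	/* block = 0x00000200, */
	cmd->cdb[4] = 0x02; cmd->cdb[5] = 0x00;	/* big-endian */
	cmd->cdb[7] = 0x00; cmd->cdb[8] = 0x08;	/* nblks = 8 device blocks */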
709
710static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
711 struct ub_scsi_cmd *cmd, struct ub_request *urq)
712{
713 struct request *rq = urq->rq;
714
715 if (blk_rq_bytes(rq) == 0) {
716 cmd->dir = UB_DIR_NONE;
717 } else {
718 if (rq_data_dir(rq) == WRITE)
719 cmd->dir = UB_DIR_WRITE;
720 else
721 cmd->dir = UB_DIR_READ;
722 }
723
724 cmd->nsg = urq->nsg;
725 memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
726
727 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
728 cmd->cdb_len = rq->cmd_len;
729
730 cmd->len = blk_rq_bytes(rq);
731
732 /*
733 * To reapply this to every URB is not as incorrect as it looks.
734 * In return, we avoid any complicated tracking calculations.
735 */
736 cmd->timeo = rq->timeout;
737}
738
739static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
740{
741 struct ub_lun *lun = cmd->lun;
742 struct ub_request *urq = cmd->back;
743 struct request *rq;
744 unsigned int scsi_status;
745
746 rq = urq->rq;
747
748 if (cmd->error == 0) {
749 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
750 if (cmd->act_len >= rq->resid_len)
751 rq->resid_len = 0;
752 else
753 rq->resid_len -= cmd->act_len;
754 scsi_status = 0;
755 } else {
756 if (cmd->act_len != cmd->len) {
757 scsi_status = SAM_STAT_CHECK_CONDITION;
758 } else {
759 scsi_status = 0;
760 }
761 }
762 } else {
763 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
764 /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
765 memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
766 rq->sense_len = UB_SENSE_SIZE;
767 if (sc->top_sense[0] != 0)
768 scsi_status = SAM_STAT_CHECK_CONDITION;
769 else
770 scsi_status = DID_ERROR << 16;
771 } else {
772 if (cmd->error == -EIO &&
773 (cmd->key == 0 ||
774 cmd->key == MEDIUM_ERROR ||
775 cmd->key == UNIT_ATTENTION)) {
776 if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
777 return;
778 }
779 scsi_status = SAM_STAT_CHECK_CONDITION;
780 }
781 }
782
783 urq->rq = NULL;
784
785 ub_put_cmd(lun, cmd);
786 ub_end_rq(rq, scsi_status);
787 blk_start_queue(lun->disk->queue);
788}
789
790static void ub_end_rq(struct request *rq, unsigned int scsi_status)
791{
792 int error;
793
794 if (scsi_status == 0) {
795 error = 0;
796 } else {
797 error = -EIO;
798 rq->errors = scsi_status;
799 }
800 __blk_end_request_all(rq, error);
801}
802
803static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
804 struct ub_request *urq, struct ub_scsi_cmd *cmd)
805{
806
807 if (atomic_read(&sc->poison))
808 return -ENXIO;
809
810 ub_reset_enter(sc, urq->current_try);
811
812 if (urq->current_try >= 3)
813 return -EIO;
814 urq->current_try++;
815
816 /* Remove this if anyone complains of flooding. */
817 printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
818 "[sense %x %02x %02x] retry %d\n",
819 sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
820 cmd->key, cmd->asc, cmd->ascq, urq->current_try);
821
822 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
823 ub_cmd_build_block(sc, lun, cmd, urq);
824
825 cmd->state = UB_CMDST_INIT;
826 cmd->lun = lun;
827 cmd->done = ub_rw_cmd_done;
828 cmd->back = urq;
829
830 cmd->tag = sc->tagcnt++;
831
832#if 0 /* Wasteful */
833 return ub_submit_scsi(sc, cmd);
834#else
835 ub_cmdq_add(sc, cmd);
836 return 0;
837#endif
838}
839
840/*
841 * Submit a regular SCSI operation (not an auto-sense).
842 *
843 * The Iron Law of Good Submit Routine is:
844 * Zero return - callback is done, Nonzero return - callback is not done.
845 * No exceptions.
846 *
847 * Host is assumed locked.
848 */
849static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
850{
851
852 if (cmd->state != UB_CMDST_INIT ||
853 (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
854 return -EINVAL;
855 }
856
857 ub_cmdq_add(sc, cmd);
858 /*
859 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
860 * safer to jump to a tasklet, in case upper layers do something silly.
861 */
862 tasklet_schedule(&sc->tasklet);
863 return 0;
864}
865
866/*
867 * Submit the first URB for the queued command.
868 * This function does not deal with queueing in any way.
869 */
870static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
871{
872 struct bulk_cb_wrap *bcb;
873 int rc;
874
875 bcb = &sc->work_bcb;
876
877 /*
878 * ``If the allocation length is eighteen or greater, and a device
879 * server returns less than eighteen bytes of data, the application
880 * client should assume that the bytes not transferred would have been
881 * zeroes had the device server returned those bytes.''
882 *
883 * We zero sense for all commands so that when a packet request
884 * fails it does not return a stale sense.
885 */
886 memset(&sc->top_sense, 0, UB_SENSE_SIZE);
887
888 /* set up the command wrapper */
889 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
890 bcb->Tag = cmd->tag; /* Endianness is not important */
891 bcb->DataTransferLength = cpu_to_le32(cmd->len);
892 bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
893 bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
894 bcb->Length = cmd->cdb_len;
895
896 /* copy the command payload */
897 memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
898
899 UB_INIT_COMPLETION(sc->work_done);
900
901 sc->last_pipe = sc->send_bulk_pipe;
902 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
903 bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
904
905 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
906 /* XXX Clear stalls */
907 ub_complete(&sc->work_done);
908 return rc;
909 }
910
911 sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
912 add_timer(&sc->work_timer);
913
914 cmd->state = UB_CMDST_CMD;
915 return 0;
916}
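For readers without linux/usb_usual.h at hand, the CBW filled in above is the standard Bulk-Only command wrapper, 31 bytes on the wire (US_BULK_CB_WRAP_LEN); roughly:

	struct bulk_cb_wrap {
		__le32	Signature;		/* 'USBC' (US_BULK_CB_SIGN) */
		__u32	Tag;			/* echoed back in the CSW */
		__le32	DataTransferLength;	/* bytes expected in data phase */
		__u8	Flags;			/* bit 7 set = device-to-host */
		__u8	Lun;
		__u8	Length;			/* valid CDB bytes */
		__u8	CDB[16];
	};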
917
918/*
919 * Timeout handler.
920 */
921static void ub_urb_timeout(unsigned long arg)
922{
923 struct ub_dev *sc = (struct ub_dev *) arg;
924 unsigned long flags;
925
926 spin_lock_irqsave(sc->lock, flags);
927 if (!ub_is_completed(&sc->work_done))
928 usb_unlink_urb(&sc->work_urb);
929 spin_unlock_irqrestore(sc->lock, flags);
930}
931
932/*
933 * Completion routine for the work URB.
934 *
935 * This can be called directly from usb_submit_urb (while we have
936 * the sc->lock taken) and from an interrupt (while we do NOT have
937 * the sc->lock taken). Therefore, bounce this off to a tasklet.
938 */
939static void ub_urb_complete(struct urb *urb)
940{
941 struct ub_dev *sc = urb->context;
942
943 ub_complete(&sc->work_done);
944 tasklet_schedule(&sc->tasklet);
945}
946
947static void ub_scsi_action(unsigned long _dev)
948{
949 struct ub_dev *sc = (struct ub_dev *) _dev;
950 unsigned long flags;
951
952 spin_lock_irqsave(sc->lock, flags);
953 ub_scsi_dispatch(sc);
954 spin_unlock_irqrestore(sc->lock, flags);
955}
956
957static void ub_scsi_dispatch(struct ub_dev *sc)
958{
959 struct ub_scsi_cmd *cmd;
960 int rc;
961
962 while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
963 if (cmd->state == UB_CMDST_DONE) {
964 ub_cmdq_pop(sc);
965 (*cmd->done)(sc, cmd);
966 } else if (cmd->state == UB_CMDST_INIT) {
967 if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
968 break;
969 cmd->error = rc;
970 cmd->state = UB_CMDST_DONE;
971 } else {
972 if (!ub_is_completed(&sc->work_done))
973 break;
974 del_timer(&sc->work_timer);
975 ub_scsi_urb_compl(sc, cmd);
976 }
977 }
978}
979
980static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
981{
982 struct urb *urb = &sc->work_urb;
983 struct bulk_cs_wrap *bcs;
984 int endp;
985 int len;
986 int rc;
987
988 if (atomic_read(&sc->poison)) {
989 ub_state_done(sc, cmd, -ENODEV);
990 return;
991 }
992
993 endp = usb_pipeendpoint(sc->last_pipe);
994 if (usb_pipein(sc->last_pipe))
995 endp |= USB_DIR_IN;
996
997 if (cmd->state == UB_CMDST_CLEAR) {
998 if (urb->status == -EPIPE) {
999 /*
1000 * STALL while clearing STALL.
1001 * The control pipe clears itself - nothing to do.
1002 */
1003 printk(KERN_NOTICE "%s: stall on control pipe\n",
1004 sc->name);
1005 goto Bad_End;
1006 }
1007
1008 /*
1009 * We ignore the result for the halt clear.
1010 */
1011
1012 usb_reset_endpoint(sc->dev, endp);
1013
1014 ub_state_sense(sc, cmd);
1015
1016 } else if (cmd->state == UB_CMDST_CLR2STS) {
1017 if (urb->status == -EPIPE) {
1018 printk(KERN_NOTICE "%s: stall on control pipe\n",
1019 sc->name);
1020 goto Bad_End;
1021 }
1022
1023 /*
1024 * We ignore the result for the halt clear.
1025 */
1026
1027 usb_reset_endpoint(sc->dev, endp);
1028
1029 ub_state_stat(sc, cmd);
1030
1031 } else if (cmd->state == UB_CMDST_CLRRS) {
1032 if (urb->status == -EPIPE) {
1033 printk(KERN_NOTICE "%s: stall on control pipe\n",
1034 sc->name);
1035 goto Bad_End;
1036 }
1037
1038 /*
1039 * We ignore the result for the halt clear.
1040 */
1041
1042 usb_reset_endpoint(sc->dev, endp);
1043
1044 ub_state_stat_counted(sc, cmd);
1045
1046 } else if (cmd->state == UB_CMDST_CMD) {
1047 switch (urb->status) {
1048 case 0:
1049 break;
1050 case -EOVERFLOW:
1051 goto Bad_End;
1052 case -EPIPE:
1053 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1054 if (rc != 0) {
1055 printk(KERN_NOTICE "%s: "
1056 "unable to submit clear (%d)\n",
1057 sc->name, rc);
1058 /*
1059 * This is typically ENOMEM or some other such shit.
1060 * Retrying is pointless. Just do Bad End on it...
1061 */
1062 ub_state_done(sc, cmd, rc);
1063 return;
1064 }
1065 cmd->state = UB_CMDST_CLEAR;
1066 return;
1067 case -ESHUTDOWN: /* unplug */
1068 case -EILSEQ: /* unplug timeout on uhci */
1069 ub_state_done(sc, cmd, -ENODEV);
1070 return;
1071 default:
1072 goto Bad_End;
1073 }
1074 if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
1075 goto Bad_End;
1076 }
1077
1078 if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
1079 ub_state_stat(sc, cmd);
1080 return;
1081 }
1082
1083 // udelay(125); // usb-storage has this
1084 ub_data_start(sc, cmd);
1085
1086 } else if (cmd->state == UB_CMDST_DATA) {
1087 if (urb->status == -EPIPE) {
1088 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1089 if (rc != 0) {
1090 printk(KERN_NOTICE "%s: "
1091 "unable to submit clear (%d)\n",
1092 sc->name, rc);
1093 ub_state_done(sc, cmd, rc);
1094 return;
1095 }
1096 cmd->state = UB_CMDST_CLR2STS;
1097 return;
1098 }
1099 if (urb->status == -EOVERFLOW) {
1100 /*
1101 * A babble? Failure, but we must transfer CSW now.
1102 */
1103 cmd->error = -EOVERFLOW; /* A cheap trick... */
1104 ub_state_stat(sc, cmd);
1105 return;
1106 }
1107
1108 if (cmd->dir == UB_DIR_WRITE) {
1109 /*
1110 * Do not continue writes in case of a failure.
1111 * Doing so would cause sectors to be mixed up,
1112 * which is worse than sectors lost.
1113 *
1114 * We must try to read the CSW, or many devices
1115 * get confused.
1116 */
1117 len = urb->actual_length;
1118 if (urb->status != 0 ||
1119 len != cmd->sgv[cmd->current_sg].length) {
1120 cmd->act_len += len;
1121
1122 cmd->error = -EIO;
1123 ub_state_stat(sc, cmd);
1124 return;
1125 }
1126
1127 } else {
1128 /*
1129 * If an error occurs on read, we record it, and
1130 * continue to fetch data in order to avoid a bubble.
1131 *
1132 * As a small shortcut, we stop if we detect that
1133 * a CSW mixed into data.
1134 */
1135 if (urb->status != 0)
1136 cmd->error = -EIO;
1137
1138 len = urb->actual_length;
1139 if (urb->status != 0 ||
1140 len != cmd->sgv[cmd->current_sg].length) {
1141 if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
1142 goto Bad_End;
1143 }
1144 }
1145
1146 cmd->act_len += urb->actual_length;
1147
1148 if (++cmd->current_sg < cmd->nsg) {
1149 ub_data_start(sc, cmd);
1150 return;
1151 }
1152 ub_state_stat(sc, cmd);
1153
1154 } else if (cmd->state == UB_CMDST_STAT) {
1155 if (urb->status == -EPIPE) {
1156 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1157 if (rc != 0) {
1158 printk(KERN_NOTICE "%s: "
1159 "unable to submit clear (%d)\n",
1160 sc->name, rc);
1161 ub_state_done(sc, cmd, rc);
1162 return;
1163 }
1164
1165 /*
1166 * Having a stall when getting CSW is an error, so
1167 * make sure upper levels are not oblivious to it.
1168 */
1169 cmd->error = -EIO; /* A cheap trick... */
1170
1171 cmd->state = UB_CMDST_CLRRS;
1172 return;
1173 }
1174
1175 /* Catch everything, including -EOVERFLOW and other nasties. */
1176 if (urb->status != 0)
1177 goto Bad_End;
1178
1179 if (urb->actual_length == 0) {
1180 ub_state_stat_counted(sc, cmd);
1181 return;
1182 }
1183
1184 /*
1185 * Check the returned Bulk protocol status.
1186 * The status block has to be validated first.
1187 */
1188
1189 bcs = &sc->work_bcs;
1190
1191 if (sc->signature == cpu_to_le32(0)) {
1192 /*
1193 * This is the first reply, so do not perform the check.
1194 * Instead, remember the signature the device uses
1195 * for future checks. But do not allow a nul.
1196 */
1197 sc->signature = bcs->Signature;
1198 if (sc->signature == cpu_to_le32(0)) {
1199 ub_state_stat_counted(sc, cmd);
1200 return;
1201 }
1202 } else {
1203 if (bcs->Signature != sc->signature) {
1204 ub_state_stat_counted(sc, cmd);
1205 return;
1206 }
1207 }
1208
1209 if (bcs->Tag != cmd->tag) {
1210 /*
1211 * This usually happens when we disagree with the
1212 * device's microcode about something. For instance,
1213 * a few of them throw this after timeouts. They buffer
1214 * commands and reply at commands we timed out before.
1215 * Without flushing these replies we loop forever.
1216 */
1217 ub_state_stat_counted(sc, cmd);
1218 return;
1219 }
1220
1221 if (!sc->bad_resid) {
1222 len = le32_to_cpu(bcs->Residue);
1223 if (len != cmd->len - cmd->act_len) {
1224 /*
1225 * Only start ignoring if this cmd ended well.
1226 */
1227 if (cmd->len == cmd->act_len) {
1228 printk(KERN_NOTICE "%s: "
1229 "bad residual %d of %d, ignoring\n",
1230 sc->name, len, cmd->len);
1231 sc->bad_resid = 1;
1232 }
1233 }
1234 }
1235
1236 switch (bcs->Status) {
1237 case US_BULK_STAT_OK:
1238 break;
1239 case US_BULK_STAT_FAIL:
1240 ub_state_sense(sc, cmd);
1241 return;
1242 case US_BULK_STAT_PHASE:
1243 goto Bad_End;
1244 default:
1245 printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
1246 sc->name, bcs->Status);
1247 ub_state_done(sc, cmd, -EINVAL);
1248 return;
1249 }
1250
1251 /* Not zeroing error to preserve a babble indicator */
1252 if (cmd->error != 0) {
1253 ub_state_sense(sc, cmd);
1254 return;
1255 }
1256 cmd->state = UB_CMDST_DONE;
1257 ub_cmdq_pop(sc);
1258 (*cmd->done)(sc, cmd);
1259
1260 } else if (cmd->state == UB_CMDST_SENSE) {
1261 ub_state_done(sc, cmd, -EIO);
1262
1263 } else {
1264 printk(KERN_WARNING "%s: wrong command state %d\n",
1265 sc->name, cmd->state);
1266 ub_state_done(sc, cmd, -EINVAL);
1267 return;
1268 }
1269 return;
1270
1271Bad_End: /* Little Excel is dead */
1272 ub_state_done(sc, cmd, -EIO);
1273}
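The CSW consumed in the STAT branch above is, for reference, the 13-byte Bulk-Only status wrapper (US_BULK_CS_WRAP_LEN), roughly:

	struct bulk_cs_wrap {
		__le32	Signature;	/* 'USBS'; learned on first reply here */
		__u32	Tag;		/* must match the CBW's Tag */
		__le32	Residue;	/* bytes not transferred */
		__u8	Status;		/* US_BULK_STAT_OK / _FAIL / _PHASE */
	};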
1274
1275/*
1276 * Factorization helper for the command state machine:
1277 * Initiate a data segment transfer.
1278 */
1279static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1280{
1281 struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
1282 int pipe;
1283 int rc;
1284
1285 UB_INIT_COMPLETION(sc->work_done);
1286
1287 if (cmd->dir == UB_DIR_READ)
1288 pipe = sc->recv_bulk_pipe;
1289 else
1290 pipe = sc->send_bulk_pipe;
1291 sc->last_pipe = pipe;
1292 usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
1293 sg->length, ub_urb_complete, sc);
1294
1295 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1296 /* XXX Clear stalls */
1297 ub_complete(&sc->work_done);
1298 ub_state_done(sc, cmd, rc);
1299 return;
1300 }
1301
1302 if (cmd->timeo)
1303 sc->work_timer.expires = jiffies + cmd->timeo;
1304 else
1305 sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
1306 add_timer(&sc->work_timer);
1307
1308 cmd->state = UB_CMDST_DATA;
1309}
1310
1311/*
1312 * Factorization helper for the command state machine:
1313 * Finish the command.
1314 */
1315static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
1316{
1317
1318 cmd->error = rc;
1319 cmd->state = UB_CMDST_DONE;
1320 ub_cmdq_pop(sc);
1321 (*cmd->done)(sc, cmd);
1322}
1323
1324/*
1325 * Factorization helper for the command state machine:
1326 * Submit a CSW read.
1327 */
1328static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1329{
1330 int rc;
1331
1332 UB_INIT_COMPLETION(sc->work_done);
1333
1334 sc->last_pipe = sc->recv_bulk_pipe;
1335 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
1336 &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
1337
1338 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1339 /* XXX Clear stalls */
1340 ub_complete(&sc->work_done);
1341 ub_state_done(sc, cmd, rc);
1342 return -1;
1343 }
1344
1345 if (cmd->timeo)
1346 sc->work_timer.expires = jiffies + cmd->timeo;
1347 else
1348 sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
1349 add_timer(&sc->work_timer);
1350 return 0;
1351}
1352
1353/*
1354 * Factorization helper for the command state machine:
1355 * Submit a CSW read and go to STAT state.
1356 */
1357static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1358{
1359
1360 if (__ub_state_stat(sc, cmd) != 0)
1361 return;
1362
1363 cmd->stat_count = 0;
1364 cmd->state = UB_CMDST_STAT;
1365}
1366
1367/*
1368 * Factorization helper for the command state machine:
1369 * Submit a CSW read and go to STAT state with counter (along [C] path).
1370 */
1371static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1372{
1373
1374 if (++cmd->stat_count >= 4) {
1375 ub_state_sense(sc, cmd);
1376 return;
1377 }
1378
1379 if (__ub_state_stat(sc, cmd) != 0)
1380 return;
1381
1382 cmd->state = UB_CMDST_STAT;
1383}
1384
1385/*
1386 * Factorization helper for the command state machine:
1387 * Submit a REQUEST SENSE and go to SENSE state.
1388 */
1389static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1390{
1391 struct ub_scsi_cmd *scmd;
1392 struct scatterlist *sg;
1393 int rc;
1394
1395 if (cmd->cdb[0] == REQUEST_SENSE) {
1396 rc = -EPIPE;
1397 goto error;
1398 }
1399
1400 scmd = &sc->top_rqs_cmd;
1401 memset(scmd, 0, sizeof(struct ub_scsi_cmd));
1402 scmd->cdb[0] = REQUEST_SENSE;
1403 scmd->cdb[4] = UB_SENSE_SIZE;
1404 scmd->cdb_len = 6;
1405 scmd->dir = UB_DIR_READ;
1406 scmd->state = UB_CMDST_INIT;
1407 scmd->nsg = 1;
1408 sg = &scmd->sgv[0];
1409 sg_init_table(sg, UB_MAX_REQ_SG);
1410 sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
1411 (unsigned long)sc->top_sense & (PAGE_SIZE-1));
1412 scmd->len = UB_SENSE_SIZE;
1413 scmd->lun = cmd->lun;
1414 scmd->done = ub_top_sense_done;
1415 scmd->back = cmd;
1416
1417 scmd->tag = sc->tagcnt++;
1418
1419 cmd->state = UB_CMDST_SENSE;
1420
1421 ub_cmdq_insert(sc, scmd);
1422 return;
1423
1424error:
1425 ub_state_done(sc, cmd, rc);
1426}
1427
1428/*
1429 * A helper for the command's state machine:
1430 * Submit a stall clear.
1431 */
1432static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
1433 int stalled_pipe)
1434{
1435 int endp;
1436 struct usb_ctrlrequest *cr;
1437 int rc;
1438
1439 endp = usb_pipeendpoint(stalled_pipe);
1440 if (usb_pipein (stalled_pipe))
1441 endp |= USB_DIR_IN;
1442
1443 cr = &sc->work_cr;
1444 cr->bRequestType = USB_RECIP_ENDPOINT;
1445 cr->bRequest = USB_REQ_CLEAR_FEATURE;
1446 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
1447 cr->wIndex = cpu_to_le16(endp);
1448 cr->wLength = cpu_to_le16(0);
1449
1450 UB_INIT_COMPLETION(sc->work_done);
1451
1452 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1453 (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
1454
1455 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1456 ub_complete(&sc->work_done);
1457 return rc;
1458 }
1459
1460 sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
1461 add_timer(&sc->work_timer);
1462 return 0;
1463}
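For context: usbcore has a synchronous helper for this same CLEAR_FEATURE(ENDPOINT_HALT) request, usb_clear_halt(), but it sleeps and therefore cannot be called from the tasklet driving this state machine; the asynchronous URB above is the interrupt-safe equivalent. In process context the function would reduce to roughly:

	rc = usb_clear_halt(sc->dev, stalled_pipe);	/* sketch; may sleep */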
1464
1465/*
1466 */
1467static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1468{
1469 unsigned char *sense = sc->top_sense;
1470 struct ub_scsi_cmd *cmd;
1471
1472 /*
1473 * Find the command which triggered the unit attention or a check,
1474 * save the sense into it, and advance its state machine.
1475 */
1476 if ((cmd = ub_cmdq_peek(sc)) == NULL) {
1477 printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
1478 return;
1479 }
1480 if (cmd != scmd->back) {
1481 printk(KERN_WARNING "%s: "
1482 "sense done for wrong command 0x%x\n",
1483 sc->name, cmd->tag);
1484 return;
1485 }
1486 if (cmd->state != UB_CMDST_SENSE) {
1487 printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
1488 sc->name, cmd->state);
1489 return;
1490 }
1491
1492 /*
1493 * Ignoring scmd->act_len, because the buffer was pre-zeroed.
1494 */
1495 cmd->key = sense[2] & 0x0F;
1496 cmd->asc = sense[12];
1497 cmd->ascq = sense[13];
1498
1499 ub_scsi_urb_compl(sc, cmd);
1500}
1501
1502/*
1503 * Reset management
1504 */
1505
1506static void ub_reset_enter(struct ub_dev *sc, int try)
1507{
1508
1509 if (sc->reset) {
1510 /* This happens often on multi-LUN devices. */
1511 return;
1512 }
1513 sc->reset = try + 1;
1514
1515#if 0 /* Not needed because the disconnect waits for us. */
1516 unsigned long flags;
1517 spin_lock_irqsave(&ub_lock, flags);
1518 sc->openc++;
1519 spin_unlock_irqrestore(&ub_lock, flags);
1520#endif
1521
1522#if 0 /* We let them stop themselves. */
1523 struct ub_lun *lun;
1524 list_for_each_entry(lun, &sc->luns, link) {
1525 blk_stop_queue(lun->disk->queue);
1526 }
1527#endif
1528
1529 schedule_work(&sc->reset_work);
1530}
1531
1532static void ub_reset_task(struct work_struct *work)
1533{
1534 struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
1535 unsigned long flags;
1536 struct ub_lun *lun;
1537 int rc;
1538
1539 if (!sc->reset) {
1540 printk(KERN_WARNING "%s: Running reset unrequested\n",
1541 sc->name);
1542 return;
1543 }
1544
1545 if (atomic_read(&sc->poison)) {
1546 ;
1547 } else if ((sc->reset & 1) == 0) {
1548 ub_sync_reset(sc);
1549 msleep(700); /* usb-storage sleeps 6s (!) */
1550 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
1551 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
1552 } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
1553 ;
1554 } else {
1555 rc = usb_lock_device_for_reset(sc->dev, sc->intf);
1556 if (rc < 0) {
1557 printk(KERN_NOTICE
1558 "%s: usb_lock_device_for_reset failed (%d)\n",
1559 sc->name, rc);
1560 } else {
1561 rc = usb_reset_device(sc->dev);
1562 if (rc < 0) {
1563 printk(KERN_NOTICE "%s: "
1564 "usb_reset_device failed (%d)\n",
1565 sc->name, rc);
1566 }
1567 usb_unlock_device(sc->dev);
1568 }
1569 }
1570
1571 /*
1572 * In theory, no commands can be running while reset is active,
1573 * so nobody can ask for another reset, and so we do not need any
1574 * queues of resets or anything. We do need a spinlock though,
1575 * to interact with block layer.
1576 */
1577 spin_lock_irqsave(sc->lock, flags);
1578 sc->reset = 0;
1579 tasklet_schedule(&sc->tasklet);
1580 list_for_each_entry(lun, &sc->luns, link) {
1581 blk_start_queue(lun->disk->queue);
1582 }
1583 wake_up(&sc->reset_wait);
1584 spin_unlock_irqrestore(sc->lock, flags);
1585}
1586
1587/*
1588 * XXX Reset brackets are too much hassle to implement, so just stub them
1589 * in order to prevent forced unbinding (which deadlocks solid when our
1590 * ->disconnect method waits for the reset to complete and this kills keventd).
1591 *
1592 * XXX Tell Alan to move usb_unlock_device inside of usb_reset_device,
1593 * or else the post_reset is invoked, and restarts I/O on a locked device.
1594 */
1595static int ub_pre_reset(struct usb_interface *iface) {
1596 return 0;
1597}
1598
1599static int ub_post_reset(struct usb_interface *iface) {
1600 return 0;
1601}
1602
1603/*
1604 * This is called from a process context.
1605 */
1606static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
1607{
1608
1609 lun->readonly = 0; /* XXX Query this from the device */
1610
1611 lun->capacity.nsec = 0;
1612 lun->capacity.bsize = 512;
1613 lun->capacity.bshift = 0;
1614
1615 if (ub_sync_tur(sc, lun) != 0)
1616 return; /* Not ready */
1617 lun->changed = 0;
1618
1619 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1620 /*
1621 * The retry here means something is wrong, either with the
1622 * device, with the transport, or with our code.
1623 * We keep this because sd.c has retries for capacity.
1624 */
1625 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1626 lun->capacity.nsec = 0;
1627 lun->capacity.bsize = 512;
1628 lun->capacity.bshift = 0;
1629 }
1630 }
1631}
1632
1633/*
1634 * The open function.
1635 * This is mostly needed to keep refcounting, but also to support
1636 * media checks on removable media drives.
1637 */
1638static int ub_bd_open(struct block_device *bdev, fmode_t mode)
1639{
1640 struct ub_lun *lun = bdev->bd_disk->private_data;
1641 struct ub_dev *sc = lun->udev;
1642 unsigned long flags;
1643 int rc;
1644
1645 spin_lock_irqsave(&ub_lock, flags);
1646 if (atomic_read(&sc->poison)) {
1647 spin_unlock_irqrestore(&ub_lock, flags);
1648 return -ENXIO;
1649 }
1650 sc->openc++;
1651 spin_unlock_irqrestore(&ub_lock, flags);
1652
1653 if (lun->removable || lun->readonly)
1654 check_disk_change(bdev);
1655
1656 /*
1657 * The sd.c considers ->media_present and ->changed not equivalent,
1658 * under some pretty murky conditions (a failure of READ CAPACITY).
1659 * We may need it one day.
1660 */
1661 if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
1662 rc = -ENOMEDIUM;
1663 goto err_open;
1664 }
1665
1666 if (lun->readonly && (mode & FMODE_WRITE)) {
1667 rc = -EROFS;
1668 goto err_open;
1669 }
1670
1671 return 0;
1672
1673err_open:
1674 ub_put(sc);
1675 return rc;
1676}
1677
1678static int ub_bd_unlocked_open(struct block_device *bdev, fmode_t mode)
1679{
1680 int ret;
1681
1682 mutex_lock(&ub_mutex);
1683 ret = ub_bd_open(bdev, mode);
1684 mutex_unlock(&ub_mutex);
1685
1686 return ret;
1687}
1688
1689
1690/*
1691 */
1692static int ub_bd_release(struct gendisk *disk, fmode_t mode)
1693{
1694 struct ub_lun *lun = disk->private_data;
1695 struct ub_dev *sc = lun->udev;
1696
1697 mutex_lock(&ub_mutex);
1698 ub_put(sc);
1699 mutex_unlock(&ub_mutex);
1700
1701 return 0;
1702}
1703
1704/*
1705 * The ioctl interface.
1706 */
1707static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
1708 unsigned int cmd, unsigned long arg)
1709{
1710 void __user *usermem = (void __user *) arg;
1711 int ret;
1712
1713 mutex_lock(&ub_mutex);
1714 ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
1715 mutex_unlock(&ub_mutex);
1716
1717 return ret;
1718}
1719
1720/*
1721 * This is called by check_disk_change if we reported a media change.
1722 * The main objective here is to discover the features of the media such as
1723 * the capacity, read-only status, etc. USB storage generally does not
1724 * need to be spun up, but if we needed it, this would be the place.
1725 *
1726 * This call can sleep.
1727 *
1728 * The return code is not used.
1729 */
1730static int ub_bd_revalidate(struct gendisk *disk)
1731{
1732 struct ub_lun *lun = disk->private_data;
1733
1734 ub_revalidate(lun->udev, lun);
1735
1736 /* XXX Support sector size switching like in sr.c */
1737 blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
1738 set_capacity(disk, lun->capacity.nsec);
1739 // set_disk_ro(sdkp->disk, lun->readonly);
1740
1741 return 0;
1742}
1743
1744/*
1745 * The check is called by the block layer to verify if the media
1746 * is still available. It is supposed to be harmless, lightweight and
1747 * non-intrusive in case the media was not changed.
1748 *
1749 * This call can sleep.
1750 *
1751 * The return value is an event bitmask: DISK_EVENT_MEDIA_CHANGE or 0.
1752 */
1753static unsigned int ub_bd_check_events(struct gendisk *disk,
1754 unsigned int clearing)
1755{
1756 struct ub_lun *lun = disk->private_data;
1757
1758 if (!lun->removable)
1759 return 0;
1760
1761 /*
1762 * We clean checks always after every command, so this is not
1763 * as dangerous as it looks. If the TEST_UNIT_READY fails here,
1764 * the device is actually not ready with operator or software
1765 * intervention required. One dangerous item might be a drive which
1766 * spins itself down, and come the time to write dirty pages, this
1767 * will fail, then block layer discards the data. Since we never
1768 * spin drives up, such devices simply cannot be used with ub anyway.
1769 */
1770 if (ub_sync_tur(lun->udev, lun) != 0) {
1771 lun->changed = 1;
1772 return DISK_EVENT_MEDIA_CHANGE;
1773 }
1774
1775 return lun->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
1776}
1777
1778static const struct block_device_operations ub_bd_fops = {
1779 .owner = THIS_MODULE,
1780 .open = ub_bd_unlocked_open,
1781 .release = ub_bd_release,
1782 .ioctl = ub_bd_ioctl,
1783 .check_events = ub_bd_check_events,
1784 .revalidate_disk = ub_bd_revalidate,
1785};
1786
1787/*
1788 * Common ->done routine for commands executed synchronously.
1789 */
1790static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1791{
1792 struct completion *cop = cmd->back;
1793 complete(cop);
1794}
1795
1796/*
1797 * Test if the device has a check condition on it, synchronously.
1798 */
1799static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
1800{
1801 struct ub_scsi_cmd *cmd;
1802 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
1803 unsigned long flags;
1804 struct completion compl;
1805 int rc;
1806
1807 init_completion(&compl);
1808
1809 rc = -ENOMEM;
1810 if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1811 goto err_alloc;
1812
1813 cmd->cdb[0] = TEST_UNIT_READY;
1814 cmd->cdb_len = 6;
1815 cmd->dir = UB_DIR_NONE;
1816 cmd->state = UB_CMDST_INIT;
1817 cmd->lun = lun; /* This may be NULL, but that's ok */
1818 cmd->done = ub_probe_done;
1819 cmd->back = &compl;
1820
1821 spin_lock_irqsave(sc->lock, flags);
1822 cmd->tag = sc->tagcnt++;
1823
1824 rc = ub_submit_scsi(sc, cmd);
1825 spin_unlock_irqrestore(sc->lock, flags);
1826
1827 if (rc != 0)
1828 goto err_submit;
1829
1830 wait_for_completion(&compl);
1831
1832 rc = cmd->error;
1833
1834 if (rc == -EIO && cmd->key != 0) /* Retries for benh's key */
1835 rc = cmd->key;
1836
1837err_submit:
1838 kfree(cmd);
1839err_alloc:
1840 return rc;
1841}
1842
1843/*
1844 * Read the SCSI capacity synchronously (for probing).
1845 */
1846static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
1847 struct ub_capacity *ret)
1848{
1849 struct ub_scsi_cmd *cmd;
1850 struct scatterlist *sg;
1851 char *p;
1852 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
1853 unsigned long flags;
1854 unsigned int bsize, shift;
1855 unsigned long nsec;
1856 struct completion compl;
1857 int rc;
1858
1859 init_completion(&compl);
1860
1861 rc = -ENOMEM;
1862 if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1863 goto err_alloc;
1864 p = (char *)cmd + sizeof(struct ub_scsi_cmd);
1865
1866 cmd->cdb[0] = 0x25;
1867 cmd->cdb_len = 10;
1868 cmd->dir = UB_DIR_READ;
1869 cmd->state = UB_CMDST_INIT;
1870 cmd->nsg = 1;
1871 sg = &cmd->sgv[0];
1872 sg_init_table(sg, UB_MAX_REQ_SG);
1873 sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
1874 cmd->len = 8;
1875 cmd->lun = lun;
1876 cmd->done = ub_probe_done;
1877 cmd->back = &compl;
1878
1879 spin_lock_irqsave(sc->lock, flags);
1880 cmd->tag = sc->tagcnt++;
1881
1882 rc = ub_submit_scsi(sc, cmd);
1883 spin_unlock_irqrestore(sc->lock, flags);
1884
1885 if (rc != 0)
1886 goto err_submit;
1887
1888 wait_for_completion(&compl);
1889
1890 if (cmd->error != 0) {
1891 rc = -EIO;
1892 goto err_read;
1893 }
1894 if (cmd->act_len != 8) {
1895 rc = -EIO;
1896 goto err_read;
1897 }
1898
1899 /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
1900 nsec = be32_to_cpu(*(__be32 *)p) + 1;
1901 bsize = be32_to_cpu(*(__be32 *)(p + 4));
1902 switch (bsize) {
1903 case 512: shift = 0; break;
1904 case 1024: shift = 1; break;
1905 case 2048: shift = 2; break;
1906 case 4096: shift = 3; break;
1907 default:
1908 rc = -EDOM;
1909 goto err_inv_bsize;
1910 }
1911
1912 ret->bsize = bsize;
1913 ret->bshift = shift;
1914 ret->nsec = nsec << shift;
1915 rc = 0;
1916
1917err_inv_bsize:
1918err_read:
1919err_submit:
1920 kfree(cmd);
1921err_alloc:
1922 return rc;
1923}
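A worked decode of the 8-byte READ CAPACITY(10) response above (assumed example bytes, not a real trace): if the device returns 00 03 FF FF 00 00 08 00, the last LBA is 0x0003FFFF and the block size is 0x800 = 2048, so shift = 2 and ret->nsec = (0x3FFFF + 1) << 2 = 0x100000 sectors of 512 bytes, i.e. 512 MiB.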
1924
1925/*
1926 */
1927static void ub_probe_urb_complete(struct urb *urb)
1928{
1929 struct completion *cop = urb->context;
1930 complete(cop);
1931}
1932
1933static void ub_probe_timeout(unsigned long arg)
1934{
1935 struct completion *cop = (struct completion *) arg;
1936 complete(cop);
1937}
1938
1939/*
1940 * Reset with a Bulk reset.
1941 */
1942static int ub_sync_reset(struct ub_dev *sc)
1943{
1944 int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1945 struct usb_ctrlrequest *cr;
1946 struct completion compl;
1947 struct timer_list timer;
1948 int rc;
1949
1950 init_completion(&compl);
1951
1952 cr = &sc->work_cr;
1953 cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
1954 cr->bRequest = US_BULK_RESET_REQUEST;
1955 cr->wValue = cpu_to_le16(0);
1956 cr->wIndex = cpu_to_le16(ifnum);
1957 cr->wLength = cpu_to_le16(0);
1958
1959 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1960 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
1961
1962 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
1963 printk(KERN_WARNING
1964 "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
1965 return rc;
1966 }
1967
1968 init_timer(&timer);
1969 timer.function = ub_probe_timeout;
1970 timer.data = (unsigned long) &compl;
1971 timer.expires = jiffies + UB_CTRL_TIMEOUT;
1972 add_timer(&timer);
1973
1974 wait_for_completion(&compl);
1975
1976 del_timer_sync(&timer);
1977 usb_kill_urb(&sc->work_urb);
1978
1979 return sc->work_urb.status;
1980}
1981
1982/*
1983 * Get number of LUNs by the way of Bulk GetMaxLUN command.
1984 */
1985static int ub_sync_getmaxlun(struct ub_dev *sc)
1986{
1987 int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1988 unsigned char *p;
1989 enum { ALLOC_SIZE = 1 };
1990 struct usb_ctrlrequest *cr;
1991 struct completion compl;
1992 struct timer_list timer;
1993 int nluns;
1994 int rc;
1995
1996 init_completion(&compl);
1997
1998 rc = -ENOMEM;
1999 if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
2000 goto err_alloc;
2001 *p = 55;
2002
2003 cr = &sc->work_cr;
2004 cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
2005 cr->bRequest = US_BULK_GET_MAX_LUN;
2006 cr->wValue = cpu_to_le16(0);
2007 cr->wIndex = cpu_to_le16(ifnum);
2008 cr->wLength = cpu_to_le16(1);
2009
2010 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
2011 (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
2012
2013 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
2014 goto err_submit;
2015
2016 init_timer(&timer);
2017 timer.function = ub_probe_timeout;
2018 timer.data = (unsigned long) &compl;
2019 timer.expires = jiffies + UB_CTRL_TIMEOUT;
2020 add_timer(&timer);
2021
2022 wait_for_completion(&compl);
2023
2024 del_timer_sync(&timer);
2025 usb_kill_urb(&sc->work_urb);
2026
2027 if ((rc = sc->work_urb.status) < 0)
2028 goto err_io;
2029
2030 if (sc->work_urb.actual_length != 1) {
2031 nluns = 0;
2032 } else {
2033 if ((nluns = *p) == 55) {
2034 nluns = 0;
2035 } else {
2036 /* GetMaxLUN returns the maximum LUN number */
2037 nluns += 1;
2038 if (nluns > UB_MAX_LUNS)
2039 nluns = UB_MAX_LUNS;
2040 }
2041 }
2042
2043 kfree(p);
2044 return nluns;
2045
2046err_io:
2047err_submit:
2048 kfree(p);
2049err_alloc:
2050 return rc;
2051}
2052
2053/*
2054 * Clear initial stalls.
2055 */
2056static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
2057{
2058 int endp;
2059 struct usb_ctrlrequest *cr;
2060 struct completion compl;
2061 struct timer_list timer;
2062 int rc;
2063
2064 init_completion(&compl);
2065
2066 endp = usb_pipeendpoint(stalled_pipe);
2067	if (usb_pipein(stalled_pipe))
2068 endp |= USB_DIR_IN;
2069
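	/*
	 * A standard CLEAR_FEATURE(ENDPOINT_HALT) addressed to the stalled
	 * endpoint; the host-side toggle is reset separately below.
	 */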
2070 cr = &sc->work_cr;
2071 cr->bRequestType = USB_RECIP_ENDPOINT;
2072 cr->bRequest = USB_REQ_CLEAR_FEATURE;
2073 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
2074 cr->wIndex = cpu_to_le16(endp);
2075 cr->wLength = cpu_to_le16(0);
2076
2077 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2078 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2079
2080 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2081 printk(KERN_WARNING
2082 "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
2083 return rc;
2084 }
2085
2086 init_timer(&timer);
2087 timer.function = ub_probe_timeout;
2088 timer.data = (unsigned long) &compl;
2089 timer.expires = jiffies + UB_CTRL_TIMEOUT;
2090 add_timer(&timer);
2091
2092 wait_for_completion(&compl);
2093
2094 del_timer_sync(&timer);
2095 usb_kill_urb(&sc->work_urb);
2096
2097 usb_reset_endpoint(sc->dev, endp);
2098
2099 return 0;
2100}
2101
2102/*
2103 * Get the pipe settings.
2104 */
2105static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2106 struct usb_interface *intf)
2107{
2108 struct usb_host_interface *altsetting = intf->cur_altsetting;
2109 struct usb_endpoint_descriptor *ep_in = NULL;
2110 struct usb_endpoint_descriptor *ep_out = NULL;
2111 struct usb_endpoint_descriptor *ep;
2112 int i;
2113
2114 /*
2115 * Find the endpoints we need.
2116 * We are expecting a minimum of 2 endpoints - in and out (bulk).
2117 * We will ignore any others.
2118 */
2119 for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
2120 ep = &altsetting->endpoint[i].desc;
2121
2122 /* Is it a BULK endpoint? */
2123 if (usb_endpoint_xfer_bulk(ep)) {
2124 /* BULK in or out? */
2125 if (usb_endpoint_dir_in(ep)) {
2126 if (ep_in == NULL)
2127 ep_in = ep;
2128 } else {
2129 if (ep_out == NULL)
2130 ep_out = ep;
2131 }
2132 }
2133 }
2134
2135 if (ep_in == NULL || ep_out == NULL) {
2136 printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
2137 return -ENODEV;
2138 }
2139
2140 /* Calculate and store the pipe values */
2141 sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
2142 sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
2143 sc->send_bulk_pipe = usb_sndbulkpipe(dev,
2144 usb_endpoint_num(ep_out));
2145 sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
2146 usb_endpoint_num(ep_in));
2147
2148 return 0;
2149}
2150
2151/*
2152 * Probing is done in the process context, which allows us to cheat
2153 * and not build a state machine for the discovery.
2154 */
2155static int ub_probe(struct usb_interface *intf,
2156 const struct usb_device_id *dev_id)
2157{
2158 struct ub_dev *sc;
2159 int nluns;
2160 int rc;
2161 int i;
2162
2163 if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
2164 return -ENXIO;
2165
2166 rc = -ENOMEM;
2167 if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
2168 goto err_core;
2169 sc->lock = ub_next_lock();
2170 INIT_LIST_HEAD(&sc->luns);
2171 usb_init_urb(&sc->work_urb);
2172 tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
2173 atomic_set(&sc->poison, 0);
2174 INIT_WORK(&sc->reset_work, ub_reset_task);
2175 init_waitqueue_head(&sc->reset_wait);
2176
2177 init_timer(&sc->work_timer);
2178 sc->work_timer.data = (unsigned long) sc;
2179 sc->work_timer.function = ub_urb_timeout;
2180
2181 ub_init_completion(&sc->work_done);
2182 sc->work_done.done = 1; /* A little yuk, but oh well... */
2183
2184 sc->dev = interface_to_usbdev(intf);
2185 sc->intf = intf;
2186 // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
2187 usb_set_intfdata(intf, sc);
2188 usb_get_dev(sc->dev);
2189 /*
2190 * Since we give the interface struct to the block level through
2191 * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
2192 * oopses on close after a disconnect (kernels 2.6.16 and up).
2193 */
2194 usb_get_intf(sc->intf);
2195
2196 snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
2197 sc->dev->bus->busnum, sc->dev->devnum);
2198
2199 /* XXX Verify that we can handle the device (from descriptors) */
2200
2201 if (ub_get_pipes(sc, sc->dev, intf) != 0)
2202 goto err_dev_desc;
2203
2204 /*
2205 * At this point, all USB initialization is done, do upper layer.
2206 * We really hate halfway initialized structures, so from the
2207 * invariants perspective, this ub_dev is fully constructed at
2208 * this point.
2209 */
2210
2211 /*
2212 * This is needed to clear toggles. It is a problem only if we do
2213	 * `rmmod ub && modprobe ub` without disconnects, but we like to do that.
2214 */
2215#if 0 /* iPod Mini fails if we do this (big white iPod works) */
2216 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2217 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2218#endif
2219
2220 /*
2221 * The way this is used by the startup code is a little specific.
2222 * A SCSI check causes a USB stall. Our common case code sees it
2223 * and clears the check, after which the device is ready for use.
2224 * But if a check was not present, any command other than
2225 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
2226 *
2227 * If we neglect to clear the SCSI check, the first real command fails
2228 * (which is the capacity readout). We clear that and retry, but why
2229	 * cause spurious retries for no reason?
2230 *
2231 * Revalidation may start with its own TEST_UNIT_READY, but that one
2232 * has to succeed, so we clear checks with an additional one here.
2233	 * In any case it's not our business how revalidation is implemented.
2234 */
2235 for (i = 0; i < 3; i++) { /* Retries for the schwag key from KS'04 */
2236 if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
2237		if (rc != 0x6) break;	/* sense key 0x6: UNIT ATTENTION */
2238 msleep(10);
2239 }
2240
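	/*
	 * Default to a single LUN: many simple devices do not implement
	 * GetMaxLUN at all and just stall it.
	 */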
2241 nluns = 1;
2242 for (i = 0; i < 3; i++) {
2243 if ((rc = ub_sync_getmaxlun(sc)) < 0)
2244 break;
2245 if (rc != 0) {
2246 nluns = rc;
2247 break;
2248 }
2249 msleep(100);
2250 }
2251
2252 for (i = 0; i < nluns; i++) {
2253 ub_probe_lun(sc, i);
2254 }
2255 return 0;
2256
2257err_dev_desc:
2258 usb_set_intfdata(intf, NULL);
2259 usb_put_intf(sc->intf);
2260 usb_put_dev(sc->dev);
2261 kfree(sc);
2262err_core:
2263 return rc;
2264}
2265
2266static int ub_probe_lun(struct ub_dev *sc, int lnum)
2267{
2268 struct ub_lun *lun;
2269 struct request_queue *q;
2270 struct gendisk *disk;
2271 int rc;
2272
2273 rc = -ENOMEM;
2274 if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
2275 goto err_alloc;
2276 lun->num = lnum;
2277
2278 rc = -ENOSR;
2279 if ((lun->id = ub_id_get()) == -1)
2280 goto err_id;
2281
2282 lun->udev = sc;
2283
2284 snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
2285 lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
2286
2287 lun->removable = 1; /* XXX Query this from the device */
2288	lun->changed = 1;		/* only ub_revalidate clears this */
2289 ub_revalidate(sc, lun);
2290
2291 rc = -ENOMEM;
2292 if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
2293 goto err_diskalloc;
2294
2295 sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
2296 disk->major = UB_MAJOR;
2297 disk->first_minor = lun->id * UB_PARTS_PER_LUN;
2298 disk->fops = &ub_bd_fops;
2299 disk->private_data = lun;
2300 disk->driverfs_dev = &sc->intf->dev;
2301
2302 rc = -ENOMEM;
2303 if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
2304 goto err_blkqinit;
2305
2306 disk->queue = q;
2307
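	/*
	 * Conservative queue limits: bounce highmem pages (the driver does
	 * not handle highmem) and cap segments and sectors to what ub can
	 * map into one command.
	 */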
2308 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
2309 blk_queue_max_segments(q, UB_MAX_REQ_SG);
2310 blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
2311 blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
2312 blk_queue_logical_block_size(q, lun->capacity.bsize);
2313
2314 lun->disk = disk;
2315 q->queuedata = lun;
2316 list_add(&lun->link, &sc->luns);
2317
2318 set_capacity(disk, lun->capacity.nsec);
2319 if (lun->removable)
2320 disk->flags |= GENHD_FL_REMOVABLE;
2321
2322 add_disk(disk);
2323
2324 return 0;
2325
2326err_blkqinit:
2327 put_disk(disk);
2328err_diskalloc:
2329 ub_id_put(lun->id);
2330err_id:
2331 kfree(lun);
2332err_alloc:
2333 return rc;
2334}
2335
2336static void ub_disconnect(struct usb_interface *intf)
2337{
2338 struct ub_dev *sc = usb_get_intfdata(intf);
2339 struct ub_lun *lun;
2340 unsigned long flags;
2341
2342 /*
2343 * Prevent ub_bd_release from pulling the rug from under us.
2344 * XXX This is starting to look like a kref.
2345 * XXX Why not to take this ref at probe time?
2346 */
2347 spin_lock_irqsave(&ub_lock, flags);
2348 sc->openc++;
2349 spin_unlock_irqrestore(&ub_lock, flags);
2350
2351 /*
2352 * Fence stall clearings, operations triggered by unlinkings and so on.
2353 * We do not attempt to unlink any URBs, because we do not trust the
2354	 * unlink paths in HC drivers. Also, we get -84 (-EILSEQ) upon disconnect anyway.
2355 */
2356 atomic_set(&sc->poison, 1);
2357
2358 /*
2359 * Wait for reset to end, if any.
2360 */
2361 wait_event(sc->reset_wait, !sc->reset);
2362
2363 /*
2364 * Blow away queued commands.
2365 *
2366 * Actually, this never works, because before we get here
2367 * the HCD terminates outstanding URB(s). It causes our
2368 * SCSI command queue to advance, commands fail to submit,
2369 * and the whole queue drains. So, we just use this code to
2370 * print warnings.
2371 */
2372 spin_lock_irqsave(sc->lock, flags);
2373 {
2374 struct ub_scsi_cmd *cmd;
2375 int cnt = 0;
2376 while ((cmd = ub_cmdq_peek(sc)) != NULL) {
2377 cmd->error = -ENOTCONN;
2378 cmd->state = UB_CMDST_DONE;
2379 ub_cmdq_pop(sc);
2380 (*cmd->done)(sc, cmd);
2381 cnt++;
2382 }
2383 if (cnt != 0) {
2384 printk(KERN_WARNING "%s: "
2385			    "%d commands were queued after shutdown\n", sc->name, cnt);
2386 }
2387 }
2388 spin_unlock_irqrestore(sc->lock, flags);
2389
2390 /*
2391 * Unregister the upper layer.
2392 */
2393 list_for_each_entry(lun, &sc->luns, link) {
2394 del_gendisk(lun->disk);
2395 /*
2396 * I wish I could do:
2397 * queue_flag_set(QUEUE_FLAG_DEAD, q);
2398 * As it is, we rely on our internal poisoning and let
2399	 * the upper levels spin furiously, failing all the I/O.
2400 */
2401 }
2402
2403 /*
2404 * Testing for -EINPROGRESS is always a bug, so we are bending
2405 * the rules a little.
2406 */
2407 spin_lock_irqsave(sc->lock, flags);
2408 if (sc->work_urb.status == -EINPROGRESS) { /* janitors: ignore */
2409 printk(KERN_WARNING "%s: "
2410 "URB is active after disconnect\n", sc->name);
2411 }
2412 spin_unlock_irqrestore(sc->lock, flags);
2413
2414 /*
2415	 * There is virtually no chance that another CPU runs the timeout this
2416	 * long after ub_urb_complete should have called del_timer, provided
2417	 * the HCD did not forget to deliver a callback on unlink.
2418 */
2419 del_timer_sync(&sc->work_timer);
2420
2421 /*
2422 * At this point there must be no commands coming from anyone
2423 * and no URBs left in transit.
2424 */
2425
2426 ub_put(sc);
2427}
2428
2429static struct usb_driver ub_driver = {
2430 .name = "ub",
2431 .probe = ub_probe,
2432 .disconnect = ub_disconnect,
2433 .id_table = ub_usb_ids,
2434 .pre_reset = ub_pre_reset,
2435 .post_reset = ub_post_reset,
2436};
2437
2438static int __init ub_init(void)
2439{
2440 int rc;
2441 int i;
2442
2443 pr_info("'Low Performance USB Block' driver is deprecated. "
2444 "Please switch to usb-storage\n");
2445 for (i = 0; i < UB_QLOCK_NUM; i++)
2446 spin_lock_init(&ub_qlockv[i]);
2447
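	/*
	 * Register the block major before the USB driver: probe can run as
	 * soon as usb_register returns and wants to add disks under UB_MAJOR.
	 */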
2448 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2449 goto err_regblkdev;
2450
2451 if ((rc = usb_register(&ub_driver)) != 0)
2452 goto err_register;
2453
2454 usb_usual_set_present(USB_US_TYPE_UB);
2455 return 0;
2456
2457err_register:
2458 unregister_blkdev(UB_MAJOR, DRV_NAME);
2459err_regblkdev:
2460 return rc;
2461}
2462
2463static void __exit ub_exit(void)
2464{
2465 usb_deregister(&ub_driver);
2466
2467 unregister_blkdev(UB_MAJOR, DRV_NAME);
2468 usb_usual_clear_present(USB_US_TYPE_UB);
2469}
2470
2471module_init(ub_init);
2472module_exit(ub_exit);
2473
2474MODULE_LICENSE("GPL");