| author    | Jaroslav Kysela <perex@hera.kernel.org> | 2005-06-21 10:39:41 -0400 |
|-----------|-----------------------------------------|---------------------------|
| committer | Jaroslav Kysela <perex@hera.kernel.org> | 2005-06-21 10:39:41 -0400 |
| commit    | fae6ec69c84d71b1d5bda9ede1a262c1681684aa (patch) | |
| tree      | eb4aff9a5c2b7d04ce09a3717bb1dd4a79fe7595 /drivers/block | |
| parent    | bbc0274e9bb2e3f1d724d445a2bd32566b9b66f7 (diff) | |
| parent    | 1d345dac1f30af1cd9f3a1faa12f9f18f17f236e (diff) | |
Merge with /pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'drivers/block')
| -rw-r--r-- | drivers/block/aoe/aoechr.c | 10 |
| -rw-r--r-- | drivers/block/as-iosched.c | 4 |
| -rw-r--r-- | drivers/block/cciss.c | 12 |
| -rw-r--r-- | drivers/block/cfq-iosched.c | 13 |
| -rw-r--r-- | drivers/block/deadline-iosched.c | 4 |
| -rw-r--r-- | drivers/block/elevator.c | 22 |
| -rw-r--r-- | drivers/block/genhd.c | 2 |
| -rw-r--r-- | drivers/block/ll_rw_blk.c | 24 |
| -rw-r--r-- | drivers/block/paride/pd.c | 2 |
| -rw-r--r-- | drivers/block/paride/pg.c | 14 |
| -rw-r--r-- | drivers/block/paride/pt.c | 20 |
| -rw-r--r-- | drivers/block/sx8.c | 4 |
| -rw-r--r-- | drivers/block/ub.c | 600 |
13 files changed, 427 insertions, 304 deletions
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 14aeca3e2e8c..45a243096187 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -36,7 +36,7 @@ static int emsgs_head_idx, emsgs_tail_idx; | |||
36 | static struct semaphore emsgs_sema; | 36 | static struct semaphore emsgs_sema; |
37 | static spinlock_t emsgs_lock; | 37 | static spinlock_t emsgs_lock; |
38 | static int nblocked_emsgs_readers; | 38 | static int nblocked_emsgs_readers; |
39 | static struct class_simple *aoe_class; | 39 | static struct class *aoe_class; |
40 | static struct aoe_chardev chardevs[] = { | 40 | static struct aoe_chardev chardevs[] = { |
41 | { MINOR_ERR, "err" }, | 41 | { MINOR_ERR, "err" }, |
42 | { MINOR_DISCOVER, "discover" }, | 42 | { MINOR_DISCOVER, "discover" }, |
@@ -218,13 +218,13 @@ aoechr_init(void) | |||
218 | } | 218 | } |
219 | sema_init(&emsgs_sema, 0); | 219 | sema_init(&emsgs_sema, 0); |
220 | spin_lock_init(&emsgs_lock); | 220 | spin_lock_init(&emsgs_lock); |
221 | aoe_class = class_simple_create(THIS_MODULE, "aoe"); | 221 | aoe_class = class_create(THIS_MODULE, "aoe"); |
222 | if (IS_ERR(aoe_class)) { | 222 | if (IS_ERR(aoe_class)) { |
223 | unregister_chrdev(AOE_MAJOR, "aoechr"); | 223 | unregister_chrdev(AOE_MAJOR, "aoechr"); |
224 | return PTR_ERR(aoe_class); | 224 | return PTR_ERR(aoe_class); |
225 | } | 225 | } |
226 | for (i = 0; i < ARRAY_SIZE(chardevs); ++i) | 226 | for (i = 0; i < ARRAY_SIZE(chardevs); ++i) |
227 | class_simple_device_add(aoe_class, | 227 | class_device_create(aoe_class, |
228 | MKDEV(AOE_MAJOR, chardevs[i].minor), | 228 | MKDEV(AOE_MAJOR, chardevs[i].minor), |
229 | NULL, chardevs[i].name); | 229 | NULL, chardevs[i].name); |
230 | 230 | ||
@@ -237,8 +237,8 @@ aoechr_exit(void) | |||
237 | int i; | 237 | int i; |
238 | 238 | ||
239 | for (i = 0; i < ARRAY_SIZE(chardevs); ++i) | 239 | for (i = 0; i < ARRAY_SIZE(chardevs); ++i) |
240 | class_simple_device_remove(MKDEV(AOE_MAJOR, chardevs[i].minor)); | 240 | class_device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor)); |
241 | class_simple_destroy(aoe_class); | 241 | class_destroy(aoe_class); |
242 | unregister_chrdev(AOE_MAJOR, "aoechr"); | 242 | unregister_chrdev(AOE_MAJOR, "aoechr"); |
243 | } | 243 | } |
244 | 244 | ||
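The aoechr hunks above are one instance of this merge's tree-wide switch from the class_simple helpers to the full class API (class_create/class_device_create on setup, class_device_destroy/class_destroy on teardown); the same substitution recurs in paride/pg.c and paride/pt.c below. A minimal sketch of a converted init/exit pair, using the same calls and argument order as aoechr_init()/aoechr_exit() — the "example" names and EXAMPLE_MAJOR are hypothetical:

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>

#define EXAMPLE_MAJOR 240		/* hypothetical char major */

static struct class *example_class;	/* was: struct class_simple * */

static int __init example_chr_init(void)
{
	/* register_chrdev(EXAMPLE_MAJOR, ...) is assumed to have succeeded */
	example_class = class_create(THIS_MODULE, "example");
	if (IS_ERR(example_class)) {
		unregister_chrdev(EXAMPLE_MAJOR, "example");
		return PTR_ERR(example_class);
	}
	/* one sysfs/udev node per minor, as aoechr_init() does per chardev */
	class_device_create(example_class, MKDEV(EXAMPLE_MAJOR, 0),
			NULL, "example0");
	return 0;
}

static void __exit example_chr_exit(void)
{
	/* teardown now needs the class pointer; class_simple_device_remove() did not */
	class_device_destroy(example_class, MKDEV(EXAMPLE_MAJOR, 0));
	class_destroy(example_class);
	unregister_chrdev(EXAMPLE_MAJOR, "example");
}
```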
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index a9575bb58a5e..638db06de2be 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -2044,7 +2044,7 @@ as_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | |||
2044 | struct as_fs_entry *entry = to_as(attr); | 2044 | struct as_fs_entry *entry = to_as(attr); |
2045 | 2045 | ||
2046 | if (!entry->show) | 2046 | if (!entry->show) |
2047 | return 0; | 2047 | return -EIO; |
2048 | 2048 | ||
2049 | return entry->show(e->elevator_data, page); | 2049 | return entry->show(e->elevator_data, page); |
2050 | } | 2050 | } |
@@ -2057,7 +2057,7 @@ as_attr_store(struct kobject *kobj, struct attribute *attr, | |||
2057 | struct as_fs_entry *entry = to_as(attr); | 2057 | struct as_fs_entry *entry = to_as(attr); |
2058 | 2058 | ||
2059 | if (!entry->store) | 2059 | if (!entry->store) |
2060 | return -EINVAL; | 2060 | return -EIO; |
2061 | 2061 | ||
2062 | return entry->store(e->elevator_data, page, length); | 2062 | return entry->store(e->elevator_data, page, length); |
2063 | } | 2063 | } |
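The same one-line change recurs in deadline-iosched.c, cfq-iosched.c, genhd.c and ll_rw_blk.c below: a sysfs attribute with no ->show or ->store method now reports -EIO instead of 0 or -EINVAL, so userspace sees an I/O error rather than a silent empty read or a misleading "invalid argument". A hedged sketch of the resulting dispatch pair; the elv_fs_entry/to_elv names are illustrative, patterned on the *_fs_entry structs used by these schedulers:

```c
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

/* illustrative attribute-table entry, one per tunable */
struct elv_fs_entry {
	struct attribute attr;
	ssize_t (*show)(void *data, char *page);
	ssize_t (*store)(void *data, const char *page, size_t count);
};

#define to_elv(a) container_of((a), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);

	if (!entry->show)
		return -EIO;	/* attribute exists but is write-only */
	/* the real schedulers pass their elevator_data here */
	return entry->show(kobj, page);
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);

	if (!entry->store)
		return -EIO;	/* attribute exists but is read-only */
	return entry->store(kobj, page, length);
}
```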
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 8f7c1a1ed7f4..abde27027c06 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -41,6 +41,7 @@ | |||
41 | #include <asm/uaccess.h> | 41 | #include <asm/uaccess.h> |
42 | #include <asm/io.h> | 42 | #include <asm/io.h> |
43 | 43 | ||
44 | #include <linux/dma-mapping.h> | ||
44 | #include <linux/blkdev.h> | 45 | #include <linux/blkdev.h> |
45 | #include <linux/genhd.h> | 46 | #include <linux/genhd.h> |
46 | #include <linux/completion.h> | 47 | #include <linux/completion.h> |
@@ -126,8 +127,6 @@ static struct board_type products[] = { | |||
126 | #define MAX_CTLR_ORIG 8 | 127 | #define MAX_CTLR_ORIG 8 |
127 | 128 | ||
128 | 129 | ||
129 | #define CCISS_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */ | ||
130 | |||
131 | static ctlr_info_t *hba[MAX_CTLR]; | 130 | static ctlr_info_t *hba[MAX_CTLR]; |
132 | 131 | ||
133 | static void do_cciss_request(request_queue_t *q); | 132 | static void do_cciss_request(request_queue_t *q); |
@@ -2393,11 +2392,6 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) | |||
2393 | printk(KERN_ERR "cciss: Unable to Enable PCI device\n"); | 2392 | printk(KERN_ERR "cciss: Unable to Enable PCI device\n"); |
2394 | return( -1); | 2393 | return( -1); |
2395 | } | 2394 | } |
2396 | if (pci_set_dma_mask(pdev, CCISS_DMA_MASK ) != 0) | ||
2397 | { | ||
2398 | printk(KERN_ERR "cciss: Unable to set DMA mask\n"); | ||
2399 | return(-1); | ||
2400 | } | ||
2401 | 2395 | ||
2402 | subsystem_vendor_id = pdev->subsystem_vendor; | 2396 | subsystem_vendor_id = pdev->subsystem_vendor; |
2403 | subsystem_device_id = pdev->subsystem_device; | 2397 | subsystem_device_id = pdev->subsystem_device; |
@@ -2747,9 +2741,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
2747 | hba[i]->pdev = pdev; | 2741 | hba[i]->pdev = pdev; |
2748 | 2742 | ||
2749 | /* configure PCI DMA stuff */ | 2743 | /* configure PCI DMA stuff */ |
2750 | if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) | 2744 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) |
2751 | printk("cciss: using DAC cycles\n"); | 2745 | printk("cciss: using DAC cycles\n"); |
2752 | else if (!pci_set_dma_mask(pdev, 0xffffffff)) | 2746 | else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) |
2753 | printk("cciss: not using DAC cycles\n"); | 2747 | printk("cciss: not using DAC cycles\n"); |
2754 | else { | 2748 | else { |
2755 | printk("cciss: no suitable DMA available\n"); | 2749 | printk("cciss: no suitable DMA available\n"); |
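The cciss hunks drop the driver-private CCISS_DMA_MASK constant and the redundant early pci_set_dma_mask() call, and switch the probe-time fallback to the generic DMA_64BIT_MASK/DMA_32BIT_MASK definitions from <linux/dma-mapping.h>. A sketch of that fallback as it reads after the patch; the "example" prefix and the -ENODEV return are illustrative, not the driver's exact error path:

```c
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_configure_dma(struct pci_dev *pdev)
{
	/* prefer 64-bit (DAC) addressing, fall back to a 32-bit mask */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		printk("example: using DAC cycles\n");
	else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		printk("example: not using DAC cycles\n");
	else {
		printk("example: no suitable DMA available\n");
		return -ENODEV;
	}
	return 0;
}
```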
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index 0ef7a0065ece..3ac47dde64da 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -1202,13 +1202,16 @@ retry: | |||
1202 | if (new_cfqq) { | 1202 | if (new_cfqq) { |
1203 | cfqq = new_cfqq; | 1203 | cfqq = new_cfqq; |
1204 | new_cfqq = NULL; | 1204 | new_cfqq = NULL; |
1205 | } else if (gfp_mask & __GFP_WAIT) { | 1205 | } else { |
1206 | spin_unlock_irq(cfqd->queue->queue_lock); | 1206 | spin_unlock_irq(cfqd->queue->queue_lock); |
1207 | new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); | 1207 | new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); |
1208 | spin_lock_irq(cfqd->queue->queue_lock); | 1208 | spin_lock_irq(cfqd->queue->queue_lock); |
1209 | |||
1210 | if (!new_cfqq && !(gfp_mask & __GFP_WAIT)) | ||
1211 | goto out; | ||
1212 | |||
1209 | goto retry; | 1213 | goto retry; |
1210 | } else | 1214 | } |
1211 | goto out; | ||
1212 | 1215 | ||
1213 | memset(cfqq, 0, sizeof(*cfqq)); | 1216 | memset(cfqq, 0, sizeof(*cfqq)); |
1214 | 1217 | ||
@@ -1772,7 +1775,7 @@ cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | |||
1772 | struct cfq_fs_entry *entry = to_cfq(attr); | 1775 | struct cfq_fs_entry *entry = to_cfq(attr); |
1773 | 1776 | ||
1774 | if (!entry->show) | 1777 | if (!entry->show) |
1775 | return 0; | 1778 | return -EIO; |
1776 | 1779 | ||
1777 | return entry->show(e->elevator_data, page); | 1780 | return entry->show(e->elevator_data, page); |
1778 | } | 1781 | } |
@@ -1785,7 +1788,7 @@ cfq_attr_store(struct kobject *kobj, struct attribute *attr, | |||
1785 | struct cfq_fs_entry *entry = to_cfq(attr); | 1788 | struct cfq_fs_entry *entry = to_cfq(attr); |
1786 | 1789 | ||
1787 | if (!entry->store) | 1790 | if (!entry->store) |
1788 | return -EINVAL; | 1791 | return -EIO; |
1789 | 1792 | ||
1790 | return entry->store(e->elevator_data, page, length); | 1793 | return entry->store(e->elevator_data, page, length); |
1791 | } | 1794 | } |
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index d63d34c671f7..7f79f3dd0165 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -886,7 +886,7 @@ deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | |||
886 | struct deadline_fs_entry *entry = to_deadline(attr); | 886 | struct deadline_fs_entry *entry = to_deadline(attr); |
887 | 887 | ||
888 | if (!entry->show) | 888 | if (!entry->show) |
889 | return 0; | 889 | return -EIO; |
890 | 890 | ||
891 | return entry->show(e->elevator_data, page); | 891 | return entry->show(e->elevator_data, page); |
892 | } | 892 | } |
@@ -899,7 +899,7 @@ deadline_attr_store(struct kobject *kobj, struct attribute *attr, | |||
899 | struct deadline_fs_entry *entry = to_deadline(attr); | 899 | struct deadline_fs_entry *entry = to_deadline(attr); |
900 | 900 | ||
901 | if (!entry->store) | 901 | if (!entry->store) |
902 | return -EINVAL; | 902 | return -EIO; |
903 | 903 | ||
904 | return entry->store(e->elevator_data, page, length); | 904 | return entry->store(e->elevator_data, page, length); |
905 | } | 905 | } |
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
index 6b79b4314622..f831f08f839c 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -220,11 +220,6 @@ void elevator_exit(elevator_t *e) | |||
220 | kfree(e); | 220 | kfree(e); |
221 | } | 221 | } |
222 | 222 | ||
223 | static int elevator_global_init(void) | ||
224 | { | ||
225 | return 0; | ||
226 | } | ||
227 | |||
228 | int elv_merge(request_queue_t *q, struct request **req, struct bio *bio) | 223 | int elv_merge(request_queue_t *q, struct request **req, struct bio *bio) |
229 | { | 224 | { |
230 | elevator_t *e = q->elevator; | 225 | elevator_t *e = q->elevator; |
@@ -291,6 +286,13 @@ void elv_requeue_request(request_queue_t *q, struct request *rq) | |||
291 | } | 286 | } |
292 | 287 | ||
293 | /* | 288 | /* |
289 | * the request is prepped and may have some resources allocated. | ||
290 | * allowing unprepped requests to pass this one may cause resource | ||
291 | * deadlock. turn on softbarrier. | ||
292 | */ | ||
293 | rq->flags |= REQ_SOFTBARRIER; | ||
294 | |||
295 | /* | ||
294 | * if iosched has an explicit requeue hook, then use that. otherwise | 296 | * if iosched has an explicit requeue hook, then use that. otherwise |
295 | * just put the request at the front of the queue | 297 | * just put the request at the front of the queue |
296 | */ | 298 | */ |
@@ -322,7 +324,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where, | |||
322 | int nrq = q->rq.count[READ] + q->rq.count[WRITE] | 324 | int nrq = q->rq.count[READ] + q->rq.count[WRITE] |
323 | - q->in_flight; | 325 | - q->in_flight; |
324 | 326 | ||
325 | if (nrq == q->unplug_thresh) | 327 | if (nrq >= q->unplug_thresh) |
326 | __generic_unplug_device(q); | 328 | __generic_unplug_device(q); |
327 | } | 329 | } |
328 | } else | 330 | } else |
@@ -386,6 +388,12 @@ struct request *elv_next_request(request_queue_t *q) | |||
386 | if (ret == BLKPREP_OK) { | 388 | if (ret == BLKPREP_OK) { |
387 | break; | 389 | break; |
388 | } else if (ret == BLKPREP_DEFER) { | 390 | } else if (ret == BLKPREP_DEFER) { |
391 | /* | ||
392 | * the request may have been (partially) prepped. | ||
393 | * we need to keep this request in the front to | ||
394 | * avoid resource deadlock. turn on softbarrier. | ||
395 | */ | ||
396 | rq->flags |= REQ_SOFTBARRIER; | ||
389 | rq = NULL; | 397 | rq = NULL; |
390 | break; | 398 | break; |
391 | } else if (ret == BLKPREP_KILL) { | 399 | } else if (ret == BLKPREP_KILL) { |
@@ -692,8 +700,6 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name) | |||
692 | return len; | 700 | return len; |
693 | } | 701 | } |
694 | 702 | ||
695 | module_init(elevator_global_init); | ||
696 | |||
697 | EXPORT_SYMBOL(elv_add_request); | 703 | EXPORT_SYMBOL(elv_add_request); |
698 | EXPORT_SYMBOL(__elv_add_request); | 704 | EXPORT_SYMBOL(__elv_add_request); |
699 | EXPORT_SYMBOL(elv_requeue_request); | 705 | EXPORT_SYMBOL(elv_requeue_request); |
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c
index 8bbe01d4b487..53f7d846b747 100644
--- a/drivers/block/genhd.c
+++ b/drivers/block/genhd.c
@@ -322,7 +322,7 @@ static ssize_t disk_attr_show(struct kobject *kobj, struct attribute *attr, | |||
322 | struct gendisk *disk = to_disk(kobj); | 322 | struct gendisk *disk = to_disk(kobj); |
323 | struct disk_attribute *disk_attr = | 323 | struct disk_attribute *disk_attr = |
324 | container_of(attr,struct disk_attribute,attr); | 324 | container_of(attr,struct disk_attribute,attr); |
325 | ssize_t ret = 0; | 325 | ssize_t ret = -EIO; |
326 | 326 | ||
327 | if (disk_attr->show) | 327 | if (disk_attr->show) |
328 | ret = disk_attr->show(disk,page); | 328 | ret = disk_attr->show(disk,page); |
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 11ef9d9ea139..81fe3a0c1fe7 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -2038,7 +2038,6 @@ EXPORT_SYMBOL(blk_requeue_request); | |||
2038 | * @rq: request to be inserted | 2038 | * @rq: request to be inserted |
2039 | * @at_head: insert request at head or tail of queue | 2039 | * @at_head: insert request at head or tail of queue |
2040 | * @data: private data | 2040 | * @data: private data |
2041 | * @reinsert: true if request it a reinsertion of previously processed one | ||
2042 | * | 2041 | * |
2043 | * Description: | 2042 | * Description: |
2044 | * Many block devices need to execute commands asynchronously, so they don't | 2043 | * Many block devices need to execute commands asynchronously, so they don't |
@@ -2053,8 +2052,9 @@ EXPORT_SYMBOL(blk_requeue_request); | |||
2053 | * host that is unable to accept a particular command. | 2052 | * host that is unable to accept a particular command. |
2054 | */ | 2053 | */ |
2055 | void blk_insert_request(request_queue_t *q, struct request *rq, | 2054 | void blk_insert_request(request_queue_t *q, struct request *rq, |
2056 | int at_head, void *data, int reinsert) | 2055 | int at_head, void *data) |
2057 | { | 2056 | { |
2057 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; | ||
2058 | unsigned long flags; | 2058 | unsigned long flags; |
2059 | 2059 | ||
2060 | /* | 2060 | /* |
@@ -2071,20 +2071,12 @@ void blk_insert_request(request_queue_t *q, struct request *rq, | |||
2071 | /* | 2071 | /* |
2072 | * If command is tagged, release the tag | 2072 | * If command is tagged, release the tag |
2073 | */ | 2073 | */ |
2074 | if (reinsert) | 2074 | if (blk_rq_tagged(rq)) |
2075 | blk_requeue_request(q, rq); | 2075 | blk_queue_end_tag(q, rq); |
2076 | else { | ||
2077 | int where = ELEVATOR_INSERT_BACK; | ||
2078 | |||
2079 | if (at_head) | ||
2080 | where = ELEVATOR_INSERT_FRONT; | ||
2081 | 2076 | ||
2082 | if (blk_rq_tagged(rq)) | 2077 | drive_stat_acct(rq, rq->nr_sectors, 1); |
2083 | blk_queue_end_tag(q, rq); | 2078 | __elv_add_request(q, rq, where, 0); |
2084 | 2079 | ||
2085 | drive_stat_acct(rq, rq->nr_sectors, 1); | ||
2086 | __elv_add_request(q, rq, where, 0); | ||
2087 | } | ||
2088 | if (blk_queue_plugged(q)) | 2080 | if (blk_queue_plugged(q)) |
2089 | __generic_unplug_device(q); | 2081 | __generic_unplug_device(q); |
2090 | else | 2082 | else |
@@ -3582,7 +3574,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | |||
3582 | 3574 | ||
3583 | q = container_of(kobj, struct request_queue, kobj); | 3575 | q = container_of(kobj, struct request_queue, kobj); |
3584 | if (!entry->show) | 3576 | if (!entry->show) |
3585 | return 0; | 3577 | return -EIO; |
3586 | 3578 | ||
3587 | return entry->show(q, page); | 3579 | return entry->show(q, page); |
3588 | } | 3580 | } |
@@ -3596,7 +3588,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr, | |||
3596 | 3588 | ||
3597 | q = container_of(kobj, struct request_queue, kobj); | 3589 | q = container_of(kobj, struct request_queue, kobj); |
3598 | if (!entry->store) | 3590 | if (!entry->store) |
3599 | return -EINVAL; | 3591 | return -EIO; |
3600 | 3592 | ||
3601 | return entry->store(q, page, length); | 3593 | return entry->store(q, page, length); |
3602 | } | 3594 | } |
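With reinsertion now handled by blk_requeue_request() and the REQ_SOFTBARRIER flag (see the elevator.c hunks above), blk_insert_request() loses its reinsert argument and simply queues at the head or tail of the elevator. The paride and sx8 callers below just drop the trailing 0; a hedged sketch of the four-argument form, modelled on pd_special_command() — the wrapper name and error return are hypothetical:

```c
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/string.h>

/* "q" is the gendisk's queue, "data" the private pointer handed back to the
 * driver's request function, as in pd_special_command() below. */
static int example_special_command(request_queue_t *q, void *data)
{
	DECLARE_COMPLETION(wait);
	struct request rq;

	memset(&rq, 0, sizeof(rq));
	rq.ref_count = 1;
	rq.waiting = &wait;
	rq.end_io = blk_end_sync_rq;

	/* queue, request, at_head (0 = tail), private data -- no reinsert flag */
	blk_insert_request(q, &rq, 0, data);
	wait_for_completion(&wait);
	rq.waiting = NULL;

	return rq.errors ? -EIO : 0;
}
```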
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 202a5a74ad37..fa49d62626ba 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -723,7 +723,7 @@ static int pd_special_command(struct pd_unit *disk, | |||
723 | rq.ref_count = 1; | 723 | rq.ref_count = 1; |
724 | rq.waiting = &wait; | 724 | rq.waiting = &wait; |
725 | rq.end_io = blk_end_sync_rq; | 725 | rq.end_io = blk_end_sync_rq; |
726 | blk_insert_request(disk->gd->queue, &rq, 0, func, 0); | 726 | blk_insert_request(disk->gd->queue, &rq, 0, func); |
727 | wait_for_completion(&wait); | 727 | wait_for_completion(&wait); |
728 | rq.waiting = NULL; | 728 | rq.waiting = NULL; |
729 | if (rq.errors) | 729 | if (rq.errors) |
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index dbeb107bb971..84d8e291ed96 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -222,7 +222,7 @@ static int pg_identify(struct pg *dev, int log); | |||
222 | 222 | ||
223 | static char pg_scratch[512]; /* scratch block buffer */ | 223 | static char pg_scratch[512]; /* scratch block buffer */ |
224 | 224 | ||
225 | static struct class_simple *pg_class; | 225 | static struct class *pg_class; |
226 | 226 | ||
227 | /* kernel glue structures */ | 227 | /* kernel glue structures */ |
228 | 228 | ||
@@ -666,7 +666,7 @@ static int __init pg_init(void) | |||
666 | err = -1; | 666 | err = -1; |
667 | goto out; | 667 | goto out; |
668 | } | 668 | } |
669 | pg_class = class_simple_create(THIS_MODULE, "pg"); | 669 | pg_class = class_create(THIS_MODULE, "pg"); |
670 | if (IS_ERR(pg_class)) { | 670 | if (IS_ERR(pg_class)) { |
671 | err = PTR_ERR(pg_class); | 671 | err = PTR_ERR(pg_class); |
672 | goto out_chrdev; | 672 | goto out_chrdev; |
@@ -675,7 +675,7 @@ static int __init pg_init(void) | |||
675 | for (unit = 0; unit < PG_UNITS; unit++) { | 675 | for (unit = 0; unit < PG_UNITS; unit++) { |
676 | struct pg *dev = &devices[unit]; | 676 | struct pg *dev = &devices[unit]; |
677 | if (dev->present) { | 677 | if (dev->present) { |
678 | class_simple_device_add(pg_class, MKDEV(major, unit), | 678 | class_device_create(pg_class, MKDEV(major, unit), |
679 | NULL, "pg%u", unit); | 679 | NULL, "pg%u", unit); |
680 | err = devfs_mk_cdev(MKDEV(major, unit), | 680 | err = devfs_mk_cdev(MKDEV(major, unit), |
681 | S_IFCHR | S_IRUSR | S_IWUSR, "pg/%u", | 681 | S_IFCHR | S_IRUSR | S_IWUSR, "pg/%u", |
@@ -688,8 +688,8 @@ static int __init pg_init(void) | |||
688 | goto out; | 688 | goto out; |
689 | 689 | ||
690 | out_class: | 690 | out_class: |
691 | class_simple_device_remove(MKDEV(major, unit)); | 691 | class_device_destroy(pg_class, MKDEV(major, unit)); |
692 | class_simple_destroy(pg_class); | 692 | class_destroy(pg_class); |
693 | out_chrdev: | 693 | out_chrdev: |
694 | unregister_chrdev(major, "pg"); | 694 | unregister_chrdev(major, "pg"); |
695 | out: | 695 | out: |
@@ -703,11 +703,11 @@ static void __exit pg_exit(void) | |||
703 | for (unit = 0; unit < PG_UNITS; unit++) { | 703 | for (unit = 0; unit < PG_UNITS; unit++) { |
704 | struct pg *dev = &devices[unit]; | 704 | struct pg *dev = &devices[unit]; |
705 | if (dev->present) { | 705 | if (dev->present) { |
706 | class_simple_device_remove(MKDEV(major, unit)); | 706 | class_device_destroy(pg_class, MKDEV(major, unit)); |
707 | devfs_remove("pg/%u", unit); | 707 | devfs_remove("pg/%u", unit); |
708 | } | 708 | } |
709 | } | 709 | } |
710 | class_simple_destroy(pg_class); | 710 | class_destroy(pg_class); |
711 | devfs_remove("pg"); | 711 | devfs_remove("pg"); |
712 | unregister_chrdev(major, name); | 712 | unregister_chrdev(major, name); |
713 | 713 | ||
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 8fbd6922fe0d..5fe8ee86f095 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -242,7 +242,7 @@ static struct file_operations pt_fops = { | |||
242 | }; | 242 | }; |
243 | 243 | ||
244 | /* sysfs class support */ | 244 | /* sysfs class support */ |
245 | static struct class_simple *pt_class; | 245 | static struct class *pt_class; |
246 | 246 | ||
247 | static inline int status_reg(struct pi_adapter *pi) | 247 | static inline int status_reg(struct pi_adapter *pi) |
248 | { | 248 | { |
@@ -963,7 +963,7 @@ static int __init pt_init(void) | |||
963 | err = -1; | 963 | err = -1; |
964 | goto out; | 964 | goto out; |
965 | } | 965 | } |
966 | pt_class = class_simple_create(THIS_MODULE, "pt"); | 966 | pt_class = class_create(THIS_MODULE, "pt"); |
967 | if (IS_ERR(pt_class)) { | 967 | if (IS_ERR(pt_class)) { |
968 | err = PTR_ERR(pt_class); | 968 | err = PTR_ERR(pt_class); |
969 | goto out_chrdev; | 969 | goto out_chrdev; |
@@ -972,29 +972,29 @@ static int __init pt_init(void) | |||
972 | devfs_mk_dir("pt"); | 972 | devfs_mk_dir("pt"); |
973 | for (unit = 0; unit < PT_UNITS; unit++) | 973 | for (unit = 0; unit < PT_UNITS; unit++) |
974 | if (pt[unit].present) { | 974 | if (pt[unit].present) { |
975 | class_simple_device_add(pt_class, MKDEV(major, unit), | 975 | class_device_create(pt_class, MKDEV(major, unit), |
976 | NULL, "pt%d", unit); | 976 | NULL, "pt%d", unit); |
977 | err = devfs_mk_cdev(MKDEV(major, unit), | 977 | err = devfs_mk_cdev(MKDEV(major, unit), |
978 | S_IFCHR | S_IRUSR | S_IWUSR, | 978 | S_IFCHR | S_IRUSR | S_IWUSR, |
979 | "pt/%d", unit); | 979 | "pt/%d", unit); |
980 | if (err) { | 980 | if (err) { |
981 | class_simple_device_remove(MKDEV(major, unit)); | 981 | class_device_destroy(pt_class, MKDEV(major, unit)); |
982 | goto out_class; | 982 | goto out_class; |
983 | } | 983 | } |
984 | class_simple_device_add(pt_class, MKDEV(major, unit + 128), | 984 | class_device_create(pt_class, MKDEV(major, unit + 128), |
985 | NULL, "pt%dn", unit); | 985 | NULL, "pt%dn", unit); |
986 | err = devfs_mk_cdev(MKDEV(major, unit + 128), | 986 | err = devfs_mk_cdev(MKDEV(major, unit + 128), |
987 | S_IFCHR | S_IRUSR | S_IWUSR, | 987 | S_IFCHR | S_IRUSR | S_IWUSR, |
988 | "pt/%dn", unit); | 988 | "pt/%dn", unit); |
989 | if (err) { | 989 | if (err) { |
990 | class_simple_device_remove(MKDEV(major, unit + 128)); | 990 | class_device_destroy(pt_class, MKDEV(major, unit + 128)); |
991 | goto out_class; | 991 | goto out_class; |
992 | } | 992 | } |
993 | } | 993 | } |
994 | goto out; | 994 | goto out; |
995 | 995 | ||
996 | out_class: | 996 | out_class: |
997 | class_simple_destroy(pt_class); | 997 | class_destroy(pt_class); |
998 | out_chrdev: | 998 | out_chrdev: |
999 | unregister_chrdev(major, "pt"); | 999 | unregister_chrdev(major, "pt"); |
1000 | out: | 1000 | out: |
@@ -1006,12 +1006,12 @@ static void __exit pt_exit(void) | |||
1006 | int unit; | 1006 | int unit; |
1007 | for (unit = 0; unit < PT_UNITS; unit++) | 1007 | for (unit = 0; unit < PT_UNITS; unit++) |
1008 | if (pt[unit].present) { | 1008 | if (pt[unit].present) { |
1009 | class_simple_device_remove(MKDEV(major, unit)); | 1009 | class_device_destroy(pt_class, MKDEV(major, unit)); |
1010 | devfs_remove("pt/%d", unit); | 1010 | devfs_remove("pt/%d", unit); |
1011 | class_simple_device_remove(MKDEV(major, unit + 128)); | 1011 | class_device_destroy(pt_class, MKDEV(major, unit + 128)); |
1012 | devfs_remove("pt/%dn", unit); | 1012 | devfs_remove("pt/%dn", unit); |
1013 | } | 1013 | } |
1014 | class_simple_destroy(pt_class); | 1014 | class_destroy(pt_class); |
1015 | devfs_remove("pt"); | 1015 | devfs_remove("pt"); |
1016 | unregister_chrdev(major, name); | 1016 | unregister_chrdev(major, name); |
1017 | for (unit = 0; unit < PT_UNITS; unit++) | 1017 | for (unit = 0; unit < PT_UNITS; unit++) |
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 797f5988c2b5..5ed3a6379452 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -614,7 +614,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx) | |||
614 | spin_unlock_irq(&host->lock); | 614 | spin_unlock_irq(&host->lock); |
615 | 615 | ||
616 | DPRINTK("blk_insert_request, tag == %u\n", idx); | 616 | DPRINTK("blk_insert_request, tag == %u\n", idx); |
617 | blk_insert_request(host->oob_q, crq->rq, 1, crq, 0); | 617 | blk_insert_request(host->oob_q, crq->rq, 1, crq); |
618 | 618 | ||
619 | return 0; | 619 | return 0; |
620 | 620 | ||
@@ -653,7 +653,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func) | |||
653 | crq->msg_bucket = (u32) rc; | 653 | crq->msg_bucket = (u32) rc; |
654 | 654 | ||
655 | DPRINTK("blk_insert_request, tag == %u\n", idx); | 655 | DPRINTK("blk_insert_request, tag == %u\n", idx); |
656 | blk_insert_request(host->oob_q, crq->rq, 1, crq, 0); | 656 | blk_insert_request(host->oob_q, crq->rq, 1, crq); |
657 | 657 | ||
658 | return 0; | 658 | return 0; |
659 | } | 659 | } |
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index ce42889f98fb..685f061e69b2 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -8,13 +8,12 @@ | |||
8 | * and is not licensed separately. See file COPYING for details. | 8 | * and is not licensed separately. See file COPYING for details. |
9 | * | 9 | * |
10 | * TODO (sorted by decreasing priority) | 10 | * TODO (sorted by decreasing priority) |
11 | * -- Kill first_open (Al Viro fixed the block layer now) | ||
11 | * -- Do resets with usb_device_reset (needs a thread context, use khubd) | 12 | * -- Do resets with usb_device_reset (needs a thread context, use khubd) |
12 | * -- set readonly flag for CDs, set removable flag for CF readers | 13 | * -- set readonly flag for CDs, set removable flag for CF readers |
13 | * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch) | 14 | * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch) |
14 | * -- support pphaneuf's SDDR-75 with two LUNs (also broken capacity...) | ||
15 | * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries | 15 | * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries |
16 | * -- verify the 13 conditions and do bulk resets | 16 | * -- verify the 13 conditions and do bulk resets |
17 | * -- normal pool of commands instead of cmdv[]? | ||
18 | * -- kill last_pipe and simply do two-state clearing on both pipes | 17 | * -- kill last_pipe and simply do two-state clearing on both pipes |
19 | * -- verify protocol (bulk) from USB descriptors (maybe...) | 18 | * -- verify protocol (bulk) from USB descriptors (maybe...) |
20 | * -- highmem and sg | 19 | * -- highmem and sg |
@@ -49,7 +48,14 @@ | |||
49 | #define US_SC_SCSI 0x06 /* Transparent */ | 48 | #define US_SC_SCSI 0x06 /* Transparent */ |
50 | 49 | ||
51 | /* | 50 | /* |
51 | * This many LUNs per USB device. | ||
52 | * Every one of them takes a host, see UB_MAX_HOSTS. | ||
52 | */ | 53 | */ |
54 | #define UB_MAX_LUNS 9 | ||
55 | |||
56 | /* | ||
57 | */ | ||
58 | |||
53 | #define UB_MINORS_PER_MAJOR 8 | 59 | #define UB_MINORS_PER_MAJOR 8 |
54 | 60 | ||
55 | #define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */ | 61 | #define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */ |
@@ -65,7 +71,7 @@ struct bulk_cb_wrap { | |||
65 | u32 Tag; /* unique per command id */ | 71 | u32 Tag; /* unique per command id */ |
66 | __le32 DataTransferLength; /* size of data */ | 72 | __le32 DataTransferLength; /* size of data */ |
67 | u8 Flags; /* direction in bit 0 */ | 73 | u8 Flags; /* direction in bit 0 */ |
68 | u8 Lun; /* LUN normally 0 */ | 74 | u8 Lun; /* LUN */ |
69 | u8 Length; /* of of the CDB */ | 75 | u8 Length; /* of of the CDB */ |
70 | u8 CDB[UB_MAX_CDB_SIZE]; /* max command */ | 76 | u8 CDB[UB_MAX_CDB_SIZE]; /* max command */ |
71 | }; | 77 | }; |
@@ -168,6 +174,7 @@ struct ub_scsi_cmd { | |||
168 | unsigned int len; /* Requested length */ | 174 | unsigned int len; /* Requested length */ |
169 | // struct scatterlist sgv[UB_MAX_REQ_SG]; | 175 | // struct scatterlist sgv[UB_MAX_REQ_SG]; |
170 | 176 | ||
177 | struct ub_lun *lun; | ||
171 | void (*done)(struct ub_dev *, struct ub_scsi_cmd *); | 178 | void (*done)(struct ub_dev *, struct ub_scsi_cmd *); |
172 | void *back; | 179 | void *back; |
173 | }; | 180 | }; |
@@ -252,25 +259,47 @@ struct ub_scsi_cmd_queue { | |||
252 | }; | 259 | }; |
253 | 260 | ||
254 | /* | 261 | /* |
255 | * The UB device instance. | 262 | * The block device instance (one per LUN). |
263 | */ | ||
264 | struct ub_lun { | ||
265 | struct ub_dev *udev; | ||
266 | struct list_head link; | ||
267 | struct gendisk *disk; | ||
268 | int id; /* Host index */ | ||
269 | int num; /* LUN number */ | ||
270 | char name[16]; | ||
271 | |||
272 | int changed; /* Media was changed */ | ||
273 | int removable; | ||
274 | int readonly; | ||
275 | int first_open; /* Kludge. See ub_bd_open. */ | ||
276 | |||
277 | /* Use Ingo's mempool if or when we have more than one command. */ | ||
278 | /* | ||
279 | * Currently we never need more than one command for the whole device. | ||
280 | * However, giving every LUN a command is a cheap and automatic way | ||
281 | * to enforce fairness between them. | ||
282 | */ | ||
283 | int cmda[1]; | ||
284 | struct ub_scsi_cmd cmdv[1]; | ||
285 | |||
286 | struct ub_capacity capacity; | ||
287 | }; | ||
288 | |||
289 | /* | ||
290 | * The USB device instance. | ||
256 | */ | 291 | */ |
257 | struct ub_dev { | 292 | struct ub_dev { |
258 | spinlock_t lock; | 293 | spinlock_t lock; |
259 | int id; /* Number among ub's */ | ||
260 | atomic_t poison; /* The USB device is disconnected */ | 294 | atomic_t poison; /* The USB device is disconnected */ |
261 | int openc; /* protected by ub_lock! */ | 295 | int openc; /* protected by ub_lock! */ |
262 | /* kref is too implicit for our taste */ | 296 | /* kref is too implicit for our taste */ |
263 | unsigned int tagcnt; | 297 | unsigned int tagcnt; |
264 | int changed; /* Media was changed */ | 298 | char name[12]; |
265 | int removable; | ||
266 | int readonly; | ||
267 | int first_open; /* Kludge. See ub_bd_open. */ | ||
268 | char name[8]; | ||
269 | struct usb_device *dev; | 299 | struct usb_device *dev; |
270 | struct usb_interface *intf; | 300 | struct usb_interface *intf; |
271 | 301 | ||
272 | struct ub_capacity capacity; | 302 | struct list_head luns; |
273 | struct gendisk *disk; | ||
274 | 303 | ||
275 | unsigned int send_bulk_pipe; /* cached pipe values */ | 304 | unsigned int send_bulk_pipe; /* cached pipe values */ |
276 | unsigned int recv_bulk_pipe; | 305 | unsigned int recv_bulk_pipe; |
@@ -279,10 +308,6 @@ struct ub_dev { | |||
279 | 308 | ||
280 | struct tasklet_struct tasklet; | 309 | struct tasklet_struct tasklet; |
281 | 310 | ||
282 | /* XXX Use Ingo's mempool (once we have more than one) */ | ||
283 | int cmda[1]; | ||
284 | struct ub_scsi_cmd cmdv[1]; | ||
285 | |||
286 | struct ub_scsi_cmd_queue cmd_queue; | 311 | struct ub_scsi_cmd_queue cmd_queue; |
287 | struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */ | 312 | struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */ |
288 | unsigned char top_sense[UB_SENSE_SIZE]; | 313 | unsigned char top_sense[UB_SENSE_SIZE]; |
@@ -301,9 +326,9 @@ struct ub_dev { | |||
301 | /* | 326 | /* |
302 | */ | 327 | */ |
303 | static void ub_cleanup(struct ub_dev *sc); | 328 | static void ub_cleanup(struct ub_dev *sc); |
304 | static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq); | 329 | static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq); |
305 | static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | 330 | static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, |
306 | struct request *rq); | 331 | struct ub_scsi_cmd *cmd, struct request *rq); |
307 | static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | 332 | static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, |
308 | struct request *rq); | 333 | struct request *rq); |
309 | static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); | 334 | static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
@@ -320,8 +345,10 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd); | |||
320 | static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | 345 | static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, |
321 | int stalled_pipe); | 346 | int stalled_pipe); |
322 | static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); | 347 | static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); |
323 | static int ub_sync_tur(struct ub_dev *sc); | 348 | static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); |
324 | static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret); | 349 | static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, |
350 | struct ub_capacity *ret); | ||
351 | static int ub_probe_lun(struct ub_dev *sc, int lnum); | ||
325 | 352 | ||
326 | /* | 353 | /* |
327 | */ | 354 | */ |
@@ -342,6 +369,7 @@ MODULE_DEVICE_TABLE(usb, ub_usb_ids); | |||
342 | */ | 369 | */ |
343 | #define UB_MAX_HOSTS 26 | 370 | #define UB_MAX_HOSTS 26 |
344 | static char ub_hostv[UB_MAX_HOSTS]; | 371 | static char ub_hostv[UB_MAX_HOSTS]; |
372 | |||
345 | static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */ | 373 | static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */ |
346 | 374 | ||
347 | /* | 375 | /* |
@@ -402,10 +430,12 @@ static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | |||
402 | } | 430 | } |
403 | } | 431 | } |
404 | 432 | ||
405 | static ssize_t ub_diag_show(struct device *dev, char *page) | 433 | static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, char *page) |
406 | { | 434 | { |
407 | struct usb_interface *intf; | 435 | struct usb_interface *intf; |
408 | struct ub_dev *sc; | 436 | struct ub_dev *sc; |
437 | struct list_head *p; | ||
438 | struct ub_lun *lun; | ||
409 | int cnt; | 439 | int cnt; |
410 | unsigned long flags; | 440 | unsigned long flags; |
411 | int nc, nh; | 441 | int nc, nh; |
@@ -421,9 +451,15 @@ static ssize_t ub_diag_show(struct device *dev, char *page) | |||
421 | spin_lock_irqsave(&sc->lock, flags); | 451 | spin_lock_irqsave(&sc->lock, flags); |
422 | 452 | ||
423 | cnt += sprintf(page + cnt, | 453 | cnt += sprintf(page + cnt, |
424 | "qlen %d qmax %d changed %d removable %d readonly %d\n", | 454 | "qlen %d qmax %d\n", |
425 | sc->cmd_queue.qlen, sc->cmd_queue.qmax, | 455 | sc->cmd_queue.qlen, sc->cmd_queue.qmax); |
426 | sc->changed, sc->removable, sc->readonly); | 456 | |
457 | list_for_each (p, &sc->luns) { | ||
458 | lun = list_entry(p, struct ub_lun, link); | ||
459 | cnt += sprintf(page + cnt, | ||
460 | "lun %u changed %d removable %d readonly %d\n", | ||
461 | lun->num, lun->changed, lun->removable, lun->readonly); | ||
462 | } | ||
427 | 463 | ||
428 | if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0; | 464 | if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0; |
429 | for (j = 0; j < SCMD_TRACE_SZ; j++) { | 465 | for (j = 0; j < SCMD_TRACE_SZ; j++) { |
@@ -523,53 +559,63 @@ static void ub_put(struct ub_dev *sc) | |||
523 | */ | 559 | */ |
524 | static void ub_cleanup(struct ub_dev *sc) | 560 | static void ub_cleanup(struct ub_dev *sc) |
525 | { | 561 | { |
562 | struct list_head *p; | ||
563 | struct ub_lun *lun; | ||
526 | request_queue_t *q; | 564 | request_queue_t *q; |
527 | 565 | ||
528 | /* I don't think queue can be NULL. But... Stolen from sx8.c */ | 566 | while (!list_empty(&sc->luns)) { |
529 | if ((q = sc->disk->queue) != NULL) | 567 | p = sc->luns.next; |
530 | blk_cleanup_queue(q); | 568 | lun = list_entry(p, struct ub_lun, link); |
569 | list_del(p); | ||
531 | 570 | ||
532 | /* | 571 | /* I don't think queue can be NULL. But... Stolen from sx8.c */ |
533 | * If we zero disk->private_data BEFORE put_disk, we have to check | 572 | if ((q = lun->disk->queue) != NULL) |
534 | * for NULL all over the place in open, release, check_media and | 573 | blk_cleanup_queue(q); |
535 | * revalidate, because the block level semaphore is well inside the | 574 | /* |
536 | * put_disk. But we cannot zero after the call, because *disk is gone. | 575 | * If we zero disk->private_data BEFORE put_disk, we have |
537 | * The sd.c is blatantly racy in this area. | 576 | * to check for NULL all over the place in open, release, |
538 | */ | 577 | * check_media and revalidate, because the block level |
539 | /* disk->private_data = NULL; */ | 578 | * semaphore is well inside the put_disk. |
540 | put_disk(sc->disk); | 579 | * But we cannot zero after the call, because *disk is gone. |
541 | sc->disk = NULL; | 580 | * The sd.c is blatantly racy in this area. |
581 | */ | ||
582 | /* disk->private_data = NULL; */ | ||
583 | put_disk(lun->disk); | ||
584 | lun->disk = NULL; | ||
585 | |||
586 | ub_id_put(lun->id); | ||
587 | kfree(lun); | ||
588 | } | ||
542 | 589 | ||
543 | ub_id_put(sc->id); | ||
544 | kfree(sc); | 590 | kfree(sc); |
545 | } | 591 | } |
546 | 592 | ||
547 | /* | 593 | /* |
548 | * The "command allocator". | 594 | * The "command allocator". |
549 | */ | 595 | */ |
550 | static struct ub_scsi_cmd *ub_get_cmd(struct ub_dev *sc) | 596 | static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun) |
551 | { | 597 | { |
552 | struct ub_scsi_cmd *ret; | 598 | struct ub_scsi_cmd *ret; |
553 | 599 | ||
554 | if (sc->cmda[0]) | 600 | if (lun->cmda[0]) |
555 | return NULL; | 601 | return NULL; |
556 | ret = &sc->cmdv[0]; | 602 | ret = &lun->cmdv[0]; |
557 | sc->cmda[0] = 1; | 603 | lun->cmda[0] = 1; |
558 | return ret; | 604 | return ret; |
559 | } | 605 | } |
560 | 606 | ||
561 | static void ub_put_cmd(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | 607 | static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd) |
562 | { | 608 | { |
563 | if (cmd != &sc->cmdv[0]) { | 609 | if (cmd != &lun->cmdv[0]) { |
564 | printk(KERN_WARNING "%s: releasing a foreign cmd %p\n", | 610 | printk(KERN_WARNING "%s: releasing a foreign cmd %p\n", |
565 | sc->name, cmd); | 611 | lun->name, cmd); |
566 | return; | 612 | return; |
567 | } | 613 | } |
568 | if (!sc->cmda[0]) { | 614 | if (!lun->cmda[0]) { |
569 | printk(KERN_WARNING "%s: releasing a free cmd\n", sc->name); | 615 | printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name); |
570 | return; | 616 | return; |
571 | } | 617 | } |
572 | sc->cmda[0] = 0; | 618 | lun->cmda[0] = 0; |
573 | } | 619 | } |
574 | 620 | ||
575 | /* | 621 | /* |
@@ -630,29 +676,30 @@ static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc) | |||
630 | 676 | ||
631 | static void ub_bd_rq_fn(request_queue_t *q) | 677 | static void ub_bd_rq_fn(request_queue_t *q) |
632 | { | 678 | { |
633 | struct ub_dev *sc = q->queuedata; | 679 | struct ub_lun *lun = q->queuedata; |
634 | struct request *rq; | 680 | struct request *rq; |
635 | 681 | ||
636 | while ((rq = elv_next_request(q)) != NULL) { | 682 | while ((rq = elv_next_request(q)) != NULL) { |
637 | if (ub_bd_rq_fn_1(sc, rq) != 0) { | 683 | if (ub_bd_rq_fn_1(lun, rq) != 0) { |
638 | blk_stop_queue(q); | 684 | blk_stop_queue(q); |
639 | break; | 685 | break; |
640 | } | 686 | } |
641 | } | 687 | } |
642 | } | 688 | } |
643 | 689 | ||
644 | static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq) | 690 | static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq) |
645 | { | 691 | { |
692 | struct ub_dev *sc = lun->udev; | ||
646 | struct ub_scsi_cmd *cmd; | 693 | struct ub_scsi_cmd *cmd; |
647 | int rc; | 694 | int rc; |
648 | 695 | ||
649 | if (atomic_read(&sc->poison) || sc->changed) { | 696 | if (atomic_read(&sc->poison) || lun->changed) { |
650 | blkdev_dequeue_request(rq); | 697 | blkdev_dequeue_request(rq); |
651 | ub_end_rq(rq, 0); | 698 | ub_end_rq(rq, 0); |
652 | return 0; | 699 | return 0; |
653 | } | 700 | } |
654 | 701 | ||
655 | if ((cmd = ub_get_cmd(sc)) == NULL) | 702 | if ((cmd = ub_get_cmd(lun)) == NULL) |
656 | return -1; | 703 | return -1; |
657 | memset(cmd, 0, sizeof(struct ub_scsi_cmd)); | 704 | memset(cmd, 0, sizeof(struct ub_scsi_cmd)); |
658 | 705 | ||
@@ -661,32 +708,30 @@ static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq) | |||
661 | if (blk_pc_request(rq)) { | 708 | if (blk_pc_request(rq)) { |
662 | rc = ub_cmd_build_packet(sc, cmd, rq); | 709 | rc = ub_cmd_build_packet(sc, cmd, rq); |
663 | } else { | 710 | } else { |
664 | rc = ub_cmd_build_block(sc, cmd, rq); | 711 | rc = ub_cmd_build_block(sc, lun, cmd, rq); |
665 | } | 712 | } |
666 | if (rc != 0) { | 713 | if (rc != 0) { |
667 | ub_put_cmd(sc, cmd); | 714 | ub_put_cmd(lun, cmd); |
668 | ub_end_rq(rq, 0); | 715 | ub_end_rq(rq, 0); |
669 | blk_start_queue(sc->disk->queue); | ||
670 | return 0; | 716 | return 0; |
671 | } | 717 | } |
672 | |||
673 | cmd->state = UB_CMDST_INIT; | 718 | cmd->state = UB_CMDST_INIT; |
719 | cmd->lun = lun; | ||
674 | cmd->done = ub_rw_cmd_done; | 720 | cmd->done = ub_rw_cmd_done; |
675 | cmd->back = rq; | 721 | cmd->back = rq; |
676 | 722 | ||
677 | cmd->tag = sc->tagcnt++; | 723 | cmd->tag = sc->tagcnt++; |
678 | if ((rc = ub_submit_scsi(sc, cmd)) != 0) { | 724 | if ((rc = ub_submit_scsi(sc, cmd)) != 0) { |
679 | ub_put_cmd(sc, cmd); | 725 | ub_put_cmd(lun, cmd); |
680 | ub_end_rq(rq, 0); | 726 | ub_end_rq(rq, 0); |
681 | blk_start_queue(sc->disk->queue); | ||
682 | return 0; | 727 | return 0; |
683 | } | 728 | } |
684 | 729 | ||
685 | return 0; | 730 | return 0; |
686 | } | 731 | } |
687 | 732 | ||
688 | static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | 733 | static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, |
689 | struct request *rq) | 734 | struct ub_scsi_cmd *cmd, struct request *rq) |
690 | { | 735 | { |
691 | int ub_dir; | 736 | int ub_dir; |
692 | #if 0 /* We use rq->buffer for now */ | 737 | #if 0 /* We use rq->buffer for now */ |
@@ -707,7 +752,7 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | |||
707 | sg = &cmd->sgv[0]; | 752 | sg = &cmd->sgv[0]; |
708 | n_elem = blk_rq_map_sg(q, rq, sg); | 753 | n_elem = blk_rq_map_sg(q, rq, sg); |
709 | if (n_elem <= 0) { | 754 | if (n_elem <= 0) { |
710 | ub_put_cmd(sc, cmd); | 755 | ub_put_cmd(lun, cmd); |
711 | ub_end_rq(rq, 0); | 756 | ub_end_rq(rq, 0); |
712 | blk_start_queue(q); | 757 | blk_start_queue(q); |
713 | return 0; /* request with no s/g entries? */ | 758 | return 0; /* request with no s/g entries? */ |
@@ -716,7 +761,7 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | |||
716 | if (n_elem != 1) { /* Paranoia */ | 761 | if (n_elem != 1) { /* Paranoia */ |
717 | printk(KERN_WARNING "%s: request with %d segments\n", | 762 | printk(KERN_WARNING "%s: request with %d segments\n", |
718 | sc->name, n_elem); | 763 | sc->name, n_elem); |
719 | ub_put_cmd(sc, cmd); | 764 | ub_put_cmd(lun, cmd); |
720 | ub_end_rq(rq, 0); | 765 | ub_end_rq(rq, 0); |
721 | blk_start_queue(q); | 766 | blk_start_queue(q); |
722 | return 0; | 767 | return 0; |
@@ -748,8 +793,8 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | |||
748 | * The call to blk_queue_hardsect_size() guarantees that request | 793 | * The call to blk_queue_hardsect_size() guarantees that request |
749 | * is aligned, but it is given in terms of 512 byte units, always. | 794 | * is aligned, but it is given in terms of 512 byte units, always. |
750 | */ | 795 | */ |
751 | block = rq->sector >> sc->capacity.bshift; | 796 | block = rq->sector >> lun->capacity.bshift; |
752 | nblks = rq->nr_sectors >> sc->capacity.bshift; | 797 | nblks = rq->nr_sectors >> lun->capacity.bshift; |
753 | 798 | ||
754 | cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10; | 799 | cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10; |
755 | /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ | 800 | /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ |
@@ -803,7 +848,8 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | |||
803 | static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | 848 | static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) |
804 | { | 849 | { |
805 | struct request *rq = cmd->back; | 850 | struct request *rq = cmd->back; |
806 | struct gendisk *disk = sc->disk; | 851 | struct ub_lun *lun = cmd->lun; |
852 | struct gendisk *disk = lun->disk; | ||
807 | request_queue_t *q = disk->queue; | 853 | request_queue_t *q = disk->queue; |
808 | int uptodate; | 854 | int uptodate; |
809 | 855 | ||
@@ -818,7 +864,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
818 | else | 864 | else |
819 | uptodate = 0; | 865 | uptodate = 0; |
820 | 866 | ||
821 | ub_put_cmd(sc, cmd); | 867 | ub_put_cmd(lun, cmd); |
822 | ub_end_rq(rq, uptodate); | 868 | ub_end_rq(rq, uptodate); |
823 | blk_start_queue(q); | 869 | blk_start_queue(q); |
824 | } | 870 | } |
@@ -887,7 +933,7 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
887 | bcb->Tag = cmd->tag; /* Endianness is not important */ | 933 | bcb->Tag = cmd->tag; /* Endianness is not important */ |
888 | bcb->DataTransferLength = cpu_to_le32(cmd->len); | 934 | bcb->DataTransferLength = cpu_to_le32(cmd->len); |
889 | bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0; | 935 | bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0; |
890 | bcb->Lun = 0; /* No multi-LUN yet */ | 936 | bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0; |
891 | bcb->Length = cmd->cdb_len; | 937 | bcb->Length = cmd->cdb_len; |
892 | 938 | ||
893 | /* copy the command payload */ | 939 | /* copy the command payload */ |
@@ -1002,9 +1048,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1002 | * The control pipe clears itself - nothing to do. | 1048 | * The control pipe clears itself - nothing to do. |
1003 | * XXX Might try to reset the device here and retry. | 1049 | * XXX Might try to reset the device here and retry. |
1004 | */ | 1050 | */ |
1005 | printk(KERN_NOTICE "%s: " | 1051 | printk(KERN_NOTICE "%s: stall on control pipe\n", |
1006 | "stall on control pipe for device %u\n", | 1052 | sc->name); |
1007 | sc->name, sc->dev->devnum); | ||
1008 | goto Bad_End; | 1053 | goto Bad_End; |
1009 | } | 1054 | } |
1010 | 1055 | ||
@@ -1025,9 +1070,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1025 | * The control pipe clears itself - nothing to do. | 1070 | * The control pipe clears itself - nothing to do. |
1026 | * XXX Might try to reset the device here and retry. | 1071 | * XXX Might try to reset the device here and retry. |
1027 | */ | 1072 | */ |
1028 | printk(KERN_NOTICE "%s: " | 1073 | printk(KERN_NOTICE "%s: stall on control pipe\n", |
1029 | "stall on control pipe for device %u\n", | 1074 | sc->name); |
1030 | sc->name, sc->dev->devnum); | ||
1031 | goto Bad_End; | 1075 | goto Bad_End; |
1032 | } | 1076 | } |
1033 | 1077 | ||
@@ -1046,9 +1090,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1046 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); | 1090 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); |
1047 | if (rc != 0) { | 1091 | if (rc != 0) { |
1048 | printk(KERN_NOTICE "%s: " | 1092 | printk(KERN_NOTICE "%s: " |
1049 | "unable to submit clear for device %u" | 1093 | "unable to submit clear (%d)\n", |
1050 | " (code %d)\n", | 1094 | sc->name, rc); |
1051 | sc->name, sc->dev->devnum, rc); | ||
1052 | /* | 1095 | /* |
1053 | * This is typically ENOMEM or some other such shit. | 1096 | * This is typically ENOMEM or some other such shit. |
1054 | * Retrying is pointless. Just do Bad End on it... | 1097 | * Retrying is pointless. Just do Bad End on it... |
@@ -1107,9 +1150,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1107 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); | 1150 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); |
1108 | if (rc != 0) { | 1151 | if (rc != 0) { |
1109 | printk(KERN_NOTICE "%s: " | 1152 | printk(KERN_NOTICE "%s: " |
1110 | "unable to submit clear for device %u" | 1153 | "unable to submit clear (%d)\n", |
1111 | " (code %d)\n", | 1154 | sc->name, rc); |
1112 | sc->name, sc->dev->devnum, rc); | ||
1113 | /* | 1155 | /* |
1114 | * This is typically ENOMEM or some other such shit. | 1156 | * This is typically ENOMEM or some other such shit. |
1115 | * Retrying is pointless. Just do Bad End on it... | 1157 | * Retrying is pointless. Just do Bad End on it... |
@@ -1140,9 +1182,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1140 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); | 1182 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); |
1141 | if (rc != 0) { | 1183 | if (rc != 0) { |
1142 | printk(KERN_NOTICE "%s: " | 1184 | printk(KERN_NOTICE "%s: " |
1143 | "unable to submit clear for device %u" | 1185 | "unable to submit clear (%d)\n", |
1144 | " (code %d)\n", | 1186 | sc->name, rc); |
1145 | sc->name, sc->dev->devnum, rc); | ||
1146 | /* | 1187 | /* |
1147 | * This is typically ENOMEM or some other such shit. | 1188 | * This is typically ENOMEM or some other such shit. |
1148 | * Retrying is pointless. Just do Bad End on it... | 1189 | * Retrying is pointless. Just do Bad End on it... |
@@ -1164,9 +1205,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1164 | * encounter such a thing, try to read the CSW again. | 1205 | * encounter such a thing, try to read the CSW again. |
1165 | */ | 1206 | */ |
1166 | if (++cmd->stat_count >= 4) { | 1207 | if (++cmd->stat_count >= 4) { |
1167 | printk(KERN_NOTICE "%s: " | 1208 | printk(KERN_NOTICE "%s: unable to get CSW\n", |
1168 | "unable to get CSW on device %u\n", | 1209 | sc->name); |
1169 | sc->name, sc->dev->devnum); | ||
1170 | goto Bad_End; | 1210 | goto Bad_End; |
1171 | } | 1211 | } |
1172 | __ub_state_stat(sc, cmd); | 1212 | __ub_state_stat(sc, cmd); |
@@ -1207,10 +1247,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1207 | */ | 1247 | */ |
1208 | if (++cmd->stat_count >= 4) { | 1248 | if (++cmd->stat_count >= 4) { |
1209 | printk(KERN_NOTICE "%s: " | 1249 | printk(KERN_NOTICE "%s: " |
1210 | "tag mismatch orig 0x%x reply 0x%x " | 1250 | "tag mismatch orig 0x%x reply 0x%x\n", |
1211 | "on device %u\n", | 1251 | sc->name, cmd->tag, bcs->Tag); |
1212 | sc->name, cmd->tag, bcs->Tag, | ||
1213 | sc->dev->devnum); | ||
1214 | goto Bad_End; | 1252 | goto Bad_End; |
1215 | } | 1253 | } |
1216 | __ub_state_stat(sc, cmd); | 1254 | __ub_state_stat(sc, cmd); |
@@ -1244,8 +1282,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1244 | 1282 | ||
1245 | } else { | 1283 | } else { |
1246 | printk(KERN_WARNING "%s: " | 1284 | printk(KERN_WARNING "%s: " |
1247 | "wrong command state %d on device %u\n", | 1285 | "wrong command state %d\n", |
1248 | sc->name, cmd->state, sc->dev->devnum); | 1286 | sc->name, cmd->state); |
1249 | goto Bad_End; | 1287 | goto Bad_End; |
1250 | } | 1288 | } |
1251 | return; | 1289 | return; |
@@ -1288,7 +1326,6 @@ static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1288 | 1326 | ||
1289 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { | 1327 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { |
1290 | /* XXX Clear stalls */ | 1328 | /* XXX Clear stalls */ |
1291 | printk("%s: CSW #%d submit failed (%d)\n", sc->name, cmd->tag, rc); /* P3 */ | ||
1292 | ub_complete(&sc->work_done); | 1329 | ub_complete(&sc->work_done); |
1293 | ub_state_done(sc, cmd, rc); | 1330 | ub_state_done(sc, cmd, rc); |
1294 | return; | 1331 | return; |
@@ -1333,6 +1370,7 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1333 | scmd->state = UB_CMDST_INIT; | 1370 | scmd->state = UB_CMDST_INIT; |
1334 | scmd->data = sc->top_sense; | 1371 | scmd->data = sc->top_sense; |
1335 | scmd->len = UB_SENSE_SIZE; | 1372 | scmd->len = UB_SENSE_SIZE; |
1373 | scmd->lun = cmd->lun; | ||
1336 | scmd->done = ub_top_sense_done; | 1374 | scmd->done = ub_top_sense_done; |
1337 | scmd->back = cmd; | 1375 | scmd->back = cmd; |
1338 | 1376 | ||
@@ -1411,14 +1449,14 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd) | |||
1411 | } | 1449 | } |
1412 | if (cmd != scmd->back) { | 1450 | if (cmd != scmd->back) { |
1413 | printk(KERN_WARNING "%s: " | 1451 | printk(KERN_WARNING "%s: " |
1414 | "sense done for wrong command 0x%x on device %u\n", | 1452 | "sense done for wrong command 0x%x\n", |
1415 | sc->name, cmd->tag, sc->dev->devnum); | 1453 | sc->name, cmd->tag); |
1416 | return; | 1454 | return; |
1417 | } | 1455 | } |
1418 | if (cmd->state != UB_CMDST_SENSE) { | 1456 | if (cmd->state != UB_CMDST_SENSE) { |
1419 | printk(KERN_WARNING "%s: " | 1457 | printk(KERN_WARNING "%s: " |
1420 | "sense done with bad cmd state %d on device %u\n", | 1458 | "sense done with bad cmd state %d\n", |
1421 | sc->name, cmd->state, sc->dev->devnum); | 1459 | sc->name, cmd->state); |
1422 | return; | 1460 | return; |
1423 | } | 1461 | } |
1424 | 1462 | ||
@@ -1429,68 +1467,32 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd) | |||
1429 | ub_scsi_urb_compl(sc, cmd); | 1467 | ub_scsi_urb_compl(sc, cmd); |
1430 | } | 1468 | } |
1431 | 1469 | ||
1432 | #if 0 | ||
1433 | /* Determine what the maximum LUN supported is */ | ||
1434 | int usb_stor_Bulk_max_lun(struct us_data *us) | ||
1435 | { | ||
1436 | int result; | ||
1437 | |||
1438 | /* issue the command */ | ||
1439 | result = usb_stor_control_msg(us, us->recv_ctrl_pipe, | ||
1440 | US_BULK_GET_MAX_LUN, | ||
1441 | USB_DIR_IN | USB_TYPE_CLASS | | ||
1442 | USB_RECIP_INTERFACE, | ||
1443 | 0, us->ifnum, us->iobuf, 1, HZ); | ||
1444 | |||
1445 | /* | ||
1446 | * Some devices (i.e. Iomega Zip100) need this -- apparently | ||
1447 | * the bulk pipes get STALLed when the GetMaxLUN request is | ||
1448 | * processed. This is, in theory, harmless to all other devices | ||
1449 | * (regardless of if they stall or not). | ||
1450 | */ | ||
1451 | if (result < 0) { | ||
1452 | usb_stor_clear_halt(us, us->recv_bulk_pipe); | ||
1453 | usb_stor_clear_halt(us, us->send_bulk_pipe); | ||
1454 | } | ||
1455 | |||
1456 | US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", | ||
1457 | result, us->iobuf[0]); | ||
1458 | |||
1459 | /* if we have a successful request, return the result */ | ||
1460 | if (result == 1) | ||
1461 | return us->iobuf[0]; | ||
1462 | |||
1463 | /* return the default -- no LUNs */ | ||
1464 | return 0; | ||
1465 | } | ||
1466 | #endif | ||
1467 | |||
1468 | /* | 1470 | /* |
1469 | * This is called from a process context. | 1471 | * This is called from a process context. |
1470 | */ | 1472 | */ |
1471 | static void ub_revalidate(struct ub_dev *sc) | 1473 | static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun) |
1472 | { | 1474 | { |
1473 | 1475 | ||
1474 | sc->readonly = 0; /* XXX Query this from the device */ | 1476 | lun->readonly = 0; /* XXX Query this from the device */ |
1475 | 1477 | ||
1476 | sc->capacity.nsec = 0; | 1478 | lun->capacity.nsec = 0; |
1477 | sc->capacity.bsize = 512; | 1479 | lun->capacity.bsize = 512; |
1478 | sc->capacity.bshift = 0; | 1480 | lun->capacity.bshift = 0; |
1479 | 1481 | ||
1480 | if (ub_sync_tur(sc) != 0) | 1482 | if (ub_sync_tur(sc, lun) != 0) |
1481 | return; /* Not ready */ | 1483 | return; /* Not ready */ |
1482 | sc->changed = 0; | 1484 | lun->changed = 0; |
1483 | 1485 | ||
1484 | if (ub_sync_read_cap(sc, &sc->capacity) != 0) { | 1486 | if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) { |
1485 | /* | 1487 | /* |
1486 | * The retry here means something is wrong, either with the | 1488 | * The retry here means something is wrong, either with the |
1487 | * device, with the transport, or with our code. | 1489 | * device, with the transport, or with our code. |
1488 | * We keep this because sd.c has retries for capacity. | 1490 | * We keep this because sd.c has retries for capacity. |
1489 | */ | 1491 | */ |
1490 | if (ub_sync_read_cap(sc, &sc->capacity) != 0) { | 1492 | if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) { |
1491 | sc->capacity.nsec = 0; | 1493 | lun->capacity.nsec = 0; |
1492 | sc->capacity.bsize = 512; | 1494 | lun->capacity.bsize = 512; |
1493 | sc->capacity.bshift = 0; | 1495 | lun->capacity.bshift = 0; |
1494 | } | 1496 | } |
1495 | } | 1497 | } |
1496 | } | 1498 | } |
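The revalidation path above defaults to 512-byte blocks and zero capacity whenever READ CAPACITY fails, and ub_sync_read_cap() further down asks for an 8-byte transfer (cmd->len = 8), which matches the standard READ CAPACITY(10) response. For reference only, a minimal decoding sketch of that 8-byte payload; the helper name is hypothetical and no claim is made about the units ub stores internally:

```c
#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Hypothetical helper: decode an 8-byte READ CAPACITY(10) response.
 * Bytes 0-3: last logical block address (big-endian).
 * Bytes 4-7: block length in bytes (big-endian).
 * The block count is "last LBA + 1".
 */
static void demo_decode_read_cap10(const unsigned char *p,
	unsigned long *nblocks, unsigned int *bsize)
{
	*nblocks = be32_to_cpu(*(const __be32 *)&p[0]) + 1;
	*bsize   = be32_to_cpu(*(const __be32 *)&p[4]);
}
```

A device reporting last LBA 0x001fffff with a 512-byte block length would thus come out as 2097152 blocks, i.e. 1 GiB.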
@@ -1503,12 +1505,15 @@ static void ub_revalidate(struct ub_dev *sc) | |||
1503 | static int ub_bd_open(struct inode *inode, struct file *filp) | 1505 | static int ub_bd_open(struct inode *inode, struct file *filp) |
1504 | { | 1506 | { |
1505 | struct gendisk *disk = inode->i_bdev->bd_disk; | 1507 | struct gendisk *disk = inode->i_bdev->bd_disk; |
1508 | struct ub_lun *lun; | ||
1506 | struct ub_dev *sc; | 1509 | struct ub_dev *sc; |
1507 | unsigned long flags; | 1510 | unsigned long flags; |
1508 | int rc; | 1511 | int rc; |
1509 | 1512 | ||
1510 | if ((sc = disk->private_data) == NULL) | 1513 | if ((lun = disk->private_data) == NULL) |
1511 | return -ENXIO; | 1514 | return -ENXIO; |
1515 | sc = lun->udev; | ||
1516 | |||
1512 | spin_lock_irqsave(&ub_lock, flags); | 1517 | spin_lock_irqsave(&ub_lock, flags); |
1513 | if (atomic_read(&sc->poison)) { | 1518 | if (atomic_read(&sc->poison)) { |
1514 | spin_unlock_irqrestore(&ub_lock, flags); | 1519 | spin_unlock_irqrestore(&ub_lock, flags); |
@@ -1529,15 +1534,15 @@ static int ub_bd_open(struct inode *inode, struct file *filp) | |||
1529 | * The bottom line is, Al Viro says that we should not allow | 1534 | * The bottom line is, Al Viro says that we should not allow |
1530 | * bdev->bd_invalidated to be set when doing add_disk no matter what. | 1535 | * bdev->bd_invalidated to be set when doing add_disk no matter what. |
1531 | */ | 1536 | */ |
1532 | if (sc->first_open) { | 1537 | if (lun->first_open) { |
1533 | if (sc->changed) { | 1538 | lun->first_open = 0; |
1534 | sc->first_open = 0; | 1539 | if (lun->changed) { |
1535 | rc = -ENOMEDIUM; | 1540 | rc = -ENOMEDIUM; |
1536 | goto err_open; | 1541 | goto err_open; |
1537 | } | 1542 | } |
1538 | } | 1543 | } |
1539 | 1544 | ||
1540 | if (sc->removable || sc->readonly) | 1545 | if (lun->removable || lun->readonly) |
1541 | check_disk_change(inode->i_bdev); | 1546 | check_disk_change(inode->i_bdev); |
1542 | 1547 | ||
1543 | /* | 1548 | /* |
@@ -1545,12 +1550,12 @@ static int ub_bd_open(struct inode *inode, struct file *filp) | |||
1545 | * under some pretty murky conditions (a failure of READ CAPACITY). | 1550 | * under some pretty murky conditions (a failure of READ CAPACITY). |
1546 | * We may need it one day. | 1551 | * We may need it one day. |
1547 | */ | 1552 | */ |
1548 | if (sc->removable && sc->changed && !(filp->f_flags & O_NDELAY)) { | 1553 | if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) { |
1549 | rc = -ENOMEDIUM; | 1554 | rc = -ENOMEDIUM; |
1550 | goto err_open; | 1555 | goto err_open; |
1551 | } | 1556 | } |
1552 | 1557 | ||
1553 | if (sc->readonly && (filp->f_mode & FMODE_WRITE)) { | 1558 | if (lun->readonly && (filp->f_mode & FMODE_WRITE)) { |
1554 | rc = -EROFS; | 1559 | rc = -EROFS; |
1555 | goto err_open; | 1560 | goto err_open; |
1556 | } | 1561 | } |
@@ -1567,7 +1572,8 @@ err_open: | |||
1567 | static int ub_bd_release(struct inode *inode, struct file *filp) | 1572 | static int ub_bd_release(struct inode *inode, struct file *filp) |
1568 | { | 1573 | { |
1569 | struct gendisk *disk = inode->i_bdev->bd_disk; | 1574 | struct gendisk *disk = inode->i_bdev->bd_disk; |
1570 | struct ub_dev *sc = disk->private_data; | 1575 | struct ub_lun *lun = disk->private_data; |
1576 | struct ub_dev *sc = lun->udev; | ||
1571 | 1577 | ||
1572 | ub_put(sc); | 1578 | ub_put(sc); |
1573 | return 0; | 1579 | return 0; |
@@ -1597,20 +1603,14 @@ static int ub_bd_ioctl(struct inode *inode, struct file *filp, | |||
1597 | */ | 1603 | */ |
1598 | static int ub_bd_revalidate(struct gendisk *disk) | 1604 | static int ub_bd_revalidate(struct gendisk *disk) |
1599 | { | 1605 | { |
1600 | struct ub_dev *sc = disk->private_data; | 1606 | struct ub_lun *lun = disk->private_data; |
1601 | 1607 | ||
1602 | ub_revalidate(sc); | 1608 | ub_revalidate(lun->udev, lun); |
1603 | /* This is pretty much a long term P3 */ | ||
1604 | if (!atomic_read(&sc->poison)) { /* Cover sc->dev */ | ||
1605 | printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n", | ||
1606 | sc->name, sc->dev->devnum, | ||
1607 | sc->capacity.nsec, sc->capacity.bsize); | ||
1608 | } | ||
1609 | 1609 | ||
1610 | /* XXX Support sector size switching like in sr.c */ | 1610 | /* XXX Support sector size switching like in sr.c */ |
1611 | blk_queue_hardsect_size(disk->queue, sc->capacity.bsize); | 1611 | blk_queue_hardsect_size(disk->queue, lun->capacity.bsize); |
1612 | set_capacity(disk, sc->capacity.nsec); | 1612 | set_capacity(disk, lun->capacity.nsec); |
1613 | // set_disk_ro(sdkp->disk, sc->readonly); | 1613 | // set_disk_ro(sdkp->disk, lun->readonly); |
1614 | 1614 | ||
1615 | return 0; | 1615 | return 0; |
1616 | } | 1616 | } |
@@ -1626,9 +1626,9 @@ static int ub_bd_revalidate(struct gendisk *disk) | |||
1626 | */ | 1626 | */ |
1627 | static int ub_bd_media_changed(struct gendisk *disk) | 1627 | static int ub_bd_media_changed(struct gendisk *disk) |
1628 | { | 1628 | { |
1629 | struct ub_dev *sc = disk->private_data; | 1629 | struct ub_lun *lun = disk->private_data; |
1630 | 1630 | ||
1631 | if (!sc->removable) | 1631 | if (!lun->removable) |
1632 | return 0; | 1632 | return 0; |
1633 | 1633 | ||
1634 | /* | 1634 | /* |
@@ -1640,12 +1640,12 @@ static int ub_bd_media_changed(struct gendisk *disk) | |||
1640 | * will fail, then block layer discards the data. Since we never | 1640 | * will fail, then block layer discards the data. Since we never |
1641 | * spin drives up, such devices simply cannot be used with ub anyway. | 1641 | * spin drives up, such devices simply cannot be used with ub anyway. |
1642 | */ | 1642 | */ |
1643 | if (ub_sync_tur(sc) != 0) { | 1643 | if (ub_sync_tur(lun->udev, lun) != 0) { |
1644 | sc->changed = 1; | 1644 | lun->changed = 1; |
1645 | return 1; | 1645 | return 1; |
1646 | } | 1646 | } |
1647 | 1647 | ||
1648 | return sc->changed; | 1648 | return lun->changed; |
1649 | } | 1649 | } |
1650 | 1650 | ||
1651 | static struct block_device_operations ub_bd_fops = { | 1651 | static struct block_device_operations ub_bd_fops = { |
@@ -1669,7 +1669,7 @@ static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1669 | /* | 1669 | /* |
1670 | * Test if the device has a check condition on it, synchronously. | 1670 | * Test if the device has a check condition on it, synchronously. |
1671 | */ | 1671 | */ |
1672 | static int ub_sync_tur(struct ub_dev *sc) | 1672 | static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun) |
1673 | { | 1673 | { |
1674 | struct ub_scsi_cmd *cmd; | 1674 | struct ub_scsi_cmd *cmd; |
1675 | enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) }; | 1675 | enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) }; |
@@ -1688,6 +1688,7 @@ static int ub_sync_tur(struct ub_dev *sc) | |||
1688 | cmd->cdb_len = 6; | 1688 | cmd->cdb_len = 6; |
1689 | cmd->dir = UB_DIR_NONE; | 1689 | cmd->dir = UB_DIR_NONE; |
1690 | cmd->state = UB_CMDST_INIT; | 1690 | cmd->state = UB_CMDST_INIT; |
1691 | cmd->lun = lun; /* This may be NULL, but that's ok */ | ||
1691 | cmd->done = ub_probe_done; | 1692 | cmd->done = ub_probe_done; |
1692 | cmd->back = &compl; | 1693 | cmd->back = &compl; |
1693 | 1694 | ||
@@ -1718,7 +1719,8 @@ err_alloc: | |||
1718 | /* | 1719 | /* |
1719 | * Read the SCSI capacity synchronously (for probing). | 1720 | * Read the SCSI capacity synchronously (for probing). |
1720 | */ | 1721 | */ |
1721 | static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret) | 1722 | static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, |
1723 | struct ub_capacity *ret) | ||
1722 | { | 1724 | { |
1723 | struct ub_scsi_cmd *cmd; | 1725 | struct ub_scsi_cmd *cmd; |
1724 | char *p; | 1726 | char *p; |
@@ -1743,6 +1745,7 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret) | |||
1743 | cmd->state = UB_CMDST_INIT; | 1745 | cmd->state = UB_CMDST_INIT; |
1744 | cmd->data = p; | 1746 | cmd->data = p; |
1745 | cmd->len = 8; | 1747 | cmd->len = 8; |
1748 | cmd->lun = lun; | ||
1746 | cmd->done = ub_probe_done; | 1749 | cmd->done = ub_probe_done; |
1747 | cmd->back = &compl; | 1750 | cmd->back = &compl; |
1748 | 1751 | ||
@@ -1812,6 +1815,90 @@ static void ub_probe_timeout(unsigned long arg) | |||
1812 | } | 1815 | } |
1813 | 1816 | ||
1814 | /* | 1817 | /* |
1818 | * Get number of LUNs by the way of Bulk GetMaxLUN command. | ||
1819 | */ | ||
1820 | static int ub_sync_getmaxlun(struct ub_dev *sc) | ||
1821 | { | ||
1822 | int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber; | ||
1823 | unsigned char *p; | ||
1824 | enum { ALLOC_SIZE = 1 }; | ||
1825 | struct usb_ctrlrequest *cr; | ||
1826 | struct completion compl; | ||
1827 | struct timer_list timer; | ||
1828 | int nluns; | ||
1829 | int rc; | ||
1830 | |||
1831 | init_completion(&compl); | ||
1832 | |||
1833 | rc = -ENOMEM; | ||
1834 | if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) | ||
1835 | goto err_alloc; | ||
1836 | *p = 55; | ||
1837 | |||
1838 | cr = &sc->work_cr; | ||
1839 | cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; | ||
1840 | cr->bRequest = US_BULK_GET_MAX_LUN; | ||
1841 | cr->wValue = cpu_to_le16(0); | ||
1842 | cr->wIndex = cpu_to_le16(ifnum); | ||
1843 | cr->wLength = cpu_to_le16(1); | ||
1844 | |||
1845 | usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe, | ||
1846 | (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl); | ||
1847 | sc->work_urb.transfer_flags = 0; | ||
1848 | sc->work_urb.actual_length = 0; | ||
1849 | sc->work_urb.error_count = 0; | ||
1850 | sc->work_urb.status = 0; | ||
1851 | |||
1852 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { | ||
1853 | if (rc == -EPIPE) { | ||
1854 | printk("%s: Stall at GetMaxLUN, using 1 LUN\n", | ||
1855 | sc->name); /* P3 */ | ||
1856 | } else { | ||
1857 | printk(KERN_WARNING | ||
1858 | "%s: Unable to submit GetMaxLUN (%d)\n", | ||
1859 | sc->name, rc); | ||
1860 | } | ||
1861 | goto err_submit; | ||
1862 | } | ||
1863 | |||
1864 | init_timer(&timer); | ||
1865 | timer.function = ub_probe_timeout; | ||
1866 | timer.data = (unsigned long) &compl; | ||
1867 | timer.expires = jiffies + UB_CTRL_TIMEOUT; | ||
1868 | add_timer(&timer); | ||
1869 | |||
1870 | wait_for_completion(&compl); | ||
1871 | |||
1872 | del_timer_sync(&timer); | ||
1873 | usb_kill_urb(&sc->work_urb); | ||
1874 | |||
1875 | if (sc->work_urb.actual_length != 1) { | ||
1876 | printk("%s: GetMaxLUN returned %d bytes\n", sc->name, | ||
1877 | sc->work_urb.actual_length); /* P3 */ | ||
1878 | nluns = 0; | ||
1879 | } else { | ||
1880 | if ((nluns = *p) == 55) { | ||
1881 | nluns = 0; | ||
1882 | } else { | ||
1883 | /* GetMaxLUN returns the maximum LUN number */ | ||
1884 | nluns += 1; | ||
1885 | if (nluns > UB_MAX_LUNS) | ||
1886 | nluns = UB_MAX_LUNS; | ||
1887 | } | ||
1888 | printk("%s: GetMaxLUN returned %d, using %d LUNs\n", sc->name, | ||
1889 | *p, nluns); /* P3 */ | ||
1890 | } | ||
1891 | |||
1892 | kfree(p); | ||
1893 | return nluns; | ||
1894 | |||
1895 | err_submit: | ||
1896 | kfree(p); | ||
1897 | err_alloc: | ||
1898 | return rc; | ||
1899 | } | ||
1900 | |||
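ub_sync_getmaxlun() above builds the class-specific GetMaxLUN control request by hand on sc->work_urb so that it can reuse the probe timeout machinery (ub_probe_timeout/ub_probe_urb_complete). For comparison only, the same request expressed with the synchronous usb_control_msg() helper would look roughly like the sketch below; the function and macro names are hypothetical, and the device's answer is the highest LUN index, so the LUN count is that value plus one:

```c
#include <linux/slab.h>
#include <linux/usb.h>

#define DEMO_BULK_GET_MAX_LUN	0xfe	/* same value as US_BULK_GET_MAX_LUN */

/*
 * Hypothetical sketch, not part of ub: issue GetMaxLUN synchronously on
 * interface `ifnum` and turn the reply (highest LUN index) into a count.
 * Falls back to a single LUN on any failure, as ub_probe() also does.
 */
static int demo_get_lun_count(struct usb_device *udev, int ifnum)
{
	unsigned char *buf;
	int rc, nluns = 1;

	if ((buf = kmalloc(1, GFP_KERNEL)) == NULL)
		return 1;

	rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
	    DEMO_BULK_GET_MAX_LUN,
	    USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
	    0, ifnum, buf, 1,
	    1000 /* timeout; units (jiffies vs. ms) vary by kernel version */);
	if (rc == 1)			/* exactly one data byte came back */
		nluns = buf[0] + 1;	/* max LUN index -> LUN count */

	kfree(buf);
	return nluns;
}
```

The asynchronous version in the driver additionally lets the probe code clamp the result to UB_MAX_LUNS and clear a stalled bulk pipe when the device answers the request with -EPIPE, as the retry loop in ub_probe() shows.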
1901 | /* | ||
1815 | * Clear initial stalls. | 1902 | * Clear initial stalls. |
1816 | */ | 1903 | */ |
1817 | static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe) | 1904 | static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe) |
@@ -1897,8 +1984,8 @@ static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev, | |||
1897 | } | 1984 | } |
1898 | 1985 | ||
1899 | if (ep_in == NULL || ep_out == NULL) { | 1986 | if (ep_in == NULL || ep_out == NULL) { |
1900 | printk(KERN_NOTICE "%s: device %u failed endpoint check\n", | 1987 | printk(KERN_NOTICE "%s: failed endpoint check\n", |
1901 | sc->name, sc->dev->devnum); | 1988 | sc->name); |
1902 | return -EIO; | 1989 | return -EIO; |
1903 | } | 1990 | } |
1904 | 1991 | ||
@@ -1921,8 +2008,7 @@ static int ub_probe(struct usb_interface *intf, | |||
1921 | const struct usb_device_id *dev_id) | 2008 | const struct usb_device_id *dev_id) |
1922 | { | 2009 | { |
1923 | struct ub_dev *sc; | 2010 | struct ub_dev *sc; |
1924 | request_queue_t *q; | 2011 | int nluns; |
1925 | struct gendisk *disk; | ||
1926 | int rc; | 2012 | int rc; |
1927 | int i; | 2013 | int i; |
1928 | 2014 | ||
@@ -1931,6 +2017,7 @@ static int ub_probe(struct usb_interface *intf, | |||
1931 | goto err_core; | 2017 | goto err_core; |
1932 | memset(sc, 0, sizeof(struct ub_dev)); | 2018 | memset(sc, 0, sizeof(struct ub_dev)); |
1933 | spin_lock_init(&sc->lock); | 2019 | spin_lock_init(&sc->lock); |
2020 | INIT_LIST_HEAD(&sc->luns); | ||
1934 | usb_init_urb(&sc->work_urb); | 2021 | usb_init_urb(&sc->work_urb); |
1935 | tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); | 2022 | tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); |
1936 | atomic_set(&sc->poison, 0); | 2023 | atomic_set(&sc->poison, 0); |
@@ -1942,19 +2029,16 @@ static int ub_probe(struct usb_interface *intf, | |||
1942 | ub_init_completion(&sc->work_done); | 2029 | ub_init_completion(&sc->work_done); |
1943 | sc->work_done.done = 1; /* A little yuk, but oh well... */ | 2030 | sc->work_done.done = 1; /* A little yuk, but oh well... */ |
1944 | 2031 | ||
1945 | rc = -ENOSR; | ||
1946 | if ((sc->id = ub_id_get()) == -1) | ||
1947 | goto err_id; | ||
1948 | snprintf(sc->name, 8, DRV_NAME "%c", sc->id + 'a'); | ||
1949 | |||
1950 | sc->dev = interface_to_usbdev(intf); | 2032 | sc->dev = interface_to_usbdev(intf); |
1951 | sc->intf = intf; | 2033 | sc->intf = intf; |
1952 | // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; | 2034 | // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; |
1953 | |||
1954 | usb_set_intfdata(intf, sc); | 2035 | usb_set_intfdata(intf, sc); |
1955 | usb_get_dev(sc->dev); | 2036 | usb_get_dev(sc->dev); |
1956 | // usb_get_intf(sc->intf); /* Do we need this? */ | 2037 | // usb_get_intf(sc->intf); /* Do we need this? */ |
1957 | 2038 | ||
2039 | snprintf(sc->name, 12, DRV_NAME "(%d.%d)", | ||
2040 | sc->dev->bus->busnum, sc->dev->devnum); | ||
2041 | |||
1958 | /* XXX Verify that we can handle the device (from descriptors) */ | 2042 | /* XXX Verify that we can handle the device (from descriptors) */ |
1959 | 2043 | ||
1960 | ub_get_pipes(sc, sc->dev, intf); | 2044 | ub_get_pipes(sc, sc->dev, intf); |
@@ -1992,35 +2076,88 @@ static int ub_probe(struct usb_interface *intf, | |||
1992 | * In any case it's not our business how revalidation is implemented. | 2076 | * In any case it's not our business how revalidation is implemented. |
1993 | */ | 2077 | */ |
1994 | for (i = 0; i < 3; i++) { /* Retries for benh's key */ | 2078 | for (i = 0; i < 3; i++) { /* Retries for benh's key */ |
1995 | if ((rc = ub_sync_tur(sc)) <= 0) break; | 2079 | if ((rc = ub_sync_tur(sc, NULL)) <= 0) break; |
1996 | if (rc != 0x6) break; | 2080 | if (rc != 0x6) break; |
1997 | msleep(10); | 2081 | msleep(10); |
1998 | } | 2082 | } |
1999 | 2083 | ||
2000 | sc->removable = 1; /* XXX Query this from the device */ | 2084 | nluns = 1; |
2001 | sc->changed = 1; /* ub_revalidate clears only */ | 2085 | for (i = 0; i < 3; i++) { |
2002 | sc->first_open = 1; | 2086 | if ((rc = ub_sync_getmaxlun(sc)) < 0) { |
2087 | /* | ||
2088 | * Some devices (i.e. Iomega Zip100) need this -- | ||
2089 | * apparently the bulk pipes get STALLed when the | ||
2090 | * GetMaxLUN request is processed. | ||
2091 | * XXX I have a ZIP-100, verify it does this. | ||
2092 | */ | ||
2093 | if (rc == -EPIPE) { | ||
2094 | ub_probe_clear_stall(sc, sc->recv_bulk_pipe); | ||
2095 | ub_probe_clear_stall(sc, sc->send_bulk_pipe); | ||
2096 | } | ||
2097 | break; | ||
2098 | } | ||
2099 | if (rc != 0) { | ||
2100 | nluns = rc; | ||
2101 | break; | ||
2102 | } | ||
2103 | msleep(100); | ||
2104 | } | ||
2003 | 2105 | ||
2004 | ub_revalidate(sc); | 2106 | for (i = 0; i < nluns; i++) { |
2005 | /* This is pretty much a long term P3 */ | 2107 | ub_probe_lun(sc, i); |
2006 | printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n", | 2108 | } |
2007 | sc->name, sc->dev->devnum, sc->capacity.nsec, sc->capacity.bsize); | 2109 | return 0; |
2110 | |||
2111 | /* device_remove_file(&sc->intf->dev, &dev_attr_diag); */ | ||
2112 | err_diag: | ||
2113 | usb_set_intfdata(intf, NULL); | ||
2114 | // usb_put_intf(sc->intf); | ||
2115 | usb_put_dev(sc->dev); | ||
2116 | kfree(sc); | ||
2117 | err_core: | ||
2118 | return rc; | ||
2119 | } | ||
2120 | |||
2121 | static int ub_probe_lun(struct ub_dev *sc, int lnum) | ||
2122 | { | ||
2123 | struct ub_lun *lun; | ||
2124 | request_queue_t *q; | ||
2125 | struct gendisk *disk; | ||
2126 | int rc; | ||
2127 | |||
2128 | rc = -ENOMEM; | ||
2129 | if ((lun = kmalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL) | ||
2130 | goto err_alloc; | ||
2131 | memset(lun, 0, sizeof(struct ub_lun)); | ||
2132 | lun->num = lnum; | ||
2133 | |||
2134 | rc = -ENOSR; | ||
2135 | if ((lun->id = ub_id_get()) == -1) | ||
2136 | goto err_id; | ||
2137 | |||
2138 | lun->udev = sc; | ||
2139 | list_add(&lun->link, &sc->luns); | ||
2140 | |||
2141 | snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)", | ||
2142 | lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num); | ||
2143 | |||
2144 | lun->removable = 1; /* XXX Query this from the device */ | ||
2145 | lun->changed = 1; /* ub_revalidate clears only */ | ||
2146 | lun->first_open = 1; | ||
2147 | ub_revalidate(sc, lun); | ||
2008 | 2148 | ||
2009 | /* | ||
2010 | * Just one disk per sc currently, but maybe more. | ||
2011 | */ | ||
2012 | rc = -ENOMEM; | 2149 | rc = -ENOMEM; |
2013 | if ((disk = alloc_disk(UB_MINORS_PER_MAJOR)) == NULL) | 2150 | if ((disk = alloc_disk(UB_MINORS_PER_MAJOR)) == NULL) |
2014 | goto err_diskalloc; | 2151 | goto err_diskalloc; |
2015 | 2152 | ||
2016 | sc->disk = disk; | 2153 | lun->disk = disk; |
2017 | sprintf(disk->disk_name, DRV_NAME "%c", sc->id + 'a'); | 2154 | sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a'); |
2018 | sprintf(disk->devfs_name, DEVFS_NAME "/%c", sc->id + 'a'); | 2155 | sprintf(disk->devfs_name, DEVFS_NAME "/%c", lun->id + 'a'); |
2019 | disk->major = UB_MAJOR; | 2156 | disk->major = UB_MAJOR; |
2020 | disk->first_minor = sc->id * UB_MINORS_PER_MAJOR; | 2157 | disk->first_minor = lun->id * UB_MINORS_PER_MAJOR; |
2021 | disk->fops = &ub_bd_fops; | 2158 | disk->fops = &ub_bd_fops; |
2022 | disk->private_data = sc; | 2159 | disk->private_data = lun; |
2023 | disk->driverfs_dev = &intf->dev; | 2160 | disk->driverfs_dev = &sc->intf->dev; /* XXX Many to one ok? */ |
2024 | 2161 | ||
2025 | rc = -ENOMEM; | 2162 | rc = -ENOMEM; |
2026 | if ((q = blk_init_queue(ub_bd_rq_fn, &sc->lock)) == NULL) | 2163 | if ((q = blk_init_queue(ub_bd_rq_fn, &sc->lock)) == NULL) |
@@ -2028,28 +2165,17 @@ static int ub_probe(struct usb_interface *intf, | |||
2028 | 2165 | ||
2029 | disk->queue = q; | 2166 | disk->queue = q; |
2030 | 2167 | ||
2031 | // blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); | 2168 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); |
2032 | blk_queue_max_hw_segments(q, UB_MAX_REQ_SG); | 2169 | blk_queue_max_hw_segments(q, UB_MAX_REQ_SG); |
2033 | blk_queue_max_phys_segments(q, UB_MAX_REQ_SG); | 2170 | blk_queue_max_phys_segments(q, UB_MAX_REQ_SG); |
2034 | // blk_queue_segment_boundary(q, CARM_SG_BOUNDARY); | 2171 | blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */ |
2035 | blk_queue_max_sectors(q, UB_MAX_SECTORS); | 2172 | blk_queue_max_sectors(q, UB_MAX_SECTORS); |
2036 | blk_queue_hardsect_size(q, sc->capacity.bsize); | 2173 | blk_queue_hardsect_size(q, lun->capacity.bsize); |
2037 | |||
2038 | /* | ||
2039 | * This is a serious infraction, caused by a deficiency in the | ||
2040 | * USB sg interface (usb_sg_wait()). We plan to remove this once | ||
2041 | * we get mileage on the driver and can justify a change to USB API. | ||
2042 | * See blk_queue_bounce_limit() to understand this part. | ||
2043 | * | ||
2044 | * XXX And I still need to be aware of the DMA mask in the HC. | ||
2045 | */ | ||
2046 | q->bounce_pfn = blk_max_low_pfn; | ||
2047 | q->bounce_gfp = GFP_NOIO; | ||
2048 | 2174 | ||
2049 | q->queuedata = sc; | 2175 | q->queuedata = lun; |
2050 | 2176 | ||
2051 | set_capacity(disk, sc->capacity.nsec); | 2177 | set_capacity(disk, lun->capacity.nsec); |
2052 | if (sc->removable) | 2178 | if (lun->removable) |
2053 | disk->flags |= GENHD_FL_REMOVABLE; | 2179 | disk->flags |= GENHD_FL_REMOVABLE; |
2054 | 2180 | ||
2055 | add_disk(disk); | 2181 | add_disk(disk); |
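Each LUN now gets its own gendisk, so the device letter and minor range follow lun->id rather than the USB interface; this is also why ub_id_get() moves from ub_probe() into ub_probe_lun() in this patch. A small illustration of the mapping set up above; the per-disk minor count of 8 is an assumed stand-in for UB_MINORS_PER_MAJOR, not a value taken from ub.c:

```c
#include <linux/kernel.h>

/*
 * Illustration only: how a lun->id maps onto the disk name and the
 * first minor number, mirroring the assignments in ub_probe_lun().
 * DEMO_MINORS_PER_DISK stands in for UB_MINORS_PER_MAJOR.
 */
#define DEMO_MINORS_PER_DISK	8

static void demo_lun_node(int id, char *name, int *first_minor)
{
	sprintf(name, "ub%c", id + 'a');		/* uba, ubb, ubc, ... */
	*first_minor = id * DEMO_MINORS_PER_DISK;	/* 0, 8, 16, ... */
}
```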
@@ -2059,22 +2185,20 @@ static int ub_probe(struct usb_interface *intf, | |||
2059 | err_blkqinit: | 2185 | err_blkqinit: |
2060 | put_disk(disk); | 2186 | put_disk(disk); |
2061 | err_diskalloc: | 2187 | err_diskalloc: |
2062 | device_remove_file(&sc->intf->dev, &dev_attr_diag); | 2188 | list_del(&lun->link); |
2063 | err_diag: | 2189 | ub_id_put(lun->id); |
2064 | usb_set_intfdata(intf, NULL); | ||
2065 | // usb_put_intf(sc->intf); | ||
2066 | usb_put_dev(sc->dev); | ||
2067 | ub_id_put(sc->id); | ||
2068 | err_id: | 2190 | err_id: |
2069 | kfree(sc); | 2191 | kfree(lun); |
2070 | err_core: | 2192 | err_alloc: |
2071 | return rc; | 2193 | return rc; |
2072 | } | 2194 | } |
2073 | 2195 | ||
2074 | static void ub_disconnect(struct usb_interface *intf) | 2196 | static void ub_disconnect(struct usb_interface *intf) |
2075 | { | 2197 | { |
2076 | struct ub_dev *sc = usb_get_intfdata(intf); | 2198 | struct ub_dev *sc = usb_get_intfdata(intf); |
2077 | struct gendisk *disk = sc->disk; | 2199 | struct list_head *p; |
2200 | struct ub_lun *lun; | ||
2201 | struct gendisk *disk; | ||
2078 | unsigned long flags; | 2202 | unsigned long flags; |
2079 | 2203 | ||
2080 | /* | 2204 | /* |
@@ -2124,14 +2248,18 @@ static void ub_disconnect(struct usb_interface *intf) | |||
2124 | /* | 2248 | /* |
2125 | * Unregister the upper layer. | 2249 | * Unregister the upper layer. |
2126 | */ | 2250 | */ |
2127 | if (disk->flags & GENHD_FL_UP) | 2251 | list_for_each (p, &sc->luns) { |
2128 | del_gendisk(disk); | 2252 | lun = list_entry(p, struct ub_lun, link); |
2129 | /* | 2253 | disk = lun->disk; |
2130 | * I wish I could do: | 2254 | if (disk->flags & GENHD_FL_UP) |
2131 | * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); | 2255 | del_gendisk(disk); |
2132 | * As it is, we rely on our internal poisoning and let | 2256 | /* |
2133 | * the upper levels to spin furiously failing all the I/O. | 2257 | * I wish I could do: |
2134 | */ | 2258 | * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); |
2259 | * As it is, we rely on our internal poisoning and let | ||
2260 | * the upper levels spin furiously failing all the I/O. | ||
2261 | */ | ||
2262 | } | ||
2135 | 2263 | ||
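The teardown above walks sc->luns with an open-coded list_for_each()/list_entry() pair. The list_for_each_entry() iterator from <linux/list.h> folds the list_entry() step into the loop header; a self-contained sketch of that form, using a made-up structure since ub's own types are not repeated here:

```c
#include <linux/list.h>

/* Demo structure standing in for struct ub_lun (link member only). */
struct demo_lun {
	struct list_head link;
	int id;
};

/* Count list members without an explicit list_entry() per iteration. */
static int demo_count_luns(struct list_head *luns)
{
	struct demo_lun *lun;
	int n = 0;

	list_for_each_entry(lun, luns, link)
		n++;
	return n;
}
```

Either form visits every LUN; the driver's loop then calls del_gendisk() only on disks with GENHD_FL_UP set, i.e. only on LUNs whose add_disk() had completed.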
2136 | /* | 2264 | /* |
2137 | * Taking a lock on a structure which is about to be freed | 2265 | * Taking a lock on a structure which is about to be freed |
@@ -2182,8 +2310,8 @@ static int __init ub_init(void) | |||
2182 | { | 2310 | { |
2183 | int rc; | 2311 | int rc; |
2184 | 2312 | ||
2185 | /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu\n", | 2313 | /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu ub_lun %zu\n", |
2186 | sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev)); | 2314 | sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev), sizeof(struct ub_lun)); |
2187 | 2315 | ||
2188 | if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0) | 2316 | if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0) |
2189 | goto err_regblkdev; | 2317 | goto err_regblkdev; |