Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/Kconfig        |   2
-rw-r--r--  drivers/block/aoe/aoenet.c   |   2
-rw-r--r--  drivers/block/as-iosched.c   |  10
-rw-r--r--  drivers/block/cciss.c        |  49
-rw-r--r--  drivers/block/cciss.h        |   4
-rw-r--r--  drivers/block/cfq-iosched.c  |  32
-rw-r--r--  drivers/block/cryptoloop.c   |   6
-rw-r--r--  drivers/block/floppy.c       |  41
-rw-r--r--  drivers/block/ll_rw_blk.c    |  18
-rw-r--r--  drivers/block/sx8.c          |   4
-rw-r--r--  drivers/block/ub.c           | 211
-rw-r--r--  drivers/block/viodasd.c      |   2
12 files changed, 273 insertions(+), 108 deletions(-)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 7cd7bf35d028..6b736364cc5b 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -413,7 +413,7 @@ config BLK_DEV_INITRD
 #for instance.
 config LBD
 	bool "Support for Large Block Devices"
-	depends on X86 || MIPS32 || PPC32 || ARCH_S390_31 || SUPERH || UML
+	depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
 	help
 	  Say Y here if you want to attach large (bigger than 2TB) discs to
 	  your machine, or if you want to have a raid or loopback device
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 9e6f51c528b0..4be976940f69 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -120,7 +120,7 @@ aoenet_xmit(struct sk_buff *sl)
  * (1) len doesn't include the header by default. I want this.
  */
 static int
-aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt)
+aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct aoe_hdr *h;
 	u32 n;
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 91aeb678135d..95c0a3690b0f 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -1935,23 +1935,15 @@ struct as_fs_entry {
 static ssize_t
 as_var_show(unsigned int var, char *page)
 {
-	var = (var * 1000) / HZ;
 	return sprintf(page, "%d\n", var);
 }
 
 static ssize_t
 as_var_store(unsigned long *var, const char *page, size_t count)
 {
-	unsigned long tmp;
 	char *p = (char *) page;
 
-	tmp = simple_strtoul(p, &p, 10);
-	if (tmp != 0) {
-		tmp = (tmp * HZ) / 1000;
-		if (tmp == 0)
-			tmp = 1;
-	}
-	*var = tmp;
+	*var = simple_strtoul(p, &p, 10);
 	return count;
 }
 
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 3e9fb6e4a52a..418b1469d75d 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1135,7 +1135,7 @@ static int revalidate_allvol(ctlr_info_t *host)
 		/* this is for the online array utilities */
 		if (!drv->heads && i)
 			continue;
-		blk_queue_hardsect_size(host->queue, drv->block_size);
+		blk_queue_hardsect_size(drv->queue, drv->block_size);
 		set_capacity(disk, drv->nr_blocks);
 		add_disk(disk);
 	}
@@ -1691,7 +1691,7 @@ static int cciss_revalidate(struct gendisk *disk)
 	cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
 	cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
 
-	blk_queue_hardsect_size(h->queue, drv->block_size);
+	blk_queue_hardsect_size(drv->queue, drv->block_size);
 	set_capacity(disk, drv->nr_blocks);
 
 	kfree(size_buff);
@@ -2248,12 +2248,12 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
 	 * them up. We will also keep track of the next queue to run so
 	 * that every queue gets a chance to be started first.
 	 */
-	for (j=0; j < NWD; j++){
-		int curr_queue = (start_queue + j) % NWD;
+	for (j=0; j < h->highest_lun + 1; j++){
+		int curr_queue = (start_queue + j) % (h->highest_lun + 1);
 		/* make sure the disk has been added and the drive is real
 		 * because this can be called from the middle of init_one.
 		 */
-		if(!(h->gendisk[curr_queue]->queue) ||
+		if(!(h->drv[curr_queue].queue) ||
 			!(h->drv[curr_queue].heads))
 			continue;
 		blk_start_queue(h->gendisk[curr_queue]->queue);
@@ -2264,14 +2264,14 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
 		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
 		{
 			if (curr_queue == start_queue){
-				h->next_to_run = (start_queue + 1) % NWD;
+				h->next_to_run = (start_queue + 1) % (h->highest_lun + 1);
 				goto cleanup;
 			} else {
 				h->next_to_run = curr_queue;
 				goto cleanup;
 			}
 		} else {
-			curr_queue = (curr_queue + 1) % NWD;
+			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
 		}
 	}
 
@@ -2279,7 +2279,6 @@ cleanup:
 	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 	return IRQ_HANDLED;
 }
-
 /*
  * We cannot read the structure directly, for portablity we must use
  * the io functions.
@@ -2789,13 +2788,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	}
 
 	spin_lock_init(&hba[i]->lock);
-	q = blk_init_queue(do_cciss_request, &hba[i]->lock);
-	if (!q)
-		goto clean4;
-
-	q->backing_dev_info.ra_pages = READ_AHEAD;
-	hba[i]->queue = q;
-	q->queuedata = hba[i];
 
 	/* Initialize the pdev driver private data.
 	   have it point to hba[i]. */
@@ -2817,6 +2809,20 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 
 	cciss_procinit(i);
 
+	for(j=0; j < NWD; j++) { /* mfm */
+		drive_info_struct *drv = &(hba[i]->drv[j]);
+		struct gendisk *disk = hba[i]->gendisk[j];
+
+		q = blk_init_queue(do_cciss_request, &hba[i]->lock);
+		if (!q) {
+			printk(KERN_ERR
+				"cciss: unable to allocate queue for disk %d\n",
+				j);
+			break;
+		}
+		drv->queue = q;
+
+		q->backing_dev_info.ra_pages = READ_AHEAD;
 	blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
 
 	/* This is a hardware imposed limit. */
@@ -2827,26 +2833,23 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 
 	blk_queue_max_sectors(q, 512);
 
-
-	for(j=0; j<NWD; j++) {
-		drive_info_struct *drv = &(hba[i]->drv[j]);
-		struct gendisk *disk = hba[i]->gendisk[j];
-
+		q->queuedata = hba[i];
 		sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
 		sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
 		disk->major = hba[i]->major;
 		disk->first_minor = j << NWD_SHIFT;
 		disk->fops = &cciss_fops;
-		disk->queue = hba[i]->queue;
+		disk->queue = q;
 		disk->private_data = drv;
 		/* we must register the controller even if no disks exist */
 		/* this is for the online array utilities */
 		if(!drv->heads && j)
 			continue;
-		blk_queue_hardsect_size(hba[i]->queue, drv->block_size);
+		blk_queue_hardsect_size(q, drv->block_size);
 		set_capacity(disk, drv->nr_blocks);
 		add_disk(disk);
 	}
+
 	return(1);
 
 clean4:
@@ -2912,10 +2915,10 @@ static void __devexit cciss_remove_one (struct pci_dev *pdev)
 	for (j = 0; j < NWD; j++) {
 		struct gendisk *disk = hba[i]->gendisk[j];
 		if (disk->flags & GENHD_FL_UP)
+			blk_cleanup_queue(disk->queue);
 			del_gendisk(disk);
 	}
 
-	blk_cleanup_queue(hba[i]->queue);
 	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
 		hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
 	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 8fb19206eddb..566587d0a500 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -29,6 +29,7 @@ typedef struct _drive_info_struct
 {
 	__u32 LunID;
 	int usage_count;
+	struct request_queue *queue;
 	sector_t nr_blocks;
 	int block_size;
 	int heads;
@@ -72,7 +73,6 @@ struct ctlr_info
 	unsigned int maxQsinceinit;
 	unsigned int maxSG;
 	spinlock_t lock;
-	struct request_queue *queue;
 
 	//* pointers to command and error info pool */
 	CommandList_struct *cmd_pool;
@@ -260,7 +260,7 @@ struct board_type {
 	struct access_method *access;
 };
 
-#define CCISS_LOCK(i)	(hba[i]->queue->queue_lock)
+#define CCISS_LOCK(i)	(&hba[i]->lock)
 
 #endif /* CCISS_H */
 
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index de5746e38af9..cd056e7e64ec 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -47,7 +47,7 @@ static int cfq_slice_idle = HZ / 100;
 /*
  * disable queueing at the driver/hardware level
  */
-static int cfq_max_depth = 1;
+static int cfq_max_depth = 2;
 
 /*
  * for the hash of cfqq inside the cfqd
@@ -385,9 +385,15 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
 		return crq2;
 	if (crq2 == NULL)
 		return crq1;
-	if (cfq_crq_requeued(crq1))
+
+	if (cfq_crq_requeued(crq1) && !cfq_crq_requeued(crq2))
 		return crq1;
-	if (cfq_crq_requeued(crq2))
+	else if (cfq_crq_requeued(crq2) && !cfq_crq_requeued(crq1))
+		return crq2;
+
+	if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2))
+		return crq1;
+	else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1))
 		return crq2;
 
 	s1 = crq1->request->sector;
@@ -1281,6 +1287,7 @@ dispatch:
 	 */
 	if (!cfq_crq_in_driver(crq) &&
 	    !cfq_cfqq_idle_window(cfqq) &&
+	    !blk_barrier_rq(rq) &&
 	    cfqd->rq_in_driver >= cfqd->cfq_max_depth)
 		return NULL;
 
@@ -1768,18 +1775,23 @@ static void
 cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 struct cfq_rq *crq)
 {
-	const int sync = cfq_crq_is_sync(crq);
+	struct cfq_io_context *cic;
 
 	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
 
-	if (sync) {
-		struct cfq_io_context *cic = crq->io_context;
+	/*
+	 * we never wait for an async request and we don't allow preemption
+	 * of an async request. so just return early
+	 */
+	if (!cfq_crq_is_sync(crq))
+		return;
 
-		cfq_update_io_thinktime(cfqd, cic);
-		cfq_update_idle_window(cfqd, cfqq, cic);
+	cic = crq->io_context;
 
-		cic->last_queue = jiffies;
-	}
+	cfq_update_io_thinktime(cfqd, cic);
+	cfq_update_idle_window(cfqd, cfqq, cic);
+
+	cic->last_queue = jiffies;
 
 	if (cfqq == cfqd->active_queue) {
 		/*
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 5be6f998d8c5..3d4261c39f16 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -57,9 +57,11 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
 	mode = strsep(&cmsp, "-");
 
 	if (mode == NULL || strcmp(mode, "cbc") == 0)
-		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC);
+		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC |
+					       CRYPTO_TFM_REQ_MAY_SLEEP);
 	else if (strcmp(mode, "ecb") == 0)
-		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB);
+		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB |
+					       CRYPTO_TFM_REQ_MAY_SLEEP);
 	if (tfm == NULL)
 		return -EINVAL;
 
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index f0c1084b840f..888dad5eef34 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -493,6 +493,8 @@ static struct floppy_struct user_params[N_DRIVE];
 
 static sector_t floppy_sizes[256];
 
+static char floppy_device_name[] = "floppy";
+
 /*
  * The driver is trying to determine the correct media format
  * while probing is set. rw_interrupt() clears it after a
@@ -4191,18 +4193,24 @@ static int __init floppy_setup(char *str)
 
 static int have_no_fdc = -ENODEV;
 
+static ssize_t floppy_cmos_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct platform_device *p;
+	int drive;
+
+	p = container_of(dev, struct platform_device,dev);
+	drive = p->id;
+	return sprintf(buf, "%X\n", UDP->cmos);
+}
+DEVICE_ATTR(cmos,S_IRUGO,floppy_cmos_show,NULL);
+
 static void floppy_device_release(struct device *dev)
 {
 	complete(&device_release);
 }
 
-static struct platform_device floppy_device = {
-	.name = "floppy",
-	.id = 0,
-	.dev = {
-		.release = floppy_device_release,
-	}
-};
+static struct platform_device floppy_device[N_DRIVE];
 
 static struct kobject *floppy_find(dev_t dev, int *part, void *data)
 {
@@ -4370,20 +4378,26 @@ static int __init floppy_init(void)
 		goto out_flush_work;
 	}
 
-	err = platform_device_register(&floppy_device);
-	if (err)
-		goto out_flush_work;
-
 	for (drive = 0; drive < N_DRIVE; drive++) {
 		if (!(allowed_drive_mask & (1 << drive)))
 			continue;
 		if (fdc_state[FDC(drive)].version == FDC_NONE)
 			continue;
+
+		floppy_device[drive].name = floppy_device_name;
+		floppy_device[drive].id = drive;
+		floppy_device[drive].dev.release = floppy_device_release;
+
+		err = platform_device_register(&floppy_device[drive]);
+		if (err)
+			goto out_flush_work;
+
+		device_create_file(&floppy_device[drive].dev,&dev_attr_cmos);
 		/* to be cleaned up... */
 		disks[drive]->private_data = (void *)(long)drive;
 		disks[drive]->queue = floppy_queue;
 		disks[drive]->flags |= GENHD_FL_REMOVABLE;
-		disks[drive]->driverfs_dev = &floppy_device.dev;
+		disks[drive]->driverfs_dev = &floppy_device[drive].dev;
 		add_disk(disks[drive]);
 	}
 
@@ -4603,10 +4617,11 @@ void cleanup_module(void)
 				fdc_state[FDC(drive)].version != FDC_NONE) {
 			del_gendisk(disks[drive]);
 			unregister_devfs_entries(drive);
+			device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
+			platform_device_unregister(&floppy_device[drive]);
 		}
 		put_disk(disks[drive]);
 	}
-	platform_device_unregister(&floppy_device);
 	devfs_remove("floppy");
 
 	del_timer_sync(&fd_timeout);
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 692a5fced76e..3c818544475e 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -719,7 +719,7 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 
-	if (unlikely(bqt == NULL || tag >= bqt->max_depth))
+	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
 		return NULL;
 
 	return bqt->tag_index[tag];
@@ -798,6 +798,7 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 
 	memset(tag_index, 0, depth * sizeof(struct request *));
 	memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
+	tags->real_max_depth = depth;
 	tags->max_depth = depth;
 	tags->tag_index = tag_index;
 	tags->tag_map = tag_map;
@@ -872,11 +873,22 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
 		return -ENXIO;
 
 	/*
+	 * if we already have large enough real_max_depth. just
+	 * adjust max_depth. *NOTE* as requests with tag value
+	 * between new_depth and real_max_depth can be in-flight, tag
+	 * map can not be shrunk blindly here.
+	 */
+	if (new_depth <= bqt->real_max_depth) {
+		bqt->max_depth = new_depth;
+		return 0;
+	}
+
+	/*
 	 * save the old state info, so we can copy it back
 	 */
 	tag_index = bqt->tag_index;
 	tag_map = bqt->tag_map;
-	max_depth = bqt->max_depth;
+	max_depth = bqt->real_max_depth;
 
 	if (init_tag_map(q, bqt, new_depth))
 		return -ENOMEM;
@@ -913,7 +925,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 
 	BUG_ON(tag == -1);
 
-	if (unlikely(tag >= bqt->max_depth))
+	if (unlikely(tag >= bqt->real_max_depth))
 		/*
 		 * This can happen after tag depth has been reduced.
 		 * FIXME: how about a warning or info message here?
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 9db0a9e3e59c..d57007b92f77 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1582,7 +1582,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto err_out;
 
-#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
+#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
 	rc = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
 	if (!rc) {
 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
@@ -1601,7 +1601,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 			goto err_out_regions;
 		}
 		pci_dac = 0;
-#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
+#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
 	}
 #endif
 
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 685f061e69b2..a026567f5d18 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -23,6 +23,7 @@
  * -- Exterminate P3 printks
  * -- Resove XXX's
  * -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=?
+ * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -38,6 +39,73 @@
 #define UB_MAJOR 180
 
 /*
+ * The command state machine is the key model for understanding of this driver.
+ *
+ * The general rule is that all transitions are done towards the bottom
+ * of the diagram, thus preventing any loops.
+ *
+ * An exception to that is how the STAT state is handled. A counter allows it
+ * to be re-entered along the path marked with [C].
+ *
+ * +--------+
+ * ! INIT !
+ * +--------+
+ * !
+ * ub_scsi_cmd_start fails ->--------------------------------------\
+ * ! !
+ * V !
+ * +--------+ !
+ * ! CMD ! !
+ * +--------+ !
+ * ! +--------+ !
+ * was -EPIPE -->-------------------------------->! CLEAR ! !
+ * ! +--------+ !
+ * ! ! !
+ * was error -->------------------------------------- ! --------->\
+ * ! ! !
+ * /--<-- cmd->dir == NONE ? ! !
+ * ! ! ! !
+ * ! V ! !
+ * ! +--------+ ! !
+ * ! ! DATA ! ! !
+ * ! +--------+ ! !
+ * ! ! +---------+ ! !
+ * ! was -EPIPE -->--------------->! CLR2STS ! ! !
+ * ! ! +---------+ ! !
+ * ! ! ! ! !
+ * ! ! was error -->---- ! --------->\
+ * ! was error -->--------------------- ! ------------- ! --------->\
+ * ! ! ! ! !
+ * ! V ! ! !
+ * \--->+--------+ ! ! !
+ * ! STAT !<--------------------------/ ! !
+ * /--->+--------+ ! !
+ * ! ! ! !
+ * [C] was -EPIPE -->-----------\ ! !
+ * ! ! ! ! !
+ * +<---- len == 0 ! ! !
+ * ! ! ! ! !
+ * ! was error -->--------------------------------------!---------->\
+ * ! ! ! ! !
+ * +<---- bad CSW ! ! !
+ * +<---- bad tag ! ! !
+ * ! ! V ! !
+ * ! ! +--------+ ! !
+ * ! ! ! CLRRS ! ! !
+ * ! ! +--------+ ! !
+ * ! ! ! ! !
+ * \------- ! --------------------[C]--------\ ! !
+ * ! ! ! !
+ * cmd->error---\ +--------+ ! !
+ * ! +--------------->! SENSE !<----------/ !
+ * STAT_FAIL----/ +--------+ !
+ * ! ! V
+ * ! V +--------+
+ * \--------------------------------\--------------------->! DONE !
+ * +--------+
+ */
+
+/*
  * Definitions which have to be scattered once we understand the layout better.
  */
 
@@ -91,8 +159,6 @@ struct bulk_cs_wrap {
 
 #define US_BULK_CS_WRAP_LEN	13
 #define US_BULK_CS_SIGN		0x53425355	/* spells out 'USBS' */
-/* This is for Olympus Camedia digital cameras */
-#define US_BULK_CS_OLYMPUS_SIGN	0x55425355	/* spells out 'USBU' */
 #define US_BULK_STAT_OK		0
 #define US_BULK_STAT_FAIL	1
 #define US_BULK_STAT_PHASE	2
@@ -135,6 +201,7 @@ enum ub_scsi_cmd_state {
 	UB_CMDST_CLR2STS,	/* Clearing before requesting status */
 	UB_CMDST_STAT,		/* Status phase */
 	UB_CMDST_CLEAR,		/* Clearing a stall (halt, actually) */
+	UB_CMDST_CLRRS,		/* Clearing before retrying status */
 	UB_CMDST_SENSE,		/* Sending Request Sense */
 	UB_CMDST_DONE		/* Final state */
 };
@@ -146,6 +213,7 @@ static char *ub_scsi_cmd_stname[] = {
 	"c2s",
 	"sts",
 	"clr",
+	"crs",
 	"Sen",
 	"fin"
 };
@@ -316,6 +384,7 @@ struct ub_dev {
 	struct urb work_urb;
 	struct timer_list work_timer;
 	int last_pipe;			/* What might need clearing */
+	__le32 signature;		/* Learned signature */
 	struct bulk_cb_wrap work_bcb;
 	struct bulk_cs_wrap work_bcs;
 	struct usb_ctrlrequest work_cr;
@@ -339,8 +408,9 @@ static void ub_scsi_action(unsigned long _dev);
 static void ub_scsi_dispatch(struct ub_dev *sc);
 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
-static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
 	int stalled_pipe);
@@ -1085,6 +1155,28 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 
 		ub_state_stat(sc, cmd);
 
+	} else if (cmd->state == UB_CMDST_CLRRS) {
+		if (urb->status == -EPIPE) {
+			/*
+			 * STALL while clearning STALL.
+			 * The control pipe clears itself - nothing to do.
+			 * XXX Might try to reset the device here and retry.
+			 */
+			printk(KERN_NOTICE "%s: stall on control pipe\n",
+			    sc->name);
+			goto Bad_End;
+		}
+
+		/*
+		 * We ignore the result for the halt clear.
+		 */
+
+		/* reset the endpoint toggle */
+		usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
+			usb_pipeout(sc->last_pipe), 0);
+
+		ub_state_stat_counted(sc, cmd);
+
 	} else if (cmd->state == UB_CMDST_CMD) {
 		if (urb->status == -EPIPE) {
 			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
@@ -1190,52 +1282,57 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 				 */
 				goto Bad_End;
 			}
-			cmd->state = UB_CMDST_CLEAR;
+
+			/*
+			 * Having a stall when getting CSW is an error, so
+			 * make sure uppper levels are not oblivious to it.
+			 */
+			cmd->error = -EIO;	/* A cheap trick... */
+
+			cmd->state = UB_CMDST_CLRRS;
 			ub_cmdtr_state(sc, cmd);
 			return;
 		}
+		if (urb->status == -EOVERFLOW) {
+			/*
+			 * XXX We are screwed here. Retrying is pointless,
+			 * because the pipelined data will not get in until
+			 * we read with a big enough buffer. We must reset XXX.
+			 */
+			goto Bad_End;
+		}
 		if (urb->status != 0)
 			goto Bad_End;
 
 		if (urb->actual_length == 0) {
-			/*
-			 * Some broken devices add unnecessary zero-length
-			 * packets to the end of their data transfers.
-			 * Such packets show up as 0-length CSWs. If we
-			 * encounter such a thing, try to read the CSW again.
-			 */
-			if (++cmd->stat_count >= 4) {
-				printk(KERN_NOTICE "%s: unable to get CSW\n",
-				    sc->name);
-				goto Bad_End;
-			}
-			__ub_state_stat(sc, cmd);
+			ub_state_stat_counted(sc, cmd);
 			return;
 		}
 
 		/*
 		 * Check the returned Bulk protocol status.
+		 * The status block has to be validated first.
 		 */
 
 		bcs = &sc->work_bcs;
-		rc = le32_to_cpu(bcs->Residue);
-		if (rc != cmd->len - cmd->act_len) {
+
+		if (sc->signature == cpu_to_le32(0)) {
 			/*
-			 * It is all right to transfer less, the caller has
-			 * to check. But it's not all right if the device
-			 * counts disagree with our counts.
+			 * This is the first reply, so do not perform the check.
+			 * Instead, remember the signature the device uses
+			 * for future checks. But do not allow a nul.
 			 */
-			/* P3 */ printk("%s: resid %d len %d act %d\n",
-			    sc->name, rc, cmd->len, cmd->act_len);
-			goto Bad_End;
-		}
-
-#if 0
-		if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN) &&
-		    bcs->Signature != cpu_to_le32(US_BULK_CS_OLYMPUS_SIGN)) {
-			/* Windows ignores signatures, so do we. */
+			sc->signature = bcs->Signature;
+			if (sc->signature == cpu_to_le32(0)) {
+				ub_state_stat_counted(sc, cmd);
+				return;
+			}
+		} else {
+			if (bcs->Signature != sc->signature) {
+				ub_state_stat_counted(sc, cmd);
+				return;
+			}
 		}
-#endif
 
 		if (bcs->Tag != cmd->tag) {
 			/*
@@ -1310,7 +1417,7 @@ static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
  * Factorization helper for the command state machine:
  * Submit a CSW read.
  */
-static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
 	int rc;
 
@@ -1328,11 +1435,12 @@ static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 		/* XXX Clear stalls */
 		ub_complete(&sc->work_done);
 		ub_state_done(sc, cmd, rc);
-		return;
+		return -1;
 	}
 
 	sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
 	add_timer(&sc->work_timer);
+	return 0;
 }
 
 /*
@@ -1341,7 +1449,9 @@ static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
  */
 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
-	__ub_state_stat(sc, cmd);
+
+	if (__ub_state_stat(sc, cmd) != 0)
+		return;
 
 	cmd->stat_count = 0;
 	cmd->state = UB_CMDST_STAT;
@@ -1350,6 +1460,25 @@ static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 
 /*
  * Factorization helper for the command state machine:
+ * Submit a CSW read and go to STAT state with counter (along [C] path).
+ */
+static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+
+	if (++cmd->stat_count >= 4) {
+		ub_state_sense(sc, cmd);
+		return;
+	}
+
+	if (__ub_state_stat(sc, cmd) != 0)
+		return;
+
+	cmd->state = UB_CMDST_STAT;
+	ub_cmdtr_state(sc, cmd);
+}
+
+/*
+ * Factorization helper for the command state machine:
  * Submit a REQUEST SENSE and go to SENSE state.
  */
 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 46e56a25d2c8..e46ecd23b3ac 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -776,7 +776,7 @@ static int viodasd_remove(struct vio_dev *vdev)
  */
 static struct vio_device_id viodasd_device_table[] __devinitdata = {
 	{ "viodasd", "" },
-	{ 0, }
+	{ "", "" }
 };
 
 MODULE_DEVICE_TABLE(vio, viodasd_device_table);