Diffstat (limited to 'drivers/block')
 drivers/block/DAC960.c       |   2
 drivers/block/Kconfig        |   5
 drivers/block/cciss.c        |   9
 drivers/block/cpqarray.c     |   2
 drivers/block/floppy.c       |   2
 drivers/block/loop.c         |  23
 drivers/block/nbd.c          | 124
 drivers/block/paride/Kconfig |   5
 drivers/block/rd.c           |   4
 drivers/block/sx8.c          |   2
 drivers/block/ub.c           | 439
 drivers/block/viodasd.c      |   2
 12 files changed, 399 insertions(+), 220 deletions(-)
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 70eaa5c7ac08..21097a39a057 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3471,7 +3471,7 @@ static inline boolean DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
 
 	if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
 
-		end_that_request_last(Request);
+		end_that_request_last(Request, UpToDate);
 
 		if (Command->Completion) {
 			complete(Command->Completion);
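
The one-line change here recurs in cciss, cpqarray, floppy, nbd, sx8 and ub below: end_that_request_last() now takes the same uptodate argument as end_that_request_first(), so the final completion step knows whether the request succeeded. A minimal sketch of the updated idiom (the function name is illustrative, not from the patch):

	/* Sketch: completing a whole request under the extended API.
	 * uptodate > 0 means success; 0 or a negative errno such as
	 * -EIO marks the request as failed. */
	static void example_end_request(struct request *req, int uptodate)
	{
		/* returns nonzero while sectors remain to be completed */
		if (!end_that_request_first(req, uptodate, req->hard_nr_sectors))
			end_that_request_last(req, uptodate);
	}
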
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 7b1cd93892be..139cbba76180 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -117,7 +117,7 @@ config BLK_DEV_XD
 
 config PARIDE
 	tristate "Parallel port IDE device support"
-	depends on PARPORT
+	depends on PARPORT_PC
 	---help---
 	  There are many external CD-ROM and disk devices that connect through
 	  your computer's parallel port. Most of them are actually IDE devices
@@ -358,7 +358,8 @@ config BLK_DEV_UB
 	  This driver supports certain USB attached storage devices
 	  such as flash keys.
 
-	  Warning: Enabling this cripples the usb-storage driver.
+	  If you enable this driver, it is recommended to avoid conflicts
+	  with usb-storage by enabling USB_LIBUSUAL.
 
 	  If unsure, say N.
 
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index a9e33db46e68..d2815b7a9150 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1146,7 +1146,6 @@ static int revalidate_allvol(ctlr_info_t *host)
 			del_gendisk(disk);
 			if (q)
 				blk_cleanup_queue(q);
-			put_disk(disk);
 		}
 	}
 
@@ -1465,9 +1464,10 @@ static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
 		request_queue_t *q = disk->queue;
 		if (disk->flags & GENHD_FL_UP)
 			del_gendisk(disk);
-		if (q)
+		if (q) {
 			blk_cleanup_queue(q);
-		put_disk(disk);
+			drv->queue = NULL;
+		}
 	}
 }
 
@@ -2310,7 +2310,7 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
 	printk("Done with %p\n", cmd->rq);
 #endif /* CCISS_DEBUG */
 
-	end_that_request_last(cmd->rq);
+	end_that_request_last(cmd->rq, status ? 1 : -EIO);
 	cmd_free(h,cmd,1);
 }
 
@@ -3243,7 +3243,6 @@ static void __devexit cciss_remove_one (struct pci_dev *pdev)
 		del_gendisk(disk);
 		if (q)
 			blk_cleanup_queue(q);
-		put_disk(disk);
 	}
 }
 
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index cf1822a6361c..9bddb6874873 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1036,7 +1036,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
 	complete_buffers(cmd->rq->bio, ok);
 
 	DBGPX(printk("Done with %p\n", cmd->rq););
-	end_that_request_last(cmd->rq);
+	end_that_request_last(cmd->rq, ok ? 1 : -EIO);
 }
 
 /*
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index f7e765a1d313..a5b857c5c4b8 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2301,7 +2301,7 @@ static void floppy_end_request(struct request *req, int uptodate)
 	add_disk_randomness(req->rq_disk);
 	floppy_off((long)req->rq_disk->private_data);
 	blkdev_dequeue_request(req);
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 
 	/* We're done with the request */
 	current_req = NULL;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 96c664af8d06..a452b13620a2 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -213,7 +213,7 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
 	struct address_space_operations *aops = mapping->a_ops;
 	pgoff_t index;
 	unsigned offset, bv_offs;
-	int len, ret = 0;
+	int len, ret;
 
 	down(&mapping->host->i_sem);
 	index = pos >> PAGE_CACHE_SHIFT;
@@ -232,9 +232,15 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
 		page = grab_cache_page(mapping, index);
 		if (unlikely(!page))
 			goto fail;
-		if (unlikely(aops->prepare_write(file, page, offset,
-				offset + size)))
+		ret = aops->prepare_write(file, page, offset,
+				offset + size);
+		if (unlikely(ret)) {
+			if (ret == AOP_TRUNCATED_PAGE) {
+				page_cache_release(page);
+				continue;
+			}
 			goto unlock;
+		}
 		transfer_result = lo_do_transfer(lo, WRITE, page, offset,
 				bvec->bv_page, bv_offs, size, IV);
 		if (unlikely(transfer_result)) {
@@ -251,9 +257,15 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
 			kunmap_atomic(kaddr, KM_USER0);
 		}
 		flush_dcache_page(page);
-		if (unlikely(aops->commit_write(file, page, offset,
-				offset + size)))
+		ret = aops->commit_write(file, page, offset,
+				offset + size);
+		if (unlikely(ret)) {
+			if (ret == AOP_TRUNCATED_PAGE) {
+				page_cache_release(page);
+				continue;
+			}
 			goto unlock;
+		}
 		if (unlikely(transfer_result))
 			goto unlock;
 		bv_offs += size;
@@ -264,6 +276,7 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
 		unlock_page(page);
 		page_cache_release(page);
 	}
+	ret = 0;
 out:
 	up(&mapping->host->i_sem);
 	return ret;
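
The loop.c hunks adopt the AOP_TRUNCATED_PAGE convention: prepare_write()/commit_write() may now return AOP_TRUNCATED_PAGE after dropping the page lock because the page was truncated under the caller, and the caller must release its page reference and retry rather than fail. A condensed sketch of the retry shape used above (assuming the same aops/mapping variables as in the function):

	/* Sketch: retry an address_space op that can report truncation. */
	for (;;) {
		page = grab_cache_page(mapping, index);
		if (unlikely(!page))
			goto fail;
		ret = aops->prepare_write(file, page, offset, offset + size);
		if (ret == AOP_TRUNCATED_PAGE) {
			page_cache_release(page);	/* page is gone; drop our ref */
			continue;			/* grab and try again */
		}
		if (unlikely(ret))
			goto unlock;			/* a real error */
		break;
	}

Note also that ret is no longer initialized at declaration; it is set to 0 only when the loop completes, so an early exit returns the last error instead of a stale success.
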
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9e268ddedfbd..33d6f237b2ed 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -54,11 +54,15 @@
 #include <linux/errno.h>
 #include <linux/file.h>
 #include <linux/ioctl.h>
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
 #include <net/sock.h>
 
 #include <linux/devfs_fs_kernel.h>
 
 #include <asm/uaccess.h>
+#include <asm/system.h>
 #include <asm/types.h>
 
 #include <linux/nbd.h>
@@ -136,7 +140,7 @@ static void nbd_end_request(struct request *req)
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
-		end_that_request_last(req);
+		end_that_request_last(req, uptodate);
 	}
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -230,14 +234,6 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 	request.len = htonl(size);
 	memcpy(request.handle, &req, sizeof(req));
 
-	down(&lo->tx_lock);
-
-	if (!sock || !lo->sock) {
-		printk(KERN_ERR "%s: Attempted send on closed socket\n",
-				lo->disk->disk_name);
-		goto error_out;
-	}
-
 	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
 			lo->disk->disk_name, req,
 			nbdcmd_to_ascii(nbd_cmd(req)),
@@ -276,11 +272,9 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 			}
 		}
 	}
-	up(&lo->tx_lock);
 	return 0;
 
 error_out:
-	up(&lo->tx_lock);
 	return 1;
 }
 
@@ -289,9 +283,14 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
 	struct request *req;
 	struct list_head *tmp;
 	struct request *xreq;
+	int err;
 
 	memcpy(&xreq, handle, sizeof(xreq));
 
+	err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);
+	if (unlikely(err))
+		goto out;
+
 	spin_lock(&lo->queue_lock);
 	list_for_each(tmp, &lo->queue_head) {
 		req = list_entry(tmp, struct request, queuelist);
@@ -302,7 +301,11 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
 			return req;
 	}
 	spin_unlock(&lo->queue_lock);
-	return NULL;
+
+	err = -ENOENT;
+
+out:
+	return ERR_PTR(err);
 }
 
 static inline int sock_recv_bvec(struct socket *sock, struct bio_vec *bvec)
@@ -331,7 +334,11 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
 		goto harderror;
 	}
 	req = nbd_find_request(lo, reply.handle);
-	if (req == NULL) {
+	if (unlikely(IS_ERR(req))) {
+		result = PTR_ERR(req);
+		if (result != -ENOENT)
+			goto harderror;
+
 		printk(KERN_ERR "%s: Unexpected reply (%p)\n",
 				lo->disk->disk_name, reply.handle);
 		result = -EBADR;
@@ -395,19 +402,24 @@ static void nbd_clear_que(struct nbd_device *lo)
 
 	BUG_ON(lo->magic != LO_MAGIC);
 
-	do {
-		req = NULL;
-		spin_lock(&lo->queue_lock);
-		if (!list_empty(&lo->queue_head)) {
-			req = list_entry(lo->queue_head.next, struct request, queuelist);
-			list_del_init(&req->queuelist);
-		}
-		spin_unlock(&lo->queue_lock);
-		if (req) {
-			req->errors++;
-			nbd_end_request(req);
-		}
-	} while (req);
+	/*
+	 * Because we have set lo->sock to NULL under the tx_lock, all
+	 * modifications to the list must have completed by now. For
+	 * the same reason, the active_req must be NULL.
+	 *
+	 * As a consequence, we don't need to take the spin lock while
+	 * purging the list here.
+	 */
+	BUG_ON(lo->sock);
+	BUG_ON(lo->active_req);
+
+	while (!list_empty(&lo->queue_head)) {
+		req = list_entry(lo->queue_head.next, struct request,
+				queuelist);
+		list_del_init(&req->queuelist);
+		req->errors++;
+		nbd_end_request(req);
+	}
 }
 
 /*
@@ -435,11 +447,6 @@ static void do_nbd_request(request_queue_t * q)
 
 		BUG_ON(lo->magic != LO_MAGIC);
 
-		if (!lo->file) {
-			printk(KERN_ERR "%s: Request when not-ready\n",
-					lo->disk->disk_name);
-			goto error_out;
-		}
 		nbd_cmd(req) = NBD_CMD_READ;
 		if (rq_data_dir(req) == WRITE) {
 			nbd_cmd(req) = NBD_CMD_WRITE;
@@ -453,32 +460,34 @@ static void do_nbd_request(request_queue_t * q)
 		req->errors = 0;
 		spin_unlock_irq(q->queue_lock);
 
-		spin_lock(&lo->queue_lock);
-
-		if (!lo->file) {
-			spin_unlock(&lo->queue_lock);
-			printk(KERN_ERR "%s: failed between accept and semaphore, file lost\n",
-					lo->disk->disk_name);
+		down(&lo->tx_lock);
+		if (unlikely(!lo->sock)) {
+			up(&lo->tx_lock);
+			printk(KERN_ERR "%s: Attempted send on closed socket\n",
+					lo->disk->disk_name);
 			req->errors++;
 			nbd_end_request(req);
 			spin_lock_irq(q->queue_lock);
 			continue;
 		}
 
-		list_add(&req->queuelist, &lo->queue_head);
-		spin_unlock(&lo->queue_lock);
+		lo->active_req = req;
 
 		if (nbd_send_req(lo, req) != 0) {
 			printk(KERN_ERR "%s: Request send failed\n",
 					lo->disk->disk_name);
-			if (nbd_find_request(lo, (char *)&req) != NULL) {
-				/* we still own req */
-				req->errors++;
-				nbd_end_request(req);
-			} else /* we're racing with nbd_clear_que */
-				printk(KERN_DEBUG "nbd: can't find req\n");
+			req->errors++;
+			nbd_end_request(req);
+		} else {
+			spin_lock(&lo->queue_lock);
+			list_add(&req->queuelist, &lo->queue_head);
+			spin_unlock(&lo->queue_lock);
 		}
 
+		lo->active_req = NULL;
+		up(&lo->tx_lock);
+		wake_up_all(&lo->active_wq);
+
 		spin_lock_irq(q->queue_lock);
 		continue;
 
@@ -529,17 +538,10 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
 		down(&lo->tx_lock);
 		lo->sock = NULL;
 		up(&lo->tx_lock);
-		spin_lock(&lo->queue_lock);
 		file = lo->file;
 		lo->file = NULL;
-		spin_unlock(&lo->queue_lock);
 		nbd_clear_que(lo);
-		spin_lock(&lo->queue_lock);
-		if (!list_empty(&lo->queue_head)) {
-			printk(KERN_ERR "nbd: disconnect: some requests are in progress -> please try again.\n");
-			error = -EBUSY;
-		}
-		spin_unlock(&lo->queue_lock);
+		BUG_ON(!list_empty(&lo->queue_head));
 		if (file)
 			fput(file);
 		return error;
@@ -598,24 +600,19 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
 			lo->sock = NULL;
 		}
 		up(&lo->tx_lock);
-		spin_lock(&lo->queue_lock);
 		file = lo->file;
 		lo->file = NULL;
-		spin_unlock(&lo->queue_lock);
 		nbd_clear_que(lo);
 		printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);
 		if (file)
 			fput(file);
 		return lo->harderror;
 	case NBD_CLEAR_QUE:
-		down(&lo->tx_lock);
-		if (lo->sock) {
-			up(&lo->tx_lock);
-			return 0; /* probably should be error, but that would
-				   * break "nbd-client -d", so just return 0 */
-		}
-		up(&lo->tx_lock);
-		nbd_clear_que(lo);
+		/*
+		 * This is for compatibility only.  The queue is always cleared
+		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
+		 */
+		BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
 		return 0;
 	case NBD_PRINT_DEBUG:
 		printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
@@ -688,6 +685,7 @@ static int __init nbd_init(void)
 		spin_lock_init(&nbd_dev[i].queue_lock);
 		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
 		init_MUTEX(&nbd_dev[i].tx_lock);
+		init_waitqueue_head(&nbd_dev[i].active_wq);
 		nbd_dev[i].blksize = 1024;
 		nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */
 		disk->major = NBD_MAJOR;
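
Taken together, the nbd.c hunks move all socket transmission under tx_lock and introduce an active_req/active_wq handshake: a request joins queue_head only after a successful send, and nbd_find_request() (now returning ERR_PTR values instead of NULL) first waits until the request it is looking for is no longer in flight. The core of the protocol, as in the patch:

	/* Sender (do_nbd_request), with lo->tx_lock held: */
	lo->active_req = req;			/* mark as being transmitted */
	if (nbd_send_req(lo, req) == 0) {
		spin_lock(&lo->queue_lock);
		list_add(&req->queuelist, &lo->queue_head);
		spin_unlock(&lo->queue_lock);
	}
	lo->active_req = NULL;
	up(&lo->tx_lock);
	wake_up_all(&lo->active_wq);		/* unblock the reply side */

	/* Reply side (nbd_find_request): never match a request that may
	 * still be mid-send. */
	err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);

This is also what justifies the lock-free purge in nbd_clear_que(): once lo->sock has been set to NULL under tx_lock, no sender can still be adding to the list.
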
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index 17ff40561257..c0d2854dd097 100644
--- a/drivers/block/paride/Kconfig
+++ b/drivers/block/paride/Kconfig
@@ -4,11 +4,12 @@
 # PARIDE doesn't need PARPORT, but if PARPORT is configured as a module,
 # PARIDE must also be a module.  The bogus CONFIG_PARIDE_PARPORT option
 # controls the choices given to the user ...
+# PARIDE only supports PC style parports. Tough for USB or other parports...
 config PARIDE_PARPORT
 	tristate
 	depends on PARIDE!=n
-	default m if PARPORT=m
-	default y if PARPORT!=m
+	default m if PARPORT_PC=m
+	default y if PARPORT_PC!=m
 
 comment "Parallel IDE high-level drivers"
 	depends on PARIDE
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 68c60a5bcdab..ffd6abd6d5a0 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -154,7 +154,7 @@ static int ramdisk_commit_write(struct file *file, struct page *page,
 
 /*
  * ->writepage to the the blockdev's mapping has to redirty the page so that the
- * VM doesn't go and steal it.  We return WRITEPAGE_ACTIVATE so that the VM
+ * VM doesn't go and steal it.  We return AOP_WRITEPAGE_ACTIVATE so that the VM
  * won't try to (pointlessly) write the page again for a while.
  *
  * Really, these pages should not be on the LRU at all.
@@ -165,7 +165,7 @@ static int ramdisk_writepage(struct page *page, struct writeback_control *wbc)
 	make_page_uptodate(page);
 	SetPageDirty(page);
 	if (wbc->for_reclaim)
-		return WRITEPAGE_ACTIVATE;
+		return AOP_WRITEPAGE_ACTIVATE;
 	unlock_page(page);
 	return 0;
 }
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 1ded3b433459..9251f4131b53 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -770,7 +770,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
 	rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
 	assert(rc == 0);
 
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 
 	rc = carm_put_request(host, crq);
 	assert(rc == 0);
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index bfb23d543ff7..a05fe5843e6c 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -9,7 +9,6 @@
  *
  * TODO (sorted by decreasing priority)
  *  -- Kill first_open (Al Viro fixed the block layer now)
- *  -- Do resets with usb_device_reset (needs a thread context, use khubd)
  *  -- set readonly flag for CDs, set removable flag for CF readers
  *  -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
  *  -- special case some senses, e.g. 3a/0 -> no media present, reduce retries
@@ -29,6 +28,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/usb.h>
+#include <linux/usb_usual.h>
 #include <linux/blkdev.h>
 #include <linux/devfs_fs_kernel.h>
 #include <linux/timer.h>
@@ -107,16 +107,6 @@
  */
 
-/*
- * Definitions which have to be scattered once we understand the layout better.
- */
-
-/* Transport (despite PR in the name) */
-#define US_PR_BULK	0x50		/* bulk only */
-
-/* Protocol */
-#define US_SC_SCSI	0x06		/* Transparent */
-
 /*
  * This many LUNs per USB device.
  * Every one of them takes a host, see UB_MAX_HOSTS.
  */
@@ -125,7 +115,7 @@
 /*
 */
 
-#define UB_MINORS_PER_MAJOR	8
+#define UB_PARTS_PER_LUN	8
 
 #define UB_MAX_CDB_SIZE      16		/* Corresponds to Bulk */
 
@@ -245,6 +235,13 @@ struct ub_scsi_cmd {
 	void *back;
 };
 
+struct ub_request {
+	struct request *rq;
+	unsigned int current_try;
+	unsigned int nsg;		/* sgv[nsg] */
+	struct scatterlist sgv[UB_MAX_REQ_SG];
+};
+
 /*
 */
 struct ub_capacity {
@@ -340,6 +337,8 @@ struct ub_lun {
 	int readonly;
 	int first_open;	/* Kludge. See ub_bd_open. */
 
+	struct ub_request urq;
+
 	/* Use Ingo's mempool if or when we have more than one command. */
 	/*
 	 * Currently we never need more than one command for the whole device.
@@ -360,6 +359,7 @@ struct ub_dev {
 	atomic_t poison;		/* The USB device is disconnected */
 	int openc;			/* protected by ub_lock! */
 					/* kref is too implicit for our taste */
+	int reset;			/* Reset is running */
 	unsigned int tagcnt;
 	char name[12];
 	struct usb_device *dev;
@@ -387,6 +387,9 @@ struct ub_dev {
 	struct bulk_cs_wrap work_bcs;
 	struct usb_ctrlrequest work_cr;
 
+	struct work_struct reset_work;
+	wait_queue_head_t reset_wait;
+
 	int sg_stat[6];
 	struct ub_scsi_trace tr;
 };
@@ -395,12 +398,14 @@ struct ub_dev {
 */
 static void ub_cleanup(struct ub_dev *sc);
 static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
-static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
-    struct ub_scsi_cmd *cmd, struct request *rq);
-static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
-    struct ub_scsi_cmd *cmd, struct request *rq);
+static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct ub_request *urq);
+static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct ub_request *urq);
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_end_rq(struct request *rq, int uptodate);
+static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_request *urq, struct ub_scsi_cmd *cmd);
 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_urb_complete(struct urb *urb, struct pt_regs *pt);
 static void ub_scsi_action(unsigned long _dev);
@@ -415,6 +420,8 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
     int stalled_pipe);
 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
+static void ub_reset_enter(struct ub_dev *sc);
+static void ub_reset_task(void *arg);
 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_capacity *ret);
@@ -422,13 +429,18 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum);
 
 /*
 */
+#ifdef CONFIG_USB_LIBUSUAL
+
+#define ub_usb_ids  storage_usb_ids
+#else
+
 static struct usb_device_id ub_usb_ids[] = {
-	// { USB_DEVICE_VER(0x0781, 0x0002, 0x0009, 0x0009) },  /* SDDR-31 */
 	{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
 	{ }
 };
 
 MODULE_DEVICE_TABLE(usb, ub_usb_ids);
+#endif /* CONFIG_USB_LIBUSUAL */
 
 /*
  * Find me a way to identify "next free minor" for add_disk(),
@@ -522,6 +534,9 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
 	spin_lock_irqsave(&sc->lock, flags);
 
 	cnt += sprintf(page + cnt,
+	    "poison %d reset %d\n",
+	    atomic_read(&sc->poison), sc->reset);
+	cnt += sprintf(page + cnt,
 	    "qlen %d qmax %d\n",
 	    sc->cmd_queue.qlen, sc->cmd_queue.qmax);
 	cnt += sprintf(page + cnt,
@@ -770,7 +785,8 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 {
 	struct ub_dev *sc = lun->udev;
 	struct ub_scsi_cmd *cmd;
-	int rc;
+	struct ub_request *urq;
+	int n_elem;
 
 	if (atomic_read(&sc->poison) || lun->changed) {
 		blkdev_dequeue_request(rq);
@@ -778,65 +794,70 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 		return 0;
 	}
 
+	if (lun->urq.rq != NULL)
+		return -1;
 	if ((cmd = ub_get_cmd(lun)) == NULL)
 		return -1;
 	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
 
 	blkdev_dequeue_request(rq);
+
+	urq = &lun->urq;
+	memset(urq, 0, sizeof(struct ub_request));
+	urq->rq = rq;
+
+	/*
+	 * get scatterlist from block layer
+	 */
+	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
+	if (n_elem < 0) {
+		printk(KERN_INFO "%s: failed request map (%d)\n",
+		    lun->name, n_elem); /* P3 */
+		goto drop;
+	}
+	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
+		printk(KERN_WARNING "%s: request with %d segments\n",
+		    lun->name, n_elem);
+		goto drop;
+	}
+	urq->nsg = n_elem;
+	sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
+
 	if (blk_pc_request(rq)) {
-		rc = ub_cmd_build_packet(sc, lun, cmd, rq);
+		ub_cmd_build_packet(sc, lun, cmd, urq);
 	} else {
-		rc = ub_cmd_build_block(sc, lun, cmd, rq);
-	}
-	if (rc != 0) {
-		ub_put_cmd(lun, cmd);
-		ub_end_rq(rq, 0);
-		return 0;
+		ub_cmd_build_block(sc, lun, cmd, urq);
 	}
 	cmd->state = UB_CMDST_INIT;
 	cmd->lun = lun;
 	cmd->done = ub_rw_cmd_done;
-	cmd->back = rq;
+	cmd->back = urq;
 
 	cmd->tag = sc->tagcnt++;
-	if (ub_submit_scsi(sc, cmd) != 0) {
-		ub_put_cmd(lun, cmd);
-		ub_end_rq(rq, 0);
-		return 0;
-	}
+	if (ub_submit_scsi(sc, cmd) != 0)
+		goto drop;
 
 	return 0;
+
+drop:
+	ub_put_cmd(lun, cmd);
+	ub_end_rq(rq, 0);
+	return 0;
 }
 
-static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
-    struct ub_scsi_cmd *cmd, struct request *rq)
+static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct ub_request *urq)
 {
-	int ub_dir;
-	int n_elem;
+	struct request *rq = urq->rq;
 	unsigned int block, nblks;
 
 	if (rq_data_dir(rq) == WRITE)
-		ub_dir = UB_DIR_WRITE;
+		cmd->dir = UB_DIR_WRITE;
 	else
-		ub_dir = UB_DIR_READ;
-	cmd->dir = ub_dir;
+		cmd->dir = UB_DIR_READ;
 
-	/*
-	 * get scatterlist from block layer
-	 */
-	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]);
-	if (n_elem <= 0) {
-		printk(KERN_INFO "%s: failed request map (%d)\n",
-		    sc->name, n_elem); /* P3 */
-		return -1;		/* request with no s/g entries? */
-	}
-	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
-		printk(KERN_WARNING "%s: request with %d segments\n",
-		    sc->name, n_elem);
-		return -1;
-	}
-	cmd->nsg = n_elem;
-	sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
+	cmd->nsg = urq->nsg;
+	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
 
 	/*
 	 * build the command
@@ -847,7 +868,7 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 	block = rq->sector >> lun->capacity.bshift;
 	nblks = rq->nr_sectors >> lun->capacity.bshift;
 
-	cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10;
+	cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
 	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
 	cmd->cdb[2] = block >> 24;
 	cmd->cdb[3] = block >> 16;
@@ -858,14 +879,12 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 	cmd->cdb_len = 10;
 
 	cmd->len = rq->nr_sectors * 512;
-
-	return 0;
 }
 
-static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
-    struct ub_scsi_cmd *cmd, struct request *rq)
+static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct ub_request *urq)
 {
-	int n_elem;
+	struct request *rq = urq->rq;
 
 	if (rq->data_len == 0) {
 		cmd->dir = UB_DIR_NONE;
@@ -874,40 +893,26 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
 		cmd->dir = UB_DIR_WRITE;
 	else
 		cmd->dir = UB_DIR_READ;
-
 	}
 
-	/*
-	 * get scatterlist from block layer
-	 */
-	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]);
-	if (n_elem < 0) {
-		printk(KERN_INFO "%s: failed request map (%d)\n",
-		    sc->name, n_elem); /* P3 */
-		return -1;
-	}
-	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
-		printk(KERN_WARNING "%s: request with %d segments\n",
-		    sc->name, n_elem);
-		return -1;
-	}
-	cmd->nsg = n_elem;
-	sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
+	cmd->nsg = urq->nsg;
+	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
 
 	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
 	cmd->cdb_len = rq->cmd_len;
 
 	cmd->len = rq->data_len;
-
-	return 0;
 }
 
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
-	struct request *rq = cmd->back;
 	struct ub_lun *lun = cmd->lun;
+	struct ub_request *urq = cmd->back;
+	struct request *rq;
 	int uptodate;
 
+	rq = urq->rq;
+
 	if (cmd->error == 0) {
 		uptodate = 1;
 
@@ -928,9 +933,16 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 				rq->errors = SAM_STAT_CHECK_CONDITION;
 			else
 				rq->errors = DID_ERROR << 16;
+		} else {
+			if (cmd->error == -EIO) {
+				if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
+					return;
+			}
 		}
 	}
 
+	urq->rq = NULL;
+
 	ub_put_cmd(lun, cmd);
 	ub_end_rq(rq, uptodate);
 	blk_start_queue(lun->disk->queue);
@@ -938,11 +950,43 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 
 static void ub_end_rq(struct request *rq, int uptodate)
 {
-	int rc;
+	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
+	end_that_request_last(rq, uptodate);
+}
+
+static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_request *urq, struct ub_scsi_cmd *cmd)
+{
+
+	if (atomic_read(&sc->poison))
+		return -ENXIO;
+
+	ub_reset_enter(sc);
 
-	rc = end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-	// assert(rc == 0);
-	end_that_request_last(rq);
+	if (urq->current_try >= 3)
+		return -EIO;
+	urq->current_try++;
+	/* P3 */ printk("%s: dir %c len/act %d/%d "
+	    "[sense %x %02x %02x] retry %d\n",
+	    sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
+	    cmd->key, cmd->asc, cmd->ascq, urq->current_try);
+
+	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
+	ub_cmd_build_block(sc, lun, cmd, urq);
+
+	cmd->state = UB_CMDST_INIT;
+	cmd->lun = lun;
+	cmd->done = ub_rw_cmd_done;
+	cmd->back = urq;
+
+	cmd->tag = sc->tagcnt++;
+
+#if 0 /* Wasteful */
+	return ub_submit_scsi(sc, cmd);
+#else
+	ub_cmdq_add(sc, cmd);
+	return 0;
+#endif
 }
 
 /*
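
The point of struct ub_request is visible here: the scatterlist is mapped once in ub_request_fn_1() and saved in urq->sgv[], so when a transfer fails with -EIO, ub_rw_cmd_done() can hand the same request to ub_rw_cmd_retry() instead of failing it upward. Condensed from the patch, the retry decision is:

	/* Outline of ub_rw_cmd_retry(): */
	if (atomic_read(&sc->poison))
		return -ENXIO;		/* device unplugged: fail the request */
	ub_reset_enter(sc);		/* schedule a device reset first */
	if (urq->current_try >= 3)
		return -EIO;		/* give up after three attempts */
	urq->current_try++;
	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
	ub_cmd_build_block(sc, lun, cmd, urq);	/* rebuild from the saved sgv[] */
	ub_cmdq_add(sc, cmd);		/* requeue; dispatch resumes after the reset */
	return 0;			/* caller must not complete the request */
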
@@ -1075,7 +1119,7 @@ static void ub_scsi_dispatch(struct ub_dev *sc) | |||
1075 | struct ub_scsi_cmd *cmd; | 1119 | struct ub_scsi_cmd *cmd; |
1076 | int rc; | 1120 | int rc; |
1077 | 1121 | ||
1078 | while ((cmd = ub_cmdq_peek(sc)) != NULL) { | 1122 | while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) { |
1079 | if (cmd->state == UB_CMDST_DONE) { | 1123 | if (cmd->state == UB_CMDST_DONE) { |
1080 | ub_cmdq_pop(sc); | 1124 | ub_cmdq_pop(sc); |
1081 | (*cmd->done)(sc, cmd); | 1125 | (*cmd->done)(sc, cmd); |
@@ -1098,11 +1142,12 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1098 | { | 1142 | { |
1099 | struct urb *urb = &sc->work_urb; | 1143 | struct urb *urb = &sc->work_urb; |
1100 | struct bulk_cs_wrap *bcs; | 1144 | struct bulk_cs_wrap *bcs; |
1145 | int len; | ||
1101 | int rc; | 1146 | int rc; |
1102 | 1147 | ||
1103 | if (atomic_read(&sc->poison)) { | 1148 | if (atomic_read(&sc->poison)) { |
1104 | /* A little too simplistic, I feel... */ | 1149 | ub_state_done(sc, cmd, -ENODEV); |
1105 | goto Bad_End; | 1150 | return; |
1106 | } | 1151 | } |
1107 | 1152 | ||
1108 | if (cmd->state == UB_CMDST_CLEAR) { | 1153 | if (cmd->state == UB_CMDST_CLEAR) { |
@@ -1110,7 +1155,6 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1110 | /* | 1155 | /* |
1111 | * STALL while clearning STALL. | 1156 | * STALL while clearning STALL. |
1112 | * The control pipe clears itself - nothing to do. | 1157 | * The control pipe clears itself - nothing to do. |
1113 | * XXX Might try to reset the device here and retry. | ||
1114 | */ | 1158 | */ |
1115 | printk(KERN_NOTICE "%s: stall on control pipe\n", | 1159 | printk(KERN_NOTICE "%s: stall on control pipe\n", |
1116 | sc->name); | 1160 | sc->name); |
@@ -1129,11 +1173,6 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1129 | 1173 | ||
1130 | } else if (cmd->state == UB_CMDST_CLR2STS) { | 1174 | } else if (cmd->state == UB_CMDST_CLR2STS) { |
1131 | if (urb->status == -EPIPE) { | 1175 | if (urb->status == -EPIPE) { |
1132 | /* | ||
1133 | * STALL while clearning STALL. | ||
1134 | * The control pipe clears itself - nothing to do. | ||
1135 | * XXX Might try to reset the device here and retry. | ||
1136 | */ | ||
1137 | printk(KERN_NOTICE "%s: stall on control pipe\n", | 1176 | printk(KERN_NOTICE "%s: stall on control pipe\n", |
1138 | sc->name); | 1177 | sc->name); |
1139 | goto Bad_End; | 1178 | goto Bad_End; |
@@ -1151,11 +1190,6 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1151 | 1190 | ||
1152 | } else if (cmd->state == UB_CMDST_CLRRS) { | 1191 | } else if (cmd->state == UB_CMDST_CLRRS) { |
1153 | if (urb->status == -EPIPE) { | 1192 | if (urb->status == -EPIPE) { |
1154 | /* | ||
1155 | * STALL while clearning STALL. | ||
1156 | * The control pipe clears itself - nothing to do. | ||
1157 | * XXX Might try to reset the device here and retry. | ||
1158 | */ | ||
1159 | printk(KERN_NOTICE "%s: stall on control pipe\n", | 1193 | printk(KERN_NOTICE "%s: stall on control pipe\n", |
1160 | sc->name); | 1194 | sc->name); |
1161 | goto Bad_End; | 1195 | goto Bad_End; |
@@ -1172,7 +1206,12 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1172 | ub_state_stat_counted(sc, cmd); | 1206 | ub_state_stat_counted(sc, cmd); |
1173 | 1207 | ||
1174 | } else if (cmd->state == UB_CMDST_CMD) { | 1208 | } else if (cmd->state == UB_CMDST_CMD) { |
1175 | if (urb->status == -EPIPE) { | 1209 | switch (urb->status) { |
1210 | case 0: | ||
1211 | break; | ||
1212 | case -EOVERFLOW: | ||
1213 | goto Bad_End; | ||
1214 | case -EPIPE: | ||
1176 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); | 1215 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); |
1177 | if (rc != 0) { | 1216 | if (rc != 0) { |
1178 | printk(KERN_NOTICE "%s: " | 1217 | printk(KERN_NOTICE "%s: " |
@@ -1182,17 +1221,20 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1182 | * This is typically ENOMEM or some other such shit. | 1221 | * This is typically ENOMEM or some other such shit. |
1183 | * Retrying is pointless. Just do Bad End on it... | 1222 | * Retrying is pointless. Just do Bad End on it... |
1184 | */ | 1223 | */ |
1185 | goto Bad_End; | 1224 | ub_state_done(sc, cmd, rc); |
1225 | return; | ||
1186 | } | 1226 | } |
1187 | cmd->state = UB_CMDST_CLEAR; | 1227 | cmd->state = UB_CMDST_CLEAR; |
1188 | ub_cmdtr_state(sc, cmd); | 1228 | ub_cmdtr_state(sc, cmd); |
1189 | return; | 1229 | return; |
1190 | } | 1230 | case -ESHUTDOWN: /* unplug */ |
1191 | if (urb->status != 0) { | 1231 | case -EILSEQ: /* unplug timeout on uhci */ |
1232 | ub_state_done(sc, cmd, -ENODEV); | ||
1233 | return; | ||
1234 | default: | ||
1192 | goto Bad_End; | 1235 | goto Bad_End; |
1193 | } | 1236 | } |
1194 | if (urb->actual_length != US_BULK_CB_WRAP_LEN) { | 1237 | if (urb->actual_length != US_BULK_CB_WRAP_LEN) { |
1195 | /* XXX Must do reset here to unconfuse the device */ | ||
1196 | goto Bad_End; | 1238 | goto Bad_End; |
1197 | } | 1239 | } |
1198 | 1240 | ||
@@ -1211,11 +1253,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1211 | printk(KERN_NOTICE "%s: " | 1253 | printk(KERN_NOTICE "%s: " |
1212 | "unable to submit clear (%d)\n", | 1254 | "unable to submit clear (%d)\n", |
1213 | sc->name, rc); | 1255 | sc->name, rc); |
1214 | /* | 1256 | ub_state_done(sc, cmd, rc); |
1215 | * This is typically ENOMEM or some other such shit. | 1257 | return; |
1216 | * Retrying is pointless. Just do Bad End on it... | ||
1217 | */ | ||
1218 | goto Bad_End; | ||
1219 | } | 1258 | } |
1220 | cmd->state = UB_CMDST_CLR2STS; | 1259 | cmd->state = UB_CMDST_CLR2STS; |
1221 | ub_cmdtr_state(sc, cmd); | 1260 | ub_cmdtr_state(sc, cmd); |
@@ -1224,14 +1263,50 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1224 | if (urb->status == -EOVERFLOW) { | 1263 | if (urb->status == -EOVERFLOW) { |
1225 | /* | 1264 | /* |
1226 | * A babble? Failure, but we must transfer CSW now. | 1265 | * A babble? Failure, but we must transfer CSW now. |
1227 | * XXX This is going to end in perpetual babble. Reset. | ||
1228 | */ | 1266 | */ |
1229 | cmd->error = -EOVERFLOW; /* A cheap trick... */ | 1267 | cmd->error = -EOVERFLOW; /* A cheap trick... */ |
1230 | ub_state_stat(sc, cmd); | 1268 | ub_state_stat(sc, cmd); |
1231 | return; | 1269 | return; |
1232 | } | 1270 | } |
1233 | if (urb->status != 0) | 1271 | |
1234 | goto Bad_End; | 1272 | if (cmd->dir == UB_DIR_WRITE) { |
1273 | /* | ||
1274 | * Do not continue writes in case of a failure. | ||
1275 | * Doing so would cause sectors to be mixed up, | ||
1276 | * which is worse than sectors lost. | ||
1277 | * | ||
1278 | * We must try to read the CSW, or many devices | ||
1279 | * get confused. | ||
1280 | */ | ||
1281 | len = urb->actual_length; | ||
1282 | if (urb->status != 0 || | ||
1283 | len != cmd->sgv[cmd->current_sg].length) { | ||
1284 | cmd->act_len += len; | ||
1285 | ub_cmdtr_act_len(sc, cmd); | ||
1286 | |||
1287 | cmd->error = -EIO; | ||
1288 | ub_state_stat(sc, cmd); | ||
1289 | return; | ||
1290 | } | ||
1291 | |||
1292 | } else { | ||
1293 | /* | ||
1294 | * If an error occurs on read, we record it, and | ||
1295 | * continue to fetch data in order to avoid bubble. | ||
1296 | * | ||
1297 | * As a small shortcut, we stop if we detect that | ||
1298 | * a CSW mixed into data. | ||
1299 | */ | ||
1300 | if (urb->status != 0) | ||
1301 | cmd->error = -EIO; | ||
1302 | |||
1303 | len = urb->actual_length; | ||
1304 | if (urb->status != 0 || | ||
1305 | len != cmd->sgv[cmd->current_sg].length) { | ||
1306 | if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN) | ||
1307 | goto Bad_End; | ||
1308 | } | ||
1309 | } | ||
1235 | 1310 | ||
1236 | cmd->act_len += urb->actual_length; | 1311 | cmd->act_len += urb->actual_length; |
1237 | ub_cmdtr_act_len(sc, cmd); | 1312 | ub_cmdtr_act_len(sc, cmd); |
@@ -1249,11 +1324,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1249 | printk(KERN_NOTICE "%s: " | 1324 | printk(KERN_NOTICE "%s: " |
1250 | "unable to submit clear (%d)\n", | 1325 | "unable to submit clear (%d)\n", |
1251 | sc->name, rc); | 1326 | sc->name, rc); |
1252 | /* | 1327 | ub_state_done(sc, cmd, rc); |
1253 | * This is typically ENOMEM or some other such shit. | 1328 | return; |
1254 | * Retrying is pointless. Just do Bad End on it... | ||
1255 | */ | ||
1256 | goto Bad_End; | ||
1257 | } | 1329 | } |
1258 | 1330 | ||
1259 | /* | 1331 | /* |
@@ -1266,14 +1338,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1266 | ub_cmdtr_state(sc, cmd); | 1338 | ub_cmdtr_state(sc, cmd); |
1267 | return; | 1339 | return; |
1268 | } | 1340 | } |
1269 | if (urb->status == -EOVERFLOW) { | 1341 | |
1270 | /* | 1342 | /* Catch everything, including -EOVERFLOW and other nasties. */ |
1271 | * XXX We are screwed here. Retrying is pointless, | ||
1272 | * because the pipelined data will not get in until | ||
1273 | * we read with a big enough buffer. We must reset XXX. | ||
1274 | */ | ||
1275 | goto Bad_End; | ||
1276 | } | ||
1277 | if (urb->status != 0) | 1343 | if (urb->status != 0) |
1278 | goto Bad_End; | 1344 | goto Bad_End; |
1279 | 1345 | ||
@@ -1319,15 +1385,15 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1319 | return; | 1385 | return; |
1320 | } | 1386 | } |
1321 | 1387 | ||
1322 | rc = le32_to_cpu(bcs->Residue); | 1388 | len = le32_to_cpu(bcs->Residue); |
1323 | if (rc != cmd->len - cmd->act_len) { | 1389 | if (len != cmd->len - cmd->act_len) { |
1324 | /* | 1390 | /* |
1325 | * It is all right to transfer less, the caller has | 1391 | * It is all right to transfer less, the caller has |
1326 | * to check. But it's not all right if the device | 1392 | * to check. But it's not all right if the device |
1327 | * counts disagree with our counts. | 1393 | * counts disagree with our counts. |
1328 | */ | 1394 | */ |
1329 | /* P3 */ printk("%s: resid %d len %d act %d\n", | 1395 | /* P3 */ printk("%s: resid %d len %d act %d\n", |
1330 | sc->name, rc, cmd->len, cmd->act_len); | 1396 | sc->name, len, cmd->len, cmd->act_len); |
1331 | goto Bad_End; | 1397 | goto Bad_End; |
1332 | } | 1398 | } |
1333 | 1399 | ||
@@ -1338,13 +1404,13 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1338 | ub_state_sense(sc, cmd); | 1404 | ub_state_sense(sc, cmd); |
1339 | return; | 1405 | return; |
1340 | case US_BULK_STAT_PHASE: | 1406 | case US_BULK_STAT_PHASE: |
1341 | /* XXX We must reset the transport here */ | ||
1342 | /* P3 */ printk("%s: status PHASE\n", sc->name); | 1407 | /* P3 */ printk("%s: status PHASE\n", sc->name); |
1343 | goto Bad_End; | 1408 | goto Bad_End; |
1344 | default: | 1409 | default: |
1345 | printk(KERN_INFO "%s: unknown CSW status 0x%x\n", | 1410 | printk(KERN_INFO "%s: unknown CSW status 0x%x\n", |
1346 | sc->name, bcs->Status); | 1411 | sc->name, bcs->Status); |
1347 | goto Bad_End; | 1412 | ub_state_done(sc, cmd, -EINVAL); |
1413 | return; | ||
1348 | } | 1414 | } |
1349 | 1415 | ||
1350 | /* Not zeroing error to preserve a babble indicator */ | 1416 | /* Not zeroing error to preserve a babble indicator */ |
@@ -1364,7 +1430,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1364 | printk(KERN_WARNING "%s: " | 1430 | printk(KERN_WARNING "%s: " |
1365 | "wrong command state %d\n", | 1431 | "wrong command state %d\n", |
1366 | sc->name, cmd->state); | 1432 | sc->name, cmd->state); |
1367 | goto Bad_End; | 1433 | ub_state_done(sc, cmd, -EINVAL); |
1434 | return; | ||
1368 | } | 1435 | } |
1369 | return; | 1436 | return; |
1370 | 1437 | ||
@@ -1612,6 +1679,93 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd) | |||
1612 | } | 1679 | } |
1613 | 1680 | ||
1614 | /* | 1681 | /* |
1682 | * Reset management | ||
1683 | */ | ||
1684 | |||
1685 | static void ub_reset_enter(struct ub_dev *sc) | ||
1686 | { | ||
1687 | |||
1688 | if (sc->reset) { | ||
1689 | /* This happens often on multi-LUN devices. */ | ||
1690 | return; | ||
1691 | } | ||
1692 | sc->reset = 1; | ||
1693 | |||
1694 | #if 0 /* Not needed because the disconnect waits for us. */ | ||
1695 | unsigned long flags; | ||
1696 | spin_lock_irqsave(&ub_lock, flags); | ||
1697 | sc->openc++; | ||
1698 | spin_unlock_irqrestore(&ub_lock, flags); | ||
1699 | #endif | ||
1700 | |||
1701 | #if 0 /* We let them stop themselves. */ | ||
1702 | struct list_head *p; | ||
1703 | struct ub_lun *lun; | ||
1704 | list_for_each(p, &sc->luns) { | ||
1705 | lun = list_entry(p, struct ub_lun, link); | ||
1706 | blk_stop_queue(lun->disk->queue); | ||
1707 | } | ||
1708 | #endif | ||
1709 | |||
1710 | schedule_work(&sc->reset_work); | ||
1711 | } | ||
1712 | |||
1713 | static void ub_reset_task(void *arg) | ||
1714 | { | ||
1715 | struct ub_dev *sc = arg; | ||
1716 | unsigned long flags; | ||
1717 | struct list_head *p; | ||
1718 | struct ub_lun *lun; | ||
1719 | int lkr, rc; | ||
1720 | |||
1721 | if (!sc->reset) { | ||
1722 | printk(KERN_WARNING "%s: Running reset unrequested\n", | ||
1723 | sc->name); | ||
1724 | return; | ||
1725 | } | ||
1726 | |||
1727 | if (atomic_read(&sc->poison)) { | ||
1728 | printk(KERN_NOTICE "%s: Not resetting disconnected device\n", | ||
1729 | sc->name); /* P3 This floods. Remove soon. XXX */ | ||
1730 | } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) { | ||
1731 | printk(KERN_NOTICE "%s: Not resetting multi-interface device\n", | ||
1732 | sc->name); /* P3 This floods. Remove soon. XXX */ | ||
1733 | } else { | ||
1734 | if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) { | ||
1735 | printk(KERN_NOTICE | ||
1736 | "%s: usb_lock_device_for_reset failed (%d)\n", | ||
1737 | sc->name, lkr); | ||
1738 | } else { | ||
1739 | rc = usb_reset_device(sc->dev); | ||
1740 | if (rc < 0) { | ||
1741 | printk(KERN_NOTICE "%s: " | ||
1742 | "usb_lock_device_for_reset failed (%d)\n", | ||
1743 | sc->name, rc); | ||
1744 | } | ||
1745 | |||
1746 | if (lkr) | ||
1747 | usb_unlock_device(sc->dev); | ||
1748 | } | ||
1749 | } | ||
1750 | |||
1751 | /* | ||
1752 | * In theory, no commands can be running while reset is active, | ||
1753 | * so nobody can ask for another reset, and so we do not need any | ||
1754 | * queues of resets or anything. We do need a spinlock though, | ||
1755 | * to interact with the block layer. | ||
1756 | */ | ||
1757 | spin_lock_irqsave(&sc->lock, flags); | ||
1758 | sc->reset = 0; | ||
1759 | tasklet_schedule(&sc->tasklet); | ||
1760 | list_for_each(p, &sc->luns) { | ||
1761 | lun = list_entry(p, struct ub_lun, link); | ||
1762 | blk_start_queue(lun->disk->queue); | ||
1763 | } | ||
1764 | wake_up(&sc->reset_wait); | ||
1765 | spin_unlock_irqrestore(&sc->lock, flags); | ||
1766 | } | ||
1767 | |||
1768 | /* | ||
1615 | * This is called from a process context. | 1769 | * This is called from a process context. |
1616 | */ | 1770 | */ |
1617 | static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun) | 1771 | static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun) |
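
Editor's note: the new reset machinery above hinges on one handshake. ub_reset_task() clears sc->reset under the spinlock and calls wake_up(&sc->reset_wait), while ub_disconnect() (further down in this patch) blocks in wait_event(sc->reset_wait, !sc->reset) so a reset in flight finishes before teardown. Below is a minimal standalone sketch of that pattern, modeled with pthreads rather than kernel primitives; every name in it is illustrative, not from the driver.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reset_wait = PTHREAD_COND_INITIALIZER;
static int reset = 1;                       /* reset in progress */

static void *reset_task(void *arg)
{
	/* ... the reset work itself would happen here ... */
	pthread_mutex_lock(&lock);
	reset = 0;                          /* reset finished */
	pthread_cond_broadcast(&reset_wait);    /* like wake_up() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, reset_task, NULL);

	/* like wait_event(sc->reset_wait, !sc->reset) in disconnect */
	pthread_mutex_lock(&lock);
	while (reset)
		pthread_cond_wait(&reset_wait, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("disconnect may proceed\n");
	return 0;
}

The loop around pthread_cond_wait() mirrors how wait_event() re-checks its condition, guarding against spurious wakeups.
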
@@ -2146,7 +2300,7 @@ static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev, | |||
2146 | if (ep_in == NULL || ep_out == NULL) { | 2300 | if (ep_in == NULL || ep_out == NULL) { |
2147 | printk(KERN_NOTICE "%s: failed endpoint check\n", | 2301 | printk(KERN_NOTICE "%s: failed endpoint check\n", |
2148 | sc->name); | 2302 | sc->name); |
2149 | return -EIO; | 2303 | return -ENODEV; |
2150 | } | 2304 | } |
2151 | 2305 | ||
2152 | /* Calculate and store the pipe values */ | 2306 | /* Calculate and store the pipe values */ |
@@ -2172,6 +2326,9 @@ static int ub_probe(struct usb_interface *intf, | |||
2172 | int rc; | 2326 | int rc; |
2173 | int i; | 2327 | int i; |
2174 | 2328 | ||
2329 | if (usb_usual_check_type(dev_id, USB_US_TYPE_UB)) | ||
2330 | return -ENXIO; | ||
2331 | |||
2175 | rc = -ENOMEM; | 2332 | rc = -ENOMEM; |
2176 | if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL) | 2333 | if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL) |
2177 | goto err_core; | 2334 | goto err_core; |
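
Editor's note: the usb_usual_check_type() call lets ub and usb-storage share one device table; a probe bails out with -ENXIO when the device is earmarked for the other driver. A rough standalone model of that idea follows, assuming a simple per-driver-type bitmask. The real usb_usual helpers are more involved; check_type(), the US_TYPE_* values, and `present` below are illustrative only.

#include <stdio.h>

enum { US_TYPE_STOR = 1 << 0, US_TYPE_UB = 1 << 1 };

static unsigned int present;    /* one bit per loaded driver type */

/* Decline the device if its table entry names a different driver
 * type and that driver is actually loaded. */
static int check_type(unsigned int id_type, unsigned int my_type)
{
	return id_type != my_type && (present & id_type);
}

int main(void)
{
	present |= US_TYPE_UB;                  /* ub is loaded */
	/* usb-storage probing a device flagged for ub: declines */
	printf("decline: %d\n", check_type(US_TYPE_UB, US_TYPE_STOR));
	/* ub probing the same device: proceeds */
	printf("decline: %d\n", check_type(US_TYPE_UB, US_TYPE_UB));
	return 0;
}
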
@@ -2181,6 +2338,8 @@ static int ub_probe(struct usb_interface *intf, | |||
2181 | usb_init_urb(&sc->work_urb); | 2338 | usb_init_urb(&sc->work_urb); |
2182 | tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); | 2339 | tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); |
2183 | atomic_set(&sc->poison, 0); | 2340 | atomic_set(&sc->poison, 0); |
2341 | INIT_WORK(&sc->reset_work, ub_reset_task, sc); | ||
2342 | init_waitqueue_head(&sc->reset_wait); | ||
2184 | 2343 | ||
2185 | init_timer(&sc->work_timer); | 2344 | init_timer(&sc->work_timer); |
2186 | sc->work_timer.data = (unsigned long) sc; | 2345 | sc->work_timer.data = (unsigned long) sc; |
@@ -2201,7 +2360,8 @@ static int ub_probe(struct usb_interface *intf, | |||
2201 | 2360 | ||
2202 | /* XXX Verify that we can handle the device (from descriptors) */ | 2361 | /* XXX Verify that we can handle the device (from descriptors) */ |
2203 | 2362 | ||
2204 | ub_get_pipes(sc, sc->dev, intf); | 2363 | if (ub_get_pipes(sc, sc->dev, intf) != 0) |
2364 | goto err_dev_desc; | ||
2205 | 2365 | ||
2206 | if (device_create_file(&sc->intf->dev, &dev_attr_diag) != 0) | 2366 | if (device_create_file(&sc->intf->dev, &dev_attr_diag) != 0) |
2207 | goto err_diag; | 2367 | goto err_diag; |
@@ -2272,6 +2432,7 @@ static int ub_probe(struct usb_interface *intf, | |||
2272 | 2432 | ||
2273 | /* device_remove_file(&sc->intf->dev, &dev_attr_diag); */ | 2433 | /* device_remove_file(&sc->intf->dev, &dev_attr_diag); */ |
2274 | err_diag: | 2434 | err_diag: |
2435 | err_dev_desc: | ||
2275 | usb_set_intfdata(intf, NULL); | 2436 | usb_set_intfdata(intf, NULL); |
2276 | // usb_put_intf(sc->intf); | 2437 | // usb_put_intf(sc->intf); |
2277 | usb_put_dev(sc->dev); | 2438 | usb_put_dev(sc->dev); |
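
Editor's note: checking ub_get_pipes() finally gives the probe path a real failure branch, and the matching err_dev_desc label slots into the usual goto-unwind ladder. A compilable sketch of that idiom, with hypothetical names standing in for the driver's actual setup steps:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int get_pipes(void) { return -ENODEV; }  /* pretend failure */

static int probe(void)
{
	char *sc;
	int rc;

	rc = -ENOMEM;
	if ((sc = malloc(64)) == NULL)
		goto err_core;

	if ((rc = get_pipes()) != 0)
		goto err_dev_desc;

	free(sc);       /* model only; the real driver keeps sc */
	return 0;

err_dev_desc:           /* labels unwind in reverse setup order */
	free(sc);
err_core:
	return rc;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}
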
@@ -2309,14 +2470,14 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) | |||
2309 | ub_revalidate(sc, lun); | 2470 | ub_revalidate(sc, lun); |
2310 | 2471 | ||
2311 | rc = -ENOMEM; | 2472 | rc = -ENOMEM; |
2312 | if ((disk = alloc_disk(UB_MINORS_PER_MAJOR)) == NULL) | 2473 | if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL) |
2313 | goto err_diskalloc; | 2474 | goto err_diskalloc; |
2314 | 2475 | ||
2315 | lun->disk = disk; | 2476 | lun->disk = disk; |
2316 | sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a'); | 2477 | sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a'); |
2317 | sprintf(disk->devfs_name, DEVFS_NAME "/%c", lun->id + 'a'); | 2478 | sprintf(disk->devfs_name, DEVFS_NAME "/%c", lun->id + 'a'); |
2318 | disk->major = UB_MAJOR; | 2479 | disk->major = UB_MAJOR; |
2319 | disk->first_minor = lun->id * UB_MINORS_PER_MAJOR; | 2480 | disk->first_minor = lun->id * UB_PARTS_PER_LUN; |
2320 | disk->fops = &ub_bd_fops; | 2481 | disk->fops = &ub_bd_fops; |
2321 | disk->private_data = lun; | 2482 | disk->private_data = lun; |
2322 | disk->driverfs_dev = &sc->intf->dev; | 2483 | disk->driverfs_dev = &sc->intf->dev; |
@@ -2380,6 +2541,11 @@ static void ub_disconnect(struct usb_interface *intf) | |||
2380 | atomic_set(&sc->poison, 1); | 2541 | atomic_set(&sc->poison, 1); |
2381 | 2542 | ||
2382 | /* | 2543 | /* |
2544 | * Wait for reset to end, if any. | ||
2545 | */ | ||
2546 | wait_event(sc->reset_wait, !sc->reset); | ||
2547 | |||
2548 | /* | ||
2383 | * Blow away queued commands. | 2549 | * Blow away queued commands. |
2384 | * | 2550 | * |
2385 | * Actually, this never works, because before we get here | 2551 | * Actually, this never works, because before we get here |
@@ -2392,7 +2558,7 @@ static void ub_disconnect(struct usb_interface *intf) | |||
2392 | { | 2558 | { |
2393 | struct ub_scsi_cmd *cmd; | 2559 | struct ub_scsi_cmd *cmd; |
2394 | int cnt = 0; | 2560 | int cnt = 0; |
2395 | while ((cmd = ub_cmdq_pop(sc)) != NULL) { | 2561 | while ((cmd = ub_cmdq_peek(sc)) != NULL) { |
2396 | cmd->error = -ENOTCONN; | 2562 | cmd->error = -ENOTCONN; |
2397 | cmd->state = UB_CMDST_DONE; | 2563 | cmd->state = UB_CMDST_DONE; |
2398 | ub_cmdtr_state(sc, cmd); | 2564 | ub_cmdtr_state(sc, cmd); |
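
Editor's note: the switch from ub_cmdq_pop() to ub_cmdq_peek() suggests the completion path, not this loop, is responsible for unlinking the command; popping here as well would remove it from the queue before the done callback sees it at the head. A small runnable model of that peek-then-let-completion-pop discipline (the FIFO and cmd_done() below are illustrative, not the driver's structures):

#include <stdio.h>
#include <stddef.h>

struct cmd { struct cmd *next; int error; int done; };

static struct cmd *queue_head;

static struct cmd *cmdq_peek(void) { return queue_head; }

static struct cmd *cmdq_pop(void)
{
	struct cmd *c = queue_head;
	if (c)
		queue_head = c->next;
	return c;
}

static void cmd_done(struct cmd *c)     /* completion path pops */
{
	if (cmdq_peek() == c)
		cmdq_pop();
	c->done = 1;
}

int main(void)
{
	struct cmd a = { NULL, 0, 0 };
	queue_head = &a;

	/* disconnect path: fail the head, let completion unlink it */
	struct cmd *c = cmdq_peek();
	c->error = -1;
	cmd_done(c);

	printf("done=%d error=%d queue_empty=%d\n",
	       a.done, a.error, queue_head == NULL);
	return 0;
}
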
@@ -2461,7 +2627,6 @@ static void ub_disconnect(struct usb_interface *intf) | |||
2461 | } | 2627 | } |
2462 | 2628 | ||
2463 | static struct usb_driver ub_driver = { | 2629 | static struct usb_driver ub_driver = { |
2464 | .owner = THIS_MODULE, | ||
2465 | .name = "ub", | 2630 | .name = "ub", |
2466 | .probe = ub_probe, | 2631 | .probe = ub_probe, |
2467 | .disconnect = ub_disconnect, | 2632 | .disconnect = ub_disconnect, |
@@ -2479,6 +2644,7 @@ static int __init ub_init(void) | |||
2479 | if ((rc = usb_register(&ub_driver)) != 0) | 2644 | if ((rc = usb_register(&ub_driver)) != 0) |
2480 | goto err_register; | 2645 | goto err_register; |
2481 | 2646 | ||
2647 | usb_usual_set_present(USB_US_TYPE_UB); | ||
2482 | return 0; | 2648 | return 0; |
2483 | 2649 | ||
2484 | err_register: | 2650 | err_register: |
@@ -2494,6 +2660,7 @@ static void __exit ub_exit(void) | |||
2494 | 2660 | ||
2495 | devfs_remove(DEVFS_NAME); | 2661 | devfs_remove(DEVFS_NAME); |
2496 | unregister_blkdev(UB_MAJOR, DRV_NAME); | 2662 | unregister_blkdev(UB_MAJOR, DRV_NAME); |
2663 | usb_usual_clear_present(USB_US_TYPE_UB); | ||
2497 | } | 2664 | } |
2498 | 2665 | ||
2499 | module_init(ub_init); | 2666 | module_init(ub_init); |
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c index 2d518aa2720a..063f0304a163 100644 --- a/drivers/block/viodasd.c +++ b/drivers/block/viodasd.c | |||
@@ -305,7 +305,7 @@ static void viodasd_end_request(struct request *req, int uptodate, | |||
305 | if (end_that_request_first(req, uptodate, num_sectors)) | 305 | if (end_that_request_first(req, uptodate, num_sectors)) |
306 | return; | 306 | return; |
307 | add_disk_randomness(req->rq_disk); | 307 | add_disk_randomness(req->rq_disk); |
308 | end_that_request_last(req); | 308 | end_that_request_last(req, uptodate); |
309 | } | 309 | } |
310 | 310 | ||
311 | /* | 311 | /* |
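
Editor's note: the viodasd hunk tracks the block-layer API change visible throughout this patch: end_that_request_last() now takes the uptodate flag too, so the final completion carries the same success/failure status as the per-segment calls. A runnable model of the two-stage contract, with stubs standing in for the real block-layer functions:

#include <stdio.h>

struct request { int sectors_left; int final_uptodate; };

/* Returns nonzero while the request still has sectors pending. */
static int end_that_request_first(struct request *rq, int uptodate, int nr)
{
	(void)uptodate;
	rq->sectors_left -= nr;
	return rq->sectors_left > 0;
}

/* As of this change, the final completion also carries uptodate. */
static void end_that_request_last(struct request *rq, int uptodate)
{
	rq->final_uptodate = uptodate;
}

static void end_request(struct request *rq, int uptodate, int nr)
{
	if (end_that_request_first(rq, uptodate, nr))
		return;                 /* more sectors to go */
	end_that_request_last(rq, uptodate);
}

int main(void)
{
	struct request rq = { 8, -1 };
	end_request(&rq, 1, 4);         /* partial: not finished yet */
	end_request(&rq, 1, 4);         /* final: passes uptodate on */
	printf("final_uptodate=%d\n", rq.final_uptodate);
	return 0;
}
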