diff options
Diffstat (limited to 'drivers/block/ub.c')
-rw-r--r-- | drivers/block/ub.c | 439 |
1 files changed, 303 insertions, 136 deletions
diff --git a/drivers/block/ub.c b/drivers/block/ub.c index bfb23d543ff7..10740a065088 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c | |||
@@ -9,7 +9,6 @@ | |||
9 | * | 9 | * |
10 | * TODO (sorted by decreasing priority) | 10 | * TODO (sorted by decreasing priority) |
11 | * -- Kill first_open (Al Viro fixed the block layer now) | 11 | * -- Kill first_open (Al Viro fixed the block layer now) |
12 | * -- Do resets with usb_device_reset (needs a thread context, use khubd) | ||
13 | * -- set readonly flag for CDs, set removable flag for CF readers | 12 | * -- set readonly flag for CDs, set removable flag for CF readers |
14 | * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch) | 13 | * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch) |
15 | * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries | 14 | * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries |
@@ -29,6 +28,7 @@ | |||
29 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
30 | #include <linux/module.h> | 29 | #include <linux/module.h> |
31 | #include <linux/usb.h> | 30 | #include <linux/usb.h> |
31 | #include <linux/usb_usual.h> | ||
32 | #include <linux/blkdev.h> | 32 | #include <linux/blkdev.h> |
33 | #include <linux/devfs_fs_kernel.h> | 33 | #include <linux/devfs_fs_kernel.h> |
34 | #include <linux/timer.h> | 34 | #include <linux/timer.h> |
@@ -107,16 +107,6 @@ | |||
107 | */ | 107 | */ |
108 | 108 | ||
109 | /* | 109 | /* |
110 | * Definitions which have to be scattered once we understand the layout better. | ||
111 | */ | ||
112 | |||
113 | /* Transport (despite PR in the name) */ | ||
114 | #define US_PR_BULK 0x50 /* bulk only */ | ||
115 | |||
116 | /* Protocol */ | ||
117 | #define US_SC_SCSI 0x06 /* Transparent */ | ||
118 | |||
119 | /* | ||
120 | * This many LUNs per USB device. | 110 | * This many LUNs per USB device. |
121 | * Every one of them takes a host, see UB_MAX_HOSTS. | 111 | * Every one of them takes a host, see UB_MAX_HOSTS. |
122 | */ | 112 | */ |
@@ -125,7 +115,7 @@ | |||
125 | /* | 115 | /* |
126 | */ | 116 | */ |
127 | 117 | ||
128 | #define UB_MINORS_PER_MAJOR 8 | 118 | #define UB_PARTS_PER_LUN 8 |
129 | 119 | ||
130 | #define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */ | 120 | #define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */ |
131 | 121 | ||
@@ -245,6 +235,13 @@ struct ub_scsi_cmd { | |||
245 | void *back; | 235 | void *back; |
246 | }; | 236 | }; |
247 | 237 | ||
238 | struct ub_request { | ||
239 | struct request *rq; | ||
240 | unsigned int current_try; | ||
241 | unsigned int nsg; /* sgv[nsg] */ | ||
242 | struct scatterlist sgv[UB_MAX_REQ_SG]; | ||
243 | }; | ||
244 | |||
248 | /* | 245 | /* |
249 | */ | 246 | */ |
250 | struct ub_capacity { | 247 | struct ub_capacity { |
@@ -340,6 +337,8 @@ struct ub_lun { | |||
340 | int readonly; | 337 | int readonly; |
341 | int first_open; /* Kludge. See ub_bd_open. */ | 338 | int first_open; /* Kludge. See ub_bd_open. */ |
342 | 339 | ||
340 | struct ub_request urq; | ||
341 | |||
343 | /* Use Ingo's mempool if or when we have more than one command. */ | 342 | /* Use Ingo's mempool if or when we have more than one command. */ |
344 | /* | 343 | /* |
345 | * Currently we never need more than one command for the whole device. | 344 | * Currently we never need more than one command for the whole device. |
@@ -360,6 +359,7 @@ struct ub_dev { | |||
360 | atomic_t poison; /* The USB device is disconnected */ | 359 | atomic_t poison; /* The USB device is disconnected */ |
361 | int openc; /* protected by ub_lock! */ | 360 | int openc; /* protected by ub_lock! */ |
362 | /* kref is too implicit for our taste */ | 361 | /* kref is too implicit for our taste */ |
362 | int reset; /* Reset is running */ | ||
363 | unsigned int tagcnt; | 363 | unsigned int tagcnt; |
364 | char name[12]; | 364 | char name[12]; |
365 | struct usb_device *dev; | 365 | struct usb_device *dev; |
@@ -387,6 +387,9 @@ struct ub_dev { | |||
387 | struct bulk_cs_wrap work_bcs; | 387 | struct bulk_cs_wrap work_bcs; |
388 | struct usb_ctrlrequest work_cr; | 388 | struct usb_ctrlrequest work_cr; |
389 | 389 | ||
390 | struct work_struct reset_work; | ||
391 | wait_queue_head_t reset_wait; | ||
392 | |||
390 | int sg_stat[6]; | 393 | int sg_stat[6]; |
391 | struct ub_scsi_trace tr; | 394 | struct ub_scsi_trace tr; |
392 | }; | 395 | }; |
@@ -395,12 +398,14 @@ struct ub_dev { | |||
395 | */ | 398 | */ |
396 | static void ub_cleanup(struct ub_dev *sc); | 399 | static void ub_cleanup(struct ub_dev *sc); |
397 | static int ub_request_fn_1(struct ub_lun *lun, struct request *rq); | 400 | static int ub_request_fn_1(struct ub_lun *lun, struct request *rq); |
398 | static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, | 401 | static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, |
399 | struct ub_scsi_cmd *cmd, struct request *rq); | 402 | struct ub_scsi_cmd *cmd, struct ub_request *urq); |
400 | static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, | 403 | static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, |
401 | struct ub_scsi_cmd *cmd, struct request *rq); | 404 | struct ub_scsi_cmd *cmd, struct ub_request *urq); |
402 | static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); | 405 | static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
403 | static void ub_end_rq(struct request *rq, int uptodate); | 406 | static void ub_end_rq(struct request *rq, int uptodate); |
407 | static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, | ||
408 | struct ub_request *urq, struct ub_scsi_cmd *cmd); | ||
404 | static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); | 409 | static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); |
405 | static void ub_urb_complete(struct urb *urb, struct pt_regs *pt); | 410 | static void ub_urb_complete(struct urb *urb, struct pt_regs *pt); |
406 | static void ub_scsi_action(unsigned long _dev); | 411 | static void ub_scsi_action(unsigned long _dev); |
@@ -415,6 +420,8 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd); | |||
415 | static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | 420 | static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, |
416 | int stalled_pipe); | 421 | int stalled_pipe); |
417 | static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); | 422 | static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); |
423 | static void ub_reset_enter(struct ub_dev *sc); | ||
424 | static void ub_reset_task(void *arg); | ||
418 | static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); | 425 | static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); |
419 | static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, | 426 | static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, |
420 | struct ub_capacity *ret); | 427 | struct ub_capacity *ret); |
@@ -422,13 +429,18 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum); | |||
422 | 429 | ||
423 | /* | 430 | /* |
424 | */ | 431 | */ |
432 | #ifdef CONFIG_USB_LIBUSUAL | ||
433 | |||
434 | #define ub_usb_ids storage_usb_ids | ||
435 | #else | ||
436 | |||
425 | static struct usb_device_id ub_usb_ids[] = { | 437 | static struct usb_device_id ub_usb_ids[] = { |
426 | // { USB_DEVICE_VER(0x0781, 0x0002, 0x0009, 0x0009) }, /* SDDR-31 */ | ||
427 | { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) }, | 438 | { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) }, |
428 | { } | 439 | { } |
429 | }; | 440 | }; |
430 | 441 | ||
431 | MODULE_DEVICE_TABLE(usb, ub_usb_ids); | 442 | MODULE_DEVICE_TABLE(usb, ub_usb_ids); |
443 | #endif /* CONFIG_USB_LIBUSUAL */ | ||
432 | 444 | ||
433 | /* | 445 | /* |
434 | * Find me a way to identify "next free minor" for add_disk(), | 446 | * Find me a way to identify "next free minor" for add_disk(), |
@@ -522,6 +534,9 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, | |||
522 | spin_lock_irqsave(&sc->lock, flags); | 534 | spin_lock_irqsave(&sc->lock, flags); |
523 | 535 | ||
524 | cnt += sprintf(page + cnt, | 536 | cnt += sprintf(page + cnt, |
537 | "poison %d reset %d\n", | ||
538 | atomic_read(&sc->poison), sc->reset); | ||
539 | cnt += sprintf(page + cnt, | ||
525 | "qlen %d qmax %d\n", | 540 | "qlen %d qmax %d\n", |
526 | sc->cmd_queue.qlen, sc->cmd_queue.qmax); | 541 | sc->cmd_queue.qlen, sc->cmd_queue.qmax); |
527 | cnt += sprintf(page + cnt, | 542 | cnt += sprintf(page + cnt, |
@@ -770,7 +785,8 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) | |||
770 | { | 785 | { |
771 | struct ub_dev *sc = lun->udev; | 786 | struct ub_dev *sc = lun->udev; |
772 | struct ub_scsi_cmd *cmd; | 787 | struct ub_scsi_cmd *cmd; |
773 | int rc; | 788 | struct ub_request *urq; |
789 | int n_elem; | ||
774 | 790 | ||
775 | if (atomic_read(&sc->poison) || lun->changed) { | 791 | if (atomic_read(&sc->poison) || lun->changed) { |
776 | blkdev_dequeue_request(rq); | 792 | blkdev_dequeue_request(rq); |
@@ -778,65 +794,70 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) | |||
778 | return 0; | 794 | return 0; |
779 | } | 795 | } |
780 | 796 | ||
797 | if (lun->urq.rq != NULL) | ||
798 | return -1; | ||
781 | if ((cmd = ub_get_cmd(lun)) == NULL) | 799 | if ((cmd = ub_get_cmd(lun)) == NULL) |
782 | return -1; | 800 | return -1; |
783 | memset(cmd, 0, sizeof(struct ub_scsi_cmd)); | 801 | memset(cmd, 0, sizeof(struct ub_scsi_cmd)); |
784 | 802 | ||
785 | blkdev_dequeue_request(rq); | 803 | blkdev_dequeue_request(rq); |
804 | |||
805 | urq = &lun->urq; | ||
806 | memset(urq, 0, sizeof(struct ub_request)); | ||
807 | urq->rq = rq; | ||
808 | |||
809 | /* | ||
810 | * get scatterlist from block layer | ||
811 | */ | ||
812 | n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]); | ||
813 | if (n_elem < 0) { | ||
814 | printk(KERN_INFO "%s: failed request map (%d)\n", | ||
815 | lun->name, n_elem); /* P3 */ | ||
816 | goto drop; | ||
817 | } | ||
818 | if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ | ||
819 | printk(KERN_WARNING "%s: request with %d segments\n", | ||
820 | lun->name, n_elem); | ||
821 | goto drop; | ||
822 | } | ||
823 | urq->nsg = n_elem; | ||
824 | sc->sg_stat[n_elem < 5 ? n_elem : 5]++; | ||
825 | |||
786 | if (blk_pc_request(rq)) { | 826 | if (blk_pc_request(rq)) { |
787 | rc = ub_cmd_build_packet(sc, lun, cmd, rq); | 827 | ub_cmd_build_packet(sc, lun, cmd, urq); |
788 | } else { | 828 | } else { |
789 | rc = ub_cmd_build_block(sc, lun, cmd, rq); | 829 | ub_cmd_build_block(sc, lun, cmd, urq); |
790 | } | ||
791 | if (rc != 0) { | ||
792 | ub_put_cmd(lun, cmd); | ||
793 | ub_end_rq(rq, 0); | ||
794 | return 0; | ||
795 | } | 830 | } |
796 | cmd->state = UB_CMDST_INIT; | 831 | cmd->state = UB_CMDST_INIT; |
797 | cmd->lun = lun; | 832 | cmd->lun = lun; |
798 | cmd->done = ub_rw_cmd_done; | 833 | cmd->done = ub_rw_cmd_done; |
799 | cmd->back = rq; | 834 | cmd->back = urq; |
800 | 835 | ||
801 | cmd->tag = sc->tagcnt++; | 836 | cmd->tag = sc->tagcnt++; |
802 | if (ub_submit_scsi(sc, cmd) != 0) { | 837 | if (ub_submit_scsi(sc, cmd) != 0) |
803 | ub_put_cmd(lun, cmd); | 838 | goto drop; |
804 | ub_end_rq(rq, 0); | ||
805 | return 0; | ||
806 | } | ||
807 | 839 | ||
808 | return 0; | 840 | return 0; |
841 | |||
842 | drop: | ||
843 | ub_put_cmd(lun, cmd); | ||
844 | ub_end_rq(rq, 0); | ||
845 | return 0; | ||
809 | } | 846 | } |
810 | 847 | ||
811 | static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, | 848 | static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, |
812 | struct ub_scsi_cmd *cmd, struct request *rq) | 849 | struct ub_scsi_cmd *cmd, struct ub_request *urq) |
813 | { | 850 | { |
814 | int ub_dir; | 851 | struct request *rq = urq->rq; |
815 | int n_elem; | ||
816 | unsigned int block, nblks; | 852 | unsigned int block, nblks; |
817 | 853 | ||
818 | if (rq_data_dir(rq) == WRITE) | 854 | if (rq_data_dir(rq) == WRITE) |
819 | ub_dir = UB_DIR_WRITE; | 855 | cmd->dir = UB_DIR_WRITE; |
820 | else | 856 | else |
821 | ub_dir = UB_DIR_READ; | 857 | cmd->dir = UB_DIR_READ; |
822 | cmd->dir = ub_dir; | ||
823 | 858 | ||
824 | /* | 859 | cmd->nsg = urq->nsg; |
825 | * get scatterlist from block layer | 860 | memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg); |
826 | */ | ||
827 | n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]); | ||
828 | if (n_elem <= 0) { | ||
829 | printk(KERN_INFO "%s: failed request map (%d)\n", | ||
830 | sc->name, n_elem); /* P3 */ | ||
831 | return -1; /* request with no s/g entries? */ | ||
832 | } | ||
833 | if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ | ||
834 | printk(KERN_WARNING "%s: request with %d segments\n", | ||
835 | sc->name, n_elem); | ||
836 | return -1; | ||
837 | } | ||
838 | cmd->nsg = n_elem; | ||
839 | sc->sg_stat[n_elem < 5 ? n_elem : 5]++; | ||
840 | 861 | ||
841 | /* | 862 | /* |
842 | * build the command | 863 | * build the command |
@@ -847,7 +868,7 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, | |||
847 | block = rq->sector >> lun->capacity.bshift; | 868 | block = rq->sector >> lun->capacity.bshift; |
848 | nblks = rq->nr_sectors >> lun->capacity.bshift; | 869 | nblks = rq->nr_sectors >> lun->capacity.bshift; |
849 | 870 | ||
850 | cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10; | 871 | cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10; |
851 | /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ | 872 | /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ |
852 | cmd->cdb[2] = block >> 24; | 873 | cmd->cdb[2] = block >> 24; |
853 | cmd->cdb[3] = block >> 16; | 874 | cmd->cdb[3] = block >> 16; |
@@ -858,14 +879,12 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, | |||
858 | cmd->cdb_len = 10; | 879 | cmd->cdb_len = 10; |
859 | 880 | ||
860 | cmd->len = rq->nr_sectors * 512; | 881 | cmd->len = rq->nr_sectors * 512; |
861 | |||
862 | return 0; | ||
863 | } | 882 | } |
864 | 883 | ||
865 | static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, | 884 | static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, |
866 | struct ub_scsi_cmd *cmd, struct request *rq) | 885 | struct ub_scsi_cmd *cmd, struct ub_request *urq) |
867 | { | 886 | { |
868 | int n_elem; | 887 | struct request *rq = urq->rq; |
869 | 888 | ||
870 | if (rq->data_len == 0) { | 889 | if (rq->data_len == 0) { |
871 | cmd->dir = UB_DIR_NONE; | 890 | cmd->dir = UB_DIR_NONE; |
@@ -874,40 +893,26 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, | |||
874 | cmd->dir = UB_DIR_WRITE; | 893 | cmd->dir = UB_DIR_WRITE; |
875 | else | 894 | else |
876 | cmd->dir = UB_DIR_READ; | 895 | cmd->dir = UB_DIR_READ; |
877 | |||
878 | } | 896 | } |
879 | 897 | ||
880 | /* | 898 | cmd->nsg = urq->nsg; |
881 | * get scatterlist from block layer | 899 | memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg); |
882 | */ | ||
883 | n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]); | ||
884 | if (n_elem < 0) { | ||
885 | printk(KERN_INFO "%s: failed request map (%d)\n", | ||
886 | sc->name, n_elem); /* P3 */ | ||
887 | return -1; | ||
888 | } | ||
889 | if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ | ||
890 | printk(KERN_WARNING "%s: request with %d segments\n", | ||
891 | sc->name, n_elem); | ||
892 | return -1; | ||
893 | } | ||
894 | cmd->nsg = n_elem; | ||
895 | sc->sg_stat[n_elem < 5 ? n_elem : 5]++; | ||
896 | 900 | ||
897 | memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); | 901 | memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); |
898 | cmd->cdb_len = rq->cmd_len; | 902 | cmd->cdb_len = rq->cmd_len; |
899 | 903 | ||
900 | cmd->len = rq->data_len; | 904 | cmd->len = rq->data_len; |
901 | |||
902 | return 0; | ||
903 | } | 905 | } |
904 | 906 | ||
905 | static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | 907 | static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) |
906 | { | 908 | { |
907 | struct request *rq = cmd->back; | ||
908 | struct ub_lun *lun = cmd->lun; | 909 | struct ub_lun *lun = cmd->lun; |
910 | struct ub_request *urq = cmd->back; | ||
911 | struct request *rq; | ||
909 | int uptodate; | 912 | int uptodate; |
910 | 913 | ||
914 | rq = urq->rq; | ||
915 | |||
911 | if (cmd->error == 0) { | 916 | if (cmd->error == 0) { |
912 | uptodate = 1; | 917 | uptodate = 1; |
913 | 918 | ||
@@ -928,9 +933,16 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
928 | rq->errors = SAM_STAT_CHECK_CONDITION; | 933 | rq->errors = SAM_STAT_CHECK_CONDITION; |
929 | else | 934 | else |
930 | rq->errors = DID_ERROR << 16; | 935 | rq->errors = DID_ERROR << 16; |
936 | } else { | ||
937 | if (cmd->error == -EIO) { | ||
938 | if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0) | ||
939 | return; | ||
940 | } | ||
931 | } | 941 | } |
932 | } | 942 | } |
933 | 943 | ||
944 | urq->rq = NULL; | ||
945 | |||
934 | ub_put_cmd(lun, cmd); | 946 | ub_put_cmd(lun, cmd); |
935 | ub_end_rq(rq, uptodate); | 947 | ub_end_rq(rq, uptodate); |
936 | blk_start_queue(lun->disk->queue); | 948 | blk_start_queue(lun->disk->queue); |
@@ -938,13 +950,45 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
938 | 950 | ||
939 | static void ub_end_rq(struct request *rq, int uptodate) | 951 | static void ub_end_rq(struct request *rq, int uptodate) |
940 | { | 952 | { |
941 | int rc; | 953 | end_that_request_first(rq, uptodate, rq->hard_nr_sectors); |
942 | |||
943 | rc = end_that_request_first(rq, uptodate, rq->hard_nr_sectors); | ||
944 | // assert(rc == 0); | ||
945 | end_that_request_last(rq); | 954 | end_that_request_last(rq); |
946 | } | 955 | } |
947 | 956 | ||
957 | static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, | ||
958 | struct ub_request *urq, struct ub_scsi_cmd *cmd) | ||
959 | { | ||
960 | |||
961 | if (atomic_read(&sc->poison)) | ||
962 | return -ENXIO; | ||
963 | |||
964 | ub_reset_enter(sc); | ||
965 | |||
966 | if (urq->current_try >= 3) | ||
967 | return -EIO; | ||
968 | urq->current_try++; | ||
969 | /* P3 */ printk("%s: dir %c len/act %d/%d " | ||
970 | "[sense %x %02x %02x] retry %d\n", | ||
971 | sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len, | ||
972 | cmd->key, cmd->asc, cmd->ascq, urq->current_try); | ||
973 | |||
974 | memset(cmd, 0, sizeof(struct ub_scsi_cmd)); | ||
975 | ub_cmd_build_block(sc, lun, cmd, urq); | ||
976 | |||
977 | cmd->state = UB_CMDST_INIT; | ||
978 | cmd->lun = lun; | ||
979 | cmd->done = ub_rw_cmd_done; | ||
980 | cmd->back = urq; | ||
981 | |||
982 | cmd->tag = sc->tagcnt++; | ||
983 | |||
984 | #if 0 /* Wasteful */ | ||
985 | return ub_submit_scsi(sc, cmd); | ||
986 | #else | ||
987 | ub_cmdq_add(sc, cmd); | ||
988 | return 0; | ||
989 | #endif | ||
990 | } | ||
991 | |||
948 | /* | 992 | /* |
949 | * Submit a regular SCSI operation (not an auto-sense). | 993 | * Submit a regular SCSI operation (not an auto-sense). |
950 | * | 994 | * |
@@ -1075,7 +1119,7 @@ static void ub_scsi_dispatch(struct ub_dev *sc) | |||
1075 | struct ub_scsi_cmd *cmd; | 1119 | struct ub_scsi_cmd *cmd; |
1076 | int rc; | 1120 | int rc; |
1077 | 1121 | ||
1078 | while ((cmd = ub_cmdq_peek(sc)) != NULL) { | 1122 | while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) { |
1079 | if (cmd->state == UB_CMDST_DONE) { | 1123 | if (cmd->state == UB_CMDST_DONE) { |
1080 | ub_cmdq_pop(sc); | 1124 | ub_cmdq_pop(sc); |
1081 | (*cmd->done)(sc, cmd); | 1125 | (*cmd->done)(sc, cmd); |
@@ -1098,11 +1142,12 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1098 | { | 1142 | { |
1099 | struct urb *urb = &sc->work_urb; | 1143 | struct urb *urb = &sc->work_urb; |
1100 | struct bulk_cs_wrap *bcs; | 1144 | struct bulk_cs_wrap *bcs; |
1145 | int len; | ||
1101 | int rc; | 1146 | int rc; |
1102 | 1147 | ||
1103 | if (atomic_read(&sc->poison)) { | 1148 | if (atomic_read(&sc->poison)) { |
1104 | /* A little too simplistic, I feel... */ | 1149 | ub_state_done(sc, cmd, -ENODEV); |
1105 | goto Bad_End; | 1150 | return; |
1106 | } | 1151 | } |
1107 | 1152 | ||
1108 | if (cmd->state == UB_CMDST_CLEAR) { | 1153 | if (cmd->state == UB_CMDST_CLEAR) { |
@@ -1110,7 +1155,6 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1110 | /* | 1155 | /* |
1111 | * STALL while clearning STALL. | 1156 | * STALL while clearning STALL. |
1112 | * The control pipe clears itself - nothing to do. | 1157 | * The control pipe clears itself - nothing to do. |
1113 | * XXX Might try to reset the device here and retry. | ||
1114 | */ | 1158 | */ |
1115 | printk(KERN_NOTICE "%s: stall on control pipe\n", | 1159 | printk(KERN_NOTICE "%s: stall on control pipe\n", |
1116 | sc->name); | 1160 | sc->name); |
@@ -1129,11 +1173,6 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1129 | 1173 | ||
1130 | } else if (cmd->state == UB_CMDST_CLR2STS) { | 1174 | } else if (cmd->state == UB_CMDST_CLR2STS) { |
1131 | if (urb->status == -EPIPE) { | 1175 | if (urb->status == -EPIPE) { |
1132 | /* | ||
1133 | * STALL while clearning STALL. | ||
1134 | * The control pipe clears itself - nothing to do. | ||
1135 | * XXX Might try to reset the device here and retry. | ||
1136 | */ | ||
1137 | printk(KERN_NOTICE "%s: stall on control pipe\n", | 1176 | printk(KERN_NOTICE "%s: stall on control pipe\n", |
1138 | sc->name); | 1177 | sc->name); |
1139 | goto Bad_End; | 1178 | goto Bad_End; |
@@ -1151,11 +1190,6 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1151 | 1190 | ||
1152 | } else if (cmd->state == UB_CMDST_CLRRS) { | 1191 | } else if (cmd->state == UB_CMDST_CLRRS) { |
1153 | if (urb->status == -EPIPE) { | 1192 | if (urb->status == -EPIPE) { |
1154 | /* | ||
1155 | * STALL while clearning STALL. | ||
1156 | * The control pipe clears itself - nothing to do. | ||
1157 | * XXX Might try to reset the device here and retry. | ||
1158 | */ | ||
1159 | printk(KERN_NOTICE "%s: stall on control pipe\n", | 1193 | printk(KERN_NOTICE "%s: stall on control pipe\n", |
1160 | sc->name); | 1194 | sc->name); |
1161 | goto Bad_End; | 1195 | goto Bad_End; |
@@ -1172,7 +1206,12 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1172 | ub_state_stat_counted(sc, cmd); | 1206 | ub_state_stat_counted(sc, cmd); |
1173 | 1207 | ||
1174 | } else if (cmd->state == UB_CMDST_CMD) { | 1208 | } else if (cmd->state == UB_CMDST_CMD) { |
1175 | if (urb->status == -EPIPE) { | 1209 | switch (urb->status) { |
1210 | case 0: | ||
1211 | break; | ||
1212 | case -EOVERFLOW: | ||
1213 | goto Bad_End; | ||
1214 | case -EPIPE: | ||
1176 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); | 1215 | rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); |
1177 | if (rc != 0) { | 1216 | if (rc != 0) { |
1178 | printk(KERN_NOTICE "%s: " | 1217 | printk(KERN_NOTICE "%s: " |
@@ -1182,17 +1221,20 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1182 | * This is typically ENOMEM or some other such shit. | 1221 | * This is typically ENOMEM or some other such shit. |
1183 | * Retrying is pointless. Just do Bad End on it... | 1222 | * Retrying is pointless. Just do Bad End on it... |
1184 | */ | 1223 | */ |
1185 | goto Bad_End; | 1224 | ub_state_done(sc, cmd, rc); |
1225 | return; | ||
1186 | } | 1226 | } |
1187 | cmd->state = UB_CMDST_CLEAR; | 1227 | cmd->state = UB_CMDST_CLEAR; |
1188 | ub_cmdtr_state(sc, cmd); | 1228 | ub_cmdtr_state(sc, cmd); |
1189 | return; | 1229 | return; |
1190 | } | 1230 | case -ESHUTDOWN: /* unplug */ |
1191 | if (urb->status != 0) { | 1231 | case -EILSEQ: /* unplug timeout on uhci */ |
1232 | ub_state_done(sc, cmd, -ENODEV); | ||
1233 | return; | ||
1234 | default: | ||
1192 | goto Bad_End; | 1235 | goto Bad_End; |
1193 | } | 1236 | } |
1194 | if (urb->actual_length != US_BULK_CB_WRAP_LEN) { | 1237 | if (urb->actual_length != US_BULK_CB_WRAP_LEN) { |
1195 | /* XXX Must do reset here to unconfuse the device */ | ||
1196 | goto Bad_End; | 1238 | goto Bad_End; |
1197 | } | 1239 | } |
1198 | 1240 | ||
@@ -1211,11 +1253,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1211 | printk(KERN_NOTICE "%s: " | 1253 | printk(KERN_NOTICE "%s: " |
1212 | "unable to submit clear (%d)\n", | 1254 | "unable to submit clear (%d)\n", |
1213 | sc->name, rc); | 1255 | sc->name, rc); |
1214 | /* | 1256 | ub_state_done(sc, cmd, rc); |
1215 | * This is typically ENOMEM or some other such shit. | 1257 | return; |
1216 | * Retrying is pointless. Just do Bad End on it... | ||
1217 | */ | ||
1218 | goto Bad_End; | ||
1219 | } | 1258 | } |
1220 | cmd->state = UB_CMDST_CLR2STS; | 1259 | cmd->state = UB_CMDST_CLR2STS; |
1221 | ub_cmdtr_state(sc, cmd); | 1260 | ub_cmdtr_state(sc, cmd); |
@@ -1224,14 +1263,50 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1224 | if (urb->status == -EOVERFLOW) { | 1263 | if (urb->status == -EOVERFLOW) { |
1225 | /* | 1264 | /* |
1226 | * A babble? Failure, but we must transfer CSW now. | 1265 | * A babble? Failure, but we must transfer CSW now. |
1227 | * XXX This is going to end in perpetual babble. Reset. | ||
1228 | */ | 1266 | */ |
1229 | cmd->error = -EOVERFLOW; /* A cheap trick... */ | 1267 | cmd->error = -EOVERFLOW; /* A cheap trick... */ |
1230 | ub_state_stat(sc, cmd); | 1268 | ub_state_stat(sc, cmd); |
1231 | return; | 1269 | return; |
1232 | } | 1270 | } |
1233 | if (urb->status != 0) | 1271 | |
1234 | goto Bad_End; | 1272 | if (cmd->dir == UB_DIR_WRITE) { |
1273 | /* | ||
1274 | * Do not continue writes in case of a failure. | ||
1275 | * Doing so would cause sectors to be mixed up, | ||
1276 | * which is worse than sectors lost. | ||
1277 | * | ||
1278 | * We must try to read the CSW, or many devices | ||
1279 | * get confused. | ||
1280 | */ | ||
1281 | len = urb->actual_length; | ||
1282 | if (urb->status != 0 || | ||
1283 | len != cmd->sgv[cmd->current_sg].length) { | ||
1284 | cmd->act_len += len; | ||
1285 | ub_cmdtr_act_len(sc, cmd); | ||
1286 | |||
1287 | cmd->error = -EIO; | ||
1288 | ub_state_stat(sc, cmd); | ||
1289 | return; | ||
1290 | } | ||
1291 | |||
1292 | } else { | ||
1293 | /* | ||
1294 | * If an error occurs on read, we record it, and | ||
1295 | * continue to fetch data in order to avoid bubble. | ||
1296 | * | ||
1297 | * As a small shortcut, we stop if we detect that | ||
1298 | * a CSW mixed into data. | ||
1299 | */ | ||
1300 | if (urb->status != 0) | ||
1301 | cmd->error = -EIO; | ||
1302 | |||
1303 | len = urb->actual_length; | ||
1304 | if (urb->status != 0 || | ||
1305 | len != cmd->sgv[cmd->current_sg].length) { | ||
1306 | if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN) | ||
1307 | goto Bad_End; | ||
1308 | } | ||
1309 | } | ||
1235 | 1310 | ||
1236 | cmd->act_len += urb->actual_length; | 1311 | cmd->act_len += urb->actual_length; |
1237 | ub_cmdtr_act_len(sc, cmd); | 1312 | ub_cmdtr_act_len(sc, cmd); |
@@ -1249,11 +1324,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1249 | printk(KERN_NOTICE "%s: " | 1324 | printk(KERN_NOTICE "%s: " |
1250 | "unable to submit clear (%d)\n", | 1325 | "unable to submit clear (%d)\n", |
1251 | sc->name, rc); | 1326 | sc->name, rc); |
1252 | /* | 1327 | ub_state_done(sc, cmd, rc); |
1253 | * This is typically ENOMEM or some other such shit. | 1328 | return; |
1254 | * Retrying is pointless. Just do Bad End on it... | ||
1255 | */ | ||
1256 | goto Bad_End; | ||
1257 | } | 1329 | } |
1258 | 1330 | ||
1259 | /* | 1331 | /* |
@@ -1266,14 +1338,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1266 | ub_cmdtr_state(sc, cmd); | 1338 | ub_cmdtr_state(sc, cmd); |
1267 | return; | 1339 | return; |
1268 | } | 1340 | } |
1269 | if (urb->status == -EOVERFLOW) { | 1341 | |
1270 | /* | 1342 | /* Catch everything, including -EOVERFLOW and other nasties. */ |
1271 | * XXX We are screwed here. Retrying is pointless, | ||
1272 | * because the pipelined data will not get in until | ||
1273 | * we read with a big enough buffer. We must reset XXX. | ||
1274 | */ | ||
1275 | goto Bad_End; | ||
1276 | } | ||
1277 | if (urb->status != 0) | 1343 | if (urb->status != 0) |
1278 | goto Bad_End; | 1344 | goto Bad_End; |
1279 | 1345 | ||
@@ -1319,15 +1385,15 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1319 | return; | 1385 | return; |
1320 | } | 1386 | } |
1321 | 1387 | ||
1322 | rc = le32_to_cpu(bcs->Residue); | 1388 | len = le32_to_cpu(bcs->Residue); |
1323 | if (rc != cmd->len - cmd->act_len) { | 1389 | if (len != cmd->len - cmd->act_len) { |
1324 | /* | 1390 | /* |
1325 | * It is all right to transfer less, the caller has | 1391 | * It is all right to transfer less, the caller has |
1326 | * to check. But it's not all right if the device | 1392 | * to check. But it's not all right if the device |
1327 | * counts disagree with our counts. | 1393 | * counts disagree with our counts. |
1328 | */ | 1394 | */ |
1329 | /* P3 */ printk("%s: resid %d len %d act %d\n", | 1395 | /* P3 */ printk("%s: resid %d len %d act %d\n", |
1330 | sc->name, rc, cmd->len, cmd->act_len); | 1396 | sc->name, len, cmd->len, cmd->act_len); |
1331 | goto Bad_End; | 1397 | goto Bad_End; |
1332 | } | 1398 | } |
1333 | 1399 | ||
@@ -1338,13 +1404,13 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1338 | ub_state_sense(sc, cmd); | 1404 | ub_state_sense(sc, cmd); |
1339 | return; | 1405 | return; |
1340 | case US_BULK_STAT_PHASE: | 1406 | case US_BULK_STAT_PHASE: |
1341 | /* XXX We must reset the transport here */ | ||
1342 | /* P3 */ printk("%s: status PHASE\n", sc->name); | 1407 | /* P3 */ printk("%s: status PHASE\n", sc->name); |
1343 | goto Bad_End; | 1408 | goto Bad_End; |
1344 | default: | 1409 | default: |
1345 | printk(KERN_INFO "%s: unknown CSW status 0x%x\n", | 1410 | printk(KERN_INFO "%s: unknown CSW status 0x%x\n", |
1346 | sc->name, bcs->Status); | 1411 | sc->name, bcs->Status); |
1347 | goto Bad_End; | 1412 | ub_state_done(sc, cmd, -EINVAL); |
1413 | return; | ||
1348 | } | 1414 | } |
1349 | 1415 | ||
1350 | /* Not zeroing error to preserve a babble indicator */ | 1416 | /* Not zeroing error to preserve a babble indicator */ |
@@ -1364,7 +1430,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1364 | printk(KERN_WARNING "%s: " | 1430 | printk(KERN_WARNING "%s: " |
1365 | "wrong command state %d\n", | 1431 | "wrong command state %d\n", |
1366 | sc->name, cmd->state); | 1432 | sc->name, cmd->state); |
1367 | goto Bad_End; | 1433 | ub_state_done(sc, cmd, -EINVAL); |
1434 | return; | ||
1368 | } | 1435 | } |
1369 | return; | 1436 | return; |
1370 | 1437 | ||
@@ -1612,6 +1679,93 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd) | |||
1612 | } | 1679 | } |
1613 | 1680 | ||
1614 | /* | 1681 | /* |
1682 | * Reset management | ||
1683 | */ | ||
1684 | |||
1685 | static void ub_reset_enter(struct ub_dev *sc) | ||
1686 | { | ||
1687 | |||
1688 | if (sc->reset) { | ||
1689 | /* This happens often on multi-LUN devices. */ | ||
1690 | return; | ||
1691 | } | ||
1692 | sc->reset = 1; | ||
1693 | |||
1694 | #if 0 /* Not needed because the disconnect waits for us. */ | ||
1695 | unsigned long flags; | ||
1696 | spin_lock_irqsave(&ub_lock, flags); | ||
1697 | sc->openc++; | ||
1698 | spin_unlock_irqrestore(&ub_lock, flags); | ||
1699 | #endif | ||
1700 | |||
1701 | #if 0 /* We let them stop themselves. */ | ||
1702 | struct list_head *p; | ||
1703 | struct ub_lun *lun; | ||
1704 | list_for_each(p, &sc->luns) { | ||
1705 | lun = list_entry(p, struct ub_lun, link); | ||
1706 | blk_stop_queue(lun->disk->queue); | ||
1707 | } | ||
1708 | #endif | ||
1709 | |||
1710 | schedule_work(&sc->reset_work); | ||
1711 | } | ||
1712 | |||
1713 | static void ub_reset_task(void *arg) | ||
1714 | { | ||
1715 | struct ub_dev *sc = arg; | ||
1716 | unsigned long flags; | ||
1717 | struct list_head *p; | ||
1718 | struct ub_lun *lun; | ||
1719 | int lkr, rc; | ||
1720 | |||
1721 | if (!sc->reset) { | ||
1722 | printk(KERN_WARNING "%s: Running reset unrequested\n", | ||
1723 | sc->name); | ||
1724 | return; | ||
1725 | } | ||
1726 | |||
1727 | if (atomic_read(&sc->poison)) { | ||
1728 | printk(KERN_NOTICE "%s: Not resetting disconnected device\n", | ||
1729 | sc->name); /* P3 This floods. Remove soon. XXX */ | ||
1730 | } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) { | ||
1731 | printk(KERN_NOTICE "%s: Not resetting multi-interface device\n", | ||
1732 | sc->name); /* P3 This floods. Remove soon. XXX */ | ||
1733 | } else { | ||
1734 | if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) { | ||
1735 | printk(KERN_NOTICE | ||
1736 | "%s: usb_lock_device_for_reset failed (%d)\n", | ||
1737 | sc->name, lkr); | ||
1738 | } else { | ||
1739 | rc = usb_reset_device(sc->dev); | ||
1740 | if (rc < 0) { | ||
1741 | printk(KERN_NOTICE "%s: " | ||
1742 | "usb_reset_device failed (%d)\n", | ||
1743 | sc->name, rc); | ||
1744 | } | ||
1745 | |||
1746 | if (lkr) | ||
1747 | usb_unlock_device(sc->dev); | ||
1748 | } | ||
1749 | } | ||
1750 | |||
1751 | /* | ||
1752 | * In theory, no commands can be running while reset is active, | ||
1753 | * so nobody can ask for another reset, and so we do not need any | ||
1754 | * queues of resets or anything. We do need a spinlock though, | ||
1755 | * to interact with block layer. | ||
1756 | */ | ||
1757 | spin_lock_irqsave(&sc->lock, flags); | ||
1758 | sc->reset = 0; | ||
1759 | tasklet_schedule(&sc->tasklet); | ||
1760 | list_for_each(p, &sc->luns) { | ||
1761 | lun = list_entry(p, struct ub_lun, link); | ||
1762 | blk_start_queue(lun->disk->queue); | ||
1763 | } | ||
1764 | wake_up(&sc->reset_wait); | ||
1765 | spin_unlock_irqrestore(&sc->lock, flags); | ||
1766 | } | ||
1767 | |||
1768 | /* | ||
1615 | * This is called from a process context. | 1769 | * This is called from a process context. |
1616 | */ | 1770 | */ |
1617 | static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun) | 1771 | static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun) |
@@ -2146,7 +2300,7 @@ static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev, | |||
2146 | if (ep_in == NULL || ep_out == NULL) { | 2300 | if (ep_in == NULL || ep_out == NULL) { |
2147 | printk(KERN_NOTICE "%s: failed endpoint check\n", | 2301 | printk(KERN_NOTICE "%s: failed endpoint check\n", |
2148 | sc->name); | 2302 | sc->name); |
2149 | return -EIO; | 2303 | return -ENODEV; |
2150 | } | 2304 | } |
2151 | 2305 | ||
2152 | /* Calculate and store the pipe values */ | 2306 | /* Calculate and store the pipe values */ |
@@ -2172,6 +2326,9 @@ static int ub_probe(struct usb_interface *intf, | |||
2172 | int rc; | 2326 | int rc; |
2173 | int i; | 2327 | int i; |
2174 | 2328 | ||
2329 | if (usb_usual_check_type(dev_id, USB_US_TYPE_UB)) | ||
2330 | return -ENXIO; | ||
2331 | |||
2175 | rc = -ENOMEM; | 2332 | rc = -ENOMEM; |
2176 | if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL) | 2333 | if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL) |
2177 | goto err_core; | 2334 | goto err_core; |
@@ -2181,6 +2338,8 @@ static int ub_probe(struct usb_interface *intf, | |||
2181 | usb_init_urb(&sc->work_urb); | 2338 | usb_init_urb(&sc->work_urb); |
2182 | tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); | 2339 | tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); |
2183 | atomic_set(&sc->poison, 0); | 2340 | atomic_set(&sc->poison, 0); |
2341 | INIT_WORK(&sc->reset_work, ub_reset_task, sc); | ||
2342 | init_waitqueue_head(&sc->reset_wait); | ||
2184 | 2343 | ||
2185 | init_timer(&sc->work_timer); | 2344 | init_timer(&sc->work_timer); |
2186 | sc->work_timer.data = (unsigned long) sc; | 2345 | sc->work_timer.data = (unsigned long) sc; |
@@ -2201,7 +2360,8 @@ static int ub_probe(struct usb_interface *intf, | |||
2201 | 2360 | ||
2202 | /* XXX Verify that we can handle the device (from descriptors) */ | 2361 | /* XXX Verify that we can handle the device (from descriptors) */ |
2203 | 2362 | ||
2204 | ub_get_pipes(sc, sc->dev, intf); | 2363 | if (ub_get_pipes(sc, sc->dev, intf) != 0) |
2364 | goto err_dev_desc; | ||
2205 | 2365 | ||
2206 | if (device_create_file(&sc->intf->dev, &dev_attr_diag) != 0) | 2366 | if (device_create_file(&sc->intf->dev, &dev_attr_diag) != 0) |
2207 | goto err_diag; | 2367 | goto err_diag; |
@@ -2272,6 +2432,7 @@ static int ub_probe(struct usb_interface *intf, | |||
2272 | 2432 | ||
2273 | /* device_remove_file(&sc->intf->dev, &dev_attr_diag); */ | 2433 | /* device_remove_file(&sc->intf->dev, &dev_attr_diag); */ |
2274 | err_diag: | 2434 | err_diag: |
2435 | err_dev_desc: | ||
2275 | usb_set_intfdata(intf, NULL); | 2436 | usb_set_intfdata(intf, NULL); |
2276 | // usb_put_intf(sc->intf); | 2437 | // usb_put_intf(sc->intf); |
2277 | usb_put_dev(sc->dev); | 2438 | usb_put_dev(sc->dev); |
@@ -2309,14 +2470,14 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) | |||
2309 | ub_revalidate(sc, lun); | 2470 | ub_revalidate(sc, lun); |
2310 | 2471 | ||
2311 | rc = -ENOMEM; | 2472 | rc = -ENOMEM; |
2312 | if ((disk = alloc_disk(UB_MINORS_PER_MAJOR)) == NULL) | 2473 | if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL) |
2313 | goto err_diskalloc; | 2474 | goto err_diskalloc; |
2314 | 2475 | ||
2315 | lun->disk = disk; | 2476 | lun->disk = disk; |
2316 | sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a'); | 2477 | sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a'); |
2317 | sprintf(disk->devfs_name, DEVFS_NAME "/%c", lun->id + 'a'); | 2478 | sprintf(disk->devfs_name, DEVFS_NAME "/%c", lun->id + 'a'); |
2318 | disk->major = UB_MAJOR; | 2479 | disk->major = UB_MAJOR; |
2319 | disk->first_minor = lun->id * UB_MINORS_PER_MAJOR; | 2480 | disk->first_minor = lun->id * UB_PARTS_PER_LUN; |
2320 | disk->fops = &ub_bd_fops; | 2481 | disk->fops = &ub_bd_fops; |
2321 | disk->private_data = lun; | 2482 | disk->private_data = lun; |
2322 | disk->driverfs_dev = &sc->intf->dev; | 2483 | disk->driverfs_dev = &sc->intf->dev; |
@@ -2380,6 +2541,11 @@ static void ub_disconnect(struct usb_interface *intf) | |||
2380 | atomic_set(&sc->poison, 1); | 2541 | atomic_set(&sc->poison, 1); |
2381 | 2542 | ||
2382 | /* | 2543 | /* |
2544 | * Wait for reset to end, if any. | ||
2545 | */ | ||
2546 | wait_event(sc->reset_wait, !sc->reset); | ||
2547 | |||
2548 | /* | ||
2383 | * Blow away queued commands. | 2549 | * Blow away queued commands. |
2384 | * | 2550 | * |
2385 | * Actually, this never works, because before we get here | 2551 | * Actually, this never works, because before we get here |
@@ -2392,7 +2558,7 @@ static void ub_disconnect(struct usb_interface *intf) | |||
2392 | { | 2558 | { |
2393 | struct ub_scsi_cmd *cmd; | 2559 | struct ub_scsi_cmd *cmd; |
2394 | int cnt = 0; | 2560 | int cnt = 0; |
2395 | while ((cmd = ub_cmdq_pop(sc)) != NULL) { | 2561 | while ((cmd = ub_cmdq_peek(sc)) != NULL) { |
2396 | cmd->error = -ENOTCONN; | 2562 | cmd->error = -ENOTCONN; |
2397 | cmd->state = UB_CMDST_DONE; | 2563 | cmd->state = UB_CMDST_DONE; |
2398 | ub_cmdtr_state(sc, cmd); | 2564 | ub_cmdtr_state(sc, cmd); |
@@ -2461,7 +2627,6 @@ static void ub_disconnect(struct usb_interface *intf) | |||
2461 | } | 2627 | } |
2462 | 2628 | ||
2463 | static struct usb_driver ub_driver = { | 2629 | static struct usb_driver ub_driver = { |
2464 | .owner = THIS_MODULE, | ||
2465 | .name = "ub", | 2630 | .name = "ub", |
2466 | .probe = ub_probe, | 2631 | .probe = ub_probe, |
2467 | .disconnect = ub_disconnect, | 2632 | .disconnect = ub_disconnect, |
@@ -2479,6 +2644,7 @@ static int __init ub_init(void) | |||
2479 | if ((rc = usb_register(&ub_driver)) != 0) | 2644 | if ((rc = usb_register(&ub_driver)) != 0) |
2480 | goto err_register; | 2645 | goto err_register; |
2481 | 2646 | ||
2647 | usb_usual_set_present(USB_US_TYPE_UB); | ||
2482 | return 0; | 2648 | return 0; |
2483 | 2649 | ||
2484 | err_register: | 2650 | err_register: |
@@ -2494,6 +2660,7 @@ static void __exit ub_exit(void) | |||
2494 | 2660 | ||
2495 | devfs_remove(DEVFS_NAME); | 2661 | devfs_remove(DEVFS_NAME); |
2496 | unregister_blkdev(UB_MAJOR, DRV_NAME); | 2662 | unregister_blkdev(UB_MAJOR, DRV_NAME); |
2663 | usb_usual_clear_present(USB_US_TYPE_UB); | ||
2497 | } | 2664 | } |
2498 | 2665 | ||
2499 | module_init(ub_init); | 2666 | module_init(ub_init); |