Diffstat (limited to 'drivers/block/ub.c')
 drivers/block/ub.c | 273
 1 file changed, 150 insertions(+), 123 deletions(-)
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index a026567f5d18..aa0bf7ee008d 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -16,9 +16,10 @@
  * -- verify the 13 conditions and do bulk resets
  * -- kill last_pipe and simply do two-state clearing on both pipes
  * -- verify protocol (bulk) from USB descriptors (maybe...)
- * -- highmem and sg
+ * -- highmem
  * -- move top_sense and work_bcs into separate allocations (if they survive)
  *    for cache purists and esoteric architectures.
+ * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
  * -- prune comments, they are too volumnous
  * -- Exterminate P3 printks
  * -- Resove XXX's
@@ -171,7 +172,7 @@ struct bulk_cs_wrap {
  */
 struct ub_dev;
 
-#define UB_MAX_REQ_SG	1
+#define UB_MAX_REQ_SG	4
 #define UB_MAX_SECTORS 64
 
 /*
@@ -234,13 +235,10 @@ struct ub_scsi_cmd {
 
 	int stat_count;			/* Retries getting status. */
 
-	/*
-	 * We do not support transfers from highmem pages
-	 * because the underlying USB framework does not do what we need.
-	 */
-	char *data;			/* Requested buffer */
 	unsigned int len;		/* Requested length */
-	// struct scatterlist sgv[UB_MAX_REQ_SG];
+	unsigned int current_sg;
+	unsigned int nsg;		/* sgv[nsg] */
+	struct scatterlist sgv[UB_MAX_REQ_SG];
 
 	struct ub_lun *lun;
 	void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
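
The command structure now carries its data as an inline scatterlist rather than a single kernel-virtual pointer: nsg says how many sgv[] entries are valid and current_sg tracks the entry currently being transferred. For reference, a segment walk over the 2.6-era struct scatterlist (page/offset/length fields) looks like the sketch below; ub_cmd_total() is a hypothetical helper, not part of the patch, and cmd->len is expected to equal its result.

/* Sketch (hypothetical helper): sum the valid s/g entries of a command.
 * Assumes the 2.6.12-era scatterlist layout with page/offset/length. */
static unsigned int ub_cmd_total(struct ub_scsi_cmd *cmd)
{
	unsigned int i, sum = 0;

	for (i = 0; i < cmd->nsg; i++)
		sum += cmd->sgv[i].length;
	return sum;		/* should match cmd->len */
}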
@@ -389,17 +387,18 @@ struct ub_dev {
 	struct bulk_cs_wrap work_bcs;
 	struct usb_ctrlrequest work_cr;
 
+	int sg_stat[UB_MAX_REQ_SG+1];
 	struct ub_scsi_trace tr;
 };
 
 /*
  */
 static void ub_cleanup(struct ub_dev *sc);
-static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq);
+static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
 static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_scsi_cmd *cmd, struct request *rq);
-static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
-    struct request *rq);
+static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct request *rq);
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_end_rq(struct request *rq, int uptodate);
 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -407,6 +406,7 @@ static void ub_urb_complete(struct urb *urb, struct pt_regs *pt);
 static void ub_scsi_action(unsigned long _dev);
 static void ub_scsi_dispatch(struct ub_dev *sc);
 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
 static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -500,7 +500,8 @@ static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
 	}
 }
 
-static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, char *page)
+static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
+    char *page)
 {
 	struct usb_interface *intf;
 	struct ub_dev *sc;
@@ -523,6 +524,13 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, c
 	cnt += sprintf(page + cnt,
 	    "qlen %d qmax %d\n",
 	    sc->cmd_queue.qlen, sc->cmd_queue.qmax);
+	cnt += sprintf(page + cnt,
+	    "sg %d %d %d %d %d\n",
+	    sc->sg_stat[0],
+	    sc->sg_stat[1],
+	    sc->sg_stat[2],
+	    sc->sg_stat[3],
+	    sc->sg_stat[4]);
 
 	list_for_each (p, &sc->luns) {
 		lun = list_entry(p, struct ub_lun, link);
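
The new "sg N N N N N" line dumps a small histogram: sg_stat[n] counts commands whose request mapped to n scatter-gather segments, for n from 0 to UB_MAX_REQ_SG (hence the UB_MAX_REQ_SG+1 buckets). The unrolled sprintf above is equivalent to this sketch; ub_diag_sg() is a hypothetical name.

/* Sketch (hypothetical helper): the loop form of the unrolled
 * sg_stat dump above, one bucket per possible segment count. */
static int ub_diag_sg(struct ub_dev *sc, char *page)
{
	int n, cnt = 0;

	cnt += sprintf(page + cnt, "sg");
	for (n = 0; n <= UB_MAX_REQ_SG; n++)
		cnt += sprintf(page + cnt, " %d", sc->sg_stat[n]);
	return cnt + sprintf(page + cnt, "\n");
}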
@@ -744,20 +752,20 @@ static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
  * The request function is our main entry point
  */
 
-static void ub_bd_rq_fn(request_queue_t *q)
+static void ub_request_fn(request_queue_t *q)
 {
 	struct ub_lun *lun = q->queuedata;
 	struct request *rq;
 
 	while ((rq = elv_next_request(q)) != NULL) {
-		if (ub_bd_rq_fn_1(lun, rq) != 0) {
+		if (ub_request_fn_1(lun, rq) != 0) {
 			blk_stop_queue(q);
 			break;
 		}
 	}
 }
 
-static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq)
+static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 {
 	struct ub_dev *sc = lun->udev;
 	struct ub_scsi_cmd *cmd;
@@ -774,9 +782,8 @@ static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq)
 	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
 
 	blkdev_dequeue_request(rq);
-
 	if (blk_pc_request(rq)) {
-		rc = ub_cmd_build_packet(sc, cmd, rq);
+		rc = ub_cmd_build_packet(sc, lun, cmd, rq);
 	} else {
 		rc = ub_cmd_build_block(sc, lun, cmd, rq);
 	}
@@ -791,7 +798,7 @@ static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq)
 	cmd->back = rq;
 
 	cmd->tag = sc->tagcnt++;
-	if ((rc = ub_submit_scsi(sc, cmd)) != 0) {
+	if (ub_submit_scsi(sc, cmd) != 0) {
 		ub_put_cmd(lun, cmd);
 		ub_end_rq(rq, 0);
 		return 0;
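
The request function dequeues and issues commands until one cannot be started, then stops the queue; the completion path restarts it (see blk_start_queue() in ub_rw_cmd_done further down). A sketch of the pairing, assuming q->queue_lock is held as the block layer guarantees inside a request function:

/* Sketch: the stop/restart pairing. blk_stop_queue() must run under
 * q->queue_lock (held here by the block layer); the matching
 * blk_start_queue() happens in the command completion path. */
while ((rq = elv_next_request(q)) != NULL) {
	if (ub_request_fn_1(lun, rq) != 0) {
		blk_stop_queue(q);	/* out of commands: pause dispatch */
		break;
	}
}
/* ... later: blk_start_queue(lun->disk->queue); in ub_rw_cmd_done() */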
@@ -804,58 +811,31 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_scsi_cmd *cmd, struct request *rq)
 {
 	int ub_dir;
-#if 0 /* We use rq->buffer for now */
-	struct scatterlist *sg;
 	int n_elem;
-#endif
 	unsigned int block, nblks;
 
 	if (rq_data_dir(rq) == WRITE)
 		ub_dir = UB_DIR_WRITE;
 	else
 		ub_dir = UB_DIR_READ;
+	cmd->dir = ub_dir;
 
 	/*
 	 * get scatterlist from block layer
 	 */
-#if 0 /* We use rq->buffer for now */
-	sg = &cmd->sgv[0];
-	n_elem = blk_rq_map_sg(q, rq, sg);
+	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]);
 	if (n_elem <= 0) {
-		ub_put_cmd(lun, cmd);
-		ub_end_rq(rq, 0);
-		blk_start_queue(q);
-		return 0;		/* request with no s/g entries? */
+		printk(KERN_INFO "%s: failed request map (%d)\n",
+		    sc->name, n_elem); /* P3 */
+		return -1;		/* request with no s/g entries? */
 	}
-
-	if (n_elem != 1) {		/* Paranoia */
+	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
 		printk(KERN_WARNING "%s: request with %d segments\n",
 		    sc->name, n_elem);
-		ub_put_cmd(lun, cmd);
-		ub_end_rq(rq, 0);
-		blk_start_queue(q);
-		return 0;
-	}
-#endif
-
-	/*
-	 * XXX Unfortunately, this check does not work. It is quite possible
-	 * to get bogus non-null rq->buffer if you allow sg by mistake.
-	 */
-	if (rq->buffer == NULL) {
-		/*
-		 * This must not happen if we set the queue right.
-		 * The block level must create bounce buffers for us.
-		 */
-		static int do_print = 1;
-		if (do_print) {
-			printk(KERN_WARNING "%s: unmapped block request"
-			    " flags 0x%lx sectors %lu\n",
-			    sc->name, rq->flags, rq->nr_sectors);
-			do_print = 0;
-		}
 		return -1;
 	}
+	cmd->nsg = n_elem;
+	sc->sg_stat[n_elem]++;
 
 	/*
 	 * build the command
@@ -876,30 +856,15 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 	cmd->cdb[8] = nblks;
 	cmd->cdb_len = 10;
 
-	cmd->dir = ub_dir;
-	cmd->data = rq->buffer;
 	cmd->len = rq->nr_sectors * 512;
 
 	return 0;
 }
 
-static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
-    struct request *rq)
+static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct request *rq)
 {
-
-	if (rq->data_len != 0 && rq->data == NULL) {
-		static int do_print = 1;
-		if (do_print) {
-			printk(KERN_WARNING "%s: unmapped packet request"
-			    " flags 0x%lx length %d\n",
-			    sc->name, rq->flags, rq->data_len);
-			do_print = 0;
-		}
-		return -1;
-	}
-
-	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
-	cmd->cdb_len = rq->cmd_len;
+	int n_elem;
 
 	if (rq->data_len == 0) {
 		cmd->dir = UB_DIR_NONE;
@@ -908,8 +873,29 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
 		cmd->dir = UB_DIR_WRITE;
 	else
 		cmd->dir = UB_DIR_READ;
+
+	}
+
+	/*
+	 * get scatterlist from block layer
+	 */
+	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]);
+	if (n_elem < 0) {
+		printk(KERN_INFO "%s: failed request map (%d)\n",
+		    sc->name, n_elem); /* P3 */
+		return -1;
 	}
-	cmd->data = rq->data;
+	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
+		printk(KERN_WARNING "%s: request with %d segments\n",
+		    sc->name, n_elem);
+		return -1;
+	}
+	cmd->nsg = n_elem;
+	sc->sg_stat[n_elem]++;
+
+	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
+	cmd->cdb_len = rq->cmd_len;
+
 	cmd->len = rq->data_len;
 
 	return 0;
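
Both build functions now rely on blk_rq_map_sg() returning at most UB_MAX_REQ_SG segments. That bound is not magic: it only holds if the queue was configured to match when it was created. A sketch of the queue-limit calls this assumes (set at queue init time in ub_probe_lun, outside this diff):

/* Sketch: queue limits that keep blk_rq_map_sg() within UB_MAX_REQ_SG.
 * Configured at blk_init_queue() time, cf. ub_probe_lun(). */
blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
blk_queue_max_sectors(q, UB_MAX_SECTORS);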
@@ -919,24 +905,34 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
 	struct request *rq = cmd->back;
 	struct ub_lun *lun = cmd->lun;
-	struct gendisk *disk = lun->disk;
-	request_queue_t *q = disk->queue;
 	int uptodate;
 
-	if (blk_pc_request(rq)) {
-		/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
-		memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
-		rq->sense_len = UB_SENSE_SIZE;
-	}
-
-	if (cmd->error == 0)
+	if (cmd->error == 0) {
 		uptodate = 1;
-	else
+
+		if (blk_pc_request(rq)) {
+			if (cmd->act_len >= rq->data_len)
+				rq->data_len = 0;
+			else
+				rq->data_len -= cmd->act_len;
+		}
+	} else {
 		uptodate = 0;
 
+		if (blk_pc_request(rq)) {
+			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
+			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
+			rq->sense_len = UB_SENSE_SIZE;
+			if (sc->top_sense[0] != 0)
+				rq->errors = SAM_STAT_CHECK_CONDITION;
+			else
+				rq->errors = DID_ERROR << 16;
+		}
+	}
+
 	ub_put_cmd(lun, cmd);
 	ub_end_rq(rq, uptodate);
-	blk_start_queue(q);
+	blk_start_queue(lun->disk->queue);
 }
 
 static void ub_end_rq(struct request *rq, int uptodate)
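
Two behaviors land here. On success, a packet request gets its residue: data_len drops by the actual transfer count, floored at zero. On failure, rq->errors now distinguishes a device-reported CHECK CONDITION (first sense byte non-zero) from a transport-level failure. The mapping, restated as a sketch:

/* Sketch: error classification for failed packet requests, as above. */
rq->errors = (sc->top_sense[0] != 0)
	? SAM_STAT_CHECK_CONDITION	/* device failed; sense data valid */
	: DID_ERROR << 16;		/* transport failed; no sense */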
@@ -1014,7 +1010,7 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	sc->last_pipe = sc->send_bulk_pipe;
 	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
 	    bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
-	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+	sc->work_urb.transfer_flags = 0;
 
 	/* Fill what we shouldn't be filling, because usb-storage did so. */
 	sc->work_urb.actual_length = 0;
@@ -1103,7 +1099,6 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
 	struct urb *urb = &sc->work_urb;
 	struct bulk_cs_wrap *bcs;
-	int pipe;
 	int rc;
 
 	if (atomic_read(&sc->poison)) {
@@ -1204,38 +1199,13 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 		goto Bad_End;
 	}
 
-	if (cmd->dir == UB_DIR_NONE) {
+	if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
 		ub_state_stat(sc, cmd);
 		return;
 	}
 
-	UB_INIT_COMPLETION(sc->work_done);
-
-	if (cmd->dir == UB_DIR_READ)
-		pipe = sc->recv_bulk_pipe;
-	else
-		pipe = sc->send_bulk_pipe;
-	sc->last_pipe = pipe;
-	usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
-	    cmd->data, cmd->len, ub_urb_complete, sc);
-	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
-	sc->work_urb.actual_length = 0;
-	sc->work_urb.error_count = 0;
-	sc->work_urb.status = 0;
-
-	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
-		/* XXX Clear stalls */
-		printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
-		ub_complete(&sc->work_done);
-		ub_state_done(sc, cmd, rc);
-		return;
-	}
-
-	sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
-	add_timer(&sc->work_timer);
-
-	cmd->state = UB_CMDST_DATA;
-	ub_cmdtr_state(sc, cmd);
+	// udelay(125);		// usb-storage has this
+	ub_data_start(sc, cmd);
 
 	} else if (cmd->state == UB_CMDST_DATA) {
 		if (urb->status == -EPIPE) {
@@ -1257,16 +1227,22 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 		if (urb->status == -EOVERFLOW) {
 			/*
 			 * A babble? Failure, but we must transfer CSW now.
+			 * XXX This is going to end in perpetual babble. Reset.
 			 */
 			cmd->error = -EOVERFLOW;	/* A cheap trick... */
-		} else {
-			if (urb->status != 0)
-				goto Bad_End;
+			ub_state_stat(sc, cmd);
+			return;
 		}
+		if (urb->status != 0)
+			goto Bad_End;
 
-		cmd->act_len = urb->actual_length;
+		cmd->act_len += urb->actual_length;
 		ub_cmdtr_act_len(sc, cmd);
 
+		if (++cmd->current_sg < cmd->nsg) {
+			ub_data_start(sc, cmd);
+			return;
+		}
 		ub_state_stat(sc, cmd);
 
 	} else if (cmd->state == UB_CMDST_STAT) {
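
With several segments sent one after another, a UB_CMDST_DATA completion no longer implies the data phase is over: act_len accumulates (note the change from = to +=) and current_sg advances until all nsg segments have been transferred, and only then is the CSW requested. Unrolled from the state machine into straight-line form, the logic amounts to this sketch:

/* Sketch: the segment loop that the event-driven state machine above
 * implements. One bulk URB per scatterlist entry; status afterwards. */
for (cmd->current_sg = 0; cmd->current_sg < cmd->nsg; cmd->current_sg++) {
	ub_data_start(sc, cmd);	/* submit URB for sgv[current_sg] */
	/* ... on completion: cmd->act_len += urb->actual_length; */
}
ub_state_stat(sc, cmd);		/* all segments moved: fetch CSW */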
@@ -1401,6 +1377,46 @@ Bad_End: /* Little Excel is dead */
 
 /*
  * Factorization helper for the command state machine:
+ * Initiate a data segment transfer.
+ */
+static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
+	int pipe;
+	int rc;
+
+	UB_INIT_COMPLETION(sc->work_done);
+
+	if (cmd->dir == UB_DIR_READ)
+		pipe = sc->recv_bulk_pipe;
+	else
+		pipe = sc->send_bulk_pipe;
+	sc->last_pipe = pipe;
+	usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
+	    page_address(sg->page) + sg->offset, sg->length,
+	    ub_urb_complete, sc);
+	sc->work_urb.transfer_flags = 0;
+	sc->work_urb.actual_length = 0;
+	sc->work_urb.error_count = 0;
+	sc->work_urb.status = 0;
+
+	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
+		/* XXX Clear stalls */
+		printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
+		ub_complete(&sc->work_done);
+		ub_state_done(sc, cmd, rc);
+		return;
+	}
+
+	sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
+	add_timer(&sc->work_timer);
+
+	cmd->state = UB_CMDST_DATA;
+	ub_cmdtr_state(sc, cmd);
+}
+
+/*
+ * Factorization helper for the command state machine:
  * Finish the command.
  */
 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
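
ub_data_start() addresses the buffer with page_address(sg->page) + sg->offset, which is only valid for lowmem pages; that is why "highmem" stays on the TODO list at the top of the file. A sketch of the constraint, with a hypothetical helper name:

/* Sketch (hypothetical helper): the lowmem assumption in ub_data_start().
 * page_address() is valid only for permanently mapped pages; a highmem
 * page would need kmap()/kunmap() or a bounce buffer instead. */
static void *ub_sg_virt(struct scatterlist *sg)
{
	if (PageHighMem(sg->page))
		return NULL;	/* must not happen with bouncing set up */
	return page_address(sg->page) + sg->offset;
}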
@@ -1426,7 +1442,7 @@ static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	sc->last_pipe = sc->recv_bulk_pipe;
 	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
 	    &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
-	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+	sc->work_urb.transfer_flags = 0;
 	sc->work_urb.actual_length = 0;
 	sc->work_urb.error_count = 0;
 	sc->work_urb.status = 0;
@@ -1484,6 +1500,7 @@ static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
 	struct ub_scsi_cmd *scmd;
+	struct scatterlist *sg;
 	int rc;
 
 	if (cmd->cdb[0] == REQUEST_SENSE) {
@@ -1492,12 +1509,17 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	}
 
 	scmd = &sc->top_rqs_cmd;
+	memset(scmd, 0, sizeof(struct ub_scsi_cmd));
 	scmd->cdb[0] = REQUEST_SENSE;
 	scmd->cdb[4] = UB_SENSE_SIZE;
 	scmd->cdb_len = 6;
 	scmd->dir = UB_DIR_READ;
 	scmd->state = UB_CMDST_INIT;
-	scmd->data = sc->top_sense;
+	scmd->nsg = 1;
+	sg = &scmd->sgv[0];
+	sg->page = virt_to_page(sc->top_sense);
+	sg->offset = (unsigned int)sc->top_sense & (PAGE_SIZE-1);
+	sg->length = UB_SENSE_SIZE;
 	scmd->len = UB_SENSE_SIZE;
 	scmd->lun = cmd->lun;
 	scmd->done = ub_top_sense_done;
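
The sense buffer is embedded in struct ub_dev, so the driver wraps the existing kernel-virtual pointer in a one-entry scatterlist by hand; the same three assignments recur for the READ CAPACITY buffer in ub_sync_read_cap below. Factored out, the pattern is the sketch below (hypothetical helper; valid only for a lowmem buffer that does not cross a page boundary):

/* Sketch (hypothetical helper): build a single s/g entry around a
 * kernel-virtual buffer, as done for top_sense above. */
static void ub_sg_init_one(struct scatterlist *sg, void *buf, unsigned int len)
{
	sg->page = virt_to_page(buf);
	sg->offset = (unsigned long)buf & (PAGE_SIZE - 1);
	sg->length = len;
}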
@@ -1541,7 +1563,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
 
 	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
 	    (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
-	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+	sc->work_urb.transfer_flags = 0;
 	sc->work_urb.actual_length = 0;
 	sc->work_urb.error_count = 0;
 	sc->work_urb.status = 0;
@@ -1560,7 +1582,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
  */
 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
 {
-	unsigned char *sense = scmd->data;
+	unsigned char *sense = sc->top_sense;
 	struct ub_scsi_cmd *cmd;
 
 	/*
@@ -1852,6 +1874,7 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_capacity *ret)
 {
 	struct ub_scsi_cmd *cmd;
+	struct scatterlist *sg;
 	char *p;
 	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
 	unsigned long flags;
@@ -1872,7 +1895,11 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
 	cmd->cdb_len = 10;
 	cmd->dir = UB_DIR_READ;
 	cmd->state = UB_CMDST_INIT;
-	cmd->data = p;
+	cmd->nsg = 1;
+	sg = &cmd->sgv[0];
+	sg->page = virt_to_page(p);
+	sg->offset = (unsigned int)p & (PAGE_SIZE-1);
+	sg->length = 8;
 	cmd->len = 8;
 	cmd->lun = lun;
 	cmd->done = ub_probe_done;
@@ -2289,7 +2316,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
 	disk->driverfs_dev = &sc->intf->dev;	/* XXX Many to one ok? */
 
 	rc = -ENOMEM;
-	if ((q = blk_init_queue(ub_bd_rq_fn, &sc->lock)) == NULL)
+	if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL)
 		goto err_blkqinit;
 
 	disk->queue = q;