author	Pete Zaitcev <zaitcev@redhat.com>	2005-08-15 00:16:03 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2005-09-08 19:22:40 -0400
commit	a1cf96efbabac2f8af6f75286ffcefd40b0a466c (patch)
tree	c5fefdf36832ccbef94c5e168cb67705b6275b20 /drivers/block/ub.c
parent	6c1eb8c1c3ec2df00b629ab4fe7fe04a95129f08 (diff)
[PATCH] USB: ub 4: Zaitcev's quasi-S/G
Back out Axboe-style quasi-S/G and replace it with one command and repeated URBs. This is similar to what usb-storage does, only instead of a few URBs allocated together, one URB is reused. Jens's idea was very nice, but it collapsed when I had to support packet commands for CD burning: I cannot issue two or more packet commands where the application expected only one.

However, burning does not work completely yet. cdrecord starts, recognizes the device, then aborts without writing a TOC.

Signed-off-by: Pete Zaitcev <zaitcev@yahoo.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/block/ub.c')
-rw-r--r--	drivers/block/ub.c	287
1 file changed, 123 insertions(+), 164 deletions(-)
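The shape of the fix is easiest to see in ub_data_start() and the UB_CMDST_DATA arm of ub_scsi_urb_compl() in the diff below: one ub_scsi_cmd now carries the whole scatterlist, and the single work_urb is refilled and resubmitted once per segment until current_sg reaches nsg. A rough user-space C sketch of that control flow follows; the struct and helper names here (seg, fake_urb, submit_and_complete) are illustrative stand-ins, not the driver's or the USB core's API:

	#include <stdio.h>

	struct seg { char *addr; unsigned int len; };      /* stand-in for struct scatterlist */
	struct fake_urb { char *buf; unsigned int len; };  /* stand-in for struct urb */

	/* Stand-in for usb_submit_urb() plus its completion; in the driver the
	 * completion re-enters the state machine asynchronously. */
	static void submit_and_complete(struct fake_urb *urb)
	{
		printf("transferred %u bytes\n", urb->len);
	}

	/* One command, one reusable URB: refill and resubmit per segment,
	 * the way ub_data_start() is re-invoked from each URB completion. */
	static void data_phase(struct fake_urb *urb, struct seg *sgv, unsigned int nsg)
	{
		unsigned int current_sg;

		for (current_sg = 0; current_sg < nsg; current_sg++) {
			urb->buf = sgv[current_sg].addr;   /* like usb_fill_bulk_urb() */
			urb->len = sgv[current_sg].len;
			submit_and_complete(urb);
		}
	}

	int main(void)
	{
		char a[512], b[1024];
		struct seg sgv[2] = { { a, sizeof a }, { b, sizeof b } };
		struct fake_urb urb;

		data_phase(&urb, sgv, 2);
		return 0;
	}

Because only one command (and one URB) is ever in flight, a packet command from the application maps to exactly one SCSI command on the wire, which is the property CD burning needs.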
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 692553270401..57d3279a8815 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -235,27 +235,16 @@ struct ub_scsi_cmd {
 
 	int stat_count;			/* Retries getting status. */
 
-	/*
-	 * We do not support transfers from highmem pages
-	 * because the underlying USB framework does not do what we need.
-	 */
-	char *data;			/* Requested buffer */
 	unsigned int len;		/* Requested length */
+	unsigned int current_sg;
+	unsigned int nsg;		/* sgv[nsg] */
+	struct scatterlist sgv[UB_MAX_REQ_SG];
 
 	struct ub_lun *lun;
 	void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
 	void *back;
 };
 
-struct ub_request {
-	struct request *rq;
-	unsigned char dir;
-	unsigned int current_block;
-	unsigned int current_sg;
-	unsigned int nsg;		/* sgv[nsg] */
-	struct scatterlist sgv[UB_MAX_REQ_SG];
-};
-
 /*
  */
 struct ub_capacity {
@@ -351,8 +340,6 @@ struct ub_lun {
 	int readonly;
 	int first_open;			/* Kludge. See ub_bd_open. */
 
-	struct ub_request urq;
-
 	/* Use Ingo's mempool if or when we have more than one command. */
 	/*
 	 * Currently we never need more than one command for the whole device.
@@ -410,19 +397,16 @@ static void ub_cleanup(struct ub_dev *sc);
 static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
 static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_scsi_cmd *cmd, struct request *rq);
-static void ub_scsi_build_block(struct ub_lun *lun,
-    struct ub_scsi_cmd *cmd, struct ub_request *urq);
 static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_scsi_cmd *cmd, struct request *rq);
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_end_rq(struct request *rq, int uptodate);
-static int ub_request_advance(struct ub_dev *sc, struct ub_lun *lun,
-    struct ub_request *urq, struct ub_scsi_cmd *cmd);
 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_urb_complete(struct urb *urb, struct pt_regs *pt);
 static void ub_scsi_action(unsigned long _dev);
 static void ub_scsi_dispatch(struct ub_dev *sc);
 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
 static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -793,8 +777,6 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 		return 0;
 	}
 
-	if (lun->urq.rq != NULL)
-		return -1;
 	if ((cmd = ub_get_cmd(lun)) == NULL)
 		return -1;
 	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
@@ -813,7 +795,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 	cmd->state = UB_CMDST_INIT;
 	cmd->lun = lun;
 	cmd->done = ub_rw_cmd_done;
-	cmd->back = &lun->urq;
+	cmd->back = rq;
 
 	cmd->tag = sc->tagcnt++;
 	if (ub_submit_scsi(sc, cmd) != 0) {
@@ -828,22 +810,20 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_scsi_cmd *cmd, struct request *rq)
 {
-	struct ub_request *urq;
 	int ub_dir;
 	int n_elem;
-
-	urq = &lun->urq;
-	memset(urq, 0, sizeof(struct ub_request));
+	unsigned int block, nblks;
 
 	if (rq_data_dir(rq) == WRITE)
 		ub_dir = UB_DIR_WRITE;
 	else
 		ub_dir = UB_DIR_READ;
+	cmd->dir = ub_dir;
 
 	/*
 	 * get scatterlist from block layer
 	 */
-	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
+	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]);
 	if (n_elem <= 0) {
 		printk(KERN_INFO "%s: failed request map (%d)\n",
 		    sc->name, n_elem); /* P3 */
@@ -854,7 +834,7 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 		    sc->name, n_elem);
 		return -1;
 	}
-	urq->nsg = n_elem;
+	cmd->nsg = n_elem;
 	sc->sg_stat[n_elem]++;
 
 	/*
@@ -863,29 +843,10 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 	 * The call to blk_queue_hardsect_size() guarantees that request
 	 * is aligned, but it is given in terms of 512 byte units, always.
 	 */
-	urq->current_block = rq->sector >> lun->capacity.bshift;
-	// nblks = rq->nr_sectors >> lun->capacity.bshift;
-
-	urq->rq = rq;
-	urq->current_sg = 0;
-	urq->dir = ub_dir;
+	block = rq->sector >> lun->capacity.bshift;
+	nblks = rq->nr_sectors >> lun->capacity.bshift;
 
-	ub_scsi_build_block(lun, cmd, urq);
-	return 0;
-}
-
-static void ub_scsi_build_block(struct ub_lun *lun,
-    struct ub_scsi_cmd *cmd, struct ub_request *urq)
-{
-	struct scatterlist *sg;
-	unsigned int block, nblks;
-
-	sg = &urq->sgv[urq->current_sg];
-
-	block = urq->current_block;
-	nblks = sg->length >> (lun->capacity.bshift + 9);
-
-	cmd->cdb[0] = (urq->dir == UB_DIR_READ)? READ_10: WRITE_10;
+	cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10;
 	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
 	cmd->cdb[2] = block >> 24;
 	cmd->cdb[3] = block >> 16;
@@ -895,34 +856,15 @@ static void ub_scsi_build_block(struct ub_lun *lun,
 	cmd->cdb[8] = nblks;
 	cmd->cdb_len = 10;
 
-	cmd->dir = urq->dir;
-	cmd->data = page_address(sg->page) + sg->offset;
-	cmd->len = sg->length;
+	cmd->len = rq->nr_sectors * 512;
+
+	return 0;
 }
 
 static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_scsi_cmd *cmd, struct request *rq)
 {
-	struct ub_request *urq;
+	int n_elem;
-
-	urq = &lun->urq;
-	memset(urq, 0, sizeof(struct ub_request));
-	urq->rq = rq;
-	sc->sg_stat[0]++;
-
-	if (rq->data_len != 0 && rq->data == NULL) {
-		static int do_print = 1;
-		if (do_print) {
-			printk(KERN_WARNING "%s: unmapped packet request"
-			    " flags 0x%lx length %d\n",
-			    sc->name, rq->flags, rq->data_len);
-			do_print = 0;
-		}
-		return -1;
-	}
-
-	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
-	cmd->cdb_len = rq->cmd_len;
 
 	if (rq->data_len == 0) {
 		cmd->dir = UB_DIR_NONE;
@@ -931,8 +873,29 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
 			cmd->dir = UB_DIR_WRITE;
 	else
 		cmd->dir = UB_DIR_READ;
+
 	}
-	cmd->data = rq->data;
+
+	/*
+	 * get scatterlist from block layer
+	 */
+	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]);
+	if (n_elem < 0) {
+		printk(KERN_INFO "%s: failed request map (%d)\n",
+		    sc->name, n_elem); /* P3 */
+		return -1;
+	}
+	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
+		printk(KERN_WARNING "%s: request with %d segments\n",
+		    sc->name, n_elem);
+		return -1;
+	}
+	cmd->nsg = n_elem;
+	sc->sg_stat[n_elem]++;
+
+	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
+	cmd->cdb_len = rq->cmd_len;
+
 	cmd->len = rq->data_len;
 
 	return 0;
@@ -940,33 +903,32 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
 
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
+	struct request *rq = cmd->back;
 	struct ub_lun *lun = cmd->lun;
-	struct ub_request *urq = cmd->back;
-	struct request *rq;
 	int uptodate;
 
-	rq = urq->rq;
-
-	if (blk_pc_request(rq)) {
-		/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
-		memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
-		rq->sense_len = UB_SENSE_SIZE;
-	}
-
-	if (cmd->error == 0)
+	if (cmd->error == 0) {
 		uptodate = 1;
-	else
-		uptodate = 0;
 
-	if (cmd->error == 0 && urq->current_sg+1 < urq->nsg) {
-		if (ub_request_advance(sc, lun, urq, cmd) == 0) {
-			/* Stay on target... */
-			return;
+		if (blk_pc_request(rq)) {
+			if (cmd->act_len >= rq->data_len)
+				rq->data_len = 0;
+			else
+				rq->data_len -= cmd->act_len;
 		}
+	} else {
 		uptodate = 0;
-	}
 
-	urq->rq = NULL;
+		if (blk_pc_request(rq)) {
+			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
+			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
+			rq->sense_len = UB_SENSE_SIZE;
+			if (sc->top_sense[0] != 0)
+				rq->errors = SAM_STAT_CHECK_CONDITION;
+			else
+				rq->errors = DID_ERROR << 16;
+		}
+	}
 
 	ub_put_cmd(lun, cmd);
 	ub_end_rq(rq, uptodate);
@@ -982,40 +944,6 @@ static void ub_end_rq(struct request *rq, int uptodate)
 	end_that_request_last(rq);
 }
 
-static int ub_request_advance(struct ub_dev *sc, struct ub_lun *lun,
-    struct ub_request *urq, struct ub_scsi_cmd *cmd)
-{
-	struct scatterlist *sg;
-	unsigned int nblks;
-
-	/* XXX This is temporary, until we sort out S/G in packet requests. */
-	if (blk_pc_request(urq->rq)) {
-		printk(KERN_WARNING
-		    "2-segment packet request completed\n"); /* P3 */
-		return -1;
-	}
-
-	sg = &urq->sgv[urq->current_sg];
-	nblks = sg->length >> (lun->capacity.bshift + 9);
-	urq->current_block += nblks;
-	urq->current_sg++;
-	sg++;
-
-	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
-	ub_scsi_build_block(lun, cmd, urq);
-	cmd->state = UB_CMDST_INIT;
-	cmd->lun = lun;
-	cmd->done = ub_rw_cmd_done;
-	cmd->back = &lun->urq;
-
-	cmd->tag = sc->tagcnt++;
-	if (ub_submit_scsi(sc, cmd) != 0) {
-		return -1;
-	}
-
-	return 0;
-}
-
 /*
  * Submit a regular SCSI operation (not an auto-sense).
  *
@@ -1171,7 +1099,6 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
 	struct urb *urb = &sc->work_urb;
 	struct bulk_cs_wrap *bcs;
-	int pipe;
 	int rc;
 
 	if (atomic_read(&sc->poison)) {
@@ -1272,38 +1199,13 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 			goto Bad_End;
 		}
 
-		if (cmd->dir == UB_DIR_NONE) {
+		if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
 			ub_state_stat(sc, cmd);
 			return;
 		}
 
-		UB_INIT_COMPLETION(sc->work_done);
-
-		if (cmd->dir == UB_DIR_READ)
-			pipe = sc->recv_bulk_pipe;
-		else
-			pipe = sc->send_bulk_pipe;
-		sc->last_pipe = pipe;
-		usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
-		    cmd->data, cmd->len, ub_urb_complete, sc);
-		sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
-		sc->work_urb.actual_length = 0;
-		sc->work_urb.error_count = 0;
-		sc->work_urb.status = 0;
-
-		if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
-			/* XXX Clear stalls */
-			printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
-			ub_complete(&sc->work_done);
-			ub_state_done(sc, cmd, rc);
-			return;
-		}
-
-		sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
-		add_timer(&sc->work_timer);
-
-		cmd->state = UB_CMDST_DATA;
-		ub_cmdtr_state(sc, cmd);
+		// udelay(125);		// usb-storage has this
+		ub_data_start(sc, cmd);
 
 	} else if (cmd->state == UB_CMDST_DATA) {
 		if (urb->status == -EPIPE) {
@@ -1325,16 +1227,22 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 		if (urb->status == -EOVERFLOW) {
 			/*
 			 * A babble? Failure, but we must transfer CSW now.
+			 * XXX This is going to end in perpetual babble. Reset.
 			 */
 			cmd->error = -EOVERFLOW;	/* A cheap trick... */
-		} else {
-			if (urb->status != 0)
-				goto Bad_End;
+			ub_state_stat(sc, cmd);
+			return;
 		}
+		if (urb->status != 0)
+			goto Bad_End;
 
-		cmd->act_len = urb->actual_length;
+		cmd->act_len += urb->actual_length;
 		ub_cmdtr_act_len(sc, cmd);
 
+		if (++cmd->current_sg < cmd->nsg) {
+			ub_data_start(sc, cmd);
+			return;
+		}
 		ub_state_stat(sc, cmd);
 
 	} else if (cmd->state == UB_CMDST_STAT) {
@@ -1469,6 +1377,46 @@ Bad_End: /* Little Excel is dead */
 
 /*
  * Factorization helper for the command state machine:
+ * Initiate a data segment transfer.
+ */
+static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
+	int pipe;
+	int rc;
+
+	UB_INIT_COMPLETION(sc->work_done);
+
+	if (cmd->dir == UB_DIR_READ)
+		pipe = sc->recv_bulk_pipe;
+	else
+		pipe = sc->send_bulk_pipe;
+	sc->last_pipe = pipe;
+	usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
+	    page_address(sg->page) + sg->offset, sg->length,
+	    ub_urb_complete, sc);
+	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+	sc->work_urb.actual_length = 0;
+	sc->work_urb.error_count = 0;
+	sc->work_urb.status = 0;
+
+	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
+		/* XXX Clear stalls */
+		printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
+		ub_complete(&sc->work_done);
+		ub_state_done(sc, cmd, rc);
+		return;
+	}
+
+	sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
+	add_timer(&sc->work_timer);
+
+	cmd->state = UB_CMDST_DATA;
+	ub_cmdtr_state(sc, cmd);
+}
+
+/*
+ * Factorization helper for the command state machine:
  * Finish the command.
  */
 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
@@ -1552,6 +1500,7 @@ static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
 	struct ub_scsi_cmd *scmd;
+	struct scatterlist *sg;
 	int rc;
 
 	if (cmd->cdb[0] == REQUEST_SENSE) {
@@ -1560,12 +1509,17 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	}
 
 	scmd = &sc->top_rqs_cmd;
+	memset(scmd, 0, sizeof(struct ub_scsi_cmd));
 	scmd->cdb[0] = REQUEST_SENSE;
 	scmd->cdb[4] = UB_SENSE_SIZE;
 	scmd->cdb_len = 6;
 	scmd->dir = UB_DIR_READ;
 	scmd->state = UB_CMDST_INIT;
-	scmd->data = sc->top_sense;
+	scmd->nsg = 1;
+	sg = &scmd->sgv[0];
+	sg->page = virt_to_page(sc->top_sense);
+	sg->offset = (unsigned int)sc->top_sense & (PAGE_SIZE-1);
+	sg->length = UB_SENSE_SIZE;
 	scmd->len = UB_SENSE_SIZE;
 	scmd->lun = cmd->lun;
 	scmd->done = ub_top_sense_done;
@@ -1628,7 +1582,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
  */
 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
 {
-	unsigned char *sense = scmd->data;
+	unsigned char *sense = sc->top_sense;
 	struct ub_scsi_cmd *cmd;
 
 	/*
@@ -1920,6 +1874,7 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_capacity *ret)
 {
 	struct ub_scsi_cmd *cmd;
+	struct scatterlist *sg;
 	char *p;
 	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
 	unsigned long flags;
@@ -1940,7 +1895,11 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
 	cmd->cdb_len = 10;
 	cmd->dir = UB_DIR_READ;
 	cmd->state = UB_CMDST_INIT;
-	cmd->data = p;
+	cmd->nsg = 1;
+	sg = &cmd->sgv[0];
+	sg->page = virt_to_page(p);
+	sg->offset = (unsigned int)p & (PAGE_SIZE-1);
+	sg->length = 8;
 	cmd->len = 8;
 	cmd->lun = lun;
 	cmd->done = ub_probe_done;