author	Pete Zaitcev <zaitcev@redhat.com>	2005-07-31 01:38:30 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2005-09-08 19:22:39 -0400
commit	ba6abf1352dc83e500a71e3ad9b39de0337f0c6b (patch)
tree	9ffeef3d2e7c36798ff38ffc450b1d3fb15c7174 /drivers
parent	64be07585893d207d23f8516948222faf746aa43 (diff)
[PATCH] USB: ub 1/3: Axboe's quasi-S/G
This is the quasi-S/G patch for ub, as suggested by Jens Axboe at OLS and implemented that night before 4 a.m. Surprisingly, it worked right away... Alas, I had to skip some OLS partying, but it was for a good cause. Now the speed of ub is quite acceptable even on partitions with small block size.

The ub driver does not really support S/G. Instead, it just tells the block layer that it does. Then, most of the time, the block layer merges requests and passes single-segment requests down to ub; everything works as before. Very rarely, ub gets an unmerged S/G request. In that case, it issues several commands to the device, one per segment.

I added a small array of counters to monitor the merging (sg_stat). This may be dropped later.

Signed-off-by: Pete Zaitcev <zaitcev@yahoo.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
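The trick fits in a sketch. What follows is not the driver code but a simplified, self-contained model (all names hypothetical) of what "issues several commands" means: when the block layer hands down a request with more than one scatterlist segment, a driver that can only move one contiguous range per command walks the segments and emits one command each, advancing the start block by the previous segment's length.

    #include <stdio.h>

    /* Hypothetical stand-in for one scatterlist segment. */
    struct seg {
            unsigned int nblks;     /* segment length, in device blocks */
    };

    /* A real driver would build a READ_10/WRITE_10 CDB here. */
    static void issue_command(unsigned int block, unsigned int nblks)
    {
            printf("cmd: start block %u, %u blocks\n", block, nblks);
    }

    int main(void)
    {
            /* An unmerged three-segment request starting at block 1000. */
            struct seg sgv[] = { { 8 }, { 16 }, { 8 } };
            unsigned int block = 1000;
            unsigned int i;

            for (i = 0; i < sizeof(sgv) / sizeof(sgv[0]); i++) {
                    issue_command(block, sgv[i].nblks);
                    block += sgv[i].nblks;
            }
            return 0;
    }

In the patch itself this loop is unrolled across command completions: ub_rw_cmd_done() calls ub_request_advance(), which rebuilds the single ub_scsi_cmd for the next segment and resubmits it, until urq->current_sg reaches urq->nsg.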
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/ub.c	185
1 file changed, 126 insertions(+), 59 deletions(-)
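The new sg_stat counters are visible through the existing diag attribute. With the "sg %d %d %d %d %d" format added below, slot N counts requests mapped to N scatterlist segments (packet commands land in slot 0), so a device whose requests are almost always merged shows a histogram dominated by slot 1. A hypothetical reading, with made-up numbers:

    qlen 0 qmax 1
    sg 3 14082 25 4 1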
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index a026567f5d18..fb3d1e9bc407 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -16,9 +16,10 @@
  * -- verify the 13 conditions and do bulk resets
  * -- kill last_pipe and simply do two-state clearing on both pipes
  * -- verify protocol (bulk) from USB descriptors (maybe...)
- * -- highmem and sg
+ * -- highmem
  * -- move top_sense and work_bcs into separate allocations (if they survive)
  *    for cache purists and esoteric architectures.
+ * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
  * -- prune comments, they are too volumnous
  * -- Exterminate P3 printks
  * -- Resove XXX's
@@ -171,7 +172,7 @@ struct bulk_cs_wrap {
  */
 struct ub_dev;
 
-#define UB_MAX_REQ_SG	1
+#define UB_MAX_REQ_SG	4
 #define UB_MAX_SECTORS 64
 
 /*
@@ -240,13 +241,21 @@ struct ub_scsi_cmd {
  */
 	char *data;			/* Requested buffer */
 	unsigned int len;		/* Requested length */
-	// struct scatterlist sgv[UB_MAX_REQ_SG];
 
 	struct ub_lun *lun;
 	void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
 	void *back;
 };
 
+struct ub_request {
+	struct request *rq;
+	unsigned char dir;
+	unsigned int current_block;
+	unsigned int current_sg;
+	unsigned int nsg;		/* sgv[nsg] */
+	struct scatterlist sgv[UB_MAX_REQ_SG];
+};
+
 /*
  */
 struct ub_capacity {
@@ -342,6 +351,8 @@ struct ub_lun {
 	int readonly;
 	int first_open;			/* Kludge. See ub_bd_open. */
 
+	struct ub_request urq;
+
 	/* Use Ingo's mempool if or when we have more than one command. */
 	/*
 	 * Currently we never need more than one command for the whole device.
@@ -389,6 +400,7 @@ struct ub_dev {
 	struct bulk_cs_wrap work_bcs;
 	struct usb_ctrlrequest work_cr;
 
+	int sg_stat[UB_MAX_REQ_SG+1];
 	struct ub_scsi_trace tr;
 };
 
@@ -398,10 +410,14 @@ static void ub_cleanup(struct ub_dev *sc);
 static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq);
 static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_scsi_cmd *cmd, struct request *rq);
-static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
-    struct request *rq);
+static void ub_scsi_build_block(struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct ub_request *urq);
+static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct request *rq);
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_end_rq(struct request *rq, int uptodate);
+static int ub_request_advance(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_request *urq, struct ub_scsi_cmd *cmd);
 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_urb_complete(struct urb *urb, struct pt_regs *pt);
 static void ub_scsi_action(unsigned long _dev);
@@ -523,6 +539,13 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, c
 	cnt += sprintf(page + cnt,
 	    "qlen %d qmax %d\n",
 	    sc->cmd_queue.qlen, sc->cmd_queue.qmax);
+	cnt += sprintf(page + cnt,
+	    "sg %d %d %d %d %d\n",
+	    sc->sg_stat[0],
+	    sc->sg_stat[1],
+	    sc->sg_stat[2],
+	    sc->sg_stat[3],
+	    sc->sg_stat[4]);
 
 	list_for_each (p, &sc->luns) {
 		lun = list_entry(p, struct ub_lun, link);
@@ -769,14 +792,15 @@ static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq)
 		return 0;
 	}
 
+	if (lun->urq.rq != NULL)
+		return -1;
 	if ((cmd = ub_get_cmd(lun)) == NULL)
 		return -1;
 	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
 
 	blkdev_dequeue_request(rq);
-
 	if (blk_pc_request(rq)) {
-		rc = ub_cmd_build_packet(sc, cmd, rq);
+		rc = ub_cmd_build_packet(sc, lun, cmd, rq);
 	} else {
 		rc = ub_cmd_build_block(sc, lun, cmd, rq);
 	}
@@ -788,10 +812,10 @@ static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq)
 	cmd->state = UB_CMDST_INIT;
 	cmd->lun = lun;
 	cmd->done = ub_rw_cmd_done;
-	cmd->back = rq;
+	cmd->back = &lun->urq;
 
 	cmd->tag = sc->tagcnt++;
-	if ((rc = ub_submit_scsi(sc, cmd)) != 0) {
+	if (ub_submit_scsi(sc, cmd) != 0) {
 		ub_put_cmd(lun, cmd);
 		ub_end_rq(rq, 0);
 		return 0;
@@ -803,12 +827,12 @@ static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq)
 static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_scsi_cmd *cmd, struct request *rq)
 {
+	struct ub_request *urq;
 	int ub_dir;
-#if 0 /* We use rq->buffer for now */
-	struct scatterlist *sg;
 	int n_elem;
-#endif
-	unsigned int block, nblks;
+
+	urq = &lun->urq;
+	memset(urq, 0, sizeof(struct ub_request));
 
 	if (rq_data_dir(rq) == WRITE)
 		ub_dir = UB_DIR_WRITE;
@@ -818,44 +842,19 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 	/*
 	 * get scatterlist from block layer
 	 */
-#if 0 /* We use rq->buffer for now */
-	sg = &cmd->sgv[0];
-	n_elem = blk_rq_map_sg(q, rq, sg);
+	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
 	if (n_elem <= 0) {
-		ub_put_cmd(lun, cmd);
-		ub_end_rq(rq, 0);
-		blk_start_queue(q);
-		return 0;		/* request with no s/g entries? */
+		printk(KERN_INFO "%s: failed request map (%d)\n",
+		    sc->name, n_elem); /* P3 */
+		return -1;		/* request with no s/g entries? */
 	}
-
-	if (n_elem != 1) {		/* Paranoia */
+	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
 		printk(KERN_WARNING "%s: request with %d segments\n",
 		    sc->name, n_elem);
-		ub_put_cmd(lun, cmd);
-		ub_end_rq(rq, 0);
-		blk_start_queue(q);
-		return 0;
-	}
-#endif
-
-	/*
-	 * XXX Unfortunately, this check does not work. It is quite possible
-	 * to get bogus non-null rq->buffer if you allow sg by mistake.
-	 */
-	if (rq->buffer == NULL) {
-		/*
-		 * This must not happen if we set the queue right.
-		 * The block level must create bounce buffers for us.
-		 */
-		static int do_print = 1;
-		if (do_print) {
-			printk(KERN_WARNING "%s: unmapped block request"
-			    " flags 0x%lx sectors %lu\n",
-			    sc->name, rq->flags, rq->nr_sectors);
-			do_print = 0;
-		}
 		return -1;
 	}
+	urq->nsg = n_elem;
+	sc->sg_stat[n_elem]++;
 
 	/*
 	 * build the command
@@ -863,10 +862,29 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 	 * The call to blk_queue_hardsect_size() guarantees that request
 	 * is aligned, but it is given in terms of 512 byte units, always.
 	 */
-	block = rq->sector >> lun->capacity.bshift;
-	nblks = rq->nr_sectors >> lun->capacity.bshift;
+	urq->current_block = rq->sector >> lun->capacity.bshift;
+	// nblks = rq->nr_sectors >> lun->capacity.bshift;
+
+	urq->rq = rq;
+	urq->current_sg = 0;
+	urq->dir = ub_dir;
+
+	ub_scsi_build_block(lun, cmd, urq);
+	return 0;
+}
+
+static void ub_scsi_build_block(struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct ub_request *urq)
+{
+	struct scatterlist *sg;
+	unsigned int block, nblks;
+
+	sg = &urq->sgv[urq->current_sg];
+
+	block = urq->current_block;
+	nblks = sg->length >> (lun->capacity.bshift + 9);
 
-	cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10;
+	cmd->cdb[0] = (urq->dir == UB_DIR_READ)? READ_10: WRITE_10;
 	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
 	cmd->cdb[2] = block >> 24;
 	cmd->cdb[3] = block >> 16;
@@ -876,16 +894,20 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 	cmd->cdb[8] = nblks;
 	cmd->cdb_len = 10;
 
-	cmd->dir = ub_dir;
-	cmd->data = rq->buffer;
-	cmd->len = rq->nr_sectors * 512;
-
-	return 0;
+	cmd->dir = urq->dir;
+	cmd->data = page_address(sg->page) + sg->offset;
+	cmd->len = sg->length;
 }
 
-static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
-    struct request *rq)
+static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct request *rq)
 {
+	struct ub_request *urq;
+
+	urq = &lun->urq;
+	memset(urq, 0, sizeof(struct ub_request));
+	urq->rq = rq;
+	sc->sg_stat[0]++;
 
 	if (rq->data_len != 0 && rq->data == NULL) {
 		static int do_print = 1;
@@ -917,12 +939,13 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
 
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
-	struct request *rq = cmd->back;
 	struct ub_lun *lun = cmd->lun;
-	struct gendisk *disk = lun->disk;
-	request_queue_t *q = disk->queue;
+	struct ub_request *urq = cmd->back;
+	struct request *rq;
 	int uptodate;
 
+	rq = urq->rq;
+
 	if (blk_pc_request(rq)) {
 		/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
 		memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
@@ -934,9 +957,19 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	else
 		uptodate = 0;
 
+	if (cmd->error == 0 && urq->current_sg+1 < urq->nsg) {
+		if (ub_request_advance(sc, lun, urq, cmd) == 0) {
+			/* Stay on target... */
+			return;
+		}
+		uptodate = 0;
+	}
+
+	urq->rq = NULL;
+
 	ub_put_cmd(lun, cmd);
 	ub_end_rq(rq, uptodate);
-	blk_start_queue(q);
+	blk_start_queue(lun->disk->queue);
 }
 
 static void ub_end_rq(struct request *rq, int uptodate)
@@ -948,6 +981,40 @@ static void ub_end_rq(struct request *rq, int uptodate)
 	end_that_request_last(rq);
 }
 
+static int ub_request_advance(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_request *urq, struct ub_scsi_cmd *cmd)
+{
+	struct scatterlist *sg;
+	unsigned int nblks;
+
+	/* XXX This is temporary, until we sort out S/G in packet requests. */
+	if (blk_pc_request(urq->rq)) {
+		printk(KERN_WARNING
+		    "2-segment packet request completed\n"); /* P3 */
+		return -1;
+	}
+
+	sg = &urq->sgv[urq->current_sg];
+	nblks = sg->length >> (lun->capacity.bshift + 9);
+	urq->current_block += nblks;
+	urq->current_sg++;
+	sg++;
+
+	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
+	ub_scsi_build_block(lun, cmd, urq);
+	cmd->state = UB_CMDST_INIT;
+	cmd->lun = lun;
+	cmd->done = ub_rw_cmd_done;
+	cmd->back = &lun->urq;
+
+	cmd->tag = sc->tagcnt++;
+	if (ub_submit_scsi(sc, cmd) != 0) {
+		return -1;
+	}
+
+	return 0;
+}
+
 /*
  * Submit a regular SCSI operation (not an auto-sense).
  *