aboutsummaryrefslogtreecommitdiffstats
path: root/block/bsg.c
diff options
context:
space:
mode:
authorBoaz Harrosh <bharrosh@panasas.com>2009-03-24 07:23:40 -0400
committerJens Axboe <jens.axboe@oracle.com>2009-03-24 07:35:17 -0400
commit05378940caf979a8655c18b18a17213dcfa52412 (patch)
treeb72d29396fd6aa6f4485af638090066c7280325b /block/bsg.c
parent0061d38642244892e17156f005bd7055fe744644 (diff)
bsg: add support for tail queuing
Currently, as inherited from sg.c, bsg will submit asynchronous requests at the head of the queue (using "at_head" set in the call to blk_execute_rq_nowait()). This is bad in situations where the queues are full: requests will execute out of order, and this can cause starvation of the first submitted requests. The sg_io_v4->flags member is used, and a bit is allocated to denote Q_AT_TAIL. Zero means queue at_head as before, to be compatible with old code on the write/read path. The SG_IO code path's behavior was changed to be the same as the write/read behavior. SG_IO was very rarely used, and breaking compatibility with it is OK at this stage. sg_io_hdr in sg.h also has a flags member and uses 3 bits from the first nibble and one bit from the last nibble. Even though none of these bits are supported by bsg, the second nibble is allocated for use by bsg — just in case. Signed-off-by: Boaz Harrosh <bharrosh@panasas.com> CC: Douglas Gilbert <dgilbert@interlog.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/bsg.c')
-rw-r--r--block/bsg.c9
1 file changed, 7 insertions, 2 deletions
diff --git a/block/bsg.c b/block/bsg.c
index 0ce8806dd0c..0f63b91d0af 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -353,6 +353,8 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
353static void bsg_add_command(struct bsg_device *bd, struct request_queue *q, 353static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
354 struct bsg_command *bc, struct request *rq) 354 struct bsg_command *bc, struct request *rq)
355{ 355{
356 int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));
357
356 /* 358 /*
357 * add bc command to busy queue and submit rq for io 359 * add bc command to busy queue and submit rq for io
358 */ 360 */
@@ -368,7 +370,7 @@ static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
368 dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc); 370 dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
369 371
370 rq->end_io_data = bc; 372 rq->end_io_data = bc;
371 blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io); 373 blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
372} 374}
373 375
374static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd) 376static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
@@ -924,6 +926,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
924 struct request *rq; 926 struct request *rq;
925 struct bio *bio, *bidi_bio = NULL; 927 struct bio *bio, *bidi_bio = NULL;
926 struct sg_io_v4 hdr; 928 struct sg_io_v4 hdr;
929 int at_head;
927 u8 sense[SCSI_SENSE_BUFFERSIZE]; 930 u8 sense[SCSI_SENSE_BUFFERSIZE];
928 931
929 if (copy_from_user(&hdr, uarg, sizeof(hdr))) 932 if (copy_from_user(&hdr, uarg, sizeof(hdr)))
@@ -936,7 +939,9 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
936 bio = rq->bio; 939 bio = rq->bio;
937 if (rq->next_rq) 940 if (rq->next_rq)
938 bidi_bio = rq->next_rq->bio; 941 bidi_bio = rq->next_rq->bio;
939 blk_execute_rq(bd->queue, NULL, rq, 0); 942
943 at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
944 blk_execute_rq(bd->queue, NULL, rq, at_head);
940 ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio); 945 ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
941 946
942 if (copy_to_user(uarg, &hdr, sizeof(hdr))) 947 if (copy_to_user(uarg, &hdr, sizeof(hdr)))