author	Tejun Heo <tj@kernel.org>	2010-10-15 06:56:21 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-10-15 06:56:21 -0400
commit	47526903feb52f4c26a6350370bdf74e337fcdb1 (patch)
tree	319aaf924a030ccbb680817128606defd33ac476 /arch/um/drivers
parent	93055c31045a2d5599ec613a0c6cdcefc481a460 (diff)
ubd: fix incorrect sector handling during request restart
Commit f81f2f7c (ubd: drop unnecessary rq->sector manipulation) dropped
request->sector manipulation in preparation for global request handling
cleanup; unfortunately, it incorrectly assumed that the updated sector
wasn't being used.

ubd tries to issue as many requests as possible to io_thread.  When
issuing fails due to memory pressure or other reasons, the device is put
on the restart list and issuing stops.  On IO completion, devices on the
restart list are scanned and IO issuing is restarted.

ubd issues IOs sg-by-sg and issuing can be stopped in the middle of a
request, so each device on the restart queue needs to remember where to
restart in its current request.  ubd needs to keep track of the issue
position itself because,

* blk_rq_pos(req) is now updated by the block layer to keep track of
  _completion_ position.

* Multiple io_req's for the current request may be in flight, so it's
  difficult to tell where blk_rq_pos(req) currently is.

Add ubd->rq_pos to keep track of the issue position and use it to
correctly restart io_req issue.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Richard Weinberger <richard@nod.at>
Tested-by: Richard Weinberger <richard@nod.at>
Tested-by: Chris Frey <cdfrey@foursquare.net>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
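For illustration only, a minimal standalone C sketch of the issue-position pattern the fix implements: the device keeps its own issue cursor (rq_pos, counted in 512-byte sectors), so sg-by-sg issuing can stop when a submit fails and resume later from the same spot. The names here (fake_dev, fake_sg, submit_one) are hypothetical stand-ins, not the actual ubd driver code.

#include <stdbool.h>
#include <stddef.h>

struct fake_sg {
	size_t length;			/* stand-in for a scatterlist entry */
};

struct fake_dev {
	struct fake_sg sg[8];
	int start_sg, end_sg;
	unsigned long long rq_pos;	/* issue position, in 512-byte sectors */
};

/* Pretend submission that may fail (e.g. under memory pressure). */
static bool submit_one(unsigned long long byte_off, size_t len)
{
	(void)byte_off;
	(void)len;
	return true;
}

/*
 * Issue as many sg entries as possible.  Safe to call again to restart,
 * because rq_pos and start_sg remember where the last attempt stopped.
 */
static void issue_request(struct fake_dev *dev)
{
	while (dev->start_sg < dev->end_sg) {
		struct fake_sg *sg = &dev->sg[dev->start_sg];

		/* rq_pos is in sectors; << 9 converts it to a byte offset. */
		if (!submit_one(dev->rq_pos << 9, sg->length))
			return;			/* resume here on the next call */

		dev->rq_pos += sg->length >> 9;	/* advance the issue cursor */
		dev->start_sg++;
	}
}

The point of the sketch is only that the issue cursor lives in the per-device state and is advanced after each successful submit, which is exactly what blk_rq_pos(req) can no longer be used for, since the block layer advances it at completion time.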
Diffstat (limited to 'arch/um/drivers')
-rw-r--r--	arch/um/drivers/ubd_kern.c | 9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 1bcd208c459f..9734994cba1e 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -163,6 +163,7 @@ struct ubd {
 	struct scatterlist sg[MAX_SG];
 	struct request *request;
 	int start_sg, end_sg;
+	sector_t rq_pos;
 };
 
 #define DEFAULT_COW { \
@@ -187,6 +188,7 @@ struct ubd {
 	.request = NULL, \
 	.start_sg = 0, \
 	.end_sg = 0, \
+	.rq_pos = 0, \
 }
 
 /* Protected by ubd_lock */
@@ -1228,7 +1230,6 @@ static void do_ubd_request(struct request_queue *q)
 {
 	struct io_thread_req *io_req;
 	struct request *req;
-	sector_t sector;
 	int n;
 
 	while(1){
@@ -1239,12 +1240,12 @@ static void do_ubd_request(struct request_queue *q)
 				return;
 
 			dev->request = req;
+			dev->rq_pos = blk_rq_pos(req);
 			dev->start_sg = 0;
 			dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
 		}
 
 		req = dev->request;
-		sector = blk_rq_pos(req);
 		while(dev->start_sg < dev->end_sg){
 			struct scatterlist *sg = &dev->sg[dev->start_sg];
 
@@ -1256,10 +1257,9 @@ static void do_ubd_request(struct request_queue *q)
 				return;
 			}
 			prepare_request(req, io_req,
-					(unsigned long long)sector << 9,
+					(unsigned long long)dev->rq_pos << 9,
 					sg->offset, sg->length, sg_page(sg));
 
-			sector += sg->length >> 9;
 			n = os_write_file(thread_fd, &io_req,
 					  sizeof(struct io_thread_req *));
 			if(n != sizeof(struct io_thread_req *)){
@@ -1272,6 +1272,7 @@ static void do_ubd_request(struct request_queue *q)
 				return;
 			}
 
+			dev->rq_pos += sg->length >> 9;
 			dev->start_sg++;
 		}
 		dev->end_sg = 0;