author	Arnd Bergmann <arnd@arndb.de>	2009-06-12 03:53:47 -0400
committer	Arnd Bergmann <arnd@arndb.de>	2009-06-12 05:32:58 -0400
commit	5b02ee3d219f9e01b6e9146e25613822cfc2e5ce (patch)
tree	7ce9126738c3cf4b37d67170d0e4b34818c057a9 /arch/um
parent	26a28fa4fea5b8c65713aa50c124f76a88c7924d (diff)
parent	8ebf975608aaebd7feb33d77f07ba21a6380e086 (diff)
asm-generic: merge branch 'master' of torvalds/linux-2.6
Fixes a merge conflict against the x86 tree caused by a fix to atomic.h
which I renamed to atomic_long.h.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/um')
-rw-r--r--	arch/um/drivers/ubd_kern.c | 36
1 file changed, 7 insertions(+), 29 deletions(-)
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index f934225fd8ef..aa9e926e13d7 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -451,23 +451,6 @@ static void do_ubd_request(struct request_queue * q);
 
 /* Only changed by ubd_init, which is an initcall. */
 static int thread_fd = -1;
-
-static void ubd_end_request(struct request *req, int bytes, int error)
-{
-	blk_end_request(req, error, bytes);
-}
-
-/* Callable only from interrupt context - otherwise you need to do
- * spin_lock_irq()/spin_lock_irqsave() */
-static inline void ubd_finish(struct request *req, int bytes)
-{
-	if(bytes < 0){
-		ubd_end_request(req, 0, -EIO);
-		return;
-	}
-	ubd_end_request(req, bytes, 0);
-}
-
 static LIST_HEAD(restart);
 
 /* XXX - move this inside ubd_intr. */
@@ -475,7 +458,6 @@ static LIST_HEAD(restart);
 static void ubd_handler(void)
 {
 	struct io_thread_req *req;
-	struct request *rq;
 	struct ubd *ubd;
 	struct list_head *list, *next_ele;
 	unsigned long flags;
@@ -492,10 +474,7 @@ static void ubd_handler(void)
 			return;
 		}
 
-		rq = req->req;
-		rq->nr_sectors -= req->length >> 9;
-		if(rq->nr_sectors == 0)
-			ubd_finish(rq, rq->hard_nr_sectors << 9);
+		blk_end_request(req->req, 0, req->length);
 		kfree(req);
 	}
 	reactivate_fd(thread_fd, UBD_IRQ);
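
The hunks above fold the driver-private ubd_end_request()/ubd_finish()
helpers and the manual rq->nr_sectors accounting into a single
blk_end_request() call, letting the block core do the partial-completion
bookkeeping. A minimal sketch of that calling convention, assuming a
driver that completes one chunk of a request at a time (the skeleton and
the complete_chunk() name are illustrative, not ubd code):

#include <linux/blkdev.h>

static void complete_chunk(struct request *rq, int error, unsigned int bytes)
{
	/*
	 * blk_end_request() completes 'bytes' of rq and returns true
	 * while the request still has bytes outstanding; once it
	 * returns false, the block core has finished and released rq,
	 * replacing the removed rq->nr_sectors arithmetic.
	 */
	if (!blk_end_request(rq, error, bytes))
		return;	/* fully completed; rq must not be touched again */

	/* partial completion: further chunks of rq remain to be issued */
}
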
@@ -1243,27 +1222,26 @@ static void do_ubd_request(struct request_queue *q)
 {
 	struct io_thread_req *io_req;
 	struct request *req;
-	int n, last_sectors;
+	sector_t sector;
+	int n;
 
 	while(1){
 		struct ubd *dev = q->queuedata;
 		if(dev->end_sg == 0){
-			struct request *req = elv_next_request(q);
+			struct request *req = blk_fetch_request(q);
 			if(req == NULL)
 				return;
 
 			dev->request = req;
-			blkdev_dequeue_request(req);
 			dev->start_sg = 0;
 			dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
 		}
 
 		req = dev->request;
-		last_sectors = 0;
+		sector = blk_rq_pos(req);
 		while(dev->start_sg < dev->end_sg){
 			struct scatterlist *sg = &dev->sg[dev->start_sg];
 
-			req->sector += last_sectors;
 			io_req = kmalloc(sizeof(struct io_thread_req),
 					 GFP_ATOMIC);
 			if(io_req == NULL){
@@ -1272,10 +1250,10 @@ static void do_ubd_request(struct request_queue *q)
 				return;
 			}
 			prepare_request(req, io_req,
-					(unsigned long long) req->sector << 9,
+					(unsigned long long)sector << 9,
 					sg->offset, sg->length, sg_page(sg));
 
-			last_sectors = sg->length >> 9;
+			sector += sg->length >> 9;
 			n = os_write_file(thread_fd, &io_req,
 					  sizeof(struct io_thread_req *));
 			if(n != sizeof(struct io_thread_req *)){
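
The do_ubd_request() hunks track the block-layer API cleanup merged
here: elv_next_request() plus an explicit blkdev_dequeue_request()
become a single blk_fetch_request() call, and the driver keeps its own
running offset via the blk_rq_pos() accessor instead of mutating
req->sector directly. A minimal sketch of the new idiom, with the
hypothetical example_request_fn() standing in for a driver's request
function (illustrative only, not ubd code):

#include <linux/blkdev.h>

static void example_request_fn(struct request_queue *q)
{
	struct request *req;

	/*
	 * Old idiom: peek with elv_next_request(q), then detach the
	 * request with blkdev_dequeue_request(req) in a second step.
	 * blk_fetch_request() peeks and dequeues in one call.
	 */
	while ((req = blk_fetch_request(q)) != NULL) {
		/*
		 * Position comes from blk_rq_pos(); the driver advances
		 * a local copy per segment rather than writing back to
		 * the request, as the loop above now does with 'sector'.
		 */
		sector_t sector = blk_rq_pos(req);

		/* ... map segments and submit I/O starting at 'sector' ... */
		(void)sector;
	}
}
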