diff options
author | Jeff Dike <jdike@addtoit.com> | 2007-05-06 17:51:37 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-07 15:13:03 -0400 |
commit | 2adcec2197897365e0a0f657f1098cbfdb44bc8b (patch) | |
tree | 90e0f152271ed95f7b2836124d3871a5a9b197f0 /arch/um/drivers/ubd_kern.c | |
parent | a0044bdf60c212366a314da09ca624cb315906e2 (diff) |
uml: send pointers instead of structures to I/O thread
Instead of writing entire structures between UML and the I/O thread, we send
pointers. This cuts down on the amount of data being copied and possibly
allows more requests to be pending between the two.
This requires that the requests be kmalloced and freed instead of living on
the stack.
Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/um/drivers/ubd_kern.c')
-rw-r--r-- | arch/um/drivers/ubd_kern.c | 41 |
1 files changed, 26 insertions, 15 deletions
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 5ef47b73ce99..9200a457eb98 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -503,7 +503,7 @@ static LIST_HEAD(restart);
503 | /* Called without dev->lock held, and only in interrupt context. */ | 503 | /* Called without dev->lock held, and only in interrupt context. */ |
504 | static void ubd_handler(void) | 504 | static void ubd_handler(void) |
505 | { | 505 | { |
506 | struct io_thread_req req; | 506 | struct io_thread_req *req; |
507 | struct request *rq; | 507 | struct request *rq; |
508 | struct ubd *ubd; | 508 | struct ubd *ubd; |
509 | struct list_head *list, *next_ele; | 509 | struct list_head *list, *next_ele; |
@@ -511,7 +511,8 @@ static void ubd_handler(void)
511 | int n; | 511 | int n; |
512 | 512 | ||
513 | while(1){ | 513 | while(1){ |
514 | n = os_read_file_k(thread_fd, &req, sizeof(req)); | 514 | n = os_read_file_k(thread_fd, &req, |
515 | sizeof(struct io_thread_req *)); | ||
515 | if(n != sizeof(req)){ | 516 | if(n != sizeof(req)){ |
516 | if(n == -EAGAIN) | 517 | if(n == -EAGAIN) |
517 | break; | 518 | break; |
@@ -520,10 +521,11 @@ static void ubd_handler(void)
520 | return; | 521 | return; |
521 | } | 522 | } |
522 | 523 | ||
523 | rq = req.req; | 524 | rq = req->req; |
524 | rq->nr_sectors -= req.length >> 9; | 525 | rq->nr_sectors -= req->length >> 9; |
525 | if(rq->nr_sectors == 0) | 526 | if(rq->nr_sectors == 0) |
526 | ubd_finish(rq, rq->hard_nr_sectors << 9); | 527 | ubd_finish(rq, rq->hard_nr_sectors << 9); |
528 | kfree(req); | ||
527 | } | 529 | } |
528 | reactivate_fd(thread_fd, UBD_IRQ); | 530 | reactivate_fd(thread_fd, UBD_IRQ); |
529 | 531 | ||
@@ -1078,7 +1080,7 @@ static void prepare_request(struct request *req, struct io_thread_req *io_req,
1078 | /* Called with dev->lock held */ | 1080 | /* Called with dev->lock held */ |
1079 | static void do_ubd_request(request_queue_t *q) | 1081 | static void do_ubd_request(request_queue_t *q) |
1080 | { | 1082 | { |
1081 | struct io_thread_req io_req; | 1083 | struct io_thread_req *io_req; |
1082 | struct request *req; | 1084 | struct request *req; |
1083 | int n; | 1085 | int n; |
1084 | 1086 | ||
@@ -1099,13 +1101,20 @@ static void do_ubd_request(request_queue_t *q)
1099 | while(dev->start_sg < dev->end_sg){ | 1101 | while(dev->start_sg < dev->end_sg){ |
1100 | struct scatterlist *sg = &dev->sg[dev->start_sg]; | 1102 | struct scatterlist *sg = &dev->sg[dev->start_sg]; |
1101 | 1103 | ||
1102 | prepare_request(req, &io_req, | 1104 | io_req = kmalloc(sizeof(struct io_thread_req), |
1105 | GFP_KERNEL | GFP_ATOMIC); | ||
1106 | if(io_req == NULL){ | ||
1107 | if(list_empty(&dev->restart)) | ||
1108 | list_add(&dev->restart, &restart); | ||
1109 | return; | ||
1110 | } | ||
1111 | prepare_request(req, io_req, | ||
1103 | (unsigned long long) req->sector << 9, | 1112 | (unsigned long long) req->sector << 9, |
1104 | sg->offset, sg->length, sg->page); | 1113 | sg->offset, sg->length, sg->page); |
1105 | 1114 | ||
1106 | n = os_write_file_k(thread_fd, (char *) &io_req, | 1115 | n = os_write_file_k(thread_fd, &io_req, |
1107 | sizeof(io_req)); | 1116 | sizeof(struct io_thread_req *)); |
1108 | if(n != sizeof(io_req)){ | 1117 | if(n != sizeof(struct io_thread_req *)){ |
1109 | if(n != -EAGAIN) | 1118 | if(n != -EAGAIN) |
1110 | printk("write to io thread failed, " | 1119 | printk("write to io thread failed, " |
1111 | "errno = %d\n", -n); | 1120 | "errno = %d\n", -n); |
@@ -1437,13 +1446,14 @@ static int io_count = 0;
1437 | 1446 | ||
1438 | int io_thread(void *arg) | 1447 | int io_thread(void *arg) |
1439 | { | 1448 | { |
1440 | struct io_thread_req req; | 1449 | struct io_thread_req *req; |
1441 | int n; | 1450 | int n; |
1442 | 1451 | ||
1443 | ignore_sigwinch_sig(); | 1452 | ignore_sigwinch_sig(); |
1444 | while(1){ | 1453 | while(1){ |
1445 | n = os_read_file_k(kernel_fd, &req, sizeof(req)); | 1454 | n = os_read_file_k(kernel_fd, &req, |
1446 | if(n != sizeof(req)){ | 1455 | sizeof(struct io_thread_req *)); |
1456 | if(n != sizeof(struct io_thread_req *)){ | ||
1447 | if(n < 0) | 1457 | if(n < 0) |
1448 | printk("io_thread - read failed, fd = %d, " | 1458 | printk("io_thread - read failed, fd = %d, " |
1449 | "err = %d\n", kernel_fd, -n); | 1459 | "err = %d\n", kernel_fd, -n); |
@@ -1454,9 +1464,10 @@ int io_thread(void *arg)
1454 | continue; | 1464 | continue; |
1455 | } | 1465 | } |
1456 | io_count++; | 1466 | io_count++; |
1457 | do_io(&req); | 1467 | do_io(req); |
1458 | n = os_write_file_k(kernel_fd, &req, sizeof(req)); | 1468 | n = os_write_file_k(kernel_fd, &req, |
1459 | if(n != sizeof(req)) | 1469 | sizeof(struct io_thread_req *)); |
1470 | if(n != sizeof(struct io_thread_req *)) | ||
1460 | printk("io_thread - write failed, fd = %d, err = %d\n", | 1471 | printk("io_thread - write failed, fd = %d, err = %d\n", |
1461 | kernel_fd, -n); | 1472 | kernel_fd, -n); |
1462 | } | 1473 | } |