author     Jeff Dike <jdike@addtoit.com>            2005-09-03 18:57:46 -0400
committer  Linus Torvalds <torvalds@evo.osdl.org>   2005-09-05 03:06:23 -0400
commit     09ace81c1d737bcbb2423db235ac980cac4d5de9
tree       d31987b15d57429bd40843c02b2283b8e2d90cb0 /arch/um/drivers
parent     75e5584c89d213d6089f64f22cd899fb172e4c95
[PATCH] uml: add host AIO support to block driver
This adds AIO support to the ubd driver.
The driver breaks a struct request into IO requests to the host, based on the
hardware segments in the request and on any COW blocks covered by the request.
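As a rough illustration of that splitting rule (a userspace sketch with invented names, not the driver code itself): each hardware segment is walked sector by sector, and consecutive sectors that live in the same place - either the COW file or the backing file - are grouped into one host AIO.

```c
#include <stdio.h>

/* Toy model: a set bit means the sector has already been copied into the
 * COW file; a clear bit means it still comes from the backing file. */
static int test_bit(unsigned long long bit, const unsigned char *bitmap)
{
        return (bitmap[bit / 8] >> (bit % 8)) & 1;
}

/* Split one hardware segment [first, first + nsectors) into runs of sectors
 * that all target the same file; each run would become one host AIO. */
static void split_segment(unsigned long long first, int nsectors,
                          const unsigned char *bitmap)
{
        int start = 0, end, bit;

        do {
                bit = test_bit(first + start, bitmap);
                end = start;
                while (end < nsectors && test_bit(first + end, bitmap) == bit)
                        end++;

                printf("AIO: sectors %llu..%llu -> %s file\n",
                       first + start, first + end - 1,
                       bit ? "COW" : "backing");

                start = end;
        } while (start < nsectors);
}

int main(void)
{
        unsigned char bitmap[1] = { 0x1c };     /* sectors 2..4 already in COW */

        split_segment(0, 8, bitmap);            /* one 8-sector segment */
        return 0;
}
```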
The ubd IO thread is gone, since there is now an equivalent thread in the AIO
module.
There is provision for multiple outstanding requests now. A request isn't
retired until all of its pieces have completed. The AIO requests have a
shared count, which is decremented as IO operations complete until it reaches
0. This could possibly be moved to the request struct - I haven't looked at
this yet.
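A minimal, single-threaded sketch of that retirement scheme (hypothetical names; the real driver does its bookkeeping in the interrupt handler): the request carries a count of outstanding host operations and is retired only when the last one completes.

```c
#include <stdatomic.h>
#include <stdio.h>

struct toy_request {
        atomic_int pending;     /* outstanding host AIO operations */
        int bytes_done;
};

/* Called once per completed host AIO; the request is retired only when the
 * shared count drops to zero. */
static void aio_complete(struct toy_request *req, int bytes)
{
        req->bytes_done += bytes;
        /* atomic_fetch_sub returns the old value: 1 means this was the last piece */
        if (atomic_fetch_sub(&req->pending, 1) == 1)
                printf("request retired, %d bytes done\n", req->bytes_done);
}

int main(void)
{
        struct toy_request req = { .bytes_done = 0 };

        atomic_init(&req.pending, 3);           /* request split into 3 AIOs */
        aio_complete(&req, 4096);
        aio_complete(&req, 4096);
        aio_complete(&req, 4096);               /* last completion retires it */
        return 0;
}
```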
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/um/drivers')
-rw-r--r-- | arch/um/drivers/Makefile   |   2 |
-rw-r--r-- | arch/um/drivers/ubd_kern.c | 556 |
2 files changed, 247 insertions, 311 deletions
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index de17d4c6e02d..783e18cae090 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -13,7 +13,7 @@ mcast-objs := mcast_kern.o mcast_user.o
 net-objs := net_kern.o net_user.o
 mconsole-objs := mconsole_kern.o mconsole_user.o
 hostaudio-objs := hostaudio_kern.o
-ubd-objs := ubd_kern.o ubd_user.o
+ubd-objs := ubd_kern.o
 port-objs := port_kern.o port_user.o
 harddog-objs := harddog_kern.o harddog_user.o
 
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index f73134333f64..e77a38da4350 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -35,6 +35,7 @@
 #include "linux/blkpg.h"
 #include "linux/genhd.h"
 #include "linux/spinlock.h"
+#include "asm/atomic.h"
 #include "asm/segment.h"
 #include "asm/uaccess.h"
 #include "asm/irq.h"
@@ -53,20 +54,21 @@
 #include "mem.h"
 #include "mem_kern.h"
 #include "cow.h"
+#include "aio.h"
 
 enum ubd_req { UBD_READ, UBD_WRITE };
 
 struct io_thread_req {
-        enum ubd_req op;
+        enum aio_type op;
         int fds[2];
         unsigned long offsets[2];
         unsigned long long offset;
         unsigned long length;
         char *buffer;
         int sectorsize;
-        unsigned long sector_mask;
-        unsigned long long cow_offset;
-        unsigned long bitmap_words[2];
+        int bitmap_offset;
+        long bitmap_start;
+        long bitmap_end;
         int error;
 };
 
@@ -80,28 +82,31 @@ extern int create_cow_file(char *cow_file, char *backing_file,
                           unsigned long *bitmap_len_out,
                           int *data_offset_out);
 extern int read_cow_bitmap(int fd, void *buf, int offset, int len);
-extern void do_io(struct io_thread_req *req);
+extern void do_io(struct io_thread_req *req, struct request *r,
+                  unsigned long *bitmap);
 
-static inline int ubd_test_bit(__u64 bit, unsigned char *data)
+static inline int ubd_test_bit(__u64 bit, void *data)
 {
+        unsigned char *buffer = data;
         __u64 n;
         int bits, off;
 
-        bits = sizeof(data[0]) * 8;
+        bits = sizeof(buffer[0]) * 8;
         n = bit / bits;
         off = bit % bits;
-        return((data[n] & (1 << off)) != 0);
+        return((buffer[n] & (1 << off)) != 0);
 }
 
-static inline void ubd_set_bit(__u64 bit, unsigned char *data)
+static inline void ubd_set_bit(__u64 bit, void *data)
 {
+        unsigned char *buffer = data;
         __u64 n;
         int bits, off;
 
-        bits = sizeof(data[0]) * 8;
+        bits = sizeof(buffer[0]) * 8;
         n = bit / bits;
         off = bit % bits;
-        data[n] |= (1 << off);
+        buffer[n] |= (1 << off);
 }
 /*End stuff from ubd_user.h*/
 
@@ -110,8 +115,6 @@ static inline void ubd_set_bit(__u64 bit, unsigned char *data)
 static DEFINE_SPINLOCK(ubd_io_lock);
 static DEFINE_SPINLOCK(ubd_lock);
 
-static void (*do_ubd)(void);
-
 static int ubd_open(struct inode * inode, struct file * filp);
 static int ubd_release(struct inode * inode, struct file * file);
 static int ubd_ioctl(struct inode * inode, struct file * file,
@@ -158,6 +161,8 @@ struct cow {
         int data_offset;
 };
 
+#define MAX_SG 64
+
 struct ubd {
         char *file;
         int count;
@@ -168,6 +173,7 @@ struct ubd {
         int no_cow;
         struct cow cow;
         struct platform_device pdev;
+        struct scatterlist sg[MAX_SG];
 };
 
 #define DEFAULT_COW { \
@@ -460,80 +466,113 @@ __uml_help(fakehd,
 );
 
 static void do_ubd_request(request_queue_t * q);
-
-/* Only changed by ubd_init, which is an initcall. */
-int thread_fd = -1;
+static int in_ubd;
 
 /* Changed by ubd_handler, which is serialized because interrupts only
  * happen on CPU 0.
  */
 int intr_count = 0;
 
-/* call ubd_finish if you need to serialize */
-static void __ubd_finish(struct request *req, int error)
+static void ubd_end_request(struct request *req, int bytes, int uptodate)
 {
-        int nsect;
-
-        if(error){
-                end_request(req, 0);
-                return;
+        if (!end_that_request_first(req, uptodate, bytes >> 9)) {
+                add_disk_randomness(req->rq_disk);
+                end_that_request_last(req);
         }
-        nsect = req->current_nr_sectors;
-        req->sector += nsect;
-        req->buffer += nsect << 9;
-        req->errors = 0;
-        req->nr_sectors -= nsect;
-        req->current_nr_sectors = 0;
-        end_request(req, 1);
 }
 
-static inline void ubd_finish(struct request *req, int error)
+/* call ubd_finish if you need to serialize */
+static void __ubd_finish(struct request *req, int bytes)
 {
-        spin_lock(&ubd_io_lock);
-        __ubd_finish(req, error);
-        spin_unlock(&ubd_io_lock);
+        if(bytes < 0){
+                ubd_end_request(req, 0, 0);
+                return;
+        }
+
+        ubd_end_request(req, bytes, 1);
 }
 
-/* Called without ubd_io_lock held */
-static void ubd_handler(void)
+static inline void ubd_finish(struct request *req, int bytes)
 {
-        struct io_thread_req req;
-        struct request *rq = elv_next_request(ubd_queue);
-        int n;
-
-        do_ubd = NULL;
-        intr_count++;
-        n = os_read_file(thread_fd, &req, sizeof(req));
-        if(n != sizeof(req)){
-                printk(KERN_ERR "Pid %d - spurious interrupt in ubd_handler, "
-                       "err = %d\n", os_getpid(), -n);
-                spin_lock(&ubd_io_lock);
-                end_request(rq, 0);
-                spin_unlock(&ubd_io_lock);
-                return;
-        }
-
-        ubd_finish(rq, req.error);
-        reactivate_fd(thread_fd, UBD_IRQ);
-        do_ubd_request(ubd_queue);
+        spin_lock(&ubd_io_lock);
+        __ubd_finish(req, bytes);
+        spin_unlock(&ubd_io_lock);
 }
 
+struct bitmap_io {
+        atomic_t count;
+        struct aio_context aio;
+};
+
+struct ubd_aio {
+        struct aio_context aio;
+        struct request *req;
+        int len;
+        struct bitmap_io *bitmap;
+        void *bitmap_buf;
+};
+
+static int ubd_reply_fd = -1;
+
 static irqreturn_t ubd_intr(int irq, void *dev, struct pt_regs *unused)
 {
-        ubd_handler();
-        return(IRQ_HANDLED);
-}
+        struct aio_thread_reply reply;
+        struct ubd_aio *aio;
+        struct request *req;
+        int err, n, fd = (int) (long) dev;
 
-/* Only changed by ubd_init, which is an initcall. */
-static int io_pid = -1;
+        while(1){
+                err = os_read_file(fd, &reply, sizeof(reply));
+                if(err == -EAGAIN)
+                        break;
+                if(err < 0){
+                        printk("ubd_aio_handler - read returned err %d\n",
+                               -err);
+                        break;
+                }
 
-void kill_io_thread(void)
-{
-        if(io_pid != -1)
-                os_kill_process(io_pid, 1);
-}
+                aio = container_of(reply.data, struct ubd_aio, aio);
+                n = reply.err;
+
+                if(n == 0){
+                        req = aio->req;
+                        req->nr_sectors -= aio->len >> 9;
 
-__uml_exitcall(kill_io_thread);
+                        if((aio->bitmap != NULL) &&
+                           (atomic_dec_and_test(&aio->bitmap->count))){
+                                aio->aio = aio->bitmap->aio;
+                                aio->len = 0;
+                                kfree(aio->bitmap);
+                                aio->bitmap = NULL;
+                                submit_aio(&aio->aio);
+                        }
+                        else {
+                                if((req->nr_sectors == 0) &&
+                                   (aio->bitmap == NULL)){
+                                        int len = req->hard_nr_sectors << 9;
+                                        ubd_finish(req, len);
+                                }
+
+                                if(aio->bitmap_buf != NULL)
+                                        kfree(aio->bitmap_buf);
+                                kfree(aio);
+                        }
+                }
+                else if(n < 0){
+                        ubd_finish(aio->req, n);
+                        if(aio->bitmap != NULL)
+                                kfree(aio->bitmap);
+                        if(aio->bitmap_buf != NULL)
+                                kfree(aio->bitmap_buf);
+                        kfree(aio);
+                }
+        }
+        reactivate_fd(fd, UBD_IRQ);
+
+        do_ubd_request(ubd_queue);
+
+        return(IRQ_HANDLED);
+}
 
 static int ubd_file_size(struct ubd *dev, __u64 *size_out)
 {
@@ -569,7 +608,7 @@ static int ubd_open_dev(struct ubd *dev)
                             &dev->cow.data_offset, create_ptr);
 
         if((dev->fd == -ENOENT) && create_cow){
-                dev->fd = create_cow_file(dev->file, dev->cow.file, 
+                dev->fd = create_cow_file(dev->file, dev->cow.file,
                                           dev->openflags, 1 << 9, PAGE_SIZE,
                                           &dev->cow.bitmap_offset,
                                           &dev->cow.bitmap_len,
@@ -831,6 +870,10 @@ int ubd_init(void)
 {
         int i;
 
+        ubd_reply_fd = init_aio_irq(UBD_IRQ, "ubd", ubd_intr);
+        if(ubd_reply_fd < 0)
+                printk("Setting up ubd AIO failed, err = %d\n", ubd_reply_fd);
+
         devfs_mk_dir("ubd");
         if (register_blkdev(MAJOR_NR, "ubd"))
                 return -1;
@@ -841,6 +884,7 @@ int ubd_init(void)
                 return -1;
         }
 
+        blk_queue_max_hw_segments(ubd_queue, MAX_SG);
         if (fake_major != MAJOR_NR) {
                 char name[sizeof("ubd_nnn\0")];
 
@@ -852,40 +896,12 @@ int ubd_init(void)
         driver_register(&ubd_driver);
         for (i = 0; i < MAX_DEV; i++)
                 ubd_add(i);
+
         return 0;
 }
 
 late_initcall(ubd_init);
 
-int ubd_driver_init(void){
-        unsigned long stack;
-        int err;
-
-        /* Set by CONFIG_BLK_DEV_UBD_SYNC or ubd=sync.*/
-        if(global_openflags.s){
-                printk(KERN_INFO "ubd: Synchronous mode\n");
-                /* Letting ubd=sync be like using ubd#s= instead of ubd#= is
-                 * enough. So use anyway the io thread. */
-        }
-        stack = alloc_stack(0, 0);
-        io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *),
-                                 &thread_fd);
-        if(io_pid < 0){
-                printk(KERN_ERR
-                       "ubd : Failed to start I/O thread (errno = %d) - "
-                       "falling back to synchronous I/O\n", -io_pid);
-                io_pid = -1;
-                return(0);
-        }
-        err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr,
-                             SA_INTERRUPT, "ubd", ubd_dev);
-        if(err != 0)
-                printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err);
-        return(err);
-}
-
-device_initcall(ubd_driver_init);
-
 static int ubd_open(struct inode *inode, struct file *filp)
 {
         struct gendisk *disk = inode->i_bdev->bd_disk;
@@ -923,105 +939,55 @@ static int ubd_release(struct inode * inode, struct file * file)
         return(0);
 }
 
-static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
-                          __u64 *cow_offset, unsigned long *bitmap,
-                          __u64 bitmap_offset, unsigned long *bitmap_words,
-                          __u64 bitmap_len)
+static void cowify_bitmap(struct io_thread_req *req, unsigned long *bitmap)
 {
-        __u64 sector = io_offset >> 9;
-        int i, update_bitmap = 0;
-
-        for(i = 0; i < length >> 9; i++){
-                if(cow_mask != NULL)
-                        ubd_set_bit(i, (unsigned char *) cow_mask);
-                if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
-                        continue;
-
-                update_bitmap = 1;
-                ubd_set_bit(sector + i, (unsigned char *) bitmap);
-        }
-
-        if(!update_bitmap)
-                return;
-
-        *cow_offset = sector / (sizeof(unsigned long) * 8);
-
-        /* This takes care of the case where we're exactly at the end of the
-         * device, and *cow_offset + 1 is off the end.  So, just back it up
-         * by one word.  Thanks to Lynn Kerby for the fix and James McMechan
-         * for the original diagnosis.
-         */
-        if(*cow_offset == ((bitmap_len + sizeof(unsigned long) - 1) /
-                           sizeof(unsigned long) - 1))
-                (*cow_offset)--;
-
-        bitmap_words[0] = bitmap[*cow_offset];
-        bitmap_words[1] = bitmap[*cow_offset + 1];
-
-        *cow_offset *= sizeof(unsigned long);
-        *cow_offset += bitmap_offset;
-}
+        __u64 sector = req->offset / req->sectorsize;
+        int i;
 
-static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
-                       __u64 bitmap_offset, __u64 bitmap_len)
-{
-        __u64 sector = req->offset >> 9;
-        int i;
+        for(i = 0; i < req->length / req->sectorsize; i++){
+                if(ubd_test_bit(sector + i, bitmap))
+                        continue;
 
-        if(req->length > (sizeof(req->sector_mask) * 8) << 9)
-                panic("Operation too long");
+                if(req->bitmap_start == -1)
+                        req->bitmap_start = sector + i;
+                req->bitmap_end = sector + i + 1;
 
-        if(req->op == UBD_READ) {
-                for(i = 0; i < req->length >> 9; i++){
-                        if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
-                                ubd_set_bit(i, (unsigned char *)
-                                            &req->sector_mask);
-                }
-        }
-        else cowify_bitmap(req->offset, req->length, &req->sector_mask,
-                           &req->cow_offset, bitmap, bitmap_offset,
-                           req->bitmap_words, bitmap_len);
+                ubd_set_bit(sector + i, bitmap);
+        }
 }
 
 /* Called with ubd_io_lock held */
-static int prepare_request(struct request *req, struct io_thread_req *io_req)
+static int prepare_request(struct request *req, struct io_thread_req *io_req,
+                           unsigned long long offset, int page_offset,
+                           int len, struct page *page)
 {
         struct gendisk *disk = req->rq_disk;
         struct ubd *dev = disk->private_data;
-        __u64 offset;
-        int len;
-
-        if(req->rq_status == RQ_INACTIVE) return(1);
 
         /* This should be impossible now */
         if((rq_data_dir(req) == WRITE) && !dev->openflags.w){
                 printk("Write attempted on readonly ubd device %s\n",
                        disk->disk_name);
-                end_request(req, 0);
+                ubd_end_request(req, 0, 0);
                 return(1);
         }
 
-        offset = ((__u64) req->sector) << 9;
-        len = req->current_nr_sectors << 9;
-
         io_req->fds[0] = (dev->cow.file != NULL) ? dev->cow.fd : dev->fd;
         io_req->fds[1] = dev->fd;
-        io_req->cow_offset = -1;
         io_req->offset = offset;
         io_req->length = len;
         io_req->error = 0;
-        io_req->sector_mask = 0;
-
-        io_req->op = (rq_data_dir(req) == READ) ? UBD_READ : UBD_WRITE;
+        io_req->op = (rq_data_dir(req) == READ) ? AIO_READ : AIO_WRITE;
         io_req->offsets[0] = 0;
         io_req->offsets[1] = dev->cow.data_offset;
-        io_req->buffer = req->buffer;
+        io_req->buffer = page_address(page) + page_offset;
         io_req->sectorsize = 1 << 9;
+        io_req->bitmap_offset = dev->cow.bitmap_offset;
+        io_req->bitmap_start = -1;
+        io_req->bitmap_end = -1;
 
-        if(dev->cow.file != NULL)
-                cowify_req(io_req, dev->cow.bitmap, dev->cow.bitmap_offset,
-                           dev->cow.bitmap_len);
-
+        if((dev->cow.file != NULL) && (io_req->op == UBD_WRITE))
+                cowify_bitmap(io_req, dev->cow.bitmap);
         return(0);
 }
 
@@ -1030,30 +996,36 @@ static void do_ubd_request(request_queue_t *q)
 {
         struct io_thread_req io_req;
         struct request *req;
-        int err, n;
-
-        if(thread_fd == -1){
-                while((req = elv_next_request(q)) != NULL){
-                        err = prepare_request(req, &io_req);
-                        if(!err){
-                                do_io(&io_req);
-                                __ubd_finish(req, io_req.error);
-                        }
-                }
-        }
-        else {
-                if(do_ubd || (req = elv_next_request(q)) == NULL)
-                        return;
-                err = prepare_request(req, &io_req);
-                if(!err){
-                        do_ubd = ubd_handler;
-                        n = os_write_file(thread_fd, (char *) &io_req,
-                                          sizeof(io_req));
-                        if(n != sizeof(io_req))
-                                printk("write to io thread failed, "
-                                       "errno = %d\n", -n);
+        __u64 sector;
+        int err;
+
+        if(in_ubd)
+                return;
+        in_ubd = 1;
+        while((req = elv_next_request(q)) != NULL){
+                struct gendisk *disk = req->rq_disk;
+                struct ubd *dev = disk->private_data;
+                int n, i;
+
+                blkdev_dequeue_request(req);
+
+                sector = req->sector;
+                n = blk_rq_map_sg(q, req, dev->sg);
+
+                for(i = 0; i < n; i++){
+                        struct scatterlist *sg = &dev->sg[i];
+
+                        err = prepare_request(req, &io_req, sector << 9,
+                                              sg->offset, sg->length,
+                                              sg->page);
+                        if(err)
+                                continue;
+
+                        sector += sg->length >> 9;
+                        do_io(&io_req, req, dev->cow.bitmap);
                 }
         }
+        in_ubd = 0;
 }
 
 static int ubd_ioctl(struct inode * inode, struct file * file,
@@ -1269,131 +1241,95 @@ int create_cow_file(char *cow_file, char *backing_file, struct openflags flags,
         return(err);
 }
 
-static int update_bitmap(struct io_thread_req *req)
+void do_io(struct io_thread_req *req, struct request *r, unsigned long *bitmap)
 {
-        int n;
-
-        if(req->cow_offset == -1)
-                return(0);
-
-        n = os_seek_file(req->fds[1], req->cow_offset);
-        if(n < 0){
-                printk("do_io - bitmap lseek failed : err = %d\n", -n);
-                return(1);
-        }
-
-        n = os_write_file(req->fds[1], &req->bitmap_words,
-                          sizeof(req->bitmap_words));
-        if(n != sizeof(req->bitmap_words)){
-                printk("do_io - bitmap update failed, err = %d fd = %d\n", -n,
-                       req->fds[1]);
-                return(1);
-        }
-
-        return(0);
-}
-
-void do_io(struct io_thread_req *req)
-{
-        char *buf;
-        unsigned long len;
-        int n, nsectors, start, end, bit;
-        int err;
-        __u64 off;
-
-        nsectors = req->length / req->sectorsize;
-        start = 0;
-        do {
-                bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask);
-                end = start;
-                while((end < nsectors) &&
-                      (ubd_test_bit(end, (unsigned char *)
-                                    &req->sector_mask) == bit))
-                        end++;
-
-                off = req->offset + req->offsets[bit] +
-                        start * req->sectorsize;
-                len = (end - start) * req->sectorsize;
-                buf = &req->buffer[start * req->sectorsize];
-
-                err = os_seek_file(req->fds[bit], off);
-                if(err < 0){
-                        printk("do_io - lseek failed : err = %d\n", -err);
-                        req->error = 1;
-                        return;
-                }
-                if(req->op == UBD_READ){
-                        n = 0;
-                        do {
-                                buf = &buf[n];
-                                len -= n;
-                                n = os_read_file(req->fds[bit], buf, len);
-                                if (n < 0) {
-                                        printk("do_io - read failed, err = %d "
-                                               "fd = %d\n", -n, req->fds[bit]);
-                                        req->error = 1;
-                                        return;
-                                }
-                        } while((n < len) && (n != 0));
-                        if (n < len) memset(&buf[n], 0, len - n);
-                } else {
-                        n = os_write_file(req->fds[bit], buf, len);
-                        if(n != len){
-                                printk("do_io - write failed err = %d "
-                                       "fd = %d\n", -n, req->fds[bit]);
-                                req->error = 1;
-                                return;
-                        }
-                }
+        struct ubd_aio *aio;
+        struct bitmap_io *bitmap_io = NULL;
+        char *buf;
+        void *bitmap_buf = NULL;
+        unsigned long len, sector;
+        int nsectors, start, end, bit, err;
+        __u64 off;
+
+        if(req->bitmap_start != -1){
+                /* Round up to the nearest word */
+                int round = sizeof(unsigned long);
+                len = (req->bitmap_end - req->bitmap_start +
+                       round * 8 - 1) / (round * 8);
+                len *= round;
+
+                off = req->bitmap_start / (8 * round);
+                off *= round;
+
+                bitmap_io = kmalloc(sizeof(*bitmap_io), GFP_KERNEL);
+                if(bitmap_io == NULL){
+                        printk("Failed to kmalloc bitmap IO\n");
+                        req->error = 1;
+                        return;
+                }
 
-                start = end;
-        } while(start < nsectors);
+                bitmap_buf = kmalloc(len, GFP_KERNEL);
+                if(bitmap_buf == NULL){
+                        printk("do_io : kmalloc of bitmap chunk "
+                               "failed\n");
+                        kfree(bitmap_io);
+                        req->error = 1;
+                        return;
+                }
+                memcpy(bitmap_buf, &bitmap[off / sizeof(bitmap[0])], len);
+
+                *bitmap_io = ((struct bitmap_io)
+                        { .count = ATOMIC_INIT(0),
+                          .aio   = INIT_AIO(AIO_WRITE, req->fds[1],
+                                            bitmap_buf, len,
+                                            req->bitmap_offset + off,
+                                            ubd_reply_fd) } );
+        }
 
-        req->error = update_bitmap(req);
-}
+        nsectors = req->length / req->sectorsize;
+        start = 0;
+        end = nsectors;
+        bit = 0;
+        do {
+                if(bitmap != NULL){
+                        sector = req->offset / req->sectorsize;
+                        bit = ubd_test_bit(sector + start, bitmap);
+                        end = start;
+                        while((end < nsectors) &&
+                              (ubd_test_bit(sector + end, bitmap) == bit))
+                                end++;
+                }
 
-/* Changed in start_io_thread, which is serialized by being called only
- * from ubd_init, which is an initcall.
- */
-int kernel_fd = -1;
+                off = req->offsets[bit] + req->offset +
+                        start * req->sectorsize;
+                len = (end - start) * req->sectorsize;
+                buf = &req->buffer[start * req->sectorsize];
 
-/* Only changed by the io thread */
-int io_count = 0;
+                aio = kmalloc(sizeof(*aio), GFP_KERNEL);
+                if(aio == NULL){
+                        req->error = 1;
+                        return;
+                }
 
-int io_thread(void *arg)
-{
-        struct io_thread_req req;
-        int n;
+                *aio = ((struct ubd_aio)
+                        { .aio        = INIT_AIO(req->op, req->fds[bit], buf,
+                                                 len, off, ubd_reply_fd),
+                          .len        = len,
+                          .req        = r,
+                          .bitmap     = bitmap_io,
+                          .bitmap_buf = bitmap_buf });
+
+                if(aio->bitmap != NULL)
+                        atomic_inc(&aio->bitmap->count);
+
+                err = submit_aio(&aio->aio);
+                if(err){
+                        printk("do_io - submit_aio failed, "
+                               "err = %d\n", err);
+                        req->error = 1;
+                        return;
+                }
 
-        ignore_sigwinch_sig();
-        while(1){
-                n = os_read_file(kernel_fd, &req, sizeof(req));
-                if(n != sizeof(req)){
-                        if(n < 0)
-                                printk("io_thread - read failed, fd = %d, "
-                                       "err = %d\n", kernel_fd, -n);
-                        else {
-                                printk("io_thread - short read, fd = %d, "
-                                       "length = %d\n", kernel_fd, n);
-                        }
-                        continue;
-                }
-                io_count++;
-                do_io(&req);
-                n = os_write_file(kernel_fd, &req, sizeof(req));
-                if(n != sizeof(req))
-                        printk("io_thread - write failed, fd = %d, err = %d\n",
-                               kernel_fd, -n);
-        }
+                start = end;
+        } while(start < nsectors);
 }
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */