author    Jeff Dike <jdike@addtoit.com>              2005-10-10 23:10:32 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>      2005-10-12 11:22:26 -0400
commit    91acb21f084aa244f26839406ae7ed8aa3668058
tree      5a186b9b35d2e26aaf1c85441125611c9c88dd18   /arch/um/drivers/ubd_kern.c
parent    da64c6ee6bb71bfb3f09d9bb89ce1aa4b1ee7e89

[PATCH] uml: revert block driver use of host AIO

The patch to use host AIO support that I submitted early after 2.6.13 exposed
some problems in the block driver.  I have fixes for these, but am not
comfortable putting them into 2.6.14 at this late date.

So, this patch reverts the use of host AIO.  I will resubmit the original
patch, plus fixes to the driver, after 2.6.14 in order to get a reasonable
amount of testing before they're exposed to the general public.

Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/um/drivers/ubd_kern.c')
 -rw-r--r--  arch/um/drivers/ubd_kern.c | 556
 1 file changed, 310 insertions(+), 246 deletions(-)
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index e77a38da4350..f73134333f64 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -35,7 +35,6 @@
 #include "linux/blkpg.h"
 #include "linux/genhd.h"
 #include "linux/spinlock.h"
-#include "asm/atomic.h"
 #include "asm/segment.h"
 #include "asm/uaccess.h"
 #include "asm/irq.h"
@@ -54,21 +53,20 @@
 #include "mem.h"
 #include "mem_kern.h"
 #include "cow.h"
-#include "aio.h"
 
 enum ubd_req { UBD_READ, UBD_WRITE };
 
 struct io_thread_req {
-	enum aio_type op;
+	enum ubd_req op;
 	int fds[2];
 	unsigned long offsets[2];
 	unsigned long long offset;
 	unsigned long length;
 	char *buffer;
 	int sectorsize;
-	int bitmap_offset;
-	long bitmap_start;
-	long bitmap_end;
+	unsigned long sector_mask;
+	unsigned long long cow_offset;
+	unsigned long bitmap_words[2];
 	int error;
 };
 
@@ -82,31 +80,28 @@ extern int create_cow_file(char *cow_file, char *backing_file,
			   unsigned long *bitmap_len_out,
			   int *data_offset_out);
 extern int read_cow_bitmap(int fd, void *buf, int offset, int len);
-extern void do_io(struct io_thread_req *req, struct request *r,
-		  unsigned long *bitmap);
+extern void do_io(struct io_thread_req *req);
 
-static inline int ubd_test_bit(__u64 bit, void *data)
+static inline int ubd_test_bit(__u64 bit, unsigned char *data)
 {
-	unsigned char *buffer = data;
 	__u64 n;
 	int bits, off;
 
-	bits = sizeof(buffer[0]) * 8;
+	bits = sizeof(data[0]) * 8;
 	n = bit / bits;
 	off = bit % bits;
-	return((buffer[n] & (1 << off)) != 0);
+	return((data[n] & (1 << off)) != 0);
 }
 
-static inline void ubd_set_bit(__u64 bit, void *data)
+static inline void ubd_set_bit(__u64 bit, unsigned char *data)
 {
-	unsigned char *buffer = data;
 	__u64 n;
 	int bits, off;
 
-	bits = sizeof(buffer[0]) * 8;
+	bits = sizeof(data[0]) * 8;
 	n = bit / bits;
 	off = bit % bits;
-	buffer[n] |= (1 << off);
+	data[n] |= (1 << off);
 }
 /*End stuff from ubd_user.h*/
 
@@ -115,6 +110,8 @@ static inline void ubd_set_bit(__u64 bit, void *data)
 static DEFINE_SPINLOCK(ubd_io_lock);
 static DEFINE_SPINLOCK(ubd_lock);
 
+static void (*do_ubd)(void);
+
 static int ubd_open(struct inode * inode, struct file * filp);
 static int ubd_release(struct inode * inode, struct file * file);
 static int ubd_ioctl(struct inode * inode, struct file * file,
@@ -161,8 +158,6 @@ struct cow {
 	int data_offset;
 };
 
-#define MAX_SG 64
-
 struct ubd {
 	char *file;
 	int count;
@@ -173,7 +168,6 @@ struct ubd {
 	int no_cow;
 	struct cow cow;
 	struct platform_device pdev;
-	struct scatterlist sg[MAX_SG];
 };
 
 #define DEFAULT_COW { \
@@ -466,114 +460,81 @@ __uml_help(fakehd,
 );
 
 static void do_ubd_request(request_queue_t * q);
-static int in_ubd;
+
+/* Only changed by ubd_init, which is an initcall. */
+int thread_fd = -1;
 
 /* Changed by ubd_handler, which is serialized because interrupts only
  * happen on CPU 0.
  */
 int intr_count = 0;
 
-static void ubd_end_request(struct request *req, int bytes, int uptodate)
+/* call ubd_finish if you need to serialize */
+static void __ubd_finish(struct request *req, int error)
 {
-	if (!end_that_request_first(req, uptodate, bytes >> 9)) {
-		add_disk_randomness(req->rq_disk);
-		end_that_request_last(req);
+	int nsect;
+
+	if(error){
+		end_request(req, 0);
+		return;
 	}
+	nsect = req->current_nr_sectors;
+	req->sector += nsect;
+	req->buffer += nsect << 9;
+	req->errors = 0;
+	req->nr_sectors -= nsect;
+	req->current_nr_sectors = 0;
+	end_request(req, 1);
 }
 
-/* call ubd_finish if you need to serialize */
-static void __ubd_finish(struct request *req, int bytes)
+static inline void ubd_finish(struct request *req, int error)
 {
-	if(bytes < 0){
-		ubd_end_request(req, 0, 0);
-		return;
-	}
-
-	ubd_end_request(req, bytes, 1);
+	spin_lock(&ubd_io_lock);
+	__ubd_finish(req, error);
+	spin_unlock(&ubd_io_lock);
 }
 
-static inline void ubd_finish(struct request *req, int bytes)
+/* Called without ubd_io_lock held */
+static void ubd_handler(void)
 {
-	spin_lock(&ubd_io_lock);
-	__ubd_finish(req, bytes);
-	spin_unlock(&ubd_io_lock);
+	struct io_thread_req req;
+	struct request *rq = elv_next_request(ubd_queue);
+	int n;
+
+	do_ubd = NULL;
+	intr_count++;
+	n = os_read_file(thread_fd, &req, sizeof(req));
+	if(n != sizeof(req)){
+		printk(KERN_ERR "Pid %d - spurious interrupt in ubd_handler, "
+		       "err = %d\n", os_getpid(), -n);
+		spin_lock(&ubd_io_lock);
+		end_request(rq, 0);
+		spin_unlock(&ubd_io_lock);
+		return;
+	}
+
+	ubd_finish(rq, req.error);
+	reactivate_fd(thread_fd, UBD_IRQ);
+	do_ubd_request(ubd_queue);
 }
 
-struct bitmap_io {
-	atomic_t count;
-	struct aio_context aio;
-};
-
-struct ubd_aio {
-	struct aio_context aio;
-	struct request *req;
-	int len;
-	struct bitmap_io *bitmap;
-	void *bitmap_buf;
-};
-
-static int ubd_reply_fd = -1;
-
 static irqreturn_t ubd_intr(int irq, void *dev, struct pt_regs *unused)
 {
-	struct aio_thread_reply reply;
-	struct ubd_aio *aio;
-	struct request *req;
-	int err, n, fd = (int) (long) dev;
-
-	while(1){
-		err = os_read_file(fd, &reply, sizeof(reply));
-		if(err == -EAGAIN)
-			break;
-		if(err < 0){
-			printk("ubd_aio_handler - read returned err %d\n",
-			       -err);
-			break;
-		}
-
-		aio = container_of(reply.data, struct ubd_aio, aio);
-		n = reply.err;
-
-		if(n == 0){
-			req = aio->req;
-			req->nr_sectors -= aio->len >> 9;
-
-			if((aio->bitmap != NULL) &&
-			   (atomic_dec_and_test(&aio->bitmap->count))){
-				aio->aio = aio->bitmap->aio;
-				aio->len = 0;
-				kfree(aio->bitmap);
-				aio->bitmap = NULL;
-				submit_aio(&aio->aio);
-			}
-			else {
-				if((req->nr_sectors == 0) &&
-				   (aio->bitmap == NULL)){
-					int len = req->hard_nr_sectors << 9;
-					ubd_finish(req, len);
-				}
-
-				if(aio->bitmap_buf != NULL)
-					kfree(aio->bitmap_buf);
-				kfree(aio);
-			}
-		}
-		else if(n < 0){
-			ubd_finish(aio->req, n);
-			if(aio->bitmap != NULL)
-				kfree(aio->bitmap);
-			if(aio->bitmap_buf != NULL)
-				kfree(aio->bitmap_buf);
-			kfree(aio);
-		}
-	}
-	reactivate_fd(fd, UBD_IRQ);
+	ubd_handler();
+	return(IRQ_HANDLED);
+}
 
-	do_ubd_request(ubd_queue);
+/* Only changed by ubd_init, which is an initcall. */
+static int io_pid = -1;
 
-	return(IRQ_HANDLED);
+void kill_io_thread(void)
+{
+	if(io_pid != -1)
+		os_kill_process(io_pid, 1);
 }
 
+__uml_exitcall(kill_io_thread);
+
 static int ubd_file_size(struct ubd *dev, __u64 *size_out)
 {
 	char *file;
@@ -608,7 +569,7 @@ static int ubd_open_dev(struct ubd *dev)
 			      &dev->cow.data_offset, create_ptr);
 
 	if((dev->fd == -ENOENT) && create_cow){
-		dev->fd = create_cow_file(dev->file, dev->cow.file, 
+		dev->fd = create_cow_file(dev->file, dev->cow.file,
 					  dev->openflags, 1 << 9, PAGE_SIZE,
 					  &dev->cow.bitmap_offset,
 					  &dev->cow.bitmap_len,
@@ -870,10 +831,6 @@ int ubd_init(void)
 {
 	int i;
 
-	ubd_reply_fd = init_aio_irq(UBD_IRQ, "ubd", ubd_intr);
-	if(ubd_reply_fd < 0)
-		printk("Setting up ubd AIO failed, err = %d\n", ubd_reply_fd);
-
 	devfs_mk_dir("ubd");
 	if (register_blkdev(MAJOR_NR, "ubd"))
 		return -1;
@@ -884,7 +841,6 @@ int ubd_init(void)
 		return -1;
 	}
 
-	blk_queue_max_hw_segments(ubd_queue, MAX_SG);
 	if (fake_major != MAJOR_NR) {
 		char name[sizeof("ubd_nnn\0")];
 
@@ -896,12 +852,40 @@ int ubd_init(void)
 	driver_register(&ubd_driver);
 	for (i = 0; i < MAX_DEV; i++)
 		ubd_add(i);
-
 	return 0;
 }
 
 late_initcall(ubd_init);
 
+int ubd_driver_init(void){
+	unsigned long stack;
+	int err;
+
+	/* Set by CONFIG_BLK_DEV_UBD_SYNC or ubd=sync.*/
+	if(global_openflags.s){
+		printk(KERN_INFO "ubd: Synchronous mode\n");
+		/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
+		 * enough. So use anyway the io thread. */
+	}
+	stack = alloc_stack(0, 0);
+	io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *),
+				 &thread_fd);
+	if(io_pid < 0){
+		printk(KERN_ERR
+		       "ubd : Failed to start I/O thread (errno = %d) - "
+		       "falling back to synchronous I/O\n", -io_pid);
+		io_pid = -1;
+		return(0);
+	}
+	err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr,
+			     SA_INTERRUPT, "ubd", ubd_dev);
+	if(err != 0)
+		printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err);
+	return(err);
+}
+
+device_initcall(ubd_driver_init);
+
 static int ubd_open(struct inode *inode, struct file *filp)
 {
 	struct gendisk *disk = inode->i_bdev->bd_disk;
@@ -939,55 +923,105 @@ static int ubd_release(struct inode * inode, struct file * file)
 	return(0);
 }
 
-static void cowify_bitmap(struct io_thread_req *req, unsigned long *bitmap)
+static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
+			  __u64 *cow_offset, unsigned long *bitmap,
+			  __u64 bitmap_offset, unsigned long *bitmap_words,
+			  __u64 bitmap_len)
 {
-	__u64 sector = req->offset / req->sectorsize;
-	int i;
+	__u64 sector = io_offset >> 9;
+	int i, update_bitmap = 0;
+
+	for(i = 0; i < length >> 9; i++){
+		if(cow_mask != NULL)
+			ubd_set_bit(i, (unsigned char *) cow_mask);
+		if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
+			continue;
 
-	for(i = 0; i < req->length / req->sectorsize; i++){
-		if(ubd_test_bit(sector + i, bitmap))
-			continue;
+		update_bitmap = 1;
+		ubd_set_bit(sector + i, (unsigned char *) bitmap);
+	}
+
+	if(!update_bitmap)
+		return;
 
-		if(req->bitmap_start == -1)
-			req->bitmap_start = sector + i;
-		req->bitmap_end = sector + i + 1;
+	*cow_offset = sector / (sizeof(unsigned long) * 8);
 
-		ubd_set_bit(sector + i, bitmap);
-	}
+	/* This takes care of the case where we're exactly at the end of the
+	 * device, and *cow_offset + 1 is off the end.  So, just back it up
+	 * by one word.  Thanks to Lynn Kerby for the fix and James McMechan
+	 * for the original diagnosis.
+	 */
+	if(*cow_offset == ((bitmap_len + sizeof(unsigned long) - 1) /
+			   sizeof(unsigned long) - 1))
+		(*cow_offset)--;
+
+	bitmap_words[0] = bitmap[*cow_offset];
+	bitmap_words[1] = bitmap[*cow_offset + 1];
+
+	*cow_offset *= sizeof(unsigned long);
+	*cow_offset += bitmap_offset;
+}
+
+static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
+		       __u64 bitmap_offset, __u64 bitmap_len)
+{
+	__u64 sector = req->offset >> 9;
+	int i;
+
+	if(req->length > (sizeof(req->sector_mask) * 8) << 9)
+		panic("Operation too long");
+
+	if(req->op == UBD_READ) {
+		for(i = 0; i < req->length >> 9; i++){
+			if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
+				ubd_set_bit(i, (unsigned char *)
+					    &req->sector_mask);
+		}
+	}
+	else cowify_bitmap(req->offset, req->length, &req->sector_mask,
+			   &req->cow_offset, bitmap, bitmap_offset,
+			   req->bitmap_words, bitmap_len);
 }
 
 /* Called with ubd_io_lock held */
-static int prepare_request(struct request *req, struct io_thread_req *io_req,
-			   unsigned long long offset, int page_offset,
-			   int len, struct page *page)
+static int prepare_request(struct request *req, struct io_thread_req *io_req)
 {
 	struct gendisk *disk = req->rq_disk;
 	struct ubd *dev = disk->private_data;
+	__u64 offset;
+	int len;
+
+	if(req->rq_status == RQ_INACTIVE) return(1);
 
 	/* This should be impossible now */
 	if((rq_data_dir(req) == WRITE) && !dev->openflags.w){
 		printk("Write attempted on readonly ubd device %s\n",
 		       disk->disk_name);
-		ubd_end_request(req, 0, 0);
+		end_request(req, 0);
 		return(1);
 	}
 
+	offset = ((__u64) req->sector) << 9;
+	len = req->current_nr_sectors << 9;
+
 	io_req->fds[0] = (dev->cow.file != NULL) ? dev->cow.fd : dev->fd;
 	io_req->fds[1] = dev->fd;
+	io_req->cow_offset = -1;
 	io_req->offset = offset;
 	io_req->length = len;
 	io_req->error = 0;
-	io_req->op = (rq_data_dir(req) == READ) ? AIO_READ : AIO_WRITE;
+	io_req->sector_mask = 0;
+
+	io_req->op = (rq_data_dir(req) == READ) ? UBD_READ : UBD_WRITE;
 	io_req->offsets[0] = 0;
 	io_req->offsets[1] = dev->cow.data_offset;
-	io_req->buffer = page_address(page) + page_offset;
+	io_req->buffer = req->buffer;
 	io_req->sectorsize = 1 << 9;
-	io_req->bitmap_offset = dev->cow.bitmap_offset;
-	io_req->bitmap_start = -1;
-	io_req->bitmap_end = -1;
 
-	if((dev->cow.file != NULL) && (io_req->op == UBD_WRITE))
-		cowify_bitmap(io_req, dev->cow.bitmap);
+	if(dev->cow.file != NULL)
+		cowify_req(io_req, dev->cow.bitmap, dev->cow.bitmap_offset,
+			   dev->cow.bitmap_len);
+
 	return(0);
 }
 
@@ -996,36 +1030,30 @@ static void do_ubd_request(request_queue_t *q)
 {
 	struct io_thread_req io_req;
 	struct request *req;
-	__u64 sector;
-	int err;
-
-	if(in_ubd)
-		return;
-	in_ubd = 1;
-	while((req = elv_next_request(q)) != NULL){
-		struct gendisk *disk = req->rq_disk;
-		struct ubd *dev = disk->private_data;
-		int n, i;
-
-		blkdev_dequeue_request(req);
-
-		sector = req->sector;
-		n = blk_rq_map_sg(q, req, dev->sg);
-
-		for(i = 0; i < n; i++){
-			struct scatterlist *sg = &dev->sg[i];
-
-			err = prepare_request(req, &io_req, sector << 9,
-					      sg->offset, sg->length,
-					      sg->page);
-			if(err)
-				continue;
-
-			sector += sg->length >> 9;
-			do_io(&io_req, req, dev->cow.bitmap);
+	int err, n;
+
+	if(thread_fd == -1){
+		while((req = elv_next_request(q)) != NULL){
+			err = prepare_request(req, &io_req);
+			if(!err){
+				do_io(&io_req);
+				__ubd_finish(req, io_req.error);
+			}
+		}
+	}
+	else {
+		if(do_ubd || (req = elv_next_request(q)) == NULL)
+			return;
+		err = prepare_request(req, &io_req);
+		if(!err){
+			do_ubd = ubd_handler;
+			n = os_write_file(thread_fd, (char *) &io_req,
+					 sizeof(io_req));
+			if(n != sizeof(io_req))
+				printk("write to io thread failed, "
+				       "errno = %d\n", -n);
 		}
 	}
-	in_ubd = 0;
 }
 
 static int ubd_ioctl(struct inode * inode, struct file * file,
@@ -1241,95 +1269,131 @@ int create_cow_file(char *cow_file, char *backing_file, struct openflags flags,
 	return(err);
 }
 
-void do_io(struct io_thread_req *req, struct request *r, unsigned long *bitmap)
+static int update_bitmap(struct io_thread_req *req)
 {
-	struct ubd_aio *aio;
-	struct bitmap_io *bitmap_io = NULL;
-	char *buf;
-	void *bitmap_buf = NULL;
-	unsigned long len, sector;
-	int nsectors, start, end, bit, err;
-	__u64 off;
-
-	if(req->bitmap_start != -1){
-		/* Round up to the nearest word */
-		int round = sizeof(unsigned long);
-		len = (req->bitmap_end - req->bitmap_start +
-		       round * 8 - 1) / (round * 8);
-		len *= round;
-
-		off = req->bitmap_start / (8 * round);
-		off *= round;
-
-		bitmap_io = kmalloc(sizeof(*bitmap_io), GFP_KERNEL);
-		if(bitmap_io == NULL){
-			printk("Failed to kmalloc bitmap IO\n");
-			req->error = 1;
-			return;
-		}
+	int n;
 
-		bitmap_buf = kmalloc(len, GFP_KERNEL);
-		if(bitmap_buf == NULL){
-			printk("do_io : kmalloc of bitmap chunk "
-			       "failed\n");
-			kfree(bitmap_io);
-			req->error = 1;
-			return;
-		}
-		memcpy(bitmap_buf, &bitmap[off / sizeof(bitmap[0])], len);
-
-		*bitmap_io = ((struct bitmap_io)
-			{ .count = ATOMIC_INIT(0),
-			  .aio = INIT_AIO(AIO_WRITE, req->fds[1],
-					  bitmap_buf, len,
-					  req->bitmap_offset + off,
-					  ubd_reply_fd) } );
-	}
+	if(req->cow_offset == -1)
+		return(0);
 
-	nsectors = req->length / req->sectorsize;
-	start = 0;
-	end = nsectors;
-	bit = 0;
-	do {
-		if(bitmap != NULL){
-			sector = req->offset / req->sectorsize;
-			bit = ubd_test_bit(sector + start, bitmap);
-			end = start;
-			while((end < nsectors) &&
-			      (ubd_test_bit(sector + end, bitmap) == bit))
-				end++;
-		}
+	n = os_seek_file(req->fds[1], req->cow_offset);
+	if(n < 0){
+		printk("do_io - bitmap lseek failed : err = %d\n", -n);
+		return(1);
+	}
 
-		off = req->offsets[bit] + req->offset +
-			start * req->sectorsize;
-		len = (end - start) * req->sectorsize;
-		buf = &req->buffer[start * req->sectorsize];
+	n = os_write_file(req->fds[1], &req->bitmap_words,
+			  sizeof(req->bitmap_words));
+	if(n != sizeof(req->bitmap_words)){
+		printk("do_io - bitmap update failed, err = %d fd = %d\n", -n,
+		       req->fds[1]);
+		return(1);
+	}
 
-		aio = kmalloc(sizeof(*aio), GFP_KERNEL);
-		if(aio == NULL){
-			req->error = 1;
-			return;
-		}
+	return(0);
+}
 
-		*aio = ((struct ubd_aio)
-			{ .aio = INIT_AIO(req->op, req->fds[bit], buf,
-					  len, off, ubd_reply_fd),
-			  .len = len,
-			  .req = r,
-			  .bitmap = bitmap_io,
-			  .bitmap_buf = bitmap_buf });
+void do_io(struct io_thread_req *req)
+{
+	char *buf;
+	unsigned long len;
+	int n, nsectors, start, end, bit;
+	int err;
+	__u64 off;
 
-		if(aio->bitmap != NULL)
-			atomic_inc(&aio->bitmap->count);
-
-		err = submit_aio(&aio->aio);
-		if(err){
-			printk("do_io - submit_aio failed, "
-			       "err = %d\n", err);
-			req->error = 1;
-			return;
-		}
+	nsectors = req->length / req->sectorsize;
+	start = 0;
+	do {
+		bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask);
+		end = start;
+		while((end < nsectors) &&
+		      (ubd_test_bit(end, (unsigned char *)
+				    &req->sector_mask) == bit))
+			end++;
+
+		off = req->offset + req->offsets[bit] +
+			start * req->sectorsize;
+		len = (end - start) * req->sectorsize;
+		buf = &req->buffer[start * req->sectorsize];
+
+		err = os_seek_file(req->fds[bit], off);
+		if(err < 0){
+			printk("do_io - lseek failed : err = %d\n", -err);
+			req->error = 1;
+			return;
+		}
+		if(req->op == UBD_READ){
+			n = 0;
+			do {
+				buf = &buf[n];
+				len -= n;
+				n = os_read_file(req->fds[bit], buf, len);
+				if (n < 0) {
+					printk("do_io - read failed, err = %d "
+					       "fd = %d\n", -n, req->fds[bit]);
+					req->error = 1;
+					return;
+				}
+			} while((n < len) && (n != 0));
+			if (n < len) memset(&buf[n], 0, len - n);
+		} else {
+			n = os_write_file(req->fds[bit], buf, len);
+			if(n != len){
+				printk("do_io - write failed err = %d "
+				       "fd = %d\n", -n, req->fds[bit]);
+				req->error = 1;
+				return;
+			}
+		}
+
+		start = end;
+	} while(start < nsectors);
 
-		start = end;
-	} while(start < nsectors);
+	req->error = update_bitmap(req);
 }
+
+/* Changed in start_io_thread, which is serialized by being called only
+ * from ubd_init, which is an initcall.
+ */
+int kernel_fd = -1;
+
+/* Only changed by the io thread */
+int io_count = 0;
+
+int io_thread(void *arg)
+{
+	struct io_thread_req req;
+	int n;
+
+	ignore_sigwinch_sig();
+	while(1){
+		n = os_read_file(kernel_fd, &req, sizeof(req));
+		if(n != sizeof(req)){
+			if(n < 0)
+				printk("io_thread - read failed, fd = %d, "
+				       "err = %d\n", kernel_fd, -n);
+			else {
+				printk("io_thread - short read, fd = %d, "
+				       "length = %d\n", kernel_fd, n);
+			}
+			continue;
+		}
+		io_count++;
+		do_io(&req);
+		n = os_write_file(kernel_fd, &req, sizeof(req));
+		if(n != sizeof(req))
+			printk("io_thread - write failed, fd = %d, err = %d\n",
+			       kernel_fd, -n);
+	}
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only.  This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */