author	Jeff Dike <jdike@addtoit.com>	2007-02-10 04:44:16 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-11 13:51:23 -0500
commit	62f96cb01e8de7a5daee472e540f726db2801499 (patch)
tree	0ad14b68eeb8170669e8072e9e09782fc8dd4b79
parent	92b4202f38cf630350a9e1eb0ab23ca4fc5b687b (diff)
[PATCH] uml: add per-device queues and locks to ubd driver
Replace the global queue and lock with per-device queues and locks.  Mostly
a straightforward replacement of ubd_io_lock with dev->lock and ubd_queue
with dev->queue.

Complication - there was no way to get a request struct (and queue) from the
structure sent to the io_thread, so a pointer to the request was added.  This
is needed in ubd_handler in order to kick do_ubd_request to process another
request.

Queue initialization is moved from ubd_init to ubd_add.

Signed-off-by: Jeff Dike <jdike@addtoit.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
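For context, the per-device pattern the patch moves to can be sketched roughly
as below.  This is an illustrative sketch only, assuming the 2.6-era
blk_init_queue()/blk_cleanup_queue() API that this file already uses; the
sketch_* names are invented for the example and are not part of the driver.

/*
 * Sketch, not part of the patch: each device owns a spinlock and a request
 * queue, queuedata points back at the device, and completion code recovers
 * the device from the request it is finishing (as ubd_finish() now does).
 */
#include <linux/blkdev.h>
#include <linux/spinlock.h>

struct sketch_dev {
	spinlock_t lock;		/* guards only this device's queue */
	struct request_queue *queue;
};

static void sketch_request_fn(request_queue_t *q)
{
	struct sketch_dev *dev = q->queuedata;	/* per-device context */
	/* pull requests off q and submit them; called with dev->lock held */
}

static int sketch_add(struct sketch_dev *dev)
{
	spin_lock_init(&dev->lock);
	dev->queue = blk_init_queue(sketch_request_fn, &dev->lock);
	if (dev->queue == NULL)
		return -ENOMEM;
	dev->queue->queuedata = dev;	/* let the request fn find the device */
	return 0;
}

static void sketch_complete(struct request *rq, int error)
{
	/* the owning device is reachable from the request being completed */
	struct sketch_dev *dev = rq->rq_disk->private_data;

	spin_lock(&dev->lock);
	/* end the request here, then kick the queue for more work */
	spin_unlock(&dev->lock);
}

static void sketch_remove(struct sketch_dev *dev)
{
	blk_cleanup_queue(dev->queue);
}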
-rw-r--r--	arch/um/drivers/ubd_kern.c	70
1 file changed, 40 insertions(+), 30 deletions(-)
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index c1d40fb738e6..d863482cdd27 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -56,6 +56,7 @@
 enum ubd_req { UBD_READ, UBD_WRITE };
 
 struct io_thread_req {
+	struct request *req;
 	enum ubd_req op;
 	int fds[2];
 	unsigned long offsets[2];
@@ -106,10 +107,6 @@ static inline void ubd_set_bit(__u64 bit, unsigned char *data)
 
 #define DRIVER_NAME "uml-blkdev"
 
-/* Can be taken in interrupt context, and is passed to the block layer to lock
- * the request queue. Kernel side code knows that. */
-static DEFINE_SPINLOCK(ubd_io_lock);
-
 static DEFINE_MUTEX(ubd_lock);
 
 /* XXX - this made sense in 2.4 days, now it's only used as a boolean, and
@@ -132,9 +129,6 @@ static struct block_device_operations ubd_blops = {
 	.getgeo = ubd_getgeo,
 };
 
-/* Protected by the queue_lock */
-static request_queue_t *ubd_queue;
-
 /* Protected by ubd_lock */
 static int fake_major = MAJOR_NR;
 
@@ -178,6 +172,8 @@ struct ubd {
 	unsigned no_cow:1;
 	struct cow cow;
 	struct platform_device pdev;
+	struct request_queue *queue;
+	spinlock_t lock;
 };
 
 #define DEFAULT_COW { \
@@ -198,6 +194,7 @@ struct ubd {
 	.no_cow = 0, \
 	.shared = 0, \
 	.cow = DEFAULT_COW, \
+	.lock = SPIN_LOCK_UNLOCKED, \
 }
 
 struct ubd ubd_devs[MAX_DEV] = { [ 0 ... MAX_DEV - 1 ] = DEFAULT_UBD };
@@ -504,17 +501,20 @@ static void __ubd_finish(struct request *req, int error)
  * spin_lock_irq()/spin_lock_irqsave() */
 static inline void ubd_finish(struct request *req, int error)
 {
-	spin_lock(&ubd_io_lock);
+	struct ubd *dev = req->rq_disk->private_data;
+
+	spin_lock(&dev->lock);
 	__ubd_finish(req, error);
-	spin_unlock(&ubd_io_lock);
+	spin_unlock(&dev->lock);
 }
 
 /* XXX - move this inside ubd_intr. */
-/* Called without ubd_io_lock held, and only in interrupt context. */
+/* Called without dev->lock held, and only in interrupt context. */
 static void ubd_handler(void)
 {
 	struct io_thread_req req;
-	struct request *rq = elv_next_request(ubd_queue);
+	struct request *rq;
+	struct ubd *dev;
 	int n;
 
 	do_ubd = 0;
@@ -523,17 +523,17 @@ static void ubd_handler(void)
 	if(n != sizeof(req)){
 		printk(KERN_ERR "Pid %d - spurious interrupt in ubd_handler, "
 		       "err = %d\n", os_getpid(), -n);
-		spin_lock(&ubd_io_lock);
-		end_request(rq, 0);
-		spin_unlock(&ubd_io_lock);
 		return;
 	}
 
+	rq = req.req;
+	dev = rq->rq_disk->private_data;
+
 	ubd_finish(rq, req.error);
 	reactivate_fd(thread_fd, UBD_IRQ);
-	spin_lock(&ubd_io_lock);
-	do_ubd_request(ubd_queue);
-	spin_unlock(&ubd_io_lock);
+	spin_lock(&dev->lock);
+	do_ubd_request(dev->queue);
+	spin_unlock(&dev->lock);
 }
 
 static irqreturn_t ubd_intr(int irq, void *dev)
@@ -664,7 +664,7 @@ static int ubd_disk_register(int major, u64 size, int unit,
 	}
 
 	disk->private_data = &ubd_devs[unit];
-	disk->queue = ubd_queue;
+	disk->queue = ubd_devs[unit].queue;
 	add_disk(disk);
 
 	*disk_out = disk;
@@ -689,13 +689,23 @@ static int ubd_add(int n, char **error_out)
 
 	ubd_dev->size = ROUND_BLOCK(ubd_dev->size);
 
-	err = ubd_disk_register(MAJOR_NR, ubd_dev->size, n, &ubd_gendisk[n]);
-	if(err)
+	err = -ENOMEM;
+	ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock);
+	if (ubd_dev->queue == NULL) {
+		*error_out = "Failed to initialize device queue";
 		goto out;
+	}
+	ubd_dev->queue->queuedata = ubd_dev;
+
+	err = ubd_disk_register(MAJOR_NR, ubd_dev->size, n, &ubd_gendisk[n]);
+	if(err){
+		*error_out = "Failed to register device";
+		goto out_cleanup;
+	}
 
 	if(fake_major != MAJOR_NR)
 		ubd_disk_register(fake_major, ubd_dev->size, n,
 				  &fake_gendisk[n]);
 
 	/* perhaps this should also be under the "if (fake_major)" above */
 	/* using the fake_disk->disk_name and also the fakehd_set name */
@@ -705,6 +715,10 @@ static int ubd_add(int n, char **error_out)
 	err = 0;
 out:
 	return err;
+
+out_cleanup:
+	blk_cleanup_queue(ubd_dev->queue);
+	goto out;
 }
 
 static int ubd_config(char *str, char **error_out)
@@ -816,6 +830,7 @@ static int ubd_remove(int n, char **error_out)
 		fake_gendisk[n] = NULL;
 	}
 
+	blk_cleanup_queue(ubd_dev->queue);
 	platform_device_unregister(&ubd_dev->pdev);
 	*ubd_dev = ((struct ubd) DEFAULT_UBD);
 	err = 0;
@@ -869,12 +884,6 @@ static int __init ubd_init(void)
 	if (register_blkdev(MAJOR_NR, "ubd"))
 		return -1;
 
-	ubd_queue = blk_init_queue(do_ubd_request, &ubd_io_lock);
-	if (!ubd_queue) {
-		unregister_blkdev(MAJOR_NR, "ubd");
-		return -1;
-	}
-
 	if (fake_major != MAJOR_NR) {
 		char name[sizeof("ubd_nnn\0")];
 
@@ -1020,7 +1029,7 @@ static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
 				req->bitmap_words, bitmap_len);
 }
 
-/* Called with ubd_io_lock held */
+/* Called with dev->lock held */
 static int prepare_request(struct request *req, struct io_thread_req *io_req)
 {
 	struct gendisk *disk = req->rq_disk;
@@ -1039,6 +1048,7 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req)
 	offset = ((__u64) req->sector) << 9;
 	len = req->current_nr_sectors << 9;
 
+	io_req->req = req;
 	io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd : ubd_dev->fd;
 	io_req->fds[1] = ubd_dev->fd;
 	io_req->cow_offset = -1;
@@ -1060,7 +1070,7 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req)
 	return(0);
 }
 
-/* Called with ubd_io_lock held */
+/* Called with dev->lock held */
 static void do_ubd_request(request_queue_t *q)
 {
 	struct io_thread_req io_req;