path: root/arch/um/drivers/ubd_kern.c
author    Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>  2006-10-31 01:07:08 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>                   2006-10-31 11:06:59 -0500
commit    33f775eea185e8df7701c4afc2c8fcee85c83282 (patch)
tree      5cbba5e09cba9ab84ddba06cf23e1950ffbec222 /arch/um/drivers/ubd_kern.c
parent    d7fb2c3865ca0f95d92e2864c3dc9220789d83f5 (diff)
[PATCH] uml ubd driver: ubd_io_lock usage fixup
Add some comments about the requirements for ubd_io_lock and expand its use.

When an irq signals that the "controller" (i.e. another thread on the host, which performs the actual requests and is the only one blocked on I/O on the host) has done some work, we call the request function (do_ubd_request) again ourselves. We now do that with ubd_io_lock held - that is useful to protect against concurrent calls to elv_next_request and so on.

XXX: Maybe we shouldn't call the request function at all. Input needed on this. Are we supposed to plug and unplug the queue? The current code "indirectly" does that by setting a flag, called do_ubd, which makes the request function return (a residual of the 2.4 block layer interface). Meanwhile, however, merge this patch, which improves things.

Cc: Jens Axboe <axboe@suse.de>
Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
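The locking pattern this patch adopts can be shown in isolation. The following is a minimal sketch, not the driver source: io_lock, queue, request_fn() and completion_handler() are hypothetical stand-ins for ubd_io_lock, ubd_queue, do_ubd_request() and ubd_handler(). The point is that the block layer normally invokes the request function with the queue lock held, so when the driver re-enters it from its own interrupt handler, it must take the same lock first.

    #include <linux/spinlock.h>
    #include <linux/blkdev.h>

    /* Hypothetical stand-ins for ubd_io_lock, ubd_queue, do_ubd_request(). */
    static DEFINE_SPINLOCK(io_lock);
    extern struct request_queue *queue;
    extern void request_fn(struct request_queue *q);

    /* Interrupt-side completion path.  A plain spin_lock() is enough here
     * because we already run in interrupt context; the lock serializes us
     * against the block layer calling request_fn() on another CPU. */
    static void completion_handler(void)
    {
            /* ... finish the completed requests ... */
            spin_lock(&io_lock);
            request_fn(queue);      /* elv_next_request() etc. now serialized */
            spin_unlock(&io_lock);
    }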
Diffstat (limited to 'arch/um/drivers/ubd_kern.c')
-rw-r--r-- arch/um/drivers/ubd_kern.c | 8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index a3061ae39b3b..6cd8988e8fd0 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -106,6 +106,8 @@ static inline void ubd_set_bit(__u64 bit, unsigned char *data)
 
 #define DRIVER_NAME "uml-blkdev"
 
+/* Can be taken in interrupt context, and is passed to the block layer to lock
+ * the request queue. Kernel side code knows that. */
 static DEFINE_SPINLOCK(ubd_io_lock);
 
 static DEFINE_MUTEX(ubd_lock);
@@ -497,6 +499,8 @@ static void __ubd_finish(struct request *req, int error)
 	end_request(req, 1);
 }
 
+/* Callable only from interrupt context - otherwise you need to do
+ * spin_lock_irq()/spin_lock_irqsave() */
 static inline void ubd_finish(struct request *req, int error)
 {
 	spin_lock(&ubd_io_lock);
@@ -504,7 +508,7 @@ static inline void ubd_finish(struct request *req, int error)
 	spin_unlock(&ubd_io_lock);
 }
 
-/* Called without ubd_io_lock held */
+/* Called without ubd_io_lock held, and only in interrupt context. */
 static void ubd_handler(void)
 {
 	struct io_thread_req req;
@@ -525,7 +529,9 @@ static void ubd_handler(void)
 
 	ubd_finish(rq, req.error);
 	reactivate_fd(thread_fd, UBD_IRQ);
+	spin_lock(&ubd_io_lock);
 	do_ubd_request(ubd_queue);
+	spin_unlock(&ubd_io_lock);
 }
 
 static irqreturn_t ubd_intr(int irq, void *dev)
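
For reference, the rule behind the new ubd_finish() comment is the usual one for a spinlock shared with an interrupt handler: in process context a plain spin_lock() is deadlock-prone, because the IRQ could fire on the same CPU while the lock is held. A hedged sketch follows; the two callers are hypothetical illustrations, not taken from the driver.

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(io_lock);    /* shared with an IRQ handler */

    /* Hypothetical process-context caller: local interrupts must be
     * disabled, or the IRQ handler could spin on io_lock on this CPU
     * while we hold it, deadlocking. */
    static void process_context_caller(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&io_lock, flags);
            /* ... touch state shared with the IRQ handler ... */
            spin_unlock_irqrestore(&io_lock, flags);
    }

    /* Hypothetical interrupt-context caller: the handler cannot be
     * re-entered by itself, so plain spin_lock() suffices - which is
     * why ubd_finish() may use it, but only from interrupt context. */
    static void irq_context_caller(void)
    {
            spin_lock(&io_lock);
            /* ... */
            spin_unlock(&io_lock);
    }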