author		Miklos Szeredi <miklos@szeredi.hu>	2006-06-25 08:48:54 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-25 13:01:19 -0400
commit		a4d27e75ffb7b8ecb7eed0c7db0df975525f3fd7
tree		2353706a33196438547ed4651afd9f2d81dd96e8 /fs/fuse/dev.c
parent		f9a2842e5612b93fa20a624a8baa6c2a7ecea504
[PATCH] fuse: add request interruption
Add synchronous request interruption.  This is needed for file locking
operations, which have to be interruptible.  However, a filesystem may also
implement interruptibility of other operations (similar to the NFS 'intr'
mount option).
Signed-off-by: Miklos Szeredi <miklos@szeredi.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
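
For context, a sketch of the userspace side of the protocol this patch
introduces (not part of the patch itself): the kernel sends a FUSE_INTERRUPT
request whose fuse_interrupt_in argument carries the unique id of the request
being interrupted.  The fragment below is hypothetical; the in-flight table
and function names are invented for illustration, while the structs and the
FUSE_INTERRUPT opcode are the real ones from <linux/fuse.h>.

  #include <linux/fuse.h>
  #include <stdint.h>
  #include <string.h>
  #include <unistd.h>

  #define MAX_INFLIGHT 64

  /* Toy table of requests currently being processed (invented for
     this sketch; a real daemon keeps proper bookkeeping). */
  static uint64_t inflight[MAX_INFLIGHT];
  static int interrupted[MAX_INFLIGHT];

  /* An interrupt reply carries a bare fuse_out_header and no body. */
  static void reply_error(int devfd, uint64_t unique, int error)
  {
  	struct fuse_out_header out;

  	memset(&out, 0, sizeof(out));
  	out.len = sizeof(out);
  	out.error = error;
  	out.unique = unique;
  	(void) write(devfd, &out, sizeof(out));
  }

  /* Called from the read loop when ih->opcode == FUSE_INTERRUPT;
     the fuse_interrupt_in payload follows the header directly. */
  static void handle_interrupt(int devfd, const struct fuse_in_header *ih)
  {
  	const struct fuse_interrupt_in *arg =
  		(const struct fuse_interrupt_in *) (ih + 1);
  	int i;

  	for (i = 0; i < MAX_INFLIGHT; i++) {
  		if (inflight[i] == arg->unique) {
  			/* The worker notices the flag and answers the
  			   original request with -EINTR. */
  			interrupted[i] = 1;
  			return;
  		}
  	}
  	/* Interrupt raced ahead of the original request: -EAGAIN asks
  	   the kernel to requeue it.  Replying -ENOSYS instead would
  	   disable interrupts for the whole connection. */
  	reply_error(devfd, ih->unique, -EAGAIN);
  }

The -EAGAIN and -ENOSYS reply semantics are handled in the device write path
in the diff below.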
Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r--	fs/fuse/dev.c	162
1 file changed, 135 insertions(+), 27 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 6b5f74cb7b54..1e2006caf158 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -34,6 +34,7 @@ static void fuse_request_init(struct fuse_req *req)
 {
 	memset(req, 0, sizeof(*req));
 	INIT_LIST_HEAD(&req->list);
+	INIT_LIST_HEAD(&req->intr_entry);
 	init_waitqueue_head(&req->waitq);
 	atomic_set(&req->count, 1);
 }
@@ -215,6 +216,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
 	req->end = NULL;
 	list_del(&req->list);
+	list_del(&req->intr_entry);
 	req->state = FUSE_REQ_FINISHED;
 	if (req->background) {
 		if (fc->num_background == FUSE_MAX_BACKGROUND) {
@@ -235,28 +237,63 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 	fuse_put_request(fc, req);
 }
 
+static void wait_answer_interruptible(struct fuse_conn *fc,
+				      struct fuse_req *req)
+{
+	if (signal_pending(current))
+		return;
+
+	spin_unlock(&fc->lock);
+	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
+	spin_lock(&fc->lock);
+}
+
+static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
+{
+	list_add_tail(&req->intr_entry, &fc->interrupts);
+	wake_up(&fc->waitq);
+	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+}
+
 /* Called with fc->lock held.  Releases, and then reacquires it. */
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 {
-	sigset_t oldset;
+	if (!fc->no_interrupt) {
+		/* Any signal may interrupt this */
+		wait_answer_interruptible(fc, req);
 
-	spin_unlock(&fc->lock);
-	if (req->force)
+		if (req->aborted)
+			goto aborted;
+		if (req->state == FUSE_REQ_FINISHED)
+			return;
+
+		req->interrupted = 1;
+		if (req->state == FUSE_REQ_SENT)
+			queue_interrupt(fc, req);
+	}
+
+	if (req->force) {
+		spin_unlock(&fc->lock);
 		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
-	else {
+		spin_lock(&fc->lock);
+	} else {
+		sigset_t oldset;
+
+		/* Only fatal signals may interrupt this */
 		block_sigs(&oldset);
-		wait_event_interruptible(req->waitq,
-					 req->state == FUSE_REQ_FINISHED);
+		wait_answer_interruptible(fc, req);
 		restore_sigs(&oldset);
 	}
-	spin_lock(&fc->lock);
-	if (req->state == FUSE_REQ_FINISHED && !req->aborted)
-		return;
 
-	if (!req->aborted) {
-		req->out.h.error = -EINTR;
-		req->aborted = 1;
-	}
+	if (req->aborted)
+		goto aborted;
+	if (req->state == FUSE_REQ_FINISHED)
+		return;
+
+	req->out.h.error = -EINTR;
+	req->aborted = 1;
+
+ aborted:
 	if (req->locked) {
 		/* This is uninterruptible sleep, because data is
 		   being copied to/from the buffers of req.  During
@@ -288,13 +325,19 @@ static unsigned len_args(unsigned numargs, struct fuse_arg *args)
 	return nbytes;
 }
 
+static u64 fuse_get_unique(struct fuse_conn *fc)
+{
+	fc->reqctr++;
+	/* zero is special */
+	if (fc->reqctr == 0)
+		fc->reqctr = 1;
+
+	return fc->reqctr;
+}
+
 static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 {
-	fc->reqctr++;
-	/* zero is special */
-	if (fc->reqctr == 0)
-		fc->reqctr = 1;
-	req->in.h.unique = fc->reqctr;
+	req->in.h.unique = fuse_get_unique(fc);
 	req->in.h.len = sizeof(struct fuse_in_header) +
 		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
 	list_add_tail(&req->list, &fc->pending);
@@ -307,9 +350,6 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 }
 
-/*
- * This can only be interrupted by a SIGKILL
- */
 void request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
 	req->isreply = 1;
@@ -566,13 +606,18 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
 	return err;
 }
 
+static int request_pending(struct fuse_conn *fc)
+{
+	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
+}
+
 /* Wait until a request is available on the pending list */
 static void request_wait(struct fuse_conn *fc)
 {
 	DECLARE_WAITQUEUE(wait, current);
 
 	add_wait_queue_exclusive(&fc->waitq, &wait);
-	while (fc->connected && list_empty(&fc->pending)) {
+	while (fc->connected && !request_pending(fc)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (signal_pending(current))
 			break;
@@ -586,6 +631,45 @@ static void request_wait(struct fuse_conn *fc)
 }
 
 /*
+ * Transfer an interrupt request to userspace
+ *
+ * Unlike other requests this is assembled on demand, without a need
+ * to allocate a separate fuse_req structure.
+ *
+ * Called with fc->lock held, releases it
+ */
+static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
+			       const struct iovec *iov, unsigned long nr_segs)
+{
+	struct fuse_copy_state cs;
+	struct fuse_in_header ih;
+	struct fuse_interrupt_in arg;
+	unsigned reqsize = sizeof(ih) + sizeof(arg);
+	int err;
+
+	list_del_init(&req->intr_entry);
+	req->intr_unique = fuse_get_unique(fc);
+	memset(&ih, 0, sizeof(ih));
+	memset(&arg, 0, sizeof(arg));
+	ih.len = reqsize;
+	ih.opcode = FUSE_INTERRUPT;
+	ih.unique = req->intr_unique;
+	arg.unique = req->in.h.unique;
+
+	spin_unlock(&fc->lock);
+	if (iov_length(iov, nr_segs) < reqsize)
+		return -EINVAL;
+
+	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
+	err = fuse_copy_one(&cs, &ih, sizeof(ih));
+	if (!err)
+		err = fuse_copy_one(&cs, &arg, sizeof(arg));
+	fuse_copy_finish(&cs);
+
+	return err ? err : reqsize;
+}
+
+/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
@@ -610,7 +694,7 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 	spin_lock(&fc->lock);
 	err = -EAGAIN;
 	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
-	    list_empty(&fc->pending))
+	    !request_pending(fc))
 		goto err_unlock;
 
 	request_wait(fc);
@@ -618,9 +702,15 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 	if (!fc->connected)
 		goto err_unlock;
 	err = -ERESTARTSYS;
-	if (list_empty(&fc->pending))
+	if (!request_pending(fc))
 		goto err_unlock;
 
+	if (!list_empty(&fc->interrupts)) {
+		req = list_entry(fc->interrupts.next, struct fuse_req,
+				 intr_entry);
+		return fuse_read_interrupt(fc, req, iov, nr_segs);
+	}
+
 	req = list_entry(fc->pending.next, struct fuse_req, list);
 	req->state = FUSE_REQ_READING;
 	list_move(&req->list, &fc->io);
@@ -658,6 +748,8 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 	else {
 		req->state = FUSE_REQ_SENT;
 		list_move_tail(&req->list, &fc->processing);
+		if (req->interrupted)
+			queue_interrupt(fc, req);
 		spin_unlock(&fc->lock);
 	}
 	return reqsize;
@@ -684,7 +776,7 @@ static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
 	list_for_each(entry, &fc->processing) {
 		struct fuse_req *req;
 		req = list_entry(entry, struct fuse_req, list);
-		if (req->in.h.unique == unique)
+		if (req->in.h.unique == unique || req->intr_unique == unique)
 			return req;
 	}
 	return NULL;
@@ -750,7 +842,6 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 		goto err_unlock;
 
 	req = request_find(fc, oh.unique);
-	err = -EINVAL;
 	if (!req)
 		goto err_unlock;
 
@@ -761,6 +852,23 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 		request_end(fc, req);
 		return -ENOENT;
 	}
+	/* Is it an interrupt reply? */
+	if (req->intr_unique == oh.unique) {
+		err = -EINVAL;
+		if (nbytes != sizeof(struct fuse_out_header))
+			goto err_unlock;
+
+		if (oh.error == -ENOSYS)
+			fc->no_interrupt = 1;
+		else if (oh.error == -EAGAIN)
+			queue_interrupt(fc, req);
+
+		spin_unlock(&fc->lock);
+		fuse_copy_finish(&cs);
+		return nbytes;
+	}
+
+	req->state = FUSE_REQ_WRITING;
 	list_move(&req->list, &fc->io);
 	req->out.h = oh;
 	req->locked = 1;
@@ -809,7 +917,7 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 	spin_lock(&fc->lock);
 	if (!fc->connected)
 		mask = POLLERR;
-	else if (!list_empty(&fc->pending))
+	else if (request_pending(fc))
 		mask |= POLLIN | POLLRDNORM;
 	spin_unlock(&fc->lock);
 
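
The user-visible effect, illustrated with a hypothetical snippet (again not
part of the patch): a process blocked in a POSIX lock request on a FUSE
filesystem can now be woken by a signal and sees EINTR, instead of sleeping
until the filesystem answers.

  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>

  /* Block waiting for a whole-file write lock on a FUSE-backed fd;
     with request interruption the wait can now end in EINTR. */
  static int lock_whole_file(int fd)
  {
  	struct flock fl = {
  		.l_type = F_WRLCK,
  		.l_whence = SEEK_SET,
  		.l_start = 0,
  		.l_len = 0,	/* 0 = to end of file */
  	};

  	if (fcntl(fd, F_SETLKW, &fl) == -1) {
  		if (errno == EINTR)
  			fprintf(stderr, "lock wait interrupted\n");
  		return -1;
  	}
  	return 0;
  }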