author    Stefan Hajnoczi <stefanha@redhat.com>  2018-06-18 10:53:19 -0400
committer Miklos Szeredi <mszeredi@redhat.com>   2019-09-12 08:59:41 -0400
commit    ae3aad77f46fbba56eff7141b2fc49870b60827e (patch)
tree      066161473fe29b6b0c37b94cf351df23744479cf
parent    0cc2656cdb0b1f234e6d29378cb061e29d7522bc (diff)
fuse: add fuse_iqueue_ops callbacks
The /dev/fuse device uses fiq->waitq and fasync to signal that requests are
available.  These mechanisms do not apply to virtio-fs.  This patch
introduces callbacks so alternative behavior can be used.

Note that queue_interrupt() changes along these lines:

  spin_lock(&fiq->waitq.lock);
  wake_up_locked(&fiq->waitq);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
  spin_unlock(&fiq->waitq.lock);
- kill_fasync(&fiq->fasync, SIGIO, POLL_IN);

Since queue_request() and queue_forget() also call kill_fasync() inside
the spinlock this should be safe.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
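[Editor's note] The pattern the patch adopts -- the queueing path takes the lock, and a backend-supplied callback both signals readers and releases the lock -- can be modelled outside the kernel. Below is a minimal userspace analogue using pthreads; all names (iqueue, iqueue_ops, dev_wake_and_unlock, queue_request_and_unlock, reader) are invented for illustration and are not part of the FUSE code.

/* Minimal userspace model of the "wake and unlock" callback pattern.
 * Build with: cc -pthread model.c -o model
 */
#include <pthread.h>
#include <stdio.h>

struct iqueue;

/* Backend-specific signalling: called with iq->lock held, must release it. */
struct iqueue_ops {
	void (*wake_pending_and_unlock)(struct iqueue *iq);
};

struct iqueue {
	pthread_mutex_t lock;
	pthread_cond_t waitq;
	int pending;
	const struct iqueue_ops *ops;
};

/* Default backend: signal waiters, then drop the lock (analogous to
 * fuse_dev_wake_and_unlock(), where kill_fasync() is also safe under
 * the spinlock). */
static void dev_wake_and_unlock(struct iqueue *iq)
{
	pthread_cond_signal(&iq->waitq);
	pthread_mutex_unlock(&iq->lock);
}

static const struct iqueue_ops dev_iqueue_ops = {
	.wake_pending_and_unlock = dev_wake_and_unlock,
};

/* Queueing path: takes the lock, updates the queue, and delegates both
 * signalling and unlocking to the backend callback. */
static void queue_request_and_unlock(struct iqueue *iq)
{
	pthread_mutex_lock(&iq->lock);
	iq->pending++;
	iq->ops->wake_pending_and_unlock(iq);
}

static void *reader(void *arg)
{
	struct iqueue *iq = arg;

	pthread_mutex_lock(&iq->lock);
	while (!iq->pending)
		pthread_cond_wait(&iq->waitq, &iq->lock);
	printf("reader saw %d pending request(s)\n", iq->pending);
	pthread_mutex_unlock(&iq->lock);
	return NULL;
}

int main(void)
{
	struct iqueue iq = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.waitq = PTHREAD_COND_INITIALIZER,
		.ops = &dev_iqueue_ops,
	};
	pthread_t t;

	pthread_create(&t, NULL, reader, &iq);
	queue_request_and_unlock(&iq);
	pthread_join(&t, NULL);
	return 0;
}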
-rw-r--r--  fs/fuse/cuse.c     2
-rw-r--r--  fs/fuse/dev.c     46
-rw-r--r--  fs/fuse/fuse_i.h  42
-rw-r--r--  fs/fuse/inode.c   13
4 files changed, 81 insertions, 22 deletions
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 2332e7f960a8..6a0de0ce4403 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -506,7 +506,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
 	 * Limit the cuse channel to requests that can
 	 * be represented in file->f_cred->user_ns.
 	 */
-	fuse_conn_init(&cc->fc, file->f_cred->user_ns);
+	fuse_conn_init(&cc->fc, file->f_cred->user_ns, &fuse_dev_fiq_ops, NULL);
 
 	fud = fuse_dev_alloc(&cc->fc);
 	if (!fud) {
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 943bc5cf941a..358a01435058 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -201,14 +201,33 @@ static unsigned int fuse_req_hash(u64 unique)
 	return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
 }
 
-static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
+/**
+ * A new request is available, wake fiq->waitq
+ */
+static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
+__releases(fiq->lock)
+{
+	wake_up(&fiq->waitq);
+	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+	spin_unlock(&fiq->lock);
+}
+
+const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
+	.wake_forget_and_unlock = fuse_dev_wake_and_unlock,
+	.wake_interrupt_and_unlock = fuse_dev_wake_and_unlock,
+	.wake_pending_and_unlock = fuse_dev_wake_and_unlock,
+};
+EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
+
+static void queue_request_and_unlock(struct fuse_iqueue *fiq,
+				     struct fuse_req *req)
+__releases(fiq->lock)
 {
 	req->in.h.len = sizeof(struct fuse_in_header) +
 		fuse_len_args(req->args->in_numargs,
 			      (struct fuse_arg *) req->args->in_args);
 	list_add_tail(&req->list, &fiq->pending);
-	wake_up(&fiq->waitq);
-	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+	fiq->ops->wake_pending_and_unlock(fiq);
 }
 
 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
@@ -223,12 +242,11 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
 	if (fiq->connected) {
 		fiq->forget_list_tail->next = forget;
 		fiq->forget_list_tail = forget;
-		wake_up(&fiq->waitq);
-		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+		fiq->ops->wake_forget_and_unlock(fiq);
 	} else {
 		kfree(forget);
+		spin_unlock(&fiq->lock);
 	}
-	spin_unlock(&fiq->lock);
 }
 
 static void flush_bg_queue(struct fuse_conn *fc)
@@ -244,8 +262,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
 		fc->active_background++;
 		spin_lock(&fiq->lock);
 		req->in.h.unique = fuse_get_unique(fiq);
-		queue_request(fiq, req);
-		spin_unlock(&fiq->lock);
+		queue_request_and_unlock(fiq, req);
 	}
 }
 
@@ -334,10 +351,10 @@ static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 			spin_unlock(&fiq->lock);
 			return 0;
 		}
-		wake_up(&fiq->waitq);
-		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+		fiq->ops->wake_interrupt_and_unlock(fiq);
+	} else {
+		spin_unlock(&fiq->lock);
 	}
-	spin_unlock(&fiq->lock);
 	return 0;
 }
 
@@ -397,11 +414,10 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 		req->out.h.error = -ENOTCONN;
 	} else {
 		req->in.h.unique = fuse_get_unique(fiq);
-		queue_request(fiq, req);
 		/* acquire extra reference, since request is still needed
 		   after fuse_request_end() */
 		__fuse_get_request(req);
-		spin_unlock(&fiq->lock);
+		queue_request_and_unlock(fiq, req);
 
 		request_wait_answer(fc, req);
 		/* Pairs with smp_wmb() in fuse_request_end() */
@@ -570,14 +586,12 @@ static int fuse_simple_notify_reply(struct fuse_conn *fc,
 
 	spin_lock(&fiq->lock);
 	if (fiq->connected) {
-		queue_request(fiq, req);
-		spin_unlock(&fiq->lock);
+		queue_request_and_unlock(fiq, req);
 	} else {
 		err = -ENODEV;
 		spin_unlock(&fiq->lock);
 		fuse_put_request(fc, req);
 	}
-	spin_unlock(&fiq->lock);
 
 	return err;
 }
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 1902148281cc..8c13865955d4 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -355,6 +355,39 @@ struct fuse_req {
 
 };
 
+struct fuse_iqueue;
+
+/**
+ * Input queue callbacks
+ *
+ * Input queue signalling is device-specific.  For example, the /dev/fuse file
+ * uses fiq->waitq and fasync to wake processes that are waiting on queue
+ * readiness.  These callbacks allow other device types to respond to input
+ * queue activity.
+ */
+struct fuse_iqueue_ops {
+	/**
+	 * Signal that a forget has been queued
+	 */
+	void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq)
+		__releases(fiq->lock);
+
+	/**
+	 * Signal that an INTERRUPT request has been queued
+	 */
+	void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq)
+		__releases(fiq->lock);
+
+	/**
+	 * Signal that a request has been queued
+	 */
+	void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq)
+		__releases(fiq->lock);
+};
+
+/** /dev/fuse input queue operations */
+extern const struct fuse_iqueue_ops fuse_dev_fiq_ops;
+
 struct fuse_iqueue {
 	/** Connection established */
 	unsigned connected;
@@ -383,6 +416,12 @@ struct fuse_iqueue {
 
 	/** O_ASYNC requests */
 	struct fasync_struct *fasync;
+
+	/** Device-specific callbacks */
+	const struct fuse_iqueue_ops *ops;
+
+	/** Device-specific state */
+	void *priv;
 };
 
 #define FUSE_PQ_HASH_BITS 8
@@ -882,7 +921,8 @@ struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);
 /**
  * Initialize fuse_conn
  */
-void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns);
+void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns,
+		    const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv);
 
 /**
  * Release reference to fuse_conn
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 30d92e633ece..734fdd597c3e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -568,7 +568,9 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
 	return 0;
 }
 
-static void fuse_iqueue_init(struct fuse_iqueue *fiq)
+static void fuse_iqueue_init(struct fuse_iqueue *fiq,
+			     const struct fuse_iqueue_ops *ops,
+			     void *priv)
 {
 	memset(fiq, 0, sizeof(struct fuse_iqueue));
 	spin_lock_init(&fiq->lock);
@@ -577,6 +579,8 @@ static void fuse_iqueue_init(struct fuse_iqueue *fiq)
 	INIT_LIST_HEAD(&fiq->interrupts);
 	fiq->forget_list_tail = &fiq->forget_list_head;
 	fiq->connected = 1;
+	fiq->ops = ops;
+	fiq->priv = priv;
 }
 
 static void fuse_pqueue_init(struct fuse_pqueue *fpq)
@@ -590,7 +594,8 @@ static void fuse_pqueue_init(struct fuse_pqueue *fpq)
 	fpq->connected = 1;
 }
 
-void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
+void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns,
+		    const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv)
 {
 	memset(fc, 0, sizeof(*fc));
 	spin_lock_init(&fc->lock);
@@ -599,7 +604,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
 	refcount_set(&fc->count, 1);
 	atomic_set(&fc->dev_count, 1);
 	init_waitqueue_head(&fc->blocked_waitq);
-	fuse_iqueue_init(&fc->iq);
+	fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv);
 	INIT_LIST_HEAD(&fc->bg_queue);
 	INIT_LIST_HEAD(&fc->entry);
 	INIT_LIST_HEAD(&fc->devices);
@@ -1209,7 +1214,7 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
 	if (!fc)
 		goto err_fput;
 
-	fuse_conn_init(fc, sb->s_user_ns);
+	fuse_conn_init(fc, sb->s_user_ns, &fuse_dev_fiq_ops, NULL);
 	fc->release = fuse_free_conn;
 	sb->s_fs_info = fc;
 
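[Editor's note] For reference, a backend other than /dev/fuse would supply its own table and pass it to fuse_conn_init() together with per-queue state for fiq->priv. The fragment below is only a sketch of that wiring, following the fuse_dev_fiq_ops pattern added by this patch; the my_* names are hypothetical, and such code would have to live in-tree next to fs/fuse (fuse_i.h is a private header), as the later virtio-fs code does.

/* Illustration only: a hypothetical non-/dev/fuse backend plugging into
 * the new fuse_iqueue_ops hooks. */
#include "fuse_i.h"

struct my_transport;				/* hypothetical backend state */
void my_transport_kick(struct my_transport *t);	/* hypothetical notifier */

static void my_wake_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	struct my_transport *t = fiq->priv;	/* set via fuse_conn_init() below */

	/*
	 * The callback owns the unlock, so this backend may drop fiq->lock
	 * first and notify its transport outside the lock.
	 */
	spin_unlock(&fiq->lock);
	my_transport_kick(t);
}

static const struct fuse_iqueue_ops my_fiq_ops = {
	.wake_forget_and_unlock = my_wake_and_unlock,
	.wake_interrupt_and_unlock = my_wake_and_unlock,
	.wake_pending_and_unlock = my_wake_and_unlock,
};

/* Connection setup: route wakeups through my_fiq_ops, stash state in priv. */
static void my_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns,
			 struct my_transport *t)
{
	fuse_conn_init(fc, user_ns, &my_fiq_ops, t);
}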