author    Miklos Szeredi <miklos@szeredi.hu>    2006-04-11 01:54:58 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-04-11 09:18:49 -0400
commit    ce1d5a491f0ee50560416a73faa5e4ddbab074bd
tree      21f91d983b467ad05df0213f54fe00aad84e5761    /fs/fuse/dev.c
parent    a87046d822f2d982d25b24c4a644d34f22d4888a
[PATCH] fuse: clean up request accounting
FUSE allocated most requests from a fixed-size pool filled at mount time. However, in some cases (release/forget) non-pool requests were used. File locking operations aren't well served by the request pool, since they may block indefinitely and thus exhaust the pool.

This patch removes the request pool and always allocates requests on demand.

Signed-off-by: Miklos Szeredi <miklos@szeredi.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r--	fs/fuse/dev.c	73
1 file changed, 9 insertions(+), 64 deletions(-)
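For readers following the API change: the pool-backed fuse_get_request() (which could return NULL after an interrupted wait on the pool semaphore) is replaced by fuse_get_req(), which allocates on demand and reports failure as ERR_PTR(-ENOMEM). A minimal caller sketch follows; example_fuse_op() is a hypothetical function used only for illustration and is not part of this patch:

#include <linux/err.h>

/* Hypothetical caller, for illustration only -- not part of this patch. */
static int example_fuse_op(struct fuse_conn *fc)
{
	struct fuse_req *req = fuse_get_req(fc);

	if (IS_ERR(req))
		return PTR_ERR(req);	/* on-demand allocation failed: -ENOMEM */

	/* ... fill in req->in / req->out and queue the request here ... */

	fuse_put_request(fc, req);	/* drops the last reference and frees the request */
	return 0;
}

With allocation decoupled from a fixed pool, a request held across a long-blocking operation such as a file lock no longer starves other operations, which is the point of this cleanup.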
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 6b8843d4ad8c..4dc104c0e95d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -72,10 +72,8 @@ static void restore_sigs(sigset_t *oldset)
  */
 void fuse_reset_request(struct fuse_req *req)
 {
-	int preallocated = req->preallocated;
 	BUG_ON(atomic_read(&req->count) != 1);
 	fuse_request_init(req);
-	req->preallocated = preallocated;
 }
 
 static void __fuse_get_request(struct fuse_req *req)
@@ -90,71 +88,28 @@ static void __fuse_put_request(struct fuse_req *req)
 	atomic_dec(&req->count);
 }
 
-static struct fuse_req *do_get_request(struct fuse_conn *fc)
+struct fuse_req *fuse_get_req(struct fuse_conn *fc)
 {
-	struct fuse_req *req;
+	struct fuse_req *req = fuse_request_alloc();
+	if (!req)
+		return ERR_PTR(-ENOMEM);
 
-	spin_lock(&fc->lock);
-	BUG_ON(list_empty(&fc->unused_list));
-	req = list_entry(fc->unused_list.next, struct fuse_req, list);
-	list_del_init(&req->list);
-	spin_unlock(&fc->lock);
+	atomic_inc(&fc->num_waiting);
 	fuse_request_init(req);
-	req->preallocated = 1;
 	req->in.h.uid = current->fsuid;
 	req->in.h.gid = current->fsgid;
 	req->in.h.pid = current->pid;
 	return req;
 }
 
-/* This can return NULL, but only in case it's interrupted by a SIGKILL */
-struct fuse_req *fuse_get_request(struct fuse_conn *fc)
-{
-	int intr;
-	sigset_t oldset;
-
-	atomic_inc(&fc->num_waiting);
-	block_sigs(&oldset);
-	intr = down_interruptible(&fc->outstanding_sem);
-	restore_sigs(&oldset);
-	if (intr) {
-		atomic_dec(&fc->num_waiting);
-		return NULL;
-	}
-	return do_get_request(fc);
-}
-
-/* Must be called with fc->lock held */
-static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
-{
-	if (req->preallocated) {
-		atomic_dec(&fc->num_waiting);
-		list_add(&req->list, &fc->unused_list);
-	} else
-		fuse_request_free(req);
-
-	/* If we are in debt decrease that first */
-	if (fc->outstanding_debt)
-		fc->outstanding_debt--;
-	else
-		up(&fc->outstanding_sem);
-}
-
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (atomic_dec_and_test(&req->count)) {
-		spin_lock(&fc->lock);
-		fuse_putback_request(fc, req);
-		spin_unlock(&fc->lock);
+		atomic_dec(&fc->num_waiting);
+		fuse_request_free(req);
 	}
 }
 
-static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
-{
-	if (atomic_dec_and_test(&req->count))
-		fuse_putback_request(fc, req);
-}
-
 void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
 {
 	iput(req->inode);
@@ -189,9 +144,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 	list_del(&req->list);
 	req->state = FUSE_REQ_FINISHED;
 	if (!req->background) {
-		wake_up(&req->waitq);
-		fuse_put_request_locked(fc, req);
 		spin_unlock(&fc->lock);
+		wake_up(&req->waitq);
+		fuse_put_request(fc, req);
 	} else {
 		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
 		req->end = NULL;
@@ -302,16 +257,6 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 	req->in.h.unique = fc->reqctr;
 	req->in.h.len = sizeof(struct fuse_in_header) +
 		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
-	if (!req->preallocated) {
-		/* If request is not preallocated (either FORGET or
-		   RELEASE), then still decrease outstanding_sem, so
-		   user can't open infinite number of files while not
-		   processing the RELEASE requests.  However for
-		   efficiency do it without blocking, so if down()
-		   would block, just increase the debt instead */
-		if (down_trylock(&fc->outstanding_sem))
-			fc->outstanding_debt++;
-	}
 	list_add_tail(&req->list, &fc->pending);
 	req->state = FUSE_REQ_PENDING;
 	wake_up(&fc->waitq);