author     Stanislav Kinsbursky <skinsbursky@parallels.com>   2011-12-26 07:43:32 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>       2012-01-31 18:20:24 -0500
commit     ba9e097593f371ebd102580a0c5b1b2cf55636a0 (patch)
tree       737cda4281e4d77738149e8cd481d7414ccaf160 /net
parent     766347bec3490111e1c4482af7c7394868c2aed1 (diff)
SUNRPC: split SUNRPC PipeFS pipe data and inode creation
Pipe data is needed only for pipes, so allocating space for it on every RPC
inode allocation is redundant. This patch splits the private SUNRPC PipeFS
pipe data from the inode, so that pipe data is allocated only for pipe inodes.
It is also a further step towards removing PipeFS inode references from kernel
code other than PipeFS itself.
Signed-off-by: Stanislav Kinsbursky <skinsbursky@parallels.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
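
The structure split itself lands in include/linux/sunrpc/rpc_pipe_fs.h, which is outside the net/-limited diffstat below. As a rough sketch, inferred only from the hunks that follow (field order and exact types are assumptions), the per-pipe state moves into a new struct rpc_pipe, while struct rpc_inode keeps just the VFS inode, the private pointer and an optional pointer to its pipe:

	/* Sketch inferred from this patch; see rpc_pipe_fs.h for the real layout. */
	struct rpc_pipe {
		struct list_head	pipe;		/* queued upcall messages */
		struct list_head	in_upcall;	/* messages currently being read */
		struct list_head	in_downcall;	/* pending GSS downcalls */
		int			pipelen;
		int			nreaders;
		int			nwriters;
		int			flags;		/* e.g. RPC_PIPE_WAIT_FOR_OPEN */
		struct delayed_work	queue_timeout;
		const struct rpc_pipe_ops *ops;
		spinlock_t		lock;
		wait_queue_head_t	waitq;
	};

	struct rpc_inode {
		struct inode		vfs_inode;
		void			*private;
		struct rpc_pipe		*pipe;	/* non-NULL only for pipe inodes */
	};

With this layout, non-pipe RPC inodes simply carry pipe == NULL: init_once() below only has to clear the pointer, __rpc_mkpipe() allocates and initialises the structure, and rpc_i_callback() frees it when the inode is destroyed.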
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c |  46
-rw-r--r--  net/sunrpc/rpc_pipe.c          | 208
2 files changed, 135 insertions, 119 deletions
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index a0844f92a447..e933484e55ef 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -112,7 +112,7 @@ gss_put_ctx(struct gss_cl_ctx *ctx)
 /* gss_cred_set_ctx:
  * called by gss_upcall_callback and gss_create_upcall in order
  * to set the gss context. The actual exchange of an old context
- * and a new one is protected by the rpci->lock.
+ * and a new one is protected by the rpci->pipe->lock.
  */
 static void
 gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
@@ -297,7 +297,7 @@ static struct gss_upcall_msg *
 __gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
 {
 	struct gss_upcall_msg *pos;
-	list_for_each_entry(pos, &rpci->in_downcall, list) {
+	list_for_each_entry(pos, &rpci->pipe->in_downcall, list) {
 		if (pos->uid != uid)
 			continue;
 		atomic_inc(&pos->count);
@@ -318,14 +318,14 @@ gss_add_msg(struct gss_upcall_msg *gss_msg)
 	struct rpc_inode *rpci = gss_msg->inode;
 	struct gss_upcall_msg *old;
 
-	spin_lock(&rpci->lock);
+	spin_lock(&rpci->pipe->lock);
 	old = __gss_find_upcall(rpci, gss_msg->uid);
 	if (old == NULL) {
 		atomic_inc(&gss_msg->count);
-		list_add(&gss_msg->list, &rpci->in_downcall);
+		list_add(&gss_msg->list, &rpci->pipe->in_downcall);
 	} else
 		gss_msg = old;
-	spin_unlock(&rpci->lock);
+	spin_unlock(&rpci->pipe->lock);
 	return gss_msg;
 }
 
@@ -345,10 +345,10 @@ gss_unhash_msg(struct gss_upcall_msg *gss_msg)
 
 	if (list_empty(&gss_msg->list))
 		return;
-	spin_lock(&rpci->lock);
+	spin_lock(&rpci->pipe->lock);
 	if (!list_empty(&gss_msg->list))
 		__gss_unhash_msg(gss_msg);
-	spin_unlock(&rpci->lock);
+	spin_unlock(&rpci->pipe->lock);
 }
 
 static void
@@ -377,9 +377,9 @@ gss_upcall_callback(struct rpc_task *task)
 	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
 	struct rpc_inode *rpci = gss_msg->inode;
 
-	spin_lock(&rpci->lock);
+	spin_lock(&rpci->pipe->lock);
 	gss_handle_downcall_result(gss_cred, gss_msg);
-	spin_unlock(&rpci->lock);
+	spin_unlock(&rpci->pipe->lock);
 	task->tk_status = gss_msg->msg.errno;
 	gss_release_msg(gss_msg);
 }
@@ -524,7 +524,7 @@ gss_refresh_upcall(struct rpc_task *task)
 		goto out;
 	}
 	rpci = gss_msg->inode;
-	spin_lock(&rpci->lock);
+	spin_lock(&rpci->pipe->lock);
 	if (gss_cred->gc_upcall != NULL)
 		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
 	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
@@ -537,7 +537,7 @@ gss_refresh_upcall(struct rpc_task *task)
 		gss_handle_downcall_result(gss_cred, gss_msg);
 		err = gss_msg->msg.errno;
 	}
-	spin_unlock(&rpci->lock);
+	spin_unlock(&rpci->pipe->lock);
 	gss_release_msg(gss_msg);
 out:
 	dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n",
@@ -575,11 +575,11 @@ retry:
 	rpci = gss_msg->inode;
 	for (;;) {
 		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
-		spin_lock(&rpci->lock);
+		spin_lock(&rpci->pipe->lock);
 		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
 			break;
 		}
-		spin_unlock(&rpci->lock);
+		spin_unlock(&rpci->pipe->lock);
 		if (fatal_signal_pending(current)) {
 			err = -ERESTARTSYS;
 			goto out_intr;
@@ -590,7 +590,7 @@ retry:
 		gss_cred_set_ctx(cred, gss_msg->ctx);
 	else
 		err = gss_msg->msg.errno;
-	spin_unlock(&rpci->lock);
+	spin_unlock(&rpci->pipe->lock);
 out_intr:
 	finish_wait(&gss_msg->waitqueue, &wait);
 	gss_release_msg(gss_msg);
@@ -638,14 +638,14 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 
 	err = -ENOENT;
 	/* Find a matching upcall */
-	spin_lock(&rpci->lock);
+	spin_lock(&rpci->pipe->lock);
 	gss_msg = __gss_find_upcall(rpci, uid);
 	if (gss_msg == NULL) {
-		spin_unlock(&rpci->lock);
+		spin_unlock(&rpci->pipe->lock);
 		goto err_put_ctx;
 	}
 	list_del_init(&gss_msg->list);
-	spin_unlock(&rpci->lock);
+	spin_unlock(&rpci->pipe->lock);
 
 	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
 	if (IS_ERR(p)) {
@@ -673,9 +673,9 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 	err = mlen;
 
 err_release_msg:
-	spin_lock(&rpci->lock);
+	spin_lock(&rpci->pipe->lock);
 	__gss_unhash_msg(gss_msg);
-	spin_unlock(&rpci->lock);
+	spin_unlock(&rpci->pipe->lock);
 	gss_release_msg(gss_msg);
err_put_ctx:
 	gss_put_ctx(ctx);
@@ -725,19 +725,19 @@ gss_pipe_release(struct inode *inode)
 	struct gss_upcall_msg *gss_msg;
 
restart:
-	spin_lock(&rpci->lock);
-	list_for_each_entry(gss_msg, &rpci->in_downcall, list) {
+	spin_lock(&rpci->pipe->lock);
+	list_for_each_entry(gss_msg, &rpci->pipe->in_downcall, list) {
 
 		if (!list_empty(&gss_msg->msg.list))
 			continue;
 		gss_msg->msg.errno = -EPIPE;
 		atomic_inc(&gss_msg->count);
 		__gss_unhash_msg(gss_msg);
-		spin_unlock(&rpci->lock);
+		spin_unlock(&rpci->pipe->lock);
 		gss_release_msg(gss_msg);
 		goto restart;
 	}
-	spin_unlock(&rpci->lock);
+	spin_unlock(&rpci->pipe->lock);
 
 	put_pipe_version();
 }
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 16d9b9a701a4..b6f6555128db 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -61,7 +61,7 @@ void rpc_pipefs_notifier_unregister(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_unregister);
 
-static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
+static void rpc_purge_list(struct rpc_pipe *pipe, struct list_head *head,
 		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
 {
 	struct rpc_pipe_msg *msg;
@@ -74,29 +74,29 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
 		msg->errno = err;
 		destroy_msg(msg);
 	} while (!list_empty(head));
-	wake_up(&rpci->waitq);
+	wake_up(&pipe->waitq);
 }
 
 static void
 rpc_timeout_upcall_queue(struct work_struct *work)
 {
 	LIST_HEAD(free_list);
-	struct rpc_inode *rpci =
-		container_of(work, struct rpc_inode, queue_timeout.work);
+	struct rpc_pipe *pipe =
+		container_of(work, struct rpc_pipe, queue_timeout.work);
 	void (*destroy_msg)(struct rpc_pipe_msg *);
 
-	spin_lock(&rpci->lock);
-	if (rpci->ops == NULL) {
-		spin_unlock(&rpci->lock);
+	spin_lock(&pipe->lock);
+	if (pipe->ops == NULL) {
+		spin_unlock(&pipe->lock);
 		return;
 	}
-	destroy_msg = rpci->ops->destroy_msg;
-	if (rpci->nreaders == 0) {
-		list_splice_init(&rpci->pipe, &free_list);
-		rpci->pipelen = 0;
+	destroy_msg = pipe->ops->destroy_msg;
+	if (pipe->nreaders == 0) {
+		list_splice_init(&pipe->pipe, &free_list);
+		pipe->pipelen = 0;
 	}
-	spin_unlock(&rpci->lock);
-	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
+	spin_unlock(&pipe->lock);
+	rpc_purge_list(pipe, &free_list, destroy_msg, -ETIMEDOUT);
 }
 
 ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
@@ -135,25 +135,25 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
 	struct rpc_inode *rpci = RPC_I(inode);
 	int res = -EPIPE;
 
-	spin_lock(&rpci->lock);
-	if (rpci->ops == NULL)
+	spin_lock(&rpci->pipe->lock);
+	if (rpci->pipe->ops == NULL)
 		goto out;
-	if (rpci->nreaders) {
-		list_add_tail(&msg->list, &rpci->pipe);
-		rpci->pipelen += msg->len;
+	if (rpci->pipe->nreaders) {
+		list_add_tail(&msg->list, &rpci->pipe->pipe);
+		rpci->pipe->pipelen += msg->len;
 		res = 0;
-	} else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
-		if (list_empty(&rpci->pipe))
+	} else if (rpci->pipe->flags & RPC_PIPE_WAIT_FOR_OPEN) {
+		if (list_empty(&rpci->pipe->pipe))
 			queue_delayed_work(rpciod_workqueue,
-					&rpci->queue_timeout,
+					&rpci->pipe->queue_timeout,
 					RPC_UPCALL_TIMEOUT);
-		list_add_tail(&msg->list, &rpci->pipe);
-		rpci->pipelen += msg->len;
+		list_add_tail(&msg->list, &rpci->pipe->pipe);
+		rpci->pipe->pipelen += msg->len;
 		res = 0;
 	}
out:
-	spin_unlock(&rpci->lock);
-	wake_up(&rpci->waitq);
+	spin_unlock(&rpci->pipe->lock);
+	wake_up(&rpci->pipe->waitq);
 	return res;
 }
 EXPORT_SYMBOL_GPL(rpc_queue_upcall);
@@ -167,27 +167,27 @@ rpc_inode_setowner(struct inode *inode, void *private)
 static void
 rpc_close_pipes(struct inode *inode)
 {
-	struct rpc_inode *rpci = RPC_I(inode);
+	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
 	const struct rpc_pipe_ops *ops;
 	int need_release;
 
 	mutex_lock(&inode->i_mutex);
-	ops = rpci->ops;
+	ops = pipe->ops;
 	if (ops != NULL) {
 		LIST_HEAD(free_list);
-		spin_lock(&rpci->lock);
-		need_release = rpci->nreaders != 0 || rpci->nwriters != 0;
-		rpci->nreaders = 0;
-		list_splice_init(&rpci->in_upcall, &free_list);
-		list_splice_init(&rpci->pipe, &free_list);
-		rpci->pipelen = 0;
-		rpci->ops = NULL;
-		spin_unlock(&rpci->lock);
-		rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
-		rpci->nwriters = 0;
+		spin_lock(&pipe->lock);
+		need_release = pipe->nreaders != 0 || pipe->nwriters != 0;
+		pipe->nreaders = 0;
+		list_splice_init(&pipe->in_upcall, &free_list);
+		list_splice_init(&pipe->pipe, &free_list);
+		pipe->pipelen = 0;
+		pipe->ops = NULL;
+		spin_unlock(&pipe->lock);
+		rpc_purge_list(pipe, &free_list, ops->destroy_msg, -EPIPE);
+		pipe->nwriters = 0;
 		if (need_release && ops->release_pipe)
 			ops->release_pipe(inode);
-		cancel_delayed_work_sync(&rpci->queue_timeout);
+		cancel_delayed_work_sync(&pipe->queue_timeout);
 	}
 	rpc_inode_setowner(inode, NULL);
 	mutex_unlock(&inode->i_mutex);
@@ -207,6 +207,7 @@ static void
 rpc_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
+	kfree(RPC_I(inode)->pipe);
 	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
 }
 
@@ -224,18 +225,18 @@ rpc_pipe_open(struct inode *inode, struct file *filp)
 	int res = -ENXIO;
 
 	mutex_lock(&inode->i_mutex);
-	if (rpci->ops == NULL)
+	if (rpci->pipe->ops == NULL)
 		goto out;
-	first_open = rpci->nreaders == 0 && rpci->nwriters == 0;
-	if (first_open && rpci->ops->open_pipe) {
-		res = rpci->ops->open_pipe(inode);
+	first_open = rpci->pipe->nreaders == 0 && rpci->pipe->nwriters == 0;
+	if (first_open && rpci->pipe->ops->open_pipe) {
+		res = rpci->pipe->ops->open_pipe(inode);
 		if (res)
 			goto out;
 	}
 	if (filp->f_mode & FMODE_READ)
-		rpci->nreaders++;
+		rpci->pipe->nreaders++;
 	if (filp->f_mode & FMODE_WRITE)
-		rpci->nwriters++;
+		rpci->pipe->nwriters++;
 	res = 0;
out:
 	mutex_unlock(&inode->i_mutex);
@@ -245,38 +246,38 @@ out:
 static int
 rpc_pipe_release(struct inode *inode, struct file *filp)
 {
-	struct rpc_inode *rpci = RPC_I(inode);
+	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
 	struct rpc_pipe_msg *msg;
 	int last_close;
 
 	mutex_lock(&inode->i_mutex);
-	if (rpci->ops == NULL)
+	if (pipe->ops == NULL)
 		goto out;
 	msg = filp->private_data;
 	if (msg != NULL) {
-		spin_lock(&rpci->lock);
+		spin_lock(&pipe->lock);
 		msg->errno = -EAGAIN;
 		list_del_init(&msg->list);
-		spin_unlock(&rpci->lock);
-		rpci->ops->destroy_msg(msg);
+		spin_unlock(&pipe->lock);
+		pipe->ops->destroy_msg(msg);
 	}
 	if (filp->f_mode & FMODE_WRITE)
-		rpci->nwriters --;
+		pipe->nwriters --;
 	if (filp->f_mode & FMODE_READ) {
-		rpci->nreaders --;
-		if (rpci->nreaders == 0) {
+		pipe->nreaders --;
+		if (pipe->nreaders == 0) {
 			LIST_HEAD(free_list);
-			spin_lock(&rpci->lock);
-			list_splice_init(&rpci->pipe, &free_list);
-			rpci->pipelen = 0;
-			spin_unlock(&rpci->lock);
-			rpc_purge_list(rpci, &free_list,
-					rpci->ops->destroy_msg, -EAGAIN);
+			spin_lock(&pipe->lock);
+			list_splice_init(&pipe->pipe, &free_list);
+			pipe->pipelen = 0;
+			spin_unlock(&pipe->lock);
+			rpc_purge_list(pipe, &free_list,
+					pipe->ops->destroy_msg, -EAGAIN);
 		}
 	}
-	last_close = rpci->nwriters == 0 && rpci->nreaders == 0;
-	if (last_close && rpci->ops->release_pipe)
-		rpci->ops->release_pipe(inode);
+	last_close = pipe->nwriters == 0 && pipe->nreaders == 0;
+	if (last_close && pipe->ops->release_pipe)
+		pipe->ops->release_pipe(inode);
out:
 	mutex_unlock(&inode->i_mutex);
 	return 0;
@@ -291,34 +292,34 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
 	int res = 0;
 
 	mutex_lock(&inode->i_mutex);
-	if (rpci->ops == NULL) {
+	if (rpci->pipe->ops == NULL) {
 		res = -EPIPE;
 		goto out_unlock;
 	}
 	msg = filp->private_data;
 	if (msg == NULL) {
-		spin_lock(&rpci->lock);
-		if (!list_empty(&rpci->pipe)) {
-			msg = list_entry(rpci->pipe.next,
+		spin_lock(&rpci->pipe->lock);
+		if (!list_empty(&rpci->pipe->pipe)) {
+			msg = list_entry(rpci->pipe->pipe.next,
					struct rpc_pipe_msg,
					list);
-			list_move(&msg->list, &rpci->in_upcall);
-			rpci->pipelen -= msg->len;
+			list_move(&msg->list, &rpci->pipe->in_upcall);
+			rpci->pipe->pipelen -= msg->len;
 			filp->private_data = msg;
 			msg->copied = 0;
 		}
-		spin_unlock(&rpci->lock);
+		spin_unlock(&rpci->pipe->lock);
 		if (msg == NULL)
 			goto out_unlock;
 	}
 	/* NOTE: it is up to the callback to update msg->copied */
-	res = rpci->ops->upcall(filp, msg, buf, len);
+	res = rpci->pipe->ops->upcall(filp, msg, buf, len);
 	if (res < 0 || msg->len == msg->copied) {
 		filp->private_data = NULL;
-		spin_lock(&rpci->lock);
+		spin_lock(&rpci->pipe->lock);
 		list_del_init(&msg->list);
-		spin_unlock(&rpci->lock);
-		rpci->ops->destroy_msg(msg);
+		spin_unlock(&rpci->pipe->lock);
+		rpci->pipe->ops->destroy_msg(msg);
 	}
out_unlock:
 	mutex_unlock(&inode->i_mutex);
@@ -334,8 +335,8 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
 
 	mutex_lock(&inode->i_mutex);
 	res = -EPIPE;
-	if (rpci->ops != NULL)
-		res = rpci->ops->downcall(filp, buf, len);
+	if (rpci->pipe->ops != NULL)
+		res = rpci->pipe->ops->downcall(filp, buf, len);
 	mutex_unlock(&inode->i_mutex);
 	return res;
 }
@@ -347,12 +348,12 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
 	unsigned int mask = 0;
 
 	rpci = RPC_I(filp->f_path.dentry->d_inode);
-	poll_wait(filp, &rpci->waitq, wait);
+	poll_wait(filp, &rpci->pipe->waitq, wait);
 
 	mask = POLLOUT | POLLWRNORM;
-	if (rpci->ops == NULL)
+	if (rpci->pipe->ops == NULL)
 		mask |= POLLERR | POLLHUP;
-	if (filp->private_data || !list_empty(&rpci->pipe))
+	if (filp->private_data || !list_empty(&rpci->pipe->pipe))
 		mask |= POLLIN | POLLRDNORM;
 	return mask;
 }
@@ -366,18 +367,18 @@ rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 	switch (cmd) {
 	case FIONREAD:
-		spin_lock(&rpci->lock);
-		if (rpci->ops == NULL) {
-			spin_unlock(&rpci->lock);
+		spin_lock(&rpci->pipe->lock);
+		if (rpci->pipe->ops == NULL) {
+			spin_unlock(&rpci->pipe->lock);
 			return -EPIPE;
 		}
-		len = rpci->pipelen;
+		len = rpci->pipe->pipelen;
 		if (filp->private_data) {
 			struct rpc_pipe_msg *msg;
 			msg = filp->private_data;
 			len += msg->len - msg->copied;
 		}
-		spin_unlock(&rpci->lock);
+		spin_unlock(&rpci->pipe->lock);
 		return put_user(len, (int __user *)arg);
 	default:
 		return -EINVAL;
@@ -562,6 +563,23 @@ static int __rpc_mkdir(struct inode *dir, struct dentry *dentry,
 	return 0;
 }
 
+static void
+init_pipe(struct rpc_pipe *pipe)
+{
+	pipe->nreaders = 0;
+	pipe->nwriters = 0;
+	INIT_LIST_HEAD(&pipe->in_upcall);
+	INIT_LIST_HEAD(&pipe->in_downcall);
+	INIT_LIST_HEAD(&pipe->pipe);
+	pipe->pipelen = 0;
+	init_waitqueue_head(&pipe->waitq);
+	INIT_DELAYED_WORK(&pipe->queue_timeout,
+			rpc_timeout_upcall_queue);
+	pipe->ops = NULL;
+	spin_lock_init(&pipe->lock);
+
+}
+
 static int __rpc_mkpipe(struct inode *dir, struct dentry *dentry,
			umode_t mode,
			const struct file_operations *i_fop,
@@ -569,16 +587,24 @@ static int __rpc_mkpipe(struct inode *dir, struct dentry *dentry,
			const struct rpc_pipe_ops *ops,
			int flags)
 {
+	struct rpc_pipe *pipe;
 	struct rpc_inode *rpci;
 	int err;
 
+	pipe = kzalloc(sizeof(struct rpc_pipe), GFP_KERNEL);
+	if (!pipe)
+		return -ENOMEM;
+	init_pipe(pipe);
 	err = __rpc_create_common(dir, dentry, S_IFIFO | mode, i_fop, private);
-	if (err)
+	if (err) {
+		kfree(pipe);
 		return err;
+	}
 	rpci = RPC_I(dentry->d_inode);
 	rpci->private = private;
-	rpci->flags = flags;
-	rpci->ops = ops;
+	rpci->pipe = pipe;
+	rpci->pipe->flags = flags;
+	rpci->pipe->ops = ops;
 	fsnotify_create(dir, dentry);
 	return 0;
 }
@@ -1142,17 +1168,7 @@ init_once(void *foo)
 
 	inode_init_once(&rpci->vfs_inode);
 	rpci->private = NULL;
-	rpci->nreaders = 0;
-	rpci->nwriters = 0;
-	INIT_LIST_HEAD(&rpci->in_upcall);
-	INIT_LIST_HEAD(&rpci->in_downcall);
-	INIT_LIST_HEAD(&rpci->pipe);
-	rpci->pipelen = 0;
-	init_waitqueue_head(&rpci->waitq);
-	INIT_DELAYED_WORK(&rpci->queue_timeout,
-			rpc_timeout_upcall_queue);
-	rpci->ops = NULL;
-	spin_lock_init(&rpci->lock);
+	rpci->pipe = NULL;
 }
 
 int register_rpc_pipefs(void)