diff options
author | Miklos Szeredi <miklos@szeredi.hu> | 2006-01-17 01:14:28 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-17 02:15:29 -0500 |
commit | 8bfc016d2e2fff71c6843257f0fd0b60876331ed (patch) | |
tree | c12cff675fcd734a93a274545d93ef153939ad9f /fs/fuse/dev.c | |
parent | b3bebd94bbe4e59dfa23d85b0296a4ce8ebcc6c7 (diff) |
[PATCH] fuse: uninline some functions
The inline keyword is unnecessary in most cases; clean these up.
Signed-off-by: Miklos Szeredi <miklos@szeredi.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r-- | fs/fuse/dev.c | 21 |
1 file changed, 10 insertions, 11 deletions
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 8244e89a8dd6..d76432b757c9 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -21,7 +21,7 @@ MODULE_ALIAS_MISCDEV(FUSE_MINOR); | |||
21 | 21 | ||
22 | static kmem_cache_t *fuse_req_cachep; | 22 | static kmem_cache_t *fuse_req_cachep; |
23 | 23 | ||
24 | static inline struct fuse_conn *fuse_get_conn(struct file *file) | 24 | static struct fuse_conn *fuse_get_conn(struct file *file) |
25 | { | 25 | { |
26 | struct fuse_conn *fc; | 26 | struct fuse_conn *fc; |
27 | spin_lock(&fuse_lock); | 27 | spin_lock(&fuse_lock); |
@@ -32,7 +32,7 @@ static inline struct fuse_conn *fuse_get_conn(struct file *file) | |||
32 | return fc; | 32 | return fc; |
33 | } | 33 | } |
34 | 34 | ||
35 | static inline void fuse_request_init(struct fuse_req *req) | 35 | static void fuse_request_init(struct fuse_req *req) |
36 | { | 36 | { |
37 | memset(req, 0, sizeof(*req)); | 37 | memset(req, 0, sizeof(*req)); |
38 | INIT_LIST_HEAD(&req->list); | 38 | INIT_LIST_HEAD(&req->list); |
@@ -53,7 +53,7 @@ void fuse_request_free(struct fuse_req *req) | |||
53 | kmem_cache_free(fuse_req_cachep, req); | 53 | kmem_cache_free(fuse_req_cachep, req); |
54 | } | 54 | } |
55 | 55 | ||
56 | static inline void block_sigs(sigset_t *oldset) | 56 | static void block_sigs(sigset_t *oldset) |
57 | { | 57 | { |
58 | sigset_t mask; | 58 | sigset_t mask; |
59 | 59 | ||
@@ -61,7 +61,7 @@ static inline void block_sigs(sigset_t *oldset) | |||
61 | sigprocmask(SIG_BLOCK, &mask, oldset); | 61 | sigprocmask(SIG_BLOCK, &mask, oldset); |
62 | } | 62 | } |
63 | 63 | ||
64 | static inline void restore_sigs(sigset_t *oldset) | 64 | static void restore_sigs(sigset_t *oldset) |
65 | { | 65 | { |
66 | sigprocmask(SIG_SETMASK, oldset, NULL); | 66 | sigprocmask(SIG_SETMASK, oldset, NULL); |
67 | } | 67 | } |
@@ -385,7 +385,7 @@ void fuse_send_init(struct fuse_conn *fc) | |||
385 | * anything that could cause a page-fault. If the request was already | 385 | * anything that could cause a page-fault. If the request was already |
386 | * interrupted bail out. | 386 | * interrupted bail out. |
387 | */ | 387 | */ |
388 | static inline int lock_request(struct fuse_req *req) | 388 | static int lock_request(struct fuse_req *req) |
389 | { | 389 | { |
390 | int err = 0; | 390 | int err = 0; |
391 | if (req) { | 391 | if (req) { |
@@ -404,7 +404,7 @@ static inline int lock_request(struct fuse_req *req) | |||
404 | * requester thread is currently waiting for it to be unlocked, so | 404 | * requester thread is currently waiting for it to be unlocked, so |
405 | * wake it up. | 405 | * wake it up. |
406 | */ | 406 | */ |
407 | static inline void unlock_request(struct fuse_req *req) | 407 | static void unlock_request(struct fuse_req *req) |
408 | { | 408 | { |
409 | if (req) { | 409 | if (req) { |
410 | spin_lock(&fuse_lock); | 410 | spin_lock(&fuse_lock); |
@@ -440,7 +440,7 @@ static void fuse_copy_init(struct fuse_copy_state *cs, int write, | |||
440 | } | 440 | } |
441 | 441 | ||
442 | /* Unmap and put previous page of userspace buffer */ | 442 | /* Unmap and put previous page of userspace buffer */ |
443 | static inline void fuse_copy_finish(struct fuse_copy_state *cs) | 443 | static void fuse_copy_finish(struct fuse_copy_state *cs) |
444 | { | 444 | { |
445 | if (cs->mapaddr) { | 445 | if (cs->mapaddr) { |
446 | kunmap_atomic(cs->mapaddr, KM_USER0); | 446 | kunmap_atomic(cs->mapaddr, KM_USER0); |
@@ -489,8 +489,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs) | |||
489 | } | 489 | } |
490 | 490 | ||
491 | /* Do as much copy to/from userspace buffer as we can */ | 491 | /* Do as much copy to/from userspace buffer as we can */ |
492 | static inline int fuse_copy_do(struct fuse_copy_state *cs, void **val, | 492 | static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) |
493 | unsigned *size) | ||
494 | { | 493 | { |
495 | unsigned ncpy = min(*size, cs->len); | 494 | unsigned ncpy = min(*size, cs->len); |
496 | if (val) { | 495 | if (val) { |
@@ -510,8 +509,8 @@ static inline int fuse_copy_do(struct fuse_copy_state *cs, void **val, | |||
510 | * Copy a page in the request to/from the userspace buffer. Must be | 509 | * Copy a page in the request to/from the userspace buffer. Must be |
511 | * done atomically | 510 | * done atomically |
512 | */ | 511 | */ |
513 | static inline int fuse_copy_page(struct fuse_copy_state *cs, struct page *page, | 512 | static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page, |
514 | unsigned offset, unsigned count, int zeroing) | 513 | unsigned offset, unsigned count, int zeroing) |
515 | { | 514 | { |
516 | if (page && zeroing && count < PAGE_SIZE) { | 515 | if (page && zeroing && count < PAGE_SIZE) { |
517 | void *mapaddr = kmap_atomic(page, KM_USER1); | 516 | void *mapaddr = kmap_atomic(page, KM_USER1); |