author     Harvey Harrison <harvey.harrison@gmail.com>    2008-02-08 07:19:52 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-08 12:22:31 -0500
commit     fc9b52cd8f5f459b88adcf67c47668425ae31a78
tree       c29924eaf60d2e73641bf11fa906a23fa81f46c9  /fs
parent     75acb9cd2ef0bbb463098fdd40cbcdda79d45fa3
fs: remove fastcall, it is always empty
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
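
By this point in the tree the annotation expands to nothing on every architecture: the old i386 override (roughly __attribute__((regparm(3)))) had already gone away once register-parameter passing became the default, so only the empty fallback in include/linux/linkage.h remains and dropping the keyword cannot change the generated code. A minimal sketch of those fallback definitions (approximate, not the exact header contents of this tree):

    /* Fallback used when no architecture defines its own FASTCALL/fastcall;
     * with this empty definition, the keyword removed below is a no-op. */
    #ifndef FASTCALL
    #define FASTCALL(x)	x
    #define fastcall
    #endif
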
Diffstat (limited to 'fs')
-rw-r--r--  fs/aio.c         17
-rw-r--r--  fs/buffer.c       6
-rw-r--r--  fs/fcntl.c        2
-rw-r--r--  fs/file_table.c   8
-rw-r--r--  fs/namei.c       16
-rw-r--r--  fs/open.c         4
6 files changed, 26 insertions, 27 deletions
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -317,7 +317,7 @@ out:
 /* wait_on_sync_kiocb:
  * Waits on the given sync kiocb to complete.
  */
-ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
+ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
 {
 	while (iocb->ki_users) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
@@ -336,7 +336,7 @@ ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
  * go away, they will call put_ioctx and release any pinned memory
  * associated with the request (held via struct page * references).
  */
-void fastcall exit_aio(struct mm_struct *mm)
+void exit_aio(struct mm_struct *mm)
 {
 	struct kioctx *ctx = mm->ioctx_list;
 	mm->ioctx_list = NULL;
@@ -365,7 +365,7 @@ void fastcall exit_aio(struct mm_struct *mm)
  * Called when the last user of an aio context has gone away,
  * and the struct needs to be freed.
  */
-void fastcall __put_ioctx(struct kioctx *ctx)
+void __put_ioctx(struct kioctx *ctx)
 {
 	unsigned nr_events = ctx->max_reqs;
 
@@ -397,8 +397,7 @@ void fastcall __put_ioctx(struct kioctx *ctx)
  * This prevents races between the aio code path referencing the
  * req (after submitting it) and aio_complete() freeing the req.
  */
-static struct kiocb *__aio_get_req(struct kioctx *ctx);
-static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
+static struct kiocb *__aio_get_req(struct kioctx *ctx)
 {
 	struct kiocb *req = NULL;
 	struct aio_ring *ring;
@@ -533,7 +532,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
  * Returns true if this put was the last user of the kiocb,
  * false if the request is still in use.
  */
-int fastcall aio_put_req(struct kiocb *req)
+int aio_put_req(struct kiocb *req)
 {
 	struct kioctx *ctx = req->ki_ctx;
 	int ret;
@@ -893,7 +892,7 @@ static void try_queue_kicked_iocb(struct kiocb *iocb)
  * The retry is usually executed by aio workqueue
  * threads (See aio_kick_handler).
  */
-void fastcall kick_iocb(struct kiocb *iocb)
+void kick_iocb(struct kiocb *iocb)
 {
 	/* sync iocbs are easy: they can only ever be executing from a
 	 * single context. */
@@ -912,7 +911,7 @@ EXPORT_SYMBOL(kick_iocb);
  * Returns true if this is the last user of the request.  The
  * only other user of the request can be the cancellation code.
  */
-int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
+int aio_complete(struct kiocb *iocb, long res, long res2)
 {
 	struct kioctx *ctx = iocb->ki_ctx;
 	struct aio_ring_info *info;
@@ -1523,7 +1522,7 @@ static int aio_wake_function(wait_queue_t *wait, unsigned mode,
 	return 1;
 }
 
-int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		struct iocb *iocb)
 {
 	struct kiocb *req;
diff --git a/fs/buffer.c b/fs/buffer.c
index 11b002e01d6e..6f0bddddcf4a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -67,14 +67,14 @@ static int sync_buffer(void *word)
 	return 0;
 }
 
-void fastcall __lock_buffer(struct buffer_head *bh)
+void __lock_buffer(struct buffer_head *bh)
 {
 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
 				TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
 
-void fastcall unlock_buffer(struct buffer_head *bh)
+void unlock_buffer(struct buffer_head *bh)
 {
 	smp_mb__before_clear_bit();
 	clear_buffer_locked(bh);
@@ -1164,7 +1164,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
  * mapping->tree_lock and the global inode_lock.
  */
-void fastcall mark_buffer_dirty(struct buffer_head *bh)
+void mark_buffer_dirty(struct buffer_head *bh)
 {
 	WARN_ON_ONCE(!buffer_uptodate(bh));
 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 7efe59ed1ed8..e632da761fc1 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -24,7 +24,7 @@
 #include <asm/siginfo.h>
 #include <asm/uaccess.h>
 
-void fastcall set_close_on_exec(unsigned int fd, int flag)
+void set_close_on_exec(unsigned int fd, int flag)
 {
 	struct files_struct *files = current->files;
 	struct fdtable *fdt;
diff --git a/fs/file_table.c b/fs/file_table.c
index 664e3f2309b8..6d27befe2d48 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -197,7 +197,7 @@ int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
 }
 EXPORT_SYMBOL(init_file);
 
-void fastcall fput(struct file *file)
+void fput(struct file *file)
 {
 	if (atomic_dec_and_test(&file->f_count))
 		__fput(file);
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(fput);
 /* __fput is called from task context when aio completion releases the last
  * last use of a struct file *.  Do not use otherwise.
  */
-void fastcall __fput(struct file *file)
+void __fput(struct file *file)
 {
 	struct dentry *dentry = file->f_path.dentry;
 	struct vfsmount *mnt = file->f_path.mnt;
@@ -241,7 +241,7 @@ void fastcall __fput(struct file *file)
 	mntput(mnt);
 }
 
-struct file fastcall *fget(unsigned int fd)
+struct file *fget(unsigned int fd)
 {
 	struct file *file;
 	struct files_struct *files = current->files;
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(fget);
  * and a flag is returned to be passed to the corresponding fput_light().
  * There must not be a cloning between an fget_light/fput_light pair.
  */
-struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
+struct file *fget_light(unsigned int fd, int *fput_needed)
 {
 	struct file *file;
 	struct files_struct *files = current->files;
diff --git a/fs/namei.c b/fs/namei.c
index 241cff423653..52703986323a 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -106,7 +106,7 @@
  * any extra contention...
  */
 
-static int fastcall link_path_walk(const char *name, struct nameidata *nd);
+static int link_path_walk(const char *name, struct nameidata *nd);
 
 /* In order to reduce some races, while at the same time doing additional
  * checking and hopefully speeding things up, we copy filenames to the
@@ -823,7 +823,7 @@ fail:
  * Returns 0 and nd will have valid dentry and mnt on success.
  * Returns error and drops reference to input namei data on failure.
  */
-static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
+static int __link_path_walk(const char *name, struct nameidata *nd)
 {
 	struct path next;
 	struct inode *inode;
@@ -1015,7 +1015,7 @@ return_err:
  * Retry the whole path once, forcing real lookup requests
  * instead of relying on the dcache.
  */
-static int fastcall link_path_walk(const char *name, struct nameidata *nd)
+static int link_path_walk(const char *name, struct nameidata *nd)
 {
 	struct nameidata save = *nd;
 	int result;
@@ -1039,7 +1039,7 @@ static int fastcall link_path_walk(const char *name, struct nameidata *nd)
 	return result;
 }
 
-static int fastcall path_walk(const char * name, struct nameidata *nd)
+static int path_walk(const char *name, struct nameidata *nd)
 {
 	current->total_link_count = 0;
 	return link_path_walk(name, nd);
@@ -1116,7 +1116,7 @@ set_it:
 }
 
 /* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
-static int fastcall do_path_lookup(int dfd, const char *name,
+static int do_path_lookup(int dfd, const char *name,
 			unsigned int flags, struct nameidata *nd)
 {
 	int retval = 0;
@@ -1183,7 +1183,7 @@ fput_fail:
 	goto out_fail;
 }
 
-int fastcall path_lookup(const char *name, unsigned int flags,
+int path_lookup(const char *name, unsigned int flags,
 			struct nameidata *nd)
 {
 	return do_path_lookup(AT_FDCWD, name, flags, nd);
@@ -1409,7 +1409,7 @@ struct dentry *lookup_one_noperm(const char *name, struct dentry *base)
 	return __lookup_hash(&this, base, NULL);
 }
 
-int fastcall __user_walk_fd(int dfd, const char __user *name, unsigned flags,
+int __user_walk_fd(int dfd, const char __user *name, unsigned flags,
 		struct nameidata *nd)
 {
 	char *tmp = getname(name);
@@ -1422,7 +1422,7 @@ int fastcall __user_walk_fd(int dfd, const char __user *name, unsigned flags,
 	return err;
 }
 
-int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
+int __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
 {
 	return __user_walk_fd(AT_FDCWD, name, flags, nd);
 }
diff --git a/fs/open.c b/fs/open.c
--- a/fs/open.c
+++ b/fs/open.c
@@ -991,7 +991,7 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd)
 	files->next_fd = fd;
 }
 
-void fastcall put_unused_fd(unsigned int fd)
+void put_unused_fd(unsigned int fd)
 {
 	struct files_struct *files = current->files;
 	spin_lock(&files->file_lock);
@@ -1014,7 +1014,7 @@ EXPORT_SYMBOL(put_unused_fd);
  * will follow.
  */
 
-void fastcall fd_install(unsigned int fd, struct file * file)
+void fd_install(unsigned int fd, struct file *file)
 {
 	struct files_struct *files = current->files;
 	struct fdtable *fdt;