path: root/mm/filemap.c
author    Harvey Harrison <harvey.harrison@gmail.com>    2008-02-05 01:29:26 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-05 12:44:18 -0500
commit    920c7a5d0c94b8ce740f1d76fa06422f2a95a757 (patch)
tree      74ab4b9b5a6f4279b9b9d2a463c6700546ba0011 /mm/filemap.c
parent    1e548deb5d1630ca14ba04da04e3b6b3766178c7 (diff)
mm: remove fastcall from mm/
fastcall is always defined to be empty, remove it.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
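For context, fastcall is a function-annotation macro from include/linux/linkage.h; by this point it no longer expands to anything on any architecture (the regparm(3) calling convention is already the default on i386), so removing it is a pure cleanup with no effect on code generation. A minimal sketch of the by-then no-op definitions, as an illustration rather than a verbatim copy of the header:

/* include/linux/linkage.h (sketch, not verbatim) -- fastcall no longer
 * changes the calling convention, so both forms expand to nothing extra. */
#define fastcall
#define FASTCALL(x)	x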
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 96920f840562..81fb9bff0d4f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -527,7 +527,7 @@ static inline void wake_up_page(struct page *page, int bit)
 	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
 
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
@@ -551,7 +551,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * the clear_bit and the read of the waitqueue (to avoid SMP races with a
  * parallel wait_on_page_locked()).
  */
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
 {
 	smp_mb__before_clear_bit();
 	if (!TestClearPageLocked(page))
@@ -585,7 +585,7 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
@@ -606,7 +606,7 @@ int fastcall __lock_page_killable(struct page *page)
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
@@ -1276,7 +1276,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
  */
-static int fastcall page_cache_read(struct file * file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *page;