 mm/filemap.c        | 10 +++++-----
 mm/highmem.c        |  4 ++--
 mm/internal.h       |  2 +-
 mm/memory.c         |  3 ++-
 mm/page-writeback.c |  2 +-
 mm/page_alloc.c     | 16 ++++++++--------
 mm/swap.c           | 10 +++++-----
 7 files changed, 24 insertions(+), 23 deletions(-)
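
Every hunk in this commit deletes the fastcall annotation from a function definition under mm/. By this kernel's vintage the annotation was already defined to nothing on every architecture, so the change is purely cosmetic. A sketch of the historical definition, reconstructed from the i386 linkage headers of earlier kernels (an assumption for context, not part of this diff):

/* Assumed historical definition (not from this commit): on i386 the
 * annotation once forced a register-based calling convention; every
 * other architecture, and eventually i386 too, defined it to nothing,
 * which is why deleting it here changes no generated code. */
#ifdef __i386__
#define fastcall        __attribute__((regparm(3)))     /* args in eax/edx/ecx */
#else
#define fastcall                                        /* expands to nothing */
#endif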
diff --git a/mm/filemap.c b/mm/filemap.c
index 96920f840562..81fb9bff0d4f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -527,7 +527,7 @@ static inline void wake_up_page(struct page *page, int bit)
         __wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
 
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
 {
         DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
@@ -551,7 +551,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * the clear_bit and the read of the waitqueue (to avoid SMP races with a
  * parallel wait_on_page_locked()).
  */
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
 {
         smp_mb__before_clear_bit();
         if (!TestClearPageLocked(page))
@@ -585,7 +585,7 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
         DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
@@ -606,7 +606,7 @@ int fastcall __lock_page_killable(struct page *page)
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
 {
         DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
         __wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
@@ -1276,7 +1276,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
  */
-static int fastcall page_cache_read(struct file * file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
 {
         struct address_space *mapping = file->f_mapping;
         struct page *page;
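
For context on the barrier comment in the unlock_page() hunk above: only the first lines of the function appear in the diff. A reconstruction of the full body from kernels of the same vintage (a sketch, not the exact file contents):

void unlock_page(struct page *page)
{
        smp_mb__before_clear_bit();     /* make prior page updates visible */
        if (!TestClearPageLocked(page))
                BUG();                  /* unlocking an unlocked page is a bug */
        smp_mb__after_clear_bit();      /* order the clear against the waitqueue read */
        wake_up_page(page, PG_locked);  /* wake any parallel wait_on_page_locked() */
}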
diff --git a/mm/highmem.c b/mm/highmem.c
index 7a967bc35152..35d47733cde4 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -163,7 +163,7 @@ start:
         return vaddr;
 }
 
-void fastcall *kmap_high(struct page *page)
+void *kmap_high(struct page *page)
 {
         unsigned long vaddr;
 
@@ -185,7 +185,7 @@ void fastcall *kmap_high(struct page *page)
 
 EXPORT_SYMBOL(kmap_high);
 
-void fastcall kunmap_high(struct page *page)
+void kunmap_high(struct page *page)
 {
         unsigned long vaddr;
         unsigned long nr;
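
kmap_high() and kunmap_high() are the slow paths behind kmap()/kunmap() for highmem pages. A hypothetical caller sketch (example_zero_page is illustrative, not a kernel function):

#include <linux/highmem.h>

static void example_zero_page(struct page *page)
{
        void *vaddr = kmap(page);       /* falls through to kmap_high() for highmem */

        memset(vaddr, 0, PAGE_SIZE);
        kunmap(page);                   /* kunmap_high() if the page was highmem */
}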
diff --git a/mm/internal.h b/mm/internal.h
index 953f941ea867..1e34d2462a48 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -34,7 +34,7 @@ static inline void __put_page(struct page *page)
         atomic_dec(&page->_count);
 }
 
-extern void fastcall __init __free_pages_bootmem(struct page *page,
+extern void __init __free_pages_bootmem(struct page *page,
                                                 unsigned int order);
 
 /*
diff --git a/mm/memory.c b/mm/memory.c
index 1c81fc2174cd..6a9c048f6012 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1109,7 +1109,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(get_user_pages);
 
-pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
+pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+                        spinlock_t **ptl)
 {
         pgd_t * pgd = pgd_offset(mm, addr);
         pud_t * pud = pud_alloc(mm, pgd, addr);
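
get_locked_pte() returns the pte entry for addr mapped with its page-table lock held, allocating intermediate levels as needed. A hypothetical caller sketch (example_set_pte is illustrative only):

static int example_set_pte(struct mm_struct *mm, unsigned long addr, pte_t val)
{
        spinlock_t *ptl;
        pte_t *pte = get_locked_pte(mm, addr, &ptl);

        if (!pte)
                return -ENOMEM;         /* page-table allocation failed */
        set_pte_at(mm, addr, pte, val); /* install the entry under ptl */
        pte_unmap_unlock(pte, ptl);     /* drop the kmap and the lock together */
        return 0;
}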
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c689b60af000..a4ca162666c5 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1073,7 +1073,7 @@ static int __set_page_dirty(struct page *page)
         return 0;
 }
 
-int fastcall set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
 {
         int ret = __set_page_dirty(page);
         if (ret)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 55fe57cd99a1..d73c133fdbe1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -537,7 +537,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 /*
  * permit the bootmem allocator to evade page validation on high-order frees
  */
-void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+void __init __free_pages_bootmem(struct page *page, unsigned int order)
 {
         if (order == 0) {
                 __ClearPageReserved(page);
@@ -974,7 +974,7 @@ void mark_free_pages(struct zone *zone)
 /*
  * Free a 0-order page
  */
-static void fastcall free_hot_cold_page(struct page *page, int cold)
+static void free_hot_cold_page(struct page *page, int cold)
 {
         struct zone *zone = page_zone(page);
         struct per_cpu_pages *pcp;
@@ -1007,12 +1007,12 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
         put_cpu();
 }
 
-void fastcall free_hot_page(struct page *page)
+void free_hot_page(struct page *page)
 {
         free_hot_cold_page(page, 0);
 }
 
-void fastcall free_cold_page(struct page *page)
+void free_cold_page(struct page *page)
 {
         free_hot_cold_page(page, 1);
 }
@@ -1641,7 +1641,7 @@ EXPORT_SYMBOL(__alloc_pages);
 /*
  * Common helper functions.
  */
-fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
         struct page * page;
         page = alloc_pages(gfp_mask, order);
@@ -1652,7 +1652,7 @@ fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 
 EXPORT_SYMBOL(__get_free_pages);
 
-fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
+unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
         struct page * page;
 
@@ -1678,7 +1678,7 @@ void __pagevec_free(struct pagevec *pvec)
                 free_hot_cold_page(pvec->pages[i], pvec->cold);
 }
 
-fastcall void __free_pages(struct page *page, unsigned int order)
+void __free_pages(struct page *page, unsigned int order)
 {
         if (put_page_testzero(page)) {
                 if (order == 0)
@@ -1690,7 +1690,7 @@ fastcall void __free_pages(struct page *page, unsigned int order)
 
 EXPORT_SYMBOL(__free_pages);
 
-fastcall void free_pages(unsigned long addr, unsigned int order)
+void free_pages(unsigned long addr, unsigned int order)
 {
         if (addr != 0) {
                 VM_BUG_ON(!virt_addr_valid((void *)addr));
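
The page_alloc.c helpers touched above come in two flavors: __get_free_pages()/free_pages() work on kernel virtual addresses, while alloc_pages()/__free_pages() work on struct page pointers. A usage sketch (the surrounding function is illustrative only):

static int example_alloc(void)
{
        unsigned long buf = __get_free_pages(GFP_KERNEL, 1);    /* 2^1 contiguous pages */

        if (!buf)
                return -ENOMEM;
        /* ... use the 2-page buffer at buf ... */
        free_pages(buf, 1);             /* order must match the allocation */
        return 0;
}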
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -41,7 +41,7 @@ static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs) = { 0, };
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs. But it gets used by networking.
  */
-static void fastcall __page_cache_release(struct page *page)
+static void __page_cache_release(struct page *page)
 {
         if (PageLRU(page)) {
                 unsigned long flags;
@@ -165,7 +165,7 @@ int rotate_reclaimable_page(struct page *page)
 /*
  * FIXME: speed this up?
  */
-void fastcall activate_page(struct page *page)
+void activate_page(struct page *page)
 {
         struct zone *zone = page_zone(page);
 
@@ -186,7 +186,7 @@ void fastcall activate_page(struct page *page)
  * inactive,referenced    ->    active,unreferenced
  * active,unreferenced    ->    active,referenced
  */
-void fastcall mark_page_accessed(struct page *page)
+void mark_page_accessed(struct page *page)
 {
         if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
                 activate_page(page);
@@ -202,7 +202,7 @@ EXPORT_SYMBOL(mark_page_accessed);
  * lru_cache_add: add a page to the page lists
  * @page: the page to add
  */
-void fastcall lru_cache_add(struct page *page)
+void lru_cache_add(struct page *page)
 {
         struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
 
@@ -212,7 +212,7 @@ void fastcall lru_cache_add(struct page *page)
         put_cpu_var(lru_add_pvecs);
 }
 
-void fastcall lru_cache_add_active(struct page *page)
+void lru_cache_add_active(struct page *page)
 {
         struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
 
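
The mark_page_accessed() hunk above shows only the first branch of the two-step promotion described in its comment. A reconstruction of the full function from kernels of the same vintage (a sketch, not the exact file contents):

void mark_page_accessed(struct page *page)
{
        if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
                activate_page(page);            /* second touch: move to active list */
                ClearPageReferenced(page);      /* restart the referenced cycle */
        } else if (!PageReferenced(page)) {
                SetPageReferenced(page);        /* first touch: mark referenced only */
        }
}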