author     Harvey Harrison <harvey.harrison@gmail.com>           2008-02-05 01:29:26 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-05 12:44:18 -0500
commit     920c7a5d0c94b8ce740f1d76fa06422f2a95a757
tree       74ab4b9b5a6f4279b9b9d2a463c6700546ba0011  /mm/swap.c
parent     1e548deb5d1630ca14ba04da04e3b6b3766178c7
mm: remove fastcall from mm/
fastcall is always defined to be empty, remove it
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
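For context, a minimal illustrative sketch (not the kernel's actual linkage.h) of why the annotation is dead weight: since the commit message states that fastcall always expands to nothing, an annotated prototype and a plain one preprocess to exactly the same declaration, so dropping the keyword changes no generated code.

    /* Illustrative sketch only, not the kernel's real headers. */
    #define fastcall        /* per the commit message, always empty */

    struct page;            /* opaque forward declaration, enough for prototypes */

    void fastcall activate_page(struct page *page);  /* before this patch          */
    void activate_page(struct page *page);           /* after this patch: identical */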
Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c  |  10
1 file changed, 5 insertions(+), 5 deletions(-)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -41,7 +41,7 @@ static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs) = { 0, };
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs. But it gets used by networking.
  */
-static void fastcall __page_cache_release(struct page *page)
+static void __page_cache_release(struct page *page)
 {
 	if (PageLRU(page)) {
 		unsigned long flags;
@@ -165,7 +165,7 @@ int rotate_reclaimable_page(struct page *page)
 /*
  * FIXME: speed this up?
  */
-void fastcall activate_page(struct page *page)
+void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
 
@@ -186,7 +186,7 @@ void fastcall activate_page(struct page *page)
  * inactive,referenced -> active,unreferenced
  * active,unreferenced -> active,referenced
  */
-void fastcall mark_page_accessed(struct page *page)
+void mark_page_accessed(struct page *page)
 {
 	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
 		activate_page(page);
@@ -202,7 +202,7 @@ EXPORT_SYMBOL(mark_page_accessed);
  * lru_cache_add: add a page to the page lists
  * @page: the page to add
  */
-void fastcall lru_cache_add(struct page *page)
+void lru_cache_add(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
 
@@ -212,7 +212,7 @@ void fastcall lru_cache_add(struct page *page)
 	put_cpu_var(lru_add_pvecs);
 }
 
-void fastcall lru_cache_add_active(struct page *page)
+void lru_cache_add_active(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
 