author     Harvey Harrison <harvey.harrison@gmail.com>    2008-02-05 01:29:26 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-05 12:44:18 -0500
commit     920c7a5d0c94b8ce740f1d76fa06422f2a95a757 (patch)
tree       74ab4b9b5a6f4279b9b9d2a463c6700546ba0011 /mm/swap.c
parent     1e548deb5d1630ca14ba04da04e3b6b3766178c7 (diff)
mm: remove fastcall from mm/
fastcall is always defined to be empty, remove it.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
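Background on why the deletion is safe: by this kernel release the fastcall annotation no longer selected a calling convention anywhere, so stripping it cannot change the generated code. A minimal sketch of the no-op fallback in include/linux/linkage.h (assumed, reconstructed from memory rather than taken from this commit), with the historical i386 regparm variant shown only as a comment:

/*
 * Assumed shape of the generic fallback in include/linux/linkage.h around
 * this time; illustrative only, not copied from this commit.
 */
#ifndef fastcall
#define fastcall	/* expands to nothing, so deleting the keyword is a no-op */
#endif

/*
 * Historical i386 override (illustrative only): pass the first three
 * arguments in registers instead of on the stack.
 *
 * #define fastcall __attribute__((regparm(3)))
 */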
Diffstat (limited to 'mm/swap.c')
-rw-r--r--   mm/swap.c   10
1 file changed, 5 insertions, 5 deletions
diff --git a/mm/swap.c b/mm/swap.c
index 9ac88323d237..57b7e25a939c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -41,7 +41,7 @@ static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs) = { 0, };
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs. But it gets used by networking.
  */
-static void fastcall __page_cache_release(struct page *page)
+static void __page_cache_release(struct page *page)
 {
 	if (PageLRU(page)) {
 		unsigned long flags;
@@ -165,7 +165,7 @@ int rotate_reclaimable_page(struct page *page)
 /*
  * FIXME: speed this up?
  */
-void fastcall activate_page(struct page *page)
+void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
 
@@ -186,7 +186,7 @@ void fastcall activate_page(struct page *page)
  * inactive,referenced -> active,unreferenced
  * active,unreferenced -> active,referenced
  */
-void fastcall mark_page_accessed(struct page *page)
+void mark_page_accessed(struct page *page)
 {
 	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
 		activate_page(page);
@@ -202,7 +202,7 @@ EXPORT_SYMBOL(mark_page_accessed);
  * lru_cache_add: add a page to the page lists
  * @page: the page to add
  */
-void fastcall lru_cache_add(struct page *page)
+void lru_cache_add(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
 
@@ -212,7 +212,7 @@ void fastcall lru_cache_add(struct page *page)
 	put_cpu_var(lru_add_pvecs);
 }
 
-void fastcall lru_cache_add_active(struct page *page)
+void lru_cache_add_active(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
 