author	Mel Gorman <mgorman@suse.de>	2014-06-04 19:10:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-04 19:54:09 -0400
commit	b745bc85f21ea707e4ea1a91948055fa3e72c77b (patch)
tree	e4d4e8b52ca84acf64b2f333485ecb6edcab8738
parent	7aeb09f9104b760fc53c98cb7d20d06640baf9e6 (diff)
mm: page_alloc: convert hot/cold parameter and immediate callers to bool
cold is a bool, make it one.  Make the likely case the "if" part of the
block instead of the else, as the optimisation manual says this is
preferred.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jan Kara <jack@suse.cz>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
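The diff below is mechanical: every 0/1 argument becomes false/true, and the
parameter type changes from int to bool. As a quick illustration of why the
bool convention reads better at call sites, here is a minimal standalone
sketch (plain userspace C; struct page, free_hot_cold_page_sketch() and the
id field are illustrative stand-ins, not the kernel's definitions):

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-in for free_hot_cold_page(): the flag is a
	 * bool, and the likely (hot) case is the "if" arm. */
	struct page { int id; };

	static void free_hot_cold_page_sketch(struct page *page, bool cold)
	{
		if (!cold)	/* hot page: head of the per-CPU list */
			printf("page %d: list head (hot)\n", page->id);
		else		/* cold page: tail of the per-CPU list */
			printf("page %d: list tail (cold)\n", page->id);
	}

	int main(void)
	{
		struct page p = { .id = 42 };

		free_hot_cold_page_sketch(&p, false);	/* was: ..., 0 */
		free_hot_cold_page_sketch(&p, true);	/* was: ..., 1 */
		return 0;
	}

At the old call sites, a reader had to know what the bare 0 or 1 meant;
with bool, false/true plus the parameter name documents the intent.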
-rw-r--r--	arch/tile/mm/homecache.c	2
-rw-r--r--	fs/fuse/dev.c	2
-rw-r--r--	include/linux/gfp.h	4
-rw-r--r--	include/linux/pagemap.h	2
-rw-r--r--	include/linux/swap.h	2
-rw-r--r--	mm/page_alloc.c	20
-rw-r--r--	mm/swap.c	4
-rw-r--r--	mm/swap_state.c	2
-rw-r--r--	mm/vmscan.c	6
9 files changed, 22 insertions, 22 deletions
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 004ba568d93f..33294fdc402e 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -417,7 +417,7 @@ void __homecache_free_pages(struct page *page, unsigned int order)
 	if (put_page_testzero(page)) {
 		homecache_change_page_home(page, order, PAGE_HOME_HASH);
 		if (order == 0) {
-			free_hot_cold_page(page, 0);
+			free_hot_cold_page(page, false);
 		} else {
 			init_page_count(page);
 			__free_pages(page, order);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index aac71ce373e4..098f97bdcf1b 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1614,7 +1614,7 @@ out_finish:
 
 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
 {
-	release_pages(req->pages, req->num_pages, 0);
+	release_pages(req->pages, req->num_pages, false);
 }
 
 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index d382db71e300..454c99fdb79d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -371,8 +371,8 @@ void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
 
 extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
-extern void free_hot_cold_page(struct page *page, int cold);
-extern void free_hot_cold_page_list(struct list_head *list, int cold);
+extern void free_hot_cold_page(struct page *page, bool cold);
+extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
 extern void __free_kmem_pages(struct page *page, unsigned int order);
 extern void free_kmem_pages(unsigned long addr, unsigned int order);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 718214c5584e..c16fb6d06e36 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -110,7 +110,7 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 
 #define page_cache_get(page)		get_page(page)
 #define page_cache_release(page)	put_page(page)
-void release_pages(struct page **pages, int nr, int cold);
+void release_pages(struct page **pages, int nr, bool cold);
 
 /*
  * speculatively take a reference to a page.
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 9155bcdcce12..97cf16164c46 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -477,7 +477,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
 #define free_page_and_swap_cache(page) \
 	page_cache_release(page)
 #define free_pages_and_swap_cache(pages, nr) \
-	release_pages((pages), (nr), 0);
+	release_pages((pages), (nr), false);
 
 static inline void show_swap_cache_info(void)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 37ef1b87f1f3..09345ab7fb63 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1199,7 +1199,7 @@ retry_reserve:
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
-			int migratetype, int cold)
+			int migratetype, bool cold)
 {
 	int i;
 
@@ -1218,7 +1218,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		 * merge IO requests if the physical pages are ordered
 		 * properly.
 		 */
-		if (likely(cold == 0))
+		if (likely(!cold))
 			list_add(&page->lru, list);
 		else
 			list_add_tail(&page->lru, list);
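The likely(!cold) change above is where the second half of the commit message
lands: the expected (hot) case moves into the "if" arm. In the kernel,
likely() expands to GCC's __builtin_expect(), which steers the compiler to
lay the annotated branch out on the fall-through path. A minimal sketch of
the idiom (the macro bodies below are the standard pattern; classify() is a
made-up example, not from this patch):

	/* Standard GCC branch-prediction hints, as used by the kernel. */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	int classify(int cold)
	{
		/* Expected case first: the compiler keeps this arm on the
		 * straight-line (no taken branch) path. */
		if (likely(!cold))
			return 0;	/* hot: add to list head */
		return 1;		/* cold: add to list tail */
	}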
@@ -1379,9 +1379,9 @@ void mark_free_pages(struct zone *zone)
 
 /*
  * Free a 0-order page
- * cold == 1 ? free a cold page : free a hot page
+ * cold == true ? free a cold page : free a hot page
  */
-void free_hot_cold_page(struct page *page, int cold)
+void free_hot_cold_page(struct page *page, bool cold)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
@@ -1413,10 +1413,10 @@ void free_hot_cold_page(struct page *page, int cold)
 	}
 
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
-	if (cold)
-		list_add_tail(&page->lru, &pcp->lists[migratetype]);
-	else
+	if (!cold)
 		list_add(&page->lru, &pcp->lists[migratetype]);
+	else
+		list_add_tail(&page->lru, &pcp->lists[migratetype]);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
 		unsigned long batch = ACCESS_ONCE(pcp->batch);
@@ -1431,7 +1431,7 @@ out:
 /*
  * Free a list of 0-order pages
  */
-void free_hot_cold_page_list(struct list_head *list, int cold)
+void free_hot_cold_page_list(struct list_head *list, bool cold)
 {
 	struct page *page, *next;
 
@@ -1548,7 +1548,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 {
 	unsigned long flags;
 	struct page *page;
-	int cold = !!(gfp_flags & __GFP_COLD);
+	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 
 again:
 	if (likely(order == 0)) {
@@ -2823,7 +2823,7 @@ void __free_pages(struct page *page, unsigned int order)
 {
 	if (put_page_testzero(page)) {
 		if (order == 0)
-			free_hot_cold_page(page, 0);
+			free_hot_cold_page(page, false);
 		else
 			__free_pages_ok(page, order);
 	}
diff --git a/mm/swap.c b/mm/swap.c
index c8d6df556ce6..11ebb9714f49 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -67,7 +67,7 @@ static void __page_cache_release(struct page *page)
 static void __put_single_page(struct page *page)
 {
 	__page_cache_release(page);
-	free_hot_cold_page(page, 0);
+	free_hot_cold_page(page, false);
 }
 
 static void __put_compound_page(struct page *page)
@@ -860,7 +860,7 @@ void lru_add_drain_all(void)
  * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
  * will free it.
  */
-void release_pages(struct page **pages, int nr, int cold)
+void release_pages(struct page **pages, int nr, bool cold)
 {
 	int i;
 	LIST_HEAD(pages_to_free);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e76ace30d436..2972eee184a4 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -270,7 +270,7 @@ void free_pages_and_swap_cache(struct page **pages, int nr)
 
 		for (i = 0; i < todo; i++)
 			free_swap_cache(pagep[i]);
-		release_pages(pagep, todo, 0);
+		release_pages(pagep, todo, false);
 		pagep += todo;
 		nr -= todo;
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9253e188000f..494cd632178c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1121,7 +1121,7 @@ keep:
 		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
 	}
 
-	free_hot_cold_page_list(&free_pages, 1);
+	free_hot_cold_page_list(&free_pages, true);
 
 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
@@ -1532,7 +1532,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 
 	spin_unlock_irq(&zone->lru_lock);
 
-	free_hot_cold_page_list(&page_list, 1);
+	free_hot_cold_page_list(&page_list, true);
 
 	/*
 	 * If reclaim is isolating dirty pages under writeback, it implies
@@ -1755,7 +1755,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 	spin_unlock_irq(&zone->lru_lock);
 
-	free_hot_cold_page_list(&l_hold, 1);
+	free_hot_cold_page_list(&l_hold, true);
 }
 
 #ifdef CONFIG_SWAP