author	Huang Ying <ying.huang@intel.com>	2018-08-22 00:52:05 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-22 13:52:43 -0400
commit	fe5266d5d5948abc6e71cdabb98e3f5cba811205 (patch)
tree	69d3ce4c5679a0449e3b5903b3340a2b2ac60036 /mm/swapfile.c
parent	59d98bf3c2b9218ff4cdb2a70aa52fffedf1786c (diff)
mm/swapfile.c: replace some #ifdef with IS_ENABLED()
In mm/swapfile.c, THP (Transparent Huge Page) swap specific code is enclosed by #ifdef CONFIG_THP_SWAP/#endif to avoid code bloat when THP isn't enabled. But #ifdef/#endif in a .c file hurts code readability, so Dave suggested using IS_ENABLED(CONFIG_THP_SWAP) instead and letting the compiler do the dirty work for us. This also has the potential to remove some duplicated code.

From the output of `size`:

			text	data	bss	dec	hex	filename
	THP=y:		26269	2076	340	28685	700d	mm/swapfile.o
	ifdef/endif:	24115	2028	340	26483	6773	mm/swapfile.o
	IS_ENABLED:	24179	2028	340	26547	67b3	mm/swapfile.o

The IS_ENABLED() based solution works quite well, almost as good as the #ifdef/#endif one. And as the diffstat shows, more lines are removed than added.

One #ifdef, around split_swap_cluster(), is kept because that is a public function with a stub implementation for CONFIG_THP_SWAP=n in swap.h.

Link: http://lkml.kernel.org/r/20180720071845.17920-3-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Suggested-and-acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
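To make the pattern concrete, here is a minimal, userspace-buildable sketch (not taken from this patch): thp_swap_op() is a hypothetical function, and the IS_ENABLED() definition below is a cut-down stand-in for the real macro in include/linux/kconfig.h, which additionally handles the CONFIG_FOO_MODULE (=m) case. The point is that both branches are always compiled and type-checked, while the disabled one is eliminated because the condition is a compile-time constant:

	#include <stdio.h>

	/* Pretend Kconfig output; comment this out to simulate CONFIG_THP_SWAP=n. */
	#define CONFIG_THP_SWAP 1

	/*
	 * Cut-down IS_ENABLED(): evaluates to 1 if "option" is a macro defined
	 * to 1, else 0.  Simplified from include/linux/kconfig.h.
	 */
	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
	#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
	#define __is_defined(x) ___is_defined(x)
	#define IS_ENABLED(option) __is_defined(option)

	/*
	 * Old style needs two copies of the function, selected by the
	 * preprocessor:
	 *
	 *	#ifdef CONFIG_THP_SWAP
	 *	static int thp_swap_op(void) { ... real work ... }
	 *	#else
	 *	static int thp_swap_op(void) { return 0; }
	 *	#endif
	 *
	 * New style keeps one copy; the compiler sees a constant condition and
	 * drops the dead branch, so the generated code stays almost as small.
	 */
	static int thp_swap_op(void)
	{
		if (!IS_ENABLED(CONFIG_THP_SWAP))
			return 0;	/* stub behaviour when the feature is off */

		return 42;		/* stands in for the real THP swap path */
	}

	int main(void)
	{
		printf("thp_swap_op() = %d\n", thp_swap_op());
		return 0;
	}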
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--	mm/swapfile.c	| 60
1 file changed, 20 insertions(+), 40 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 52a9dd9dab8e..618358ad464b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -868,7 +868,6 @@ no_page:
 	return n_ret;
 }
 
-#ifdef CONFIG_THP_SWAP
 static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
 {
 	unsigned long idx;
@@ -876,6 +875,15 @@ static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
 	unsigned long offset, i;
 	unsigned char *map;
 
+	/*
+	 * Should not even be attempting cluster allocations when huge
+	 * page swap is disabled.  Warn and fail the allocation.
+	 */
+	if (!IS_ENABLED(CONFIG_THP_SWAP)) {
+		VM_WARN_ON_ONCE(1);
+		return 0;
+	}
+
 	if (cluster_list_empty(&si->free_clusters))
 		return 0;
 
@@ -906,13 +914,6 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
 	unlock_cluster(ci);
 	swap_range_free(si, offset, SWAPFILE_CLUSTER);
 }
-#else
-static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
-{
-	VM_WARN_ON_ONCE(1);
-	return 0;
-}
-#endif /* CONFIG_THP_SWAP */
 
 static unsigned long scan_swap_map(struct swap_info_struct *si,
 				   unsigned char usage)
@@ -1200,7 +1201,6 @@ static void swapcache_free(swp_entry_t entry)
 	}
 }
 
-#ifdef CONFIG_THP_SWAP
 static void swapcache_free_cluster(swp_entry_t entry)
 {
 	unsigned long offset = swp_offset(entry);
@@ -1211,6 +1211,9 @@ static void swapcache_free_cluster(swp_entry_t entry)
 	unsigned int i, free_entries = 0;
 	unsigned char val;
 
+	if (!IS_ENABLED(CONFIG_THP_SWAP))
+		return;
+
 	si = _swap_info_get(entry);
 	if (!si)
 		return;
@@ -1246,6 +1249,7 @@ static void swapcache_free_cluster(swp_entry_t entry)
 	}
 }
 
+#ifdef CONFIG_THP_SWAP
 int split_swap_cluster(swp_entry_t entry)
 {
 	struct swap_info_struct *si;
@@ -1260,11 +1264,7 @@ int split_swap_cluster(swp_entry_t entry)
 	unlock_cluster(ci);
 	return 0;
 }
-#else
-static inline void swapcache_free_cluster(swp_entry_t entry)
-{
-}
-#endif /* CONFIG_THP_SWAP */
+#endif
 
 void put_swap_page(struct page *page, swp_entry_t entry)
 {
@@ -1414,7 +1414,6 @@ out:
 	return count;
 }
 
-#ifdef CONFIG_THP_SWAP
 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
 					 swp_entry_t entry)
 {
@@ -1425,6 +1424,9 @@ static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
 	int i;
 	bool ret = false;
 
+	if (!IS_ENABLED(CONFIG_THP_SWAP))
+		return swap_swapcount(si, entry) != 0;
+
 	ci = lock_cluster_or_swap_info(si, offset);
 	if (!ci || !cluster_is_huge(ci)) {
 		if (map[roffset] != SWAP_HAS_CACHE)
@@ -1447,7 +1449,7 @@ static bool page_swapped(struct page *page)
 	swp_entry_t entry;
 	struct swap_info_struct *si;
 
-	if (likely(!PageTransCompound(page)))
+	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
 		return page_swapcount(page) != 0;
 
 	page = compound_head(page);
@@ -1471,10 +1473,8 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
 	/* hugetlbfs shouldn't call it */
 	VM_BUG_ON_PAGE(PageHuge(page), page);
 
-	if (likely(!PageTransCompound(page))) {
-		mapcount = atomic_read(&page->_mapcount) + 1;
-		if (total_mapcount)
-			*total_mapcount = mapcount;
+	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
+		mapcount = page_trans_huge_mapcount(page, total_mapcount);
 		if (PageSwapCache(page))
 			swapcount = page_swapcount(page);
 		if (total_swapcount)
@@ -1521,26 +1521,6 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
 
 	return map_swapcount;
 }
-#else
-#define swap_page_trans_huge_swapped(si, entry)	swap_swapcount(si, entry)
-#define page_swapped(page)			(page_swapcount(page) != 0)
-
-static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
-					 int *total_swapcount)
-{
-	int mapcount, swapcount = 0;
-
-	/* hugetlbfs shouldn't call it */
-	VM_BUG_ON_PAGE(PageHuge(page), page);
-
-	mapcount = page_trans_huge_mapcount(page, total_mapcount);
-	if (PageSwapCache(page))
-		swapcount = page_swapcount(page);
-	if (total_swapcount)
-		*total_swapcount = swapcount;
-	return mapcount + swapcount;
-}
-#endif
 
 /*
  * We can write to an anon page without COW if there are no other references