author      Huang Ying <ying.huang@intel.com>                      2018-08-22 00:52:17 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>        2018-08-22 13:52:44 -0400
commit      a448f2d07f891ac65cc48017f17735ec73086bf0 (patch)
tree        445cc44f37546b6428076b01d351cddf0274250a /mm/swapfile.c
parent      33ee011e5656edef6a58952006c486d342b7bbb5 (diff)
mm/swapfile.c: unify normal/huge code path in put_swap_page()
In this patch, the normal/huge code paths in put_swap_page() and several helper functions are unified to avoid duplicated code and the bugs that come with it, and to make the code easier to review. More lines are removed than added, and the binary size is kept exactly the same when CONFIG_TRANSPARENT_HUGEPAGE=n.

Link: http://lkml.kernel.org/r/20180720071845.17920-6-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
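Before this patch, put_swap_page() was a thin wrapper dispatching on PageTransHuge() to swapcache_free() for normal pages or swapcache_free_cluster() for THPs; afterwards one body handles both. A minimal, self-contained sketch of the resulting shape (standalone C with hypothetical names, not the kernel code itself):

#include <stdbool.h>
#include <stdio.h>

#define CLUSTER_SIZE 256

/* free_one()/free_cluster() stand in for the per-entry and
 * whole-cluster freeing paths; all names here are hypothetical */
static void free_one(long entry)     { printf("free entry %ld\n", entry); }
static void free_cluster(long base)  { printf("free cluster %ld\n", base); }

/* one entry point for base pages (size == 1) and huge pages
 * (size == CLUSTER_SIZE), instead of two near-duplicate functions */
static void put_entries(long entry, int size, bool all_cache_only)
{
        if (size == CLUSTER_SIZE && all_cache_only) {
                free_cluster(entry);            /* fast path: drop whole cluster */
                return;
        }
        for (int i = 0; i < size; i++)          /* shared per-entry path */
                free_one(entry + i);
}

int main(void)
{
        put_entries(100, 1, false);             /* normal page */
        put_entries(512, CLUSTER_SIZE, true);   /* THP, all cache-only */
        return 0;
}

The point of the shape is that the per-entry loop at the end is shared, so a fix made there covers both page sizes at once.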
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--   mm/swapfile.c   83
1 file changed, 37 insertions(+), 46 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 3c1172b53436..043645e7f0b5 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -204,8 +204,16 @@ static void discard_swap_cluster(struct swap_info_struct *si,
 
 #ifdef CONFIG_THP_SWAP
 #define SWAPFILE_CLUSTER	HPAGE_PMD_NR
+
+#define swap_entry_size(size)	(size)
 #else
 #define SWAPFILE_CLUSTER	256
+
+/*
+ * Define swap_entry_size() as constant to let compiler to optimize
+ * out some code if !CONFIG_THP_SWAP
+ */
+#define swap_entry_size(size)	1
 #endif
 #define LATENCY_LIMIT		256
 
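These two definitions carry the "binary size is kept exactly the same" claim: with !CONFIG_THP_SWAP, swap_entry_size() expands to the integer constant 1, so any size == SWAPFILE_CLUSTER comparison in put_swap_page() is compile-time false and the compiler discards the huge-page branch as dead code. A standalone sketch of the pattern (hypothetical function name, not from the patch):

#include <stdio.h>

#define SWAPFILE_CLUSTER 256

/* mimics the !CONFIG_THP_SWAP definition: a constant, not (size) */
#define swap_entry_size(size)	1

void put_page_demo(int npages)
{
        int size = swap_entry_size(npages);

        if (size == SWAPFILE_CLUSTER) {
                /* 1 == 256 is known false at compile time, so the
                 * compiler emits no code for this block at all
                 * (verifiable with objdump -d on the object file) */
                puts("huge path");
                return;
        }
        printf("free %d entry/entries\n", size);
}

int main(void) { put_page_demo(1); return 0; }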
@@ -1192,18 +1200,7 @@ void swap_free(swp_entry_t entry)
 /*
  * Called after dropping swapcache to decrease refcnt to swap entries.
  */
-static void swapcache_free(swp_entry_t entry)
-{
-	struct swap_info_struct *p;
-
-	p = _swap_info_get(entry);
-	if (p) {
-		if (!__swap_entry_free(p, entry, SWAP_HAS_CACHE))
-			free_swap_slot(entry);
-	}
-}
-
-static void swapcache_free_cluster(swp_entry_t entry)
+void put_swap_page(struct page *page, swp_entry_t entry)
 {
 	unsigned long offset = swp_offset(entry);
 	unsigned long idx = offset / SWAPFILE_CLUSTER;
@@ -1212,39 +1209,41 @@ static void swapcache_free_cluster(swp_entry_t entry)
 	unsigned char *map;
 	unsigned int i, free_entries = 0;
 	unsigned char val;
-
-	if (!IS_ENABLED(CONFIG_THP_SWAP))
-		return;
+	int size = swap_entry_size(hpage_nr_pages(page));
 
 	si = _swap_info_get(entry);
 	if (!si)
 		return;
 
-	ci = lock_cluster(si, offset);
-	VM_BUG_ON(!cluster_is_huge(ci));
-	map = si->swap_map + offset;
-	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
-		val = map[i];
-		VM_BUG_ON(!(val & SWAP_HAS_CACHE));
-		if (val == SWAP_HAS_CACHE)
-			free_entries++;
-	}
-	if (!free_entries) {
-		for (i = 0; i < SWAPFILE_CLUSTER; i++)
-			map[i] &= ~SWAP_HAS_CACHE;
-	}
-	cluster_clear_huge(ci);
-	unlock_cluster(ci);
-	if (free_entries == SWAPFILE_CLUSTER) {
-		spin_lock(&si->lock);
+	if (size == SWAPFILE_CLUSTER) {
 		ci = lock_cluster(si, offset);
-		memset(map, 0, SWAPFILE_CLUSTER);
+		VM_BUG_ON(!cluster_is_huge(ci));
+		map = si->swap_map + offset;
+		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
+			val = map[i];
+			VM_BUG_ON(!(val & SWAP_HAS_CACHE));
+			if (val == SWAP_HAS_CACHE)
+				free_entries++;
+		}
+		if (!free_entries) {
+			for (i = 0; i < SWAPFILE_CLUSTER; i++)
+				map[i] &= ~SWAP_HAS_CACHE;
+		}
+		cluster_clear_huge(ci);
 		unlock_cluster(ci);
-		mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
-		swap_free_cluster(si, idx);
-		spin_unlock(&si->lock);
-	} else if (free_entries) {
-		for (i = 0; i < SWAPFILE_CLUSTER; i++, entry.val++) {
+		if (free_entries == SWAPFILE_CLUSTER) {
+			spin_lock(&si->lock);
+			ci = lock_cluster(si, offset);
+			memset(map, 0, SWAPFILE_CLUSTER);
+			unlock_cluster(ci);
+			mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
+			swap_free_cluster(si, idx);
+			spin_unlock(&si->lock);
+			return;
+		}
+	}
+	if (size == 1 || free_entries) {
+		for (i = 0; i < size; i++, entry.val++) {
 			if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE))
 				free_swap_slot(entry);
 		}
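With the paths merged, the call-site contract is a single function taking the page and its swap entry; no PageTransHuge() dispatch remains at callers. A self-contained sketch of what that buys a caller (stub types, all names hypothetical; this is not kernel code):

#include <stdio.h>

/* stub types standing in for the kernel's struct page / swp_entry_t */
struct page { unsigned long private; int nr_pages; };
typedef struct { unsigned long val; } swp_entry_t;

static void put_swap_page_demo(struct page *page, swp_entry_t entry)
{
        printf("put %d swap entr%s starting at %lu\n",
               page->nr_pages, page->nr_pages == 1 ? "y" : "ies", entry.val);
}

int main(void)
{
        struct page base = { .private = 100, .nr_pages = 1 };
        struct page thp  = { .private = 512, .nr_pages = 512 };

        /* the same call covers both cases; the callee works out
         * the size instead of every caller branching on page type */
        put_swap_page_demo(&base, (swp_entry_t){ .val = base.private });
        put_swap_page_demo(&thp,  (swp_entry_t){ .val = thp.private });
        return 0;
}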
@@ -1268,14 +1267,6 @@ int split_swap_cluster(swp_entry_t entry)
 }
 #endif
 
-void put_swap_page(struct page *page, swp_entry_t entry)
-{
-	if (!PageTransHuge(page))
-		swapcache_free(entry);
-	else
-		swapcache_free_cluster(entry);
-}
-
 static int swp_entry_cmp(const void *ent1, const void *ent2)
 {
 	const swp_entry_t *e1 = ent1, *e2 = ent2;