aboutsummaryrefslogtreecommitdiffstats
path: root/mm/swapfile.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-03-22 12:04:48 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-03-22 12:04:48 -0400
commit95211279c5ad00a317c98221d7e4365e02f20836 (patch)
tree2ddc8625378d2915b8c96392f3cf6663b705ed55 /mm/swapfile.c
parent5375871d432ae9fc581014ac117b96aaee3cd0c7 (diff)
parent12724850e8064f64b6223d26d78c0597c742c65a (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge first batch of patches from Andrew Morton: "A few misc things and all the MM queue" * emailed from Andrew Morton <akpm@linux-foundation.org>: (92 commits) memcg: avoid THP split in task migration thp: add HPAGE_PMD_* definitions for !CONFIG_TRANSPARENT_HUGEPAGE memcg: clean up existing move charge code mm/memcontrol.c: remove unnecessary 'break' in mem_cgroup_read() mm/memcontrol.c: remove redundant BUG_ON() in mem_cgroup_usage_unregister_event() mm/memcontrol.c: s/stealed/stolen/ memcg: fix performance of mem_cgroup_begin_update_page_stat() memcg: remove PCG_FILE_MAPPED memcg: use new logic for page stat accounting memcg: remove PCG_MOVE_LOCK flag from page_cgroup memcg: simplify move_account() check memcg: remove EXPORT_SYMBOL(mem_cgroup_update_page_stat) memcg: kill dead prev_priority stubs memcg: remove PCG_CACHE page_cgroup flag memcg: let css_get_next() rely upon rcu_read_lock() cgroup: revert ss_id_lock to spinlock idr: make idr_get_next() good for rcu_read_lock() memcg: remove unnecessary thp check in page stat accounting memcg: remove redundant returns memcg: enum lru_list lru ...
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--mm/swapfile.c58
1 file changed, 2 insertions(+), 56 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6bf67ab6e469..dae42f380d6e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		if (unlikely(pmd_trans_huge(*pmd)))
-			continue;
-		if (pmd_none_or_clear_bad(pmd))
+		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			continue;
 		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
 		if (ret)
@@ -2107,7 +2105,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		p->flags |= SWP_SOLIDSTATE;
 		p->cluster_next = 1 + (random32() % p->highest_bit);
 	}
-	if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
+	if ((swap_flags & SWAP_FLAG_DISCARD) && discard_swap(p) == 0)
 		p->flags |= SWP_DISCARDABLE;
 	}
 
@@ -2292,58 +2290,6 @@ int swapcache_prepare(swp_entry_t entry)
 }
 
 /*
- * swap_lock prevents swap_map being freed. Don't grab an extra
- * reference on the swaphandle, it doesn't matter if it becomes unused.
- */
-int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
-{
-	struct swap_info_struct *si;
-	int our_page_cluster = page_cluster;
-	pgoff_t target, toff;
-	pgoff_t base, end;
-	int nr_pages = 0;
-
-	if (!our_page_cluster)	/* no readahead */
-		return 0;
-
-	si = swap_info[swp_type(entry)];
-	target = swp_offset(entry);
-	base = (target >> our_page_cluster) << our_page_cluster;
-	end = base + (1 << our_page_cluster);
-	if (!base)		/* first page is swap header */
-		base++;
-
-	spin_lock(&swap_lock);
-	if (end > si->max)	/* don't go beyond end of map */
-		end = si->max;
-
-	/* Count contiguous allocated slots above our target */
-	for (toff = target; ++toff < end; nr_pages++) {
-		/* Don't read in free or bad pages */
-		if (!si->swap_map[toff])
-			break;
-		if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
-			break;
-	}
-	/* Count contiguous allocated slots below our target */
-	for (toff = target; --toff >= base; nr_pages++) {
-		/* Don't read in free or bad pages */
-		if (!si->swap_map[toff])
-			break;
-		if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
-			break;
-	}
-	spin_unlock(&swap_lock);
-
-	/*
-	 * Indicate starting offset, and return number of pages to get:
-	 * if only 1, say 0, since there's then no readahead to be done.
-	 */
-	*offset = ++toff;
-	return nr_pages? ++nr_pages: 0;
-}
-
-/*
  * add_swap_count_continuation - called when a swap count is duplicated
  * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
  * page of the original vmalloc'ed swap_map, to hold the continuation count