Diffstat (limited to 'mm')
-rw-r--r--   mm/Kconfig        4
-rw-r--r--   mm/kmemleak.c     5
-rw-r--r--   mm/ksm.c         10
-rw-r--r--   mm/memcontrol.c 127
-rw-r--r--   mm/rmap.c         4
-rw-r--r--   mm/swapfile.c    12
-rw-r--r--   mm/vmalloc.c     50
7 files changed, 104 insertions, 108 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index edd300aca173..57963c6063d1 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -224,7 +224,9 @@ config KSM
           the many instances by a single resident page with that content, so
           saving memory until one or another app needs to modify the content.
           Recommended for use with KVM, or with other duplicative applications.
-          See Documentation/vm/ksm.txt for more information.
+          See Documentation/vm/ksm.txt for more information: KSM is inactive
+          until a program has madvised that an area is MADV_MERGEABLE, and
+          root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
 
 config DEFAULT_MMAP_MIN_ADDR
         int "Low address space to protect from user allocation"
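
The new Kconfig help text above summarizes how KSM is switched on: an application marks a region with madvise(MADV_MERGEABLE), and root then writes 1 to /sys/kernel/mm/ksm/run. A minimal userspace sketch of the madvise side, not part of the patch; the MADV_MERGEABLE fallback define is only needed with older libc headers and its value is taken from asm-generic/mman-common.h:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_MERGEABLE
#define MADV_MERGEABLE 12               /* from asm-generic/mman-common.h */
#endif

int main(void)
{
        size_t len = 64 * 4096;
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        memset(buf, 0x5a, len);         /* identical pages are merge candidates */
        if (madvise(buf, len, MADV_MERGEABLE))
                perror("madvise(MADV_MERGEABLE)");
        /* nothing is merged until root runs: echo 1 > /sys/kernel/mm/ksm/run */
        pause();
        return 0;
}
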
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 4ea4510e2996..8bf765c4f58d 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -833,12 +833,15 @@ static void early_alloc(struct early_log *log)
          */
         rcu_read_lock();
         object = create_object((unsigned long)log->ptr, log->size,
-                               log->min_count, GFP_KERNEL);
+                               log->min_count, GFP_ATOMIC);
+        if (!object)
+                goto out;
         spin_lock_irqsave(&object->lock, flags);
         for (i = 0; i < log->trace_len; i++)
                 object->trace[i] = log->trace[i];
         object->trace_len = log->trace_len;
         spin_unlock_irqrestore(&object->lock, flags);
+out:
         rcu_read_unlock();
 }
 
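
The early_alloc() change above replaces GFP_KERNEL with GFP_ATOMIC and adds a NULL check: the allocation is made while rcu_read_lock() is held, where blocking is not allowed, and an atomic allocation can fail. A minimal sketch of that general pattern, using hypothetical names (my_record, my_record_alloc) rather than kmemleak's own code:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_record {                      /* hypothetical example structure */
        int value;
};

static void my_record_alloc(int value)
{
        struct my_record *rec;

        rcu_read_lock();
        /* inside an RCU read-side critical section: must not sleep */
        rec = kmalloc(sizeof(*rec), GFP_ATOMIC);
        if (!rec)
                goto out;               /* atomic allocations may fail */
        rec->value = value;
        /* ... publish rec into an RCU-protected structure here ... */
out:
        rcu_read_unlock();
}
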
diff --git a/mm/ksm.c b/mm/ksm.c
index f7edac356f46..bef1af4f77e3 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -184,11 +184,6 @@ static DEFINE_SPINLOCK(ksm_mmlist_lock);
                 sizeof(struct __struct), __alignof__(struct __struct),\
                 (__flags), NULL)
 
-static void __init ksm_init_max_kernel_pages(void)
-{
-        ksm_max_kernel_pages = nr_free_buffer_pages() / 4;
-}
-
 static int __init ksm_slab_init(void)
 {
         rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
@@ -1673,7 +1668,7 @@ static int __init ksm_init(void)
         struct task_struct *ksm_thread;
         int err;
 
-        ksm_init_max_kernel_pages();
+        ksm_max_kernel_pages = totalram_pages / 4;
 
         err = ksm_slab_init();
         if (err)
@@ -1697,6 +1692,9 @@ static int __init ksm_init(void)
                 kthread_stop(ksm_thread);
                 goto out_free2;
         }
+#else
+        ksm_run = KSM_RUN_MERGE;        /* no way for user to start it */
+
 #endif /* CONFIG_SYSFS */
 
         return 0;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e2b98a6875c0..f99f5991d6bb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -313,7 +313,8 @@ soft_limit_tree_from_page(struct page *page)
 static void
 __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
                                 struct mem_cgroup_per_zone *mz,
-                                struct mem_cgroup_tree_per_zone *mctz)
+                                struct mem_cgroup_tree_per_zone *mctz,
+                                unsigned long long new_usage_in_excess)
 {
         struct rb_node **p = &mctz->rb_root.rb_node;
         struct rb_node *parent = NULL;
@@ -322,7 +323,9 @@ __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
         if (mz->on_tree)
                 return;
 
-        mz->usage_in_excess = res_counter_soft_limit_excess(&mem->res);
+        mz->usage_in_excess = new_usage_in_excess;
+        if (!mz->usage_in_excess)
+                return;
         while (*p) {
                 parent = *p;
                 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
@@ -353,16 +356,6 @@ __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
 }
 
 static void
-mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
-                                struct mem_cgroup_per_zone *mz,
-                                struct mem_cgroup_tree_per_zone *mctz)
-{
-        spin_lock(&mctz->lock);
-        __mem_cgroup_insert_exceeded(mem, mz, mctz);
-        spin_unlock(&mctz->lock);
-}
-
-static void
 mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
                                 struct mem_cgroup_per_zone *mz,
                                 struct mem_cgroup_tree_per_zone *mctz)
@@ -392,34 +385,36 @@ static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
 
 static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
 {
-        unsigned long long prev_usage_in_excess, new_usage_in_excess;
-        bool updated_tree = false;
+        unsigned long long excess;
         struct mem_cgroup_per_zone *mz;
         struct mem_cgroup_tree_per_zone *mctz;
-
-        mz = mem_cgroup_zoneinfo(mem, page_to_nid(page), page_zonenum(page));
+        int nid = page_to_nid(page);
+        int zid = page_zonenum(page);
         mctz = soft_limit_tree_from_page(page);
 
         /*
-         * We do updates in lazy mode, mem's are removed
-         * lazily from the per-zone, per-node rb tree
+         * Necessary to update all ancestors when hierarchy is used.
+         * because their event counter is not touched.
          */
-        prev_usage_in_excess = mz->usage_in_excess;
-
-        new_usage_in_excess = res_counter_soft_limit_excess(&mem->res);
-        if (prev_usage_in_excess) {
-                mem_cgroup_remove_exceeded(mem, mz, mctz);
-                updated_tree = true;
-        }
-        if (!new_usage_in_excess)
-                goto done;
-        mem_cgroup_insert_exceeded(mem, mz, mctz);
-
-done:
-        if (updated_tree) {
-                spin_lock(&mctz->lock);
-                mz->usage_in_excess = new_usage_in_excess;
-                spin_unlock(&mctz->lock);
+        for (; mem; mem = parent_mem_cgroup(mem)) {
+                mz = mem_cgroup_zoneinfo(mem, nid, zid);
+                excess = res_counter_soft_limit_excess(&mem->res);
+                /*
+                 * We have to update the tree if mz is on RB-tree or
+                 * mem is over its softlimit.
+                 */
+                if (excess || mz->on_tree) {
+                        spin_lock(&mctz->lock);
+                        /* if on-tree, remove it */
+                        if (mz->on_tree)
+                                __mem_cgroup_remove_exceeded(mem, mz, mctz);
+                        /*
+                         * Insert again. mz->usage_in_excess will be updated.
+                         * If excess is 0, no tree ops.
+                         */
+                        __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
+                        spin_unlock(&mctz->lock);
+                }
         }
 }
 
@@ -447,9 +442,10 @@ static struct mem_cgroup_per_zone *
 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 {
         struct rb_node *rightmost = NULL;
-        struct mem_cgroup_per_zone *mz = NULL;
+        struct mem_cgroup_per_zone *mz;
 
 retry:
+        mz = NULL;
         rightmost = rb_last(&mctz->rb_root);
         if (!rightmost)
                 goto done;              /* Nothing to reclaim from */
@@ -1270,9 +1266,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                         gfp_t gfp_mask, struct mem_cgroup **memcg,
                         bool oom, struct page *page)
 {
-        struct mem_cgroup *mem, *mem_over_limit, *mem_over_soft_limit;
+        struct mem_cgroup *mem, *mem_over_limit;
         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
-        struct res_counter *fail_res, *soft_fail_res = NULL;
+        struct res_counter *fail_res;
 
         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
                 /* Don't account this! */
@@ -1304,17 +1300,16 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 
                 if (mem_cgroup_is_root(mem))
                         goto done;
-                ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res,
-                                                &soft_fail_res);
+                ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
                 if (likely(!ret)) {
                         if (!do_swap_account)
                                 break;
                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
-                                                        &fail_res, NULL);
+                                                        &fail_res);
                         if (likely(!ret))
                                 break;
                         /* mem+swap counter fails */
-                        res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+                        res_counter_uncharge(&mem->res, PAGE_SIZE);
                         flags |= MEM_CGROUP_RECLAIM_NOSWAP;
                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
                                                                         memsw);
@@ -1353,16 +1348,11 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                 }
         }
         /*
-         * Insert just the ancestor, we should trickle down to the correct
-         * cgroup for reclaim, since the other nodes will be below their
-         * soft limit
+         * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
+         * if they exceeds softlimit.
          */
-        if (soft_fail_res) {
-                mem_over_soft_limit =
-                        mem_cgroup_from_res_counter(soft_fail_res, res);
-                if (mem_cgroup_soft_limit_check(mem_over_soft_limit))
-                        mem_cgroup_update_tree(mem_over_soft_limit, page);
-        }
+        if (mem_cgroup_soft_limit_check(mem))
+                mem_cgroup_update_tree(mem, page);
 done:
         return 0;
 nomem:
@@ -1437,10 +1427,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
         if (unlikely(PageCgroupUsed(pc))) {
                 unlock_page_cgroup(pc);
                 if (!mem_cgroup_is_root(mem)) {
-                        res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+                        res_counter_uncharge(&mem->res, PAGE_SIZE);
                         if (do_swap_account)
-                                res_counter_uncharge(&mem->memsw, PAGE_SIZE,
-                                                        NULL);
+                                res_counter_uncharge(&mem->memsw, PAGE_SIZE);
                 }
                 css_put(&mem->css);
                 return;
@@ -1519,7 +1508,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
                 goto out;
 
         if (!mem_cgroup_is_root(from))
-                res_counter_uncharge(&from->res, PAGE_SIZE, NULL);
+                res_counter_uncharge(&from->res, PAGE_SIZE);
         mem_cgroup_charge_statistics(from, pc, false);
 
         page = pc->page;
@@ -1539,7 +1528,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
         }
 
         if (do_swap_account && !mem_cgroup_is_root(from))
-                res_counter_uncharge(&from->memsw, PAGE_SIZE, NULL);
+                res_counter_uncharge(&from->memsw, PAGE_SIZE);
         css_put(&from->css);
 
         css_get(&to->css);
@@ -1610,9 +1599,9 @@ uncharge:
         css_put(&parent->css);
         /* uncharge if move fails */
         if (!mem_cgroup_is_root(parent)) {
-                res_counter_uncharge(&parent->res, PAGE_SIZE, NULL);
+                res_counter_uncharge(&parent->res, PAGE_SIZE);
                 if (do_swap_account)
-                        res_counter_uncharge(&parent->memsw, PAGE_SIZE, NULL);
+                        res_counter_uncharge(&parent->memsw, PAGE_SIZE);
         }
         return ret;
 }
@@ -1803,8 +1792,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                  * calling css_tryget
                  */
                 if (!mem_cgroup_is_root(memcg))
-                        res_counter_uncharge(&memcg->memsw, PAGE_SIZE,
-                                                NULL);
+                        res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
                 mem_cgroup_swap_statistics(memcg, false);
                 mem_cgroup_put(memcg);
         }
@@ -1831,9 +1819,9 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
         if (!mem)
                 return;
         if (!mem_cgroup_is_root(mem)) {
-                res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+                res_counter_uncharge(&mem->res, PAGE_SIZE);
                 if (do_swap_account)
-                        res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
+                        res_counter_uncharge(&mem->memsw, PAGE_SIZE);
         }
         css_put(&mem->css);
 }
@@ -1848,7 +1836,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
         struct page_cgroup *pc;
         struct mem_cgroup *mem = NULL;
         struct mem_cgroup_per_zone *mz;
-        bool soft_limit_excess = false;
 
         if (mem_cgroup_disabled())
                 return NULL;
@@ -1888,10 +1875,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
         }
 
         if (!mem_cgroup_is_root(mem)) {
-                res_counter_uncharge(&mem->res, PAGE_SIZE, &soft_limit_excess);
+                res_counter_uncharge(&mem->res, PAGE_SIZE);
                 if (do_swap_account &&
                                 (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
-                        res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
+                        res_counter_uncharge(&mem->memsw, PAGE_SIZE);
         }
         if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
                 mem_cgroup_swap_statistics(mem, true);
@@ -1908,7 +1895,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
         mz = page_cgroup_zoneinfo(pc);
         unlock_page_cgroup(pc);
 
-        if (soft_limit_excess && mem_cgroup_soft_limit_check(mem))
+        if (mem_cgroup_soft_limit_check(mem))
                 mem_cgroup_update_tree(mem, page);
         /* at swapout, this memcg will be accessed to record to swap */
         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
@@ -1986,7 +1973,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
          * This memcg can be obsolete one. We avoid calling css_tryget
          */
         if (!mem_cgroup_is_root(memcg))
-                res_counter_uncharge(&memcg->memsw, PAGE_SIZE, NULL);
+                res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
         mem_cgroup_swap_statistics(memcg, false);
         mem_cgroup_put(memcg);
 }
@@ -2233,6 +2220,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
         unsigned long reclaimed;
         int loop = 0;
         struct mem_cgroup_tree_per_zone *mctz;
+        unsigned long long excess;
 
         if (order > 0)
                 return 0;
@@ -2284,9 +2272,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                 break;
                         } while (1);
                 }
-                mz->usage_in_excess =
-                        res_counter_soft_limit_excess(&mz->mem->res);
                 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
+                excess = res_counter_soft_limit_excess(&mz->mem->res);
                 /*
                  * One school of thought says that we should not add
                  * back the node to the tree if reclaim returns 0.
@@ -2295,8 +2282,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                  * memory to reclaim from. Consider this as a longer
                  * term TODO.
                  */
-                if (mz->usage_in_excess)
-                        __mem_cgroup_insert_exceeded(mz->mem, mz, mctz);
+                /* If excess == 0, no tree ops */
+                __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
                 spin_unlock(&mctz->lock);
                 css_put(&mz->mem->css);
                 loop++;
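
The memcontrol.c changes above pass the current soft-limit excess into __mem_cgroup_insert_exceeded() and walk every ancestor in mem_cgroup_update_tree(), so each group over its soft limit stays on the per-zone RB-tree consumed by mem_cgroup_soft_limit_reclaim(). The soft limit itself is configured from userspace through the memory controller's memory.soft_limit_in_bytes file; a minimal sketch, not part of the patch, assuming the controller is mounted at /cgroup and a group named mygroup already exists (both paths are assumptions about the local setup):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* mount point and group name below are assumptions */
        const char *path = "/cgroup/mygroup/memory.soft_limit_in_bytes";
        const char *limit = "268435456\n";      /* 256 MB soft limit */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, limit, strlen(limit)) != (ssize_t)strlen(limit))
                perror("write");
        close(fd);
        return 0;
}
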
diff --git a/mm/rmap.c b/mm/rmap.c
index 28aafe2b5306..dd43373a483f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -242,8 +242,8 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 }
 
 /*
- * At what user virtual address is page expected in vma? checking that the
- * page matches the vma: currently only used on anon pages, by unuse_vma;
+ * At what user virtual address is page expected in vma?
+ * checking that the page matches the vma.
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4de7f02f820b..a1bc6b9af9a2 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1974,12 +1974,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                 goto bad_swap;
         }
 
-        if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
-                p->flags |= SWP_SOLIDSTATE;
-                p->cluster_next = 1 + (random32() % p->highest_bit);
+        if (p->bdev) {
+                if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+                        p->flags |= SWP_SOLIDSTATE;
+                        p->cluster_next = 1 + (random32() % p->highest_bit);
+                }
+                if (discard_swap(p) == 0)
+                        p->flags |= SWP_DISCARDABLE;
         }
-        if (discard_swap(p) == 0)
-                p->flags |= SWP_DISCARDABLE;
 
         mutex_lock(&swapon_mutex);
         spin_lock(&swap_lock);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 69511e663234..0f551a4a44cd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
@@ -25,10 +26,10 @@
 #include <linux/rcupdate.h>
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
-#include <linux/highmem.h>
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
+#include <asm/shmparam.h>
 
 
 /*** Page table manipulation functions ***/
@@ -1156,12 +1157,11 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 }
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
-                unsigned long flags, unsigned long start, unsigned long end,
-                int node, gfp_t gfp_mask, void *caller)
+                unsigned long align, unsigned long flags, unsigned long start,
+                unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
         static struct vmap_area *va;
         struct vm_struct *area;
-        unsigned long align = 1;
 
         BUG_ON(in_interrupt());
         if (flags & VM_IOREMAP) {
@@ -1201,7 +1201,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                 unsigned long start, unsigned long end)
 {
-        return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+        return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
                                 __builtin_return_address(0));
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
@@ -1210,7 +1210,7 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
                                        unsigned long start, unsigned long end,
                                        void *caller)
 {
-        return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+        return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
                                   caller);
 }
 
@@ -1225,22 +1225,22 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+        return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
                                 -1, GFP_KERNEL, __builtin_return_address(0));
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
                                 void *caller)
 {
-        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+        return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
                                 -1, GFP_KERNEL, caller);
 }
 
 struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
                                         int node, gfp_t gfp_mask)
 {
-        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
-                                  gfp_mask, __builtin_return_address(0));
+        return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+                                  node, gfp_mask, __builtin_return_address(0));
 }
 
 static struct vm_struct *find_vm_area(const void *addr)
@@ -1403,7 +1403,8 @@ void *vmap(struct page **pages, unsigned int count,
 }
 EXPORT_SYMBOL(vmap);
 
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+                            gfp_t gfp_mask, pgprot_t prot,
                             int node, void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                  pgprot_t prot, int node, void *caller)
@@ -1417,7 +1418,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
         area->nr_pages = nr_pages;
         /* Please note that the recursion is strictly bounded. */
         if (array_size > PAGE_SIZE) {
-                pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+                pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
                                 PAGE_KERNEL, node, caller);
                 area->flags |= VM_VPAGES;
         } else {
@@ -1476,6 +1477,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 /**
  *      __vmalloc_node - allocate virtually contiguous memory
  *      @size:          allocation size
+ *      @align:         desired alignment
  *      @gfp_mask:      flags for the page level allocator
  *      @prot:          protection mask for the allocated pages
  *      @node:          node to use for allocation or -1
@@ -1485,8 +1487,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  *      allocator with @gfp_mask flags. Map them into contiguous
  *      kernel virtual space, using a pagetable protection of @prot.
  */
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-                            int node, void *caller)
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+                            gfp_t gfp_mask, pgprot_t prot,
+                            int node, void *caller)
 {
         struct vm_struct *area;
         void *addr;
@@ -1496,8 +1499,8 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
         if (!size || (size >> PAGE_SHIFT) > totalram_pages)
                 return NULL;
 
-        area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
-                                  node, gfp_mask, caller);
+        area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
+                                  VMALLOC_END, node, gfp_mask, caller);
 
         if (!area)
                 return NULL;
@@ -1516,7 +1519,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
-        return __vmalloc_node(size, gfp_mask, prot, -1,
+        return __vmalloc_node(size, 1, gfp_mask, prot, -1,
                                 __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
@@ -1532,7 +1535,7 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+        return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
                         -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc);
@@ -1549,7 +1552,8 @@ void *vmalloc_user(unsigned long size)
         struct vm_struct *area;
         void *ret;
 
-        ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+        ret = __vmalloc_node(size, SHMLBA,
+                             GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                              PAGE_KERNEL, -1, __builtin_return_address(0));
         if (ret) {
                 area = find_vm_area(ret);
@@ -1572,7 +1576,7 @@ EXPORT_SYMBOL(vmalloc_user);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+        return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
                         node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
@@ -1595,7 +1599,7 @@ EXPORT_SYMBOL(vmalloc_node);
 
 void *vmalloc_exec(unsigned long size)
 {
-        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+        return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
                         -1, __builtin_return_address(0));
 }
 
@@ -1616,7 +1620,7 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-        return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
+        return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
                         -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
@@ -1633,7 +1637,7 @@ void *vmalloc_32_user(unsigned long size)
         struct vm_struct *area;
         void *ret;
 
-        ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+        ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
                         -1, __builtin_return_address(0));
         if (ret) {
                 area = find_vm_area(ret);
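
The vmalloc.c changes above thread an align argument through __get_vm_area_node() and __vmalloc_node(); existing callers pass 1, while vmalloc_user() now requests SHMLBA alignment, which matters when the buffer is later mapped into userspace on architectures with aliasing caches. A minimal kernel-side sketch of the kind of caller that benefits, with hypothetical names (my_buf, my_mmap), not part of the patch:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *my_buf;                    /* hypothetical driver buffer */

/* file_operations.mmap handler: hand my_buf to userspace */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long len = vma->vm_end - vma->vm_start;

        if (!my_buf)
                my_buf = vmalloc_user(len);     /* zeroed, SHMLBA-aligned */
        if (!my_buf)
                return -ENOMEM;
        /* map the whole buffer starting at userspace offset 0 */
        return remap_vmalloc_range(vma, my_buf, 0);
}
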