author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-08 14:31:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-08 14:31:16 -0400
commit		3f17ea6dea8ba5668873afa54628a91aaa3fb1c0 (patch)
tree		afbeb2accd4c2199ddd705ae943995b143a0af02 /mm/mempolicy.c
parent		1860e379875dfe7271c649058aeddffe5afd9d0d (diff)
parent		1a5700bc2d10cd379a795fd2bb377a190af5acd4 (diff)
Merge branch 'next' (accumulated 3.16 merge window patches) into master
Now that 3.15 is released, this merges the 'next' branch into 'master',
bringing us to the normal situation where my 'master' branch is the
merge window.

* accumulated work in next: (6809 commits)
  ufs: sb mutex merge + mutex_destroy
  powerpc: update comments for generic idle conversion
  cris: update comments for generic idle conversion
  idle: remove cpu_idle() forward declarations
  nbd: zero from and len fields in NBD_CMD_DISCONNECT.
  mm: convert some level-less printks to pr_*
  MAINTAINERS: adi-buildroot-devel is moderated
  MAINTAINERS: add linux-api for review of API/ABI changes
  mm/kmemleak-test.c: use pr_fmt for logging
  fs/dlm/debug_fs.c: replace seq_printf by seq_puts
  fs/dlm/lockspace.c: convert simple_str to kstr
  fs/dlm/config.c: convert simple_str to kstr
  mm: mark remap_file_pages() syscall as deprecated
  mm: memcontrol: remove unnecessary memcg argument from soft limit functions
  mm: memcontrol: clean up memcg zoneinfo lookup
  mm/memblock.c: call kmemleak directly from memblock_(alloc|free)
  mm/mempool.c: update the kmemleak stack trace for mempool allocations
  lib/radix-tree.c: update the kmemleak stack trace for radix tree allocations
  mm: introduce kmemleak_update_trace()
  mm/kmemleak.c: use %u to print ->checksum
  ...
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	35
1 file changed, 19 insertions, 16 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 30cc47f8ffa0..284974230459 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -65,6 +65,8 @@
    kernel is not always grateful with that.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/mempolicy.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
@@ -91,6 +93,7 @@
 #include <linux/ctype.h>
 #include <linux/mm_inline.h>
 #include <linux/mmu_notifier.h>
+#include <linux/printk.h>
 
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -1032,7 +1035,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
 	if (!list_empty(&pagelist)) {
-		err = migrate_pages(&pagelist, new_node_page, dest,
+		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
 					MIGRATE_SYNC, MR_SYSCALL);
 		if (err)
 			putback_movable_pages(&pagelist);
@@ -1281,7 +1284,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (!list_empty(&pagelist)) {
 		WARN_ON_ONCE(flags & MPOL_MF_LAZY);
 		nr_failed = migrate_pages(&pagelist, new_vma_page,
-					(unsigned long)vma,
+					NULL, (unsigned long)vma,
 					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
 		if (nr_failed)
 			putback_movable_pages(&pagelist);
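
Context for the two migrate_pages() hunks above: the extra NULL argument matches
migrate_pages() growing an optional callback for releasing unused destination
pages during this merge window, and callers with no special freeing policy pass
NULL to keep the default behaviour. The sketch below is a minimal, compilable
userspace illustration of that optional-callback pattern only; migrate_one(),
alloc_stub(), and the typedefs are hypothetical names, not kernel APIs.

	#include <stdlib.h>

	/* Optional "release the new page" callback, analogous in spirit to the
	 * extra parameter the call sites above now pass NULL for. */
	typedef void *(*get_page_fn)(unsigned long private);
	typedef void (*put_page_fn)(void *page, unsigned long private);

	static int migrate_one(get_page_fn get_new, put_page_fn put_new,
			       unsigned long private)
	{
		void *newpage = get_new(private);

		if (!newpage)
			return -1;

		/* Assume the migration attempt fails and the new page is unused. */
		if (put_new)
			put_new(newpage, private);	/* caller-supplied release */
		else
			free(newpage);			/* default release path */
		return -1;
	}

	static void *alloc_stub(unsigned long private)
	{
		(void)private;
		return malloc(64);
	}

	int main(void)
	{
		/* Passing NULL mirrors the NULL added in the hunks above. */
		migrate_one(alloc_stub, NULL, 0);
		return 0;
	}
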
@@ -1366,7 +1369,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
 }
 
 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
-		unsigned long, mode, unsigned long __user *, nmask,
+		unsigned long, mode, const unsigned long __user *, nmask,
 		unsigned long, maxnode, unsigned, flags)
 {
 	nodemask_t nodes;
@@ -1387,7 +1390,7 @@ SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
 }
 
 /* Set the process memory policy */
-SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
+SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
 		unsigned long, maxnode)
 {
 	int err;
@@ -1610,9 +1613,9 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
 
 /*
  * get_vma_policy(@task, @vma, @addr)
- * @task - task for fallback if vma policy == default
- * @vma - virtual memory area whose policy is sought
- * @addr - address in @vma for shared policy lookup
+ * @task: task for fallback if vma policy == default
+ * @vma: virtual memory area whose policy is sought
+ * @addr: address in @vma for shared policy lookup
  *
  * Returns effective policy for a VMA at specified address.
  * Falls back to @task or system default policy, as necessary.
@@ -1858,11 +1861,11 @@ int node_random(const nodemask_t *maskp)
 #ifdef CONFIG_HUGETLBFS
 /*
  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
- * @vma = virtual memory area whose policy is sought
- * @addr = address in @vma for shared policy lookup and interleave policy
- * @gfp_flags = for requested zone
- * @mpol = pointer to mempolicy pointer for reference counted mempolicy
- * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
+ * @vma: virtual memory area whose policy is sought
+ * @addr: address in @vma for shared policy lookup and interleave policy
+ * @gfp_flags: for requested zone
+ * @mpol: pointer to mempolicy pointer for reference counted mempolicy
+ * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
  *
  * Returns a zonelist suitable for a huge page allocation and a pointer
  * to the struct mempolicy for conditional unref after allocation.
@@ -2274,9 +2277,9 @@ static void sp_free(struct sp_node *n)
 /**
  * mpol_misplaced - check whether current page node is valid in policy
  *
- * @page - page to be checked
- * @vma - vm area where page mapped
- * @addr - virtual address where page mapped
+ * @page: page to be checked
+ * @vma: vm area where page mapped
+ * @addr: virtual address where page mapped
  *
  * Lookup current policy node id for vma,addr and "compare to" page's
 * node id.
@@ -2649,7 +2652,7 @@ void __init numa_policy_init(void)
 		node_set(prefer, interleave_nodes);
 
 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
-		printk("numa_policy_init: interleaving failed\n");
+		pr_err("%s: interleaving failed\n", __func__);
 
 	check_numabalancing_enable();
 }
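
Context for the pr_fmt/pr_err hunks (the #define added at the top of the file
and the printk() conversion in the last hunk): the kernel's pr_* macros expand
their format string through pr_fmt(), and printk.h only supplies a default
pr_fmt() when the including file has not defined one, which is why the #define
sits before the #include block. The sketch below is a compilable userspace
analogue of the same macro technique, not the kernel implementation: fprintf
stands in for printk(KERN_ERR ...), and the string literal "mempolicy: "
stands in for KBUILD_MODNAME ": ".

	#include <stdio.h>

	/* File-wide prefix: because pr_fmt() is expanded inside each pr_*
	 * macro at the call site, defining it once here prefixes every
	 * message this file emits. */
	#define pr_fmt(fmt) "mempolicy: " fmt

	/* Userspace stand-in for the kernel's pr_err(), which expands to
	 * printk(KERN_ERR pr_fmt(fmt), ...).  ##__VA_ARGS__ is a gcc/clang
	 * extension, as in the kernel's own definition. */
	#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

	int main(void)
	{
		/* Prints: "mempolicy: numa_policy_init: interleaving failed" */
		pr_err("%s: interleaving failed\n", "numa_policy_init");
		return 0;
	}
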