Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	 2
-rw-r--r--	mm/hugetlb.c	 6
-rw-r--r--	mm/memory.c	 2
-rw-r--r--	mm/mempolicy.c	61
4 files changed, 43 insertions(+), 28 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index b7b1be6dbd83..5c74b68935ac 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -604,7 +604,7 @@ void __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
-int fastcall __lock_page_killable(struct page *page)
+int __lock_page_killable(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
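The filemap.c change only drops the fastcall annotation from __lock_page_killable(). On x86-32 this macro historically expanded to a register-passing calling convention, roughly (a sketch of the old i386 definition, for illustration; the exact form varied by kernel version):

	/* approximate old i386 definition -- illustration only */
	#define fastcall	__attribute__((regparm(3)))

Once x86-32 kernels were built with register passing as the default, the annotation became a no-op and was removed tree-wide.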
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d9a380312467..cb1b3a7ecdfc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,14 +24,15 @@
 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
 static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
 static unsigned long surplus_huge_pages;
+static unsigned long nr_overcommit_huge_pages;
 unsigned long max_huge_pages;
+unsigned long sysctl_overcommit_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
 static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
-unsigned long nr_overcommit_huge_pages;
 static int hugetlb_next_nid;
 
 /*
@@ -609,8 +610,9 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 				struct file *file, void __user *buffer,
 				size_t *length, loff_t *ppos)
 {
-	spin_lock(&hugetlb_lock);
 	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+	spin_lock(&hugetlb_lock);
+	nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
 	spin_unlock(&hugetlb_lock);
 	return 0;
 }
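The hugetlb.c changes fix a sleep-under-spinlock bug in the nr_overcommit_hugepages sysctl handler: proc_doulongvec_minmax() copies data to and from user space and may fault, so it can sleep and must not run inside the hugetlb_lock spinlock. The handler now parses into the new sysctl_overcommit_huge_pages variable without the lock held, then takes the lock only to publish the result into nr_overcommit_huge_pages, which becomes static and is accessed only under hugetlb_lock. Simplified, the resulting handler shape is:

	int hugetlb_overcommit_handler(struct ctl_table *table, int write,
					struct file *file, void __user *buffer,
					size_t *length, loff_t *ppos)
	{
		/* may fault on user-space access, so run without the spinlock */
		proc_doulongvec_minmax(table, write, file, buffer, length, ppos);

		/* publish the parsed value under the lock */
		spin_lock(&hugetlb_lock);
		nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
		spin_unlock(&hugetlb_lock);
		return 0;
	}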
diff --git a/mm/memory.c b/mm/memory.c
index e5628a5fd678..717aa0e3be2d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -989,6 +989,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	int i;
 	unsigned int vm_flags;
 
+	if (len <= 0)
+		return 0;
 	/*
 	 * Require read or write permissions.
 	 * If 'force' is set, we only require the "MAY" flags.
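The get_user_pages() loop body assumes it has at least one page to process, so a caller that computes a zero (or negative) length would otherwise have an unrequested page faulted in and pinned. The new early return makes such calls report zero pages instead. A hypothetical caller sketch, using the eight-argument signature of this era (addr assumed to be a valid user address):

	struct page *pages[1];
	int npages;

	/* a zero-length request now returns 0 instead of pinning a page */
	npages = get_user_pages(current, current->mm, addr, 0 /* len */,
				0 /* write */, 0 /* force */, pages, NULL);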
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 83c69f8a64c2..8d246c3b340f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -116,22 +116,51 @@ static void mpol_rebind_policy(struct mempolicy *pol,
 /* Do sanity checking on a policy */
 static int mpol_check_policy(int mode, nodemask_t *nodes)
 {
-	int empty = nodes_empty(*nodes);
+	int was_empty, is_empty;
+
+	if (!nodes)
+		return 0;
+
+	/*
+	 * "Contextualize" the in-coming nodemask for cpusets:
+	 * Remember whether in-coming nodemask was empty.  If not,
+	 * restrict the nodes to the allowed nodes in the cpuset.
+	 * This is guaranteed to be a subset of nodes with memory.
+	 */
+	cpuset_update_task_memory_state();
+	is_empty = was_empty = nodes_empty(*nodes);
+	if (!was_empty) {
+		nodes_and(*nodes, *nodes, cpuset_current_mems_allowed);
+		is_empty = nodes_empty(*nodes);	/* after "contextualization" */
+	}
 
 	switch (mode) {
 	case MPOL_DEFAULT:
-		if (!empty)
+		/*
+		 * require caller to specify an empty nodemask
+		 * before "contextualization"
+		 */
+		if (!was_empty)
 			return -EINVAL;
 		break;
 	case MPOL_BIND:
 	case MPOL_INTERLEAVE:
-		/* Preferred will only use the first bit, but allow
-		   more for now. */
-		if (empty)
+		/*
+		 * require at least 1 valid node after "contextualization"
+		 */
+		if (is_empty)
+			return -EINVAL;
+		break;
+	case MPOL_PREFERRED:
+		/*
+		 * Did caller specify invalid nodes?
+		 * Don't silently accept this as "local allocation".
+		 */
+		if (!was_empty && is_empty)
 			return -EINVAL;
 		break;
 	}
-	return nodes_subset(*nodes, node_states[N_HIGH_MEMORY]) ? 0 : -EINVAL;
+	return 0;
 }
 
 /* Generate a custom zonelist for the BIND policy. */
@@ -188,8 +217,6 @@ static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
 	switch (mode) {
 	case MPOL_INTERLEAVE:
 		policy->v.nodes = *nodes;
-		nodes_and(policy->v.nodes, policy->v.nodes,
-					node_states[N_HIGH_MEMORY]);
 		if (nodes_weight(policy->v.nodes) == 0) {
 			kmem_cache_free(policy_cache, policy);
 			return ERR_PTR(-EINVAL);
@@ -421,18 +448,6 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
 	return err;
 }
 
-static int contextualize_policy(int mode, nodemask_t *nodes)
-{
-	if (!nodes)
-		return 0;
-
-	cpuset_update_task_memory_state();
-	if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
-		return -EINVAL;
-	return mpol_check_policy(mode, nodes);
-}
-
-
 /*
  * Update task->flags PF_MEMPOLICY bit: set iff non-default
  * mempolicy.  Allows more rapid checking of this (combined perhaps
@@ -468,7 +483,7 @@ static long do_set_mempolicy(int mode, nodemask_t *nodes)
 {
 	struct mempolicy *new;
 
-	if (contextualize_policy(mode, nodes))
+	if (mpol_check_policy(mode, nodes))
 		return -EINVAL;
 	new = mpol_new(mode, nodes);
 	if (IS_ERR(new))
@@ -915,10 +930,6 @@ asmlinkage long sys_mbind(unsigned long start, unsigned long len,
 	err = get_nodes(&nodes, nmask, maxnode);
 	if (err)
 		return err;
-#ifdef CONFIG_CPUSETS
-	/* Restrict the nodes to the allowed nodes in the cpuset */
-	nodes_and(nodes, nodes, current->mems_allowed);
-#endif
 	return do_mbind(start, len, mode, &nodes, flags);
 }
 
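The mempolicy changes consolidate cpuset "contextualization" into mpol_check_policy(): the incoming nodemask is ANDed with cpuset_current_mems_allowed in exactly one place, remembering whether it was empty before and after, and each mode then checks the notion of emptiness it actually cares about (MPOL_DEFAULT requires an originally empty mask; MPOL_BIND and MPOL_INTERLEAVE require at least one valid node after restriction). The new MPOL_PREFERRED case closes a silent misbehavior: via sys_mbind(), a mask naming only disallowed nodes used to be ANDed down to empty and then quietly treated as local allocation, whereas it now fails with -EINVAL. Conversely, set_mempolicy(), which previously rejected any mask not wholly inside the cpuset, now silently restricts it the same way mbind() always did. This leaves contextualize_policy(), the #ifdef CONFIG_CPUSETS block in sys_mbind(), and the extra nodes_and() in mpol_new() redundant, so they are removed. From user space the visible effect is roughly the following (a hypothetical test program; node 3 is assumed to exist but to lie outside the calling task's cpuset; link with -lnuma):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/mman.h>
	#include <numaif.h>	/* mbind(), MPOL_PREFERRED */

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		unsigned long mask = 1UL << 3;	/* only node 3: assumed disallowed */
		void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		/* before this patch: silently degraded to local allocation;
		 * after it: fails with EINVAL */
		if (mbind(p, page, MPOL_PREFERRED, &mask, sizeof(mask) * 8, 0) == -1)
			perror("mbind");
		return 0;
	}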