Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c     4
-rw-r--r--  mm/madvise.c    21
-rw-r--r--  mm/memory.c     10
-rw-r--r--  mm/mempolicy.c  20
-rw-r--r--  mm/page_alloc.c 44
-rw-r--r--  mm/swap.c        2
6 files changed, 70 insertions(+), 31 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 67f29516662a..508707704d2c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -85,7 +85,7 @@ void free_huge_page(struct page *page)
 	BUG_ON(page_count(page));
 
 	INIT_LIST_HEAD(&page->lru);
-	page[1].mapping = NULL;
+	page[1].lru.next = NULL;			/* reset dtor */
 
 	spin_lock(&hugetlb_lock);
 	enqueue_huge_page(page);
@@ -105,7 +105,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 	}
 	spin_unlock(&hugetlb_lock);
 	set_page_count(page, 1);
-	page[1].mapping = (void *)free_huge_page;
+	page[1].lru.next = (void *)free_huge_page;	/* set dtor */
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
 		clear_user_highpage(&page[i], addr);
 	return page;
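
These two hugetlb hunks, together with the mm/page_alloc.c and mm/swap.c changes further down, move the compound-page destructor out of page[1].mapping and into page[1].lru.next, with the allocation order kept in page[1].lru.prev. A minimal kernel-style sketch of the convention; the helper names below are hypothetical (the patch open-codes the assignments), shown only to make the field usage explicit:

/* Illustrative only: the first tail page's lru.next carries the
 * destructor, its lru.prev the order.  Assumes the struct page of
 * this era, where lru is a struct list_head. */
typedef void (*page_dtor_t)(struct page *);

static inline void set_compound_dtor(struct page *page, page_dtor_t dtor)
{
	page[1].lru.next = (void *)dtor;	/* first tail page */
}

static inline page_dtor_t get_compound_dtor(struct page *page)
{
	return (page_dtor_t)page[1].lru.next;
}

This frees page[1].mapping (and page[1].index) for other uses while keeping the destructor lookup in put_compound_page() a single pointer dereference.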
diff --git a/mm/madvise.c b/mm/madvise.c
index ae0ae3ea299a..af3d573b0141 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -22,16 +22,23 @@ static long madvise_behavior(struct vm_area_struct * vma,
 	struct mm_struct * mm = vma->vm_mm;
 	int error = 0;
 	pgoff_t pgoff;
-	int new_flags = vma->vm_flags & ~VM_READHINTMASK;
+	int new_flags = vma->vm_flags;
 
 	switch (behavior) {
+	case MADV_NORMAL:
+		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
+		break;
 	case MADV_SEQUENTIAL:
-		new_flags |= VM_SEQ_READ;
+		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
 		break;
 	case MADV_RANDOM:
-		new_flags |= VM_RAND_READ;
+		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
 		break;
-	default:
+	case MADV_DONTFORK:
+		new_flags |= VM_DONTCOPY;
+		break;
+	case MADV_DOFORK:
+		new_flags &= ~VM_DONTCOPY;
 		break;
 	}
 
@@ -177,6 +184,12 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	long error;
 
 	switch (behavior) {
+	case MADV_DOFORK:
+		if (vma->vm_flags & VM_IO) {
+			error = -EINVAL;
+			break;
+		}
+	case MADV_DONTFORK:
 	case MADV_NORMAL:
 	case MADV_SEQUENTIAL:
 	case MADV_RANDOM:
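
MADV_DONTFORK sets VM_DONTCOPY on the VMA, so the range is simply not copied into the child at fork(); MADV_DOFORK clears the flag again, except on VM_IO mappings, where it is refused with -EINVAL. A sketch of the intended userspace use, e.g. keeping a device or DMA buffer out of forked children (untested; requires headers that define MADV_DONTFORK):

#include <sys/mman.h>
#include <err.h>

/* Sketch: allocate an anonymous buffer that fork() will not copy
 * into the child. */
static void *alloc_nofork_buffer(size_t len)
{
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		err(1, "mmap");
	if (madvise(buf, len, MADV_DONTFORK))	/* child won't see this range */
		err(1, "madvise(MADV_DONTFORK)");
	return buf;
}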
diff --git a/mm/memory.c b/mm/memory.c
index 2bee1f21aa8a..9abc6008544b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -82,6 +82,16 @@ EXPORT_SYMBOL(num_physpages);
 EXPORT_SYMBOL(high_memory);
 EXPORT_SYMBOL(vmalloc_earlyreserve);
 
+int randomize_va_space __read_mostly = 1;
+
+static int __init disable_randmaps(char *s)
+{
+	randomize_va_space = 0;
+	return 0;
+}
+__setup("norandmaps", disable_randmaps);
+
+
 /*
  * If a p?d_bad entry is found while walking page tables, report
  * the error, before resetting entry to p?d_none.  Usually (but
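
randomize_va_space defaults to on and can be disabled at boot with the norandmaps parameter. A hypothetical consumer, sketched only to show the intended use of the flag (the function and helper names here are illustrative, not from this patch; actual call sites live in per-arch mmap-layout code):

/* Illustrative only: arch code gating its address-space
 * randomization on the new global flag. */
extern int randomize_va_space;

static unsigned long pick_mmap_base(unsigned long base,
				    unsigned long random_offset)
{
	if (randomize_va_space)
		base -= random_offset;	/* offset chosen by the arch */
	return base;
}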
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3bd7fb7e4b75..bedfa4f09c80 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -132,19 +132,29 @@ static int mpol_check_policy(int mode, nodemask_t *nodes)
 	}
 	return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
 }
+
 /* Generate a custom zonelist for the BIND policy. */
 static struct zonelist *bind_zonelist(nodemask_t *nodes)
 {
 	struct zonelist *zl;
-	int num, max, nd;
+	int num, max, nd, k;
 
 	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
-	zl = kmalloc(sizeof(void *) * max, GFP_KERNEL);
+	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
 	if (!zl)
 		return NULL;
 	num = 0;
-	for_each_node_mask(nd, *nodes)
-		zl->zones[num++] = &NODE_DATA(nd)->node_zones[policy_zone];
+	/* First put in the highest zones from all nodes, then all the next
+	   lower zones etc. Avoid empty zones because the memory allocator
+	   doesn't like them. If you implement node hot removal you
+	   have to fix that. */
+	for (k = policy_zone; k >= 0; k--) {
+		for_each_node_mask(nd, *nodes) {
+			struct zone *z = &NODE_DATA(nd)->node_zones[k];
+			if (z->present_pages > 0)
+				zl->zones[num++] = z;
+		}
+	}
 	zl->zones[num] = NULL;
 	return zl;
 }
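
The rewritten loop emits zones highest-first across the whole nodemask, skipping zones with no present pages, instead of taking only policy_zone from each node. An illustrative result (assuming a config whose zones are just ZONE_DMA and ZONE_NORMAL, policy_zone == ZONE_NORMAL, and both zones populated on both nodes):

/* Illustrative ordering for nodes {0,2}:
 *
 *   zl->zones = { node0:ZONE_NORMAL, node2:ZONE_NORMAL,
 *                 node0:ZONE_DMA,    node2:ZONE_DMA,
 *                 NULL };
 *
 * A zone with present_pages == 0 is skipped entirely, so the
 * allocator never sees an empty zone in an MPOL_BIND zonelist.
 */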
@@ -798,6 +808,8 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
 	nodes_clear(*nodes);
 	if (maxnode == 0 || !nmask)
 		return 0;
+	if (maxnode > PAGE_SIZE)
+		return -EINVAL;
 
 	nlongs = BITS_TO_LONGS(maxnode);
 	if ((maxnode % BITS_PER_LONG) == 0)
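
get_nodes() copies the user-supplied nodemask in; the new check bounds maxnode before the BITS_TO_LONGS() arithmetic. From userspace the mask reaches this path via set_mempolicy()/mbind(), typically through libnuma's <numaif.h> wrappers, e.g. (sketch; link with -lnuma):

#include <numaif.h>
#include <err.h>

/* Sketch: bind this task's allocations to nodes 0 and 2.  maxnode
 * is a bit count; absurdly large values now earn -EINVAL. */
static void bind_to_nodes_0_and_2(void)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 2);

	if (set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask) * 8))
		err(1, "set_mempolicy");
}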
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dde04ff4be31..208812b25597 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -56,6 +56,7 @@ long nr_swap_pages;
 int percpu_pagelist_fraction;
 
 static void fastcall free_hot_cold_page(struct page *page, int cold);
+static void __free_pages_ok(struct page *page, unsigned int order);
 
 /*
  * results with 256, 32 in the lowmem_reserve sysctl:
@@ -169,20 +170,23 @@ static void bad_page(struct page *page)
  * All pages have PG_compound set.  All pages have their ->private pointing at
  * the head page (even the head page has this).
  *
- * The first tail page's ->mapping, if non-zero, holds the address of the
- * compound page's put_page() function.
- *
- * The order of the allocation is stored in the first tail page's ->index
- * This is only for debug at present. This usage means that zero-order pages
- * may not be compound.
+ * The first tail page's ->lru.next holds the address of the compound page's
+ * put_page() function.  Its ->lru.prev holds the order of allocation.
+ * This usage means that zero-order pages may not be compound.
  */
+
+static void free_compound_page(struct page *page)
+{
+	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
+}
+
 static void prep_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
 
-	page[1].mapping = NULL;
-	page[1].index = order;
+	page[1].lru.next = (void *)free_compound_page;	/* set dtor */
+	page[1].lru.prev = (void *)order;
 	for (i = 0; i < nr_pages; i++) {
 		struct page *p = page + i;
 
@@ -196,7 +200,7 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 	int i;
 	int nr_pages = 1 << order;
 
-	if (unlikely(page[1].index != order))
+	if (unlikely((unsigned long)page[1].lru.prev != order))
 		bad_page(page);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1537,29 +1541,29 @@ static int __initdata node_load[MAX_NUMNODES];
  */
 static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
 {
-	int i, n, val;
+	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
 
-	for_each_online_node(i) {
-		cpumask_t tmp;
+	/* Use the local node if we haven't already */
+	if (!node_isset(node, *used_node_mask)) {
+		node_set(node, *used_node_mask);
+		return node;
+	}
 
-		/* Start from local node */
-		n = (node+i) % num_online_nodes();
+	for_each_online_node(n) {
+		cpumask_t tmp;
 
 		/* Don't want a node to appear more than once */
 		if (node_isset(n, *used_node_mask))
 			continue;
 
-		/* Use the local node if we haven't already */
-		if (!node_isset(node, *used_node_mask)) {
-			best_node = node;
-			break;
-		}
-
 		/* Use the distance array to find the distance */
 		val = node_distance(node, n);
 
+		/* Penalize nodes under us ("prefer the next node") */
+		val += (n < node);
+
 		/* Give preference to headless and unused nodes */
 		tmp = node_to_cpumask(n);
 		if (!cpus_empty(tmp))
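
With the local node now handled up front, the remaining candidates compete on node_distance() plus a one-point penalty for nodes numerically below the starting node, so distance ties resolve toward "the next node". An illustrative trace (assuming uniform distances and ignoring the CPU/headless weighting applied just after):

/* Illustrative: online nodes {0,1,2,3}, local node 2, all
 * node_distance() values equal to d.  Successive calls yield the
 * fallback order 2, 3, 0, 1:
 *
 *   call 1: node 2 not yet in used_node_mask          -> returns 2
 *   call 2: val(3) = d; val(0) = val(1) = d + 1       -> returns 3
 *   call 3: val(0) = val(1) = d + 1; 0 iterated first -> returns 0
 *   call 4: only node 1 remains                       -> returns 1
 */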
diff --git a/mm/swap.c b/mm/swap.c
index 76247424dea1..cce3dda59c59 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -40,7 +40,7 @@ static void put_compound_page(struct page *page)
 	if (put_page_testzero(page)) {
 		void (*dtor)(struct page *page);
 
-		dtor = (void (*)(struct page *))page[1].mapping;
+		dtor = (void (*)(struct page *))page[1].lru.next;
 		(*dtor)(page);
 	}
 }