Diffstat (limited to 'mm')
 -rw-r--r--  mm/Kconfig   |  4
 -rw-r--r--  mm/ksm.c     | 10
 -rw-r--r--  mm/vmalloc.c | 49
 3 files changed, 33 insertions(+), 30 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index edd300aca17..57963c6063d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -224,7 +224,9 @@ config KSM
 	  the many instances by a single resident page with that content, so
 	  saving memory until one or another app needs to modify the content.
 	  Recommended for use with KVM, or with other duplicative applications.
-	  See Documentation/vm/ksm.txt for more information.
+	  See Documentation/vm/ksm.txt for more information: KSM is inactive
+	  until a program has madvised that an area is MADV_MERGEABLE, and
+	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
 
 config DEFAULT_MMAP_MIN_ADDR
 	int "Low address space to protect from user allocation"
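
Note on the new help text: KSM is opt-in per memory range. As a rough userspace illustration (not part of this diff, and assuming a libc that exposes MADV_MERGEABLE), an application marks a region mergeable like this:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 << 20;	/* arbitrary example size */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Opt this range in to KSM scanning, per the help text above. */
	if (madvise(buf, len, MADV_MERGEABLE) != 0)
		perror("madvise(MADV_MERGEABLE)");
	/* ... fill buf with duplicate-heavy data and run the workload ... */
	munmap(buf, len);
	return 0;
}

Nothing is merged until root also writes 1 to /sys/kernel/mm/ksm/run.
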
diff --git a/mm/ksm.c b/mm/ksm.c
index f7edac356f4..bef1af4f77e 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -184,11 +184,6 @@ static DEFINE_SPINLOCK(ksm_mmlist_lock);
 		sizeof(struct __struct), __alignof__(struct __struct),\
 		(__flags), NULL)
 
-static void __init ksm_init_max_kernel_pages(void)
-{
-	ksm_max_kernel_pages = nr_free_buffer_pages() / 4;
-}
-
 static int __init ksm_slab_init(void)
 {
 	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
@@ -1673,7 +1668,7 @@ static int __init ksm_init(void)
 	struct task_struct *ksm_thread;
 	int err;
 
-	ksm_init_max_kernel_pages();
+	ksm_max_kernel_pages = totalram_pages / 4;
 
 	err = ksm_slab_init();
 	if (err)
@@ -1697,6 +1692,9 @@ static int __init ksm_init(void)
 		kthread_stop(ksm_thread);
 		goto out_free2;
 	}
+#else
+	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */
+
 #endif /* CONFIG_SYSFS */
 
 	return 0;
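
The #else branch above means kernels built without CONFIG_SYSFS now start KSM in merge mode unconditionally, since there is no run file to poke. On sysfs-enabled kernels the second step stays manual; a minimal sketch in plain C, equivalent to echo 1 > /sys/kernel/mm/ksm/run and using only the paths the diff itself names:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/mm/ksm/run", "w");

	if (!f) {
		perror("fopen /sys/kernel/mm/ksm/run");
		return 1;
	}
	fputs("1\n", f);	/* 1 == KSM_RUN_MERGE: start the scanner */
	fclose(f);
	return 0;
}

The new default cap, ksm_max_kernel_pages = totalram_pages / 4, bounds the number of shared pages KSM may keep.
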
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 69511e66323..5e7aed0802b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -25,10 +25,10 @@
 #include <linux/rcupdate.h>
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
-#include <linux/highmem.h>
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
+#include <asm/shmparam.h>
 
 
 /*** Page table manipulation functions ***/
@@ -1156,12 +1156,11 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 }
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
-		unsigned long flags, unsigned long start, unsigned long end,
-		int node, gfp_t gfp_mask, void *caller)
+		unsigned long align, unsigned long flags, unsigned long start,
+		unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
 	static struct vmap_area *va;
 	struct vm_struct *area;
-	unsigned long align = 1;
 
 	BUG_ON(in_interrupt());
 	if (flags & VM_IOREMAP) {
@@ -1201,7 +1200,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end)
 {
-	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
 						__builtin_return_address(0));
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
@@ -1210,7 +1209,7 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end,
 				void *caller)
 {
-	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
 				  caller);
 }
 
@@ -1225,22 +1224,22 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				-1, GFP_KERNEL, __builtin_return_address(0));
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				void *caller)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  -1, GFP_KERNEL, caller);
 }
 
 struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
 				   int node, gfp_t gfp_mask)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
-				  gfp_mask, __builtin_return_address(0));
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+				  node, gfp_mask, __builtin_return_address(0));
 }
 
 static struct vm_struct *find_vm_area(const void *addr)
@@ -1403,7 +1402,8 @@ void *vmap(struct page **pages, unsigned int count,
 }
 EXPORT_SYMBOL(vmap);
 
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
 			    int node, void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, int node, void *caller)
@@ -1417,7 +1417,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+		pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
 				PAGE_KERNEL, node, caller);
 		area->flags |= VM_VPAGES;
 	} else {
@@ -1476,6 +1476,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 /**
  *	__vmalloc_node - allocate virtually contiguous memory
  *	@size:		allocation size
+ *	@align:		desired alignment
  *	@gfp_mask:	flags for the page level allocator
  *	@prot:		protection mask for the allocated pages
  *	@node:		node to use for allocation or -1
@@ -1485,8 +1486,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  *	allocator with @gfp_mask flags.  Map them into contiguous
  *	kernel virtual space, using a pagetable protection of @prot.
  */
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
 			    int node, void *caller)
 {
 	struct vm_struct *area;
 	void *addr;
@@ -1496,8 +1498,8 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		return NULL;
 
-	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
-				  node, gfp_mask, caller);
+	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
+				  VMALLOC_END, node, gfp_mask, caller);
 
 	if (!area)
 		return NULL;
@@ -1516,7 +1518,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
-	return __vmalloc_node(size, gfp_mask, prot, -1,
+	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
@@ -1532,7 +1534,7 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
 			-1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc);
@@ -1549,7 +1551,8 @@ void *vmalloc_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+	ret = __vmalloc_node(size, SHMLBA,
+			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 			     PAGE_KERNEL, -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
@@ -1572,7 +1575,7 @@ EXPORT_SYMBOL(vmalloc_user);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
 					node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
@@ -1595,7 +1598,7 @@ EXPORT_SYMBOL(vmalloc_node);
 
 void *vmalloc_exec(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
 			      -1, __builtin_return_address(0));
 }
 
@@ -1616,7 +1619,7 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
 			      -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
@@ -1633,7 +1636,7 @@ void *vmalloc_32_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
 			     -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
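
Taken together, the vmalloc.c changes thread an align argument down to the vmap allocator; every updated caller passes 1 (no change in behaviour) except vmalloc_user(), which now asks for SHMLBA alignment. That matters for buffers later mapped into userspace: on CPUs with virtually indexed caches, the kernel and user mappings of the same pages can otherwise land in different cache colors and alias. A hedged sketch of the consuming pattern, assuming a driver mmap handler (my_buf and my_dev_mmap are illustrative names, not from this diff):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *my_buf;	/* set up elsewhere: my_buf = vmalloc_user(len); */

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	/*
	 * vmalloc_user() zeroes the pages and sets VM_USERMAP, which is
	 * what entitles us to remap the area into a user vma.  With the
	 * base now SHMLBA-aligned, the user mapping can share a cache
	 * color with the kernel mapping.
	 */
	return remap_vmalloc_range(vma, my_buf, 0);
}
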