author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2008-07-14 00:29:49 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2008-07-14 00:29:49 -0400
commit    11c2d8174ed3dc4f1971564732689b4a39129702 (patch)
tree      ac00daa548ea8ac24ae7a5c8062312e335ab9858 /mm
parent    cde274c0c789404df8ece3f9e7d6506caf0127e2 (diff)
parent    bce7f793daec3e65ec5c5705d2457b81fe7b5725 (diff)
Merge commit 'origin/HEAD' into test-merge
Manual fixup of include/asm-powerpc/pgtable-ppc64.h
Diffstat (limited to 'mm')
-rw-r--r--	mm/allocpercpu.c	2
-rw-r--r--	mm/memory.c	16
-rw-r--r--	mm/mempolicy.c	6
-rw-r--r--	mm/migrate.c	2
-rw-r--r--	mm/page_alloc.c	1
-rw-r--r--	mm/slub.c	18
-rw-r--r--	mm/sparse-vmemmap.c	2
7 files changed, 36 insertions, 11 deletions
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index f4026bae6eed..05f2b4009ccc 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -1,7 +1,7 @@
 /*
  * linux/mm/allocpercpu.c
  *
- * Separated from slab.c August 11, 2006 Christoph Lameter <clameter@sgi.com>
+ * Separated from slab.c August 11, 2006 Christoph Lameter
  */
 #include <linux/mm.h>
 #include <linux/module.h>
diff --git a/mm/memory.c b/mm/memory.c
index d14b251a25a6..2302d228fe04 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1151,7 +1151,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
	 * be processed until returning to user space.
	 */
	if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
-		return -ENOMEM;
+		return i ? i : -ENOMEM;
 
	if (write)
		foll_flags |= FOLL_WRITE;
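
Note on the hunk above: when the task has been flagged for OOM kill, get_user_pages() now reports the number of pages it already pinned ("i") instead of discarding that work with a bare -ENOMEM; the caller only sees the error when nothing was pinned. A standalone sketch of that return convention (illustrative userspace C with made-up names, not the kernel function itself):

#include <errno.h>
#include <stddef.h>

/* fill_slots() and get_one() are hypothetical names, for illustration only. */
static int fill_slots(void **slots, int n, void *(*get_one)(void))
{
	int i;

	for (i = 0; i < n; i++) {
		slots[i] = get_one();
		if (slots[i] == NULL)
			/* report partial progress if any, else the error */
			return i ? i : -ENOMEM;
	}
	return i;
}
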
@@ -1697,8 +1697,19 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
	struct page *dirty_page = NULL;
 
	old_page = vm_normal_page(vma, address, orig_pte);
-	if (!old_page)
+	if (!old_page) {
+		/*
+		 * VM_MIXEDMAP !pfn_valid() case
+		 *
+		 * We should not cow pages in a shared writeable mapping.
+		 * Just mark the pages writable as we can't do any dirty
+		 * accounting on raw pfn maps.
+		 */
+		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+				     (VM_WRITE|VM_SHARED))
+			goto reuse;
 		goto gotten;
+	}
 
	/*
	 * Take out anonymous pages first, anonymous shared vmas are
@@ -1751,6 +1762,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
	}
 
	if (reuse) {
+reuse:
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
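
Note on the two do_wp_page() hunks above: a write fault on a PFN-only page (vm_normal_page() returns NULL, the VM_MIXEDMAP !pfn_valid() case) in a mapping that is both VM_WRITE and VM_SHARED now jumps straight to the existing reuse path and simply marks the PTE writable, since there is no struct page to copy and no dirty accounting to do. The flag test is a plain masked comparison; a self-contained illustration (the bit values below are stand-ins, not taken from the kernel headers):

#include <stdbool.h>

#define VM_WRITE  0x00000002UL	/* stand-in values for illustration */
#define VM_SHARED 0x00000008UL

/* True only when the mapping is both writable and shared, mirroring
 * the condition added in the hunk above. */
static bool is_shared_writable(unsigned long vm_flags)
{
	return (vm_flags & (VM_WRITE | VM_SHARED)) == (VM_WRITE | VM_SHARED);
}
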
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a37a5034f63d..c94e58b192c3 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -729,7 +729,11 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
	} else {
 		*policy = pol == &default_policy ? MPOL_DEFAULT :
 					pol->mode;
-		*policy |= pol->flags;
+		/*
+		 * Internal mempolicy flags must be masked off before exposing
+		 * the policy to userspace.
+		 */
+		*policy |= (pol->flags & MPOL_MODE_FLAGS);
 	}
 
	if (vma) {
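
Note on the do_get_mempolicy() hunk: only the user-visible mode flags (MPOL_MODE_FLAGS) may be OR-ed into the value returned to userspace; any internal flag bits carried in pol->flags are masked off first. A small self-contained example of the masking (bit positions chosen for illustration, not copied from the uapi headers):

#include <stdio.h>

#define MPOL_F_STATIC_NODES	(1u << 15)	/* illustrative positions */
#define MPOL_F_RELATIVE_NODES	(1u << 14)
#define MPOL_MODE_FLAGS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
#define MPOL_F_SOME_INTERNAL	(1u << 13)	/* hypothetical internal bit */

int main(void)
{
	unsigned int mode  = 2;	/* some policy mode value */
	unsigned int flags = MPOL_F_STATIC_NODES | MPOL_F_SOME_INTERNAL;

	/* masking keeps the user-visible flag and drops the internal one */
	unsigned int exposed = mode | (flags & MPOL_MODE_FLAGS);

	printf("exposed = 0x%x\n", exposed);
	return 0;
}
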
diff --git a/mm/migrate.c b/mm/migrate.c
index 112bcaeaa104..55bd355d170d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -9,7 +9,7 @@
  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
  * Hirokazu Takahashi <taka@valinux.co.jp>
  * Dave Hansen <haveblue@us.ibm.com>
- * Christoph Lameter <clameter@sgi.com>
+ * Christoph Lameter
  */
 
 #include <linux/migrate.h>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2f552955a02f..f32fae3121f0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2328,7 +2328,6 @@ static void build_zonelists(pg_data_t *pgdat)
 static void build_zonelist_cache(pg_data_t *pgdat)
 {
 	pgdat->node_zonelists[0].zlcache_ptr = NULL;
-	pgdat->node_zonelists[1].zlcache_ptr = NULL;
 }
 
 #endif	/* CONFIG_NUMA */
diff --git a/mm/slub.c b/mm/slub.c
index 0987d1cd943c..315c392253c7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5,7 +5,7 @@
  * The allocator synchronizes using per slab locks and only
  * uses a centralized lock to manage a pool of partial slabs.
  *
- * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
+ * (C) 2007 SGI, Christoph Lameter
  */
 
 #include <linux/mm.h>
@@ -1628,9 +1628,11 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	void **object;
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
+	unsigned int objsize;
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
+	objsize = c->objsize;
 	if (unlikely(!c->freelist || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
@@ -1643,7 +1645,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	local_irq_restore(flags);
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
-		memset(object, 0, c->objsize);
+		memset(object, 0, objsize);
 
 	return object;
 }
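
Note on the two slab_alloc() hunks: c->objsize is now copied into a local variable while interrupts are still disabled, so the memset() after local_irq_restore() no longer dereferences the per-CPU structure, which may belong to a different CPU by then if the task was preempted and migrated. The pattern, reduced to a self-contained sketch (hypothetical types and callbacks, not the real SLUB interfaces):

#include <string.h>

struct cpu_cache { unsigned int objsize; };	/* stand-in for kmem_cache_cpu */

static void *alloc_zeroed(struct cpu_cache *c, void *(*raw_alloc)(void))
{
	/* snapshot the per-CPU field inside the "irqs disabled" window */
	unsigned int objsize = c->objsize;
	void *object = raw_alloc();

	/* ...the real code re-enables interrupts here; 'c' may then be stale... */
	if (object)
		memset(object, 0, objsize);	/* safe: uses the snapshot */
	return object;
}
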
@@ -2995,8 +2997,6 @@ void __init kmem_cache_init(void)
 		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
 		caches++;
-	}
-	if (KMALLOC_MIN_SIZE <= 128) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
 		caches++;
@@ -3026,6 +3026,16 @@ void __init kmem_cache_init(void)
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
 
+	if (KMALLOC_MIN_SIZE == 128) {
+		/*
+		 * The 192 byte sized cache is not used if the alignment
+		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
+		 * instead.
+		 */
+		for (i = 128 + 8; i <= 192; i += 8)
+			size_index[(i - 1) / 8] = 8;
+	}
+
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
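
Note on the kmem_cache_init() hunks: with a 128-byte minimum kmalloc alignment the 192-byte cache is never used, so the size_index[] table is patched to send 136..192-byte requests to slot 8, i.e. the 256-byte cache (2^8 = 256). A worked, self-contained version of that index arithmetic (the table below only models the shape of the real one):

#include <stdio.h>

/* One slot per 8-byte step of request size, value = kmalloc cache index. */
static unsigned char size_index[24];

int main(void)
{
	int i;

	/* pretend every size up to 192 initially maps to some smaller cache */
	for (i = 8; i <= 192; i += 8)
		size_index[(i - 1) / 8] = 7;	/* placeholder index */

	/* the hunk's redirect: no 192-byte cache, use the 256-byte (2^8) one */
	for (i = 128 + 8; i <= 192; i += 8)
		size_index[(i - 1) / 8] = 8;

	/* a 160-byte request now lands in cache index 8 */
	printf("160-byte request -> cache index %d\n", size_index[(160 - 1) / 8]);
	return 0;
}
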
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 99c4f36eb8a3..a91b5f8fcaf6 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -1,7 +1,7 @@
 /*
  * Virtual Memory Map support
  *
- * (C) 2007 sgi. Christoph Lameter <clameter@sgi.com>.
+ * (C) 2007 sgi. Christoph Lameter.
  *
  * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
  * virt_to_page, page_address() to be implemented as a base offset