Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig       |  3
-rw-r--r--  mm/hugetlb.c     | 62
-rw-r--r--  mm/memcontrol.c  |  2
-rw-r--r--  mm/mempolicy.c   |  1
-rw-r--r--  mm/page_alloc.c  |  2
-rw-r--r--  mm/slab.c        |  1
-rw-r--r--  mm/slob.c        |  1
-rw-r--r--  mm/slub.c        | 27
-rw-r--r--  mm/sparse.c      |  1
-rw-r--r--  mm/util.c        | 15
10 files changed, 87 insertions, 28 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 446c6588c753..0bd9c2dbb2a0 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -77,9 +77,6 @@ config FLAT_NODE_MEM_MAP
 	def_bool y
 	depends on !SPARSEMEM
 
-config HAVE_GET_USER_PAGES_FAST
-	bool
-
 #
 # Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
 # to represent different areas of memory. This variable allows
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 757ca983fd99..67a71191136e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -565,7 +565,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 		huge_page_order(h));
 	if (page) {
 		if (arch_prepare_hugepage(page)) {
-			__free_pages(page, HUGETLB_PAGE_ORDER);
+			__free_pages(page, huge_page_order(h));
 			return NULL;
 		}
 		prep_new_huge_page(h, page, nid);
@@ -665,6 +665,11 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 					__GFP_REPEAT|__GFP_NOWARN,
 					huge_page_order(h));
 
+	if (page && arch_prepare_hugepage(page)) {
+		__free_pages(page, huge_page_order(h));
+		return NULL;
+	}
+
 	spin_lock(&hugetlb_lock);
 	if (page) {
 		/*
@@ -1937,6 +1942,18 @@ retry:
 		lock_page(page);
 	}
 
+	/*
+	 * If we are going to COW a private mapping later, we examine the
+	 * pending reservations for this page now. This will ensure that
+	 * any allocations necessary to record that reservation occur outside
+	 * the spinlock.
+	 */
+	if (write_access && !(vma->vm_flags & VM_SHARED))
+		if (vma_needs_reservation(h, vma, address) < 0) {
+			ret = VM_FAULT_OOM;
+			goto backout_unlocked;
+		}
+
 	spin_lock(&mm->page_table_lock);
 	size = i_size_read(mapping->host) >> huge_page_shift(h);
 	if (idx >= size)
@@ -1962,6 +1979,7 @@ out:
 
 backout:
 	spin_unlock(&mm->page_table_lock);
+backout_unlocked:
 	unlock_page(page);
 	put_page(page);
 	goto out;
@@ -1973,6 +1991,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t *ptep;
 	pte_t entry;
 	int ret;
+	struct page *pagecache_page = NULL;
 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
 	struct hstate *h = hstate_vma(vma);
 
@@ -1989,25 +2008,44 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = huge_ptep_get(ptep);
 	if (huge_pte_none(entry)) {
 		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
-		mutex_unlock(&hugetlb_instantiation_mutex);
-		return ret;
+		goto out_unlock;
 	}
 
 	ret = 0;
 
+	/*
+	 * If we are going to COW the mapping later, we examine the pending
+	 * reservations for this page now. This will ensure that any
+	 * allocations necessary to record that reservation occur outside the
+	 * spinlock. For private mappings, we also lookup the pagecache
+	 * page now as it is used to determine if a reservation has been
+	 * consumed.
+	 */
+	if (write_access && !pte_write(entry)) {
+		if (vma_needs_reservation(h, vma, address) < 0) {
+			ret = VM_FAULT_OOM;
+			goto out_unlock;
+		}
+
+		if (!(vma->vm_flags & VM_SHARED))
+			pagecache_page = hugetlbfs_pagecache_page(h,
+								vma, address);
+	}
+
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
 	if (likely(pte_same(entry, huge_ptep_get(ptep))))
-		if (write_access && !pte_write(entry)) {
-			struct page *page;
-			page = hugetlbfs_pagecache_page(h, vma, address);
-			ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
-			if (page) {
-				unlock_page(page);
-				put_page(page);
-			}
-		}
+		if (write_access && !pte_write(entry))
+			ret = hugetlb_cow(mm, vma, address, ptep, entry,
+							pagecache_page);
 	spin_unlock(&mm->page_table_lock);
+
+	if (pagecache_page) {
+		unlock_page(pagecache_page);
+		put_page(pagecache_page);
+	}
+
+out_unlock:
 	mutex_unlock(&hugetlb_instantiation_mutex);
 
 	return ret;
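Both hugetlb hunks above move work out from under mm->page_table_lock for the same reason: vma_needs_reservation() may allocate, and an allocation can sleep, which is not allowed while a spinlock is held. The pagecache page lookup is hoisted out as well, with its unlock/put deferred until after the lock is dropped. A minimal userspace sketch of that pre-allocate-then-commit pattern, with a pthread spinlock and malloc() standing in for the kernel primitives:

/* prealloc_demo.c — build with: cc prealloc_demo.c -lpthread */
#include <pthread.h>
#include <stdlib.h>

static pthread_spinlock_t lock;
static int *committed;

static int fault_handler(void)
{
	/* step 1: the allocation that may block or fail happens unlocked,
	 * mirroring vma_needs_reservation() running before spin_lock() */
	int *resv = malloc(sizeof(*resv));
	if (!resv)
		return 1;	/* analogous to returning VM_FAULT_OOM */
	*resv = 1;

	/* step 2: the spinlock only covers the quick commit */
	pthread_spin_lock(&lock);
	committed = resv;
	pthread_spin_unlock(&lock);
	return 0;
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	return fault_handler();
}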
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7056c3bdb478..0f1f7a7374ba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -796,6 +796,8 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 
 	if (mem_cgroup_subsys.disabled)
 		return 0;
+	if (!mm)
+		return 0;
 
 	rcu_read_lock();
 	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e550bec20582..83369058ec13 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -803,7 +803,6 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 int do_migrate_pages(struct mm_struct *mm,
 	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
 {
-	LIST_HEAD(pagelist);
 	int busy = 0;
 	int err = 0;
 	nodemask_t tmp;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 401d104d2bb6..af982f7cdb2a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4437,7 +4437,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 	do {
 		size = bucketsize << log2qty;
 		if (flags & HASH_EARLY)
-			table = alloc_bootmem(size);
+			table = alloc_bootmem_nopanic(size);
 		else if (hashdist)
 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
 		else {
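alloc_bootmem() panics when boot memory is short, while alloc_bootmem_nopanic() returns NULL, which is what lets the enclosing do/while shrink the hash table and retry instead of killing the boot. A rough userspace sketch of that shrink-and-retry shape, with a hypothetical try_alloc() standing in for the boot-time allocator:

/* retry_demo.c — try_alloc() is a made-up stand-in that fails above 1 MiB */
#include <stdio.h>
#include <stdlib.h>

#define ALLOC_LIMIT (1UL << 20)

static void *try_alloc(size_t size)
{
	return size > ALLOC_LIMIT ? NULL : malloc(size);
}

int main(void)
{
	size_t size = 1UL << 24;	/* initial request: 16 MiB */
	void *table;

	do {
		table = try_alloc(size);
		if (!table)
			size >>= 1;	/* halve and retry, as the hash setup does */
	} while (!table && size);

	if (!table)
		return 1;
	printf("settled for %zu bytes\n", size);
	free(table);
	return 0;
}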
diff --git a/mm/slab.c b/mm/slab.c
index 918f04f7fef1..e76eee466886 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4472,4 +4472,3 @@ size_t ksize(const void *objp)
 
 	return obj_size(virt_to_cache(objp));
 }
-EXPORT_SYMBOL(ksize);
diff --git a/mm/slob.c b/mm/slob.c
index d8fbd4d1bfa7..4c82dd41f32e 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -519,7 +519,6 @@ size_t ksize(const void *block)
 	else
 		return sp->page.private;
 }
-EXPORT_SYMBOL(ksize);
 
 struct kmem_cache {
 	unsigned int size, align;
diff --git a/mm/slub.c b/mm/slub.c
index b7e2cd5d82db..4f5b96149458 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1329,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > MIN_PARTIAL) {
+				n->nr_partial > n->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1381,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < MIN_PARTIAL) {
+		if (n->nr_partial < n->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1913,9 +1913,21 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 #endif
 }
 
-static void init_kmem_cache_node(struct kmem_cache_node *n)
+static void
+init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
+
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	n->min_partial = ilog2(s->size);
+	if (n->min_partial < MIN_PARTIAL)
+		n->min_partial = MIN_PARTIAL;
+	else if (n->min_partial > MAX_PARTIAL)
+		n->min_partial = MAX_PARTIAL;
+
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2087,7 +2099,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	init_object(kmalloc_caches, n, 1);
 	init_tracking(kmalloc_caches, n);
 #endif
-	init_kmem_cache_node(n);
+	init_kmem_cache_node(n, kmalloc_caches);
 	inc_slabs_node(kmalloc_caches, node, page->objects);
 
 	/*
@@ -2144,7 +2156,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 
 		}
 		s->node[node] = n;
-		init_kmem_cache_node(n);
+		init_kmem_cache_node(n, s);
 	}
 	return 1;
 }
@@ -2155,7 +2167,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 
 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
-	init_kmem_cache_node(&s->local_node);
+	init_kmem_cache_node(&s->local_node, s);
 	return 1;
 }
 #endif
@@ -2715,7 +2727,6 @@ size_t ksize(const void *object)
 	 */
 	return s->size;
 }
-EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
@@ -2890,7 +2901,7 @@ static int slab_mem_going_online_callback(void *arg)
 			ret = -ENOMEM;
 			goto out;
 		}
-		init_kmem_cache_node(n);
+		init_kmem_cache_node(n, s);
 		s->node[nid] = n;
 	}
 out:
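Taken together, the slub.c hunks replace the fixed MIN_PARTIAL threshold with a per-node n->min_partial that init_kmem_cache_node() scales with object size: ilog2(s->size), clamped to the [MIN_PARTIAL, MAX_PARTIAL] range, so caches with large objects keep more partial slabs around and lean on the page allocator less. A standalone sketch of that computation, assuming the MIN_PARTIAL/MAX_PARTIAL values of 5 and 10 that slub.c used at the time:

/* min_partial_demo.c */
#include <stdio.h>

#define MIN_PARTIAL 5
#define MAX_PARTIAL 10

/* floor(log2(x)) for x > 0, mirroring the kernel's ilog2() */
static unsigned long ilog2(unsigned long x)
{
	return (sizeof(unsigned long) * 8 - 1) - __builtin_clzl(x);
}

int main(void)
{
	unsigned long sizes[] = { 8, 64, 192, 4096, 65536 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long mp = ilog2(sizes[i]);

		if (mp < MIN_PARTIAL)
			mp = MIN_PARTIAL;	/* small objects: keep the old floor */
		else if (mp > MAX_PARTIAL)
			mp = MAX_PARTIAL;	/* large objects: cap the hoard */
		printf("object size %6lu -> min_partial %lu\n", sizes[i], mp);
	}
	return 0;
}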
diff --git a/mm/sparse.c b/mm/sparse.c
index 5d9dbbb9d39e..39db301b920d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -12,7 +12,6 @@
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
-#include "internal.h"
 
 /*
  * Permanent SPARSEMEM data:
diff --git a/mm/util.c b/mm/util.c
index 9341ca77bd88..cb00b748ce47 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -171,3 +171,18 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	mm->unmap_area = arch_unmap_area;
 }
 #endif
+
+int __attribute__((weak)) get_user_pages_fast(unsigned long start,
+				int nr_pages, int write, struct page **pages)
+{
+	struct mm_struct *mm = current->mm;
+	int ret;
+
+	down_read(&mm->mmap_sem);
+	ret = get_user_pages(current, mm, start, nr_pages,
+					write, 0, pages, NULL);
+	up_read(&mm->mmap_sem);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(get_user_pages_fast);
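This generic get_user_pages_fast() is defined __attribute__((weak)): an architecture with a real fast path supplies a strong definition of the same symbol and the linker silently prefers it, which is why the HAVE_GET_USER_PAGES_FAST Kconfig switch removed above is no longer needed. A single-file sketch of the weak-symbol mechanism itself (hypothetical names; built alone the weak body runs, while linking in another object with a strong fast_lookup() makes that one win):

/* weak_demo.c */
#include <stdio.h>

/* generic fallback, playing the role of mm/util.c's weak definition */
int __attribute__((weak)) fast_lookup(int x)
{
	printf("weak generic path\n");
	return x;
}

int main(void)
{
	return fast_lookup(0);
}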