aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memory.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-01-21 22:05:45 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-01-21 22:05:45 -0500
commitdf32e43a54d04eda35d2859beaf90e3864d53288 (patch)
tree7a61cf658b2949bd426285eb9902be7758ced1ba /mm/memory.c
parentfbd918a2026d0464ce9c23f57b7de4bcfccdc2e6 (diff)
parent78d5506e82b21a1a1de68c24182db2c2fe521422 (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge first patch-bomb from Andrew Morton: - a couple of misc things - inotify/fsnotify work from Jan - ocfs2 updates (partial) - about half of MM * emailed patches from Andrew Morton <akpm@linux-foundation.org>: (117 commits) mm/migrate: remove unused function, fail_migrate_page() mm/migrate: remove putback_lru_pages, fix comment on putback_movable_pages mm/migrate: correct failure handling if !hugepage_migration_support() mm/migrate: add comment about permanent failure path mm, page_alloc: warn for non-blockable __GFP_NOFAIL allocation failure mm: compaction: reset scanner positions immediately when they meet mm: compaction: do not mark unmovable pageblocks as skipped in async compaction mm: compaction: detect when scanners meet in isolate_freepages mm: compaction: reset cached scanner pfn's before reading them mm: compaction: encapsulate defer reset logic mm: compaction: trace compaction begin and end memcg, oom: lock mem_cgroup_print_oom_info sched: add tracepoints related to NUMA task migration mm: numa: do not automatically migrate KSM pages mm: numa: trace tasks that fail migration due to rate limiting mm: numa: limit scope of lock for NUMA migrate rate limiting mm: numa: make NUMA-migrate related functions static lib/show_mem.c: show num_poisoned_pages when oom mm/hwpoison: add '#' to hwpoison_inject mm/memblock: use WARN_ONCE when MAX_NUMNODES passed as input parameter ...
Diffstat (limited to 'mm/memory.c')
-rw-r--r--mm/memory.c16
1 file changed, 14 insertions, 2 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 6768ce9e57d2..86487dfa5e59 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -59,6 +59,7 @@
59#include <linux/gfp.h> 59#include <linux/gfp.h>
60#include <linux/migrate.h> 60#include <linux/migrate.h>
61#include <linux/string.h> 61#include <linux/string.h>
62#include <linux/dma-debug.h>
62 63
63#include <asm/io.h> 64#include <asm/io.h>
64#include <asm/pgalloc.h> 65#include <asm/pgalloc.h>
@@ -2559,6 +2560,8 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
2559 2560
2560static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) 2561static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
2561{ 2562{
2563 debug_dma_assert_idle(src);
2564
2562 /* 2565 /*
2563 * If the source page was a PFN mapping, we don't have 2566 * If the source page was a PFN mapping, we don't have
2564 * a "struct page" for it. We do a best-effort copy by 2567 * a "struct page" for it. We do a best-effort copy by
@@ -4272,11 +4275,20 @@ void copy_user_huge_page(struct page *dst, struct page *src,
4272#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 4275#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
4273 4276
4274#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS 4277#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
4278
4279static struct kmem_cache *page_ptl_cachep;
4280
4281void __init ptlock_cache_init(void)
4282{
4283 page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
4284 SLAB_PANIC, NULL);
4285}
4286
4275bool ptlock_alloc(struct page *page) 4287bool ptlock_alloc(struct page *page)
4276{ 4288{
4277 spinlock_t *ptl; 4289 spinlock_t *ptl;
4278 4290
4279 ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL); 4291 ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
4280 if (!ptl) 4292 if (!ptl)
4281 return false; 4293 return false;
4282 page->ptl = ptl; 4294 page->ptl = ptl;
@@ -4285,6 +4297,6 @@ bool ptlock_alloc(struct page *page)
4285 4297
4286void ptlock_free(struct page *page) 4298void ptlock_free(struct page *page)
4287{ 4299{
4288 kfree(page->ptl); 4300 kmem_cache_free(page_ptl_cachep, page->ptl);
4289} 4301}
4290#endif 4302#endif