about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c        11
-rw-r--r--  mm/mmap.c            4
-rw-r--r--  mm/page_alloc.c      9
-rw-r--r--  mm/page_isolation.c  1
-rw-r--r--  mm/quicklist.c       9
-rw-r--r--  mm/slub.c            4
-rw-r--r--  mm/truncate.c        4
-rw-r--r--  mm/vmstat.c         19
8 files changed, 51 insertions(+), 10 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 54e968650855..876bc595d0f8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2129,13 +2129,20 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	 * After a write we want buffered reads to be sure to go to disk to get
 	 * the new data. We invalidate clean cached page from the region we're
 	 * about to write. We do this *before* the write so that we can return
-	 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
+	 * without clobbering -EIOCBQUEUED from ->direct_IO().
 	 */
 	if (mapping->nrpages) {
 		written = invalidate_inode_pages2_range(mapping,
 					pos >> PAGE_CACHE_SHIFT, end);
-		if (written)
+		/*
+		 * If a page can not be invalidated, return 0 to fall back
+		 * to buffered write.
+		 */
+		if (written) {
+			if (written == -EBUSY)
+				return 0;
 			goto out;
+		}
 	}
 
 	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
diff --git a/mm/mmap.c b/mm/mmap.c
index 339cf5c4d5d8..e7a5a68a9c2e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1030,6 +1030,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	} else {
 		switch (flags & MAP_TYPE) {
 		case MAP_SHARED:
+			/*
+			 * Ignore pgoff.
+			 */
+			pgoff = 0;
 			vm_flags |= VM_SHARED | VM_MAYSHARE;
 			break;
 		case MAP_PRIVATE:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index af982f7cdb2a..e293c58bea58 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -694,6 +694,9 @@ static int move_freepages(struct zone *zone,
 #endif
 
 	for (page = start_page; page <= end_page;) {
+		/* Make sure we are not inadvertently changing nodes */
+		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
+
 		if (!pfn_valid_within(page_to_pfn(page))) {
 			page++;
 			continue;
@@ -2516,6 +2519,10 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 			continue;
 		page = pfn_to_page(pfn);
 
+		/* Watch out for overlapping nodes */
+		if (page_to_nid(page) != zone_to_nid(zone))
+			continue;
+
 		/* Blocks with reserved pages will never free, skip them. */
 		if (PageReserved(page))
 			continue;
@@ -4064,7 +4071,7 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data contig_page_data = { .bdata = &bootmem_node_data[0] };
+struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
 EXPORT_SYMBOL(contig_page_data);
 #endif
 
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 3444b58033c8..c69f84fe038d 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -2,7 +2,6 @@
  * linux/mm/page_isolation.c
  */
 
-#include <stddef.h>
 #include <linux/mm.h>
 #include <linux/page-isolation.h>
 #include <linux/pageblock-flags.h>
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 3f703f7cb398..8dbb6805ef35 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -26,7 +26,10 @@ DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
 static unsigned long max_pages(unsigned long min_pages)
 {
 	unsigned long node_free_pages, max;
-	struct zone *zones = NODE_DATA(numa_node_id())->node_zones;
+	int node = numa_node_id();
+	struct zone *zones = NODE_DATA(node)->node_zones;
+	int num_cpus_on_node;
+	node_to_cpumask_ptr(cpumask_on_node, node);
 
 	node_free_pages =
 #ifdef CONFIG_ZONE_DMA
@@ -38,6 +41,10 @@ static unsigned long max_pages(unsigned long min_pages)
 		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);
 
 	max = node_free_pages / FRACTION_OF_NODE_MEM;
+
+	num_cpus_on_node = cpus_weight_nr(*cpumask_on_node);
+	max /= num_cpus_on_node;
+
 	return max(max, min_pages);
 }
 
43 50
diff --git a/mm/slub.c b/mm/slub.c
index 4f5b96149458..fb486d5540f8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2312,7 +2312,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
 	s->refcount = 1;
 #ifdef CONFIG_NUMA
-	s->remote_node_defrag_ratio = 100;
+	s->remote_node_defrag_ratio = 1000;
 #endif
 	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
 		goto error;
@@ -4058,7 +4058,7 @@ static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
 	if (err)
 		return err;
 
-	if (ratio < 100)
+	if (ratio <= 100)
 		s->remote_node_defrag_ratio = ratio * 10;
 
 	return length;
diff --git a/mm/truncate.c b/mm/truncate.c
index 250505091d37..6650c1d878b4 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -380,7 +380,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
  * Any pages which are found to be mapped into pagetables are unmapped prior to
  * invalidation.
  *
- * Returns -EIO if any pages could not be invalidated.
+ * Returns -EBUSY if any pages could not be invalidated.
  */
 int invalidate_inode_pages2_range(struct address_space *mapping,
 		pgoff_t start, pgoff_t end)
@@ -440,7 +440,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		ret2 = do_launder_page(mapping, page);
 		if (ret2 == 0) {
 			if (!invalidate_complete_page2(mapping, page))
-				ret2 = -EIO;
+				ret2 = -EBUSY;
 		}
 		if (ret2 < 0)
 			ret = ret2;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index b0d08e667ece..d7826af2fb07 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -516,9 +516,26 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
 			continue;
 
 		page = pfn_to_page(pfn);
+#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES
+		/*
+		 * Ordinarily, memory holes in flatmem still have a valid
+		 * memmap for the PFN range. However, an architecture for
+		 * embedded systems (e.g. ARM) can free up the memmap backing
+		 * holes to save memory on the assumption the memmap is
+		 * never used. The page_zone linkages are then broken even
+		 * though pfn_valid() returns true. Skip the page if the
+		 * linkages are broken. Even if this test passed, the impact
+		 * is that the counters for the movable type are off but
+		 * fragmentation monitoring is likely meaningless on small
+		 * systems.
+		 */
+		if (page_zone(page) != zone)
+			continue;
+#endif
 		mtype = get_pageblock_migratetype(page);
 
-		count[mtype]++;
+		if (mtype < MIGRATE_TYPES)
+			count[mtype]++;
 	}
 
 	/* Print counts */
523 540
524 /* Print counts */ 541 /* Print counts */