Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig           7
-rw-r--r--  mm/bootmem.c         2
-rw-r--r--  mm/bounce.c          2
-rw-r--r--  mm/fadvise.c         2
-rw-r--r--  mm/filemap.c        29
-rw-r--r--  mm/highmem.c         5
-rw-r--r--  mm/hugetlb.c        23
-rw-r--r--  mm/memcontrol.c     18
-rw-r--r--  mm/mmap.c            4
-rw-r--r--  mm/mmzone.c          2
-rw-r--r--  mm/page-writeback.c 14
-rw-r--r--  mm/page_alloc.c     24
-rw-r--r--  mm/page_isolation.c 13
-rw-r--r--  mm/pdflush.c         2
-rw-r--r--  mm/quicklist.c       9
-rw-r--r--  mm/readahead.c       2
-rw-r--r--  mm/shmem.c           5
-rw-r--r--  mm/slob.c            8
-rw-r--r--  mm/slub.c            5
-rw-r--r--  mm/tiny-shmem.c     26
-rw-r--r--  mm/truncate.c        6
-rw-r--r--  mm/vmalloc.c         7
-rw-r--r--  mm/vmstat.c         19
23 files changed, 165 insertions, 69 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 0bd9c2dbb2a0..1a501a4de95c 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -101,7 +101,7 @@ config HAVE_MEMORY_PRESENT
 # with gcc 3.4 and later.
 #
 config SPARSEMEM_STATIC
-	def_bool n
+	bool
 
 #
 # Architecture platforms which require a two level mem_section in SPARSEMEM
@@ -113,7 +113,7 @@ config SPARSEMEM_EXTREME
 	depends on SPARSEMEM && !SPARSEMEM_STATIC
 
 config SPARSEMEM_VMEMMAP_ENABLE
-	def_bool n
+	bool
 
 config SPARSEMEM_VMEMMAP
 	bool "Sparse Memory virtual memmap"
@@ -187,6 +187,9 @@ config RESOURCES_64BIT
 	help
 	  This option allows memory and IO resources to be 64 bit.
 
+config PHYS_ADDR_T_64BIT
+	def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT
+
 config ZONE_DMA_FLAG
 	int
 	default "0" if !ZONE_DMA
diff --git a/mm/bootmem.c b/mm/bootmem.c
index ad8eec6e44a8..ac5a891f142a 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -48,7 +48,7 @@ early_param("bootmem_debug", bootmem_debug_setup);
 	if (unlikely(bootmem_debug))			\
 		printk(KERN_INFO			\
 			"bootmem::%s " fmt,		\
-			__FUNCTION__, ## args);		\
+			__func__, ## args);		\
 })
 
 static unsigned long __init bootmap_bytes(unsigned long pages)
diff --git a/mm/bounce.c b/mm/bounce.c
index b6d2d0f1019b..06722c403058 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -267,7 +267,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	/*
 	 * Data-less bio, nothing to bounce
 	 */
-	if (bio_empty_barrier(*bio_orig))
+	if (!bio_has_data(*bio_orig))
 		return;
 
 	/*
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 343cfdfebd9e..a1da969bd980 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2002, Linus Torvalds
  *
- * 11Jan2003	akpm@digeo.com
+ * 11Jan2003	Andrew Morton
  *		Initial version.
  */
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 54e968650855..903bf316912a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1100,8 +1100,9 @@ page_ok:
 
 page_not_up_to_date:
 	/* Get exclusive access to the page ... */
-	if (lock_page_killable(page))
-		goto readpage_eio;
+	error = lock_page_killable(page);
+	if (unlikely(error))
+		goto readpage_error;
 
 page_not_up_to_date_locked:
 	/* Did it get truncated before we got the lock? */
@@ -1130,8 +1131,9 @@ readpage:
 	}
 
 	if (!PageUptodate(page)) {
-		if (lock_page_killable(page))
-			goto readpage_eio;
+		error = lock_page_killable(page);
+		if (unlikely(error))
+			goto readpage_error;
 		if (!PageUptodate(page)) {
 			if (page->mapping == NULL) {
 				/*
@@ -1143,15 +1145,14 @@ readpage:
 			}
 			unlock_page(page);
 			shrink_readahead_size_eio(filp, ra);
-			goto readpage_eio;
+			error = -EIO;
+			goto readpage_error;
 		}
 		unlock_page(page);
 	}
 
 	goto page_ok;
 
-readpage_eio:
-	error = -EIO;
 readpage_error:
 	/* UHHUH! A synchronous read error occurred. Report it */
 	desc->error = error;
@@ -1186,8 +1187,7 @@ out:
 	ra->prev_pos |= prev_offset;
 
 	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
-	if (filp)
-		file_accessed(filp);
+	file_accessed(filp);
 }
 
 int file_read_actor(read_descriptor_t *desc, struct page *page,
@@ -2129,13 +2129,20 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	 * After a write we want buffered reads to be sure to go to disk to get
 	 * the new data.  We invalidate clean cached page from the region we're
 	 * about to write.  We do this *before* the write so that we can return
-	 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
+	 * without clobbering -EIOCBQUEUED from ->direct_IO().
 	 */
 	if (mapping->nrpages) {
 		written = invalidate_inode_pages2_range(mapping,
 					pos >> PAGE_CACHE_SHIFT, end);
-		if (written)
+		/*
+		 * If a page can not be invalidated, return 0 to fall back
+		 * to buffered write.
+		 */
+		if (written) {
+			if (written == -EBUSY)
+				return 0;
 			goto out;
+		}
 	}
 
 	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
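The -EBUSY handling above pairs with the mm/truncate.c change later in this diff: failed invalidation now reports -EBUSY, and generic_file_direct_write() maps it to a zero-byte result so the caller retries through the page cache instead of surfacing an error. A minimal userspace sketch of that fallback contract follows; every name in it is illustrative, not a kernel API.

#include <errno.h>
#include <stdio.h>

/* Hypothetical fast path: fails with -EBUSY while resources are pinned. */
static long try_fast_write(int busy, long nbytes)
{
	return busy ? -EBUSY : nbytes;
}

/* Hypothetical slow path: always succeeds in this sketch. */
static long buffered_write(long nbytes)
{
	return nbytes;
}

/*
 * Mirror of the convention above: map -EBUSY to "0 bytes written" so the
 * caller falls back, but propagate hard errors (e.g. -EIO) unchanged.
 */
static long direct_write(int busy, long nbytes)
{
	long written = try_fast_write(busy, nbytes);

	if (written == -EBUSY)
		return 0;	/* caller will retry via the buffered path */
	return written;
}

int main(void)
{
	long n = direct_write(1, 4096);

	if (n == 0)			/* fast path declined: fall back */
		n = buffered_write(4096);
	printf("wrote %ld bytes\n", n);
	return 0;
}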
diff --git a/mm/highmem.c b/mm/highmem.c
index e16e1523b688..b36b83b920ff 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -70,6 +70,7 @@ static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
 static void flush_all_zero_pkmaps(void)
 {
 	int i;
+	int need_flush = 0;
 
 	flush_cache_kmaps();
 
@@ -101,8 +102,10 @@ static void flush_all_zero_pkmaps(void)
 			  &pkmap_page_table[i]);
 
 		set_page_address(page, NULL);
+		need_flush = 1;
 	}
-	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
+	if (need_flush)
+		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
 /**
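The new need_flush flag converts an unconditional global TLB flush into one that fires only when at least one pkmap entry was actually torn down. A hedged standalone sketch of the same shape, where expensive_flush() merely stands in for flush_tlb_kernel_range():

#include <stdio.h>

#define NSLOTS 16

static int slots[NSLOTS];	/* 0 = unused, >0 = pinned, -1 = torn down */

static void expensive_flush(void)
{
	puts("global flush");	/* stands in for a TLB shootdown */
}

/* Tear down unused slots; flush once at the end, and only if needed. */
static void flush_all_zero_slots(void)
{
	int i, need_flush = 0;

	for (i = 0; i < NSLOTS; i++) {
		if (slots[i] != 0)
			continue;	/* pinned or already torn down */
		slots[i] = -1;		/* actual teardown happened */
		need_flush = 1;
	}
	if (need_flush)
		expensive_flush();
}

int main(void)
{
	flush_all_zero_slots();	/* flushes: every slot was unused */
	flush_all_zero_slots();	/* silent: nothing left to tear down */
	return 0;
}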
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 67a71191136e..38633864a93e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2008,7 +2008,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = huge_ptep_get(ptep);
 	if (huge_pte_none(entry)) {
 		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
-		goto out_unlock;
+		goto out_mutex;
 	}
 
 	ret = 0;
@@ -2024,7 +2024,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (write_access && !pte_write(entry)) {
 		if (vma_needs_reservation(h, vma, address) < 0) {
 			ret = VM_FAULT_OOM;
-			goto out_unlock;
+			goto out_mutex;
 		}
 
 		if (!(vma->vm_flags & VM_SHARED))
@@ -2034,10 +2034,23 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
-	if (likely(pte_same(entry, huge_ptep_get(ptep))))
-		if (write_access && !pte_write(entry))
+	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
+		goto out_page_table_lock;
+
+
+	if (write_access) {
+		if (!pte_write(entry)) {
 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
 							pagecache_page);
+			goto out_page_table_lock;
+		}
+		entry = pte_mkdirty(entry);
+	}
+	entry = pte_mkyoung(entry);
+	if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access))
+		update_mmu_cache(vma, address, entry);
+
+out_page_table_lock:
 	spin_unlock(&mm->page_table_lock);
 
 	if (pagecache_page) {
@@ -2045,7 +2058,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(pagecache_page);
 	}
 
-out_unlock:
+out_mutex:
 	mutex_unlock(&hugetlb_instantiation_mutex);
 
 	return ret;
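The relabelled exits (out_mutex, plus the new out_page_table_lock) follow the one-label-per-held-resource unwinding convention: each early exit jumps to the label that releases exactly what it holds at that point. A self-contained POSIX-mutex sketch of that convention; the function and lock names are invented for illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;

/* Each label releases what is held at the point of the jump, innermost first. */
static int do_fault(int fail_early, int fail_late)
{
	int ret = 0;

	pthread_mutex_lock(&outer);
	if (fail_early) {
		ret = -1;
		goto out_outer;		/* only the outer lock is held */
	}

	pthread_mutex_lock(&inner);
	if (fail_late) {
		ret = -2;
		goto out_inner;		/* both locks are held */
	}
	/* ...work guarded by both locks would go here... */

out_inner:
	pthread_mutex_unlock(&inner);
out_outer:
	pthread_mutex_unlock(&outer);
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", do_fault(0, 0), do_fault(1, 0), do_fault(0, 1));
	return 0;
}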
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0f1f7a7374ba..36896f3eb7f5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -250,6 +250,14 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
+	/*
+	 * mm_update_next_owner() may clear mm->owner to NULL
+	 * if it races with swapoff, page migration, etc.
+	 * So this can be called with p == NULL.
+	 */
+	if (unlikely(!p))
+		return NULL;
+
 	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
 				struct mem_cgroup, css);
 }
@@ -549,6 +557,11 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	if (likely(!memcg)) {
 		rcu_read_lock();
 		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+		if (unlikely(!mem)) {
+			rcu_read_unlock();
+			kmem_cache_free(page_cgroup_cache, pc);
+			return 0;
+		}
 		/*
 		 * For every charge from the cgroup, increment reference count
 		 */
@@ -801,11 +814,16 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 
 	rcu_read_lock();
 	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+	if (unlikely(!mem)) {
+		rcu_read_unlock();
+		return 0;
+	}
 	css_get(&mem->css);
 	rcu_read_unlock();
 
 	do {
 		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
+		progress += res_counter_check_under_limit(&mem->res);
 	} while (!progress && --retry);
 
 	css_put(&mem->css);
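All three memcontrol.c hunks defend against mm->owner being cleared to NULL by a concurrent mm_update_next_owner(), so every dereference must now tolerate NULL. A rough userspace analogue with a C11 atomic pointer; note that this sketch deliberately elides the object-lifetime guarantee that RCU provides in the real code:

#include <stdatomic.h>
#include <stdio.h>

struct owner { const char *name; };

/* Shared pointer that a concurrent teardown path may clear to NULL. */
static _Atomic(struct owner *) current_owner;

/* Every reader checks for NULL, as mem_cgroup_from_task() now does. */
static const char *owner_name(void)
{
	struct owner *o = atomic_load(&current_owner);

	if (!o)
		return "(none)";	/* raced with teardown */
	return o->name;
}

int main(void)
{
	static struct owner init_owner = { "init" };

	atomic_store(&current_owner, &init_owner);
	printf("%s\n", owner_name());
	atomic_store(&current_owner, NULL);	/* simulated owner teardown */
	printf("%s\n", owner_name());
	return 0;
}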
diff --git a/mm/mmap.c b/mm/mmap.c
index 339cf5c4d5d8..e7a5a68a9c2e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1030,6 +1030,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	} else {
 		switch (flags & MAP_TYPE) {
 		case MAP_SHARED:
+			/*
+			 * Ignore pgoff.
+			 */
+			pgoff = 0;
 			vm_flags |= VM_SHARED | VM_MAYSHARE;
 			break;
 		case MAP_PRIVATE:
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 486ed595ee6f..16ce8b955dcf 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -69,6 +69,6 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
 			(z->zone && !zref_in_nodemask(z, nodes)))
 		z++;
 
-	*zone = zonelist_zone(z++);
+	*zone = zonelist_zone(z);
 	return z;
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 24de8b65fdbd..b40f6d5f8fe9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -7,7 +7,7 @@
  * Contains functions related to writing back dirty pages at the
  * address_space level.
  *
- * 10Apr2002	akpm@zip.com.au
+ * 10Apr2002	Andrew Morton
  *		Initial version
  */
 
@@ -876,6 +876,7 @@ int write_cache_pages(struct address_space *mapping,
 	pgoff_t end;		/* Inclusive */
 	int scanned = 0;
 	int range_whole = 0;
+	long nr_to_write = wbc->nr_to_write;
 
 	if (wbc->nonblocking && bdi_write_congested(bdi)) {
 		wbc->encountered_congestion = 1;
@@ -939,7 +940,7 @@ retry:
 				unlock_page(page);
 				ret = 0;
 			}
-			if (ret || (--(wbc->nr_to_write) <= 0))
+			if (ret || (--nr_to_write <= 0))
 				done = 1;
 			if (wbc->nonblocking && bdi_write_congested(bdi)) {
 				wbc->encountered_congestion = 1;
@@ -958,11 +959,12 @@ retry:
 		index = 0;
 		goto retry;
 	}
-	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
-		mapping->writeback_index = index;
+	if (!wbc->no_nrwrite_index_update) {
+		if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
+			mapping->writeback_index = index;
+		wbc->nr_to_write = nr_to_write;
+	}
 
-	if (wbc->range_cont)
-		wbc->range_start = index << PAGE_CACHE_SHIFT;
 	return ret;
 }
 EXPORT_SYMBOL(write_cache_pages);
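write_cache_pages() now decrements a local copy of nr_to_write and publishes it back only when the caller left no_nrwrite_index_update clear, so a caller looping over many mappings keeps sole control of the shared budget. A small sketch of the local-copy-then-publish pattern; the struct mirrors only the two fields involved:

#include <stdio.h>

struct writeback_control {
	long nr_to_write;		/* shared page budget */
	int no_nrwrite_index_update;	/* caller owns nr_to_write if set */
};

/* Write up to nr_to_write "pages"; dirty is how many are available. */
static int write_pages_sketch(struct writeback_control *wbc, long dirty)
{
	long nr_to_write = wbc->nr_to_write;	/* work on a local copy */
	int written = 0;

	while (dirty-- > 0) {
		written++;
		if (--nr_to_write <= 0)
			break;
	}
	/* Publish the reduced budget only if the caller wants us to. */
	if (!wbc->no_nrwrite_index_update)
		wbc->nr_to_write = nr_to_write;
	return written;
}

int main(void)
{
	struct writeback_control a = { 8, 0 }, b = { 8, 1 };

	printf("wrote %d, budget now %ld\n", write_pages_sketch(&a, 3), a.nr_to_write);
	printf("wrote %d, budget kept %ld\n", write_pages_sketch(&b, 3), b.nr_to_write);
	return 0;
}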
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index af982f7cdb2a..9eb9eb928285 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -268,13 +268,14 @@ void prep_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
+	struct page *p = page + 1;
 
 	set_compound_page_dtor(page, free_compound_page);
 	set_compound_order(page, order);
 	__SetPageHead(page);
-	for (i = 1; i < nr_pages; i++) {
-		struct page *p = page + i;
-
+	for (i = 1; i < nr_pages; i++, p++) {
+		if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
+			p = pfn_to_page(page_to_pfn(page) + i);
 		__SetPageTail(p);
 		p->first_page = page;
 	}
@@ -284,6 +285,7 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
+	struct page *p = page + 1;
 
 	if (unlikely(compound_order(page) != order))
 		bad_page(page);
@@ -291,8 +293,9 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 	if (unlikely(!PageHead(page)))
 		bad_page(page);
 	__ClearPageHead(page);
-	for (i = 1; i < nr_pages; i++) {
-		struct page *p = page + i;
+	for (i = 1; i < nr_pages; i++, p++) {
+		if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
+			p = pfn_to_page(page_to_pfn(page) + i);
 
 		if (unlikely(!PageTail(p) |
 				(p->first_page != page)))
@@ -694,6 +697,9 @@ static int move_freepages(struct zone *zone,
 #endif
 
 	for (page = start_page; page <= end_page;) {
+		/* Make sure we are not inadvertently changing nodes */
+		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
+
 		if (!pfn_valid_within(page_to_pfn(page))) {
 			page++;
 			continue;
@@ -2516,6 +2522,10 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 			continue;
 		page = pfn_to_page(pfn);
 
+		/* Watch out for overlapping nodes */
+		if (page_to_nid(page) != zone_to_nid(zone))
+			continue;
+
 		/* Blocks with reserved pages will never free, skip them. */
 		if (PageReserved(page))
 			continue;
@@ -3942,7 +3952,7 @@ static void check_for_regular_memory(pg_data_t *pgdat)
 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 {
 	unsigned long nid;
-	enum zone_type i;
+	int i;
 
 	/* Sort early_node_map as initialisation assumes it is sorted */
 	sort_node_map();
@@ -4064,7 +4074,7 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data contig_page_data = { .bdata = &bootmem_node_data[0] };
+struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
 EXPORT_SYMBOL(contig_page_data);
 #endif
 
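The compound-page hunks above stop assuming struct page pointer arithmetic is valid across a MAX_ORDER_NR_PAGES boundary: p++ stays cheap within a block, but the pointer is recomputed from the pfn at each boundary, because a SPARSEMEM memmap need not be virtually contiguous there. A userspace analogue — an array held in separately allocated fixed-size chunks, with every name invented for the sketch:

#include <stdio.h>
#include <stdlib.h>

#define CHUNK_SHIFT	4
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)	/* plays MAX_ORDER_NR_PAGES */
#define NCHUNKS		4

static int *chunks[NCHUNKS];	/* separately allocated: not contiguous */

/* Analogue of pfn_to_page(): index -> element address via a lookup table. */
static int *idx_to_elem(long i)
{
	return &chunks[i >> CHUNK_SHIFT][i & (CHUNK_SIZE - 1)];
}

int main(void)
{
	long i, n = (long)NCHUNKS * CHUNK_SIZE;
	int *p;

	for (i = 0; i < NCHUNKS; i++)
		chunks[i] = calloc(CHUNK_SIZE, sizeof(int));

	/*
	 * Cheap p++ within a chunk; recompute the pointer whenever the
	 * index crosses a chunk boundary, as prep_compound_page() now
	 * does at MAX_ORDER_NR_PAGES boundaries.
	 */
	p = idx_to_elem(1);
	for (i = 1; i < n; i++, p++) {
		if ((i & (CHUNK_SIZE - 1)) == 0)
			p = idx_to_elem(i);	/* boundary: re-derive */
		*p = 1;
	}

	for (i = 1; i < n; i++)
		if (*idx_to_elem(i) != 1)
			return 1;
	puts("all tail elements initialized");
	return 0;
}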
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 3444b58033c8..b70a7fec1ff6 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -2,7 +2,6 @@
  * linux/mm/page_isolation.c
  */
 
-#include <stddef.h>
 #include <linux/mm.h>
 #include <linux/page-isolation.h>
 #include <linux/pageblock-flags.h>
@@ -115,8 +114,10 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
 
 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned long pfn;
+	unsigned long pfn, flags;
 	struct page *page;
+	struct zone *zone;
+	int ret;
 
 	pfn = start_pfn;
 	/*
@@ -132,7 +133,9 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 	if (pfn < end_pfn)
 		return -EBUSY;
 	/* Check all pages are free or Marked as ISOLATED */
-	if (__test_page_isolated_in_pageblock(start_pfn, end_pfn))
-		return 0;
-	return -EBUSY;
+	zone = page_zone(pfn_to_page(pfn));
+	spin_lock_irqsave(&zone->lock, flags);
+	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
+	spin_unlock_irqrestore(&zone->lock, flags);
+	return ret ? 0 : -EBUSY;
 }
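test_pages_isolated() now holds zone->lock across the whole scan so the free/isolated state cannot change mid-check, then maps the boolean onto 0 / -EBUSY. The locking shape as a hedged pthread sketch:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define NPAGES 8

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;
static int isolated[NPAGES];		/* guarded by zone_lock */

/* Caller must hold zone_lock: reads the guarded state. */
static int __all_isolated(void)
{
	int i;

	for (i = 0; i < NPAGES; i++)
		if (!isolated[i])
			return 0;
	return 1;
}

/* Lock around the whole scan, then translate the result. */
static int test_all_isolated(void)
{
	int ret;

	pthread_mutex_lock(&zone_lock);
	ret = __all_isolated();
	pthread_mutex_unlock(&zone_lock);
	return ret ? 0 : -EBUSY;
}

int main(void)
{
	int i;

	printf("%d\n", test_all_isolated());	/* -EBUSY: nothing isolated */
	for (i = 0; i < NPAGES; i++)
		isolated[i] = 1;
	printf("%d\n", test_all_isolated());	/* 0: success */
	return 0;
}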
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 0cbe0c60c6bf..a0a14c4d5072 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2002, Linus Torvalds.
  *
- * 09Apr2002	akpm@zip.com.au
+ * 09Apr2002	Andrew Morton
  *		Initial version
  * 29Feb2004	kaos@sgi.com
  *		Move worker thread creation to kthread to avoid chewing
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 3f703f7cb398..8dbb6805ef35 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -26,7 +26,10 @@ DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
 static unsigned long max_pages(unsigned long min_pages)
 {
 	unsigned long node_free_pages, max;
-	struct zone *zones = NODE_DATA(numa_node_id())->node_zones;
+	int node = numa_node_id();
+	struct zone *zones = NODE_DATA(node)->node_zones;
+	int num_cpus_on_node;
+	node_to_cpumask_ptr(cpumask_on_node, node);
 
 	node_free_pages =
 #ifdef CONFIG_ZONE_DMA
@@ -38,6 +41,10 @@ static unsigned long max_pages(unsigned long min_pages)
 		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);
 
 	max = node_free_pages / FRACTION_OF_NODE_MEM;
+
+	num_cpus_on_node = cpus_weight_nr(*cpumask_on_node);
+	max /= num_cpus_on_node;
+
 	return max(max, min_pages);
 }
 
43 50
diff --git a/mm/readahead.c b/mm/readahead.c
index 77e8ddf945e9..6cbd9a72fde2 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2002, Linus Torvalds
  *
- * 09Apr2002	akpm@zip.com.au
+ * 09Apr2002	Andrew Morton
  *		Initial version.
  */
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 04fb4f1ab88e..d87958a5f03e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -50,14 +50,12 @@
 #include <linux/migrate.h>
 #include <linux/highmem.h>
 #include <linux/seq_file.h>
+#include <linux/magic.h>
 
 #include <asm/uaccess.h>
 #include <asm/div64.h>
 #include <asm/pgtable.h>
 
-/* This magic number is used in glibc for posix shared memory */
-#define TMPFS_MAGIC	0x01021994
-
 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
 #define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
@@ -2582,6 +2580,7 @@ put_memory:
 	shmem_unacct_size(flags, size);
 	return ERR_PTR(error);
 }
+EXPORT_SYMBOL_GPL(shmem_file_setup);
 
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
diff --git a/mm/slob.c b/mm/slob.c
index 4c82dd41f32e..cb675d126791 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -514,9 +514,11 @@ size_t ksize(const void *block)
 		return 0;
 
 	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp))
-		return ((slob_t *)block - 1)->units + SLOB_UNIT;
-	else
+	if (slob_page(sp)) {
+		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+		unsigned int *m = (unsigned int *)(block - align);
+		return SLOB_UNITS(*m) * SLOB_UNIT;
+	} else
 		return sp->page.private;
 }
 
522 524
diff --git a/mm/slub.c b/mm/slub.c
index 4f5b96149458..0c83e6afe7b2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1932,6 +1932,7 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_set(&n->nr_slabs, 0);
+	atomic_long_set(&n->total_objects, 0);
 	INIT_LIST_HEAD(&n->full);
 #endif
 }
@@ -2312,7 +2313,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
 	s->refcount = 1;
 #ifdef CONFIG_NUMA
-	s->remote_node_defrag_ratio = 100;
+	s->remote_node_defrag_ratio = 1000;
 #endif
 	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
 		goto error;
@@ -4058,7 +4059,7 @@ static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
 	if (err)
 		return err;
 
-	if (ratio < 100)
+	if (ratio <= 100)
 		s->remote_node_defrag_ratio = ratio * 10;
 
 	return length;
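Raising the default to 1000 and accepting ratio <= 100 lets the sysfs percentage cover the internal per-mille scale inclusively; the old < 100 silently discarded a store of exactly 100. A tiny sketch of the scaling — unlike the kernel attribute, which quietly ignores out-of-range stores, this sketch rejects them:

#include <errno.h>
#include <stdio.h>

static int defrag_ratio = 1000;	/* internal per-mille scale: 0..1000 */

/* Accept a percentage 0..100 inclusive; scale to per-mille. */
static int store_ratio(unsigned long percent)
{
	if (percent > 100)
		return -EINVAL;
	defrag_ratio = (int)percent * 10;
	return 0;
}

int main(void)
{
	store_ratio(100);	/* the boundary value the `<=` fix admits */
	printf("internal ratio: %d\n", defrag_ratio);	/* prints 1000 */
	return 0;
}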
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index ae532f501943..8d7a27a6335c 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -65,31 +65,31 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 	if (!dentry)
 		goto put_memory;
 
+	error = -ENFILE;
+	file = get_empty_filp();
+	if (!file)
+		goto put_dentry;
+
 	error = -ENOSPC;
 	inode = ramfs_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
 	if (!inode)
-		goto put_dentry;
+		goto close_file;
 
 	d_instantiate(dentry, inode);
-	error = -ENFILE;
-	file = alloc_file(shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
-			&ramfs_file_operations);
-	if (!file)
-		goto put_dentry;
-
+	inode->i_size = size;
 	inode->i_nlink = 0;	/* It is unlinked */
+	init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
+			&ramfs_file_operations);
 
-	/* notify everyone as to the change of file size */
-	error = do_truncate(dentry, size, 0, file);
-	if (error < 0)
+#ifndef CONFIG_MMU
+	error = ramfs_nommu_expand_for_mapping(inode, size);
+	if (error)
 		goto close_file;
-
+#endif
 	return file;
 
 close_file:
 	put_filp(file);
-	return ERR_PTR(error);
-
 put_dentry:
 	dput(dentry);
 put_memory:
diff --git a/mm/truncate.c b/mm/truncate.c
index 250505091d37..e83e4b114ef1 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2002, Linus Torvalds
  *
- * 10Sep2002	akpm@zip.com.au
+ * 10Sep2002	Andrew Morton
  *		Initial version.
  */
 
@@ -380,7 +380,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
  * Any pages which are found to be mapped into pagetables are unmapped prior to
  * invalidation.
  *
- * Returns -EIO if any pages could not be invalidated.
+ * Returns -EBUSY if any pages could not be invalidated.
  */
 int invalidate_inode_pages2_range(struct address_space *mapping,
 					pgoff_t start, pgoff_t end)
@@ -440,7 +440,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		ret2 = do_launder_page(mapping, page);
 		if (ret2 == 0) {
 			if (!invalidate_complete_page2(mapping, page))
-				ret2 = -EIO;
+				ret2 = -EBUSY;
 		}
 		if (ret2 < 0)
 			ret = ret2;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 85b9a0d2c877..bba06c41fc59 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -180,6 +180,13 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 	pmd_t *pmd;
 	pte_t *ptep, pte;
 
+	/*
+	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
+	 * architectures that do not vmalloc module space
+	 */
+	VIRTUAL_BUG_ON(!is_vmalloc_addr(vmalloc_addr) &&
+			!is_module_address(addr));
+
 	if (!pgd_none(*pgd)) {
 		pud = pud_offset(pgd, addr);
 		if (!pud_none(*pud)) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index b0d08e667ece..d7826af2fb07 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -516,9 +516,26 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
 			continue;
 
 		page = pfn_to_page(pfn);
+#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES
+		/*
+		 * Ordinarily, memory holes in flatmem still have a valid
+		 * memmap for the PFN range. However, an architecture for
+		 * embedded systems (e.g. ARM) can free up the memmap backing
+		 * holes to save memory on the assumption the memmap is
+		 * never used. The page_zone linkages are then broken even
+		 * though pfn_valid() returns true. Skip the page if the
+		 * linkages are broken. Even if this test passed, the impact
+		 * is that the counters for the movable type are off but
+		 * fragmentation monitoring is likely meaningless on small
+		 * systems.
+		 */
+		if (page_zone(page) != zone)
+			continue;
+#endif
 		mtype = get_pageblock_migratetype(page);
 
-		count[mtype]++;
+		if (mtype < MIGRATE_TYPES)
+			count[mtype]++;
 	}
 
 	/* Print counts */
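The guarded increment matters because, in the flatmem-holes case handled just above it, pageblock bits can be read from a freed memmap and decode to a migratetype outside the valid range; an unvalidated value must never be used as an array index. The defensive pattern in isolation, with invented names:

#include <stdio.h>

enum { TYPE_A, TYPE_B, TYPE_C, NR_TYPES };

static unsigned long count[NR_TYPES];

/* Tally one sample, discarding values that do not decode to a valid type. */
static void tally(int mtype)
{
	if (mtype < 0 || mtype >= NR_TYPES)
		return;		/* garbage from an untrusted source: skip */
	count[mtype]++;
}

int main(void)
{
	int samples[] = { TYPE_A, TYPE_B, 7, -1, TYPE_B };	/* 7 and -1 are garbage */
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		tally(samples[i]);
	for (i = 0; i < NR_TYPES; i++)
		printf("type %zu: %lu\n", i, count[i]);
	return 0;
}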