path: root/mm
author     David Woodhouse <David.Woodhouse@intel.com>  2008-10-13 12:13:56 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>  2008-10-13 12:13:56 -0400
commit     e758936e02700ff88a0b08b722a3847b95283ef2 (patch)
tree       50c919bef1b459a778b85159d5929de95b6c4a01 /mm
parent     239cfbde1f5843c4a24199f117d5f67f637d72d5 (diff)
parent     4480f15b3306f43bbb0310d461142b4e897ca45b (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts: include/asm-x86/statfs.h
Diffstat (limited to 'mm')
-rw-r--r--  mm/bounce.c           2
-rw-r--r--  mm/filemap.c         11
-rw-r--r--  mm/highmem.c          5
-rw-r--r--  mm/memcontrol.c      18
-rw-r--r--  mm/mmap.c             4
-rw-r--r--  mm/mmzone.c           2
-rw-r--r--  mm/page_alloc.c      22
-rw-r--r--  mm/page_isolation.c  12
-rw-r--r--  mm/quicklist.c        9
-rw-r--r--  mm/slob.c             8
-rw-r--r--  mm/slub.c             1
-rw-r--r--  mm/tiny-shmem.c      26
-rw-r--r--  mm/truncate.c         4
-rw-r--r--  mm/vmalloc.c          7
14 files changed, 97 insertions(+), 34 deletions(-)
diff --git a/mm/bounce.c b/mm/bounce.c
index b6d2d0f1019b..06722c403058 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -267,7 +267,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	/*
 	 * Data-less bio, nothing to bounce
 	 */
-	if (bio_empty_barrier(*bio_orig))
+	if (!bio_has_data(*bio_orig))
 		return;
 
 	/*
diff --git a/mm/filemap.c b/mm/filemap.c
index 54e968650855..876bc595d0f8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2129,13 +2129,20 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	 * After a write we want buffered reads to be sure to go to disk to get
 	 * the new data.  We invalidate clean cached page from the region we're
 	 * about to write.  We do this *before* the write so that we can return
-	 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
+	 * without clobbering -EIOCBQUEUED from ->direct_IO().
 	 */
 	if (mapping->nrpages) {
 		written = invalidate_inode_pages2_range(mapping,
 					pos >> PAGE_CACHE_SHIFT, end);
-		if (written)
+		/*
+		 * If a page can not be invalidated, return 0 to fall back
+		 * to buffered write.
+		 */
+		if (written) {
+			if (written == -EBUSY)
+				return 0;
 			goto out;
+		}
 	}
 
 	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
diff --git a/mm/highmem.c b/mm/highmem.c
index e16e1523b688..b36b83b920ff 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -70,6 +70,7 @@ static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
 static void flush_all_zero_pkmaps(void)
 {
 	int i;
+	int need_flush = 0;
 
 	flush_cache_kmaps();
 
@@ -101,8 +102,10 @@ static void flush_all_zero_pkmaps(void)
 			  &pkmap_page_table[i]);
 
 		set_page_address(page, NULL);
+		need_flush = 1;
 	}
-	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
+	if (need_flush)
+		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
 /**
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0f1f7a7374ba..36896f3eb7f5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -250,6 +250,14 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
+	/*
+	 * mm_update_next_owner() may clear mm->owner to NULL
+	 * if it races with swapoff, page migration, etc.
+	 * So this can be called with p == NULL.
+	 */
+	if (unlikely(!p))
+		return NULL;
+
 	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
 				struct mem_cgroup, css);
 }
@@ -549,6 +557,11 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	if (likely(!memcg)) {
 		rcu_read_lock();
 		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+		if (unlikely(!mem)) {
+			rcu_read_unlock();
+			kmem_cache_free(page_cgroup_cache, pc);
+			return 0;
+		}
 		/*
 		 * For every charge from the cgroup, increment reference count
 		 */
@@ -801,11 +814,16 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 
 	rcu_read_lock();
 	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+	if (unlikely(!mem)) {
+		rcu_read_unlock();
+		return 0;
+	}
 	css_get(&mem->css);
 	rcu_read_unlock();
 
 	do {
 		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
+		progress += res_counter_check_under_limit(&mem->res);
 	} while (!progress && --retry);
 
 	css_put(&mem->css);
diff --git a/mm/mmap.c b/mm/mmap.c
index 339cf5c4d5d8..e7a5a68a9c2e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1030,6 +1030,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	} else {
 		switch (flags & MAP_TYPE) {
 		case MAP_SHARED:
+			/*
+			 * Ignore pgoff.
+			 */
+			pgoff = 0;
 			vm_flags |= VM_SHARED | VM_MAYSHARE;
 			break;
 		case MAP_PRIVATE:
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 486ed595ee6f..16ce8b955dcf 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -69,6 +69,6 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
 			(z->zone && !zref_in_nodemask(z, nodes)))
 		z++;
 
-	*zone = zonelist_zone(z++);
+	*zone = zonelist_zone(z);
 	return z;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index af982f7cdb2a..27b8681139fd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -268,13 +268,14 @@ void prep_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
+	struct page *p = page + 1;
 
 	set_compound_page_dtor(page, free_compound_page);
 	set_compound_order(page, order);
 	__SetPageHead(page);
-	for (i = 1; i < nr_pages; i++) {
-		struct page *p = page + i;
-
+	for (i = 1; i < nr_pages; i++, p++) {
+		if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
+			p = pfn_to_page(page_to_pfn(page) + i);
 		__SetPageTail(p);
 		p->first_page = page;
 	}
@@ -284,6 +285,7 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
+	struct page *p = page + 1;
 
 	if (unlikely(compound_order(page) != order))
 		bad_page(page);
@@ -291,8 +293,9 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 	if (unlikely(!PageHead(page)))
 		bad_page(page);
 	__ClearPageHead(page);
-	for (i = 1; i < nr_pages; i++) {
-		struct page *p = page + i;
+	for (i = 1; i < nr_pages; i++, p++) {
+		if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
+			p = pfn_to_page(page_to_pfn(page) + i);
 
 		if (unlikely(!PageTail(p) |
 				(p->first_page != page)))
@@ -694,6 +697,9 @@ static int move_freepages(struct zone *zone,
 #endif
 
 	for (page = start_page; page <= end_page;) {
+		/* Make sure we are not inadvertently changing nodes */
+		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
+
 		if (!pfn_valid_within(page_to_pfn(page))) {
 			page++;
 			continue;
@@ -2516,6 +2522,10 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 			continue;
 		page = pfn_to_page(pfn);
 
+		/* Watch out for overlapping nodes */
+		if (page_to_nid(page) != zone_to_nid(zone))
+			continue;
+
 		/* Blocks with reserved pages will never free, skip them. */
 		if (PageReserved(page))
 			continue;
@@ -4064,7 +4074,7 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data contig_page_data = { .bdata = &bootmem_node_data[0] };
+struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
 EXPORT_SYMBOL(contig_page_data);
 #endif
 
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index c69f84fe038d..b70a7fec1ff6 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -114,8 +114,10 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
114 114
115int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) 115int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
116{ 116{
117 unsigned long pfn; 117 unsigned long pfn, flags;
118 struct page *page; 118 struct page *page;
119 struct zone *zone;
120 int ret;
119 121
120 pfn = start_pfn; 122 pfn = start_pfn;
121 /* 123 /*
@@ -131,7 +133,9 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 	if (pfn < end_pfn)
 		return -EBUSY;
 	/* Check all pages are free or Marked as ISOLATED */
-	if (__test_page_isolated_in_pageblock(start_pfn, end_pfn))
-		return 0;
-	return -EBUSY;
+	zone = page_zone(pfn_to_page(pfn));
+	spin_lock_irqsave(&zone->lock, flags);
+	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
+	spin_unlock_irqrestore(&zone->lock, flags);
+	return ret ? 0 : -EBUSY;
 }
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 3f703f7cb398..8dbb6805ef35 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -26,7 +26,10 @@ DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
 static unsigned long max_pages(unsigned long min_pages)
 {
 	unsigned long node_free_pages, max;
-	struct zone *zones = NODE_DATA(numa_node_id())->node_zones;
+	int node = numa_node_id();
+	struct zone *zones = NODE_DATA(node)->node_zones;
+	int num_cpus_on_node;
+	node_to_cpumask_ptr(cpumask_on_node, node);
 
 	node_free_pages =
 #ifdef CONFIG_ZONE_DMA
31 node_free_pages = 34 node_free_pages =
32#ifdef CONFIG_ZONE_DMA 35#ifdef CONFIG_ZONE_DMA
@@ -38,6 +41,10 @@ static unsigned long max_pages(unsigned long min_pages)
 		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);
 
 	max = node_free_pages / FRACTION_OF_NODE_MEM;
+
+	num_cpus_on_node = cpus_weight_nr(*cpumask_on_node);
+	max /= num_cpus_on_node;
+
 	return max(max, min_pages);
 }
 
diff --git a/mm/slob.c b/mm/slob.c
index 4c82dd41f32e..cb675d126791 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -514,9 +514,11 @@ size_t ksize(const void *block)
 		return 0;
 
 	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp))
-		return ((slob_t *)block - 1)->units + SLOB_UNIT;
-	else
+	if (slob_page(sp)) {
+		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+		unsigned int *m = (unsigned int *)(block - align);
+		return SLOB_UNITS(*m) * SLOB_UNIT;
+	} else
 		return sp->page.private;
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index fb486d5540f8..0c83e6afe7b2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1932,6 +1932,7 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_set(&n->nr_slabs, 0);
+	atomic_long_set(&n->total_objects, 0);
 	INIT_LIST_HEAD(&n->full);
 #endif
 }
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index ae532f501943..8d7a27a6335c 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -65,31 +65,31 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 	if (!dentry)
 		goto put_memory;
 
+	error = -ENFILE;
+	file = get_empty_filp();
+	if (!file)
+		goto put_dentry;
+
 	error = -ENOSPC;
 	inode = ramfs_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
 	if (!inode)
-		goto put_dentry;
+		goto close_file;
 
 	d_instantiate(dentry, inode);
-	error = -ENFILE;
-	file = alloc_file(shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
-			&ramfs_file_operations);
-	if (!file)
-		goto put_dentry;
-
+	inode->i_size = size;
 	inode->i_nlink = 0;	/* It is unlinked */
+	init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
+			&ramfs_file_operations);
 
-	/* notify everyone as to the change of file size */
-	error = do_truncate(dentry, size, 0, file);
-	if (error < 0)
+#ifndef CONFIG_MMU
+	error = ramfs_nommu_expand_for_mapping(inode, size);
+	if (error)
 		goto close_file;
-
+#endif
 	return file;
 
 close_file:
 	put_filp(file);
-	return ERR_PTR(error);
-
 put_dentry:
 	dput(dentry);
 put_memory:
diff --git a/mm/truncate.c b/mm/truncate.c
index 250505091d37..6650c1d878b4 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -380,7 +380,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
  * Any pages which are found to be mapped into pagetables are unmapped prior to
  * invalidation.
  *
- * Returns -EIO if any pages could not be invalidated.
+ * Returns -EBUSY if any pages could not be invalidated.
  */
 int invalidate_inode_pages2_range(struct address_space *mapping,
 				  pgoff_t start, pgoff_t end)
@@ -440,7 +440,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 			ret2 = do_launder_page(mapping, page);
 			if (ret2 == 0) {
 				if (!invalidate_complete_page2(mapping, page))
-					ret2 = -EIO;
+					ret2 = -EBUSY;
 			}
 			if (ret2 < 0)
 				ret = ret2;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 85b9a0d2c877..bba06c41fc59 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -180,6 +180,13 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 	pmd_t *pmd;
 	pte_t *ptep, pte;
 
+	/*
+	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
+	 * architectures that do not vmalloc module space
+	 */
+	VIRTUAL_BUG_ON(!is_vmalloc_addr(vmalloc_addr) &&
+			!is_module_address(addr));
+
 	if (!pgd_none(*pgd)) {
 		pud = pud_offset(pgd, addr);
 		if (!pud_none(*pud)) {