Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig           |  3
-rw-r--r--  mm/bounce.c          |  2
-rw-r--r--  mm/filemap.c         | 15
-rw-r--r--  mm/highmem.c         |  5
-rw-r--r--  mm/memcontrol.c      | 18
-rw-r--r--  mm/page_alloc.c      | 13
-rw-r--r--  mm/page_isolation.c  | 12
-rw-r--r--  mm/shmem.c           |  4
-rw-r--r--  mm/slob.c            |  8
-rw-r--r--  mm/slub.c            |  1
-rw-r--r--  mm/tiny-shmem.c      | 26
-rw-r--r--  mm/vmalloc.c         |  7
12 files changed, 77 insertions(+), 37 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 0bd9c2dbb2a0..91ee3922510a 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -187,6 +187,9 @@ config RESOURCES_64BIT
 	help
 	  This option allows memory and IO resources to be 64 bit.
 
+config PHYS_ADDR_T_64BIT
+	def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT
+
 config ZONE_DMA_FLAG
 	int
 	default "0" if !ZONE_DMA
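
The new symbol lets an architecture opt in to 64-bit physical addresses independently of its virtual word size. As a sketch of what the symbol selects (the real typedef lives in include/linux/types.h using the kernel's u64/u32 types; this is reproduced from memory as an illustration, not quoted from this patch):

#include <stdint.h>
#include <stdio.h>

/* Sketch: CONFIG_PHYS_ADDR_T_64BIT picks the width of phys_addr_t. */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef uint64_t phys_addr_t;
#else
typedef uint32_t phys_addr_t;
#endif

int main(void)
{
	/* Prints 4 unless built with -DCONFIG_PHYS_ADDR_T_64BIT. */
	printf("sizeof(phys_addr_t) = %zu\n", sizeof(phys_addr_t));
	return 0;
}
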
diff --git a/mm/bounce.c b/mm/bounce.c
index b6d2d0f1019b..06722c403058 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -267,7 +267,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	/*
 	 * Data-less bio, nothing to bounce
 	 */
-	if (bio_empty_barrier(*bio_orig))
+	if (!bio_has_data(*bio_orig))
 		return;
 
 	/*
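
bio_empty_barrier() only matched barrier bios without data, so other payload-free bios could still reach the bounce logic. bio_has_data() covers every bio that carries no payload. A minimal model of the predicate's intent (bio_model is a stand-in struct, not the block layer's exact definition):

#include <stdbool.h>
#include <stdio.h>

/* Model of a bio: only the field the predicate cares about. */
struct bio_model { void *bi_io_vec; };

/* Sketch of the bio_has_data() idea: a bio with no segment vector
 * carries no payload and can skip bouncing, barrier or not. */
static bool bio_model_has_data(const struct bio_model *bio)
{
	return bio && bio->bi_io_vec != NULL;
}

int main(void)
{
	struct bio_model barrier = { 0 }, rw = { (void *)&rw };

	printf("%d %d\n", bio_model_has_data(&barrier),
	       bio_model_has_data(&rw));	/* prints: 0 1 */
	return 0;
}
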
diff --git a/mm/filemap.c b/mm/filemap.c
index 876bc595d0f8..494ff20b6cfa 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1100,8 +1100,9 @@ page_ok:
 
 page_not_up_to_date:
 	/* Get exclusive access to the page ... */
-	if (lock_page_killable(page))
-		goto readpage_eio;
+	error = lock_page_killable(page);
+	if (unlikely(error))
+		goto readpage_error;
 
 page_not_up_to_date_locked:
 	/* Did it get truncated before we got the lock? */
@@ -1130,8 +1131,9 @@ readpage:
 	}
 
 	if (!PageUptodate(page)) {
-		if (lock_page_killable(page))
-			goto readpage_eio;
+		error = lock_page_killable(page);
+		if (unlikely(error))
+			goto readpage_error;
 		if (!PageUptodate(page)) {
 			if (page->mapping == NULL) {
 				/*
@@ -1143,15 +1145,14 @@ readpage:
 			}
 			unlock_page(page);
 			shrink_readahead_size_eio(filp, ra);
-			goto readpage_eio;
+			error = -EIO;
+			goto readpage_error;
 		}
 		unlock_page(page);
 	}
 
 	goto page_ok;
 
-readpage_eio:
-	error = -EIO;
 readpage_error:
 	/* UHHUH! A synchronous read error occurred. Report it */
 	desc->error = error;
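
The old readpage_eio label overwrote every locking failure with -EIO, so a task killed while sleeping in lock_page_killable() reported an I/O error instead of the real reason. A small userspace model of the fixed control flow (lock_killable() is a stand-in for lock_page_killable()):

#include <errno.h>
#include <stdio.h>

/* Stand-in for lock_page_killable(): 0 on success, -EINTR when a
 * fatal signal arrived while waiting for the lock. */
static int lock_killable(int fatal_signal_pending)
{
	return fatal_signal_pending ? -EINTR : 0;
}

/* Patched shape: propagate the lock error as-is; only genuine read
 * failures set -EIO explicitly. */
static int read_path(int fatal_signal_pending)
{
	int error = lock_killable(fatal_signal_pending);

	if (error)
		return error;	/* -EINTR survives to the caller */
	/* ... page locked; a real I/O failure would set -EIO here ... */
	return 0;
}

int main(void)
{
	printf("%d %d\n", read_path(0), read_path(1));	/* prints: 0 -4 */
	return 0;
}
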
diff --git a/mm/highmem.c b/mm/highmem.c
index e16e1523b688..b36b83b920ff 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -70,6 +70,7 @@ static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
 static void flush_all_zero_pkmaps(void)
 {
 	int i;
+	int need_flush = 0;
 
 	flush_cache_kmaps();
 
@@ -101,8 +102,10 @@ static void flush_all_zero_pkmaps(void)
 			  &pkmap_page_table[i]);
 
 		set_page_address(page, NULL);
+		need_flush = 1;
 	}
-	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
+	if (need_flush)
+		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
 /**
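
Flushing the TLB over the whole pkmap window is expensive, and if no pkmap entry was actually torn down there is nothing stale to flush. A sketch of the pattern, with flush() standing in for flush_tlb_kernel_range() and arrays standing in for the pkmap counters:

#include <stdio.h>

#define NSLOTS 8

static int refcount[NSLOTS] = { 0, 2, 0, 1, 0, 0, 3, 0 };
static int mapped[NSLOTS]   = { 1, 1, 0, 1, 1, 0, 1, 1 };

static void flush(void) { puts("flush TLB range"); }

/* Sketch of flush_all_zero_pkmaps(): unmap unused slots, and only
 * pay for the range flush if at least one slot was torn down. */
static void flush_all_zero_slots(void)
{
	int i, need_flush = 0;

	for (i = 0; i < NSLOTS; i++) {
		if (refcount[i] == 0 && mapped[i]) {
			mapped[i] = 0;	/* clear the mapping */
			need_flush = 1;
		}
	}
	if (need_flush)
		flush();
}

int main(void) { flush_all_zero_slots(); return 0; }
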
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0f1f7a7374ba..36896f3eb7f5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -250,6 +250,14 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
+	/*
+	 * mm_update_next_owner() may clear mm->owner to NULL
+	 * if it races with swapoff, page migration, etc.
+	 * So this can be called with p == NULL.
+	 */
+	if (unlikely(!p))
+		return NULL;
+
 	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
 				struct mem_cgroup, css);
 }
@@ -549,6 +557,11 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	if (likely(!memcg)) {
 		rcu_read_lock();
 		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+		if (unlikely(!mem)) {
+			rcu_read_unlock();
+			kmem_cache_free(page_cgroup_cache, pc);
+			return 0;
+		}
 		/*
 		 * For every charge from the cgroup, increment reference count
 		 */
@@ -801,11 +814,16 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 
 	rcu_read_lock();
 	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+	if (unlikely(!mem)) {
+		rcu_read_unlock();
+		return 0;
+	}
 	css_get(&mem->css);
 	rcu_read_unlock();
 
 	do {
 		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
+		progress += res_counter_check_under_limit(&mem->res);
 	} while (!progress && --retry);
 
 	css_put(&mem->css);
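
All three hunks defend against the same race: mm_update_next_owner() can leave mm->owner NULL, so mem_cgroup_from_task() may now return NULL and each caller must bail out, releasing whatever it holds, instead of dereferencing it. A minimal model of the caller-side pattern (names are stand-ins, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

struct mem_cgroup_model { int usage; };

/* Stand-in for mem_cgroup_from_task(): may return NULL when
 * mm->owner was cleared by a racing exit/swapoff path. */
static struct mem_cgroup_model *cgroup_from_owner(struct mem_cgroup_model *owner)
{
	return owner;	/* NULL propagates to the caller */
}

/* Caller-side pattern from the patch: on NULL, undo what we hold
 * (here the page_cgroup allocation) and return success, since the
 * mm is going away anyway. */
static int charge(struct mem_cgroup_model *owner, void *pc)
{
	struct mem_cgroup_model *mem = cgroup_from_owner(owner);

	if (!mem) {
		free(pc);
		return 0;
	}
	mem->usage++;
	free(pc);	/* model only; the real code links pc into the page */
	return 0;
}

int main(void)
{
	struct mem_cgroup_model grp = { 0 };

	charge(&grp, malloc(16));
	charge(NULL, malloc(16));
	printf("usage=%d\n", grp.usage);	/* prints: usage=1 */
	return 0;
}
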
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e293c58bea58..27b8681139fd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -268,13 +268,14 @@ void prep_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
+	struct page *p = page + 1;
 
 	set_compound_page_dtor(page, free_compound_page);
 	set_compound_order(page, order);
 	__SetPageHead(page);
-	for (i = 1; i < nr_pages; i++) {
-		struct page *p = page + i;
-
+	for (i = 1; i < nr_pages; i++, p++) {
+		if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
+			p = pfn_to_page(page_to_pfn(page) + i);
 		__SetPageTail(p);
 		p->first_page = page;
 	}
@@ -284,6 +285,7 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
+	struct page *p = page + 1;
 
 	if (unlikely(compound_order(page) != order))
 		bad_page(page);
@@ -291,8 +293,9 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 	if (unlikely(!PageHead(page)))
 		bad_page(page);
 	__ClearPageHead(page);
-	for (i = 1; i < nr_pages; i++) {
-		struct page *p = page + i;
+	for (i = 1; i < nr_pages; i++, p++) {
+		if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
+			p = pfn_to_page(page_to_pfn(page) + i);
 
 		if (unlikely(!PageTail(p) |
 				(p->first_page != page)))
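
The memmap is only guaranteed virtually contiguous within a MAX_ORDER block (e.g. with SPARSEMEM), so plain `page + i` is unsafe once a compound page spans such a boundary. The loop keeps the cheap p++ walk and re-derives the pointer via pfn_to_page() whenever i crosses a boundary. A userspace model of the stride logic, with two deliberately non-adjacent memmap chunks:

#include <stdio.h>

#define MAX_ORDER_NR_PAGES 4	/* model value; the kernel's is arch-defined */

struct page_model { int pfn; };

/* Two memmap chunks that are NOT adjacent in memory. */
static struct page_model chunk0[MAX_ORDER_NR_PAGES];
static struct page_model chunk1[MAX_ORDER_NR_PAGES];

static struct page_model *pfn_to_page_model(int pfn)
{
	return (pfn < MAX_ORDER_NR_PAGES) ? &chunk0[pfn]
					  : &chunk1[pfn - MAX_ORDER_NR_PAGES];
}

int main(void)
{
	int base_pfn = 0, nr_pages = 8, i;
	struct page_model *p = pfn_to_page_model(base_pfn) + 1;

	for (i = 1; i < nr_pages; i++, p++) {
		/* re-derive p whenever i crosses a MAX_ORDER boundary;
		 * the stale p is never dereferenced */
		if ((i & (MAX_ORDER_NR_PAGES - 1)) == 0)
			p = pfn_to_page_model(base_pfn + i);
		p->pfn = base_pfn + i;	/* stands in for __SetPageTail() */
	}
	printf("last tail pfn = %d\n",
	       chunk1[MAX_ORDER_NR_PAGES - 1].pfn);	/* prints 7 */
	return 0;
}
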
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index c69f84fe038d..b70a7fec1ff6 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -114,8 +114,10 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
 
 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned long pfn;
+	unsigned long pfn, flags;
 	struct page *page;
+	struct zone *zone;
+	int ret;
 
 	pfn = start_pfn;
 	/*
@@ -131,7 +133,9 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 	if (pfn < end_pfn)
 		return -EBUSY;
 	/* Check all pages are free or Marked as ISOLATED */
-	if (__test_page_isolated_in_pageblock(start_pfn, end_pfn))
-		return 0;
-	return -EBUSY;
+	zone = page_zone(pfn_to_page(pfn));
+	spin_lock_irqsave(&zone->lock, flags);
+	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
+	spin_unlock_irqrestore(&zone->lock, flags);
+	return ret ? 0 : -EBUSY;
 }
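
The isolation check walks free-list state, so it has to be serialized against the page allocator; the patch takes the IRQ-safe zone->lock around the walk instead of testing racily. A sketch of the locking shape with a pthread mutex standing in for the zone spinlock:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;
static int pages_isolated = 1;	/* toy stand-in for free-list state */

/* Must be called with zone_lock held, like
 * __test_page_isolated_in_pageblock(). */
static int __test_isolated(void)
{
	return pages_isolated;
}

/* Sketch of the patched test_pages_isolated(): take the lock the
 * allocator uses, test, translate the result to 0 / -EBUSY. */
static int test_isolated(void)
{
	int ret;

	pthread_mutex_lock(&zone_lock);	/* spin_lock_irqsave() in-kernel */
	ret = __test_isolated();
	pthread_mutex_unlock(&zone_lock);
	return ret ? 0 : -EBUSY;
}

int main(void)
{
	printf("%d\n", test_isolated());	/* prints: 0 */
	return 0;
}
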
diff --git a/mm/shmem.c b/mm/shmem.c
index 04fb4f1ab88e..bf66d0191baf 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -50,14 +50,12 @@
 #include <linux/migrate.h>
 #include <linux/highmem.h>
 #include <linux/seq_file.h>
+#include <linux/magic.h>
 
 #include <asm/uaccess.h>
 #include <asm/div64.h>
 #include <asm/pgtable.h>
 
-/* This magic number is used in glibc for posix shared memory */
-#define TMPFS_MAGIC	0x01021994
-
 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
 #define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
diff --git a/mm/slob.c b/mm/slob.c
index 4c82dd41f32e..cb675d126791 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -514,9 +514,11 @@ size_t ksize(const void *block)
 		return 0;
 
 	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp))
-		return ((slob_t *)block - 1)->units + SLOB_UNIT;
-	else
+	if (slob_page(sp)) {
+		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+		unsigned int *m = (unsigned int *)(block - align);
+		return SLOB_UNITS(*m) * SLOB_UNIT;
+	} else
 		return sp->page.private;
 }
 
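
SLOB's kmalloc() stores the requested size in a word just below the aligned payload, so ksize() must read that prefix back; the old expression mixed SLOB unit counts with byte sizes. A userspace model of the size-prefix scheme (prefix_alloc/prefix_ksize are illustrative names, not SLOB's API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ALIGN_SZ sizeof(unsigned int)	/* model alignment; the kernel
					 * uses max(ARCH_KMALLOC_MINALIGN,
					 * ARCH_SLAB_MINALIGN) */

/* Allocate with a size prefix stored just below the returned block,
 * the way SLOB's kmalloc() does. */
static void *prefix_alloc(unsigned int size)
{
	unsigned char *raw = malloc(ALIGN_SZ + size);

	if (!raw)
		return NULL;
	memcpy(raw, &size, sizeof(size));
	return raw + ALIGN_SZ;
}

/* ksize() analogue: read the stored size back from block - align
 * (SLOB additionally rounds it to whole SLOB units). */
static unsigned int prefix_ksize(const void *block)
{
	unsigned int size;

	memcpy(&size, (const unsigned char *)block - ALIGN_SZ, sizeof(size));
	return size;
}

int main(void)
{
	void *p = prefix_alloc(100);

	printf("%u\n", prefix_ksize(p));	/* prints: 100 */
	free((unsigned char *)p - ALIGN_SZ);
	return 0;
}
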
diff --git a/mm/slub.c b/mm/slub.c
index fb486d5540f8..0c83e6afe7b2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1932,6 +1932,7 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_set(&n->nr_slabs, 0);
+	atomic_long_set(&n->total_objects, 0);
 	INIT_LIST_HEAD(&n->full);
 #endif
 }
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index ae532f501943..8d7a27a6335c 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -65,31 +65,31 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 	if (!dentry)
 		goto put_memory;
 
+	error = -ENFILE;
+	file = get_empty_filp();
+	if (!file)
+		goto put_dentry;
+
 	error = -ENOSPC;
 	inode = ramfs_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
 	if (!inode)
-		goto put_dentry;
+		goto close_file;
 
 	d_instantiate(dentry, inode);
-	error = -ENFILE;
-	file = alloc_file(shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
-			&ramfs_file_operations);
-	if (!file)
-		goto put_dentry;
-
+	inode->i_size = size;
 	inode->i_nlink = 0;	/* It is unlinked */
+	init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
+			&ramfs_file_operations);
 
-	/* notify everyone as to the change of file size */
-	error = do_truncate(dentry, size, 0, file);
-	if (error < 0)
-		goto close_file;
-
+#ifndef CONFIG_MMU
+	error = ramfs_nommu_expand_for_mapping(inode, size);
+	if (error)
+		goto close_file;
+#endif
 	return file;
 
 close_file:
 	put_filp(file);
-	return ERR_PTR(error);
-
 put_dentry:
 	dput(dentry);
 put_memory:
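
Allocating the struct file before the inode lets every later failure unwind through a single fall-through chain of labels (close_file, then put_dentry, then put_memory), while do_truncate() is replaced by setting i_size directly plus, on nommu, pre-expanding the backing pages. A generic sketch of that goto-unwind ordering (names and malloc stand in for the kernel objects):

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the error-unwind ordering in the patched
 * shmem_file_setup(): acquire in one order, release in reverse via
 * fall-through labels. */
static void *setup(int fail_at)
{
	void *dentry, *file, *inode;

	dentry = fail_at == 1 ? NULL : malloc(1);
	if (!dentry)
		goto put_memory;
	file = fail_at == 2 ? NULL : malloc(1);
	if (!file)
		goto put_dentry;
	inode = fail_at == 3 ? NULL : malloc(1);
	if (!inode)
		goto close_file;
	free(inode);	/* model only; the real code keeps all three */
	free(dentry);
	return file;

close_file:
	free(file);
put_dentry:
	free(dentry);
put_memory:
	return NULL;
}

int main(void)
{
	printf("%s\n", setup(3) ? "ok" : "unwound");	/* prints: unwound */
	free(setup(0));
	return 0;
}
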
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 85b9a0d2c877..bba06c41fc59 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -180,6 +180,13 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 	pmd_t *pmd;
 	pte_t *ptep, pte;
 
+	/*
+	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
+	 * architectures that do not vmalloc module space
+	 */
+	VIRTUAL_BUG_ON(!is_vmalloc_addr(vmalloc_addr) &&
+			!is_module_address(addr));
+
 	if (!pgd_none(*pgd)) {
 		pud = pud_offset(pgd, addr);
 		if (!pud_none(*pud)) {
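
vmalloc_to_page() is only meaningful for addresses in the vmalloc or module ranges; the new VIRTUAL_BUG_ON catches callers that hand it arbitrary pointers before the page-table walk dereferences anything. A sketch of such a debug-only range assertion, with assert() and model bounds standing in for VIRTUAL_BUG_ON and the kernel's VMALLOC_START/END:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Model bounds; the kernel compares against the vmalloc and module
 * address ranges instead. */
#define VMALLOC_START_M 0x1000u
#define VMALLOC_END_M   0x2000u

static int in_vmalloc_range(uintptr_t addr)
{
	return addr >= VMALLOC_START_M && addr < VMALLOC_END_M;
}

/* VIRTUAL_BUG_ON analogue: a sanity check on the address class that
 * costs nothing in non-debug builds (here, with -DNDEBUG). */
static void check_addr(uintptr_t addr)
{
	assert(in_vmalloc_range(addr));
	printf("0x%lx ok\n", (unsigned long)addr);
}

int main(void)
{
	check_addr(0x1800);	/* in range: passes */
	return 0;
}
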