Diffstat (limited to 'mm')
 mm/fadvise.c        | 20
 mm/highmem.c        | 15
 mm/hugetlb.c        |  6
 mm/memory.c         |  2
 mm/mmap.c           |  9
 mm/page-writeback.c |  2
 mm/slab.c           | 18
 mm/swap_state.c     |  3
 mm/swapfile.c       | 14
 mm/vmalloc.c        |  3
 10 files changed, 32 insertions(+), 60 deletions(-)
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 907c39257ca0..0a03357a1f8e 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -35,17 +35,6 @@
  *
  * LINUX_FADV_ASYNC_WRITE: push some or all of the dirty pages at the disk.
  *
- * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE: push all of the currently
- * dirty pages at the disk.
- *
- * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE, LINUX_FADV_WRITE_WAIT: push
- * all of the currently dirty pages at the disk, wait until they have been
- * written.
- *
- * It should be noted that none of these operations write out the file's
- * metadata. So unless the application is strictly performing overwrites of
- * already-instantiated disk blocks, there are no guarantees here that the data
- * will be available after a crash.
  */
 asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 {
@@ -129,15 +118,6 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 			invalidate_mapping_pages(mapping, start_index,
 						end_index);
 		break;
-	case LINUX_FADV_ASYNC_WRITE:
-		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
-						WB_SYNC_NONE);
-		break;
-	case LINUX_FADV_WRITE_WAIT:
-		ret = wait_on_page_writeback_range(mapping,
-					offset >> PAGE_CACHE_SHIFT,
-					endbyte >> PAGE_CACHE_SHIFT);
-		break;
 	default:
 		ret = -EINVAL;
 	}
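
Note: the comment block removed above documented the LINUX_FADV_ASYNC_WRITE and LINUX_FADV_WRITE_WAIT hints whose switch cases this patch also drops from sys_fadvise64_64(). For orientation only, a minimal userspace sketch of the fadvise interface that remains, using the standard POSIX_FADV_* advice values; the file path is made up and the snippet is not part of the patch.

/* Hedged sketch: advising the kernel via posix_fadvise(); the path is
 * hypothetical and error handling is minimal. */
#define _POSIX_C_SOURCE 200112L
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/datafile", O_RDONLY);	/* hypothetical file */
	int err;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Hint that the file will be read sequentially soon. */
	err = posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);
	if (err)
		fprintf(stderr, "posix_fadvise: %s\n", strerror(err));
	/* ... read the file ... */
	/* Hint that the cached pages will not be needed again. */
	posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
	close(fd);
	return 0;
}
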
diff --git a/mm/highmem.c b/mm/highmem.c
index 55885f64af40..9b274fdf9d08 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -74,8 +74,7 @@ static void flush_all_zero_pkmaps(void)
 		pkmap_count[i] = 0;
 
 		/* sanity check */
-		if (pte_none(pkmap_page_table[i]))
-			BUG();
+		BUG_ON(pte_none(pkmap_page_table[i]));
 
 		/*
 		 * Don't need an atomic fetch-and-clear op here;
@@ -158,8 +157,7 @@ void fastcall *kmap_high(struct page *page)
 	if (!vaddr)
 		vaddr = map_new_virtual(page);
 	pkmap_count[PKMAP_NR(vaddr)]++;
-	if (pkmap_count[PKMAP_NR(vaddr)] < 2)
-		BUG();
+	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
 	spin_unlock(&kmap_lock);
 	return (void*) vaddr;
 }
@@ -174,8 +172,7 @@ void fastcall kunmap_high(struct page *page)
 
 	spin_lock(&kmap_lock);
 	vaddr = (unsigned long)page_address(page);
-	if (!vaddr)
-		BUG();
+	BUG_ON(!vaddr);
 	nr = PKMAP_NR(vaddr);
 
 	/*
@@ -220,8 +217,7 @@ static __init int init_emergency_pool(void)
 		return 0;
 
 	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
-	if (!page_pool)
-		BUG();
+	BUG_ON(!page_pool);
 	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
 
 	return 0;
@@ -264,8 +260,7 @@ int init_emergency_isa_pool(void)
 
 	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
 				       mempool_free_pages, (void *) 0);
-	if (!isa_page_pool)
-		BUG();
+	BUG_ON(!isa_page_pool);
 
 	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
 	return 0;
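
Note: every hunk in highmem.c above is the same mechanical conversion: an open-coded "if (cond) BUG();" pair collapsed into a single BUG_ON(cond). As a rough userspace analogue of that pattern (bug_on()/do_bug() below are made-up names, not the kernel macros):

/* Userspace analogue of the BUG_ON() conversion above; not kernel API. */
#include <stdio.h>
#include <stdlib.h>

#define do_bug()						\
	do {							\
		fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__);	\
		abort();					\
	} while (0)

#define bug_on(cond)						\
	do {							\
		if (cond)					\
			do_bug();				\
	} while (0)

int main(void)
{
	void *pool = malloc(64);

	bug_on(pool == NULL);	/* same shape as BUG_ON(!page_pool) above */
	free(pool);
	return 0;
}
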
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ebad6bbb3501..832f676ca038 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -334,6 +334,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
 		return nr_huge_pages;
 
 	spin_lock(&hugetlb_lock);
+	count = max(count, reserved_huge_pages);
 	try_to_free_low(count);
 	while (count < nr_huge_pages) {
 		struct page *page = dequeue_huge_page(NULL, 0);
@@ -697,9 +698,10 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
 		page = pte_page(*pte);
 same_page:
-		get_page(page);
-		if (pages)
+		if (pages) {
+			get_page(page);
 			pages[i] = page + pfn_offset;
+		}
 
 		if (vmas)
 			vmas[i] = vma;
diff --git a/mm/memory.c b/mm/memory.c
index 8d8f52569f32..0ec7bc644271 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -87,7 +87,7 @@ int randomize_va_space __read_mostly = 1;
 static int __init disable_randmaps(char *s)
 {
 	randomize_va_space = 0;
-	return 0;
+	return 1;
 }
 __setup("norandmaps", disable_randmaps);
 
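
Note: the memory.c hunk changes disable_randmaps() to return 1 instead of 0. For __setup() handlers, a non-zero return tells the early boot-option parser that the argument was consumed, while returning 0 lets it fall through to other handling. A minimal sketch of the same handler shape for a hypothetical "noexample" option (names are illustrative):

/* Sketch of a __setup() boot-option handler, modelled on disable_randmaps()
 * above; "noexample" and example_enabled are hypothetical. */
#include <linux/init.h>

static int example_enabled = 1;

static int __init disable_example(char *s)
{
	example_enabled = 0;
	return 1;	/* 1 == option consumed; 0 would pass it along */
}
__setup("noexample", disable_example);
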
diff --git a/mm/mmap.c b/mm/mmap.c
index 4f5b5709136a..e780d19aa214 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -294,8 +294,7 @@ void validate_mm(struct mm_struct *mm)
 	i = browse_rb(&mm->mm_rb);
 	if (i != mm->map_count)
 		printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
-	if (bug)
-		BUG();
+	BUG_ON(bug);
 }
 #else
 #define validate_mm(mm) do { } while (0)
@@ -432,8 +431,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	struct rb_node ** rb_link, * rb_parent;
 
 	__vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
-	if (__vma && __vma->vm_start < vma->vm_end)
-		BUG();
+	BUG_ON(__vma && __vma->vm_start < vma->vm_end);
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
 	mm->map_count++;
 }
@@ -813,8 +811,7 @@ try_prev:
 	 * (e.g. stash info in next's anon_vma_node when assigning
 	 * an anon_vma, or when trying vma_merge). Another time.
 	 */
-	if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma)
-		BUG();
+	BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
 	if (!near)
 		goto none;
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 893d7677579e..6dcce3a4bbdc 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -258,7 +258,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
- * @nr_pages: number of pages which the caller has just dirtied
+ * @nr_pages_dirtied: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied. The function will periodically check the system's
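
Note: the page-writeback.c hunk is purely a documentation fix, renaming the kernel-doc "@nr_pages:" line to "@nr_pages_dirtied:" so it matches the function's actual parameter. In kernel-doc, each "@name:" line must correspond to a real parameter of the function being documented; a hedged sketch of the format with made-up names:

/* Illustrative kernel-doc comment; the function and both parameters are
 * hypothetical, shown only to demonstrate that @name lines must match
 * the parameter list. */
#include <linux/fs.h>

/**
 * example_ratelimited_hint - sketch of kernel-doc parameter naming
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 */
static void example_ratelimited_hint(struct address_space *mapping,
				     unsigned long nr_pages_dirtied)
{
}
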
diff --git a/mm/slab.c b/mm/slab.c
index 4cbf8bb13557..f055c1420216 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1297,8 +1297,7 @@ void __init kmem_cache_init(void)
 		if (cache_cache.num)
 			break;
 	}
-	if (!cache_cache.num)
-		BUG();
+	BUG_ON(!cache_cache.num);
 	cache_cache.gfporder = order;
 	cache_cache.colour = left_over / cache_cache.colour_off;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
@@ -1974,8 +1973,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * Always checks flags, a caller might be expecting debug support which
 	 * isn't available.
 	 */
-	if (flags & ~CREATE_MASK)
-		BUG();
+	BUG_ON(flags & ~CREATE_MASK);
 
 	/*
 	 * Check that size is in terms of words. This is needed to avoid
@@ -2206,8 +2204,7 @@ static int __node_shrink(struct kmem_cache *cachep, int node)
 
 		slabp = list_entry(l3->slabs_free.prev, struct slab, list);
 #if DEBUG
-		if (slabp->inuse)
-			BUG();
+		BUG_ON(slabp->inuse);
 #endif
 		list_del(&slabp->list);
 
@@ -2248,8 +2245,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	return __cache_shrink(cachep);
 }
@@ -2277,8 +2273,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
 	int i;
 	struct kmem_list3 *l3;
 
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	/* Don't let CPUs to come and go */
 	lock_cpu_hotplug();
@@ -2477,8 +2472,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
-		BUG();
+	BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
 	if (flags & SLAB_NO_GROW)
 		return 0;
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d7af296833fc..e0e1583f32c2 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -148,8 +148,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
 	swp_entry_t entry;
 	int err;
 
-	if (!PageLocked(page))
-		BUG();
+	BUG_ON(!PageLocked(page));
 
 	for (;;) {
 		entry = get_swap_page();
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 39aa9d129612..e5fd5385f0cc 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -397,18 +397,24 @@ void free_swap_and_cache(swp_entry_t entry)
 
 	p = swap_info_get(entry);
 	if (p) {
-		if (swap_entry_free(p, swp_offset(entry)) == 1)
-			page = find_trylock_page(&swapper_space, entry.val);
+		if (swap_entry_free(p, swp_offset(entry)) == 1) {
+			page = find_get_page(&swapper_space, entry.val);
+			if (page && unlikely(TestSetPageLocked(page))) {
+				page_cache_release(page);
+				page = NULL;
+			}
+		}
 		spin_unlock(&swap_lock);
 	}
 	if (page) {
 		int one_user;
 
 		BUG_ON(PagePrivate(page));
-		page_cache_get(page);
 		one_user = (page_count(page) == 2);
 		/* Only cache user (+us), or swap space full? Free it! */
-		if (!PageWriteback(page) && (one_user || vm_swap_full())) {
+		/* Also recheck PageSwapCache after page is locked (above) */
+		if (PageSwapCache(page) && !PageWriteback(page) &&
+					(one_user || vm_swap_full())) {
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
 		}
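
Note: the swapfile.c rework above stops using find_trylock_page(): it now takes a reference with find_get_page(), attempts the lock with TestSetPageLocked(), and backs off (dropping the reference) if the page is already locked, with the added PageSwapCache() check guarding against the page leaving the swap cache before the lock was taken. Below is a self-contained userspace analogue of that "look up, take a reference, trylock or back off" shape; every name in it is invented for illustration.

/* Userspace sketch of the reference + trylock pattern; cache_entry,
 * lookup_get(), put_entry() are made up, not kernel API. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct cache_entry {
	atomic_int refcount;
	pthread_mutex_t lock;
};

/* A one-slot "cache" standing in for the swap cache. */
static struct cache_entry slot = {
	.refcount = 1,			/* the cache's own reference */
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

/* Analogue of find_get_page(): look the entry up and take a reference. */
static struct cache_entry *lookup_get(void)
{
	atomic_fetch_add(&slot.refcount, 1);
	return &slot;
}

/* Analogue of page_cache_release(): drop the reference. */
static void put_entry(struct cache_entry *e)
{
	atomic_fetch_sub(&e->refcount, 1);
}

int main(void)
{
	struct cache_entry *e = lookup_get();

	/* Trylock only after the reference is held; if someone else holds
	 * the lock, drop the reference and give up instead of sleeping,
	 * mirroring the TestSetPageLocked() branch above. */
	if (pthread_mutex_trylock(&e->lock) != 0) {
		put_entry(e);
		e = NULL;
	}
	if (e) {
		printf("locked entry, refcount now %d\n",
		       atomic_load(&e->refcount));
		pthread_mutex_unlock(&e->lock);
		put_entry(e);
	}
	return 0;
}
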
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 729eb3eec75f..c0504f1e34eb 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -321,8 +321,7 @@ void __vunmap(void *addr, int deallocate_pages)
 		int i;
 
 		for (i = 0; i < area->nr_pages; i++) {
-			if (unlikely(!area->pages[i]))
-				BUG();
+			BUG_ON(!area->pages[i]);
 			__free_page(area->pages[i]);
 		}
 