Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	22
-rw-r--r--	mm/mempolicy.c	2
-rw-r--r--	mm/page_alloc.c	31
-rw-r--r--	mm/rmap.c	5
-rw-r--r--	mm/shmem_acl.c	2
-rw-r--r--	mm/truncate.c	3
6 files changed, 33 insertions(+), 32 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1d709ff528e1..2dbec90dc3ba 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -356,8 +356,8 @@ nomem:
 	return -ENOMEM;
 }
 
-void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
-			  unsigned long end)
+void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
@@ -398,6 +398,24 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	}
 }
 
+void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+			  unsigned long end)
+{
+	/*
+	 * It is undesirable to test vma->vm_file as it should be non-null
+	 * for valid hugetlb area. However, vm_file will be NULL in the error
+	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
+	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
+	 * to clean up. Since no pte has actually been setup, it is safe to
+	 * do nothing in this case.
+	 */
+	if (vma->vm_file) {
+		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+		__unmap_hugepage_range(vma, start, end);
+		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+	}
+}
+
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, pte_t *ptep, pte_t pte)
 {
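
The shape of this change, a bare __-prefixed worker that assumes i_mmap_lock is held plus a public wrapper that acquires it, is a common kernel locking convention: callers that already hold the lock call the __ variant directly, everyone else goes through the wrapper. A minimal userspace sketch of the same pattern, with a pthread mutex standing in for the spinlock (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker: caller must already hold map_lock (cf. __unmap_hugepage_range). */
static void __unmap_range(unsigned long start, unsigned long end)
{
	printf("unmapping [%#lx, %#lx)\n", start, end);
}

/* Public entry point: takes the lock, delegates, releases. */
static void unmap_range(unsigned long start, unsigned long end)
{
	pthread_mutex_lock(&map_lock);
	__unmap_range(start, end);
	pthread_mutex_unlock(&map_lock);
}

int main(void)
{
	unmap_range(0x1000, 0x2000);
	return 0;
}
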
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 25788b1b7fcf..617fb31086ee 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -727,7 +727,7 @@ int do_migrate_pages(struct mm_struct *mm,
 	return -ENOSYS;
 }
 
-static struct page *new_vma_page(struct page *page, unsigned long private)
+static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
 {
 	return NULL;
 }
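
The extra int **x parameter exists only to keep this stub source-compatible with the page-allocation callback type that migrate_pages() takes; to the best of my knowledge that matches the era's new_page_t typedef, but treat the exact signature below as an assumption. A sketch of the stub-matches-callback idea:

#include <stddef.h>

struct page;	/* opaque here; the stub never dereferences it */

/* Assumed callback type, modeled on new_page_t from this era; the
 * third parameter reports a per-page result code back to the caller. */
typedef struct page *new_page_t(struct page *page, unsigned long private,
				 int **result);

/* Stub with the same shape as the non-NUMA new_vma_page() above. */
static struct page *new_vma_page(struct page *page, unsigned long private,
				 int **x)
{
	(void)page; (void)private; (void)x;
	return NULL;	/* no NUMA policy: nothing to allocate */
}

/* The stub can now be passed wherever the callback type is required. */
static new_page_t *get_new_page = new_vma_page;

int main(void)
{
	return get_new_page(NULL, 0, NULL) == NULL ? 0 : 1;
}
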
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a8c003e7b3d5..40db96a655d0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -495,17 +495,16 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	int i;
 	int reserved = 0;
 
-	arch_free_page(page, order);
-	if (!PageHighMem(page))
-		debug_check_no_locks_freed(page_address(page),
-					   PAGE_SIZE<<order);
-
 	for (i = 0 ; i < (1 << order) ; ++i)
 		reserved += free_pages_check(page + i);
 	if (reserved)
 		return;
 
+	if (!PageHighMem(page))
+		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+	arch_free_page(page, order);
 	kernel_map_pages(page, 1 << order, 0);
+
 	local_irq_save(flags);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, order);
@@ -781,13 +780,14 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
 
-	arch_free_page(page, 0);
-
 	if (PageAnon(page))
 		page->mapping = NULL;
 	if (free_pages_check(page))
 		return;
 
+	if (!PageHighMem(page))
+		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
+	arch_free_page(page, 0);
 	kernel_map_pages(page, 1, 0);
 
 	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
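
Both free-path hunks make the same move: free_pages_check() now runs before arch_free_page() and the lock-debug hook, so a corrupt page is rejected before any side effects are applied to it. A hedged userspace sketch of that validate-then-act ordering (names hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct page { int flags; };

/* Validate first: nonzero flags here mean the page is corrupt. */
static bool free_page_check(struct page *page)
{
	return page->flags != 0;
}

static void arch_free_page(struct page *page)
{
	/* Side-effecting hook: only reached by pages that passed the check. */
	printf("arch hook for %p\n", (void *)page);
}

static void free_page(struct page *page)
{
	if (free_page_check(page))
		return;		/* bail before any irreversible work */
	arch_free_page(page);	/* side effects happen after validation */
}

int main(void)
{
	struct page good = { 0 }, bad = { 1 };
	free_page(&good);
	free_page(&bad);	/* rejected: the hook never runs */
	return 0;
}
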
@@ -2294,19 +2294,6 @@ unsigned long __init zone_absent_pages_in_node(int nid,
 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
 }
 
-/* Return the zone index a PFN is in */
-int memmap_zone_idx(struct page *lmem_map)
-{
-	int i;
-	unsigned long phys_addr = virt_to_phys(lmem_map);
-	unsigned long pfn = phys_addr >> PAGE_SHIFT;
-
-	for (i = 0; i < MAX_NR_ZONES; i++)
-		if (pfn < arch_zone_highest_possible_pfn[i])
-			break;
-
-	return i;
-}
 #else
 static inline unsigned long zone_spanned_pages_in_node(int nid,
 						unsigned long zone_type,
@@ -2325,10 +2312,6 @@ static inline unsigned long zone_absent_pages_in_node(int nid,
 	return zholes_size[zone_type];
 }
 
-static inline int memmap_zone_idx(struct page *lmem_map)
-{
-	return MAX_NR_ZONES;
-}
 #endif
 
 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
diff --git a/mm/rmap.c b/mm/rmap.c
index e2155d791d99..a9136d8b7577 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -576,15 +576,14 @@ void page_add_file_rmap(struct page *page)
 void page_remove_rmap(struct page *page)
 {
 	if (atomic_add_negative(-1, &page->_mapcount)) {
-#ifdef CONFIG_DEBUG_VM
 		if (unlikely(page_mapcount(page) < 0)) {
 			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
 			printk (KERN_EMERG " page->flags = %lx\n", page->flags);
 			printk (KERN_EMERG " page->count = %x\n", page_count(page));
 			printk (KERN_EMERG " page->mapping = %p\n", page->mapping);
+			BUG();
 		}
-#endif
-		BUG_ON(page_mapcount(page) < 0);
+
 		/*
 		 * It would be tidy to reset the PageAnon mapping here,
 		 * but that might overwrite a racing page_add_anon_rmap
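
Net effect of the rmap hunk: the mapcount-underflow diagnostics are no longer compiled out when CONFIG_DEBUG_VM is unset, and the crash now fires in the same branch that printed them, replacing the separate BUG_ON(). A small userspace model of that report-then-die shape, with abort() standing in for BUG() (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* As in struct page: _mapcount is -1 when unmapped, 0 for one mapping. */
static int _mapcount = 0;

static int page_mapcount(void) { return _mapcount + 1; }

static void page_remove_rmap(void)
{
	if (--_mapcount < 0) {		/* last unmap, or an underflow */
		if (page_mapcount() < 0) {
			/* Diagnostics print unconditionally now ... */
			fprintf(stderr,
				"Eeek! page_mapcount went negative! (%d)\n",
				page_mapcount());
			/* ... and the crash happens in the same branch. */
			abort();
		}
	}
}

int main(void)
{
	page_remove_rmap();	/* legitimate last unmap: silent */
	page_remove_rmap();	/* underflow: prints, then aborts */
	return 0;
}
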
diff --git a/mm/shmem_acl.c b/mm/shmem_acl.c
index c946bf468718..f5664c5b9eb1 100644
--- a/mm/shmem_acl.c
+++ b/mm/shmem_acl.c
@@ -35,7 +35,7 @@ shmem_get_acl(struct inode *inode, int type)
 }
 
 /**
- * shmem_get_acl  -  generic_acl_operations->setacl() operation
+ * shmem_set_acl  -  generic_acl_operations->setacl() operation
  */
 static void
 shmem_set_acl(struct inode *inode, int type, struct posix_acl *acl)
diff --git a/mm/truncate.c b/mm/truncate.c
index f4edbc179d14..11ca480701dd 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -302,7 +302,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	if (page->mapping != mapping)
 		return 0;
 
-	if (PagePrivate(page) && !try_to_release_page(page, 0))
+	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
 		return 0;
 
 	write_lock_irq(&mapping->tree_lock);
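
Passing GFP_KERNEL rather than 0 tells the filesystem's release hook that this caller may block, so the hook no longer has to fail paths that would need to sleep or allocate. A toy illustration of a gfp mask acting as a capability grant (flag values and names hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define GFP_NOWAIT 0x0u		/* caller cannot sleep */
#define GFP_KERNEL 0x1u		/* caller may sleep and reclaim */

/* Hypothetical release hook: the mask tells it what it is allowed to do. */
static bool try_to_release(unsigned int gfp_mask)
{
	if (!(gfp_mask & GFP_KERNEL))
		return false;	/* atomic context assumed: give up */
	printf("blocking cleanup allowed, releasing\n");
	return true;
}

int main(void)
{
	printf("gfp=0: %d\n", try_to_release(GFP_NOWAIT));
	printf("gfp=GFP_KERNEL: %d\n", try_to_release(GFP_KERNEL));
	return 0;
}
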
@@ -396,6 +396,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		pagevec_release(&pvec);
 		cond_resched();
 	}
+	WARN_ON_ONCE(ret);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
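
WARN_ON_ONCE() flags a failed invalidation without flooding the log: it warns on the first occurrence only. A userspace approximation using a GCC statement expression, the same trick the kernel macro relies on (a sketch, not the kernel's definition):

#include <stdio.h>

/* Stand-in for WARN_ON_ONCE(): reports the first hit, stays quiet after. */
#define WARN_ON_ONCE(cond) ({					\
	static int __warned;					\
	int __c = !!(cond);					\
	if (__c && !__warned) {					\
		__warned = 1;					\
		fprintf(stderr, "WARNING: %s:%d: %s\n",		\
			__FILE__, __LINE__, #cond);		\
	}							\
	__c;							\
})

int main(void)
{
	for (int i = 0; i < 3; i++)
		WARN_ON_ONCE(i >= 0);	/* prints only on the first pass */
	return 0;
}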