From 239f49c0800778c863585a103805c58afbad6748 Mon Sep 17 00:00:00 2001 From: MinChan Kim Date: Mon, 19 May 2008 22:12:08 +0900 Subject: slob: Fix to return wrong pointer When slob_alloc() returns NULL, __kmalloc_node() returns NULL + align instead. Because align can vary, an allocation failure is very hard to debug unless NULL itself is returned, so return NULL when no page is available. [penberg@cs.helsinki.fi: fix formatting as suggested by Matt.] Acked-by: Matt Mackall Signed-off-by: MinChan Kim Signed-off-by: Pekka Enberg --- mm/slob.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/slob.c b/mm/slob.c index 6038cbadf796..a3ad6671adf1 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -469,8 +469,9 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) return ZERO_SIZE_PTR; m = slob_alloc(size + align, gfp, align, node); - if (m) - *m = size; + if (!m) + return NULL; + *m = size; return (void *)m + align; } else { void *ret; -- cgit v1.2.2 From 76994412f8e824e79a593d6777ec327d85f942b2 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 22 May 2008 19:22:25 +0300 Subject: slub: ksize() abuse checks Add a WARN_ON for pages that have neither PageSlab nor PageCompound set, to catch the worst abusers of ksize() in the kernel. Acked-by: Christoph Lameter Cc: Matt Mackall Signed-off-by: Pekka Enberg --- mm/slub.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index a505a828ef41..0987d1cd943c 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2726,9 +2726,10 @@ size_t ksize(const void *object) page = virt_to_head_page(object); - if (unlikely(!PageSlab(page))) + if (unlikely(!PageSlab(page))) { + WARN_ON(!PageCompound(page)); return PAGE_SIZE << compound_order(page); - + } s = page->slab; #ifdef CONFIG_SLUB_DEBUG -- cgit v1.2.2 From 42172d751b4596b8ca4346a1c251b5f1c661ab0c Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 23 May 2008 13:04:18 -0700 Subject: mm: allow pfnmap ->fault()s Take out an assertion to allow ->fault handlers to service PFNMAP regions. This is required to reimplement .nopfn handlers with .fault handlers and subsequently remove nopfn. Signed-off-by: Nick Piggin Acked-by: Jes Sorensen Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index fb5608a120ed..19e0ae9beecb 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2295,8 +2295,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, vmf.flags = flags; vmf.page = NULL; - BUG_ON(vma->vm_flags & VM_PFNMAP); - ret = vma->vm_ops->fault(vma, &vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) return ret; -- cgit v1.2.2 From f7232154198f928fc25f420d6190468212a7632a Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 23 May 2008 13:04:21 -0700 Subject: mm: don't drop a partial page in a zone's memory map size When computing a zone's number of present pages, account for all pages occupied by the memory map, including a final partial page. 
Signed-off-by: Johannes Weiner Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 63835579323a..035300299f94 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3378,7 +3378,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, * is used by this zone for memmap. This affects the watermark * and per-cpu initialisations */ - memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT; + memmap_pages = + PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT; if (realsize >= memmap_pages) { realsize -= memmap_pages; printk(KERN_DEBUG -- cgit v1.2.2 From 80119ef5c8153e0a6cc5edf00c083dc98a9bd348 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Fri, 23 May 2008 13:04:31 -0700 Subject: mm: fix atomic_t overflow in vm The atomic_t type is 32bit but a 64bit system can have more than 2^32 pages of virtual address space available. Without this we overflow on ludicrously large mappings Signed-off-by: Alan Cox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 4 ++-- mm/nommu.c | 4 ++-- mm/swap.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index fac66337da2a..669499e7c2f5 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -80,7 +80,7 @@ EXPORT_SYMBOL(vm_get_page_prot); int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ int sysctl_overcommit_ratio = 50; /* default is 50% */ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; -atomic_t vm_committed_space = ATOMIC_INIT(0); +atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0); /* * Check that a process has enough memory to allocate a new virtual @@ -177,7 +177,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) * cast `allowed' as a signed long because vm_committed_space * sometimes has a negative value */ - if (atomic_read(&vm_committed_space) < (long)allowed) + if (atomic_long_read(&vm_committed_space) < (long)allowed) return 0; error: vm_unacct_memory(pages); diff --git a/mm/nommu.c b/mm/nommu.c index ef8c62cec697..dca93fcb8b7a 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -39,7 +39,7 @@ struct page *mem_map; unsigned long max_mapnr; unsigned long num_physpages; unsigned long askedalloc, realalloc; -atomic_t vm_committed_space = ATOMIC_INIT(0); +atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0); int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ int sysctl_overcommit_ratio = 50; /* default is 50% */ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; @@ -1410,7 +1410,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) * cast `allowed' as a signed long because vm_committed_space * sometimes has a negative value */ - if (atomic_read(&vm_committed_space) < (long)allowed) + if (atomic_long_read(&vm_committed_space) < (long)allowed) return 0; error: vm_unacct_memory(pages); diff --git a/mm/swap.c b/mm/swap.c index 91e194445a5e..45c9f25a8a3b 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -503,7 +503,7 @@ void vm_acct_memory(long pages) local = &__get_cpu_var(committed_space); *local += pages; if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) { - atomic_add(*local, &vm_committed_space); + atomic_long_add(*local, &vm_committed_space); *local = 0; } preempt_enable(); @@ -520,7 +520,7 @@ static int cpu_swap_callback(struct notifier_block *nfb, committed = 
&per_cpu(committed_space, (long)hcpu); if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { - atomic_add(*committed, &vm_committed_space); + atomic_long_add(*committed, &vm_committed_space); *committed = 0; drain_cpu_pagevecs((long)hcpu); } -- cgit v1.2.2 From 7eb54824b76793dd86afb54f182ef9aa64b3a45a Mon Sep 17 00:00:00 2001 From: Andy Whitcroft Date: Fri, 23 May 2008 13:04:50 -0700 Subject: zonelists: handle a node zonelist with no applicable entries When booting 2.6.26-rc3 on a multi-node x86_32 numa system we are seeing panics when trying node local allocations: BUG: unable to handle kernel NULL pointer dereference at 0000034c IP: [] get_page_from_freelist+0x4a/0x18e *pdpt = 00000000013a7001 *pde = 0000000000000000 Oops: 0000 [#1] SMP Modules linked in: Pid: 0, comm: swapper Not tainted (2.6.26-rc3-00003-g5abc28d #82) EIP: 0060:[] EFLAGS: 00010282 CPU: 0 EIP is at get_page_from_freelist+0x4a/0x18e EAX: c1371ed8 EBX: 00000000 ECX: 00000000 EDX: 00000000 ESI: f7801180 EDI: 00000000 EBP: 00000000 ESP: c1371ec0 DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068 Process swapper (pid: 0, ti=c1370000 task=c12f5b40 task.ti=c1370000) Stack: 00000000 00000000 00000000 00000000 000612d0 000412d0 00000000 000412d0 f7801180 f7c0101c f7c01018 c10426e4 f7c01018 00000001 00000044 00000000 00000001 c12f5b40 00000001 00000010 00000000 000412d0 00000286 000412d0 Call Trace: [] __alloc_pages_internal+0x99/0x378 [] __alloc_pages+0x7/0x9 [] kmem_getpages+0x66/0xef [] cache_grow+0x8f/0x123 [] ____cache_alloc_node+0xb9/0xe4 [] kmem_cache_alloc_node+0x92/0xd2 [] setup_cpu_cache+0xaf/0x177 [] kmem_cache_create+0x2c8/0x353 [] kmem_cache_init+0x1ce/0x3ad [] start_kernel+0x178/0x1ee This occurs when we are scanning the zonelists looking for a ZONE_NORMAL page. In this system there is only ZONE_DMA and ZONE_NORMAL memory on node 0, all other nodes are mapped above 4GB physical. Here is a dump of the zonelists from this system: zonelists pgdat=c1400000 0: c14006c0:2 f7c006c0:2 f7e006c0:2 c1400360:1 c1400000:0 1: c14006c0:2 c1400360:1 c1400000:0 zonelists pgdat=f7c00000 0: f7c006c0:2 f7e006c0:2 c14006c0:2 c1400360:1 c1400000:0 1: f7c006c0:2 zonelists pgdat=f7e00000 0: f7e006c0:2 c14006c0:2 f7c006c0:2 c1400360:1 c1400000:0 1: f7e006c0:2 When performing a node local allocation we call get_page_from_freelist() looking for a page. It in turn calls first_zones_zonelist() which returns a preferred_zone. Where there are no applicable zones this will be NULL. However we use this unconditionally, leading to this panic. Where there are no applicable zones there is no possibility of a successful allocation, so simply fail the allocation. 
Signed-off-by: Andy Whitcroft Acked-by: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 035300299f94..7f4c66ff65b7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1396,6 +1396,9 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone); + if (!preferred_zone) + return NULL; + classzone_idx = zone_idx(preferred_zone); zonelist_scan: -- cgit v1.2.2 From cd94b9dbfa300fc42e45f230010623fc08d59563 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 23 May 2008 13:04:52 -0700 Subject: memory hotplug: fix early allocation handling Trying to add memory via add_memory() from within an initcall function results in bootmem alloc of 163840 bytes failed! Kernel panic - not syncing: Out of memory This is caused by zone_wait_table_init() which uses system_state to decide if it should use the bootmem allocator or not. When initcalls are handled the system_state is still SYSTEM_BOOTING but the bootmem allocator doesn't work anymore. So the allocation will fail. To fix this use slab_is_available() instead as indicator like we do it everywhere else. [akpm@linux-foundation.org: coding-style fix] Reviewed-by: Andy Whitcroft Cc: Dave Hansen Cc: Gerald Schaefer Cc: KAMEZAWA Hiroyuki Acked-by: Yasunori Goto Signed-off-by: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7f4c66ff65b7..8e83f02cd2d3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2807,7 +2807,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) alloc_size = zone->wait_table_hash_nr_entries * sizeof(wait_queue_head_t); - if (system_state == SYSTEM_BOOTING) { + if (!slab_is_available()) { zone->wait_table = (wait_queue_head_t *) alloc_bootmem_node(pgdat, alloc_size); } else { -- cgit v1.2.2 From 4647875819aa210115d926242aa18e034517cece Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Thu, 5 Jun 2008 22:45:57 -0700 Subject: hugetlb: fix lockdep error ============================================= [ INFO: possible recursive locking detected ] 2.6.26-rc4 #30 --------------------------------------------- heap-overflow/2250 is trying to acquire lock: (&mm->page_table_lock){--..}, at: [] .copy_hugetlb_page_range+0x108/0x280 but task is already holding lock: (&mm->page_table_lock){--..}, at: [] .copy_hugetlb_page_range+0xfc/0x280 other info that might help us debug this: 3 locks held by heap-overflow/2250: #0: (&mm->mmap_sem){----}, at: [] .dup_mm+0x134/0x410 #1: (&mm->mmap_sem/1){--..}, at: [] .dup_mm+0x144/0x410 #2: (&mm->page_table_lock){--..}, at: [] .copy_hugetlb_page_range+0xfc/0x280 stack backtrace: Call Trace: [c00000003b2774e0] [c000000000010ce4] .show_stack+0x74/0x1f0 (unreliable) [c00000003b2775a0] [c0000000003f10e0] .dump_stack+0x20/0x34 [c00000003b277620] [c0000000000889bc] .__lock_acquire+0xaac/0x1080 [c00000003b277740] [c000000000089000] .lock_acquire+0x70/0xb0 [c00000003b2777d0] [c0000000003ee15c] ._spin_lock+0x4c/0x80 [c00000003b277870] [c0000000000cf2e8] .copy_hugetlb_page_range+0x108/0x280 [c00000003b277950] [c0000000000bcaa8] .copy_page_range+0x558/0x790 [c00000003b277ac0] [c000000000050fe0] .dup_mm+0x2d0/0x410 [c00000003b277ba0] [c000000000051d24] .copy_process+0xb94/0x1020 
[c00000003b277ca0] [c000000000052244] .do_fork+0x94/0x310 [c00000003b277db0] [c000000000011240] .sys_clone+0x60/0x80 [c00000003b277e30] [c0000000000078c4] .ppc_clone+0x8/0xc Fix this the same way mm/memory.c's copy_page_range() does its lockdep annotation. Acked-by: KOSAKI Motohiro Acked-by: Adam Litke Acked-by: Nishanth Aravamudan Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index bbf953eeb58b..ab171274ef21 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -785,7 +785,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, continue; spin_lock(&dst->page_table_lock); - spin_lock(&src->page_table_lock); + spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); if (!huge_pte_none(huge_ptep_get(src_pte))) { if (cow) huge_ptep_set_wrprotect(src, addr, src_pte); -- cgit v1.2.2 From a5b4592cf77b973c29e7c9695873a26052b58951 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Thu, 5 Jun 2008 22:46:05 -0700 Subject: brk: make sys_brk() honor COMPAT_BRK when computing lower bound Fix a regression introduced by commit 4cc6028d4040f95cdb590a87db478b42b8be0508 Author: Jiri Kosina Date: Wed Feb 6 22:39:44 2008 +0100 brk: check the lower bound properly The check in sys_brk() on the minimum value the brk might have must take the CONFIG_COMPAT_BRK setting into account. When this option is turned on (i.e. we support ancient legacy binaries, e.g. libc5-linked stuff), the lower bound on the brk value is mm->end_code; otherwise the brk start is allowed to be arbitrarily shifted. Signed-off-by: Jiri Kosina Tested-by: Geert Uytterhoeven Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 669499e7c2f5..3354fdd83d4b 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -245,10 +245,16 @@ asmlinkage unsigned long sys_brk(unsigned long brk) unsigned long rlim, retval; unsigned long newbrk, oldbrk; struct mm_struct *mm = current->mm; + unsigned long min_brk; down_write(&mm->mmap_sem); - if (brk < mm->start_brk) +#ifdef CONFIG_COMPAT_BRK + min_brk = mm->end_code; +#else + min_brk = mm->start_brk; +#endif + if (brk < min_brk) goto out; /* -- cgit v1.2.2 From 6cfd53fc03670c7a544a56d441eb1a6cc800d72b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 5 Jun 2008 22:46:08 -0700 Subject: nommu: fix kobjsize() for SLOB and SLUB kobjsize() has been abusing page->index as a method for sorting out compound order, which blows up both for page cache pages and for SLOB's reuse of the index in struct slob_page. Presently we are not able to accurately size arbitrary pointers that don't come from kmalloc(), so the best we can do is sort out the compound order from the head page if it's a compound page, or default to 0-order if it's impossible to ksize() the object. 
Obviously this leaves quite a bit to be desired in terms of object sizing accuracy, but the behaviour is unchanged over the existing implementation, while fixing the page->index oopses originally reported here: http://marc.info/?l=linux-mm&m=121127773325245&w=2 Accuracy could also be improved by having SLUB and SLOB both set PG_slab on ksizeable pages, rather than just handling the __GFP_COMP cases regardless of the PG_slab setting, as made possible by Pekka's patches: http://marc.info/?l=linux-kernel&m=121139439900534&w=2 http://marc.info/?l=linux-kernel&m=121139440000537&w=2 http://marc.info/?l=linux-kernel&m=121139440000540&w=2 This is primarily a bugfix for nommu systems for 2.6.26, with the aim being to gradually kill off kobjsize() and its particular brand of object abuse entirely. Reviewed-by: Pekka Enberg Signed-off-by: Paul Mundt Acked-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/nommu.c | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/nommu.c b/mm/nommu.c index dca93fcb8b7a..3abd0845bda4 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -104,21 +104,43 @@ EXPORT_SYMBOL(vmtruncate); unsigned int kobjsize(const void *objp) { struct page *page; + int order = 0; /* * If the object we have should not have ksize performed on it, * return size of 0 */ - if (!objp || (unsigned long)objp >= memory_end || !((page = virt_to_page(objp)))) + if (!objp) return 0; + if ((unsigned long)objp >= memory_end) + return 0; + + page = virt_to_head_page(objp); + if (!page) + return 0; + + /* + * If the allocator sets PageSlab, we know the pointer came from + * kmalloc(). + */ if (PageSlab(page)) return ksize(objp); - BUG_ON(page->index < 0); - BUG_ON(page->index >= MAX_ORDER); + /* + * The ksize() function is only guaranteed to work for pointers + * returned by kmalloc(). So handle arbitrary pointers, that we expect + * always to be compound pages, here. + */ + if (PageCompound(page)) + order = compound_order(page); - return (PAGE_SIZE << page->index); + /* + * Finally, handle arbitrary pointers that don't set PageSlab. + * Default to 0-order in the case when we're unable to ksize() + * the object. + */ + return PAGE_SIZE << order; } /* -- cgit v1.2.2 From dfa7e20cc0d1a7a620def4dce97de1ae5375f99b Mon Sep 17 00:00:00 2001 From: Russ Anderson Date: Mon, 9 Jun 2008 11:18:45 -0500 Subject: mm: Minor clean-up of page flags in mm/page_alloc.c Minor source code cleanup of page flags in mm/page_alloc.c. Move the definition of the groups of bits to page-flags.h. The purpose of this cleanup is that the next patch will conditionally add a page flag to the groups. Doing that in a header file is cleaner than adding #ifdefs to the C code. 
Signed-off-by: Russ Anderson Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 34 +++------------------------------- 1 file changed, 3 insertions(+), 31 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8e83f02cd2d3..2f552955a02f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -237,16 +237,7 @@ static void bad_page(struct page *page) printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n" KERN_EMERG "Backtrace:\n"); dump_stack(); - page->flags &= ~(1 << PG_lru | - 1 << PG_private | - 1 << PG_locked | - 1 << PG_active | - 1 << PG_dirty | - 1 << PG_reclaim | - 1 << PG_slab | - 1 << PG_swapcache | - 1 << PG_writeback | - 1 << PG_buddy ); + page->flags &= ~PAGE_FLAGS_CLEAR_WHEN_BAD; set_page_count(page, 0); reset_page_mapcount(page); page->mapping = NULL; @@ -463,16 +454,7 @@ static inline int free_pages_check(struct page *page) (page->mapping != NULL) | (page_get_page_cgroup(page) != NULL) | (page_count(page) != 0) | - (page->flags & ( - 1 << PG_lru | - 1 << PG_private | - 1 << PG_locked | - 1 << PG_active | - 1 << PG_slab | - 1 << PG_swapcache | - 1 << PG_writeback | - 1 << PG_reserved | - 1 << PG_buddy )))) + (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) bad_page(page); if (PageDirty(page)) __ClearPageDirty(page); @@ -616,17 +598,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) (page->mapping != NULL) | (page_get_page_cgroup(page) != NULL) | (page_count(page) != 0) | - (page->flags & ( - 1 << PG_lru | - 1 << PG_private | - 1 << PG_locked | - 1 << PG_active | - 1 << PG_dirty | - 1 << PG_slab | - 1 << PG_swapcache | - 1 << PG_writeback | - 1 << PG_reserved | - 1 << PG_buddy )))) + (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) bad_page(page); /* -- cgit v1.2.2 From 5a1603be58f11edb1b30cb1e40cfbdd4439289d0 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 12 Jun 2008 16:29:55 +0900 Subject: nommu: Correct kobjsize() page validity checks. This implements a few changes on top of the recent kobjsize() refactoring introduced by commit 6cfd53fc03670c7a544a56d441eb1a6cc800d72b. As Christoph points out: virt_to_head_page cannot return NULL. virt_to_page also does not return NULL. pfn_valid() needs to be used to figure out if a page is valid. Otherwise the page struct reference that was returned may have PageReserved() set to indicate that it is not a valid page. As discussed further in the thread, virt_addr_valid() is the preferable way to validate the object pointer in this case. In addition to fixing up the reserved page case, it also has the benefit of encapsulating the hack introduced by commit 4016a1390d07f15b267eecb20e76a48fd5c524ef on the impacted platforms, allowing us to get rid of the extra checking in kobjsize() for the platforms that don't perform this type of bizarre memory_end abuse (every nommu platform that isn't blackfin). If blackfin decides to get in line with every other platform and use PageReserved for the DMA pages in question, kobjsize() will also continue to work fine. It also turns out that compound_order() will give us back 0-order for non-head pages, so we can get rid of the PageCompound check and just use compound_order() directly. Clean that up while we're at it. 
Signed-off-by: Paul Mundt Reviewed-by: Christoph Lameter Acked-by: David Howells Signed-off-by: Linus Torvalds --- mm/nommu.c | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) (limited to 'mm') diff --git a/mm/nommu.c b/mm/nommu.c index 3abd0845bda4..4462b6a3fcb9 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -104,21 +104,15 @@ EXPORT_SYMBOL(vmtruncate); unsigned int kobjsize(const void *objp) { struct page *page; - int order = 0; /* * If the object we have should not have ksize performed on it, * return size of 0 */ - if (!objp) - return 0; - - if ((unsigned long)objp >= memory_end) + if (!objp || !virt_addr_valid(objp)) return 0; page = virt_to_head_page(objp); - if (!page) - return 0; /* * If the allocator sets PageSlab, we know the pointer came from @@ -129,18 +123,9 @@ unsigned int kobjsize(const void *objp) /* * The ksize() function is only guaranteed to work for pointers - * returned by kmalloc(). So handle arbitrary pointers, that we expect - * always to be compound pages, here. - */ - if (PageCompound(page)) - order = compound_order(page); - - /* - * Finally, handle arbitrary pointers that don't set PageSlab. - * Default to 0-order in the case when we're unable to ksize() - * the object. + * returned by kmalloc(). So handle arbitrary pointers here. */ - return PAGE_SIZE << order; + return PAGE_SIZE << compound_order(page); } /* -- cgit v1.2.2 From c700be3d1320d2be4f04c8a5330186b7df724438 Mon Sep 17 00:00:00 2001 From: "kosaki.motohiro@jp.fujitsu.com" Date: Thu, 12 Jun 2008 15:21:27 -0700 Subject: mm: fix incorrect variable type in do_try_to_free_pages() The "Smarter retry of costly-order allocations" patch series changed the behaviour of do_try_to_free_pages(), but unfortunately the type of the ret variable was left unchanged, so an overflow is possible. Signed-off-by: KOSAKI Motohiro Acked-by: Nishanth Aravamudan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 9a29901ad3b3..967d30ccd92b 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1307,7 +1307,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, struct scan_control *sc) { int priority; - int ret = 0; + unsigned long ret = 0; unsigned long total_scanned = 0; unsigned long nr_reclaimed = 0; struct reclaim_state *reclaim_state = current->reclaim_state; -- cgit v1.2.2 From 2165009bdf63f79716a36ad545df14c3cdf958b7 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Thu, 12 Jun 2008 15:21:47 -0700 Subject: pagemap: pass mm into pagewalkers We need this at least for huge page detection for now, because powerpc needs the vm_area_struct to be able to determine whether a virtual address refers to a huge page (its pmd_huge() doesn't work). It might also come in handy for some of the other users. 
Signed-off-by: Dave Hansen Acked-by: Matt Mackall Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/pagewalk.c | 42 ++++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 0afd2387e507..d5878bed7841 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -3,14 +3,14 @@ #include static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, - const struct mm_walk *walk, void *private) + struct mm_walk *walk) { pte_t *pte; int err = 0; pte = pte_offset_map(pmd, addr); for (;;) { - err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, private); + err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk); if (err) break; addr += PAGE_SIZE; @@ -24,7 +24,7 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, } static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, - const struct mm_walk *walk, void *private) + struct mm_walk *walk) { pmd_t *pmd; unsigned long next; @@ -35,15 +35,15 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, next = pmd_addr_end(addr, end); if (pmd_none_or_clear_bad(pmd)) { if (walk->pte_hole) - err = walk->pte_hole(addr, next, private); + err = walk->pte_hole(addr, next, walk); if (err) break; continue; } if (walk->pmd_entry) - err = walk->pmd_entry(pmd, addr, next, private); + err = walk->pmd_entry(pmd, addr, next, walk); if (!err && walk->pte_entry) - err = walk_pte_range(pmd, addr, next, walk, private); + err = walk_pte_range(pmd, addr, next, walk); if (err) break; } while (pmd++, addr = next, addr != end); @@ -52,7 +52,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, } static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, - const struct mm_walk *walk, void *private) + struct mm_walk *walk) { pud_t *pud; unsigned long next; @@ -63,15 +63,15 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) { if (walk->pte_hole) - err = walk->pte_hole(addr, next, private); + err = walk->pte_hole(addr, next, walk); if (err) break; continue; } if (walk->pud_entry) - err = walk->pud_entry(pud, addr, next, private); + err = walk->pud_entry(pud, addr, next, walk); if (!err && (walk->pmd_entry || walk->pte_entry)) - err = walk_pmd_range(pud, addr, next, walk, private); + err = walk_pmd_range(pud, addr, next, walk); if (err) break; } while (pud++, addr = next, addr != end); @@ -85,15 +85,15 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, * @addr: starting address * @end: ending address * @walk: set of callbacks to invoke for each level of the tree - * @private: private data passed to the callback function * * Recursively walk the page table for the memory area in a VMA, * calling supplied callbacks. Callbacks are called in-order (first * PGD, first PUD, first PMD, first PTE, second PTE... second PMD, * etc.). If lower-level callbacks are omitted, walking depth is reduced. * - * Each callback receives an entry pointer, the start and end of the - * associated range, and a caller-supplied private data pointer. + * Each callback receives an entry pointer and the start and end of the + * associated range, and a copy of the original mm_walk for access to + * the ->private or ->mm fields. * * No locks are taken, but the bottom level iterator will map PTE * directories from highmem if necessary. 
@@ -101,9 +101,8 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, * If any callback returns a non-zero value, the walk is aborted and * the return value is propagated back to the caller. Otherwise 0 is returned. */ -int walk_page_range(const struct mm_struct *mm, - unsigned long addr, unsigned long end, - const struct mm_walk *walk, void *private) +int walk_page_range(unsigned long addr, unsigned long end, + struct mm_walk *walk) { pgd_t *pgd; unsigned long next; @@ -112,21 +111,24 @@ int walk_page_range(const struct mm_struct *mm, if (addr >= end) return err; - pgd = pgd_offset(mm, addr); + if (!walk->mm) + return -EINVAL; + + pgd = pgd_offset(walk->mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) { if (walk->pte_hole) - err = walk->pte_hole(addr, next, private); + err = walk->pte_hole(addr, next, walk); if (err) break; continue; } if (walk->pgd_entry) - err = walk->pgd_entry(pgd, addr, next, private); + err = walk->pgd_entry(pgd, addr, next, walk); if (!err && (walk->pud_entry || walk->pmd_entry || walk->pte_entry)) - err = walk_pud_range(pgd, addr, next, walk, private); + err = walk_pud_range(pgd, addr, next, walk); if (err) break; } while (pgd++, addr = next, addr != end); -- cgit v1.2.2
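
As a rough illustration of the calling convention introduced by the last patch above (a hypothetical caller, not code from the series: the helpers count_present_ptes() and count_pte_entry() are invented for this sketch, and the caller is assumed to hold the appropriate mm locks), a pagewalk user now looks roughly like this:

#include <linux/mm.h>

/* Per-PTE callback: the caller-supplied cookie is reached via walk->private. */
static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* a non-zero return would abort the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk pte_walk = {
		.pte_entry	= count_pte_entry,
		.mm		= mm,	/* now required: walk_page_range() returns -EINVAL without it */
		.private	= &count,
	};

	/* Old API for comparison: walk_page_range(mm, start, end, &pte_walk, &count); */
	if (walk_page_range(start, end, &pte_walk))
		return 0;	/* walk aborted */
	return count;
}

The design point of the patch is visible here: the mm and the private cookie travel inside struct mm_walk itself, so every callback receives a single walk pointer instead of a bare void *private, and further fields can be added later without changing every callback signature again.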