From f0188f47482efdbd2e005103bb4f0224a835dfad Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Fri, 10 Feb 2006 01:51:13 -0800 Subject: [PATCH] slab: Avoid deadlock at kmem_cache_create/kmem_cache_destroy Prevents deadlock situation between kmem_cache_create()/kmem_cache_destory(), and kmem_cache_create() /cpu hotplug. The locking order probably got moved over time. Signed-off-by: Ravikiran Thirumalai Signed-off-by: Shai Fultheim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index d66c2b0d9715..add05d808a4a 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1717,6 +1717,12 @@ kmem_cache_create (const char *name, size_t size, size_t align, BUG(); } + /* + * Prevent CPUs from coming and going. + * lock_cpu_hotplug() nests outside cache_chain_mutex + */ + lock_cpu_hotplug(); + mutex_lock(&cache_chain_mutex); list_for_each(p, &cache_chain) { @@ -1918,8 +1924,6 @@ kmem_cache_create (const char *name, size_t size, size_t align, cachep->dtor = dtor; cachep->name = name; - /* Don't let CPUs to come and go */ - lock_cpu_hotplug(); if (g_cpucache_up == FULL) { enable_cpucache(cachep); @@ -1978,12 +1982,12 @@ kmem_cache_create (const char *name, size_t size, size_t align, /* cache setup completed, link it into the list */ list_add(&cachep->next, &cache_chain); - unlock_cpu_hotplug(); oops: if (!cachep && (flags & SLAB_PANIC)) panic("kmem_cache_create(): failed to create slab `%s'\n", name); mutex_unlock(&cache_chain_mutex); + unlock_cpu_hotplug(); return cachep; } EXPORT_SYMBOL(kmem_cache_create); -- cgit v1.2.2 From 418aade459f03318defd18ef0b11981a63bd81b0 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Fri, 10 Feb 2006 01:51:15 -0800 Subject: [PATCH] Updates for page migration This adds some additional comments in order to help others figure out how exactly the code works. And fix a variable name. Also swap_page does need to ignore all reference bits when unmapping a page. Otherwise we may have to repeatedly unmap a frequently touched page. So change the try_to_unmap parameter to 1. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 5a610804cd06..5db32fdfaf39 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -632,7 +632,7 @@ static int swap_page(struct page *page) struct address_space *mapping = page_mapping(page); if (page_mapped(page) && mapping) - if (try_to_unmap(page, 0) != SWAP_SUCCESS) + if (try_to_unmap(page, 1) != SWAP_SUCCESS) goto unlock_retry; if (PageDirty(page)) { @@ -839,7 +839,7 @@ EXPORT_SYMBOL(migrate_page); * pages are swapped out. * * The function returns after 10 attempts or if no pages - * are movable anymore because t has become empty + * are movable anymore because to has become empty * or no retryable pages exist anymore. * * Return: Number of pages not migrated when "to" ran empty. @@ -928,12 +928,21 @@ redo: goto unlock_both; if (mapping->a_ops->migratepage) { + /* + * Most pages have a mapping and most filesystems + * should provide a migration function. Anonymous + * pages are part of swap space which also has its + * own migration function. This is the most common + * path for page migration. 
+ */ rc = mapping->a_ops->migratepage(newpage, page); goto unlock_both; } /* - * Trigger writeout if page is dirty + * Default handling if a filesystem does not provide + * a migration function. We can only migrate clean + * pages so try to write out any dirty pages first. */ if (PageDirty(page)) { switch (pageout(page, mapping)) { @@ -949,9 +958,10 @@ redo: ; /* try to migrate the page below */ } } + /* - * If we have no buffer or can release the buffer - * then do a simple migration. + * Buffers are managed in a filesystem specific way. + * We must have no buffers or drop them. */ if (!page_has_buffers(page) || try_to_release_page(page, GFP_KERNEL)) { @@ -966,6 +976,11 @@ redo: * swap them out. */ if (pass > 4) { + /* + * Persistently unable to drop buffers..... As a + * measure of last resort we fall back to + * swap_page(). + */ unlock_page(newpage); newpage = NULL; rc = swap_page(page); -- cgit v1.2.2 From 80e4342601abfafacb5f20571e40b56d73d10819 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sat, 11 Feb 2006 17:55:53 -0800 Subject: [PATCH] zone reclaim: do not check references to a page during zone reclaim shrink_list() and refill_inactive() check all ptes pointing to a page for reference bits in order to decide if the page should be put on the active list. This is not necessary for zone_reclaim since we are only interested in removing unmapped pages. Skip the checks in both functions. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 5db32fdfaf39..e1c64230ffdd 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -443,6 +443,10 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc) BUG_ON(PageActive(page)); sc->nr_scanned++; + + if (!sc->may_swap && page_mapped(page)) + goto keep_locked; + /* Double the slab pressure for mapped and swapcache pages */ if (page_mapped(page) || PageSwapCache(page)) sc->nr_scanned++; @@ -1231,7 +1235,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc) * Now use this metric to decide whether to start moving mapped memory * onto the inactive list. */ - if (swap_tendency >= 100) + if (swap_tendency >= 100 && sc->may_swap) reclaim_mapped = 1; while (!list_empty(&l_hold)) { -- cgit v1.2.2 From 072eaa5d9cc3e63f567ffd9ad87b36194fdd8010 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sat, 11 Feb 2006 17:55:54 -0800 Subject: [PATCH] vmscan: remove duplicate increment of reclaim_in_progress shrink_zone() already increments reclaim_in_progress. No need to do it in balance_pgdat. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index e1c64230ffdd..58ed5125b1a7 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1614,9 +1614,7 @@ scan: sc.nr_reclaimed = 0; sc.priority = priority; sc.swap_cluster_max = nr_pages? 
nr_pages : SWAP_CLUSTER_MAX; - atomic_inc(&zone->reclaim_in_progress); shrink_zone(zone, &sc); - atomic_dec(&zone->reclaim_in_progress); reclaim_state->reclaimed_slab = 0; nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages); -- cgit v1.2.2 From 2903fb1694dcb08a3c1d9d823cfae7ba30e66cd3 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sat, 11 Feb 2006 17:55:55 -0800 Subject: [PATCH] vmscan: skip reclaim_mapped determination if we do not swap This puts the variables and the way to get to reclaim_mapped in one block. And allows zone_reclaim or other things to skip the determination (maybe this whole block of code does not belong into refill_inactive_zone()?) Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 75 +++++++++++++++++++++++++++++++++---------------------------- 1 file changed, 41 insertions(+), 34 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 58ed5125b1a7..1838c15ca4fd 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1195,9 +1195,47 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc) struct page *page; struct pagevec pvec; int reclaim_mapped = 0; - long mapped_ratio; - long distress; - long swap_tendency; + + if (unlikely(sc->may_swap)) { + long mapped_ratio; + long distress; + long swap_tendency; + + /* + * `distress' is a measure of how much trouble we're having + * reclaiming pages. 0 -> no problems. 100 -> great trouble. + */ + distress = 100 >> zone->prev_priority; + + /* + * The point of this algorithm is to decide when to start + * reclaiming mapped memory instead of just pagecache. Work out + * how much memory + * is mapped. + */ + mapped_ratio = (sc->nr_mapped * 100) / total_memory; + + /* + * Now decide how much we really want to unmap some pages. The + * mapped ratio is downgraded - just because there's a lot of + * mapped memory doesn't necessarily mean that page reclaim + * isn't succeeding. + * + * The distress ratio is important - we don't want to start + * going oom. + * + * A 100% value of vm_swappiness overrides this algorithm + * altogether. + */ + swap_tendency = mapped_ratio / 2 + distress + vm_swappiness; + + /* + * Now use this metric to decide whether to start moving mapped + * memory onto the inactive list. + */ + if (swap_tendency >= 100) + reclaim_mapped = 1; + } lru_add_drain(); spin_lock_irq(&zone->lru_lock); @@ -1207,37 +1245,6 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc) zone->nr_active -= pgmoved; spin_unlock_irq(&zone->lru_lock); - /* - * `distress' is a measure of how much trouble we're having reclaiming - * pages. 0 -> no problems. 100 -> great trouble. - */ - distress = 100 >> zone->prev_priority; - - /* - * The point of this algorithm is to decide when to start reclaiming - * mapped memory instead of just pagecache. Work out how much memory - * is mapped. - */ - mapped_ratio = (sc->nr_mapped * 100) / total_memory; - - /* - * Now decide how much we really want to unmap some pages. The mapped - * ratio is downgraded - just because there's a lot of mapped memory - * doesn't necessarily mean that page reclaim isn't succeeding. - * - * The distress ratio is important - we don't want to start going oom. - * - * A 100% value of vm_swappiness overrides this algorithm altogether. - */ - swap_tendency = mapped_ratio / 2 + distress + vm_swappiness; - - /* - * Now use this metric to decide whether to start moving mapped memory - * onto the inactive list. 
- */ - if (swap_tendency >= 100 && sc->may_swap) - reclaim_mapped = 1; - while (!list_empty(&l_hold)) { cond_resched(); page = lru_to_page(&l_hold); -- cgit v1.2.2 From 41d78ba55037468e6c86c53e3076d1a74841de39 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 14 Feb 2006 13:52:58 -0800 Subject: [PATCH] compound page: use page[1].lru If a compound page has its own put_page_testzero destructor (the only current example is free_huge_page), that is noted in page[1].mapping of the compound page. But that's rather a poor place to keep it: functions which call set_page_dirty_lock after get_user_pages (e.g. Infiniband's __ib_umem_release) ought to be checking first, otherwise set_page_dirty is liable to crash on what's not the address of a struct address_space. And now I'm about to make that worse: it turns out that every compound page needs a destructor, so we can no longer rely on hugetlb pages going their own special way, to avoid further problems of page->mapping reuse. For example, not many people know that: on 50% of i386 -Os builds, the first tail page of a compound page purports to be PageAnon (when its destructor has an odd address), which surprises page_add_file_rmap. Keep the compound page destructor in page[1].lru.next instead. And to free up the common pairing of mapping and index, also move compound page order from index to lru.prev. Slab reuses page->lru too: but if we ever need slab to use compound pages, it can easily stack its use above this. (akpm: decoded version of the above: the tail pages of a compound page now have ->mapping==NULL, so there's no need for the set_page_dirty[_lock]() caller to check that they're not compund pages before doing the dirty). Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 4 ++-- mm/page_alloc.c | 15 ++++++--------- mm/swap.c | 2 +- 3 files changed, 9 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 67f29516662a..508707704d2c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -85,7 +85,7 @@ void free_huge_page(struct page *page) BUG_ON(page_count(page)); INIT_LIST_HEAD(&page->lru); - page[1].mapping = NULL; + page[1].lru.next = NULL; /* reset dtor */ spin_lock(&hugetlb_lock); enqueue_huge_page(page); @@ -105,7 +105,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr) } spin_unlock(&hugetlb_lock); set_page_count(page, 1); - page[1].mapping = (void *)free_huge_page; + page[1].lru.next = (void *)free_huge_page; /* set dtor */ for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i) clear_user_highpage(&page[i], addr); return page; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index dde04ff4be31..eec89ab39bb6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -169,20 +169,17 @@ static void bad_page(struct page *page) * All pages have PG_compound set. All pages have their ->private pointing at * the head page (even the head page has this). * - * The first tail page's ->mapping, if non-zero, holds the address of the - * compound page's put_page() function. - * - * The order of the allocation is stored in the first tail page's ->index - * This is only for debug at present. This usage means that zero-order pages - * may not be compound. + * The first tail page's ->lru.next holds the address of the compound page's + * put_page() function. Its ->lru.prev holds the order of allocation. + * This usage means that zero-order pages may not be compound. 
*/ static void prep_compound_page(struct page *page, unsigned long order) { int i; int nr_pages = 1 << order; - page[1].mapping = NULL; - page[1].index = order; + page[1].lru.next = NULL; /* set dtor */ + page[1].lru.prev = (void *)order; for (i = 0; i < nr_pages; i++) { struct page *p = page + i; @@ -196,7 +193,7 @@ static void destroy_compound_page(struct page *page, unsigned long order) int i; int nr_pages = 1 << order; - if (unlikely(page[1].index != order)) + if (unlikely((unsigned long)page[1].lru.prev != order)) bad_page(page); for (i = 0; i < nr_pages; i++) { diff --git a/mm/swap.c b/mm/swap.c index 76247424dea1..cce3dda59c59 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -40,7 +40,7 @@ static void put_compound_page(struct page *page) if (put_page_testzero(page)) { void (*dtor)(struct page *page); - dtor = (void (*)(struct page *))page[1].mapping; + dtor = (void (*)(struct page *))page[1].lru.next; (*dtor)(page); } } -- cgit v1.2.2 From d98c7a09843621f1b145ca5ae8ed03ff04085edb Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 14 Feb 2006 13:52:59 -0800 Subject: [PATCH] compound page: default destructor Somehow I imagined that calling a NULL destructor would free a compound page rather than oopsing. No, we must supply a default destructor, __free_pages_ok using the order noted by prep_compound_page. hugetlb can still replace this as before with its own free_huge_page pointer. The case that needs this is not common: rarely does put_compound_page's put_page_testzero bring the count down to 0. But if get_user_pages is applied to some part of a compound page, without immediate release (e.g. AIO or Infiniband), then it's possible for its put_page to come after the containing vma has been unmapped and the driver done its free_pages. That's just the kind of case compound pages are supposed to be guarding against (but Nick points out, nor did PageReserved handle this right). Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index eec89ab39bb6..62c122528587 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -56,6 +56,7 @@ long nr_swap_pages; int percpu_pagelist_fraction; static void fastcall free_hot_cold_page(struct page *page, int cold); +static void __free_pages_ok(struct page *page, unsigned int order); /* * results with 256, 32 in the lowmem_reserve sysctl: @@ -173,12 +174,18 @@ static void bad_page(struct page *page) * put_page() function. Its ->lru.prev holds the order of allocation. * This usage means that zero-order pages may not be compound. */ + +static void free_compound_page(struct page *page) +{ + __free_pages_ok(page, (unsigned long)page[1].lru.prev); +} + static void prep_compound_page(struct page *page, unsigned long order) { int i; int nr_pages = 1 << order; - page[1].lru.next = NULL; /* set dtor */ + page[1].lru.next = (void *)free_compound_page; /* set dtor */ page[1].lru.prev = (void *)order; for (i = 0; i < nr_pages; i++) { struct page *p = page + i; -- cgit v1.2.2 From f822566165dd46ff5de9bf895cfa6c51f53bb0c4 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 14 Feb 2006 13:53:08 -0800 Subject: [PATCH] madvise MADV_DONTFORK/MADV_DOFORK Currently, copy-on-write may change the physical address of a page even if the user requested that the page is pinned in memory (either by mlock or by get_user_pages). 
This happens if the process forks meanwhile, and the parent writes to that page. As a result, the page is orphaned: in case of get_user_pages, the application will never see any data hardware DMA's into this page after the COW. In case of mlock'd memory, the parent is not getting the realtime/security benefits of mlock. In particular, this affects the Infiniband modules which do DMA from and into user pages all the time. This patch adds madvise options to control whether memory range is inherited across fork. Useful e.g. for when hardware is doing DMA from/into these pages. Could also be useful to an application wanting to speed up its forks by cutting large areas out of consideration. Signed-off-by: Michael S. Tsirkin Acked-by: Hugh Dickins Cc: Michael Kerrisk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/madvise.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/madvise.c b/mm/madvise.c index ae0ae3ea299a..af3d573b0141 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -22,16 +22,23 @@ static long madvise_behavior(struct vm_area_struct * vma, struct mm_struct * mm = vma->vm_mm; int error = 0; pgoff_t pgoff; - int new_flags = vma->vm_flags & ~VM_READHINTMASK; + int new_flags = vma->vm_flags; switch (behavior) { + case MADV_NORMAL: + new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; + break; case MADV_SEQUENTIAL: - new_flags |= VM_SEQ_READ; + new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ; break; case MADV_RANDOM: - new_flags |= VM_RAND_READ; + new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ; break; - default: + case MADV_DONTFORK: + new_flags |= VM_DONTCOPY; + break; + case MADV_DOFORK: + new_flags &= ~VM_DONTCOPY; break; } @@ -177,6 +184,12 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, long error; switch (behavior) { + case MADV_DOFORK: + if (vma->vm_flags & VM_IO) { + error = -EINVAL; + break; + } + case MADV_DONTFORK: case MADV_NORMAL: case MADV_SEQUENTIAL: case MADV_RANDOM: -- cgit v1.2.2 From a62eaf151d9cb478d127cfbc2e93c498869785b0 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 16 Feb 2006 23:41:58 +0100 Subject: [PATCH] x86_64: Add boot option to disable randomized mappings and cleanup AMD SimNow!'s JIT doesn't like them at all in the guest. For distribution installation it's easiest if it's a boot time option. Also I moved the variable to a more appropiate place and make it independent from sysctl And marked __read_mostly which it is. Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- mm/memory.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 2bee1f21aa8a..9abc6008544b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -82,6 +82,16 @@ EXPORT_SYMBOL(num_physpages); EXPORT_SYMBOL(high_memory); EXPORT_SYMBOL(vmalloc_earlyreserve); +int randomize_va_space __read_mostly = 1; + +static int __init disable_randmaps(char *s) +{ + randomize_va_space = 0; + return 0; +} +__setup("norandmaps", disable_randmaps); + + /* * If a p?d_bad entry is found while walking page tables, report * the error, before resetting entry to p?d_none. 
Usually (but -- cgit v1.2.2 From dd942ae331425812930cd01766178b7e28e65f2d Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 17 Feb 2006 01:39:16 +0100 Subject: [PATCH] Handle all and empty zones when setting up custom zonelists for mbind The memory allocator doesn't like empty zones (which have an uninitialized freelist), so a x86-64 system with a node fully in GFP_DMA32 only would crash on mbind. Fix that up by putting all possible zones as fallback into the zonelist and skipping the empty ones. In fact the code always enough allocated space for all zones, but only used it for the highest. This change just uses all the memory that was allocated before. This should work fine for now, but whoever implements node hot removal needs to fix this somewhere else too (or make sure zone datastructures by itself never go away, only their memory) Signed-off-by: Andi Kleen Acked-by: Christoph Lameter Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 3bd7fb7e4b75..323fdcf128c4 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -132,19 +132,29 @@ static int mpol_check_policy(int mode, nodemask_t *nodes) } return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL; } + /* Generate a custom zonelist for the BIND policy. */ static struct zonelist *bind_zonelist(nodemask_t *nodes) { struct zonelist *zl; - int num, max, nd; + int num, max, nd, k; max = 1 + MAX_NR_ZONES * nodes_weight(*nodes); - zl = kmalloc(sizeof(void *) * max, GFP_KERNEL); + zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL); if (!zl) return NULL; num = 0; - for_each_node_mask(nd, *nodes) - zl->zones[num++] = &NODE_DATA(nd)->node_zones[policy_zone]; + /* First put in the highest zones from all nodes, then all the next + lower zones etc. Avoid empty zones because the memory allocator + doesn't like them. If you implement node hot removal you + have to fix that. */ + for (k = policy_zone; k >= 0; k--) { + for_each_node_mask(nd, *nodes) { + struct zone *z = &NODE_DATA(nd)->node_zones[k]; + if (z->present_pages > 0) + zl->zones[num++] = z; + } + } zl->zones[num] = NULL; return zl; } -- cgit v1.2.2 From 4cf808eb443ead42777a0230b73aec0cee7fb298 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 17 Feb 2006 20:38:21 +0100 Subject: [PATCH] Handle holes in node mask in node fallback list setup Change the find_next_best_node algorithm to correctly skip over holes in the node online mask. Previously it would not handle missing nodes correctly and cause crashes at boot. 
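As a worked example of the old breakage (numbers assumed for illustration): with nodes 0, 2 and 3 online, num_online_nodes() is 3, so the old loop's n = (node + i) % num_online_nodes() computed, for node 2, the probe sequence 2, 1, 2 -- it touched offline node 1 and never considered node 3 at all. Iterating directly with for_each_online_node() avoids both problems.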
[Written by Linus, tested by AK] Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 62c122528587..208812b25597 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1541,29 +1541,29 @@ static int __initdata node_load[MAX_NUMNODES]; */ static int __init find_next_best_node(int node, nodemask_t *used_node_mask) { - int i, n, val; + int n, val; int min_val = INT_MAX; int best_node = -1; - for_each_online_node(i) { - cpumask_t tmp; + /* Use the local node if we haven't already */ + if (!node_isset(node, *used_node_mask)) { + node_set(node, *used_node_mask); + return node; + } - /* Start from local node */ - n = (node+i) % num_online_nodes(); + for_each_online_node(n) { + cpumask_t tmp; /* Don't want a node to appear more than once */ if (node_isset(n, *used_node_mask)) continue; - /* Use the local node if we haven't already */ - if (!node_isset(node, *used_node_mask)) { - best_node = node; - break; - } - /* Use the distance array to find the distance */ val = node_distance(node, n); + /* Penalize nodes under us ("prefer the next node") */ + val += (n < node); + /* Give preference to headless and unused nodes */ tmp = node_to_cpumask(n); if (!cpus_empty(tmp)) -- cgit v1.2.2 From 636f13c174dd7c84a437d3c3e8fa66f03f7fda63 Mon Sep 17 00:00:00 2001 From: Chris Wright Date: Fri, 17 Feb 2006 13:59:36 -0800 Subject: [PATCH] sys_mbind sanity checking Make sure maxnodes is safe size before calculating nlongs in get_nodes(). Signed-off-by: Chris Wright Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 323fdcf128c4..bedfa4f09c80 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -808,6 +808,8 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, nodes_clear(*nodes); if (maxnode == 0 || !nmask) return 0; + if (maxnode > PAGE_SIZE) + return -EINVAL; nlongs = BITS_TO_LONGS(maxnode); if ((maxnode % BITS_PER_LONG) == 0) -- cgit v1.2.2 From 9827b781f20828e5ceb911b879f268f78fe90815 Mon Sep 17 00:00:00 2001 From: Kurt Garloff Date: Mon, 20 Feb 2006 18:27:51 -0800 Subject: [PATCH] OOM kill: children accounting In the badness() calculation, there's currently this piece of code: /* * Processes which fork a lot of child processes are likely * a good choice. We add the vmsize of the children if they * have an own mm. This prevents forking servers to flood the * machine with an endless amount of children */ list_for_each(tsk, &p->children) { struct task_struct *chld; chld = list_entry(tsk, struct task_struct, sibling); if (chld->mm = p->mm && chld->mm) points += chld->mm->total_vm; } The intention is clear: If some server (apache) keeps spawning new children and we run OOM, we want to kill the father rather than picking a child. This -- to some degree -- also helps a bit with getting fork bombs under control, though I'd consider this a desirable side-effect rather than a feature. There's one problem with this: No matter how many or few children there are, if just one of them misbehaves, and all others (including the father) do everything right, we still always kill the whole family. This hits in real life; whether it's javascript in konqueror resulting in kdeinit (and thus the whole KDE session) being hit or just a classical server that spawns children. 
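As a rough illustration (ignoring the other factors badness() weighs in): a parent with three well-behaved children of 100 pages each and one runaway child of 600 pages is credited with all 900 pages of its children on top of its own size, so under full accounting it always outscores the 600-page child and the whole family gets killed, even though a single child is at fault.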
Sidenote: The killer does kill all direct children as well, not only the selected father, see oom_kill_process(). The idea in attached patch is that we do want to account the memory consumption of the (direct) children to the father -- however not fully. This maintains the property that fathers with too many children will still very likely be picked, whereas a single misbehaving child has the chance to be picked by the OOM killer. In the patch I account only half (rounded up) of the children's vm_size to the parent. This means that if one child eats more mem than the rest of the family, it will be picked, otherwise it's still the father and thus the whole family that gets selected. This is heuristics -- we could debate whether accounting for a fourth would be better than for half of it. Or -- if people would consider it worth the trouble -- make it a sysctl. For now I sticked to accounting for half, which should IMHO be a significant improvement. The patch does one more thing: As users tend to be irritated by the choice of killed processes (mainly because the children are killed first, despite some of them having a very low OOM score), I added some more output: The selected (father) process will be reported first and it's oom_score printed to syslog. Description: Only account for half of children's vm size in oom score calculation This should still give the parent enough point in case of fork bombs. If any child however has more than 50% of the vm size of all children together, it'll get a higher score and be elected. This patch also makes the kernel display the oom_score. Signed-off-by: Kurt Garloff Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index b05ab8f2a562..949eba1d5ba3 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -58,15 +58,17 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) /* * Processes which fork a lot of child processes are likely - * a good choice. We add the vmsize of the children if they + * a good choice. We add half the vmsize of the children if they * have an own mm. This prevents forking servers to flood the - * machine with an endless amount of children + * machine with an endless amount of children. In case a single + * child is eating the vast majority of memory, adding only half + * to the parents will make the child our kill candidate of choice. 
*/ list_for_each(tsk, &p->children) { struct task_struct *chld; chld = list_entry(tsk, struct task_struct, sibling); if (chld->mm != p->mm && chld->mm) - points += chld->mm->total_vm; + points += chld->mm->total_vm/2 + 1; } /* @@ -136,12 +138,12 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) * * (not docbooked, we don't want this one cluttering up the manual) */ -static struct task_struct * select_bad_process(void) +static struct task_struct *select_bad_process(unsigned long *ppoints) { - unsigned long maxpoints = 0; struct task_struct *g, *p; struct task_struct *chosen = NULL; struct timespec uptime; + *ppoints = 0; do_posix_clock_monotonic_gettime(&uptime); do_each_thread(g, p) { @@ -169,9 +171,9 @@ static struct task_struct * select_bad_process(void) return p; points = badness(p, uptime.tv_sec); - if (points > maxpoints || !chosen) { + if (points > *ppoints || !chosen) { chosen = p; - maxpoints = points; + *ppoints = points; } } while_each_thread(g, p); return chosen; @@ -237,12 +239,15 @@ static struct mm_struct *oom_kill_task(task_t *p) return mm; } -static struct mm_struct *oom_kill_process(struct task_struct *p) +static struct mm_struct *oom_kill_process(struct task_struct *p, + unsigned long points) { struct mm_struct *mm; struct task_struct *c; struct list_head *tsk; + printk(KERN_ERR "Out of Memory: Kill process %d (%s) score %li and " + "children.\n", p->pid, p->comm, points); /* Try to kill a child first */ list_for_each(tsk, &p->children) { c = list_entry(tsk, struct task_struct, sibling); @@ -267,6 +272,7 @@ void out_of_memory(gfp_t gfp_mask, int order) { struct mm_struct *mm = NULL; task_t * p; + unsigned long points; if (printk_ratelimit()) { printk("oom-killer: gfp_mask=0x%x, order=%d\n", @@ -278,7 +284,7 @@ void out_of_memory(gfp_t gfp_mask, int order) cpuset_lock(); read_lock(&tasklist_lock); retry: - p = select_bad_process(); + p = select_bad_process(&points); if (PTR_ERR(p) == -1UL) goto out; @@ -290,7 +296,7 @@ retry: panic("Out of memory and no killable processes...\n"); } - mm = oom_kill_process(p); + mm = oom_kill_process(p, points); if (!mm) goto retry; -- cgit v1.2.2 From 9b0f8b040acd8dfd23860754c0d09ff4f44e2cbc Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 20 Feb 2006 18:27:52 -0800 Subject: [PATCH] Terminate process that fails on a constrained allocation Some allocations are restricted to a limited set of nodes (due to memory policies or cpuset constraints). If the page allocator is not able to find enough memory then that does not mean that overall system memory is low. In particular going postal and more or less randomly shooting at processes is not likely going to help the situation but may just lead to suicide (the whole system coming down). It is better to signal to the process that no memory exists given the constraints that the process (or the configuration of the process) has placed on the allocation behavior. The process may be killed but then the sysadmin or developer can investigate the situation. The solution is similar to what we do when running out of hugepages. This patch adds a check before we kill processes. At that point performance considerations do not matter much so we just scan the zonelist and reconstruct a list of nodes. If the list of nodes does not contain all online nodes then this is a constrained allocation and we should kill the current process. 
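For example, an MPOL_BIND allocation restricted to a single node can fail while every other node still has plenty of free memory; killing the highest scoring process on some other node would not free anything the caller is allowed to use, whereas killing the constrained allocator itself both stops the failing allocation and lets the admin investigate the restrictive policy or cpuset.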
Signed-off-by: Christoph Lameter Cc: Nick Piggin Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 103 ++++++++++++++++++++++++++++++++++++++++++-------------- mm/page_alloc.c | 2 +- 2 files changed, 79 insertions(+), 26 deletions(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 949eba1d5ba3..8123fad5a485 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -132,6 +132,36 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) return points; } +/* + * Types of limitations to the nodes from which allocations may occur + */ +#define CONSTRAINT_NONE 1 +#define CONSTRAINT_MEMORY_POLICY 2 +#define CONSTRAINT_CPUSET 3 + +/* + * Determine the type of allocation constraint. + */ +static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask) +{ +#ifdef CONFIG_NUMA + struct zone **z; + nodemask_t nodes = node_online_map; + + for (z = zonelist->zones; *z; z++) + if (cpuset_zone_allowed(*z, gfp_mask)) + node_clear((*z)->zone_pgdat->node_id, + nodes); + else + return CONSTRAINT_CPUSET; + + if (!nodes_empty(nodes)) + return CONSTRAINT_MEMORY_POLICY; +#endif + + return CONSTRAINT_NONE; +} + /* * Simple selection loop. We chose the process with the highest * number of 'points'. We expect the caller will lock the tasklist. @@ -184,7 +214,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints) * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that * we select a process with CAP_SYS_RAW_IO set). */ -static void __oom_kill_task(task_t *p) +static void __oom_kill_task(task_t *p, const char *message) { if (p->pid == 1) { WARN_ON(1); @@ -200,8 +230,8 @@ static void __oom_kill_task(task_t *p) return; } task_unlock(p); - printk(KERN_ERR "Out of Memory: Killed process %d (%s).\n", - p->pid, p->comm); + printk(KERN_ERR "%s: Killed process %d (%s).\n", + message, p->pid, p->comm); /* * We give our sacrificial lamb high priority and access to @@ -214,7 +244,7 @@ static void __oom_kill_task(task_t *p) force_sig(SIGKILL, p); } -static struct mm_struct *oom_kill_task(task_t *p) +static struct mm_struct *oom_kill_task(task_t *p, const char *message) { struct mm_struct *mm = get_task_mm(p); task_t * g, * q; @@ -226,21 +256,21 @@ static struct mm_struct *oom_kill_task(task_t *p) return NULL; } - __oom_kill_task(p); + __oom_kill_task(p, message); /* * kill all processes that share the ->mm (i.e. all threads), * but are in a different thread group */ do_each_thread(g, q) if (q->mm == mm && q->tgid != p->tgid) - __oom_kill_task(q); + __oom_kill_task(q, message); while_each_thread(g, q); return mm; } static struct mm_struct *oom_kill_process(struct task_struct *p, - unsigned long points) + unsigned long points, const char *message) { struct mm_struct *mm; struct task_struct *c; @@ -253,11 +283,11 @@ static struct mm_struct *oom_kill_process(struct task_struct *p, c = list_entry(tsk, struct task_struct, sibling); if (c->mm == p->mm) continue; - mm = oom_kill_task(c); + mm = oom_kill_task(c, message); if (mm) return mm; } - return oom_kill_task(p); + return oom_kill_task(p, message); } /** @@ -268,10 +298,10 @@ static struct mm_struct *oom_kill_process(struct task_struct *p, * OR try to be smart about which process to kill. Note that we * don't have to be perfect here, we just have to be good. 
*/ -void out_of_memory(gfp_t gfp_mask, int order) +void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) { struct mm_struct *mm = NULL; - task_t * p; + task_t *p; unsigned long points; if (printk_ratelimit()) { @@ -283,25 +313,48 @@ void out_of_memory(gfp_t gfp_mask, int order) cpuset_lock(); read_lock(&tasklist_lock); + + /* + * Check if there were limitations on the allocation (only relevant for + * NUMA) that may require different handling. + */ + switch (constrained_alloc(zonelist, gfp_mask)) { + case CONSTRAINT_MEMORY_POLICY: + mm = oom_kill_process(current, points, + "No available memory (MPOL_BIND)"); + break; + + case CONSTRAINT_CPUSET: + mm = oom_kill_process(current, points, + "No available memory in cpuset"); + break; + + case CONSTRAINT_NONE: retry: - p = select_bad_process(&points); + /* + * Rambo mode: Shoot down a process and hope it solves whatever + * issues we may have. + */ + p = select_bad_process(&points); - if (PTR_ERR(p) == -1UL) - goto out; + if (PTR_ERR(p) == -1UL) + goto out; - /* Found nothing?!?! Either we hang forever, or we panic. */ - if (!p) { - read_unlock(&tasklist_lock); - cpuset_unlock(); - panic("Out of memory and no killable processes...\n"); - } + /* Found nothing?!?! Either we hang forever, or we panic. */ + if (!p) { + read_unlock(&tasklist_lock); + cpuset_unlock(); + panic("Out of memory and no killable processes...\n"); + } - mm = oom_kill_process(p, points); - if (!mm) - goto retry; + mm = oom_kill_process(p, points, "Out of memory"); + if (!mm) + goto retry; + + break; + } - out: - read_unlock(&tasklist_lock); +out: cpuset_unlock(); if (mm) mmput(mm); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 208812b25597..791690d7d3fa 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1015,7 +1015,7 @@ rebalance: if (page) goto got_pg; - out_of_memory(gfp_mask, order); + out_of_memory(zonelist, gfp_mask, order); goto restart; } -- cgit v1.2.2 From a9c930bac163c5e616ca0ba9378e7dc746c93227 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 20 Feb 2006 18:27:59 -0800 Subject: [PATCH] Fix units in mbind check maxnode is a bit index and can't be directly compared against a byte length like PAGE_SIZE Signed-off-by: Andi Kleen Cc: Chris Wright Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index bedfa4f09c80..6422fe478113 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -808,7 +808,7 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, nodes_clear(*nodes); if (maxnode == 0 || !nmask) return 0; - if (maxnode > PAGE_SIZE) + if (maxnode > PAGE_SIZE*BITS_PER_BYTE) return -EINVAL; nlongs = BITS_TO_LONGS(maxnode); -- cgit v1.2.2 From 7a9166e3b037296366cea6f3c97f705d33e209e6 Mon Sep 17 00:00:00 2001 From: Luke Yang Date: Mon, 20 Feb 2006 18:28:07 -0800 Subject: [PATCH] Fix undefined symbols for nommu architecture Signed-off-by: Luke Yang Acked-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/nommu.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'mm') diff --git a/mm/nommu.c b/mm/nommu.c index c10262d68232..99d21020ec9d 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -57,6 +57,8 @@ EXPORT_SYMBOL(vmalloc); EXPORT_SYMBOL(vfree); EXPORT_SYMBOL(vmalloc_to_page); EXPORT_SYMBOL(vmalloc_32); +EXPORT_SYMBOL(vmap); +EXPORT_SYMBOL(vunmap); /* * Handle all mappings that got truncated by a "truncate()" -- cgit v1.2.2 From 
fcab6f351305029fc5e3c632209d45cae57e4835 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 20 Feb 2006 18:28:10 -0800 Subject: [PATCH] mm/mempolicy.c: fix 'if ();' typo [akpm; it happens that the code was still correct, only inefficient ] Signed-off-by: Alexey Dobriyan Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 6422fe478113..880831bd3003 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -587,7 +587,7 @@ redo: } list_add(&page->lru, &newlist); nr_pages++; - if (nr_pages > MIGRATE_CHUNK_SIZE); + if (nr_pages > MIGRATE_CHUNK_SIZE) break; } err = migrate_pages(pagelist, &newlist, &moved, &failed); -- cgit v1.2.2 From b00dc3ad74fdb676552d46ee573b88e927240d0c Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 21 Feb 2006 23:49:47 +0000 Subject: [PATCH] tmpfs: fix mount mpol nodelist parsing I've been dissatisfied with the mpol_nodelist mount option which was added to tmpfs earlier in -rc. Replace it by mpol=policy:nodelist. And it was broken: a nodelist is a comma-separated list of numbers and ranges; the mount options are a comma-separated list of token=values. Whoops, blindly strsep'ing on commas doesn't work so well: since we've no numeric tokens, and unlikely to add them, use that to distinguish. Move the mpol= parsing to shmem_parse_mpol under CONFIG_NUMA, reject all its options as invalid if not NUMA. /proc shows MPOL_PREFERRED as "prefer", so use that name for the policy instead of "preferred". Enforce that mpol=default has no nodelist; that mpol=prefer has one node only; that mpol=bind has a nodelist; but let mpol=interleave use node_online_map if no nodelist given. Describe this in tmpfs.txt. 
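With this change the policy and its nodelist are given together in a single option, for example:

	mount -t tmpfs -o mpol=interleave tmpfs /mnt
	mount -t tmpfs -o mpol=bind:0-3,5 tmpfs /mnt
	mount -t tmpfs -o mpol=prefer:1 tmpfs /mnt

mpol=interleave with no nodelist interleaves over all online nodes; mpol=bind requires a nodelist and mpol=prefer exactly one node.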
Signed-off-by: Hugh Dickins Acked-by: Robin Holt Acked-by: Andi Kleen Signed-off-by: Linus Torvalds --- mm/shmem.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 69 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index f7ac7b812f92..7c455fbaff7b 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -874,6 +875,51 @@ redirty: } #ifdef CONFIG_NUMA +static int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes) +{ + char *nodelist = strchr(value, ':'); + int err = 1; + + if (nodelist) { + /* NUL-terminate policy string */ + *nodelist++ = '\0'; + if (nodelist_parse(nodelist, *policy_nodes)) + goto out; + } + if (!strcmp(value, "default")) { + *policy = MPOL_DEFAULT; + /* Don't allow a nodelist */ + if (!nodelist) + err = 0; + } else if (!strcmp(value, "prefer")) { + *policy = MPOL_PREFERRED; + /* Insist on a nodelist of one node only */ + if (nodelist) { + char *rest = nodelist; + while (isdigit(*rest)) + rest++; + if (!*rest) + err = 0; + } + } else if (!strcmp(value, "bind")) { + *policy = MPOL_BIND; + /* Insist on a nodelist */ + if (nodelist) + err = 0; + } else if (!strcmp(value, "interleave")) { + *policy = MPOL_INTERLEAVE; + /* Default to nodes online if no nodelist */ + if (!nodelist) + *policy_nodes = node_online_map; + err = 0; + } +out: + /* Restore string for error message */ + if (nodelist) + *--nodelist = ':'; + return err; +} + static struct page *shmem_swapin_async(struct shared_policy *p, swp_entry_t entry, unsigned long idx) { @@ -926,6 +972,11 @@ shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, return page; } #else +static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes) +{ + return 1; +} + static inline struct page * shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx) { @@ -1859,7 +1910,23 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid, { char *this_char, *value, *rest; - while ((this_char = strsep(&options, ",")) != NULL) { + while (options != NULL) { + this_char = options; + for (;;) { + /* + * NUL-terminate this option: unfortunately, + * mount options form a comma-separated list, + * but mpol's nodelist may also contain commas. 
+ */ + options = strchr(options, ','); + if (options == NULL) + break; + options++; + if (!isdigit(*options)) { + options[-1] = '\0'; + break; + } + } if (!*this_char) continue; if ((value = strchr(this_char,'=')) != NULL) { @@ -1910,18 +1977,8 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid, if (*rest) goto bad_val; } else if (!strcmp(this_char,"mpol")) { - if (!strcmp(value,"default")) - *policy = MPOL_DEFAULT; - else if (!strcmp(value,"preferred")) - *policy = MPOL_PREFERRED; - else if (!strcmp(value,"bind")) - *policy = MPOL_BIND; - else if (!strcmp(value,"interleave")) - *policy = MPOL_INTERLEAVE; - else + if (shmem_parse_mpol(value,policy,policy_nodes)) goto bad_val; - } else if (!strcmp(this_char,"mpol_nodelist")) { - nodelist_parse(value, *policy_nodes); } else { printk(KERN_ERR "tmpfs: Bad mount option %s\n", this_char); -- cgit v1.2.2 From 1e275d406bf6b88e4de6925cf594b64bb2ec49bc Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Fri, 24 Feb 2006 13:04:12 -0800 Subject: [PATCH] page migration: Fix MPOL_INTERLEAVE behavior for migration via mbind() migrate_pages_to() allocates a list of new pages on the intended target node or with the intended policy and then uses the list of new pages as targets for the migration of a list of pages out of place. When the pages are allocated it is not clear which of the out of place pages will be moved to the new pages. So we cannot specify an address as needed by alloc_page_vma(). This causes problem for MPOL_INTERLEAVE which will currently allocate the pages on the first node of the set. If mbind is used with vma that has the policy of MPOL_INTERLEAVE then the interleaving of pages may be destroyed. This patch fixes that by generating a fake address for each alloc_page_vma which will result is a distribution of pages as prescribed by MPOL_INTERLEAVE. Lee also noted that the sequence of nodes for the new pages seems to be inverted. So we also invert the way the lists of pages for migration are build. Signed-off-by: Christoph Lameter Signed-off-by: Lee Schermerhorn Looks-ok-to: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 880831bd3003..67af4cea1e23 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -552,7 +552,7 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist, */ if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) { if (isolate_lru_page(page)) - list_add(&page->lru, pagelist); + list_add_tail(&page->lru, pagelist); } } @@ -569,6 +569,7 @@ static int migrate_pages_to(struct list_head *pagelist, LIST_HEAD(moved); LIST_HEAD(failed); int err = 0; + unsigned long offset = 0; int nr_pages; struct page *page; struct list_head *p; @@ -576,8 +577,21 @@ static int migrate_pages_to(struct list_head *pagelist, redo: nr_pages = 0; list_for_each(p, pagelist) { - if (vma) - page = alloc_page_vma(GFP_HIGHUSER, vma, vma->vm_start); + if (vma) { + /* + * The address passed to alloc_page_vma is used to + * generate the proper interleave behavior. We fake + * the address here by an increasing offset in order + * to get the proper distribution of pages. + * + * No decision has been made as to which page + * a certain old page is moved to so we cannot + * specify the correct address. 
+ */ + page = alloc_page_vma(GFP_HIGHUSER, vma, + offset + vma->vm_start); + offset += PAGE_SIZE; + } else page = alloc_pages_node(dest, GFP_HIGHUSER, 0); @@ -585,7 +599,7 @@ redo: err = -ENOMEM; goto out; } - list_add(&page->lru, &newlist); + list_add_tail(&page->lru, &newlist); nr_pages++; if (nr_pages > MIGRATE_CHUNK_SIZE) break; -- cgit v1.2.2 From d4f7796e9b387e471ab0e8ed4e0c2bd616b3c193 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Fri, 24 Feb 2006 13:04:22 -0800 Subject: [PATCH] vmscan: fix zone_reclaim - PF_SWAPWRITE needs to be set for RECLAIM_SWAP to be able to write out pages to swap. Currently RECLAIM_SWAP may not do that. - remove setting nr_reclaimed pages after slab reclaim since the slab shrinking code does not use that and the nr_reclaimed pages is just right for the intended follow up action. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 1838c15ca4fd..b0af7593d01e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1908,7 +1908,12 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) sc.swap_cluster_max = SWAP_CLUSTER_MAX; cond_resched(); - p->flags |= PF_MEMALLOC; + /* + * We need to be able to allocate from the reserves for RECLAIM_SWAP + * and we also need to be able to write out pages for RECLAIM_WRITE + * and RECLAIM_SWAP. + */ + p->flags |= PF_MEMALLOC | PF_SWAPWRITE; reclaim_state.reclaimed_slab = 0; p->reclaim_state = &reclaim_state; @@ -1932,11 +1937,10 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) * a long time. */ shrink_slab(sc.nr_scanned, gfp_mask, order); - sc.nr_reclaimed = 1; /* Avoid getting the off node timeout */ } p->reclaim_state = NULL; - current->flags &= ~PF_MEMALLOC; + current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); if (sc.nr_reclaimed == 0) zone->last_unsuccessful_zone_reclaim = jiffies; -- cgit v1.2.2 From 511030bcd24119fa3759ef3f914d354e107ef839 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 28 Feb 2006 16:58:57 -0800 Subject: [PATCH] Fix sys_migrate_pages: Move all pages when invoked from root Currently sys_migrate_pages only moves pages belonging to a process. This is okay when invoked from a regular user. But if invoked from root it should move all pages as documented in the migrate_pages manpage. Signed-off-by: Christoph Lameter Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 67af4cea1e23..5643cfed6b0f 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -954,7 +954,8 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, goto out; } - err = do_migrate_pages(mm, &old, &new, MPOL_MF_MOVE); + err = do_migrate_pages(mm, &old, &new, + capable(CAP_SYS_ADMIN) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); out: mmput(mm); return err; -- cgit v1.2.2 From e8788c0cce63e0cc8689a123d1ce0af1e28cd583 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 28 Feb 2006 16:59:16 -0800 Subject: [PATCH] remove_from_swap: fix locking remove_from_swap() currently attempts to use page_lock_anon_vma to obtain an anon_vma lock. That is not working since the page may have been remapped via swap ptes in order to move the page. 
However, do_migrate_pages() obtain the mmap_sem lock and therefore there is a guarantee that the anonymous vma will not vanish from under us. There is therefore no need to use page_lock_anon_vma. Signed-off-by: Christoph Lameter Acked-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/rmap.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/rmap.c b/mm/rmap.c index df2c41c2a9a2..d8ce5ff61454 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -212,25 +212,33 @@ out: * through real pte's pointing to valid pages and then releasing * the page from the swap cache. * - * Must hold page lock on page. + * Must hold page lock on page and mmap_sem of one vma that contains + * the page. */ void remove_from_swap(struct page *page) { struct anon_vma *anon_vma; struct vm_area_struct *vma; + unsigned long mapping; - if (!PageAnon(page) || !PageSwapCache(page)) + if (!PageSwapCache(page)) return; - anon_vma = page_lock_anon_vma(page); - if (!anon_vma) + mapping = (unsigned long)page->mapping; + + if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0) return; + /* + * We hold the mmap_sem lock. So no need to call page_lock_anon_vma. + */ + anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON); + spin_lock(&anon_vma->lock); + list_for_each_entry(vma, &anon_vma->head, anon_vma_node) remove_vma_swap(vma, page); spin_unlock(&anon_vma->lock); - delete_from_swap_cache(page); } EXPORT_SYMBOL(remove_from_swap); -- cgit v1.2.2 From f61388822a6040ff462c5f7260daa0f1017f2db0 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 28 Feb 2006 16:59:18 -0800 Subject: [PATCH] nommu: implement vmalloc_node() Fix oprofile linkage. Pointed out by "Luke Yang" . Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/nommu.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/nommu.c b/mm/nommu.c index 99d21020ec9d..4951f4786f28 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -53,7 +53,6 @@ DECLARE_RWSEM(nommu_vma_sem); struct vm_operations_struct generic_file_vm_ops = { }; -EXPORT_SYMBOL(vmalloc); EXPORT_SYMBOL(vfree); EXPORT_SYMBOL(vmalloc_to_page); EXPORT_SYMBOL(vmalloc_32); @@ -205,6 +204,13 @@ void *vmalloc(unsigned long size) { return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); } +EXPORT_SYMBOL(vmalloc); + +void *vmalloc_node(unsigned long size, int node) +{ + return vmalloc(size); +} +EXPORT_SYMBOL(vmalloc_node); /* * vmalloc_32 - allocate virtually continguos memory (32bit addressable) -- cgit v1.2.2 From d6713e046336ffa98060418c4d2c65243639e107 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 28 Feb 2006 16:59:19 -0800 Subject: [PATCH] out_of_memory(): use of uninitialised Under some circumstances `points' can get printed before it's initialised. Spotted by Carlos Martin . 
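(The CONSTRAINT_MEMORY_POLICY and CONSTRAINT_CPUSET paths introduced earlier in this series pass `points' straight to oom_kill_process(), which prints it, without ever having gone through select_bad_process().)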
Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 8123fad5a485..c86c737d2433 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -302,7 +302,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) { struct mm_struct *mm = NULL; task_t *p; - unsigned long points; + unsigned long points = 0; if (printk_ratelimit()) { printk("oom-killer: gfp_mask=0x%x, order=%d\n", -- cgit v1.2.2 From 140ffcec4def3ee3af7565b2cf1d3b2580f7e180 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 2 Mar 2006 02:54:28 -0800 Subject: [PATCH] out_of_memory() locking fix I seem to have lost this read_unlock(). While we're there, let's turn that interruptible sleep unto uninterruptible, so we don't get a busywait if signal_pending(). (Again. We seem to have a habit of doing this). Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index c86c737d2433..78747afad6b0 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -355,6 +355,7 @@ retry: } out: + read_unlock(&tasklist_lock); cpuset_unlock(); if (mm) mmput(mm); @@ -364,5 +365,5 @@ out: * retry to allocate memory unless "p" is current */ if (!test_thread_flag(TIF_MEMDIE)) - schedule_timeout_interruptible(1); + schedule_timeout_uninterruptible(1); } -- cgit v1.2.2 From a57ebfdb2cf9fa60dfa2f403f70ef6c432ca2a62 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Thu, 2 Mar 2006 02:54:37 -0800 Subject: [PATCH] numa_maps: Fix potential crash on non IA64 platforms numa_maps should not scan over huge vmas in order not to cause problems for non IA64 platforms that may have pte entries pointing to huge pages in a variety of ways in their page tables. Add a simple check to ignore vmas containing huge pages. Signed-off-by: Christoph Lameter Cc: Hugh Dickins Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 5643cfed6b0f..1a210088ad80 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1793,7 +1793,8 @@ int show_numa_map(struct seq_file *m, void *v) if (!md) return 0; - check_pgd_range(vma, vma->vm_start, vma->vm_end, + if (!is_vm_hugetlb_page(vma)) + check_pgd_range(vma, vma->vm_start, vma->vm_end, &node_online_map, MPOL_MF_STATS, md); if (md->pages) { -- cgit v1.2.2 From 264132bc62fe071d0ff378c1103bae9d33212f10 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 6 Mar 2006 12:10:07 -0800 Subject: Fix "check_slabp" printout size calculation We want to use the "struct slab" size, not the size of the pointer to same. As it is, we'd not print out the last entry pointers in the slab (where is ~10, depending on whether it's a 32-bit or 64-bit kernel). Gaah, that slab code was written by somebody who likes unreadable crud. Signed-off-by: Linus Torvalds --- mm/slab.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index add05d808a4a..2b0b1519bb74 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2554,7 +2554,7 @@ static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). 
Hexdump:\n", cachep->name, cachep->num, slabp, slabp->inuse); for (i = 0; - i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t); + i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t); i++) { if ((i % 16) == 0) printk("\n%03x:", i); -- cgit v1.2.2 From 9888e6fa7b68d9c8cc2c162a90979825ab45150a Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 6 Mar 2006 17:44:43 -0800 Subject: slab: clarify and fix calculate_slab_order() If we triggered the 'offslab_limit' test, we would return with cachep->gfporder incremented once too many times. This clarifies the logic somewhat, and fixes that bug. Signed-off-by: Linus Torvalds --- mm/slab.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index 2b0b1519bb74..f2e92dc1c9ce 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1628,36 +1628,36 @@ static inline size_t calculate_slab_order(struct kmem_cache *cachep, size_t size, size_t align, unsigned long flags) { size_t left_over = 0; + int gfporder; - for (;; cachep->gfporder++) { + for (gfporder = 0 ; gfporder <= MAX_GFP_ORDER; gfporder++) { unsigned int num; size_t remainder; - if (cachep->gfporder > MAX_GFP_ORDER) { - cachep->num = 0; - break; - } - - cache_estimate(cachep->gfporder, size, align, flags, - &remainder, &num); + cache_estimate(gfporder, size, align, flags, &remainder, &num); if (!num) continue; + /* More than offslab_limit objects will cause problems */ - if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit) + if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit) break; + /* Found something acceptable - save it away */ cachep->num = num; + cachep->gfporder = gfporder; left_over = remainder; /* * Large number of objects is good, but very large slabs are * currently bad for the gfp()s. */ - if (cachep->gfporder >= slab_break_gfp_order) + if (gfporder >= slab_break_gfp_order) break; - if ((left_over * 8) <= (PAGE_SIZE << cachep->gfporder)) - /* Acceptable internal fragmentation */ + /* + * Acceptable internal fragmentation? + */ + if ((left_over * 8) <= (PAGE_SIZE << gfporder)) break; } return left_over; -- cgit v1.2.2 From 397874dfe9862b494e1fdcd2baef4ac432d224c8 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 6 Mar 2006 15:42:53 -0800 Subject: [PATCH] numa_maps update Change the format of numa_maps to be more compact and contain additional information that is useful for managing and troubleshooting memory on a NUMA system. Numa_maps can now also support huge pages. Fixes: 1. More compact format. Only display fields if they contain additional information. 2. Always display information for all vmas. The old numa_maps did not display vma with no mapped entries. This was a bit confusing because page migration removes ptes for file backed vmas. After page migration a part of the vmas vanished. 3. Rename maxref to maxmap. This is the maximum mapcount of all the pages in a vma and may be used as an indicator as to how many processes may be using a certain vma. 4. Include the ability to scan over huge page vmas. New items shown: dirty Number of pages in a vma that have either the dirty bit set in the page_struct or in the pte. file= The file backing the pages if any stack Stack area heap Heap area huge Huge page area. The number of pages shows is the number of huge pages not the regular sized pages. swapcache Number of pages with swap references. Must be >0 in order to be shown. active Number of active pages. Only displayed if different from the number of pages mapped. 
writeback Number of pages under writeback. Only displayed if >0. Sample ouput of a process using huge pages: 00000000 default 2000000000000000 default file=/lib/ld-2.3.90.so mapped=13 mapmax=30 N0=13 2000000000044000 default file=/lib/ld-2.3.90.so anon=2 dirty=2 swapcache=2 N2=2 2000000000064000 default file=/lib/librt-2.3.90.so mapped=2 active=1 N1=1 N3=1 2000000000074000 default file=/lib/librt-2.3.90.so 2000000000080000 default file=/lib/librt-2.3.90.so anon=1 swapcache=1 N2=1 2000000000084000 default 2000000000088000 default file=/lib/libc-2.3.90.so mapped=52 mapmax=32 active=48 N0=52 20000000002bc000 default file=/lib/libc-2.3.90.so 20000000002c8000 default file=/lib/libc-2.3.90.so anon=3 dirty=2 swapcache=3 active=2 N1=1 N2=2 20000000002d4000 default anon=1 swapcache=1 N1=1 20000000002d8000 default file=/lib/libpthread-2.3.90.so mapped=8 mapmax=3 active=7 N2=2 N3=6 20000000002fc000 default file=/lib/libpthread-2.3.90.so 2000000000308000 default file=/lib/libpthread-2.3.90.so anon=1 dirty=1 swapcache=1 N1=1 200000000030c000 default anon=1 dirty=1 swapcache=1 N1=1 2000000000320000 default anon=1 dirty=1 N1=1 200000000071c000 default 2000000000720000 default anon=2 dirty=2 swapcache=1 N1=1 N2=1 2000000000f1c000 default 2000000000f20000 default anon=2 dirty=2 swapcache=1 active=1 N2=1 N3=1 200000000171c000 default 2000000001720000 default anon=1 dirty=1 swapcache=1 N1=1 2000000001b20000 default 2000000001b38000 default file=/lib/libgcc_s.so.1 mapped=2 N1=2 2000000001b48000 default file=/lib/libgcc_s.so.1 2000000001b54000 default file=/lib/libgcc_s.so.1 anon=1 dirty=1 active=0 N1=1 2000000001b58000 default file=/lib/libunwind.so.7.0.0 mapped=2 active=1 N1=2 2000000001b74000 default file=/lib/libunwind.so.7.0.0 2000000001b80000 default file=/lib/libunwind.so.7.0.0 2000000001b84000 default 4000000000000000 default file=/media/huge/test9 mapped=1 N1=1 6000000000000000 default file=/media/huge/test9 anon=1 dirty=1 active=0 N1=1 6000000000004000 default heap 607fffff7fffc000 default anon=1 dirty=1 swapcache=1 N2=1 607fffffff06c000 default stack anon=1 dirty=1 active=0 N1=1 8000000060000000 default file=/mnt/huge/test0 huge dirty=3 N1=3 8000000090000000 default file=/mnt/huge/test1 huge dirty=3 N0=1 N2=2 80000000c0000000 default file=/mnt/huge/test2 huge dirty=3 N1=1 N3=2 Signed-off-by: Christoph Lameter Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 120 +++++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 95 insertions(+), 25 deletions(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 1a210088ad80..d80fa7d8f720 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -197,7 +197,7 @@ static struct mempolicy *mpol_new(int mode, nodemask_t *nodes) return policy; } -static void gather_stats(struct page *, void *); +static void gather_stats(struct page *, void *, int pte_dirty); static void migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags); @@ -239,7 +239,7 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, continue; if (flags & MPOL_MF_STATS) - gather_stats(page, private); + gather_stats(page, private, pte_dirty(*pte)); else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) migrate_page_add(page, private, flags); else @@ -1753,67 +1753,137 @@ static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) struct numa_maps { unsigned long pages; unsigned long anon; - unsigned long mapped; + unsigned long active; + unsigned long writeback; unsigned 
long mapcount_max; + unsigned long dirty; + unsigned long swapcache; unsigned long node[MAX_NUMNODES]; }; -static void gather_stats(struct page *page, void *private) +static void gather_stats(struct page *page, void *private, int pte_dirty) { struct numa_maps *md = private; int count = page_mapcount(page); - if (count) - md->mapped++; + md->pages++; + if (pte_dirty || PageDirty(page)) + md->dirty++; - if (count > md->mapcount_max) - md->mapcount_max = count; + if (PageSwapCache(page)) + md->swapcache++; - md->pages++; + if (PageActive(page)) + md->active++; + + if (PageWriteback(page)) + md->writeback++; if (PageAnon(page)) md->anon++; + if (count > md->mapcount_max) + md->mapcount_max = count; + md->node[page_to_nid(page)]++; cond_resched(); } +static void check_huge_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct numa_maps *md) +{ + unsigned long addr; + struct page *page; + + for (addr = start; addr < end; addr += HPAGE_SIZE) { + pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK); + pte_t pte; + + if (!ptep) + continue; + + pte = *ptep; + if (pte_none(pte)) + continue; + + page = pte_page(pte); + if (!page) + continue; + + gather_stats(page, md, pte_dirty(*ptep)); + } +} + int show_numa_map(struct seq_file *m, void *v) { struct task_struct *task = m->private; struct vm_area_struct *vma = v; struct numa_maps *md; + struct file *file = vma->vm_file; + struct mm_struct *mm = vma->vm_mm; int n; char buffer[50]; - if (!vma->vm_mm) + if (!mm) return 0; md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL); if (!md) return 0; - if (!is_vm_hugetlb_page(vma)) + mpol_to_str(buffer, sizeof(buffer), + get_vma_policy(task, vma, vma->vm_start)); + + seq_printf(m, "%08lx %s", vma->vm_start, buffer); + + if (file) { + seq_printf(m, " file="); + seq_path(m, file->f_vfsmnt, file->f_dentry, "\n\t= "); + } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { + seq_printf(m, " heap"); + } else if (vma->vm_start <= mm->start_stack && + vma->vm_end >= mm->start_stack) { + seq_printf(m, " stack"); + } + + if (is_vm_hugetlb_page(vma)) { + check_huge_range(vma, vma->vm_start, vma->vm_end, md); + seq_printf(m, " huge"); + } else { check_pgd_range(vma, vma->vm_start, vma->vm_end, - &node_online_map, MPOL_MF_STATS, md); + &node_online_map, MPOL_MF_STATS, md); + } + + if (!md->pages) + goto out; - if (md->pages) { - mpol_to_str(buffer, sizeof(buffer), - get_vma_policy(task, vma, vma->vm_start)); + if (md->anon) + seq_printf(m," anon=%lu",md->anon); - seq_printf(m, "%08lx %s pages=%lu mapped=%lu maxref=%lu", - vma->vm_start, buffer, md->pages, - md->mapped, md->mapcount_max); + if (md->dirty) + seq_printf(m," dirty=%lu",md->dirty); - if (md->anon) - seq_printf(m," anon=%lu",md->anon); + if (md->pages != md->anon && md->pages != md->dirty) + seq_printf(m, " mapped=%lu", md->pages); - for_each_online_node(n) - if (md->node[n]) - seq_printf(m, " N%d=%lu", n, md->node[n]); + if (md->mapcount_max > 1) + seq_printf(m, " mapmax=%lu", md->mapcount_max); - seq_putc(m, '\n'); - } + if (md->swapcache) + seq_printf(m," swapcache=%lu", md->swapcache); + + if (md->active < md->pages && !is_vm_hugetlb_page(vma)) + seq_printf(m," active=%lu", md->active); + + if (md->writeback) + seq_printf(m," writeback=%lu", md->writeback); + + for_each_online_node(n) + if (md->node[n]) + seq_printf(m, " N%d=%lu", n, md->node[n]); +out: + seq_putc(m, '\n'); kfree(md); if (m->count < m->size) -- cgit v1.2.2 From f78bb8ad482267b92c122f0e37a7dce69c880247 Mon Sep 17 00:00:00 2001 From: Linus 
Torvalds Date: Wed, 8 Mar 2006 10:33:05 -0800 Subject: slab: fix calculate_slab_order() for SLAB_RECLAIM_ACCOUNT Instead of having a hard-to-read and confusing conditional in the caller, just make the slab order calculation handle this special case, since it's simple and obvious there. Signed-off-by: Linus Torvalds --- mm/slab.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index f2e92dc1c9ce..6ad6bd5a0b3e 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1647,6 +1647,14 @@ static inline size_t calculate_slab_order(struct kmem_cache *cachep, cachep->gfporder = gfporder; left_over = remainder; + /* + * A VFS-reclaimable slab tends to have most allocations + * as GFP_NOFS and we really don't want to have to be allocating + * higher-order pages when we are unable to shrink dcache. + */ + if (flags & SLAB_RECLAIM_ACCOUNT) + break; + /* * Large number of objects is good, but very large slabs are * currently bad for the gfp()s. @@ -1869,17 +1877,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, size = ALIGN(size, align); - if ((flags & SLAB_RECLAIM_ACCOUNT) && size <= PAGE_SIZE) { - /* - * A VFS-reclaimable slab tends to have most allocations - * as GFP_NOFS and we really don't want to have to be allocating - * higher-order pages when we are unable to shrink dcache. - */ - cachep->gfporder = 0; - cache_estimate(cachep->gfporder, size, align, flags, - &left_over, &cachep->num); - } else - left_over = calculate_slab_order(cachep, size, align, flags); + left_over = calculate_slab_order(cachep, size, align, flags); if (!cachep->num) { printk("kmem_cache_create: couldn't create cache %s.\n", name); -- cgit v1.2.2 From 7f709ed0e3ccd3e88e0632b69f00174e83f8d98b Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 7 Mar 2006 21:55:22 -0800 Subject: [PATCH] numa_maps-update fix Fix the mm/mempolicy.c build for !CONFIG_HUGETLB_PAGE. Cc: Christoph Lameter Cc: Martin Bligh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index d80fa7d8f720..954981b14303 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1789,6 +1789,7 @@ static void gather_stats(struct page *page, void *private, int pte_dirty) cond_resched(); } +#ifdef CONFIG_HUGETLB_PAGE static void check_huge_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct numa_maps *md) @@ -1814,6 +1815,13 @@ static void check_huge_range(struct vm_area_struct *vma, gather_stats(page, md, pte_dirty(*ptep)); } } +#else +static inline void check_huge_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct numa_maps *md) +{ +} +#endif int show_numa_map(struct seq_file *m, void *v) { -- cgit v1.2.2 From e2bab3d92486fb781f4d06f56339264ed1492392 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 7 Mar 2006 21:55:31 -0800 Subject: [PATCH] percpu_counter_sum() Implement percpu_counter_sum(). This is a more accurate but slower version of percpu_counter_read_positive(). We need this for Alex's speedup-ext3_statfs patch and for the nr_file accounting fix. Otherwise these things would be too inaccurate on large CPU counts. Cc: Ravikiran G Thirumalai Cc: Alex Tomas Cc: "David S. 
Miller" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swap.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/swap.c b/mm/swap.c index cce3dda59c59..e9ec06d845e8 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -489,13 +489,34 @@ void percpu_counter_mod(struct percpu_counter *fbc, long amount) if (count >= FBC_BATCH || count <= -FBC_BATCH) { spin_lock(&fbc->lock); fbc->count += count; + *pcount = 0; spin_unlock(&fbc->lock); - count = 0; + } else { + *pcount = count; } - *pcount = count; put_cpu(); } EXPORT_SYMBOL(percpu_counter_mod); + +/* + * Add up all the per-cpu counts, return the result. This is a more accurate + * but much slower version of percpu_counter_read_positive() + */ +long percpu_counter_sum(struct percpu_counter *fbc) +{ + long ret; + int cpu; + + spin_lock(&fbc->lock); + ret = fbc->count; + for_each_cpu(cpu) { + long *pcount = per_cpu_ptr(fbc->counters, cpu); + ret += *pcount; + } + spin_unlock(&fbc->lock); + return ret < 0 ? 0 : ret; +} +EXPORT_SYMBOL(percpu_counter_sum); #endif /* -- cgit v1.2.2 From 07ed76b2a085a31f427c2a912a562627947dc7de Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Tue, 7 Mar 2006 21:55:46 -0800 Subject: [PATCH] slab: allocate larger cache_cache if order 0 fails kmem_cache_init() incorrectly assumes that the cache_cache object will fit in an order 0 allocation. On very large systems, this is not true. Change the code to try larger order allocations if order 0 fails. Signed-off-by: Jack Steiner Cc: Manfred Spraul Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index 6ad6bd5a0b3e..61800b88e241 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1124,6 +1124,7 @@ void __init kmem_cache_init(void) struct cache_sizes *sizes; struct cache_names *names; int i; + int order; for (i = 0; i < NUM_INIT_LISTS; i++) { kmem_list3_init(&initkmem_list3[i]); @@ -1167,11 +1168,15 @@ void __init kmem_cache_init(void) cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size()); - cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0, - &left_over, &cache_cache.num); + for (order = 0; order < MAX_ORDER; order++) { + cache_estimate(order, cache_cache.buffer_size, + cache_line_size(), 0, &left_over, &cache_cache.num); + if (cache_cache.num) + break; + } if (!cache_cache.num) BUG(); - + cache_cache.gfporder = order; cache_cache.colour = left_over / cache_cache.colour_off; cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + sizeof(struct slab), cache_line_size()); -- cgit v1.2.2 From 85a6cd03a97f04ffff7bfedfa3172894ca9a617b Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 9 Mar 2006 17:33:34 -0800 Subject: [PATCH] page_add_file_rmap(): remove BUG_ON()s Remove two early-development BUG_ONs from page_add_file_rmap. The pfn_valid test (originally useful for checking that nobody passed an artificial struct page) comes too late, since we already have the struct page. The PageAnon test (useful when anon was first distinguished from file rmap) prevents ->nopage implementations from reusing ->mapping, which would otherwise be available. 
Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/rmap.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'mm') diff --git a/mm/rmap.c b/mm/rmap.c index d8ce5ff61454..67f0e20b101f 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -537,9 +537,6 @@ void page_add_new_anon_rmap(struct page *page, */ void page_add_file_rmap(struct page *page) { - BUG_ON(PageAnon(page)); - BUG_ON(!pfn_valid(page_to_pfn(page))); - if (atomic_inc_and_test(&page->_mapcount)) __inc_page_state(nr_mapped); } -- cgit v1.2.2 From a6bf527091b1dd40f1b6a496812ce7520621c282 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Thu, 9 Mar 2006 17:33:47 -0800 Subject: [PATCH] vmscan: no zone_reclaim if PF_MEMALLOC is set If the process has already set PF_MEMALLOC and is already using current->reclaim_state then do not try to reclaim memory from the zone. This is set by kswapd and/or synchronous global reclaim which will not take it lightly if we zap the reclaim_state. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index b0af7593d01e..7ccf763bb30b 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1883,7 +1883,8 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) if (!(gfp_mask & __GFP_WAIT) || zone->all_unreclaimable || - atomic_read(&zone->reclaim_in_progress) > 0) + atomic_read(&zone->reclaim_in_progress) > 0 || + (p->flags & PF_MEMALLOC)) return 0; node_id = zone->zone_pgdat->node_id; -- cgit v1.2.2 From f2937be5895dbae23ff66767a2fc17793e63159c Mon Sep 17 00:00:00 2001 From: Yasunori Goto Date: Thu, 9 Mar 2006 17:33:51 -0800 Subject: [PATCH] memory hotadd: pgdat->node_present_pages fix When pages are onlined, not only zone->present_pages but also pgdat->node_present_pages should be refreshed. This parameter is used to show information at /sys/device/system/node/nodeX/meminfo via si_meminfo_node(). So, it shows a strange value for MemUsed which is calculated (node_present_pages - all zones free pages). Signed-off-by: Yasunori Goto Cc: Dave Hansen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory_hotplug.c | 1 + 1 file changed, 1 insertion(+) (limited to 'mm') diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a918f77f02f3..1fe76d963ac2 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -130,6 +130,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) onlined_pages++; } zone->present_pages += onlined_pages; + zone->zone_pgdat->node_present_pages += onlined_pages; setup_per_zone_pages_min(); -- cgit v1.2.2 From 8fce4d8e3b9e3cf47cc8afeb6077e22ab795d989 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Thu, 9 Mar 2006 17:33:54 -0800 Subject: [PATCH] slab: Node rotor for freeing alien caches and remote per cpu pages. The cache reaper currently tries to free all alien caches and all remote per cpu pages in each pass of cache_reap. For machines with a large number of nodes (such as Altix) this may lead to sporadic delays of around ~10ms. Interrupts are disabled while reclaiming, creating unacceptable delays. This patch changes that behavior by adding a per cpu reap_node variable. Instead of attempting to free all caches, we free only one alien cache and the per cpu pages from one remote node. That reduces the time spent in cache_reap.
However, doing so will lengthen the time it takes to completely drain all remote per cpu pagesets and all alien caches. The time needed will grow with the number of nodes in the system. All caches are drained when they overflow their respective capacity. So the drawback here is only that a bit of memory may be wasted for awhile longer. Details: 1. Rename drain_remote_pages to drain_node_pages to allow the specification of the node to drain of pcp pages. 2. Add additional functions init_reap_node, next_reap_node for NUMA that manage a per cpu reap_node counter. 3. Add a reap_alien function that reaps only from the current reap_node. For us this seems to be a critical issue. Holdoffs of an average of ~7ms cause some HPC benchmarks to slow down significantly. F.e. NAS parallel slows down dramatically. NAS parallel has a 12-16 seconds runtime w/o rotor compared to 5.8 secs with the rotor patches. It gets down to 5.05 secs with the additional interrupt holdoff reductions. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 17 +++++++-------- mm/slab.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 70 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 791690d7d3fa..234bd4895d14 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -590,21 +590,20 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, } #ifdef CONFIG_NUMA -/* Called from the slab reaper to drain remote pagesets */ -void drain_remote_pages(void) +/* + * Called from the slab reaper to drain pagesets on a particular node that + * belong to the currently executing processor. + */ +void drain_node_pages(int nodeid) { - struct zone *zone; - int i; + int i, z; unsigned long flags; local_irq_save(flags); - for_each_zone(zone) { + for (z = 0; z < MAX_NR_ZONES; z++) { + struct zone *zone = NODE_DATA(nodeid)->node_zones + z; struct per_cpu_pageset *pset; - /* Do not drain local pagesets */ - if (zone->zone_pgdat->node_id == numa_node_id()) - continue; - pset = zone_pcp(zone, smp_processor_id()); for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { struct per_cpu_pages *pcp; diff --git a/mm/slab.c b/mm/slab.c index 61800b88e241..d0bd7f07ab04 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -789,6 +789,47 @@ static void __slab_error(const char *function, struct kmem_cache *cachep, char * dump_stack(); } +#ifdef CONFIG_NUMA +/* + * Special reaping functions for NUMA systems called from cache_reap(). + * These take care of doing round robin flushing of alien caches (containing + * objects freed on different nodes from which they were allocated) and the + * flushing of remote pcps by calling drain_node_pages. + */ +static DEFINE_PER_CPU(unsigned long, reap_node); + +static void init_reap_node(int cpu) +{ + int node; + + node = next_node(cpu_to_node(cpu), node_online_map); + if (node == MAX_NUMNODES) + node = 0; + + __get_cpu_var(reap_node) = node; +} + +static void next_reap_node(void) +{ + int node = __get_cpu_var(reap_node); + + /* + * Also drain per cpu pages on remote zones + */ + if (node != numa_node_id()) + drain_node_pages(node); + + node = next_node(node, node_online_map); + if (unlikely(node >= MAX_NUMNODES)) + node = first_node(node_online_map); + __get_cpu_var(reap_node) = node; +} + +#else +#define init_reap_node(cpu) do { } while (0) +#define next_reap_node(void) do { } while (0) +#endif + /* * Initiate the reap timer running on the target CPU. 
We run at around 1 to 2Hz * via the workqueue/eventd. @@ -806,6 +847,7 @@ static void __devinit start_cpu_timer(int cpu) * at that time. */ if (keventd_up() && reap_work->func == NULL) { + init_reap_node(cpu); INIT_WORK(reap_work, cache_reap, NULL); schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); } @@ -884,6 +926,23 @@ static void __drain_alien_cache(struct kmem_cache *cachep, } } +/* + * Called from cache_reap() to regularly drain alien caches round robin. + */ +static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) +{ + int node = __get_cpu_var(reap_node); + + if (l3->alien) { + struct array_cache *ac = l3->alien[node]; + if (ac && ac->avail) { + spin_lock_irq(&ac->lock); + __drain_alien_cache(cachep, ac, node); + spin_unlock_irq(&ac->lock); + } + } +} + static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien) { int i = 0; @@ -902,6 +961,7 @@ static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **al #else #define drain_alien_cache(cachep, alien) do { } while (0) +#define reap_alien(cachep, l3) do { } while (0) static inline struct array_cache **alloc_alien_cache(int node, int limit) { @@ -3497,8 +3557,7 @@ static void cache_reap(void *unused) check_irq_on(); l3 = searchp->nodelists[numa_node_id()]; - if (l3->alien) - drain_alien_cache(searchp, l3->alien); + reap_alien(searchp, l3); spin_lock_irq(&l3->list_lock); drain_array_locked(searchp, cpu_cache_get(searchp), 0, @@ -3548,7 +3607,7 @@ static void cache_reap(void *unused) } check_irq_on(); mutex_unlock(&cache_chain_mutex); - drain_remote_pages(); + next_reap_node(); /* Setup the next iteration */ schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); } -- cgit v1.2.2 From 4983da07f1e2e8dc81cb9d640fbf35b899cdbdf2 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 14 Mar 2006 19:50:19 -0800 Subject: [PATCH] page migration: fail if page is in a vma flagged VM_LOCKED page migration currently simply retries a couple of times if try_to_unmap() fails without inspecting the return code. However, SWAP_FAIL indicates that the page is in a vma that has the VM_LOCKED flag set (if ignore_refs ==1). We can check for that return code and avoid retrying the migration. migrate_page_remove_references() now needs to return a reason why the failure occured. So switch migrate_page_remove_references to use -Exx style error messages. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 7ccf763bb30b..4fe7e3aa02e2 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -700,7 +700,7 @@ int migrate_page_remove_references(struct page *newpage, * the page. */ if (!mapping || page_mapcount(page) + nr_refs != page_count(page)) - return 1; + return -EAGAIN; /* * Establish swap ptes for anonymous pages or destroy pte @@ -721,13 +721,15 @@ int migrate_page_remove_references(struct page *newpage, * If the page was not migrated then the PageSwapCache bit * is still set and the operation may continue. */ - try_to_unmap(page, 1); + if (try_to_unmap(page, 1) == SWAP_FAIL) + /* A vma has VM_LOCKED set -> Permanent failure */ + return -EPERM; /* * Give up if we were unable to remove all mappings. 
*/ if (page_mapcount(page)) - return 1; + return -EAGAIN; write_lock_irq(&mapping->tree_lock); @@ -738,7 +740,7 @@ if (!page_mapping(page) || page_count(page) != nr_refs || *radix_pointer != page) { write_unlock_irq(&mapping->tree_lock); - return 1; + return -EAGAIN; } /* @@ -813,10 +815,14 @@ EXPORT_SYMBOL(migrate_page_copy); */ int migrate_page(struct page *newpage, struct page *page) { + int rc; + BUG_ON(PageWriteback(page)); /* Writeback must be complete */ - if (migrate_page_remove_references(newpage, page, 2)) - return -EAGAIN; + rc = migrate_page_remove_references(newpage, page, 2); + + if (rc) + return rc; migrate_page_copy(newpage, page); -- cgit v1.2.2 From 74c002410548c7cb1744b45d17a5fa21da515b63 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 14 Mar 2006 19:50:21 -0800 Subject: [PATCH] Consistent capabilities associated with MPOL_MOVE_ALL It seems that setting scheduling policy and priorities is also the kind of thing that might be performed in apps that also use the NUMA API, so it would seem consistent to use CAP_SYS_NICE for NUMA also. So use CAP_SYS_NICE for controlling migration permissions. Signed-off-by: Christoph Lameter Cc: Michael Kerrisk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 954981b14303..2a8206009422 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -748,7 +748,7 @@ long do_mbind(unsigned long start, unsigned long len, MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) || mode > MPOL_MAX) return -EINVAL; - if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_RESOURCE)) + if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) return -EPERM; if (start & ~PAGE_MASK) @@ -942,20 +942,20 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, */ if ((current->euid != task->suid) && (current->euid != task->uid) && (current->uid != task->suid) && (current->uid != task->uid) && - !capable(CAP_SYS_ADMIN)) { + !capable(CAP_SYS_NICE)) { err = -EPERM; goto out; } task_nodes = cpuset_mems_allowed(task); /* Is the user allowed to access the target nodes? */ - if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_ADMIN)) { + if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) { err = -EPERM; goto out; } err = do_migrate_pages(mm, &old, &new, - capable(CAP_SYS_ADMIN) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); + capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); out: mmput(mm); return err; -- cgit v1.2.2
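
To see what the CAP_SYS_NICE change directly above means in practice, the short user-space sketch below exercises the migrate_pages() system call. It is an illustration only, not part of the series: it assumes the C library headers expose SYS_migrate_pages (glibc did not wrap the call at the time), and the target pid and node numbers are placeholders that must exist on the test machine.

/* Illustrative only: probe the sys_migrate_pages() permission checks. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned long maxnode = 8 * sizeof(unsigned long);	/* bits in one nodemask word */
	unsigned long from = 1UL << 0;	/* pages currently on node 0 ... */
	unsigned long to = 1UL << 1;	/* ... should move to node 1 */
	long ret;

	/* pid 0: the caller migrates its own pages; uids match, so no capability needed */
	ret = syscall(SYS_migrate_pages, 0, maxnode, &from, &to);
	if (ret < 0)
		perror("migrate_pages(self)");
	else
		printf("%ld pages could not be moved\n", ret);

	/*
	 * Migrating another process's pages (pid 1 used here purely as an
	 * example target) is where the kernel now asks for
	 * capable(CAP_SYS_NICE) instead of CAP_SYS_ADMIN; an unprivileged
	 * caller gets EPERM.
	 */
	ret = syscall(SYS_migrate_pages, 1, maxnode, &from, &to);
	if (ret < 0 && errno == EPERM)
		fprintf(stderr, "migrate_pages(pid 1): %s (expected without CAP_SYS_NICE)\n",
			strerror(errno));
	return 0;
}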
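
The percpu_counter_sum() addition earlier in the series (mm/swap.c) is easiest to see from a caller's point of view. The sketch below is an assumption-laden illustration, not code from the patches: nr_foo is a made-up counter, and the 2.6.16-era interface (percpu_counter_init() with no initial value, percpu_counter_mod() for updates) is assumed.

/* Illustrative only: a made-up "nr_foo" counter using the assumed 2.6.16-era API. */
#include <linux/percpu_counter.h>

static struct percpu_counter nr_foo;

static void foo_counter_setup(void)
{
	percpu_counter_init(&nr_foo);	/* assumed era signature: no start value */
}

static void foo_created(void)
{
	percpu_counter_mod(&nr_foo, 1);	/* cheap: batches in a per-cpu delta */
}

static void foo_destroyed(void)
{
	percpu_counter_mod(&nr_foo, -1);
}

/* Fast but approximate: ignores deltas still parked on other CPUs (up to the batch size each). */
static unsigned long foo_count_approx(void)
{
	return percpu_counter_read_positive(&nr_foo);
}

/*
 * Accurate but slower: takes fbc->lock, folds in every CPU's delta and
 * clamps the result at zero, which is what a statfs-style report on a
 * machine with many CPUs wants.
 */
static long foo_count_exact(void)
{
	return percpu_counter_sum(&nr_foo);
}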