path: root/mm
author	Steven Whitehouse <swhiteho@redhat.com>	2006-05-12 10:48:52 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2006-05-12 10:48:52 -0400
commit	7d63b54a65ce902f9aaa8efe8192aa3b983264d4 (patch)
tree	250a77bebe92cbd6edac70a649866044295876db /mm
parent	fd88de569b802c4a04aaa6ee74667775f4aed8c6 (diff)
parent	d8c3291c73b958243b33f8509d4507e76dafd055 (diff)
Merge branch 'master'
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	32
-rw-r--r--	mm/memory_hotplug.c	6
-rw-r--r--	mm/migrate.c	11
-rw-r--r--	mm/slab.c	3
-rw-r--r--	mm/sparse.c	9
5 files changed, 56 insertions, 5 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 1120338a5d0f..82c448898d05 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -697,6 +697,38 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 	return ret;
 }
 
+/**
+ * find_get_pages_contig - gang contiguous pagecache lookup
+ * @mapping:	The address_space to search
+ * @index:	The starting page index
+ * @nr_pages:	The maximum number of pages
+ * @pages:	Where the resulting pages are placed
+ *
+ * find_get_pages_contig() works exactly like find_get_pages(), except
+ * that the returned number of pages are guaranteed to be contiguous.
+ *
+ * find_get_pages_contig() returns the number of pages which were found.
+ */
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
+			       unsigned int nr_pages, struct page **pages)
+{
+	unsigned int i;
+	unsigned int ret;
+
+	read_lock_irq(&mapping->tree_lock);
+	ret = radix_tree_gang_lookup(&mapping->page_tree,
+				(void **)pages, index, nr_pages);
+	for (i = 0; i < ret; i++) {
+		if (pages[i]->mapping == NULL || pages[i]->index != index)
+			break;
+
+		page_cache_get(pages[i]);
+		index++;
+	}
+	read_unlock_irq(&mapping->tree_lock);
+	return i;
+}
+
 /*
  * Like find_get_pages, except we only return pages which are tagged with
  * `tag'. We update *index to index the next page for the traversal.
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1fe76d963ac2..1ae2b2cc3a54 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -69,12 +69,16 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
 		err = __add_section(zone, phys_start_pfn + i);
 
-		if (err)
+		/* We want to keep adding the rest of the
+		 * sections if the first ones already exist
+		 */
+		if (err && (err != -EEXIST))
 			break;
 	}
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(__add_pages);
 
 static void grow_zone_span(struct zone *zone,
 		unsigned long start_pfn, unsigned long end_pfn)
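
For context, a hedged sketch of a hot-add caller (example_add_range() is hypothetical, not from this commit): with the -EEXIST tolerance above, a range whose leading sections already exist no longer aborts the whole __add_pages() loop, and the new EXPORT_SYMBOL_GPL() makes the function reachable from modular hotplug code:

static int example_add_range(struct zone *zone, u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	/* Sections that are already present now report -EEXIST inside
	 * __add_pages() and are skipped instead of failing the request. */
	return __add_pages(zone, start_pfn, nr_pages);
}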
diff --git a/mm/migrate.c b/mm/migrate.c
index d444229f2599..1c25040693d2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -439,6 +439,17 @@ redo:
 		goto unlock_both;
 	}
 
+	/* Make sure the dirty bit is up to date */
+	if (try_to_unmap(page, 1) == SWAP_FAIL) {
+		rc = -EPERM;
+		goto unlock_both;
+	}
+
+	if (page_mapcount(page)) {
+		rc = -EAGAIN;
+		goto unlock_both;
+	}
+
 	/*
 	 * Default handling if a filesystem does not provide
 	 * a migration function. We can only migrate clean
diff --git a/mm/slab.c b/mm/slab.c
index af5c5237e11a..c32af7e7581e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -979,7 +979,8 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 	 * That way we could avoid the overhead of putting the objects
 	 * into the free lists and getting them back later.
 	 */
-	transfer_objects(rl3->shared, ac, ac->limit);
+	if (rl3->shared)
+		transfer_objects(rl3->shared, ac, ac->limit);
 
 	free_block(cachep, ac->entry, ac->avail, node);
 	ac->avail = 0;
diff --git a/mm/sparse.c b/mm/sparse.c
index 0a51f36ba3a1..d7c32de99ee8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,10 @@ static struct mem_section *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 				   sizeof(struct mem_section);
 
-	section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+	if (system_state == SYSTEM_RUNNING)
+		section = kmalloc_node(array_size, GFP_KERNEL, nid);
+	else
+		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
 
 	if (section)
 		memset(section, 0, array_size);
@@ -281,9 +284,9 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 
 	ret = sparse_init_one_section(ms, section_nr, memmap);
 
-	if (ret <= 0)
-		__kfree_section_memmap(memmap, nr_pages);
 out:
 	pgdat_resize_unlock(pgdat, &flags);
+	if (ret <= 0)
+		__kfree_section_memmap(memmap, nr_pages);
 	return ret;
 }
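
The second sparse.c hunk only reorders the cleanup: on failure the memmap is now freed after pgdat_resize_unlock() rather than under it, presumably because the free path can reach vfree() and should not run while the irq-disabling resize lock is held. A schematic of the resulting ordering (example_install_section() is a hypothetical stand-in, not the literal sparse_add_one_section()):

static int example_install_section(struct pglist_data *pgdat, struct page *memmap,
				   unsigned long nr_pages)
{
	unsigned long flags;
	int ret;

	pgdat_resize_lock(pgdat, &flags);
	ret = 0;	/* ... install the section; may fail ... */
	pgdat_resize_unlock(pgdat, &flags);

	/* Free only after the irq-disabling resize lock has been dropped. */
	if (ret <= 0)
		__kfree_section_memmap(memmap, nr_pages);
	return ret;
}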