Diffstat (limited to 'mm')

 mm/filemap.c        | 32 ++++++++++++++++++++++++++++++++
 mm/memory_hotplug.c |  6 +++++-
 mm/mempolicy.c      |  1 -
 mm/migrate.c        | 11 +++++++++++
 mm/page_alloc.c     |  2 +-
 mm/shmem.c          |  3 +++
 mm/slab.c           |  5 +++--
 mm/sparse.c         |  9 ++++++---
 mm/vmscan.c         |  2 +-
 9 files changed, 62 insertions(+), 9 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 3ef20739e725..fd57442186cb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -697,6 +697,38 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 	return ret;
 }
 
+/**
+ * find_get_pages_contig - gang contiguous pagecache lookup
+ * @mapping:	The address_space to search
+ * @index:	The starting page index
+ * @nr_pages:	The maximum number of pages
+ * @pages:	Where the resulting pages are placed
+ *
+ * find_get_pages_contig() works exactly like find_get_pages(), except
+ * that the returned number of pages are guaranteed to be contiguous.
+ *
+ * find_get_pages_contig() returns the number of pages which were found.
+ */
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
+			       unsigned int nr_pages, struct page **pages)
+{
+	unsigned int i;
+	unsigned int ret;
+
+	read_lock_irq(&mapping->tree_lock);
+	ret = radix_tree_gang_lookup(&mapping->page_tree,
+				(void **)pages, index, nr_pages);
+	for (i = 0; i < ret; i++) {
+		if (pages[i]->mapping == NULL || pages[i]->index != index)
+			break;
+
+		page_cache_get(pages[i]);
+		index++;
+	}
+	read_unlock_irq(&mapping->tree_lock);
+	return i;
+}
+
 /*
  * Like find_get_pages, except we only return pages which are tagged with
  * `tag'. We update *index to index the next page for the traversal.
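
Note: every page returned by find_get_pages_contig() carries a reference taken via page_cache_get(), so callers must drop those references when done. A minimal caller sketch (illustrative only; the function name is hypothetical and not part of this patch):

static void drop_contig_range(struct address_space *mapping, pgoff_t start)
{
	struct page *pages[16];
	unsigned int i, nr;

	/* pages[0..nr-1] are file-contiguous starting at 'start' */
	nr = find_get_pages_contig(mapping, start, 16, pages);
	for (i = 0; i < nr; i++)
		page_cache_release(pages[i]);	/* drop the lookup reference */
}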
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1fe76d963ac2..1ae2b2cc3a54 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -69,12 +69,16 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
 		err = __add_section(zone, phys_start_pfn + i);
 
-		if (err)
+		/* We want to keep adding the rest of the
+		 * sections if the first ones already exist
+		 */
+		if (err && (err != -EEXIST))
 			break;
 	}
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(__add_pages);
 
 static void grow_zone_span(struct zone *zone,
 		unsigned long start_pfn, unsigned long end_pfn)
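
Note: because the loop returns whatever the last __add_section() produced, __add_pages() can still return -EEXIST on an otherwise successful run when the final section already existed. A hypothetical caller would therefore treat -EEXIST as success (sketch, not from the patch):

static int example_add_sections(struct zone *zone, unsigned long start_pfn,
				unsigned long nr_pages)
{
	int ret = __add_pages(zone, start_pfn, nr_pages);

	/* re-adding already-present sections is expected, not an error */
	return (ret == -EEXIST) ? 0 : ret;
}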
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index dec8249e972d..8778f58880c4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1761,7 +1761,6 @@ static void gather_stats(struct page *page, void *private, int pte_dirty)
 		md->mapcount_max = count;
 
 	md->node[page_to_nid(page)]++;
-	cond_resched();
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/migrate.c b/mm/migrate.c
index d444229f2599..1c25040693d2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -439,6 +439,17 @@ redo:
 		goto unlock_both;
 	}
 
+	/* Make sure the dirty bit is up to date */
+	if (try_to_unmap(page, 1) == SWAP_FAIL) {
+		rc = -EPERM;
+		goto unlock_both;
+	}
+
+	if (page_mapcount(page)) {
+		rc = -EAGAIN;
+		goto unlock_both;
+	}
+
 	/*
 	 * Default handling if a filesystem does not provide
 	 * a migration function. We can only migrate clean
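
Note: a reading of the two new bail-outs (hedged; the rationale is not spelled out in the patch itself): try_to_unmap() in this kernel takes an ignore-references flag as its second argument, and tearing down the ptes folds their dirty bits into the struct page, so the clean-page test in the default path below sees up-to-date state; a non-zero mapcount afterwards means some mapping survived or was re-established, so the migration is retried. Condensed:

	if (try_to_unmap(page, 1) == SWAP_FAIL)	/* 1 == ignore pte references */
		return -EPERM;		/* a mapping could not be removed */
	if (page_mapcount(page))
		return -EAGAIN;		/* still mapped somewhere; retry later */
	/* from here on, PageDirty() also reflects the former pte dirty bits */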
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 123c60586740..ea77c999047e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1962,7 +1962,7 @@ static inline void free_zone_pagesets(int cpu)
 	}
 }
 
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int pageset_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
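
Note: dropping __cpuinit here (and __devinit in mm/slab.c and mm/vmscan.c below) keeps these callbacks resident after boot; depending on the configuration, such annotated code can be discarded once initialization finishes, which is unsafe for a notifier that fires on later CPU hotplug events. Registration goes through the ordinary notifier interface, roughly like this (sketch; the init function name is hypothetical):

static struct notifier_block pageset_notifier = {
	.notifier_call = pageset_cpuup_callback,
};

static int __init pageset_notifier_init(void)
{
	/* the callback may now fire at any point after boot */
	return register_cpu_notifier(&pageset_notifier);
}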
diff --git a/mm/shmem.c b/mm/shmem.c
index 37eaf42ed2c6..4c5e68e4e9ae 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -46,6 +46,8 @@
 #include <linux/mempolicy.h>
 #include <linux/namei.h>
 #include <linux/ctype.h>
+#include <linux/migrate.h>
+
 #include <asm/uaccess.h>
 #include <asm/div64.h>
 #include <asm/pgtable.h>
@@ -2173,6 +2175,7 @@ static struct address_space_operations shmem_aops = {
 	.prepare_write	= shmem_prepare_write,
 	.commit_write	= simple_commit_write,
 #endif
+	.migratepage	= migrate_page,
 };
 
 static struct file_operations shmem_file_operations = {
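
Note: shmem pages carry no filesystem-private buffers, so the generic migrate_page() helper is sufficient and no shmem-specific migration code is needed. Any similar in-memory filesystem could presumably be wired up the same way (hypothetical aops, shown for illustration):

static struct address_space_operations example_aops = {
	.readpage	= simple_readpage,
	.migratepage	= migrate_page,
};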
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -979,7 +979,8 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 	 * That way we could avoid the overhead of putting the objects
 	 * into the free lists and getting them back later.
 	 */
-	transfer_objects(rl3->shared, ac, ac->limit);
+	if (rl3->shared)
+		transfer_objects(rl3->shared, ac, ac->limit);
 
 	free_block(cachep, ac->entry, ac->avail, node);
 	ac->avail = 0;
@@ -1036,7 +1037,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 
 #endif
 
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
 				unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
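
Note: the first hunk guards against a node whose shared array cache was never set up, in which case rl3->shared is NULL and transfer_objects() would dereference it. An alternative shape for the fix (not what the patch does) would be a wrapper that tolerates a NULL destination, so every caller is safe by construction:

static int transfer_objects_safe(struct array_cache *to,
				 struct array_cache *from, unsigned int max)
{
	if (!to)
		return 0;	/* no shared cache; nothing transferred */
	return transfer_objects(to, from, max);
}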
diff --git a/mm/sparse.c b/mm/sparse.c
index 0a51f36ba3a1..d7c32de99ee8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,10 @@ static struct mem_section *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 				   sizeof(struct mem_section);
 
-	section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+	if (system_state == SYSTEM_RUNNING)
+		section = kmalloc_node(array_size, GFP_KERNEL, nid);
+	else
+		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
 
 	if (section)
 		memset(section, 0, array_size);
@@ -281,9 +284,9 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 
 	ret = sparse_init_one_section(ms, section_nr, memmap);
 
-	if (ret <= 0)
-		__kfree_section_memmap(memmap, nr_pages);
 out:
 	pgdat_resize_unlock(pgdat, &flags);
+	if (ret <= 0)
+		__kfree_section_memmap(memmap, nr_pages);
 	return ret;
 }
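
Note: two independent fixes here. The first picks the allocator by system_state, since the bootmem allocator is unusable once boot has finished and kmalloc_node() is unusable before it; the second moves __kfree_section_memmap() after pgdat_resize_unlock(), presumably because the free path may sleep and so must not run under the resize spinlock. The allocation pattern, generalized (illustrative sketch):

static void *alloc_early_or_late(int nid, unsigned long size)
{
	/* bootmem is only valid before the system is fully running */
	if (system_state == SYSTEM_RUNNING)
		return kmalloc_node(size, GFP_KERNEL, nid);
	return alloc_bootmem_node(NODE_DATA(nid), size);
}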
diff --git a/mm/vmscan.c b/mm/vmscan.c
index acdf001d6941..4649a63a8cb6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1328,7 +1328,7 @@ repeat:
    not required for correctness. So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	pg_data_t *pgdat;