Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	| 32
-rw-r--r--	mm/page_alloc.c	|  2
-rw-r--r--	mm/slab.c	|  5
-rw-r--r--	mm/vmscan.c	|  2
4 files changed, 37 insertions, 4 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 3ef20739e725..fd57442186cb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -697,6 +697,38 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 	return ret;
 }
 
+/**
+ * find_get_pages_contig - gang contiguous pagecache lookup
+ * @mapping:	The address_space to search
+ * @index:	The starting page index
+ * @nr_pages:	The maximum number of pages
+ * @pages:	Where the resulting pages are placed
+ *
+ * find_get_pages_contig() works exactly like find_get_pages(), except
+ * that the returned number of pages are guaranteed to be contiguous.
+ *
+ * find_get_pages_contig() returns the number of pages which were found.
+ */
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
+			       unsigned int nr_pages, struct page **pages)
+{
+	unsigned int i;
+	unsigned int ret;
+
+	read_lock_irq(&mapping->tree_lock);
+	ret = radix_tree_gang_lookup(&mapping->page_tree,
+				(void **)pages, index, nr_pages);
+	for (i = 0; i < ret; i++) {
+		if (pages[i]->mapping == NULL || pages[i]->index != index)
+			break;
+
+		page_cache_get(pages[i]);
+		index++;
+	}
+	read_unlock_irq(&mapping->tree_lock);
+	return i;
+}
+
 /*
  * Like find_get_pages, except we only return pages which are tagged with
  * `tag'.  We update *index to index the next page for the traversal.
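As context for readers browsing this commit, here is a hypothetical caller sketch (not part of the patch; the buffer size of 16 and the do_something_with() helper are made up for illustration) showing how the new find_get_pages_contig() might be used and how the page references it takes are dropped:

	struct page *pages[16];
	unsigned int nr, i;

	/* Grab up to 16 pages starting at 'index'; the run stops at the first hole. */
	nr = find_get_pages_contig(mapping, index, 16, pages);

	for (i = 0; i < nr; i++) {
		/* pages[i] has index 'index + i' and holds an extra reference */
		do_something_with(pages[i]);	/* hypothetical helper */
		page_cache_release(pages[i]);	/* drop the reference taken by the lookup */
	}

Unlike find_get_pages(), a hole in the page cache ends the run early, so callers only ever see a contiguous range starting at the requested index.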
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 123c60586740..ea77c999047e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1962,7 +1962,7 @@ static inline void free_zone_pagesets(int cpu)
 	}
 }
 
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int pageset_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
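All three annotation removals in this diff (here and in the mm/slab.c and mm/vmscan.c hunks below) touch CPU hotplug notifier callbacks of the same shape. As context, a minimal sketch of how such a callback is typically wired up in this kernel era (example_cpu_callback and its body are illustrative, not taken from this commit):

	#include <linux/kernel.h>
	#include <linux/cpu.h>
	#include <linux/notifier.h>

	static int example_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
	{
		long cpu = (long)hcpu;

		switch (action) {
		case CPU_UP_PREPARE:
			/* set up per-cpu state before 'cpu' comes online */
			printk(KERN_INFO "cpu %ld coming up\n", cpu);
			break;
		case CPU_DEAD:
			/* tear down per-cpu state after 'cpu' has gone away */
			printk(KERN_INFO "cpu %ld is gone\n", cpu);
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block example_cpu_nb = {
		.notifier_call = example_cpu_callback,
	};

	/* at init time: register_cpu_notifier(&example_cpu_nb); */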
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -979,7 +979,8 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 	 * That way we could avoid the overhead of putting the objects
 	 * into the free lists and getting them back later.
 	 */
-	transfer_objects(rl3->shared, ac, ac->limit);
+	if (rl3->shared)
+		transfer_objects(rl3->shared, ac, ac->limit);
 
 	free_block(cachep, ac->entry, ac->avail, node);
 	ac->avail = 0;
@@ -1036,7 +1037,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 
 #endif
 
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
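The new rl3->shared check in the first mm/slab.c hunk matters because a node's shared array_cache may not have been allocated (it can be NULL), and transfer_objects() uses its destination cache's fields right away, so a NULL target would oops. A simplified userspace sketch of the guarded-transfer pattern (the struct layout, field names and drain_into_shared() are approximations for illustration, not the kernel's exact code):

	#include <string.h>

	/* Simplified stand-in for the kernel's array_cache. */
	struct array_cache {
		unsigned int avail;	/* objects currently cached */
		unsigned int limit;	/* capacity */
		void *entry[64];
	};

	/* Move up to 'max' objects from 'from' into 'to'; 'to' must be non-NULL. */
	static unsigned int transfer_objects(struct array_cache *to,
					     struct array_cache *from,
					     unsigned int max)
	{
		unsigned int nr = from->avail;

		if (nr > max)
			nr = max;
		if (nr > to->limit - to->avail)	/* a NULL 'to' would crash here */
			nr = to->limit - to->avail;
		if (!nr)
			return 0;

		memcpy(&to->entry[to->avail], &from->entry[from->avail - nr],
		       sizeof(void *) * nr);
		to->avail += nr;
		from->avail -= nr;
		return nr;
	}

	static void drain_into_shared(struct array_cache *shared, struct array_cache *ac)
	{
		/* The fix above: only transfer when the shared array actually exists. */
		if (shared)
			transfer_objects(shared, ac, ac->limit);
		/* whatever remains in 'ac' is then freed back to the slab lists */
	}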
diff --git a/mm/vmscan.c b/mm/vmscan.c
index acdf001d6941..4649a63a8cb6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1328,7 +1328,7 @@ repeat:
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	pg_data_t *pgdat;