Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c | 58
1 files changed, 39 insertions, 19 deletions
@@ -34,8 +34,6 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-#ifdef CONFIG_HUGETLB_PAGE
-
 void put_page(struct page *page)
 {
         if (unlikely(PageCompound(page))) {
@@ -52,7 +50,6 @@ void put_page(struct page *page)
         __page_cache_release(page);
 }
 EXPORT_SYMBOL(put_page);
-#endif
 
 /*
  * Writeback is about to end against a page which has been marked for immediate
@@ -159,18 +156,50 @@ void fastcall lru_cache_add_active(struct page *page)
         put_cpu_var(lru_add_active_pvecs);
 }
 
-void lru_add_drain(void)
+static void __lru_add_drain(int cpu)
 {
-        struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
+        struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
 
+        /* CPU is dead, so no locking needed. */
         if (pagevec_count(pvec))
                 __pagevec_lru_add(pvec);
-        pvec = &__get_cpu_var(lru_add_active_pvecs);
+        pvec = &per_cpu(lru_add_active_pvecs, cpu);
         if (pagevec_count(pvec))
                 __pagevec_lru_add_active(pvec);
-        put_cpu_var(lru_add_pvecs);
 }
 
+void lru_add_drain(void)
+{
+        __lru_add_drain(get_cpu());
+        put_cpu();
+}
+
+#ifdef CONFIG_NUMA
+static void lru_add_drain_per_cpu(void *dummy)
+{
+        lru_add_drain();
+}
+
+/*
+ * Returns 0 for success
+ */
+int lru_add_drain_all(void)
+{
+        return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+}
+
+#else
+
+/*
+ * Returns 0 for success
+ */
+int lru_add_drain_all(void)
+{
+        lru_add_drain();
+        return 0;
+}
+#endif
+
 /*
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs. But it gets used by networking.
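This hunk splits the old lru_add_drain() into a per-CPU worker, __lru_add_drain(cpu), plus a preemption-safe wrapper for the local CPU, and adds lru_add_drain_all() to flush every CPU's pending pagevecs (through schedule_on_each_cpu() on NUMA builds). The following is a minimal user-space sketch of that batching pattern, not kernel code; every name in it (NR_CPUS_MODEL, struct pagevec_model, drain_cpu, drain_local, drain_all) is invented for the illustration.

/* Stand-alone model of per-CPU pagevec batching and draining.
 * Assumption: a fixed number of "CPUs", each with one pending batch;
 * real per-CPU variables and preemption control are replaced by a
 * plain array and an explicit "current cpu" argument. */
#include <stdio.h>

#define NR_CPUS_MODEL   4
#define BATCH_SIZE      14      /* small fixed batch, like the kernel's PAGEVEC_SIZE */

struct pagevec_model {
        unsigned int nr;                        /* pages currently batched */
        unsigned long pages[BATCH_SIZE];
};

static struct pagevec_model lru_add[NR_CPUS_MODEL];    /* one batch per "CPU" */

/* Plays the role of __lru_add_drain(cpu): flush one CPU's pending batch. */
static void drain_cpu(int cpu)
{
        struct pagevec_model *pvec = &lru_add[cpu];

        if (pvec->nr) {
                printf("cpu %d: moving %u batched pages to the LRU\n",
                       cpu, pvec->nr);
                pvec->nr = 0;
        }
}

/* Plays the role of lru_add_drain(): drain only the CPU we are running on. */
static void drain_local(int current_cpu)
{
        drain_cpu(current_cpu);
}

/* Plays the role of lru_add_drain_all(): flush every CPU's batch.  The
 * kernel schedules the drain on each CPU; the model just loops.
 * Returns 0 for success, mirroring the interface added by the patch. */
static int drain_all(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
                drain_cpu(cpu);
        return 0;
}

int main(void)
{
        lru_add[0].nr = 3;              /* pretend two CPUs batched pages */
        lru_add[2].nr = 14;

        drain_local(0);                 /* flushes only CPU 0 */
        return drain_all();             /* flushes whatever is left */
}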
@@ -381,6 +410,8 @@ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
         return pagevec_count(pvec);
 }
 
+EXPORT_SYMBOL(pagevec_lookup);
+
 unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                 pgoff_t *index, int tag, unsigned nr_pages)
 {
@@ -415,17 +446,6 @@ void vm_acct_memory(long pages)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void lru_drain_cache(unsigned int cpu)
-{
-        struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
-
-        /* CPU is dead, so no locking needed. */
-        if (pagevec_count(pvec))
-                __pagevec_lru_add(pvec);
-        pvec = &per_cpu(lru_add_active_pvecs, cpu);
-        if (pagevec_count(pvec))
-                __pagevec_lru_add_active(pvec);
-}
 
 /* Drop the CPU's cached committed space back into the central pool. */
 static int cpu_swap_callback(struct notifier_block *nfb,
@@ -438,7 +458,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
         if (action == CPU_DEAD) {
                 atomic_add(*committed, &vm_committed_space);
                 *committed = 0;
-                lru_drain_cache((long)hcpu);
+                __lru_add_drain((long)hcpu);
         }
         return NOTIFY_OK;
 }
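The last two hunks drop the duplicate lru_drain_cache() and let the CPU-hotplug callback reuse __lru_add_drain() for the CPU that just went offline. Below is a rough stand-alone sketch of that CPU_DEAD handling: the dead CPU's cached committed-space count is folded back into the global pool and its leftover batch is drained on its behalf. All identifiers (committed_pool, per_cpu_committed, pending_pages, flush_dead_cpu_batch, cpu_dead_model) are invented for the example and are not the kernel's names.

/* Stand-alone model of the CPU_DEAD branch of cpu_swap_callback(). */
#include <stdio.h>

#define NR_CPUS_MODEL 4

static long committed_pool;                       /* stands in for vm_committed_space */
static long per_cpu_committed[NR_CPUS_MODEL];     /* per-CPU cached deltas */
static unsigned int pending_pages[NR_CPUS_MODEL]; /* per-CPU batched pages */

/* Stands in for __lru_add_drain(cpu) run from the hotplug notifier:
 * the CPU is already dead, so nobody else touches its batch. */
static void flush_dead_cpu_batch(int cpu)
{
        if (pending_pages[cpu]) {
                printf("cpu %d dead: flushing %u batched pages\n",
                       cpu, pending_pages[cpu]);
                pending_pages[cpu] = 0;
        }
}

/* Stands in for the CPU_DEAD case: return cached space, then drain. */
static void cpu_dead_model(int cpu)
{
        committed_pool += per_cpu_committed[cpu];
        per_cpu_committed[cpu] = 0;
        flush_dead_cpu_batch(cpu);
}

int main(void)
{
        per_cpu_committed[1] = 32;
        pending_pages[1] = 7;

        cpu_dead_model(1);
        printf("global committed space now %ld\n", committed_pool);
        return 0;
}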