Diffstat (limited to 'mm/swap.c')
 -rw-r--r--  mm/swap.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -320,11 +320,6 @@ static inline void activate_page_drain(int cpu)
 {
 }
 
-static bool need_activate_page_drain(int cpu)
-{
-	return false;
-}
-
 void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
@@ -653,13 +648,15 @@ void lru_add_drain(void)
 	put_cpu();
 }
 
+#ifdef CONFIG_SMP
+
+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
 
-static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
-
 /*
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
  * kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -702,6 +699,12 @@ void lru_add_drain_all(void)
 
 	mutex_unlock(&lock);
 }
+#else
+void lru_add_drain_all(void)
+{
+	lru_add_drain();
+}
+#endif
 
 /**
  * release_pages - batched put_page()
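For orientation, below is a minimal sketch of how this region of mm/swap.c reads once the patch is applied. Only the lines visible in the hunks above are taken as given; the SMP body of lru_add_drain_all(), which presumably queues lru_add_drain_per_cpu() on the per-cpu work_structs as before, is elided rather than reproduced.

/* Sketch: the per-cpu drain machinery is compiled only for CONFIG_SMP. */
#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

void lru_add_drain_all(void)
{
	/* Body unchanged by this patch: schedules lru_add_drain_per_cpu()
	 * on each CPU's work_struct and waits for the work to complete. */
}

#else
/* UP build: there is only one CPU's worth of per-cpu pagevecs, so
 * draining the local CPU is equivalent to draining them all. */
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif

This layout is also consistent with the first hunk dropping the !SMP stub of need_activate_page_drain(): once lru_add_drain_all() gets a dedicated !SMP fallback, that stub presumably has no remaining caller.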
