Diffstat (limited to 'mm/swap.c')
-rw-r--r-- | mm/swap.c | 107
1 file changed, 94 insertions, 13 deletions
@@ -30,15 +30,92 @@
 #include <linux/notifier.h>
 #include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
+#include <linux/interrupt.h>
 
 #include "internal.h"
 
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
+#ifdef CONFIG_PREEMPT_RT
+/*
+ * On PREEMPT_RT we don't want to disable preemption for cpu variables.
+ * We grab a cpu and then use that cpu to lock the variables accordingly.
+ *
+ * (On !PREEMPT_RT this turns into normal preempt-off sections, as before.)
+ */
+static DEFINE_PER_CPU_LOCKED(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
+static DEFINE_PER_CPU_LOCKED(struct pagevec, lru_rotate_pvecs);
+
+#define swap_get_cpu_var_irq_save(var, flags, cpu)	\
+	({						\
+		(void)flags;				\
+		&get_cpu_var_locked(var, &cpu);		\
+	})
+
+#define swap_put_cpu_var_irq_restore(var, flags, cpu)	\
+	put_cpu_var_locked(var, cpu)
+
+#define swap_get_cpu_var(var, cpu)			\
+	&get_cpu_var_locked(var, &cpu)
+
+#define swap_put_cpu_var(var, cpu)			\
+	put_cpu_var_locked(var, cpu)
+
+#define swap_per_cpu_lock(var, cpu)			\
+	({						\
+		spin_lock(&__get_cpu_lock(var, cpu));	\
+		&__get_cpu_var_locked(var, cpu);	\
+	})
+
+#define swap_per_cpu_unlock(var, cpu)			\
+	spin_unlock(&__get_cpu_lock(var, cpu));
+
+#define swap_get_cpu()		raw_smp_processor_id()
+
+#define swap_put_cpu()		do { } while (0)
+
+#define swap_irq_save(flags)	do { (void)flags; } while (0)
+
+#define swap_irq_restore(flags)	do { (void)flags; } while (0)
+
+#else
+
 static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 
+#define swap_get_cpu_var_irq_save(var, flags, cpu)	\
+	({						\
+		(void)cpu;				\
+		local_irq_save(flags);			\
+		&__get_cpu_var(var);			\
+	})
+
+#define swap_put_cpu_var_irq_restore(var, flags, cpu)	\
+	local_irq_restore(flags)
+
+#define swap_get_cpu_var(var, cpu)			\
+	({						\
+		(void)cpu;				\
+		&get_cpu_var(var);			\
+	})
+
+#define swap_put_cpu_var(var, cpu)	put_cpu_var(var)
+
+#define swap_per_cpu_lock(var, cpu)	&per_cpu(var, cpu)
+
+#define swap_per_cpu_unlock(var, cpu)	do { } while (0)
+
+#define swap_get_cpu()		get_cpu()
+
+#define swap_put_cpu()		put_cpu()
+
+#define swap_irq_save(flags)	local_irq_save(flags)
+
+#define swap_irq_restore(flags)	local_irq_restore(flags)
+
+#endif
+
 /*
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs. But it gets used by networking.
@@ -141,13 +218,13 @@ void rotate_reclaimable_page(struct page *page)
 	    !PageUnevictable(page) && PageLRU(page)) {
 		struct pagevec *pvec;
 		unsigned long flags;
+		int cpu;
 
 		page_cache_get(page);
-		local_irq_save(flags);
-		pvec = &__get_cpu_var(lru_rotate_pvecs);
+		pvec = swap_get_cpu_var_irq_save(lru_rotate_pvecs, flags, cpu);
 		if (!pagevec_add(pvec, page))
 			pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		swap_put_cpu_var_irq_restore(lru_rotate_pvecs, flags, cpu);
 	}
 }
 
@@ -216,12 +293,14 @@ EXPORT_SYMBOL(mark_page_accessed);
 
 void __lru_cache_add(struct page *page, enum lru_list lru)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
+	struct pagevec *pvec;
+	int cpu;
 
+	pvec = swap_get_cpu_var(lru_add_pvecs, cpu)[lru];
 	page_cache_get(page);
 	if (!pagevec_add(pvec, page))
 		____pagevec_lru_add(pvec, lru);
-	put_cpu_var(lru_add_pvecs);
+	swap_put_cpu_var(lru_add_pvecs, cpu);
 }
 
 /**
@@ -271,31 +350,33 @@ void add_page_to_unevictable_list(struct page *page)
  */
 static void drain_cpu_pagevecs(int cpu)
 {
-	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
-	struct pagevec *pvec;
+	struct pagevec *pvecs, *pvec;
 	int lru;
 
+	pvecs = swap_per_cpu_lock(lru_add_pvecs, cpu)[0];
 	for_each_lru(lru) {
 		pvec = &pvecs[lru - LRU_BASE];
 		if (pagevec_count(pvec))
 			____pagevec_lru_add(pvec, lru);
 	}
+	swap_per_cpu_unlock(lru_add_pvecs, cpu);
 
-	pvec = &per_cpu(lru_rotate_pvecs, cpu);
+	pvec = swap_per_cpu_lock(lru_rotate_pvecs, cpu);
 	if (pagevec_count(pvec)) {
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
-		local_irq_save(flags);
+		swap_irq_save(flags);
 		pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		swap_irq_restore(flags);
 	}
+	swap_per_cpu_unlock(lru_rotate_pvecs, cpu);
 }
 
 void lru_add_drain(void)
 {
-	drain_cpu_pagevecs(get_cpu());
-	put_cpu();
+	drain_cpu_pagevecs(swap_get_cpu());
+	swap_put_cpu();
 }
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
@@ -369,7 +450,7 @@ void release_pages(struct page **pages, int nr, int cold)
 			}
 			__pagevec_free(&pages_to_free);
 			pagevec_reinit(&pages_to_free);
- 		}
+		}
 	}
 	if (zone)
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
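
The swap_* helpers introduced in the first hunk are defined so that call sites look the same on both configurations: with PREEMPT_RT the accessor pair locks the per-CPU variable via get_cpu_var_locked()/put_cpu_var_locked(), and without it the pair collapses into the familiar local_irq_save()/__get_cpu_var() or get_cpu_var()/put_cpu_var() sequence. A minimal sketch of the calling pattern, modelled on the rotate_reclaimable_page() hunk above; the function name example_rotate() is made up for illustration and is not part of the patch:

	/* Hypothetical caller, shown only to illustrate the accessor pattern. */
	static void example_rotate(struct page *page)
	{
		struct pagevec *pvec;
		unsigned long flags;	/* consumed by the !PREEMPT_RT macros, (void)-cast on RT */
		int cpu;		/* consumed by the PREEMPT_RT macros, (void)-cast on !RT */

		page_cache_get(page);
		/* RT: lock this CPU's lru_rotate_pvecs; !RT: disable IRQs and use this CPU's copy */
		pvec = swap_get_cpu_var_irq_save(lru_rotate_pvecs, flags, cpu);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		swap_put_cpu_var_irq_restore(lru_rotate_pvecs, flags, cpu);
	}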