diff options
author | Peter Zijlstra <peterz@infradead.org> | 2009-07-03 09:44:05 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2009-07-24 11:45:47 -0400 |
commit | 1578a2b7d1300f4e27cea087e6cdce9b8fbbcb4a (patch) | |
tree | bffa5be9a29b71d5299f975ba6b652f70da827ba | |
parent | 46167aec68f48cbbeff23cae9173bc4d19a7bcda (diff) |
mm: quicklist: Convert to percpu locked
Use per cpu locked for quicklists as well to make the code
preemptible.
[ tglx: folded Ingo's "release before free page fix" ]
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | include/linux/quicklist.h | 27 | ||||
-rw-r--r-- | mm/quicklist.c | 15 |
2 files changed, 24 insertions(+), 18 deletions(-)
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h index bd466439c588..1bc3d46b9294 100644 --- a/include/linux/quicklist.h +++ b/include/linux/quicklist.h | |||
@@ -18,7 +18,7 @@ struct quicklist { | |||
18 | int nr_pages; | 18 | int nr_pages; |
19 | }; | 19 | }; |
20 | 20 | ||
21 | DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; | 21 | DECLARE_PER_CPU_LOCKED(struct quicklist, quicklist)[CONFIG_NR_QUICK]; |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * The two key functions quicklist_alloc and quicklist_free are inline so | 24 | * The two key functions quicklist_alloc and quicklist_free are inline so |
@@ -30,19 +30,27 @@ DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; | |||
30 | * The fast patch in quicklist_alloc touched only a per cpu cacheline and | 30 | * The fast patch in quicklist_alloc touched only a per cpu cacheline and |
31 | * the first cacheline of the page itself. There is minmal overhead involved. | 31 | * the first cacheline of the page itself. There is minmal overhead involved. |
32 | */ | 32 | */ |
33 | static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *)) | 33 | static inline void *__quicklist_alloc(struct quicklist *q) |
34 | { | 34 | { |
35 | struct quicklist *q; | 35 | void **p = q->page; |
36 | void **p = NULL; | ||
37 | 36 | ||
38 | q = &get_cpu_var(quicklist)[nr]; | ||
39 | p = q->page; | ||
40 | if (likely(p)) { | 37 | if (likely(p)) { |
41 | q->page = p[0]; | 38 | q->page = p[0]; |
42 | p[0] = NULL; | 39 | p[0] = NULL; |
43 | q->nr_pages--; | 40 | q->nr_pages--; |
44 | } | 41 | } |
45 | put_cpu_var(quicklist); | 42 | return p; |
43 | } | ||
44 | |||
45 | static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *)) | ||
46 | { | ||
47 | struct quicklist *q; | ||
48 | void **p; | ||
49 | int cpu; | ||
50 | |||
51 | q = &get_cpu_var_locked(quicklist, &cpu)[nr]; | ||
52 | p = __quicklist_alloc(q); | ||
53 | put_cpu_var_locked(quicklist, cpu); | ||
46 | if (likely(p)) | 54 | if (likely(p)) |
47 | return p; | 55 | return p; |
48 | 56 | ||
@@ -56,12 +64,13 @@ static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p, | |||
56 | struct page *page) | 64 | struct page *page) |
57 | { | 65 | { |
58 | struct quicklist *q; | 66 | struct quicklist *q; |
67 | int cpu; | ||
59 | 68 | ||
60 | q = &get_cpu_var(quicklist)[nr]; | 69 | q = &get_cpu_var_locked(quicklist, &cpu)[nr]; |
61 | *(void **)p = q->page; | 70 | *(void **)p = q->page; |
62 | q->page = p; | 71 | q->page = p; |
63 | q->nr_pages++; | 72 | q->nr_pages++; |
64 | put_cpu_var(quicklist); | 73 | put_cpu_var_locked(quicklist, cpu); |
65 | } | 74 | } |
66 | 75 | ||
67 | static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp) | 76 | static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp) |
diff --git a/mm/quicklist.c b/mm/quicklist.c index e66d07d1b4ff..03341b014c2b 100644 --- a/mm/quicklist.c +++ b/mm/quicklist.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/quicklist.h> | 20 | #include <linux/quicklist.h> |
21 | 21 | ||
22 | DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; | 22 | DEFINE_PER_CPU_LOCKED(struct quicklist, quicklist)[CONFIG_NR_QUICK]; |
23 | 23 | ||
24 | #define FRACTION_OF_NODE_MEM 16 | 24 | #define FRACTION_OF_NODE_MEM 16 |
25 | 25 | ||
@@ -66,17 +66,14 @@ void quicklist_trim(int nr, void (*dtor)(void *), | |||
66 | { | 66 | { |
67 | long pages_to_free; | 67 | long pages_to_free; |
68 | struct quicklist *q; | 68 | struct quicklist *q; |
69 | int cpu; | ||
69 | 70 | ||
70 | q = &get_cpu_var(quicklist)[nr]; | 71 | q = &get_cpu_var_locked(quicklist, &cpu)[nr]; |
71 | if (q->nr_pages > min_pages) { | 72 | if (q->nr_pages > min_pages) { |
72 | pages_to_free = min_pages_to_free(q, min_pages, max_free); | 73 | pages_to_free = min_pages_to_free(q, min_pages, max_free); |
73 | 74 | ||
74 | while (pages_to_free > 0) { | 75 | while (pages_to_free > 0) { |
75 | /* | 76 | void *p = __quicklist_alloc(q); |
76 | * We pass a gfp_t of 0 to quicklist_alloc here | ||
77 | * because we will never call into the page allocator. | ||
78 | */ | ||
79 | void *p = quicklist_alloc(nr, 0, NULL); | ||
80 | 77 | ||
81 | if (dtor) | 78 | if (dtor) |
82 | dtor(p); | 79 | dtor(p); |
@@ -84,7 +81,7 @@ void quicklist_trim(int nr, void (*dtor)(void *), | |||
84 | pages_to_free--; | 81 | pages_to_free--; |
85 | } | 82 | } |
86 | } | 83 | } |
87 | put_cpu_var(quicklist); | 84 | put_cpu_var_locked(quicklist, cpu); |
88 | } | 85 | } |
89 | 86 | ||
90 | unsigned long quicklist_total_size(void) | 87 | unsigned long quicklist_total_size(void) |
@@ -94,7 +91,7 @@ unsigned long quicklist_total_size(void) | |||
94 | struct quicklist *ql, *q; | 91 | struct quicklist *ql, *q; |
95 | 92 | ||
96 | for_each_online_cpu(cpu) { | 93 | for_each_online_cpu(cpu) { |
97 | ql = per_cpu(quicklist, cpu); | 94 | ql = per_cpu_var_locked(quicklist, cpu); |
98 | for (q = ql; q < ql + CONFIG_NR_QUICK; q++) | 95 | for (q = ql; q < ql + CONFIG_NR_QUICK; q++) |
99 | count += q->nr_pages; | 96 | count += q->nr_pages; |
100 | } | 97 | } |