Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/blk-mq.h			|   1
-rw-r--r--	include/linux/flex_proportions.h	|   5
-rw-r--r--	include/linux/percpu-refcount.h		| 122
-rw-r--r--	include/linux/percpu.h			|  13
-rw-r--r--	include/linux/percpu_counter.h		|  10
-rw-r--r--	include/linux/proportions.h		|   5
6 files changed, 102 insertions, 54 deletions
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index a1e31f274fcd..c13a0c09faea 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -140,6 +140,7 @@ enum {
 };
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+void blk_mq_finish_init(struct request_queue *q);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
 
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index 4ebc49fae391..0d348e011a6e 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -10,6 +10,7 @@
 #include <linux/percpu_counter.h>
 #include <linux/spinlock.h>
 #include <linux/seqlock.h>
+#include <linux/gfp.h>
 
 /*
  * When maximum proportion of some event type is specified, this is the
@@ -32,7 +33,7 @@ struct fprop_global {
 	seqcount_t sequence;
 };
 
-int fprop_global_init(struct fprop_global *p);
+int fprop_global_init(struct fprop_global *p, gfp_t gfp);
 void fprop_global_destroy(struct fprop_global *p);
 bool fprop_new_period(struct fprop_global *p, int periods);
 
@@ -79,7 +80,7 @@ struct fprop_local_percpu {
 	raw_spinlock_t lock;	/* Protect period and numerator */
 };
 
-int fprop_local_init_percpu(struct fprop_local_percpu *pl);
+int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
 void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
 void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
 void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
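The fprop_* initializers above now take an explicit gfp_t, since the backing percpu_counter allocation is no longer implicitly GFP_KERNEL. A minimal caller sketch under assumed names (my_completions and my_local are hypothetical; GFP_KERNEL is just the common choice for a sleepable init path):

static struct fprop_global my_completions;	/* hypothetical */
static struct fprop_local_percpu my_local;	/* hypothetical */

static int __init my_fprop_setup(void)
{
	int err;

	/* init path may sleep, so plain GFP_KERNEL is fine here */
	err = fprop_global_init(&my_completions, GFP_KERNEL);
	if (err)
		return err;

	err = fprop_local_init_percpu(&my_local, GFP_KERNEL);
	if (err)
		fprop_global_destroy(&my_completions);
	return err;
}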
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 68a64f11ce02..d5c89e0dd0e6 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -13,7 +13,7 @@
  *
  * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
  * than an atomic_t - this is because of the way shutdown works, see
- * percpu_ref_kill()/PCPU_COUNT_BIAS.
+ * percpu_ref_kill()/PERCPU_COUNT_BIAS.
  *
  * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
  * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
@@ -49,29 +49,60 @@
 #include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/rcupdate.h>
+#include <linux/gfp.h>
 
 struct percpu_ref;
 typedef void (percpu_ref_func_t)(struct percpu_ref *);
 
+/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
+enum {
+	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
+	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
+	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
+
+	__PERCPU_REF_FLAG_BITS	= 2,
+};
+
+/* @flags for percpu_ref_init() */
+enum {
+	/*
+	 * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
+	 * operation using percpu_ref_switch_to_percpu().  If initialized
+	 * with this flag, the ref will stay in atomic mode until
+	 * percpu_ref_switch_to_percpu() is invoked on it.
+	 */
+	PERCPU_REF_INIT_ATOMIC	= 1 << 0,
+
+	/*
+	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
+	 * percpu_ref_reinit() before used.  Implies INIT_ATOMIC.
+	 */
+	PERCPU_REF_INIT_DEAD	= 1 << 1,
+};
+
 struct percpu_ref {
-	atomic_t		count;
+	atomic_long_t		count;
 	/*
 	 * The low bit of the pointer indicates whether the ref is in percpu
 	 * mode; if set, then get/put will manipulate the atomic_t.
 	 */
-	unsigned long		pcpu_count_ptr;
+	unsigned long		percpu_count_ptr;
 	percpu_ref_func_t	*release;
-	percpu_ref_func_t	*confirm_kill;
+	percpu_ref_func_t	*confirm_switch;
+	bool			force_atomic:1;
 	struct rcu_head		rcu;
 };
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
-				 percpu_ref_func_t *release);
-void percpu_ref_reinit(struct percpu_ref *ref);
+				 percpu_ref_func_t *release, unsigned int flags,
+				 gfp_t gfp);
 void percpu_ref_exit(struct percpu_ref *ref);
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+				 percpu_ref_func_t *confirm_switch);
+void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill);
-void __percpu_ref_kill_expedited(struct percpu_ref *ref);
+void percpu_ref_reinit(struct percpu_ref *ref);
 
 /**
  * percpu_ref_kill - drop the initial ref
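The hunk above documents the new percpu_ref_init() flags and the extra gfp argument. As a rough illustration of what the expanded signature looks like at a call site (my_release, my_setup, and the flag choice are assumptions for this sketch, not taken from the patch):

static void my_release(struct percpu_ref *ref)
{
	/* last reference dropped; free the containing object here */
}

static int my_setup(struct percpu_ref *ref)
{
	/*
	 * Start in atomic mode per PERCPU_REF_INIT_ATOMIC; the ref stays
	 * atomic until percpu_ref_switch_to_percpu() is called on it.
	 */
	return percpu_ref_init(ref, my_release, PERCPU_REF_INIT_ATOMIC,
			       GFP_KERNEL);
}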
@@ -88,26 +119,24 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 	return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define PCPU_REF_DEAD		1
-
 /*
  * Internal helper.  Don't use outside percpu-refcount proper.  The
  * function doesn't return the pointer and let the caller test it for NULL
  * because doing so forces the compiler to generate two conditional
- * branches as it can't assume that @ref->pcpu_count is not NULL.
+ * branches as it can't assume that @ref->percpu_count is not NULL.
  */
-static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
-				    unsigned __percpu **pcpu_countp)
+static inline bool __ref_is_percpu(struct percpu_ref *ref,
+				   unsigned long __percpu **percpu_countp)
 {
-	unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
+	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
 
 	/* paired with smp_store_release() in percpu_ref_reinit() */
 	smp_read_barrier_depends();
 
-	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
 		return false;
 
-	*pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
 	return true;
 }
 
@@ -115,18 +144,20 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
  * percpu_ref_get - increment a percpu refcount
  * @ref: percpu_ref to get
  *
- * Analagous to atomic_inc().
+ * Analagous to atomic_long_inc().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline void percpu_ref_get(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
-		this_cpu_inc(*pcpu_count);
+	if (__ref_is_percpu(ref, &percpu_count))
+		this_cpu_inc(*percpu_count);
 	else
-		atomic_inc(&ref->count);
+		atomic_long_inc(&ref->count);
 
 	rcu_read_unlock_sched();
 }
@@ -138,20 +169,20 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * Increment a percpu refcount unless its count already reached zero.
  * Returns %true on success; %false on failure.
  *
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
-	int ret = false;
+	unsigned long __percpu *percpu_count;
+	int ret;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count)) {
-		this_cpu_inc(*pcpu_count);
+	if (__ref_is_percpu(ref, &percpu_count)) {
+		this_cpu_inc(*percpu_count);
 		ret = true;
 	} else {
-		ret = atomic_inc_not_zero(&ref->count);
+		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
 	rcu_read_unlock_sched();
@@ -166,23 +197,26 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  * Increment a percpu refcount unless it has already been killed.  Returns
  * %true on success; %false on failure.
  *
- * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget
- * will fail.  For such guarantee, percpu_ref_kill_and_confirm() should be
- * used.  After the confirm_kill callback is invoked, it's guaranteed that
- * no new reference will be given out by percpu_ref_tryget().
+ * Completion of percpu_ref_kill() in itself doesn't guarantee that this
+ * function will fail.  For such guarantee, percpu_ref_kill_and_confirm()
+ * should be used.  After the confirm_kill callback is invoked, it's
+ * guaranteed that no new reference will be given out by
+ * percpu_ref_tryget_live().
  *
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 	int ret = false;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count)) {
-		this_cpu_inc(*pcpu_count);
+	if (__ref_is_percpu(ref, &percpu_count)) {
+		this_cpu_inc(*percpu_count);
 		ret = true;
+	} else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
+		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
 	rcu_read_unlock_sched();
@@ -196,16 +230,18 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
  *
  * Decrement the refcount, and if 0, call the release function (which was passed
  * to percpu_ref_init())
+ *
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline void percpu_ref_put(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
-		this_cpu_dec(*pcpu_count);
-	else if (unlikely(atomic_dec_and_test(&ref->count)))
+	if (__ref_is_percpu(ref, &percpu_count))
+		this_cpu_dec(*percpu_count);
+	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
 		ref->release(ref);
 
 	rcu_read_unlock_sched();
@@ -216,14 +252,16 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
  * @ref: percpu_ref to test
  *
  * Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
+	if (__ref_is_percpu(ref, &percpu_count))
 		return false;
-	return !atomic_read(&ref->count);
+	return !atomic_long_read(&ref->count);
 }
 
 #endif
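Putting the renamed helpers together, a typical object lifetime with the reworked API might look like the following sketch (struct my_obj and its functions are invented for illustration; the guarantees mentioned in the comments are the ones documented in the hunks above):

struct my_obj {
	struct percpu_ref	ref;
	/* ... payload ... */
};

static void my_obj_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	percpu_ref_exit(&obj->ref);	/* free the percpu counter */
	kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	/* no special flags: start live, in percpu mode */
	if (percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL)) {
		kfree(obj);
		return NULL;
	}
	return obj;
}

static void my_obj_access(struct my_obj *obj)
{
	/* gives out a new ref only while @ref hasn't been killed */
	if (percpu_ref_tryget_live(&obj->ref)) {
		/* ... use obj ... */
		percpu_ref_put(&obj->ref);
	}
}

static void my_obj_shutdown(struct my_obj *obj)
{
	/* drop the initial ref; my_obj_release() runs when the count hits zero */
	percpu_ref_kill(&obj->ref);
}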
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 6f61b61b7996..a3aa63e47637 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,9 +48,9 @@
  * intelligent way to determine this would be nice.
  */
 #if BITS_PER_LONG > 32
-#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
+#define PERCPU_DYNAMIC_RESERVE		(28 << 10)
 #else
-#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
+#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
 #endif
 
 extern void *pcpu_base_addr;
@@ -122,11 +122,16 @@ extern void __init setup_per_cpu_areas(void);
 #endif
 extern void __init percpu_init_late(void);
 
+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
 extern void __percpu *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void __percpu *__pdata);
 extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
-#define alloc_percpu(type)	\
-	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
+#define alloc_percpu_gfp(type, gfp)					\
+	(typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type),	\
+						    __alignof__(type), gfp)
+#define alloc_percpu(type)						\
+	(typeof(type) __percpu *)__alloc_percpu(sizeof(type),		\
+						__alignof__(type))
 
 #endif /* __LINUX_PERCPU_H */
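alloc_percpu() keeps its sleeping GFP_KERNEL behaviour, while the new alloc_percpu_gfp() is for callers that cannot sleep. A hedged sketch (struct my_stats is invented; GFP_NOWAIT is one plausible choice for an atomic caller):

struct my_stats {			/* hypothetical per-cpu payload */
	u64	packets;
	u64	bytes;
};

static struct my_stats __percpu *my_stats_alloc_atomic(void)
{
	/* atomic context: cannot use the sleeping alloc_percpu() */
	return alloc_percpu_gfp(struct my_stats, GFP_NOWAIT);
}

static void my_stats_account(struct my_stats __percpu *stats, u64 bytes)
{
	struct my_stats *s = get_cpu_ptr(stats);	/* disables preemption */

	s->packets++;
	s->bytes += bytes;
	put_cpu_ptr(stats);
}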
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index d5dd4657c8d6..50e50095c8d1 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -12,6 +12,7 @@
 #include <linux/threads.h>
 #include <linux/percpu.h>
 #include <linux/types.h>
+#include <linux/gfp.h>
 
 #ifdef CONFIG_SMP
 
@@ -26,14 +27,14 @@ struct percpu_counter {
 
 extern int percpu_counter_batch;
 
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
 			  struct lock_class_key *key);
 
-#define percpu_counter_init(fbc, value)					\
+#define percpu_counter_init(fbc, value, gfp)				\
 	({								\
 		static struct lock_class_key __key;			\
 									\
-		__percpu_counter_init(fbc, value, &__key);		\
+		__percpu_counter_init(fbc, value, gfp, &__key);		\
 	})
 
 void percpu_counter_destroy(struct percpu_counter *fbc);
@@ -89,7 +90,8 @@ struct percpu_counter {
 	s64 count;
 };
 
-static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+				       gfp_t gfp)
 {
 	fbc->count = amount;
 	return 0;
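Every percpu_counter_init() caller now passes the allocation mask itself, and the UP stub takes the same extra argument so call sites stay identical on both configs. A small sketch (nr_things is a made-up counter):

static struct percpu_counter nr_things;		/* hypothetical */

static int __init things_init(void)
{
	/* the new third argument controls how the per-cpu storage is allocated */
	return percpu_counter_init(&nr_things, 0, GFP_KERNEL);
}

static void things_exit(void)
{
	percpu_counter_destroy(&nr_things);
}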
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 26a8a4ed9b07..00e8e8fa7358 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -12,6 +12,7 @@
 #include <linux/percpu_counter.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/gfp.h>
 
 struct prop_global {
 	/*
@@ -40,7 +41,7 @@ struct prop_descriptor {
 	struct mutex mutex;		/* serialize the prop_global switch */
 };
 
-int prop_descriptor_init(struct prop_descriptor *pd, int shift);
+int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
 void prop_change_shift(struct prop_descriptor *pd, int new_shift);
 
 /*
@@ -61,7 +62,7 @@ struct prop_local_percpu {
 	raw_spinlock_t lock;		/* protect the snapshot state */
 };
 
-int prop_local_init_percpu(struct prop_local_percpu *pl);
+int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
 void prop_local_destroy_percpu(struct prop_local_percpu *pl);
 void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
 void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
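As with the flex_proportions change, the classic proportions initializers gain a gfp_t. A brief sketch with assumed names (my_prop, my_events, and the shift value are illustrative only; descriptor teardown is omitted in this sketch):

static struct prop_descriptor my_prop;		/* hypothetical */
static struct prop_local_percpu my_events;	/* hypothetical */

static int __init my_prop_setup(void)
{
	int err;

	err = prop_descriptor_init(&my_prop, 10, GFP_KERNEL);	/* shift of 10 is arbitrary */
	if (err)
		return err;

	return prop_local_init_percpu(&my_events, GFP_KERNEL);
}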