diff options
author | Dan Williams <dan.j.williams@intel.com> | 2019-06-13 18:56:27 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-06-13 23:34:56 -0400 |
commit | 795ee30648c708502da9df637f83c33361d68dcc (patch) | |
tree | 83a9204335deb0f02dfcb2e6a033a9cc6d1d21c5 | |
parent | e615a191216e3fb4e9c0d239007f2b0cd48f28bf (diff) |
lib/genalloc: introduce chunk owners
The p2pdma facility enables a provider to publish a pool of dma
addresses for a consumer to allocate. A genpool is used internally by
p2pdma to collect dma resources, 'chunks', to be handed out to
consumers. Whenever a consumer allocates a resource it needs to pin the
'struct dev_pagemap' instance that backs the chunk selected by
pci_alloc_p2pmem().
Currently that reference is taken globally on the entire provider
device. That sets up a lifetime mismatch whereby the p2pdma core needs
to maintain hacks to make sure the percpu_ref is not released twice.
This lifetime mismatch also stands in the way of a fix to
devm_memremap_pages() whereby devm_memremap_pages_release() must wait for
the percpu_ref ->release() callback to complete before it can proceed to
teardown pages.
So, towards fixing this situation, introduce the ability to store a 'chunk
owner' at gen_pool_add() time, and a facility to retrieve the owner at
gen_pool_{alloc,free}() time. For p2pdma this will be used to store and
recall individual dev_pagemap reference counter instances per-chunk.
Link: http://lkml.kernel.org/r/155727338118.292046.13407378933221579644.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/linux/genalloc.h | 55 | ||||
-rw-r--r-- | lib/genalloc.c | 51 |
2 files changed, 74 insertions, 32 deletions
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index dd0a452373e7..a337313e064f 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h | |||
@@ -75,6 +75,7 @@ struct gen_pool_chunk { | |||
75 | struct list_head next_chunk; /* next chunk in pool */ | 75 | struct list_head next_chunk; /* next chunk in pool */ |
76 | atomic_long_t avail; | 76 | atomic_long_t avail; |
77 | phys_addr_t phys_addr; /* physical starting address of memory chunk */ | 77 | phys_addr_t phys_addr; /* physical starting address of memory chunk */ |
78 | void *owner; /* private data to retrieve at alloc time */ | ||
78 | unsigned long start_addr; /* start address of memory chunk */ | 79 | unsigned long start_addr; /* start address of memory chunk */ |
79 | unsigned long end_addr; /* end address of memory chunk (inclusive) */ | 80 | unsigned long end_addr; /* end address of memory chunk (inclusive) */ |
80 | unsigned long bits[0]; /* bitmap for allocating memory chunk */ | 81 | unsigned long bits[0]; /* bitmap for allocating memory chunk */ |
@@ -96,8 +97,15 @@ struct genpool_data_fixed { | |||
96 | 97 | ||
97 | extern struct gen_pool *gen_pool_create(int, int); | 98 | extern struct gen_pool *gen_pool_create(int, int); |
98 | extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long); | 99 | extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long); |
99 | extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t, | 100 | extern int gen_pool_add_owner(struct gen_pool *, unsigned long, phys_addr_t, |
100 | size_t, int); | 101 | size_t, int, void *); |
102 | |||
103 | static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr, | ||
104 | phys_addr_t phys, size_t size, int nid) | ||
105 | { | ||
106 | return gen_pool_add_owner(pool, addr, phys, size, nid, NULL); | ||
107 | } | ||
108 | |||
101 | /** | 109 | /** |
102 | * gen_pool_add - add a new chunk of special memory to the pool | 110 | * gen_pool_add - add a new chunk of special memory to the pool |
103 | * @pool: pool to add new memory chunk to | 111 | * @pool: pool to add new memory chunk to |
@@ -116,12 +124,47 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr, | |||
116 | return gen_pool_add_virt(pool, addr, -1, size, nid); | 124 | return gen_pool_add_virt(pool, addr, -1, size, nid); |
117 | } | 125 | } |
118 | extern void gen_pool_destroy(struct gen_pool *); | 126 | extern void gen_pool_destroy(struct gen_pool *); |
119 | extern unsigned long gen_pool_alloc(struct gen_pool *, size_t); | 127 | unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size, |
120 | extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t, | 128 | genpool_algo_t algo, void *data, void **owner); |
121 | genpool_algo_t algo, void *data); | 129 | |
130 | static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool, | ||
131 | size_t size, void **owner) | ||
132 | { | ||
133 | return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data, | ||
134 | owner); | ||
135 | } | ||
136 | |||
137 | static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool, | ||
138 | size_t size, genpool_algo_t algo, void *data) | ||
139 | { | ||
140 | return gen_pool_alloc_algo_owner(pool, size, algo, data, NULL); | ||
141 | } | ||
142 | |||
143 | /** | ||
144 | * gen_pool_alloc - allocate special memory from the pool | ||
145 | * @pool: pool to allocate from | ||
146 | * @size: number of bytes to allocate from the pool | ||
147 | * | ||
148 | * Allocate the requested number of bytes from the specified pool. | ||
149 | * Uses the pool allocation function (with first-fit algorithm by default). | ||
150 | * Can not be used in NMI handler on architectures without | ||
151 | * NMI-safe cmpxchg implementation. | ||
152 | */ | ||
153 | static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) | ||
154 | { | ||
155 | return gen_pool_alloc_algo(pool, size, pool->algo, pool->data); | ||
156 | } | ||
157 | |||
122 | extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, | 158 | extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, |
123 | dma_addr_t *dma); | 159 | dma_addr_t *dma); |
124 | extern void gen_pool_free(struct gen_pool *, unsigned long, size_t); | 160 | extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, |
161 | size_t size, void **owner); | ||
162 | static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr, | ||
163 | size_t size) | ||
164 | { | ||
165 | gen_pool_free_owner(pool, addr, size, NULL); | ||
166 | } | ||
167 | |||
125 | extern void gen_pool_for_each_chunk(struct gen_pool *, | 168 | extern void gen_pool_for_each_chunk(struct gen_pool *, |
126 | void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *); | 169 | void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *); |
127 | extern size_t gen_pool_avail(struct gen_pool *); | 170 | extern size_t gen_pool_avail(struct gen_pool *); |
diff --git a/lib/genalloc.c b/lib/genalloc.c index 7e85d1e37a6e..770c769d7cb7 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
@@ -168,20 +168,21 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid) | |||
168 | EXPORT_SYMBOL(gen_pool_create); | 168 | EXPORT_SYMBOL(gen_pool_create); |
169 | 169 | ||
170 | /** | 170 | /** |
171 | * gen_pool_add_virt - add a new chunk of special memory to the pool | 171 | * gen_pool_add_owner - add a new chunk of special memory to the pool |
172 | * @pool: pool to add new memory chunk to | 172 | * @pool: pool to add new memory chunk to |
173 | * @virt: virtual starting address of memory chunk to add to pool | 173 | * @virt: virtual starting address of memory chunk to add to pool |
174 | * @phys: physical starting address of memory chunk to add to pool | 174 | * @phys: physical starting address of memory chunk to add to pool |
175 | * @size: size in bytes of the memory chunk to add to pool | 175 | * @size: size in bytes of the memory chunk to add to pool |
176 | * @nid: node id of the node the chunk structure and bitmap should be | 176 | * @nid: node id of the node the chunk structure and bitmap should be |
177 | * allocated on, or -1 | 177 | * allocated on, or -1 |
178 | * @owner: private data the publisher would like to recall at alloc time | ||
178 | * | 179 | * |
179 | * Add a new chunk of special memory to the specified pool. | 180 | * Add a new chunk of special memory to the specified pool. |
180 | * | 181 | * |
181 | * Returns 0 on success or a -ve errno on failure. | 182 | * Returns 0 on success or a -ve errno on failure. |
182 | */ | 183 | */ |
183 | int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys, | 184 | int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys, |
184 | size_t size, int nid) | 185 | size_t size, int nid, void *owner) |
185 | { | 186 | { |
186 | struct gen_pool_chunk *chunk; | 187 | struct gen_pool_chunk *chunk; |
187 | int nbits = size >> pool->min_alloc_order; | 188 | int nbits = size >> pool->min_alloc_order; |
@@ -195,6 +196,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy | |||
195 | chunk->phys_addr = phys; | 196 | chunk->phys_addr = phys; |
196 | chunk->start_addr = virt; | 197 | chunk->start_addr = virt; |
197 | chunk->end_addr = virt + size - 1; | 198 | chunk->end_addr = virt + size - 1; |
199 | chunk->owner = owner; | ||
198 | atomic_long_set(&chunk->avail, size); | 200 | atomic_long_set(&chunk->avail, size); |
199 | 201 | ||
200 | spin_lock(&pool->lock); | 202 | spin_lock(&pool->lock); |
@@ -203,7 +205,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy | |||
203 | 205 | ||
204 | return 0; | 206 | return 0; |
205 | } | 207 | } |
206 | EXPORT_SYMBOL(gen_pool_add_virt); | 208 | EXPORT_SYMBOL(gen_pool_add_owner); |
207 | 209 | ||
208 | /** | 210 | /** |
209 | * gen_pool_virt_to_phys - return the physical address of memory | 211 | * gen_pool_virt_to_phys - return the physical address of memory |
@@ -260,35 +262,20 @@ void gen_pool_destroy(struct gen_pool *pool) | |||
260 | EXPORT_SYMBOL(gen_pool_destroy); | 262 | EXPORT_SYMBOL(gen_pool_destroy); |
261 | 263 | ||
262 | /** | 264 | /** |
263 | * gen_pool_alloc - allocate special memory from the pool | 265 | * gen_pool_alloc_algo_owner - allocate special memory from the pool |
264 | * @pool: pool to allocate from | ||
265 | * @size: number of bytes to allocate from the pool | ||
266 | * | ||
267 | * Allocate the requested number of bytes from the specified pool. | ||
268 | * Uses the pool allocation function (with first-fit algorithm by default). | ||
269 | * Can not be used in NMI handler on architectures without | ||
270 | * NMI-safe cmpxchg implementation. | ||
271 | */ | ||
272 | unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) | ||
273 | { | ||
274 | return gen_pool_alloc_algo(pool, size, pool->algo, pool->data); | ||
275 | } | ||
276 | EXPORT_SYMBOL(gen_pool_alloc); | ||
277 | |||
278 | /** | ||
279 | * gen_pool_alloc_algo - allocate special memory from the pool | ||
280 | * @pool: pool to allocate from | 266 | * @pool: pool to allocate from |
281 | * @size: number of bytes to allocate from the pool | 267 | * @size: number of bytes to allocate from the pool |
282 | * @algo: algorithm passed from caller | 268 | * @algo: algorithm passed from caller |
283 | * @data: data passed to algorithm | 269 | * @data: data passed to algorithm |
270 | * @owner: optionally retrieve the chunk owner | ||
284 | * | 271 | * |
285 | * Allocate the requested number of bytes from the specified pool. | 272 | * Allocate the requested number of bytes from the specified pool. |
286 | * Uses the pool allocation function (with first-fit algorithm by default). | 273 | * Uses the pool allocation function (with first-fit algorithm by default). |
287 | * Can not be used in NMI handler on architectures without | 274 | * Can not be used in NMI handler on architectures without |
288 | * NMI-safe cmpxchg implementation. | 275 | * NMI-safe cmpxchg implementation. |
289 | */ | 276 | */ |
290 | unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, | 277 | unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size, |
291 | genpool_algo_t algo, void *data) | 278 | genpool_algo_t algo, void *data, void **owner) |
292 | { | 279 | { |
293 | struct gen_pool_chunk *chunk; | 280 | struct gen_pool_chunk *chunk; |
294 | unsigned long addr = 0; | 281 | unsigned long addr = 0; |
@@ -299,6 +286,9 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, | |||
299 | BUG_ON(in_nmi()); | 286 | BUG_ON(in_nmi()); |
300 | #endif | 287 | #endif |
301 | 288 | ||
289 | if (owner) | ||
290 | *owner = NULL; | ||
291 | |||
302 | if (size == 0) | 292 | if (size == 0) |
303 | return 0; | 293 | return 0; |
304 | 294 | ||
@@ -326,12 +316,14 @@ retry: | |||
326 | addr = chunk->start_addr + ((unsigned long)start_bit << order); | 316 | addr = chunk->start_addr + ((unsigned long)start_bit << order); |
327 | size = nbits << order; | 317 | size = nbits << order; |
328 | atomic_long_sub(size, &chunk->avail); | 318 | atomic_long_sub(size, &chunk->avail); |
319 | if (owner) | ||
320 | *owner = chunk->owner; | ||
329 | break; | 321 | break; |
330 | } | 322 | } |
331 | rcu_read_unlock(); | 323 | rcu_read_unlock(); |
332 | return addr; | 324 | return addr; |
333 | } | 325 | } |
334 | EXPORT_SYMBOL(gen_pool_alloc_algo); | 326 | EXPORT_SYMBOL(gen_pool_alloc_algo_owner); |
335 | 327 | ||
336 | /** | 328 | /** |
337 | * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage | 329 | * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage |
@@ -367,12 +359,14 @@ EXPORT_SYMBOL(gen_pool_dma_alloc); | |||
367 | * @pool: pool to free to | 359 | * @pool: pool to free to |
368 | * @addr: starting address of memory to free back to pool | 360 | * @addr: starting address of memory to free back to pool |
369 | * @size: size in bytes of memory to free | 361 | * @size: size in bytes of memory to free |
362 | * @owner: private data stashed at gen_pool_add() time | ||
370 | * | 363 | * |
371 | * Free previously allocated special memory back to the specified | 364 | * Free previously allocated special memory back to the specified |
372 | * pool. Can not be used in NMI handler on architectures without | 365 | * pool. Can not be used in NMI handler on architectures without |
373 | * NMI-safe cmpxchg implementation. | 366 | * NMI-safe cmpxchg implementation. |
374 | */ | 367 | */ |
375 | void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) | 368 | void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size, |
369 | void **owner) | ||
376 | { | 370 | { |
377 | struct gen_pool_chunk *chunk; | 371 | struct gen_pool_chunk *chunk; |
378 | int order = pool->min_alloc_order; | 372 | int order = pool->min_alloc_order; |
@@ -382,6 +376,9 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) | |||
382 | BUG_ON(in_nmi()); | 376 | BUG_ON(in_nmi()); |
383 | #endif | 377 | #endif |
384 | 378 | ||
379 | if (owner) | ||
380 | *owner = NULL; | ||
381 | |||
385 | nbits = (size + (1UL << order) - 1) >> order; | 382 | nbits = (size + (1UL << order) - 1) >> order; |
386 | rcu_read_lock(); | 383 | rcu_read_lock(); |
387 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { | 384 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { |
@@ -392,6 +389,8 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) | |||
392 | BUG_ON(remain); | 389 | BUG_ON(remain); |
393 | size = nbits << order; | 390 | size = nbits << order; |
394 | atomic_long_add(size, &chunk->avail); | 391 | atomic_long_add(size, &chunk->avail); |
392 | if (owner) | ||
393 | *owner = chunk->owner; | ||
395 | rcu_read_unlock(); | 394 | rcu_read_unlock(); |
396 | return; | 395 | return; |
397 | } | 396 | } |
@@ -399,7 +398,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) | |||
399 | rcu_read_unlock(); | 398 | rcu_read_unlock(); |
400 | BUG(); | 399 | BUG(); |
401 | } | 400 | } |
402 | EXPORT_SYMBOL(gen_pool_free); | 401 | EXPORT_SYMBOL(gen_pool_free_owner); |
403 | 402 | ||
404 | /** | 403 | /** |
405 | * gen_pool_for_each_chunk - call func for every chunk of generic memory pool | 404 | * gen_pool_for_each_chunk - call func for every chunk of generic memory pool |