path: root/mm
author	Matthew Wilcox <matthew@wil.cx>	2007-12-03 14:08:28 -0500
committer	Matthew Wilcox <matthew@wil.cx>	2007-12-04 10:39:57 -0500
commit	a35a3455142976e3fffdf27027f3082cbaba6e8c (patch)
tree	e8def40d5d97dfbb7c9293ae1f7c13a996e02feb /mm
parent	6182a0943af2235756836ed7e021fa22b93ec68b (diff)
Change dmapool free block management
Use a list of free blocks within a page instead of using a bitmap.  Update
documentation to reflect this.  As well as being a slight reduction in memory
allocation, locked ops and lines of code, it speeds up a transaction
processing benchmark by 0.4%.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
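[Editor's illustration, not part of the patch.] The scheme the patch adopts threads the free list through the free blocks themselves: each free block stores, in its first bytes, the offset of the next free block, so the page header only needs the offset of the list head plus an in-use count. A minimal userspace sketch of that idea follows; the toy_* names and the sizes are made up for illustration and are not the kernel code.

/*
 * Sketch of a free-block list threaded through the blocks of one "page".
 * toy_* names, ALLOCATION and BLOCK_SIZE are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

#define ALLOCATION	4096	/* bytes managed per "page" */
#define BLOCK_SIZE	64	/* must be at least sizeof(unsigned int) */

struct toy_page {
	void *vaddr;		/* the ALLOCATION-byte region */
	unsigned int offset;	/* first free block; == ALLOCATION when full */
	unsigned int in_use;	/* count of allocated blocks */
};

/* Chain every block to the next, as pool_initialise_page() does. */
static void toy_init(struct toy_page *page)
{
	unsigned int offset = 0;

	page->vaddr = malloc(ALLOCATION);
	page->offset = 0;
	page->in_use = 0;
	do {
		unsigned int next = offset + BLOCK_SIZE;
		if (next + BLOCK_SIZE > ALLOCATION)
			next = ALLOCATION;	/* terminate the chain */
		*(unsigned int *)((char *)page->vaddr + offset) = next;
		offset = next;
	} while (offset < ALLOCATION);
}

/* Pop the list head: O(1), no bitmap scan. */
static void *toy_alloc(struct toy_page *page)
{
	unsigned int offset = page->offset;

	if (offset >= ALLOCATION)
		return NULL;		/* page exhausted */
	page->offset = *(unsigned int *)((char *)page->vaddr + offset);
	page->in_use++;
	return (char *)page->vaddr + offset;
}

/* Push the block back on the list head. */
static void toy_free(struct toy_page *page, void *block)
{
	unsigned int offset = (char *)block - (char *)page->vaddr;

	*(unsigned int *)block = page->offset;
	page->offset = offset;
	page->in_use--;
}

int main(void)
{
	struct toy_page page;
	void *a, *b;

	toy_init(&page);
	a = toy_alloc(&page);
	b = toy_alloc(&page);
	printf("a=%p b=%p in_use=%u\n", a, b, page.in_use);
	toy_free(&page, a);
	toy_free(&page, b);
	printf("list head after freeing both: %u\n", page.offset);
	free(page.vaddr);
	return 0;
}

Allocation pops the head of the list and free pushes the block back, both in constant time, which is where the saving over scanning a bitmap comes from.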
Diffstat (limited to 'mm')
-rw-r--r--	mm/dmapool.c	119
1 files changed, 58 insertions, 61 deletions
diff --git a/mm/dmapool.c b/mm/dmapool.c
index e2ea4543abb4..72e7ece7ee9d 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -17,7 +17,9 @@
  * The current design of this allocator is fairly simple. The pool is
  * represented by the 'struct dma_pool' which keeps a doubly-linked list of
  * allocated pages. Each page in the page_list is split into blocks of at
- * least 'size' bytes.
+ * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
+ * list of free blocks within the page. Used blocks aren't tracked, but we
+ * keep a count of how many are currently allocated from each page.
  */
 
 #include <linux/device.h>
@@ -38,7 +40,6 @@
 struct dma_pool {	/* the pool */
 	struct list_head page_list;
 	spinlock_t lock;
-	size_t blocks_per_page;
 	size_t size;
 	struct device *dev;
 	size_t allocation;
@@ -51,8 +52,8 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
 	struct list_head page_list;
 	void *vaddr;
 	dma_addr_t dma;
-	unsigned in_use;
-	unsigned long bitmap[0];
+	unsigned int in_use;
+	unsigned int offset;
 };
 
 #define POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
@@ -87,8 +88,8 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
 
 	/* per-pool info, no real statistics yet */
 	temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
-			 pool->name,
-			 blocks, pages * pool->blocks_per_page,
+			 pool->name, blocks,
+			 pages * (pool->allocation / pool->size),
 			 pool->size, pages);
 	size -= temp;
 	next += temp;
@@ -132,8 +133,11 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 		return NULL;
 	}
 
-	if (size == 0)
+	if (size == 0) {
 		return NULL;
+	} else if (size < 4) {
+		size = 4;
+	}
 
 	if ((size % align) != 0)
 		size = ALIGN(size, align);
@@ -160,7 +164,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	spin_lock_init(&retval->lock);
 	retval->size = size;
 	retval->allocation = allocation;
-	retval->blocks_per_page = allocation / size;
 	init_waitqueue_head(&retval->waitq);
 
 	if (dev) {
@@ -186,28 +189,36 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 }
 EXPORT_SYMBOL(dma_pool_create);
 
+static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
+{
+	unsigned int offset = 0;
+
+	do {
+		unsigned int next = offset + pool->size;
+		if (unlikely((next + pool->size) >= pool->allocation))
+			next = pool->allocation;
+		*(int *)(page->vaddr + offset) = next;
+		offset = next;
+	} while (offset < pool->allocation);
+}
+
 static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 {
 	struct dma_page *page;
-	int mapsize;
-
-	mapsize = pool->blocks_per_page;
-	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
-	mapsize *= sizeof(long);
 
-	page = kmalloc(mapsize + sizeof *page, mem_flags);
+	page = kmalloc(sizeof(*page), mem_flags);
 	if (!page)
 		return NULL;
-	page->vaddr = dma_alloc_coherent(pool->dev,
-					 pool->allocation,
+	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
 					 &page->dma, mem_flags);
 	if (page->vaddr) {
-		memset(page->bitmap, 0xff, mapsize);	/* bit set == free */
 #ifdef CONFIG_DEBUG_SLAB
 		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
+		pool_initialise_page(pool, page);
 		list_add(&page->page_list, &pool->page_list);
 		page->in_use = 0;
+		page->offset = 0;
 	} else {
 		kfree(page);
 		page = NULL;
@@ -215,14 +226,9 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 	return page;
 }
 
-static inline int is_page_busy(int blocks, unsigned long *bitmap)
+static inline int is_page_busy(struct dma_page *page)
 {
-	while (blocks > 0) {
-		if (*bitmap++ != ~0UL)
-			return 1;
-		blocks -= BITS_PER_LONG;
-	}
-	return 0;
+	return page->in_use != 0;
 }
 
 static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
@@ -257,7 +263,7 @@ void dma_pool_destroy(struct dma_pool *pool)
 		struct dma_page *page;
 		page = list_entry(pool->page_list.next,
 				  struct dma_page, page_list);
-		if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
+		if (is_page_busy(page)) {
 			if (pool->dev)
 				dev_err(pool->dev,
 					"dma_pool_destroy %s, %p busy\n",
@@ -292,27 +298,14 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 {
 	unsigned long flags;
 	struct dma_page *page;
-	int map, block;
 	size_t offset;
 	void *retval;
 
 	spin_lock_irqsave(&pool->lock, flags);
  restart:
 	list_for_each_entry(page, &pool->page_list, page_list) {
-		int i;
-		/* only cachable accesses here ... */
-		for (map = 0, i = 0;
-		     i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
-			if (page->bitmap[map] == 0)
-				continue;
-			block = ffz(~page->bitmap[map]);
-			if ((i + block) < pool->blocks_per_page) {
-				clear_bit(block, &page->bitmap[map]);
-				offset = (BITS_PER_LONG * map) + block;
-				offset *= pool->size;
-				goto ready;
-			}
-		}
+		if (page->offset < pool->allocation)
+			goto ready;
 	}
 	page = pool_alloc_page(pool, GFP_ATOMIC);
 	if (!page) {
@@ -333,10 +326,10 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 		goto done;
 	}
 
-	clear_bit(0, &page->bitmap[0]);
-	offset = 0;
  ready:
 	page->in_use++;
+	offset = page->offset;
+	page->offset = *(int *)(page->vaddr + offset);
 	retval = offset + page->vaddr;
 	*handle = offset + page->dma;
 #ifdef CONFIG_DEBUG_SLAB
@@ -379,7 +372,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 {
 	struct dma_page *page;
 	unsigned long flags;
-	int map, block;
+	unsigned int offset;
 
 	page = pool_find_page(pool, dma);
 	if (!page) {
@@ -393,13 +386,9 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 		return;
 	}
 
-	block = dma - page->dma;
-	block /= pool->size;
-	map = block / BITS_PER_LONG;
-	block %= BITS_PER_LONG;
-
+	offset = vaddr - page->vaddr;
 #ifdef CONFIG_DEBUG_SLAB
-	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
+	if ((dma - page->dma) != offset) {
 		if (pool->dev)
 			dev_err(pool->dev,
 				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
@@ -410,28 +399,36 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 			pool->name, vaddr, (unsigned long long)dma);
 		return;
 	}
-	if (page->bitmap[map] & (1UL << block)) {
-		if (pool->dev)
-			dev_err(pool->dev,
-				"dma_pool_free %s, dma %Lx already free\n",
-				pool->name, (unsigned long long)dma);
-		else
-			printk(KERN_ERR
-			       "dma_pool_free %s, dma %Lx already free\n",
-			       pool->name, (unsigned long long)dma);
-		return;
+	{
+		unsigned int chain = page->offset;
+		while (chain < pool->allocation) {
+			if (chain != offset) {
+				chain = *(int *)(page->vaddr + chain);
+				continue;
+			}
+			if (pool->dev)
+				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
+					"already free\n", pool->name,
+					(unsigned long long)dma);
+			else
+				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
+					"already free\n", pool->name,
+					(unsigned long long)dma);
+			return;
+		}
 	}
 	memset(vaddr, POOL_POISON_FREED, pool->size);
 #endif
 
 	spin_lock_irqsave(&pool->lock, flags);
 	page->in_use--;
-	set_bit(block, &page->bitmap[map]);
+	*(int *)vaddr = page->offset;
+	page->offset = offset;
 	if (waitqueue_active(&pool->waitq))
 		wake_up_locked(&pool->waitq);
 	/*
 	 * Resist a temptation to do
-	 *	if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
+	 *	if (!is_page_busy(page)) pool_free_page(pool, page);
 	 * Better have a few empty pages hang around.
 	 */
 	spin_unlock_irqrestore(&pool->lock, flags);
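
[Editor's note.] The calling convention is untouched by this change; for orientation, a rough usage sketch of the dma_pool interface follows. The function name, device pointer, sizes and error handling are illustrative, not taken from the patch.

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

/* Illustrative only; 'dev' is the calling driver's struct device. */
static int example_use_dmapool(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *cpu;

	/* 64-byte blocks, 8-byte alignment, default per-page allocation */
	pool = dma_pool_create("example_desc", dev, 64, 8, 0);
	if (!pool)
		return -ENOMEM;

	cpu = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!cpu) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... program 'dma' into the device, access the block via 'cpu' ... */

	dma_pool_free(pool, cpu, dma);
	dma_pool_destroy(pool);
	return 0;
}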