 drivers/base/Makefile  |   2 +-
 drivers/base/dmapool.c | 481 --------------------------------------------
 mm/Makefile            |   1 +
 mm/dmapool.c           | 500 ++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 502 insertions(+), 482 deletions(-)
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 63e09c015ca0..c66637392bbc 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -5,7 +5,7 @@ obj-y := core.o sys.o bus.o dd.o \
 	cpu.o firmware.o init.o map.o devres.o \
 	attribute_container.o transport_class.o
 obj-y += power/
-obj-$(CONFIG_HAS_DMA) += dma-mapping.o dmapool.o
+obj-$(CONFIG_HAS_DMA) += dma-mapping.o
 obj-$(CONFIG_ISA) += isa.o
 obj-$(CONFIG_FW_LOADER) += firmware_class.o
 obj-$(CONFIG_NUMA) += node.o
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
deleted file mode 100644
index b5034dc72a05..000000000000
--- a/drivers/base/dmapool.c
+++ /dev/null
@@ -1,481 +0,0 @@
| 1 | |||
| 2 | #include <linux/device.h> | ||
| 3 | #include <linux/mm.h> | ||
| 4 | #include <asm/io.h> /* Needed for i386 to build */ | ||
| 5 | #include <linux/dma-mapping.h> | ||
| 6 | #include <linux/dmapool.h> | ||
| 7 | #include <linux/slab.h> | ||
| 8 | #include <linux/module.h> | ||
| 9 | #include <linux/poison.h> | ||
| 10 | #include <linux/sched.h> | ||
| 11 | |||
| 12 | /* | ||
| 13 | * Pool allocator ... wraps the dma_alloc_coherent page allocator, so | ||
| 14 | * small blocks are easily used by drivers for bus mastering controllers. | ||
| 15 | * This should probably be sharing the guts of the slab allocator. | ||
| 16 | */ | ||
| 17 | |||
| 18 | struct dma_pool { /* the pool */ | ||
| 19 | struct list_head page_list; | ||
| 20 | spinlock_t lock; | ||
| 21 | size_t blocks_per_page; | ||
| 22 | size_t size; | ||
| 23 | struct device *dev; | ||
| 24 | size_t allocation; | ||
| 25 | char name [32]; | ||
| 26 | wait_queue_head_t waitq; | ||
| 27 | struct list_head pools; | ||
| 28 | }; | ||
| 29 | |||
| 30 | struct dma_page { /* cacheable header for 'allocation' bytes */ | ||
| 31 | struct list_head page_list; | ||
| 32 | void *vaddr; | ||
| 33 | dma_addr_t dma; | ||
| 34 | unsigned in_use; | ||
| 35 | unsigned long bitmap [0]; | ||
| 36 | }; | ||
| 37 | |||
| 38 | #define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000) | ||
| 39 | |||
| 40 | static DEFINE_MUTEX (pools_lock); | ||
| 41 | |||
| 42 | static ssize_t | ||
| 43 | show_pools (struct device *dev, struct device_attribute *attr, char *buf) | ||
| 44 | { | ||
| 45 | unsigned temp; | ||
| 46 | unsigned size; | ||
| 47 | char *next; | ||
| 48 | struct dma_page *page; | ||
| 49 | struct dma_pool *pool; | ||
| 50 | |||
| 51 | next = buf; | ||
| 52 | size = PAGE_SIZE; | ||
| 53 | |||
| 54 | temp = scnprintf(next, size, "poolinfo - 0.1\n"); | ||
| 55 | size -= temp; | ||
| 56 | next += temp; | ||
| 57 | |||
| 58 | mutex_lock(&pools_lock); | ||
| 59 | list_for_each_entry(pool, &dev->dma_pools, pools) { | ||
| 60 | unsigned pages = 0; | ||
| 61 | unsigned blocks = 0; | ||
| 62 | |||
| 63 | list_for_each_entry(page, &pool->page_list, page_list) { | ||
| 64 | pages++; | ||
| 65 | blocks += page->in_use; | ||
| 66 | } | ||
| 67 | |||
| 68 | /* per-pool info, no real statistics yet */ | ||
| 69 | temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n", | ||
| 70 | pool->name, | ||
| 71 | blocks, pages * pool->blocks_per_page, | ||
| 72 | pool->size, pages); | ||
| 73 | size -= temp; | ||
| 74 | next += temp; | ||
| 75 | } | ||
| 76 | mutex_unlock(&pools_lock); | ||
| 77 | |||
| 78 | return PAGE_SIZE - size; | ||
| 79 | } | ||
| 80 | static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL); | ||
| 81 | |||
| 82 | /** | ||
| 83 | * dma_pool_create - Creates a pool of consistent memory blocks, for dma. | ||
| 84 | * @name: name of pool, for diagnostics | ||
| 85 | * @dev: device that will be doing the DMA | ||
| 86 | * @size: size of the blocks in this pool. | ||
| 87 | * @align: alignment requirement for blocks; must be a power of two | ||
| 88 | * @allocation: returned blocks won't cross this boundary (or zero) | ||
| 89 | * Context: !in_interrupt() | ||
| 90 | * | ||
| 91 | * Returns a dma allocation pool with the requested characteristics, or | ||
| 92 | * null if one can't be created. Given one of these pools, dma_pool_alloc() | ||
| 93 | * may be used to allocate memory. Such memory will all have "consistent" | ||
| 94 | * DMA mappings, accessible by the device and its driver without using | ||
| 95 | * cache flushing primitives. The actual size of blocks allocated may be | ||
| 96 | * larger than requested because of alignment. | ||
| 97 | * | ||
| 98 | * If allocation is nonzero, objects returned from dma_pool_alloc() won't | ||
| 99 | * cross that size boundary. This is useful for devices which have | ||
| 100 | * addressing restrictions on individual DMA transfers, such as not crossing | ||
| 101 | * boundaries of 4KBytes. | ||
| 102 | */ | ||
| 103 | struct dma_pool * | ||
| 104 | dma_pool_create (const char *name, struct device *dev, | ||
| 105 | size_t size, size_t align, size_t allocation) | ||
| 106 | { | ||
| 107 | struct dma_pool *retval; | ||
| 108 | |||
| 109 | if (align == 0) | ||
| 110 | align = 1; | ||
| 111 | if (size == 0) | ||
| 112 | return NULL; | ||
| 113 | else if (size < align) | ||
| 114 | size = align; | ||
| 115 | else if ((size % align) != 0) { | ||
| 116 | size += align + 1; | ||
| 117 | size &= ~(align - 1); | ||
| 118 | } | ||
| 119 | |||
| 120 | if (allocation == 0) { | ||
| 121 | if (PAGE_SIZE < size) | ||
| 122 | allocation = size; | ||
| 123 | else | ||
| 124 | allocation = PAGE_SIZE; | ||
| 125 | // FIXME: round up for less fragmentation | ||
| 126 | } else if (allocation < size) | ||
| 127 | return NULL; | ||
| 128 | |||
| 129 | if (!(retval = kmalloc_node (sizeof *retval, GFP_KERNEL, dev_to_node(dev)))) | ||
| 130 | return retval; | ||
| 131 | |||
| 132 | strlcpy (retval->name, name, sizeof retval->name); | ||
| 133 | |||
| 134 | retval->dev = dev; | ||
| 135 | |||
| 136 | INIT_LIST_HEAD (&retval->page_list); | ||
| 137 | spin_lock_init (&retval->lock); | ||
| 138 | retval->size = size; | ||
| 139 | retval->allocation = allocation; | ||
| 140 | retval->blocks_per_page = allocation / size; | ||
| 141 | init_waitqueue_head (&retval->waitq); | ||
| 142 | |||
| 143 | if (dev) { | ||
| 144 | int ret; | ||
| 145 | |||
| 146 | mutex_lock(&pools_lock); | ||
| 147 | if (list_empty (&dev->dma_pools)) | ||
| 148 | ret = device_create_file (dev, &dev_attr_pools); | ||
| 149 | else | ||
| 150 | ret = 0; | ||
| 151 | /* note: not currently insisting "name" be unique */ | ||
| 152 | if (!ret) | ||
| 153 | list_add (&retval->pools, &dev->dma_pools); | ||
| 154 | else { | ||
| 155 | kfree(retval); | ||
| 156 | retval = NULL; | ||
| 157 | } | ||
| 158 | mutex_unlock(&pools_lock); | ||
| 159 | } else | ||
| 160 | INIT_LIST_HEAD (&retval->pools); | ||
| 161 | |||
| 162 | return retval; | ||
| 163 | } | ||
| 164 | |||
| 165 | |||
| 166 | static struct dma_page * | ||
| 167 | pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags) | ||
| 168 | { | ||
| 169 | struct dma_page *page; | ||
| 170 | int mapsize; | ||
| 171 | |||
| 172 | mapsize = pool->blocks_per_page; | ||
| 173 | mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG; | ||
| 174 | mapsize *= sizeof (long); | ||
| 175 | |||
| 176 | page = kmalloc(mapsize + sizeof *page, mem_flags); | ||
| 177 | if (!page) | ||
| 178 | return NULL; | ||
| 179 | page->vaddr = dma_alloc_coherent (pool->dev, | ||
| 180 | pool->allocation, | ||
| 181 | &page->dma, | ||
| 182 | mem_flags); | ||
| 183 | if (page->vaddr) { | ||
| 184 | memset (page->bitmap, 0xff, mapsize); // bit set == free | ||
| 185 | #ifdef CONFIG_DEBUG_SLAB | ||
| 186 | memset (page->vaddr, POOL_POISON_FREED, pool->allocation); | ||
| 187 | #endif | ||
| 188 | list_add (&page->page_list, &pool->page_list); | ||
| 189 | page->in_use = 0; | ||
| 190 | } else { | ||
| 191 | kfree (page); | ||
| 192 | page = NULL; | ||
| 193 | } | ||
| 194 | return page; | ||
| 195 | } | ||
| 196 | |||
| 197 | |||
| 198 | static inline int | ||
| 199 | is_page_busy (int blocks, unsigned long *bitmap) | ||
| 200 | { | ||
| 201 | while (blocks > 0) { | ||
| 202 | if (*bitmap++ != ~0UL) | ||
| 203 | return 1; | ||
| 204 | blocks -= BITS_PER_LONG; | ||
| 205 | } | ||
| 206 | return 0; | ||
| 207 | } | ||
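For comparison with the replacement further down, this is the bookkeeping the old allocator relies on: one bit per block, a set bit meaning "free", and a page counting as busy while any bit is cleared. A minimal userspace sketch of that scheme follows; the sizes are hypothetical and nothing in it comes from the patch itself.

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG   (CHAR_BIT * sizeof(long))
#define BLOCKS_PER_PAGE 128	/* hypothetical pool->allocation / pool->size */
#define MAP_LONGS	((BLOCKS_PER_PAGE + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long bitmap[MAP_LONGS];	/* bit set == block free */

static int page_busy(void)
{
	size_t i;

	for (i = 0; i < MAP_LONGS; i++)
		if (bitmap[i] != ~0UL)	/* a cleared bit means a block is in use */
			return 1;
	return 0;
}

int main(void)
{
	size_t i;

	for (i = 0; i < MAP_LONGS; i++)
		bitmap[i] = ~0UL;	/* start with every block free */
	bitmap[0] &= ~1UL;		/* hand out block 0 */
	printf("page busy: %d\n", page_busy());	/* prints 1 */
	return 0;
}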
| 208 | |||
| 209 | static void | ||
| 210 | pool_free_page (struct dma_pool *pool, struct dma_page *page) | ||
| 211 | { | ||
| 212 | dma_addr_t dma = page->dma; | ||
| 213 | |||
| 214 | #ifdef CONFIG_DEBUG_SLAB | ||
| 215 | memset (page->vaddr, POOL_POISON_FREED, pool->allocation); | ||
| 216 | #endif | ||
| 217 | dma_free_coherent (pool->dev, pool->allocation, page->vaddr, dma); | ||
| 218 | list_del (&page->page_list); | ||
| 219 | kfree (page); | ||
| 220 | } | ||
| 221 | |||
| 222 | |||
| 223 | /** | ||
| 224 | * dma_pool_destroy - destroys a pool of dma memory blocks. | ||
| 225 | * @pool: dma pool that will be destroyed | ||
| 226 | * Context: !in_interrupt() | ||
| 227 | * | ||
| 228 | * Caller guarantees that no more memory from the pool is in use, | ||
| 229 | * and that nothing will try to use the pool after this call. | ||
| 230 | */ | ||
| 231 | void | ||
| 232 | dma_pool_destroy (struct dma_pool *pool) | ||
| 233 | { | ||
| 234 | mutex_lock(&pools_lock); | ||
| 235 | list_del (&pool->pools); | ||
| 236 | if (pool->dev && list_empty (&pool->dev->dma_pools)) | ||
| 237 | device_remove_file (pool->dev, &dev_attr_pools); | ||
| 238 | mutex_unlock(&pools_lock); | ||
| 239 | |||
| 240 | while (!list_empty (&pool->page_list)) { | ||
| 241 | struct dma_page *page; | ||
| 242 | page = list_entry (pool->page_list.next, | ||
| 243 | struct dma_page, page_list); | ||
| 244 | if (is_page_busy (pool->blocks_per_page, page->bitmap)) { | ||
| 245 | if (pool->dev) | ||
| 246 | dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n", | ||
| 247 | pool->name, page->vaddr); | ||
| 248 | else | ||
| 249 | printk (KERN_ERR "dma_pool_destroy %s, %p busy\n", | ||
| 250 | pool->name, page->vaddr); | ||
| 251 | /* leak the still-in-use consistent memory */ | ||
| 252 | list_del (&page->page_list); | ||
| 253 | kfree (page); | ||
| 254 | } else | ||
| 255 | pool_free_page (pool, page); | ||
| 256 | } | ||
| 257 | |||
| 258 | kfree (pool); | ||
| 259 | } | ||
| 260 | |||
| 261 | |||
| 262 | /** | ||
| 263 | * dma_pool_alloc - get a block of consistent memory | ||
| 264 | * @pool: dma pool that will produce the block | ||
| 265 | * @mem_flags: GFP_* bitmask | ||
| 266 | * @handle: pointer to dma address of block | ||
| 267 | * | ||
| 268 | * This returns the kernel virtual address of a currently unused block, | ||
| 269 | * and reports its dma address through the handle. | ||
| 270 | * If such a memory block can't be allocated, null is returned. | ||
| 271 | */ | ||
| 272 | void * | ||
| 273 | dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) | ||
| 274 | { | ||
| 275 | unsigned long flags; | ||
| 276 | struct dma_page *page; | ||
| 277 | int map, block; | ||
| 278 | size_t offset; | ||
| 279 | void *retval; | ||
| 280 | |||
| 281 | restart: | ||
| 282 | spin_lock_irqsave (&pool->lock, flags); | ||
| 283 | list_for_each_entry(page, &pool->page_list, page_list) { | ||
| 284 | int i; | ||
| 285 | /* only cachable accesses here ... */ | ||
| 286 | for (map = 0, i = 0; | ||
| 287 | i < pool->blocks_per_page; | ||
| 288 | i += BITS_PER_LONG, map++) { | ||
| 289 | if (page->bitmap [map] == 0) | ||
| 290 | continue; | ||
| 291 | block = ffz (~ page->bitmap [map]); | ||
| 292 | if ((i + block) < pool->blocks_per_page) { | ||
| 293 | clear_bit (block, &page->bitmap [map]); | ||
| 294 | offset = (BITS_PER_LONG * map) + block; | ||
| 295 | offset *= pool->size; | ||
| 296 | goto ready; | ||
| 297 | } | ||
| 298 | } | ||
| 299 | } | ||
| 300 | if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) { | ||
| 301 | if (mem_flags & __GFP_WAIT) { | ||
| 302 | DECLARE_WAITQUEUE (wait, current); | ||
| 303 | |||
| 304 | __set_current_state(TASK_INTERRUPTIBLE); | ||
| 305 | add_wait_queue (&pool->waitq, &wait); | ||
| 306 | spin_unlock_irqrestore (&pool->lock, flags); | ||
| 307 | |||
| 308 | schedule_timeout (POOL_TIMEOUT_JIFFIES); | ||
| 309 | |||
| 310 | remove_wait_queue (&pool->waitq, &wait); | ||
| 311 | goto restart; | ||
| 312 | } | ||
| 313 | retval = NULL; | ||
| 314 | goto done; | ||
| 315 | } | ||
| 316 | |||
| 317 | clear_bit (0, &page->bitmap [0]); | ||
| 318 | offset = 0; | ||
| 319 | ready: | ||
| 320 | page->in_use++; | ||
| 321 | retval = offset + page->vaddr; | ||
| 322 | *handle = offset + page->dma; | ||
| 323 | #ifdef CONFIG_DEBUG_SLAB | ||
| 324 | memset (retval, POOL_POISON_ALLOCATED, pool->size); | ||
| 325 | #endif | ||
| 326 | done: | ||
| 327 | spin_unlock_irqrestore (&pool->lock, flags); | ||
| 328 | return retval; | ||
| 329 | } | ||
| 330 | |||
| 331 | |||
| 332 | static struct dma_page * | ||
| 333 | pool_find_page (struct dma_pool *pool, dma_addr_t dma) | ||
| 334 | { | ||
| 335 | unsigned long flags; | ||
| 336 | struct dma_page *page; | ||
| 337 | |||
| 338 | spin_lock_irqsave (&pool->lock, flags); | ||
| 339 | list_for_each_entry(page, &pool->page_list, page_list) { | ||
| 340 | if (dma < page->dma) | ||
| 341 | continue; | ||
| 342 | if (dma < (page->dma + pool->allocation)) | ||
| 343 | goto done; | ||
| 344 | } | ||
| 345 | page = NULL; | ||
| 346 | done: | ||
| 347 | spin_unlock_irqrestore (&pool->lock, flags); | ||
| 348 | return page; | ||
| 349 | } | ||
| 350 | |||
| 351 | |||
| 352 | /** | ||
| 353 | * dma_pool_free - put block back into dma pool | ||
| 354 | * @pool: the dma pool holding the block | ||
| 355 | * @vaddr: virtual address of block | ||
| 356 | * @dma: dma address of block | ||
| 357 | * | ||
| 358 | * Caller promises neither device nor driver will again touch this block | ||
| 359 | * unless it is first re-allocated. | ||
| 360 | */ | ||
| 361 | void | ||
| 362 | dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma) | ||
| 363 | { | ||
| 364 | struct dma_page *page; | ||
| 365 | unsigned long flags; | ||
| 366 | int map, block; | ||
| 367 | |||
| 368 | if ((page = pool_find_page(pool, dma)) == NULL) { | ||
| 369 | if (pool->dev) | ||
| 370 | dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n", | ||
| 371 | pool->name, vaddr, (unsigned long) dma); | ||
| 372 | else | ||
| 373 | printk (KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n", | ||
| 374 | pool->name, vaddr, (unsigned long) dma); | ||
| 375 | return; | ||
| 376 | } | ||
| 377 | |||
| 378 | block = dma - page->dma; | ||
| 379 | block /= pool->size; | ||
| 380 | map = block / BITS_PER_LONG; | ||
| 381 | block %= BITS_PER_LONG; | ||
| 382 | |||
| 383 | #ifdef CONFIG_DEBUG_SLAB | ||
| 384 | if (((dma - page->dma) + (void *)page->vaddr) != vaddr) { | ||
| 385 | if (pool->dev) | ||
| 386 | dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n", | ||
| 387 | pool->name, vaddr, (unsigned long long) dma); | ||
| 388 | else | ||
| 389 | printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n", | ||
| 390 | pool->name, vaddr, (unsigned long long) dma); | ||
| 391 | return; | ||
| 392 | } | ||
| 393 | if (page->bitmap [map] & (1UL << block)) { | ||
| 394 | if (pool->dev) | ||
| 395 | dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n", | ||
| 396 | pool->name, (unsigned long long)dma); | ||
| 397 | else | ||
| 398 | printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n", | ||
| 399 | pool->name, (unsigned long long)dma); | ||
| 400 | return; | ||
| 401 | } | ||
| 402 | memset (vaddr, POOL_POISON_FREED, pool->size); | ||
| 403 | #endif | ||
| 404 | |||
| 405 | spin_lock_irqsave (&pool->lock, flags); | ||
| 406 | page->in_use--; | ||
| 407 | set_bit (block, &page->bitmap [map]); | ||
| 408 | if (waitqueue_active (&pool->waitq)) | ||
| 409 | wake_up (&pool->waitq); | ||
| 410 | /* | ||
| 411 | * Resist a temptation to do | ||
| 412 | * if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page); | ||
| 413 | * Better have a few empty pages hang around. | ||
| 414 | */ | ||
| 415 | spin_unlock_irqrestore (&pool->lock, flags); | ||
| 416 | } | ||
| 417 | |||
| 418 | /* | ||
| 419 | * Managed DMA pool | ||
| 420 | */ | ||
| 421 | static void dmam_pool_release(struct device *dev, void *res) | ||
| 422 | { | ||
| 423 | struct dma_pool *pool = *(struct dma_pool **)res; | ||
| 424 | |||
| 425 | dma_pool_destroy(pool); | ||
| 426 | } | ||
| 427 | |||
| 428 | static int dmam_pool_match(struct device *dev, void *res, void *match_data) | ||
| 429 | { | ||
| 430 | return *(struct dma_pool **)res == match_data; | ||
| 431 | } | ||
| 432 | |||
| 433 | /** | ||
| 434 | * dmam_pool_create - Managed dma_pool_create() | ||
| 435 | * @name: name of pool, for diagnostics | ||
| 436 | * @dev: device that will be doing the DMA | ||
| 437 | * @size: size of the blocks in this pool. | ||
| 438 | * @align: alignment requirement for blocks; must be a power of two | ||
| 439 | * @allocation: returned blocks won't cross this boundary (or zero) | ||
| 440 | * | ||
| 441 | * Managed dma_pool_create(). DMA pool created with this function is | ||
| 442 | * automatically destroyed on driver detach. | ||
| 443 | */ | ||
| 444 | struct dma_pool *dmam_pool_create(const char *name, struct device *dev, | ||
| 445 | size_t size, size_t align, size_t allocation) | ||
| 446 | { | ||
| 447 | struct dma_pool **ptr, *pool; | ||
| 448 | |||
| 449 | ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL); | ||
| 450 | if (!ptr) | ||
| 451 | return NULL; | ||
| 452 | |||
| 453 | pool = *ptr = dma_pool_create(name, dev, size, align, allocation); | ||
| 454 | if (pool) | ||
| 455 | devres_add(dev, ptr); | ||
| 456 | else | ||
| 457 | devres_free(ptr); | ||
| 458 | |||
| 459 | return pool; | ||
| 460 | } | ||
| 461 | |||
| 462 | /** | ||
| 463 | * dmam_pool_destroy - Managed dma_pool_destroy() | ||
| 464 | * @pool: dma pool that will be destroyed | ||
| 465 | * | ||
| 466 | * Managed dma_pool_destroy(). | ||
| 467 | */ | ||
| 468 | void dmam_pool_destroy(struct dma_pool *pool) | ||
| 469 | { | ||
| 470 | struct device *dev = pool->dev; | ||
| 471 | |||
| 472 | dma_pool_destroy(pool); | ||
| 473 | WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool)); | ||
| 474 | } | ||
| 475 | |||
| 476 | EXPORT_SYMBOL (dma_pool_create); | ||
| 477 | EXPORT_SYMBOL (dma_pool_destroy); | ||
| 478 | EXPORT_SYMBOL (dma_pool_alloc); | ||
| 479 | EXPORT_SYMBOL (dma_pool_free); | ||
| 480 | EXPORT_SYMBOL (dmam_pool_create); | ||
| 481 | EXPORT_SYMBOL (dmam_pool_destroy); | ||
diff --git a/mm/Makefile b/mm/Makefile
index 44e2528af70c..4af5dff37277 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -16,6 +16,7 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
 obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o
 obj-$(CONFIG_BOUNCE) += bounce.o
 obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
+obj-$(CONFIG_HAS_DMA) += dmapool.o
 obj-$(CONFIG_HUGETLBFS) += hugetlb.o
 obj-$(CONFIG_NUMA) += mempolicy.o
 obj-$(CONFIG_SPARSEMEM) += sparse.o
diff --git a/mm/dmapool.c b/mm/dmapool.c
new file mode 100644
index 000000000000..34aaac451a96
--- /dev/null
+++ b/mm/dmapool.c
@@ -0,0 +1,500 @@
| 1 | /* | ||
| 2 | * DMA Pool allocator | ||
| 3 | * | ||
| 4 | * Copyright 2001 David Brownell | ||
| 5 | * Copyright 2007 Intel Corporation | ||
| 6 | * Author: Matthew Wilcox <willy@linux.intel.com> | ||
| 7 | * | ||
| 8 | * This software may be redistributed and/or modified under the terms of | ||
| 9 | * the GNU General Public License ("GPL") version 2 as published by the | ||
| 10 | * Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This allocator returns small blocks of a given size which are DMA-able by | ||
| 13 | * the given device. It uses the dma_alloc_coherent page allocator to get | ||
| 14 | * new pages, then splits them up into blocks of the required size. | ||
| 15 | * Many older drivers still have their own code to do this. | ||
| 16 | * | ||
| 17 | * The current design of this allocator is fairly simple. The pool is | ||
| 18 | * represented by the 'struct dma_pool' which keeps a doubly-linked list of | ||
| 19 | * allocated pages. Each page in the page_list is split into blocks of at | ||
| 20 | * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked | ||
| 21 | * list of free blocks within the page. Used blocks aren't tracked, but we | ||
| 22 | * keep a count of how many are currently allocated from each page. | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include <linux/device.h> | ||
| 26 | #include <linux/dma-mapping.h> | ||
| 27 | #include <linux/dmapool.h> | ||
| 28 | #include <linux/kernel.h> | ||
| 29 | #include <linux/list.h> | ||
| 30 | #include <linux/module.h> | ||
| 31 | #include <linux/mutex.h> | ||
| 32 | #include <linux/poison.h> | ||
| 33 | #include <linux/sched.h> | ||
| 34 | #include <linux/slab.h> | ||
| 35 | #include <linux/spinlock.h> | ||
| 36 | #include <linux/string.h> | ||
| 37 | #include <linux/types.h> | ||
| 38 | #include <linux/wait.h> | ||
| 39 | |||
| 40 | struct dma_pool { /* the pool */ | ||
| 41 | struct list_head page_list; | ||
| 42 | spinlock_t lock; | ||
| 43 | size_t size; | ||
| 44 | struct device *dev; | ||
| 45 | size_t allocation; | ||
| 46 | size_t boundary; | ||
| 47 | char name[32]; | ||
| 48 | wait_queue_head_t waitq; | ||
| 49 | struct list_head pools; | ||
| 50 | }; | ||
| 51 | |||
| 52 | struct dma_page { /* cacheable header for 'allocation' bytes */ | ||
| 53 | struct list_head page_list; | ||
| 54 | void *vaddr; | ||
| 55 | dma_addr_t dma; | ||
| 56 | unsigned int in_use; | ||
| 57 | unsigned int offset; | ||
| 58 | }; | ||
| 59 | |||
| 60 | #define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000) | ||
| 61 | |||
| 62 | static DEFINE_MUTEX(pools_lock); | ||
| 63 | |||
| 64 | static ssize_t | ||
| 65 | show_pools(struct device *dev, struct device_attribute *attr, char *buf) | ||
| 66 | { | ||
| 67 | unsigned temp; | ||
| 68 | unsigned size; | ||
| 69 | char *next; | ||
| 70 | struct dma_page *page; | ||
| 71 | struct dma_pool *pool; | ||
| 72 | |||
| 73 | next = buf; | ||
| 74 | size = PAGE_SIZE; | ||
| 75 | |||
| 76 | temp = scnprintf(next, size, "poolinfo - 0.1\n"); | ||
| 77 | size -= temp; | ||
| 78 | next += temp; | ||
| 79 | |||
| 80 | mutex_lock(&pools_lock); | ||
| 81 | list_for_each_entry(pool, &dev->dma_pools, pools) { | ||
| 82 | unsigned pages = 0; | ||
| 83 | unsigned blocks = 0; | ||
| 84 | |||
| 85 | list_for_each_entry(page, &pool->page_list, page_list) { | ||
| 86 | pages++; | ||
| 87 | blocks += page->in_use; | ||
| 88 | } | ||
| 89 | |||
| 90 | /* per-pool info, no real statistics yet */ | ||
| 91 | temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n", | ||
| 92 | pool->name, blocks, | ||
| 93 | pages * (pool->allocation / pool->size), | ||
| 94 | pool->size, pages); | ||
| 95 | size -= temp; | ||
| 96 | next += temp; | ||
| 97 | } | ||
| 98 | mutex_unlock(&pools_lock); | ||
| 99 | |||
| 100 | return PAGE_SIZE - size; | ||
| 101 | } | ||
| 102 | |||
| 103 | static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL); | ||
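The "pools" attribute registered here is what appears under the device in sysfs. Going by the scnprintf() format above (name, blocks in use, total blocks, block size, pages), reading it might look roughly like the following; the device path, pool names and numbers are invented for illustration:

$ cat /sys/devices/pci0000:00/0000:00:1d.7/pools
poolinfo - 0.1
buffer-2048         1    4 2048  2
buffer-512          3   16  512  2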
| 104 | |||
| 105 | /** | ||
| 106 | * dma_pool_create - Creates a pool of consistent memory blocks, for dma. | ||
| 107 | * @name: name of pool, for diagnostics | ||
| 108 | * @dev: device that will be doing the DMA | ||
| 109 | * @size: size of the blocks in this pool. | ||
| 110 | * @align: alignment requirement for blocks; must be a power of two | ||
| 111 | * @boundary: returned blocks won't cross this power of two boundary | ||
| 112 | * Context: !in_interrupt() | ||
| 113 | * | ||
| 114 | * Returns a dma allocation pool with the requested characteristics, or | ||
| 115 | * null if one can't be created. Given one of these pools, dma_pool_alloc() | ||
| 116 | * may be used to allocate memory. Such memory will all have "consistent" | ||
| 117 | * DMA mappings, accessible by the device and its driver without using | ||
| 118 | * cache flushing primitives. The actual size of blocks allocated may be | ||
| 119 | * larger than requested because of alignment. | ||
| 120 | * | ||
| 121 | * If @boundary is nonzero, objects returned from dma_pool_alloc() won't | ||
| 122 | * cross that size boundary. This is useful for devices which have | ||
| 123 | * addressing restrictions on individual DMA transfers, such as not crossing | ||
| 124 | * boundaries of 4KBytes. | ||
| 125 | */ | ||
| 126 | struct dma_pool *dma_pool_create(const char *name, struct device *dev, | ||
| 127 | size_t size, size_t align, size_t boundary) | ||
| 128 | { | ||
| 129 | struct dma_pool *retval; | ||
| 130 | size_t allocation; | ||
| 131 | |||
| 132 | if (align == 0) { | ||
| 133 | align = 1; | ||
| 134 | } else if (align & (align - 1)) { | ||
| 135 | return NULL; | ||
| 136 | } | ||
| 137 | |||
| 138 | if (size == 0) { | ||
| 139 | return NULL; | ||
| 140 | } else if (size < 4) { | ||
| 141 | size = 4; | ||
| 142 | } | ||
| 143 | |||
| 144 | if ((size % align) != 0) | ||
| 145 | size = ALIGN(size, align); | ||
| 146 | |||
| 147 | allocation = max_t(size_t, size, PAGE_SIZE); | ||
| 148 | |||
| 149 | if (!boundary) { | ||
| 150 | boundary = allocation; | ||
| 151 | } else if ((boundary < size) || (boundary & (boundary - 1))) { | ||
| 152 | return NULL; | ||
| 153 | } | ||
| 154 | |||
| 155 | retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev)); | ||
| 156 | if (!retval) | ||
| 157 | return retval; | ||
| 158 | |||
| 159 | strlcpy(retval->name, name, sizeof(retval->name)); | ||
| 160 | |||
| 161 | retval->dev = dev; | ||
| 162 | |||
| 163 | INIT_LIST_HEAD(&retval->page_list); | ||
| 164 | spin_lock_init(&retval->lock); | ||
| 165 | retval->size = size; | ||
| 166 | retval->boundary = boundary; | ||
| 167 | retval->allocation = allocation; | ||
| 168 | init_waitqueue_head(&retval->waitq); | ||
| 169 | |||
| 170 | if (dev) { | ||
| 171 | int ret; | ||
| 172 | |||
| 173 | mutex_lock(&pools_lock); | ||
| 174 | if (list_empty(&dev->dma_pools)) | ||
| 175 | ret = device_create_file(dev, &dev_attr_pools); | ||
| 176 | else | ||
| 177 | ret = 0; | ||
| 178 | /* note: not currently insisting "name" be unique */ | ||
| 179 | if (!ret) | ||
| 180 | list_add(&retval->pools, &dev->dma_pools); | ||
| 181 | else { | ||
| 182 | kfree(retval); | ||
| 183 | retval = NULL; | ||
| 184 | } | ||
| 185 | mutex_unlock(&pools_lock); | ||
| 186 | } else | ||
| 187 | INIT_LIST_HEAD(&retval->pools); | ||
| 188 | |||
| 189 | return retval; | ||
| 190 | } | ||
| 191 | EXPORT_SYMBOL(dma_pool_create); | ||
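As a usage sketch (the driver and all names are hypothetical, not part of this file): a driver with small fixed-size hardware descriptors would typically create one pool at probe time, letting the pool round the size up to the alignment (e.g. a 60-byte request with align 64 becomes 64 bytes) and passing a boundary when descriptors must not straddle one.

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>

#define FOO_DESC_SIZE	64	/* hypothetical hardware descriptor */
#define FOO_DESC_ALIGN	64

static struct dma_pool *foo_desc_pool;

static int foo_probe(struct device *dev)
{
	/* 64-byte blocks, 64-byte aligned, never crossing a 4 KiB boundary */
	foo_desc_pool = dma_pool_create("foo-desc", dev, FOO_DESC_SIZE,
					FOO_DESC_ALIGN, 4096);
	if (!foo_desc_pool)
		return -ENOMEM;
	return 0;
}

static void foo_remove(struct device *dev)
{
	/* caller must have freed every block before destroying the pool */
	dma_pool_destroy(foo_desc_pool);
	foo_desc_pool = NULL;
}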
| 192 | |||
| 193 | static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) | ||
| 194 | { | ||
| 195 | unsigned int offset = 0; | ||
| 196 | unsigned int next_boundary = pool->boundary; | ||
| 197 | |||
| 198 | do { | ||
| 199 | unsigned int next = offset + pool->size; | ||
| 200 | if (unlikely((next + pool->size) >= next_boundary)) { | ||
| 201 | next = next_boundary; | ||
| 202 | next_boundary += pool->boundary; | ||
| 203 | } | ||
| 204 | *(int *)(page->vaddr + offset) = next; | ||
| 205 | offset = next; | ||
| 206 | } while (offset < pool->allocation); | ||
| 207 | } | ||
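The loop above threads a singly-linked free list through the page itself: each free block's first word holds the offset of the next free block, and page->offset points at the head, so allocation and free are O(1) pushes and pops. A standalone illustration of the same idea in plain C (hypothetical sizes, no DMA, boundary handling left out by assuming boundary == allocation):

#include <stdio.h>
#include <stdlib.h>

#define ALLOCATION 4096	/* hypothetical pool->allocation */
#define BLOCK_SIZE  256	/* hypothetical pool->size */

int main(void)
{
	char *page = malloc(ALLOCATION);
	unsigned int head, offset;

	if (!page)
		return 1;

	/* Thread the free chain through the page, as pool_initialise_page()
	 * does; the last block stores ALLOCATION, meaning "no next block". */
	for (offset = 0; offset < ALLOCATION; offset += BLOCK_SIZE)
		*(unsigned int *)(page + offset) = offset + BLOCK_SIZE;
	head = 0;				/* plays the role of page->offset */

	/* Allocate two blocks: pop from the head of the chain. */
	printf("alloc -> offset %u\n", head);	/* 0 */
	head = *(unsigned int *)(page + head);
	printf("alloc -> offset %u\n", head);	/* 256 */
	head = *(unsigned int *)(page + head);	/* head is now 512 */

	/* Free the first block: push offset 0 back onto the head. */
	*(unsigned int *)(page + 0) = head;
	head = 0;

	printf("next free offset: %u\n", head);	/* 0 again */
	free(page);
	return 0;
}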
| 208 | |||
| 209 | static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) | ||
| 210 | { | ||
| 211 | struct dma_page *page; | ||
| 212 | |||
| 213 | page = kmalloc(sizeof(*page), mem_flags); | ||
| 214 | if (!page) | ||
| 215 | return NULL; | ||
| 216 | page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, | ||
| 217 | &page->dma, mem_flags); | ||
| 218 | if (page->vaddr) { | ||
| 219 | #ifdef CONFIG_DEBUG_SLAB | ||
| 220 | memset(page->vaddr, POOL_POISON_FREED, pool->allocation); | ||
| 221 | #endif | ||
| 222 | pool_initialise_page(pool, page); | ||
| 223 | list_add(&page->page_list, &pool->page_list); | ||
| 224 | page->in_use = 0; | ||
| 225 | page->offset = 0; | ||
| 226 | } else { | ||
| 227 | kfree(page); | ||
| 228 | page = NULL; | ||
| 229 | } | ||
| 230 | return page; | ||
| 231 | } | ||
| 232 | |||
| 233 | static inline int is_page_busy(struct dma_page *page) | ||
| 234 | { | ||
| 235 | return page->in_use != 0; | ||
| 236 | } | ||
| 237 | |||
| 238 | static void pool_free_page(struct dma_pool *pool, struct dma_page *page) | ||
| 239 | { | ||
| 240 | dma_addr_t dma = page->dma; | ||
| 241 | |||
| 242 | #ifdef CONFIG_DEBUG_SLAB | ||
| 243 | memset(page->vaddr, POOL_POISON_FREED, pool->allocation); | ||
| 244 | #endif | ||
| 245 | dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma); | ||
| 246 | list_del(&page->page_list); | ||
| 247 | kfree(page); | ||
| 248 | } | ||
| 249 | |||
| 250 | /** | ||
| 251 | * dma_pool_destroy - destroys a pool of dma memory blocks. | ||
| 252 | * @pool: dma pool that will be destroyed | ||
| 253 | * Context: !in_interrupt() | ||
| 254 | * | ||
| 255 | * Caller guarantees that no more memory from the pool is in use, | ||
| 256 | * and that nothing will try to use the pool after this call. | ||
| 257 | */ | ||
| 258 | void dma_pool_destroy(struct dma_pool *pool) | ||
| 259 | { | ||
| 260 | mutex_lock(&pools_lock); | ||
| 261 | list_del(&pool->pools); | ||
| 262 | if (pool->dev && list_empty(&pool->dev->dma_pools)) | ||
| 263 | device_remove_file(pool->dev, &dev_attr_pools); | ||
| 264 | mutex_unlock(&pools_lock); | ||
| 265 | |||
| 266 | while (!list_empty(&pool->page_list)) { | ||
| 267 | struct dma_page *page; | ||
| 268 | page = list_entry(pool->page_list.next, | ||
| 269 | struct dma_page, page_list); | ||
| 270 | if (is_page_busy(page)) { | ||
| 271 | if (pool->dev) | ||
| 272 | dev_err(pool->dev, | ||
| 273 | "dma_pool_destroy %s, %p busy\n", | ||
| 274 | pool->name, page->vaddr); | ||
| 275 | else | ||
| 276 | printk(KERN_ERR | ||
| 277 | "dma_pool_destroy %s, %p busy\n", | ||
| 278 | pool->name, page->vaddr); | ||
| 279 | /* leak the still-in-use consistent memory */ | ||
| 280 | list_del(&page->page_list); | ||
| 281 | kfree(page); | ||
| 282 | } else | ||
| 283 | pool_free_page(pool, page); | ||
| 284 | } | ||
| 285 | |||
| 286 | kfree(pool); | ||
| 287 | } | ||
| 288 | EXPORT_SYMBOL(dma_pool_destroy); | ||
| 289 | |||
| 290 | /** | ||
| 291 | * dma_pool_alloc - get a block of consistent memory | ||
| 292 | * @pool: dma pool that will produce the block | ||
| 293 | * @mem_flags: GFP_* bitmask | ||
| 294 | * @handle: pointer to dma address of block | ||
| 295 | * | ||
| 296 | * This returns the kernel virtual address of a currently unused block, | ||
| 297 | * and reports its dma address through the handle. | ||
| 298 | * If such a memory block can't be allocated, %NULL is returned. | ||
| 299 | */ | ||
| 300 | void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, | ||
| 301 | dma_addr_t *handle) | ||
| 302 | { | ||
| 303 | unsigned long flags; | ||
| 304 | struct dma_page *page; | ||
| 305 | size_t offset; | ||
| 306 | void *retval; | ||
| 307 | |||
| 308 | spin_lock_irqsave(&pool->lock, flags); | ||
| 309 | restart: | ||
| 310 | list_for_each_entry(page, &pool->page_list, page_list) { | ||
| 311 | if (page->offset < pool->allocation) | ||
| 312 | goto ready; | ||
| 313 | } | ||
| 314 | page = pool_alloc_page(pool, GFP_ATOMIC); | ||
| 315 | if (!page) { | ||
| 316 | if (mem_flags & __GFP_WAIT) { | ||
| 317 | DECLARE_WAITQUEUE(wait, current); | ||
| 318 | |||
| 319 | __set_current_state(TASK_INTERRUPTIBLE); | ||
| 320 | __add_wait_queue(&pool->waitq, &wait); | ||
| 321 | spin_unlock_irqrestore(&pool->lock, flags); | ||
| 322 | |||
| 323 | schedule_timeout(POOL_TIMEOUT_JIFFIES); | ||
| 324 | |||
| 325 | spin_lock_irqsave(&pool->lock, flags); | ||
| 326 | __remove_wait_queue(&pool->waitq, &wait); | ||
| 327 | goto restart; | ||
| 328 | } | ||
| 329 | retval = NULL; | ||
| 330 | goto done; | ||
| 331 | } | ||
| 332 | |||
| 333 | ready: | ||
| 334 | page->in_use++; | ||
| 335 | offset = page->offset; | ||
| 336 | page->offset = *(int *)(page->vaddr + offset); | ||
| 337 | retval = offset + page->vaddr; | ||
| 338 | *handle = offset + page->dma; | ||
| 339 | #ifdef CONFIG_DEBUG_SLAB | ||
| 340 | memset(retval, POOL_POISON_ALLOCATED, pool->size); | ||
| 341 | #endif | ||
| 342 | done: | ||
| 343 | spin_unlock_irqrestore(&pool->lock, flags); | ||
| 344 | return retval; | ||
| 345 | } | ||
| 346 | EXPORT_SYMBOL(dma_pool_alloc); | ||
| 347 | |||
| 348 | static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) | ||
| 349 | { | ||
| 350 | unsigned long flags; | ||
| 351 | struct dma_page *page; | ||
| 352 | |||
| 353 | spin_lock_irqsave(&pool->lock, flags); | ||
| 354 | list_for_each_entry(page, &pool->page_list, page_list) { | ||
| 355 | if (dma < page->dma) | ||
| 356 | continue; | ||
| 357 | if (dma < (page->dma + pool->allocation)) | ||
| 358 | goto done; | ||
| 359 | } | ||
| 360 | page = NULL; | ||
| 361 | done: | ||
| 362 | spin_unlock_irqrestore(&pool->lock, flags); | ||
| 363 | return page; | ||
| 364 | } | ||
| 365 | |||
| 366 | /** | ||
| 367 | * dma_pool_free - put block back into dma pool | ||
| 368 | * @pool: the dma pool holding the block | ||
| 369 | * @vaddr: virtual address of block | ||
| 370 | * @dma: dma address of block | ||
| 371 | * | ||
| 372 | * Caller promises neither device nor driver will again touch this block | ||
| 373 | * unless it is first re-allocated. | ||
| 374 | */ | ||
| 375 | void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) | ||
| 376 | { | ||
| 377 | struct dma_page *page; | ||
| 378 | unsigned long flags; | ||
| 379 | unsigned int offset; | ||
| 380 | |||
| 381 | page = pool_find_page(pool, dma); | ||
| 382 | if (!page) { | ||
| 383 | if (pool->dev) | ||
| 384 | dev_err(pool->dev, | ||
| 385 | "dma_pool_free %s, %p/%lx (bad dma)\n", | ||
| 386 | pool->name, vaddr, (unsigned long)dma); | ||
| 387 | else | ||
| 388 | printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n", | ||
| 389 | pool->name, vaddr, (unsigned long)dma); | ||
| 390 | return; | ||
| 391 | } | ||
| 392 | |||
| 393 | offset = vaddr - page->vaddr; | ||
| 394 | #ifdef CONFIG_DEBUG_SLAB | ||
| 395 | if ((dma - page->dma) != offset) { | ||
| 396 | if (pool->dev) | ||
| 397 | dev_err(pool->dev, | ||
| 398 | "dma_pool_free %s, %p (bad vaddr)/%Lx\n", | ||
| 399 | pool->name, vaddr, (unsigned long long)dma); | ||
| 400 | else | ||
| 401 | printk(KERN_ERR | ||
| 402 | "dma_pool_free %s, %p (bad vaddr)/%Lx\n", | ||
| 403 | pool->name, vaddr, (unsigned long long)dma); | ||
| 404 | return; | ||
| 405 | } | ||
| 406 | { | ||
| 407 | unsigned int chain = page->offset; | ||
| 408 | while (chain < pool->allocation) { | ||
| 409 | if (chain != offset) { | ||
| 410 | chain = *(int *)(page->vaddr + chain); | ||
| 411 | continue; | ||
| 412 | } | ||
| 413 | if (pool->dev) | ||
| 414 | dev_err(pool->dev, "dma_pool_free %s, dma %Lx " | ||
| 415 | "already free\n", pool->name, | ||
| 416 | (unsigned long long)dma); | ||
| 417 | else | ||
| 418 | printk(KERN_ERR "dma_pool_free %s, dma %Lx " | ||
| 419 | "already free\n", pool->name, | ||
| 420 | (unsigned long long)dma); | ||
| 421 | return; | ||
| 422 | } | ||
| 423 | } | ||
| 424 | memset(vaddr, POOL_POISON_FREED, pool->size); | ||
| 425 | #endif | ||
| 426 | |||
| 427 | spin_lock_irqsave(&pool->lock, flags); | ||
| 428 | page->in_use--; | ||
| 429 | *(int *)vaddr = page->offset; | ||
| 430 | page->offset = offset; | ||
| 431 | if (waitqueue_active(&pool->waitq)) | ||
| 432 | wake_up_locked(&pool->waitq); | ||
| 433 | /* | ||
| 434 | * Resist a temptation to do | ||
| 435 | * if (!is_page_busy(page)) pool_free_page(pool, page); | ||
| 436 | * Better have a few empty pages hang around. | ||
| 437 | */ | ||
| 438 | spin_unlock_irqrestore(&pool->lock, flags); | ||
| 439 | } | ||
| 440 | EXPORT_SYMBOL(dma_pool_free); | ||
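Tying the two calls together, a hypothetical submission path (names invented) allocates a block, hands its DMA address to the device, and returns the exact vaddr/dma pair to the pool when the hardware is done with it:

#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Assumes a pool created earlier with dma_pool_create() or dmam_pool_create(). */
static int foo_queue_command(struct dma_pool *pool)
{
	dma_addr_t desc_dma;
	void *desc;

	desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
	if (!desc)
		return -ENOMEM;

	/* ... fill in the descriptor, give desc_dma to the hardware ... */

	/* When the hardware is finished, return the block; it must be the
	 * same vaddr/dma pair that dma_pool_alloc() handed out. */
	dma_pool_free(pool, desc, desc_dma);
	return 0;
}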
| 441 | |||
| 442 | /* | ||
| 443 | * Managed DMA pool | ||
| 444 | */ | ||
| 445 | static void dmam_pool_release(struct device *dev, void *res) | ||
| 446 | { | ||
| 447 | struct dma_pool *pool = *(struct dma_pool **)res; | ||
| 448 | |||
| 449 | dma_pool_destroy(pool); | ||
| 450 | } | ||
| 451 | |||
| 452 | static int dmam_pool_match(struct device *dev, void *res, void *match_data) | ||
| 453 | { | ||
| 454 | return *(struct dma_pool **)res == match_data; | ||
| 455 | } | ||
| 456 | |||
| 457 | /** | ||
| 458 | * dmam_pool_create - Managed dma_pool_create() | ||
| 459 | * @name: name of pool, for diagnostics | ||
| 460 | * @dev: device that will be doing the DMA | ||
| 461 | * @size: size of the blocks in this pool. | ||
| 462 | * @align: alignment requirement for blocks; must be a power of two | ||
| 463 | * @allocation: returned blocks won't cross this boundary (or zero) | ||
| 464 | * | ||
| 465 | * Managed dma_pool_create(). DMA pool created with this function is | ||
| 466 | * automatically destroyed on driver detach. | ||
| 467 | */ | ||
| 468 | struct dma_pool *dmam_pool_create(const char *name, struct device *dev, | ||
| 469 | size_t size, size_t align, size_t allocation) | ||
| 470 | { | ||
| 471 | struct dma_pool **ptr, *pool; | ||
| 472 | |||
| 473 | ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL); | ||
| 474 | if (!ptr) | ||
| 475 | return NULL; | ||
| 476 | |||
| 477 | pool = *ptr = dma_pool_create(name, dev, size, align, allocation); | ||
| 478 | if (pool) | ||
| 479 | devres_add(dev, ptr); | ||
| 480 | else | ||
| 481 | devres_free(ptr); | ||
| 482 | |||
| 483 | return pool; | ||
| 484 | } | ||
| 485 | EXPORT_SYMBOL(dmam_pool_create); | ||
| 486 | |||
| 487 | /** | ||
| 488 | * dmam_pool_destroy - Managed dma_pool_destroy() | ||
| 489 | * @pool: dma pool that will be destroyed | ||
| 490 | * | ||
| 491 | * Managed dma_pool_destroy(). | ||
| 492 | */ | ||
| 493 | void dmam_pool_destroy(struct dma_pool *pool) | ||
| 494 | { | ||
| 495 | struct device *dev = pool->dev; | ||
| 496 | |||
| 497 | dma_pool_destroy(pool); | ||
| 498 | WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool)); | ||
| 499 | } | ||
| 500 | EXPORT_SYMBOL(dmam_pool_destroy); | ||
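Finally, a hedged sketch of the managed variant in use (hypothetical driver): because the devres release above calls dma_pool_destroy(), the probe error paths and the remove routine don't need to tear the pool down themselves.

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>

static int bar_probe(struct device *dev)
{
	struct dma_pool *pool;

	/* destroyed automatically by devres when the driver detaches */
	pool = dmam_pool_create("bar-desc", dev, 32, 8, 0);
	if (!pool)
		return -ENOMEM;

	dev_set_drvdata(dev, pool);
	return 0;
}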
