author     Matthew Wilcox <matthew@wil.cx>    2007-12-03 12:04:31 -0500
committer  Matthew Wilcox <matthew@wil.cx>    2007-12-04 10:39:55 -0500
commit     e87aa773747fb5e4217d716ea22a573c03b6693a (patch)
tree       4f2dc318c7aa69f49e250e9dc12e58dde7c18d6f /mm
parent     141e9d4b5492499c4735d764b599c21e83dac154 (diff)
dmapool: Fix style problems
Run Lindent and fix all issues reported by checkpatch.pl
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Diffstat (limited to 'mm')
-rw-r--r--  mm/dmapool.c  288
1 file changed, 142 insertions, 146 deletions
diff --git a/mm/dmapool.c b/mm/dmapool.c
index b5034dc72a05..92e886d37e90 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -15,32 +15,32 @@
15 | * This should probably be sharing the guts of the slab allocator. | 15 | * This should probably be sharing the guts of the slab allocator. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | struct dma_pool { /* the pool */ | 18 | struct dma_pool { /* the pool */ |
19 | struct list_head page_list; | 19 | struct list_head page_list; |
20 | spinlock_t lock; | 20 | spinlock_t lock; |
21 | size_t blocks_per_page; | 21 | size_t blocks_per_page; |
22 | size_t size; | 22 | size_t size; |
23 | struct device *dev; | 23 | struct device *dev; |
24 | size_t allocation; | 24 | size_t allocation; |
25 | char name [32]; | 25 | char name[32]; |
26 | wait_queue_head_t waitq; | 26 | wait_queue_head_t waitq; |
27 | struct list_head pools; | 27 | struct list_head pools; |
28 | }; | 28 | }; |
29 | 29 | ||
30 | struct dma_page { /* cacheable header for 'allocation' bytes */ | 30 | struct dma_page { /* cacheable header for 'allocation' bytes */ |
31 | struct list_head page_list; | 31 | struct list_head page_list; |
32 | void *vaddr; | 32 | void *vaddr; |
33 | dma_addr_t dma; | 33 | dma_addr_t dma; |
34 | unsigned in_use; | 34 | unsigned in_use; |
35 | unsigned long bitmap [0]; | 35 | unsigned long bitmap[0]; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | #define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000) | 38 | #define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000) |
39 | 39 | ||
40 | static DEFINE_MUTEX (pools_lock); | 40 | static DEFINE_MUTEX(pools_lock); |
41 | 41 | ||
42 | static ssize_t | 42 | static ssize_t |
43 | show_pools (struct device *dev, struct device_attribute *attr, char *buf) | 43 | show_pools(struct device *dev, struct device_attribute *attr, char *buf) |
44 | { | 44 | { |
45 | unsigned temp; | 45 | unsigned temp; |
46 | unsigned size; | 46 | unsigned size; |
@@ -67,9 +67,9 @@ show_pools (struct device *dev, struct device_attribute *attr, char *buf)
67 | 67 | ||
68 | /* per-pool info, no real statistics yet */ | 68 | /* per-pool info, no real statistics yet */ |
69 | temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n", | 69 | temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n", |
70 | pool->name, | 70 | pool->name, |
71 | blocks, pages * pool->blocks_per_page, | 71 | blocks, pages * pool->blocks_per_page, |
72 | pool->size, pages); | 72 | pool->size, pages); |
73 | size -= temp; | 73 | size -= temp; |
74 | next += temp; | 74 | next += temp; |
75 | } | 75 | } |
@@ -77,7 +77,8 @@ show_pools (struct device *dev, struct device_attribute *attr, char *buf)
77 | 77 | ||
78 | return PAGE_SIZE - size; | 78 | return PAGE_SIZE - size; |
79 | } | 79 | } |
80 | static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL); | 80 | |
81 | static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL); | ||
81 | 82 | ||
82 | /** | 83 | /** |
83 | * dma_pool_create - Creates a pool of consistent memory blocks, for dma. | 84 | * dma_pool_create - Creates a pool of consistent memory blocks, for dma. |
@@ -100,11 +101,10 @@ static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
100 | * addressing restrictions on individual DMA transfers, such as not crossing | 101 | * addressing restrictions on individual DMA transfers, such as not crossing |
101 | * boundaries of 4KBytes. | 102 | * boundaries of 4KBytes. |
102 | */ | 103 | */ |
103 | struct dma_pool * | 104 | struct dma_pool *dma_pool_create(const char *name, struct device *dev, |
104 | dma_pool_create (const char *name, struct device *dev, | 105 | size_t size, size_t align, size_t allocation) |
105 | size_t size, size_t align, size_t allocation) | ||
106 | { | 106 | { |
107 | struct dma_pool *retval; | 107 | struct dma_pool *retval; |
108 | 108 | ||
109 | if (align == 0) | 109 | if (align == 0) |
110 | align = 1; | 110 | align = 1; |
@@ -122,81 +122,79 @@ dma_pool_create (const char *name, struct device *dev,
122 | allocation = size; | 122 | allocation = size; |
123 | else | 123 | else |
124 | allocation = PAGE_SIZE; | 124 | allocation = PAGE_SIZE; |
125 | // FIXME: round up for less fragmentation | 125 | /* FIXME: round up for less fragmentation */ |
126 | } else if (allocation < size) | 126 | } else if (allocation < size) |
127 | return NULL; | 127 | return NULL; |
128 | 128 | ||
129 | if (!(retval = kmalloc_node (sizeof *retval, GFP_KERNEL, dev_to_node(dev)))) | 129 | if (! |
130 | (retval = | ||
131 | kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev)))) | ||
130 | return retval; | 132 | return retval; |
131 | 133 | ||
132 | strlcpy (retval->name, name, sizeof retval->name); | 134 | strlcpy(retval->name, name, sizeof retval->name); |
133 | 135 | ||
134 | retval->dev = dev; | 136 | retval->dev = dev; |
135 | 137 | ||
136 | INIT_LIST_HEAD (&retval->page_list); | 138 | INIT_LIST_HEAD(&retval->page_list); |
137 | spin_lock_init (&retval->lock); | 139 | spin_lock_init(&retval->lock); |
138 | retval->size = size; | 140 | retval->size = size; |
139 | retval->allocation = allocation; | 141 | retval->allocation = allocation; |
140 | retval->blocks_per_page = allocation / size; | 142 | retval->blocks_per_page = allocation / size; |
141 | init_waitqueue_head (&retval->waitq); | 143 | init_waitqueue_head(&retval->waitq); |
142 | 144 | ||
143 | if (dev) { | 145 | if (dev) { |
144 | int ret; | 146 | int ret; |
145 | 147 | ||
146 | mutex_lock(&pools_lock); | 148 | mutex_lock(&pools_lock); |
147 | if (list_empty (&dev->dma_pools)) | 149 | if (list_empty(&dev->dma_pools)) |
148 | ret = device_create_file (dev, &dev_attr_pools); | 150 | ret = device_create_file(dev, &dev_attr_pools); |
149 | else | 151 | else |
150 | ret = 0; | 152 | ret = 0; |
151 | /* note: not currently insisting "name" be unique */ | 153 | /* note: not currently insisting "name" be unique */ |
152 | if (!ret) | 154 | if (!ret) |
153 | list_add (&retval->pools, &dev->dma_pools); | 155 | list_add(&retval->pools, &dev->dma_pools); |
154 | else { | 156 | else { |
155 | kfree(retval); | 157 | kfree(retval); |
156 | retval = NULL; | 158 | retval = NULL; |
157 | } | 159 | } |
158 | mutex_unlock(&pools_lock); | 160 | mutex_unlock(&pools_lock); |
159 | } else | 161 | } else |
160 | INIT_LIST_HEAD (&retval->pools); | 162 | INIT_LIST_HEAD(&retval->pools); |
161 | 163 | ||
162 | return retval; | 164 | return retval; |
163 | } | 165 | } |
166 | EXPORT_SYMBOL(dma_pool_create); | ||
164 | 167 | ||
165 | | 168 | static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) |
166 | static struct dma_page * | ||
167 | pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags) | ||
168 | { | 169 | { |
169 | struct dma_page *page; | 170 | struct dma_page *page; |
170 | int mapsize; | 171 | int mapsize; |
171 | 172 | ||
172 | mapsize = pool->blocks_per_page; | 173 | mapsize = pool->blocks_per_page; |
173 | mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG; | 174 | mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG; |
174 | mapsize *= sizeof (long); | 175 | mapsize *= sizeof(long); |
175 | 176 | ||
176 | page = kmalloc(mapsize + sizeof *page, mem_flags); | 177 | page = kmalloc(mapsize + sizeof *page, mem_flags); |
177 | if (!page) | 178 | if (!page) |
178 | return NULL; | 179 | return NULL; |
179 | page->vaddr = dma_alloc_coherent (pool->dev, | 180 | page->vaddr = dma_alloc_coherent(pool->dev, |
180 | pool->allocation, | 181 | pool->allocation, |
181 | &page->dma, | 182 | &page->dma, mem_flags); |
182 | mem_flags); | ||
183 | if (page->vaddr) { | 183 | if (page->vaddr) { |
184 | memset (page->bitmap, 0xff, mapsize); // bit set == free | 184 | memset(page->bitmap, 0xff, mapsize); /* bit set == free */ |
185 | #ifdef CONFIG_DEBUG_SLAB | 185 | #ifdef CONFIG_DEBUG_SLAB |
186 | memset (page->vaddr, POOL_POISON_FREED, pool->allocation); | 186 | memset(page->vaddr, POOL_POISON_FREED, pool->allocation); |
187 | #endif | 187 | #endif |
188 | list_add (&page->page_list, &pool->page_list); | 188 | list_add(&page->page_list, &pool->page_list); |
189 | page->in_use = 0; | 189 | page->in_use = 0; |
190 | } else { | 190 | } else { |
191 | kfree (page); | 191 | kfree(page); |
192 | page = NULL; | 192 | page = NULL; |
193 | } | 193 | } |
194 | return page; | 194 | return page; |
195 | } | 195 | } |
196 | 196 | ||
197 | | 197 | static inline int is_page_busy(int blocks, unsigned long *bitmap) |
198 | static inline int | ||
199 | is_page_busy (int blocks, unsigned long *bitmap) | ||
200 | { | 198 | { |
201 | while (blocks > 0) { | 199 | while (blocks > 0) { |
202 | if (*bitmap++ != ~0UL) | 200 | if (*bitmap++ != ~0UL) |
@@ -206,20 +204,18 @@ is_page_busy (int blocks, unsigned long *bitmap)
206 | return 0; | 204 | return 0; |
207 | } | 205 | } |
208 | 206 | ||
209 | static void | 207 | static void pool_free_page(struct dma_pool *pool, struct dma_page *page) |
210 | pool_free_page (struct dma_pool *pool, struct dma_page *page) | ||
211 | { | 208 | { |
212 | dma_addr_t dma = page->dma; | 209 | dma_addr_t dma = page->dma; |
213 | 210 | ||
214 | #ifdef CONFIG_DEBUG_SLAB | 211 | #ifdef CONFIG_DEBUG_SLAB |
215 | memset (page->vaddr, POOL_POISON_FREED, pool->allocation); | 212 | memset(page->vaddr, POOL_POISON_FREED, pool->allocation); |
216 | #endif | 213 | #endif |
217 | dma_free_coherent (pool->dev, pool->allocation, page->vaddr, dma); | 214 | dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma); |
218 | list_del (&page->page_list); | 215 | list_del(&page->page_list); |
219 | kfree (page); | 216 | kfree(page); |
220 | } | 217 | } |
221 | 218 | ||
222 | |||
223 | /** | 219 | /** |
224 | * dma_pool_destroy - destroys a pool of dma memory blocks. | 220 | * dma_pool_destroy - destroys a pool of dma memory blocks. |
225 | * @pool: dma pool that will be destroyed | 221 | * @pool: dma pool that will be destroyed |
@@ -228,36 +224,37 @@ pool_free_page (struct dma_pool *pool, struct dma_page *page)
228 | * Caller guarantees that no more memory from the pool is in use, | 224 | * Caller guarantees that no more memory from the pool is in use, |
229 | * and that nothing will try to use the pool after this call. | 225 | * and that nothing will try to use the pool after this call. |
230 | */ | 226 | */ |
231 | void | 227 | void dma_pool_destroy(struct dma_pool *pool) |
232 | dma_pool_destroy (struct dma_pool *pool) | ||
233 | { | 228 | { |
234 | mutex_lock(&pools_lock); | 229 | mutex_lock(&pools_lock); |
235 | list_del (&pool->pools); | 230 | list_del(&pool->pools); |
236 | if (pool->dev && list_empty (&pool->dev->dma_pools)) | 231 | if (pool->dev && list_empty(&pool->dev->dma_pools)) |
237 | device_remove_file (pool->dev, &dev_attr_pools); | 232 | device_remove_file(pool->dev, &dev_attr_pools); |
238 | mutex_unlock(&pools_lock); | 233 | mutex_unlock(&pools_lock); |
239 | 234 | ||
240 | while (!list_empty (&pool->page_list)) { | 235 | while (!list_empty(&pool->page_list)) { |
241 | struct dma_page *page; | 236 | struct dma_page *page; |
242 | page = list_entry (pool->page_list.next, | 237 | page = list_entry(pool->page_list.next, |
243 | struct dma_page, page_list); | 238 | struct dma_page, page_list); |
244 | if (is_page_busy (pool->blocks_per_page, page->bitmap)) { | 239 | if (is_page_busy(pool->blocks_per_page, page->bitmap)) { |
245 | if (pool->dev) | 240 | if (pool->dev) |
246 | dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n", | 241 | dev_err(pool->dev, |
242 | "dma_pool_destroy %s, %p busy\n", | ||
247 | pool->name, page->vaddr); | 243 | pool->name, page->vaddr); |
248 | else | 244 | else |
249 | printk (KERN_ERR "dma_pool_destroy %s, %p busy\n", | 245 | printk(KERN_ERR |
250 | pool->name, page->vaddr); | 246 | "dma_pool_destroy %s, %p busy\n", |
247 | pool->name, page->vaddr); | ||
251 | /* leak the still-in-use consistent memory */ | 248 | /* leak the still-in-use consistent memory */ |
252 | list_del (&page->page_list); | 249 | list_del(&page->page_list); |
253 | kfree (page); | 250 | kfree(page); |
254 | } else | 251 | } else |
255 | pool_free_page (pool, page); | 252 | pool_free_page(pool, page); |
256 | } | 253 | } |
257 | 254 | ||
258 | kfree (pool); | 255 | kfree(pool); |
259 | } | 256 | } |
260 | | 257 | EXPORT_SYMBOL(dma_pool_destroy); |
261 | 258 | ||
262 | /** | 259 | /** |
263 | * dma_pool_alloc - get a block of consistent memory | 260 | * dma_pool_alloc - get a block of consistent memory |
@@ -269,73 +266,72 @@ dma_pool_destroy (struct dma_pool *pool)
269 | * and reports its dma address through the handle. | 266 | * and reports its dma address through the handle. |
270 | * If such a memory block can't be allocated, null is returned. | 267 | * If such a memory block can't be allocated, null is returned. |
271 | */ | 268 | */ |
272 | void * | 269 | void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, |
273 | dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) | 270 | dma_addr_t *handle) |
274 | { | 271 | { |
275 | unsigned long flags; | 272 | unsigned long flags; |
276 | struct dma_page *page; | 273 | struct dma_page *page; |
277 | int map, block; | 274 | int map, block; |
278 | size_t offset; | 275 | size_t offset; |
279 | void *retval; | 276 | void *retval; |
280 | 277 | ||
281 | restart: | 278 | restart: |
282 | spin_lock_irqsave (&pool->lock, flags); | 279 | spin_lock_irqsave(&pool->lock, flags); |
283 | list_for_each_entry(page, &pool->page_list, page_list) { | 280 | list_for_each_entry(page, &pool->page_list, page_list) { |
284 | int i; | 281 | int i; |
285 | /* only cachable accesses here ... */ | 282 | /* only cachable accesses here ... */ |
286 | for (map = 0, i = 0; | 283 | for (map = 0, i = 0; |
287 | i < pool->blocks_per_page; | 284 | i < pool->blocks_per_page; i += BITS_PER_LONG, map++) { |
288 | i += BITS_PER_LONG, map++) { | 285 | if (page->bitmap[map] == 0) |
289 | if (page->bitmap [map] == 0) | ||
290 | continue; | 286 | continue; |
291 | block = ffz (~ page->bitmap [map]); | 287 | block = ffz(~page->bitmap[map]); |
292 | if ((i + block) < pool->blocks_per_page) { | 288 | if ((i + block) < pool->blocks_per_page) { |
293 | clear_bit (block, &page->bitmap [map]); | 289 | clear_bit(block, &page->bitmap[map]); |
294 | offset = (BITS_PER_LONG * map) + block; | 290 | offset = (BITS_PER_LONG * map) + block; |
295 | offset *= pool->size; | 291 | offset *= pool->size; |
296 | goto ready; | 292 | goto ready; |
297 | } | 293 | } |
298 | } | 294 | } |
299 | } | 295 | } |
300 | if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) { | 296 | page = pool_alloc_page(pool, GFP_ATOMIC); |
297 | if (!page) { | ||
301 | if (mem_flags & __GFP_WAIT) { | 298 | if (mem_flags & __GFP_WAIT) { |
302 | DECLARE_WAITQUEUE (wait, current); | 299 | DECLARE_WAITQUEUE(wait, current); |
303 | 300 | ||
304 | __set_current_state(TASK_INTERRUPTIBLE); | 301 | __set_current_state(TASK_INTERRUPTIBLE); |
305 | add_wait_queue (&pool->waitq, &wait); | 302 | add_wait_queue(&pool->waitq, &wait); |
306 | spin_unlock_irqrestore (&pool->lock, flags); | 303 | spin_unlock_irqrestore(&pool->lock, flags); |
307 | 304 | ||
308 | schedule_timeout (POOL_TIMEOUT_JIFFIES); | 305 | schedule_timeout(POOL_TIMEOUT_JIFFIES); |
309 | 306 | ||
310 | remove_wait_queue (&pool->waitq, &wait); | 307 | remove_wait_queue(&pool->waitq, &wait); |
311 | goto restart; | 308 | goto restart; |
312 | } | 309 | } |
313 | retval = NULL; | 310 | retval = NULL; |
314 | goto done; | 311 | goto done; |
315 | } | 312 | } |
316 | 313 | ||
317 | clear_bit (0, &page->bitmap [0]); | 314 | clear_bit(0, &page->bitmap[0]); |
318 | offset = 0; | 315 | offset = 0; |
319 | ready: | 316 | ready: |
320 | page->in_use++; | 317 | page->in_use++; |
321 | retval = offset + page->vaddr; | 318 | retval = offset + page->vaddr; |
322 | *handle = offset + page->dma; | 319 | *handle = offset + page->dma; |
323 | #ifdef CONFIG_DEBUG_SLAB | 320 | #ifdef CONFIG_DEBUG_SLAB |
324 | memset (retval, POOL_POISON_ALLOCATED, pool->size); | 321 | memset(retval, POOL_POISON_ALLOCATED, pool->size); |
325 | #endif | 322 | #endif |
326 | done: | 323 | done: |
327 | spin_unlock_irqrestore (&pool->lock, flags); | 324 | spin_unlock_irqrestore(&pool->lock, flags); |
328 | return retval; | 325 | return retval; |
329 | } | 326 | } |
327 | EXPORT_SYMBOL(dma_pool_alloc); | ||
330 | 328 | ||
331 | | 329 | static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) |
332 | static struct dma_page * | ||
333 | pool_find_page (struct dma_pool *pool, dma_addr_t dma) | ||
334 | { | 330 | { |
335 | unsigned long flags; | 331 | unsigned long flags; |
336 | struct dma_page *page; | 332 | struct dma_page *page; |
337 | 333 | ||
338 | spin_lock_irqsave (&pool->lock, flags); | 334 | spin_lock_irqsave(&pool->lock, flags); |
339 | list_for_each_entry(page, &pool->page_list, page_list) { | 335 | list_for_each_entry(page, &pool->page_list, page_list) { |
340 | if (dma < page->dma) | 336 | if (dma < page->dma) |
341 | continue; | 337 | continue; |
@@ -343,12 +339,11 @@ pool_find_page (struct dma_pool *pool, dma_addr_t dma)
343 | goto done; | 339 | goto done; |
344 | } | 340 | } |
345 | page = NULL; | 341 | page = NULL; |
346 | done: | 342 | done: |
347 | spin_unlock_irqrestore (&pool->lock, flags); | 343 | spin_unlock_irqrestore(&pool->lock, flags); |
348 | return page; | 344 | return page; |
349 | } | 345 | } |
350 | 346 | ||
351 | |||
352 | /** | 347 | /** |
353 | * dma_pool_free - put block back into dma pool | 348 | * dma_pool_free - put block back into dma pool |
354 | * @pool: the dma pool holding the block | 349 | * @pool: the dma pool holding the block |
@@ -358,20 +353,21 @@ done:
358 | * Caller promises neither device nor driver will again touch this block | 353 | * Caller promises neither device nor driver will again touch this block |
359 | * unless it is first re-allocated. | 354 | * unless it is first re-allocated. |
360 | */ | 355 | */ |
361 | void | 356 | void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) |
362 | dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma) | ||
363 | { | 357 | { |
364 | struct dma_page *page; | 358 | struct dma_page *page; |
365 | unsigned long flags; | 359 | unsigned long flags; |
366 | int map, block; | 360 | int map, block; |
367 | 361 | ||
368 | if ((page = pool_find_page(pool, dma)) == NULL) { | 362 | page = pool_find_page(pool, dma); |
363 | if (!page) { | ||
369 | if (pool->dev) | 364 | if (pool->dev) |
370 | dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n", | 365 | dev_err(pool->dev, |
371 | pool->name, vaddr, (unsigned long) dma); | 366 | "dma_pool_free %s, %p/%lx (bad dma)\n", |
367 | pool->name, vaddr, (unsigned long)dma); | ||
372 | else | 368 | else |
373 | printk (KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n", | 369 | printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n", |
374 | pool->name, vaddr, (unsigned long) dma); | 370 | pool->name, vaddr, (unsigned long)dma); |
375 | return; | 371 | return; |
376 | } | 372 | } |
377 | 373 | ||
@@ -383,37 +379,42 @@ dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
383 | #ifdef CONFIG_DEBUG_SLAB | 379 | #ifdef CONFIG_DEBUG_SLAB |
384 | if (((dma - page->dma) + (void *)page->vaddr) != vaddr) { | 380 | if (((dma - page->dma) + (void *)page->vaddr) != vaddr) { |
385 | if (pool->dev) | 381 | if (pool->dev) |
386 | dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n", | 382 | dev_err(pool->dev, |
387 | pool->name, vaddr, (unsigned long long) dma); | 383 | "dma_pool_free %s, %p (bad vaddr)/%Lx\n", |
384 | pool->name, vaddr, (unsigned long long)dma); | ||
388 | else | 385 | else |
389 | printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n", | 386 | printk(KERN_ERR |
390 | pool->name, vaddr, (unsigned long long) dma); | 387 | "dma_pool_free %s, %p (bad vaddr)/%Lx\n", |
388 | pool->name, vaddr, (unsigned long long)dma); | ||
391 | return; | 389 | return; |
392 | } | 390 | } |
393 | if (page->bitmap [map] & (1UL << block)) { | 391 | if (page->bitmap[map] & (1UL << block)) { |
394 | if (pool->dev) | 392 | if (pool->dev) |
395 | dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n", | 393 | dev_err(pool->dev, |
394 | "dma_pool_free %s, dma %Lx already free\n", | ||
396 | pool->name, (unsigned long long)dma); | 395 | pool->name, (unsigned long long)dma); |
397 | else | 396 | else |
398 | printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n", | 397 | printk(KERN_ERR |
399 | pool->name, (unsigned long long)dma); | 398 | "dma_pool_free %s, dma %Lx already free\n", |
399 | pool->name, (unsigned long long)dma); | ||
400 | return; | 400 | return; |
401 | } | 401 | } |
402 | memset (vaddr, POOL_POISON_FREED, pool->size); | 402 | memset(vaddr, POOL_POISON_FREED, pool->size); |
403 | #endif | 403 | #endif |
404 | 404 | ||
405 | spin_lock_irqsave (&pool->lock, flags); | 405 | spin_lock_irqsave(&pool->lock, flags); |
406 | page->in_use--; | 406 | page->in_use--; |
407 | set_bit (block, &page->bitmap [map]); | 407 | set_bit(block, &page->bitmap[map]); |
408 | if (waitqueue_active (&pool->waitq)) | 408 | if (waitqueue_active(&pool->waitq)) |
409 | wake_up (&pool->waitq); | 409 | wake_up(&pool->waitq); |
410 | /* | 410 | /* |
411 | * Resist a temptation to do | 411 | * Resist a temptation to do |
412 | * if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page); | 412 | * if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page); |
413 | * Better have a few empty pages hang around. | 413 | * Better have a few empty pages hang around. |
414 | */ | 414 | */ |
415 | spin_unlock_irqrestore (&pool->lock, flags); | 415 | spin_unlock_irqrestore(&pool->lock, flags); |
416 | } | 416 | } |
417 | EXPORT_SYMBOL(dma_pool_free); | ||
417 | 418 | ||
418 | /* | 419 | /* |
419 | * Managed DMA pool | 420 | * Managed DMA pool |
@@ -458,6 +459,7 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
458 | 459 | ||
459 | return pool; | 460 | return pool; |
460 | } | 461 | } |
462 | EXPORT_SYMBOL(dmam_pool_create); | ||
461 | 463 | ||
462 | /** | 464 | /** |
463 | * dmam_pool_destroy - Managed dma_pool_destroy() | 465 | * dmam_pool_destroy - Managed dma_pool_destroy() |
@@ -472,10 +474,4 @@ void dmam_pool_destroy(struct dma_pool *pool)
472 | dma_pool_destroy(pool); | 474 | dma_pool_destroy(pool); |
473 | WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool)); | 475 | WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool)); |
474 | } | 476 | } |
475 | | 477 | EXPORT_SYMBOL(dmam_pool_destroy); |
476 | EXPORT_SYMBOL (dma_pool_create); | ||
477 | EXPORT_SYMBOL (dma_pool_destroy); | ||
478 | EXPORT_SYMBOL (dma_pool_alloc); | ||
479 | EXPORT_SYMBOL (dma_pool_free); | ||
480 | EXPORT_SYMBOL (dmam_pool_create); | ||
481 | EXPORT_SYMBOL (dmam_pool_destroy); | ||
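
For readers coming to this patch without kernel background, the four functions it restyles (dma_pool_create, dma_pool_alloc, dma_pool_free, dma_pool_destroy) form the driver-facing DMA pool API. The following is a minimal usage sketch, not part of the commit: my_driver_use_pool, my_dev, the pool name and the 32-byte block size are made up for illustration; only the dma_pool_* calls and their signatures come from the code above.

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical caller; "my_dev" would be a real struct device owned by a driver. */
static int my_driver_use_pool(struct device *my_dev)
{
	struct dma_pool *pool;
	void *vaddr;
	dma_addr_t dma;

	/* 32-byte blocks, 32-byte aligned; 0 lets the pool choose the backing allocation size */
	pool = dma_pool_create("my_pool", my_dev, 32, 32, 0);
	if (!pool)
		return -ENOMEM;

	/* one coherent block: "vaddr" for the CPU, "dma" for the device */
	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand "dma" to the hardware, touch the data through "vaddr" ... */

	dma_pool_free(pool, vaddr, dma);
	dma_pool_destroy(pool);	/* only after every block has been freed */
	return 0;
}

As the kernel-doc in the diff notes, dma_pool_destroy() requires that no blocks from the pool are still in use, and dma_pool_alloc() can also be called with GFP_ATOMIC from contexts that cannot sleep.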