-rw-r--r--  arch/avr32/kernel/syscall_table.S    2
-rw-r--r--  arch/frv/kernel/entry.S              2
-rw-r--r--  arch/m32r/kernel/syscall_table.S     2
-rw-r--r--  arch/sh/kernel/syscalls_32.S         2
-rw-r--r--  arch/sh/kernel/syscalls_64.S         2
-rw-r--r--  arch/x86/mm/pageattr.c               1
-rw-r--r--  drivers/base/Makefile                2
-rw-r--r--  drivers/base/dmapool.c             481
-rw-r--r--  fs/Kconfig                          28
-rw-r--r--  fs/smbfs/inode.c                     7
-rw-r--r--  include/asm-avr32/unistd.h           2
-rw-r--r--  include/asm-frv/unistd.h             2
-rw-r--r--  include/asm-m32r/unistd.h            2
-rw-r--r--  include/asm-sh/unistd_32.h           2
-rw-r--r--  include/asm-sh/unistd_64.h           2
-rw-r--r--  mm/Makefile                          1
-rw-r--r--  mm/dmapool.c                       500
17 files changed, 533 insertions(+), 507 deletions(-)
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index 75c81f2dd0b3..478bda4c4a09 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -293,6 +293,6 @@ sys_call_table:
 	.long	sys_shmctl
 	.long	sys_utimensat
 	.long	sys_signalfd
-	.long	sys_timerfd		/* 280 */
+	.long	sys_ni_syscall		/* 280, was sys_timerfd */
 	.long	sys_eventfd
 	.long	sys_ni_syscall		/* r8 is saturated at nr_syscalls */
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 99046b1f51c8..ca6a345b87e4 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1494,7 +1494,7 @@ sys_call_table:
 	.long sys_epoll_pwait
 	.long sys_utimensat		/* 320 */
 	.long sys_signalfd
-	.long sys_timerfd
+	.long sys_ni_syscall
 	.long sys_eventfd
 	.long sys_fallocate
 
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
index 95aa79874847..aa3bf4cfab37 100644
--- a/arch/m32r/kernel/syscall_table.S
+++ b/arch/m32r/kernel/syscall_table.S
@@ -321,6 +321,6 @@ ENTRY(sys_call_table)
 	.long sys_epoll_pwait
 	.long sys_utimensat		/* 320 */
 	.long sys_signalfd
-	.long sys_timerfd
+	.long sys_ni_syscall
 	.long sys_eventfd
 	.long sys_fallocate
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 10bec45415ba..719e127a7c05 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -338,6 +338,6 @@ ENTRY(sys_call_table)
 	.long sys_epoll_pwait
 	.long sys_utimensat		/* 320 */
 	.long sys_signalfd
-	.long sys_timerfd
+	.long sys_ni_syscall
 	.long sys_eventfd
 	.long sys_fallocate
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 98a93efe3691..12c7340356ae 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -376,6 +376,6 @@ sys_call_table:
 	.long sys_epoll_pwait
 	.long sys_utimensat
 	.long sys_signalfd
-	.long sys_timerfd	/* 350 */
+	.long sys_ni_syscall	/* 350 */
 	.long sys_eventfd
 	.long sys_fallocate
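
The five syscall-table edits above all retire the old sys_timerfd entry by pointing its slot at sys_ni_syscall, the kernel's stub for unimplemented system calls, so the numbering of the later entries (sys_eventfd, sys_fallocate) stays stable. A minimal userspace sketch of what a caller now sees; the slot number 322 is the frv/m32r/sh value from the headers changed below, and the program is illustrative, not part of this commit:

/* probe_slot.c - hedged sketch: invoke a retired syscall slot directly */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* 322 was __NR_timerfd on frv/m32r/sh before this change */
	long ret = syscall(322);

	if (ret == -1 && errno == ENOSYS)
		printf("slot 322 now resolves to sys_ni_syscall (-ENOSYS)\n");
	return 0;
}
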
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bb55a78dcd62..16ce841f08d6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -237,7 +237,6 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 	if (!SHARED_KERNEL_PMD) {
 		struct page *page;
 
-		address = __pa(address);
 		list_for_each_entry(page, &pgd_list, lru) {
 			pgd_t *pgd;
 			pud_t *pud;
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 63e09c015ca0..c66637392bbc 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -5,7 +5,7 @@ obj-y := core.o sys.o bus.o dd.o \
 			   cpu.o firmware.o init.o map.o devres.o \
 			   attribute_container.o transport_class.o
 obj-y			+= power/
-obj-$(CONFIG_HAS_DMA)	+= dma-mapping.o dmapool.o
+obj-$(CONFIG_HAS_DMA)	+= dma-mapping.o
 obj-$(CONFIG_ISA)	+= isa.o
 obj-$(CONFIG_FW_LOADER)	+= firmware_class.o
 obj-$(CONFIG_NUMA)	+= node.o
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
deleted file mode 100644
index b5034dc72a05..000000000000
--- a/drivers/base/dmapool.c
+++ /dev/null
@@ -1,481 +0,0 @@
-
-#include <linux/device.h>
-#include <linux/mm.h>
-#include <asm/io.h>		/* Needed for i386 to build */
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/poison.h>
-#include <linux/sched.h>
-
-/*
- * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
- * small blocks are easily used by drivers for bus mastering controllers.
- * This should probably be sharing the guts of the slab allocator.
- */
-
-struct dma_pool {	/* the pool */
-	struct list_head	page_list;
-	spinlock_t		lock;
-	size_t			blocks_per_page;
-	size_t			size;
-	struct device		*dev;
-	size_t			allocation;
-	char			name [32];
-	wait_queue_head_t	waitq;
-	struct list_head	pools;
-};
-
-struct dma_page {	/* cacheable header for 'allocation' bytes */
-	struct list_head	page_list;
-	void			*vaddr;
-	dma_addr_t		dma;
-	unsigned		in_use;
-	unsigned long		bitmap [0];
-};
-
-#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
-
-static DEFINE_MUTEX (pools_lock);
-
-static ssize_t
-show_pools (struct device *dev, struct device_attribute *attr, char *buf)
-{
-	unsigned temp;
-	unsigned size;
-	char *next;
-	struct dma_page *page;
-	struct dma_pool *pool;
-
-	next = buf;
-	size = PAGE_SIZE;
-
-	temp = scnprintf(next, size, "poolinfo - 0.1\n");
-	size -= temp;
-	next += temp;
-
-	mutex_lock(&pools_lock);
-	list_for_each_entry(pool, &dev->dma_pools, pools) {
-		unsigned pages = 0;
-		unsigned blocks = 0;
-
-		list_for_each_entry(page, &pool->page_list, page_list) {
-			pages++;
-			blocks += page->in_use;
-		}
-
-		/* per-pool info, no real statistics yet */
-		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
-				pool->name,
-				blocks, pages * pool->blocks_per_page,
-				pool->size, pages);
-		size -= temp;
-		next += temp;
-	}
-	mutex_unlock(&pools_lock);
-
-	return PAGE_SIZE - size;
-}
-static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
-
-/**
- * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
- * @name: name of pool, for diagnostics
- * @dev: device that will be doing the DMA
- * @size: size of the blocks in this pool.
- * @align: alignment requirement for blocks; must be a power of two
- * @allocation: returned blocks won't cross this boundary (or zero)
- * Context: !in_interrupt()
- *
- * Returns a dma allocation pool with the requested characteristics, or
- * null if one can't be created. Given one of these pools, dma_pool_alloc()
- * may be used to allocate memory. Such memory will all have "consistent"
- * DMA mappings, accessible by the device and its driver without using
- * cache flushing primitives. The actual size of blocks allocated may be
- * larger than requested because of alignment.
- *
- * If allocation is nonzero, objects returned from dma_pool_alloc() won't
- * cross that size boundary. This is useful for devices which have
- * addressing restrictions on individual DMA transfers, such as not crossing
- * boundaries of 4KBytes.
- */
-struct dma_pool *
-dma_pool_create (const char *name, struct device *dev,
-	size_t size, size_t align, size_t allocation)
-{
-	struct dma_pool		*retval;
-
-	if (align == 0)
-		align = 1;
-	if (size == 0)
-		return NULL;
-	else if (size < align)
-		size = align;
-	else if ((size % align) != 0) {
-		size += align + 1;
-		size &= ~(align - 1);
-	}
-
-	if (allocation == 0) {
-		if (PAGE_SIZE < size)
-			allocation = size;
-		else
-			allocation = PAGE_SIZE;
-		// FIXME: round up for less fragmentation
-	} else if (allocation < size)
-		return NULL;
-
-	if (!(retval = kmalloc_node (sizeof *retval, GFP_KERNEL, dev_to_node(dev))))
-		return retval;
-
-	strlcpy (retval->name, name, sizeof retval->name);
-
-	retval->dev = dev;
-
-	INIT_LIST_HEAD (&retval->page_list);
-	spin_lock_init (&retval->lock);
-	retval->size = size;
-	retval->allocation = allocation;
-	retval->blocks_per_page = allocation / size;
-	init_waitqueue_head (&retval->waitq);
-
-	if (dev) {
-		int ret;
-
-		mutex_lock(&pools_lock);
-		if (list_empty (&dev->dma_pools))
-			ret = device_create_file (dev, &dev_attr_pools);
-		else
-			ret = 0;
-		/* note: not currently insisting "name" be unique */
-		if (!ret)
-			list_add (&retval->pools, &dev->dma_pools);
-		else {
-			kfree(retval);
-			retval = NULL;
-		}
-		mutex_unlock(&pools_lock);
-	} else
-		INIT_LIST_HEAD (&retval->pools);
-
-	return retval;
-}
-
-
-static struct dma_page *
-pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
-{
-	struct dma_page	*page;
-	int		mapsize;
-
-	mapsize = pool->blocks_per_page;
-	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
-	mapsize *= sizeof (long);
-
-	page = kmalloc(mapsize + sizeof *page, mem_flags);
-	if (!page)
-		return NULL;
-	page->vaddr = dma_alloc_coherent (pool->dev,
-					    pool->allocation,
-					    &page->dma,
-					    mem_flags);
-	if (page->vaddr) {
-		memset (page->bitmap, 0xff, mapsize);	// bit set == free
-#ifdef	CONFIG_DEBUG_SLAB
-		memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
-		list_add (&page->page_list, &pool->page_list);
-		page->in_use = 0;
-	} else {
-		kfree (page);
-		page = NULL;
-	}
-	return page;
-}
-
-
-static inline int
-is_page_busy (int blocks, unsigned long *bitmap)
-{
-	while (blocks > 0) {
-		if (*bitmap++ != ~0UL)
-			return 1;
-		blocks -= BITS_PER_LONG;
-	}
-	return 0;
-}
-
-static void
-pool_free_page (struct dma_pool *pool, struct dma_page *page)
-{
-	dma_addr_t	dma = page->dma;
-
-#ifdef	CONFIG_DEBUG_SLAB
-	memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
-	dma_free_coherent (pool->dev, pool->allocation, page->vaddr, dma);
-	list_del (&page->page_list);
-	kfree (page);
-}
-
-
-/**
- * dma_pool_destroy - destroys a pool of dma memory blocks.
- * @pool: dma pool that will be destroyed
- * Context: !in_interrupt()
- *
- * Caller guarantees that no more memory from the pool is in use,
- * and that nothing will try to use the pool after this call.
- */
-void
-dma_pool_destroy (struct dma_pool *pool)
-{
-	mutex_lock(&pools_lock);
-	list_del (&pool->pools);
-	if (pool->dev && list_empty (&pool->dev->dma_pools))
-		device_remove_file (pool->dev, &dev_attr_pools);
-	mutex_unlock(&pools_lock);
-
-	while (!list_empty (&pool->page_list)) {
-		struct dma_page		*page;
-		page = list_entry (pool->page_list.next,
-				struct dma_page, page_list);
-		if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
-			if (pool->dev)
-				dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n",
-					pool->name, page->vaddr);
-			else
-				printk (KERN_ERR "dma_pool_destroy %s, %p busy\n",
-					pool->name, page->vaddr);
-			/* leak the still-in-use consistent memory */
-			list_del (&page->page_list);
-			kfree (page);
-		} else
-			pool_free_page (pool, page);
-	}
-
-	kfree (pool);
-}
-
-
-/**
- * dma_pool_alloc - get a block of consistent memory
- * @pool: dma pool that will produce the block
- * @mem_flags: GFP_* bitmask
- * @handle: pointer to dma address of block
- *
- * This returns the kernel virtual address of a currently unused block,
- * and reports its dma address through the handle.
- * If such a memory block can't be allocated, null is returned.
- */
-void *
-dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
-{
-	unsigned long		flags;
-	struct dma_page		*page;
-	int			map, block;
-	size_t			offset;
-	void			*retval;
-
-restart:
-	spin_lock_irqsave (&pool->lock, flags);
-	list_for_each_entry(page, &pool->page_list, page_list) {
-		int		i;
-		/* only cachable accesses here ... */
-		for (map = 0, i = 0;
-				i < pool->blocks_per_page;
-				i += BITS_PER_LONG, map++) {
-			if (page->bitmap [map] == 0)
-				continue;
-			block = ffz (~ page->bitmap [map]);
-			if ((i + block) < pool->blocks_per_page) {
-				clear_bit (block, &page->bitmap [map]);
-				offset = (BITS_PER_LONG * map) + block;
-				offset *= pool->size;
-				goto ready;
-			}
-		}
-	}
-	if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) {
-		if (mem_flags & __GFP_WAIT) {
-			DECLARE_WAITQUEUE (wait, current);
-
-			__set_current_state(TASK_INTERRUPTIBLE);
-			add_wait_queue (&pool->waitq, &wait);
-			spin_unlock_irqrestore (&pool->lock, flags);
-
-			schedule_timeout (POOL_TIMEOUT_JIFFIES);
-
-			remove_wait_queue (&pool->waitq, &wait);
-			goto restart;
-		}
-		retval = NULL;
-		goto done;
-	}
-
-	clear_bit (0, &page->bitmap [0]);
-	offset = 0;
-ready:
-	page->in_use++;
-	retval = offset + page->vaddr;
-	*handle = offset + page->dma;
-#ifdef	CONFIG_DEBUG_SLAB
-	memset (retval, POOL_POISON_ALLOCATED, pool->size);
-#endif
-done:
-	spin_unlock_irqrestore (&pool->lock, flags);
-	return retval;
-}
-
-
-static struct dma_page *
-pool_find_page (struct dma_pool *pool, dma_addr_t dma)
-{
-	unsigned long		flags;
-	struct dma_page		*page;
-
-	spin_lock_irqsave (&pool->lock, flags);
-	list_for_each_entry(page, &pool->page_list, page_list) {
-		if (dma < page->dma)
-			continue;
-		if (dma < (page->dma + pool->allocation))
-			goto done;
-	}
-	page = NULL;
-done:
-	spin_unlock_irqrestore (&pool->lock, flags);
-	return page;
-}
-
-
-/**
- * dma_pool_free - put block back into dma pool
- * @pool: the dma pool holding the block
- * @vaddr: virtual address of block
- * @dma: dma address of block
- *
- * Caller promises neither device nor driver will again touch this block
- * unless it is first re-allocated.
- */
-void
-dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
-{
-	struct dma_page		*page;
-	unsigned long		flags;
-	int			map, block;
-
-	if ((page = pool_find_page(pool, dma)) == NULL) {
-		if (pool->dev)
-			dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
-				pool->name, vaddr, (unsigned long) dma);
-		else
-			printk (KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
-				pool->name, vaddr, (unsigned long) dma);
-		return;
-	}
-
-	block = dma - page->dma;
-	block /= pool->size;
-	map = block / BITS_PER_LONG;
-	block %= BITS_PER_LONG;
-
-#ifdef	CONFIG_DEBUG_SLAB
-	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
-		if (pool->dev)
-			dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
-				pool->name, vaddr, (unsigned long long) dma);
-		else
-			printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
-				pool->name, vaddr, (unsigned long long) dma);
-		return;
-	}
-	if (page->bitmap [map] & (1UL << block)) {
-		if (pool->dev)
-			dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
-				pool->name, (unsigned long long)dma);
-		else
-			printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
-				pool->name, (unsigned long long)dma);
-		return;
-	}
-	memset (vaddr, POOL_POISON_FREED, pool->size);
-#endif
-
-	spin_lock_irqsave (&pool->lock, flags);
-	page->in_use--;
-	set_bit (block, &page->bitmap [map]);
-	if (waitqueue_active (&pool->waitq))
-		wake_up (&pool->waitq);
-	/*
-	 * Resist a temptation to do
-	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
-	 * Better have a few empty pages hang around.
-	 */
-	spin_unlock_irqrestore (&pool->lock, flags);
-}
-
-/*
- * Managed DMA pool
- */
-static void dmam_pool_release(struct device *dev, void *res)
-{
-	struct dma_pool *pool = *(struct dma_pool **)res;
-
-	dma_pool_destroy(pool);
-}
-
-static int dmam_pool_match(struct device *dev, void *res, void *match_data)
-{
-	return *(struct dma_pool **)res == match_data;
-}
-
-/**
- * dmam_pool_create - Managed dma_pool_create()
- * @name: name of pool, for diagnostics
- * @dev: device that will be doing the DMA
- * @size: size of the blocks in this pool.
- * @align: alignment requirement for blocks; must be a power of two
- * @allocation: returned blocks won't cross this boundary (or zero)
- *
- * Managed dma_pool_create(). DMA pool created with this function is
- * automatically destroyed on driver detach.
- */
-struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
-				  size_t size, size_t align, size_t allocation)
-{
-	struct dma_pool **ptr, *pool;
-
-	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
-	if (!ptr)
-		return NULL;
-
-	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
-	if (pool)
-		devres_add(dev, ptr);
-	else
-		devres_free(ptr);
-
-	return pool;
-}
-
-/**
- * dmam_pool_destroy - Managed dma_pool_destroy()
- * @pool: dma pool that will be destroyed
- *
- * Managed dma_pool_destroy().
- */
-void dmam_pool_destroy(struct dma_pool *pool)
-{
-	struct device *dev = pool->dev;
-
-	dma_pool_destroy(pool);
-	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
-}
-
-EXPORT_SYMBOL (dma_pool_create);
-EXPORT_SYMBOL (dma_pool_destroy);
-EXPORT_SYMBOL (dma_pool_alloc);
-EXPORT_SYMBOL (dma_pool_free);
-EXPORT_SYMBOL (dmam_pool_create);
-EXPORT_SYMBOL (dmam_pool_destroy);
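
The file deleted above tracked free blocks with a per-page bitmap (bit set == free), sized from blocks_per_page and scanned a word at a time with ffz(~word). A standalone sketch of that lookup, using GCC's __builtin_ctzl in place of the kernel's ffz helper; BITS_PER_LONG is defined locally because this is illustrative code, not the kernel's:

/* hedged sketch of the old bitmap search: bit set == free block */
#define BITS_PER_LONG ((int)(8 * sizeof(long)))

static int find_free_block(const unsigned long *bitmap, int blocks_per_page)
{
	int map, i;

	for (map = 0, i = 0; i < blocks_per_page; i += BITS_PER_LONG, map++) {
		if (bitmap[map] == 0)
			continue;	/* every block in this word is in use */
		/* first set (== free) bit; same result as ffz(~x) above */
		int block = __builtin_ctzl(bitmap[map]);
		if (i + block < blocks_per_page)
			return i + block;	/* block index within the page */
	}
	return -1;			/* page is full */
}
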
diff --git a/fs/Kconfig b/fs/Kconfig
index 987b5d7cb21a..ea5b35947623 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -1152,7 +1152,7 @@ config BEFS_DEBUG
 	depends on BEFS_FS
 	help
 	  If you say Y here, you can use the 'debug' mount option to enable
-	  debugging output from the driver. 
+	  debugging output from the driver.
 
 config BFS_FS
 	tristate "BFS file system support (EXPERIMENTAL)"
@@ -1263,7 +1263,7 @@ config JFFS2_FS_XATTR
 	  Extended attributes are name:value pairs associated with inodes by
 	  the kernel or by users (see the attr(5) manual page, or visit
 	  <http://acl.bestbits.at/> for details).
-	  
+
 	  If unsure, say N.
 
 config JFFS2_FS_POSIX_ACL
@@ -1274,10 +1274,10 @@ config JFFS2_FS_POSIX_ACL
 	help
 	  Posix Access Control Lists (ACLs) support permissions for users and
 	  groups beyond the owner/group/world scheme.
-	  
+
 	  To learn more about Access Control Lists, visit the Posix ACLs for
 	  Linux website <http://acl.bestbits.at/>.
-	  
+
 	  If you don't know what Access Control Lists are, say N
 
 config JFFS2_FS_SECURITY
1283config JFFS2_FS_SECURITY 1283config JFFS2_FS_SECURITY
@@ -1289,7 +1289,7 @@ config JFFS2_FS_SECURITY
 	  implemented by security modules like SELinux. This option
 	  enables an extended attribute handler for file security
 	  labels in the jffs2 filesystem.
-	  
+
 	  If you are not using a security module that requires using
 	  extended attributes for file security labels, say N.
 
@@ -1835,7 +1835,7 @@ config RPCSEC_GSS_SPKM3
 	  If unsure, say N.
 
 config SMB_FS
-	tristate "SMB file system support (to mount Windows shares etc.)"
+	tristate "SMB file system support (OBSOLETE, please use CIFS)"
 	depends on INET
 	select NLS
 	help
@@ -1858,8 +1858,8 @@ config SMB_FS
 	  General information about how to connect Linux, Windows machines and
 	  Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>.
 
-	  To compile the SMB support as a module, choose M here: the module will
-	  be called smbfs. Most people say N, however.
+	  To compile the SMB support as a module, choose M here:
+	  the module will be called smbfs. Most people say N, however.
 
 config SMB_NLS_DEFAULT
 	bool "Use a default NLS"
@@ -1891,7 +1891,7 @@ config SMB_NLS_REMOTE
 	  smbmount from samba 2.2.0 or later supports this.
 
 config CIFS
-	tristate "CIFS support (advanced network filesystem for Samba, Window and other CIFS compliant servers)"
+	tristate "CIFS support (advanced network filesystem, SMBFS successor)"
 	depends on INET
 	select NLS
 	help
@@ -1949,16 +1949,16 @@ config CIFS_WEAK_PW_HASH
 	  LANMAN based servers such as OS/2 and Windows 95, but such
 	  mounts may be less secure than mounts using NTLM or more recent
 	  security mechanisms if you are on a public network. Unless you
-	  have a need to access old SMB servers (and are on a private 
+	  have a need to access old SMB servers (and are on a private
 	  network) you probably want to say N. Even if this support
 	  is enabled in the kernel build, LANMAN authentication will not be
 	  used automatically. At runtime LANMAN mounts are disabled but
-	  can be set to required (or optional) either in 
+	  can be set to required (or optional) either in
 	  /proc/fs/cifs (see fs/cifs/README for more detail) or via an
 	  option on the mount command. This support is disabled by
 	  default in order to reduce the possibility of a downgrade
 	  attack.
-	  
+
 	  If unsure, say N.
 
 config CIFS_XATTR
1964config CIFS_XATTR 1964config CIFS_XATTR
@@ -1999,7 +1999,7 @@ config CIFS_DEBUG2
 	  messages in some error paths, slowing performance. This
 	  option can be turned off unless you are debugging
 	  cifs problems. If unsure, say N.
-	  
+
 config CIFS_EXPERIMENTAL
 	bool "CIFS Experimental Features (EXPERIMENTAL)"
 	depends on CIFS && EXPERIMENTAL
@@ -2090,7 +2090,7 @@ config CODA_FS_OLD_API
 	  However this new API is not backward compatible with older
 	  clients. If you really need to run the old Coda userspace
 	  cache manager then say Y.
-	  
+
 	  For most cases you probably want to say N.
 
 config AFS_FS
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 9416ead0c7aa..4e5c22ca802e 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -500,6 +500,13 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
 	struct smb_fattr root;
 	int ver;
 	void *mem;
+	static int warn_count;
+
+	if (warn_count < 5) {
+		warn_count++;
+		printk(KERN_EMERG "smbfs is deprecated and will be removed"
+			"from the 2.6.27 kernel. Please migrate to cifs\n");
+	}
 
 	if (!raw_data)
 		goto out_no_data;
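
The warning added above deliberately fires at most five times per boot, using a function-local static counter rather than printk_ratelimit(). The same print-at-most-N-times pattern in a standalone sketch, with userspace stand-ins for the kernel calls:

/* hedged sketch of the bounded-warning pattern used in smb_fill_super() */
#include <stdio.h>

static void warn_deprecated(void)
{
	static int warn_count;	/* persists across calls, starts at 0 */

	if (warn_count < 5) {
		warn_count++;
		fprintf(stderr, "this filesystem is deprecated\n");
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		warn_deprecated();	/* prints only five times */
	return 0;
}
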
diff --git a/include/asm-avr32/unistd.h b/include/asm-avr32/unistd.h
index de09009593f8..89861a27543e 100644
--- a/include/asm-avr32/unistd.h
+++ b/include/asm-avr32/unistd.h
@@ -297,7 +297,7 @@
 
 #define __NR_utimensat		278
 #define __NR_signalfd		279
-#define __NR_timerfd		280
+/* 280 was __NR_timerfd */
 #define __NR_eventfd		281
 
 #ifdef __KERNEL__
diff --git a/include/asm-frv/unistd.h b/include/asm-frv/unistd.h
index cd84f1771e34..e8c986667532 100644
--- a/include/asm-frv/unistd.h
+++ b/include/asm-frv/unistd.h
@@ -328,7 +328,7 @@
 #define __NR_epoll_pwait	319
 #define __NR_utimensat		320
 #define __NR_signalfd		321
-#define __NR_timerfd		322
+/* #define __NR_timerfd		322 removed */
 #define __NR_eventfd		323
 #define __NR_fallocate		324
 
diff --git a/include/asm-m32r/unistd.h b/include/asm-m32r/unistd.h
index f467eac9ba70..cf701c933249 100644
--- a/include/asm-m32r/unistd.h
+++ b/include/asm-m32r/unistd.h
@@ -327,7 +327,7 @@
 #define __NR_epoll_pwait	319
 #define __NR_utimensat		320
 #define __NR_signalfd		321
-#define __NR_timerfd		322
+/* #define __NR_timerfd		322 removed */
 #define __NR_eventfd		323
 #define __NR_fallocate		324
 
diff --git a/include/asm-sh/unistd_32.h b/include/asm-sh/unistd_32.h
index b182b1cb05fd..433fd1b48fa2 100644
--- a/include/asm-sh/unistd_32.h
+++ b/include/asm-sh/unistd_32.h
@@ -330,7 +330,7 @@
 #define __NR_epoll_pwait	319
 #define __NR_utimensat		320
 #define __NR_signalfd		321
-#define __NR_timerfd		322
+/* #define __NR_timerfd		322 removed */
 #define __NR_eventfd		323
 #define __NR_fallocate		324
 
diff --git a/include/asm-sh/unistd_64.h b/include/asm-sh/unistd_64.h
index 944511882cac..108d2ba897fe 100644
--- a/include/asm-sh/unistd_64.h
+++ b/include/asm-sh/unistd_64.h
@@ -370,7 +370,7 @@
 #define __NR_epoll_pwait	347
 #define __NR_utimensat		348
 #define __NR_signalfd		349
-#define __NR_timerfd		350
+/* #define __NR_timerfd		350 removed */
 #define __NR_eventfd		351
 #define __NR_fallocate		352
 
diff --git a/mm/Makefile b/mm/Makefile
index 44e2528af70c..4af5dff37277 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -16,6 +16,7 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
 obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o
 obj-$(CONFIG_BOUNCE)	+= bounce.o
 obj-$(CONFIG_SWAP)	+= page_io.o swap_state.o swapfile.o thrash.o
+obj-$(CONFIG_HAS_DMA)	+= dmapool.o
 obj-$(CONFIG_HUGETLBFS)	+= hugetlb.o
 obj-$(CONFIG_NUMA) 	+= mempolicy.o
 obj-$(CONFIG_SPARSEMEM)	+= sparse.o
diff --git a/mm/dmapool.c b/mm/dmapool.c
new file mode 100644
index 000000000000..34aaac451a96
--- /dev/null
+++ b/mm/dmapool.c
@@ -0,0 +1,500 @@
+/*
+ * DMA Pool allocator
+ *
+ * Copyright 2001 David Brownell
+ * Copyright 2007 Intel Corporation
+ *   Author: Matthew Wilcox <willy@linux.intel.com>
+ *
+ * This software may be redistributed and/or modified under the terms of
+ * the GNU General Public License ("GPL") version 2 as published by the
+ * Free Software Foundation.
+ *
+ * This allocator returns small blocks of a given size which are DMA-able by
+ * the given device.  It uses the dma_alloc_coherent page allocator to get
+ * new pages, then splits them up into blocks of the required size.
+ * Many older drivers still have their own code to do this.
+ *
+ * The current design of this allocator is fairly simple.  The pool is
+ * represented by the 'struct dma_pool' which keeps a doubly-linked list of
+ * allocated pages.  Each page in the page_list is split into blocks of at
+ * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
+ * list of free blocks within the page.  Used blocks aren't tracked, but we
+ * keep a count of how many are currently allocated from each page.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+struct dma_pool {		/* the pool */
+	struct list_head page_list;
+	spinlock_t lock;
+	size_t size;
+	struct device *dev;
+	size_t allocation;
+	size_t boundary;
+	char name[32];
+	wait_queue_head_t waitq;
+	struct list_head pools;
+};
+
+struct dma_page {		/* cacheable header for 'allocation' bytes */
+	struct list_head page_list;
+	void *vaddr;
+	dma_addr_t dma;
+	unsigned int in_use;
+	unsigned int offset;
+};
+
+#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
+
+static DEFINE_MUTEX(pools_lock);
+
+static ssize_t
+show_pools(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	unsigned temp;
+	unsigned size;
+	char *next;
+	struct dma_page *page;
+	struct dma_pool *pool;
+
+	next = buf;
+	size = PAGE_SIZE;
+
+	temp = scnprintf(next, size, "poolinfo - 0.1\n");
+	size -= temp;
+	next += temp;
+
+	mutex_lock(&pools_lock);
+	list_for_each_entry(pool, &dev->dma_pools, pools) {
+		unsigned pages = 0;
+		unsigned blocks = 0;
+
+		list_for_each_entry(page, &pool->page_list, page_list) {
+			pages++;
+			blocks += page->in_use;
+		}
+
+		/* per-pool info, no real statistics yet */
+		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
+				 pool->name, blocks,
+				 pages * (pool->allocation / pool->size),
+				 pool->size, pages);
+		size -= temp;
+		next += temp;
+	}
+	mutex_unlock(&pools_lock);
+
+	return PAGE_SIZE - size;
+}
+
+static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
+
+/**
+ * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
+ * @name: name of pool, for diagnostics
+ * @dev: device that will be doing the DMA
+ * @size: size of the blocks in this pool.
+ * @align: alignment requirement for blocks; must be a power of two
+ * @boundary: returned blocks won't cross this power of two boundary
+ * Context: !in_interrupt()
+ *
+ * Returns a dma allocation pool with the requested characteristics, or
+ * null if one can't be created.  Given one of these pools, dma_pool_alloc()
+ * may be used to allocate memory.  Such memory will all have "consistent"
+ * DMA mappings, accessible by the device and its driver without using
+ * cache flushing primitives.  The actual size of blocks allocated may be
+ * larger than requested because of alignment.
+ *
+ * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
+ * cross that size boundary.  This is useful for devices which have
+ * addressing restrictions on individual DMA transfers, such as not crossing
+ * boundaries of 4KBytes.
+ */
+struct dma_pool *dma_pool_create(const char *name, struct device *dev,
+				 size_t size, size_t align, size_t boundary)
+{
+	struct dma_pool *retval;
+	size_t allocation;
+
+	if (align == 0) {
+		align = 1;
+	} else if (align & (align - 1)) {
+		return NULL;
+	}
+
+	if (size == 0) {
+		return NULL;
+	} else if (size < 4) {
+		size = 4;
+	}
+
+	if ((size % align) != 0)
+		size = ALIGN(size, align);
+
+	allocation = max_t(size_t, size, PAGE_SIZE);
+
+	if (!boundary) {
+		boundary = allocation;
+	} else if ((boundary < size) || (boundary & (boundary - 1))) {
+		return NULL;
+	}
+
+	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
+	if (!retval)
+		return retval;
+
+	strlcpy(retval->name, name, sizeof(retval->name));
+
+	retval->dev = dev;
+
+	INIT_LIST_HEAD(&retval->page_list);
+	spin_lock_init(&retval->lock);
+	retval->size = size;
+	retval->boundary = boundary;
+	retval->allocation = allocation;
+	init_waitqueue_head(&retval->waitq);
+
+	if (dev) {
+		int ret;
+
+		mutex_lock(&pools_lock);
+		if (list_empty(&dev->dma_pools))
+			ret = device_create_file(dev, &dev_attr_pools);
+		else
+			ret = 0;
+		/* note:  not currently insisting "name" be unique */
+		if (!ret)
+			list_add(&retval->pools, &dev->dma_pools);
+		else {
+			kfree(retval);
+			retval = NULL;
+		}
+		mutex_unlock(&pools_lock);
+	} else
+		INIT_LIST_HEAD(&retval->pools);
+
+	return retval;
+}
+EXPORT_SYMBOL(dma_pool_create);
+
+static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
+{
+	unsigned int offset = 0;
+	unsigned int next_boundary = pool->boundary;
+
+	do {
+		unsigned int next = offset + pool->size;
+		if (unlikely((next + pool->size) >= next_boundary)) {
+			next = next_boundary;
+			next_boundary += pool->boundary;
+		}
+		*(int *)(page->vaddr + offset) = next;
+		offset = next;
+	} while (offset < pool->allocation);
+}
+
+static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
+{
+	struct dma_page *page;
+
+	page = kmalloc(sizeof(*page), mem_flags);
+	if (!page)
+		return NULL;
+	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
+					 &page->dma, mem_flags);
+	if (page->vaddr) {
+#ifdef	CONFIG_DEBUG_SLAB
+		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
+#endif
+		pool_initialise_page(pool, page);
+		list_add(&page->page_list, &pool->page_list);
+		page->in_use = 0;
+		page->offset = 0;
+	} else {
+		kfree(page);
+		page = NULL;
+	}
+	return page;
+}
+
+static inline int is_page_busy(struct dma_page *page)
+{
+	return page->in_use != 0;
+}
+
+static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
+{
+	dma_addr_t dma = page->dma;
+
+#ifdef	CONFIG_DEBUG_SLAB
+	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
+#endif
+	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
+	list_del(&page->page_list);
+	kfree(page);
+}
+
+/**
+ * dma_pool_destroy - destroys a pool of dma memory blocks.
+ * @pool: dma pool that will be destroyed
+ * Context: !in_interrupt()
+ *
+ * Caller guarantees that no more memory from the pool is in use,
+ * and that nothing will try to use the pool after this call.
+ */
+void dma_pool_destroy(struct dma_pool *pool)
+{
+	mutex_lock(&pools_lock);
+	list_del(&pool->pools);
+	if (pool->dev && list_empty(&pool->dev->dma_pools))
+		device_remove_file(pool->dev, &dev_attr_pools);
+	mutex_unlock(&pools_lock);
+
+	while (!list_empty(&pool->page_list)) {
+		struct dma_page *page;
+		page = list_entry(pool->page_list.next,
+				  struct dma_page, page_list);
+		if (is_page_busy(page)) {
+			if (pool->dev)
+				dev_err(pool->dev,
+					"dma_pool_destroy %s, %p busy\n",
+					pool->name, page->vaddr);
+			else
+				printk(KERN_ERR
+				       "dma_pool_destroy %s, %p busy\n",
+				       pool->name, page->vaddr);
+			/* leak the still-in-use consistent memory */
+			list_del(&page->page_list);
+			kfree(page);
+		} else
+			pool_free_page(pool, page);
+	}
+
+	kfree(pool);
+}
+EXPORT_SYMBOL(dma_pool_destroy);
+
+/**
+ * dma_pool_alloc - get a block of consistent memory
+ * @pool: dma pool that will produce the block
+ * @mem_flags: GFP_* bitmask
+ * @handle: pointer to dma address of block
+ *
+ * This returns the kernel virtual address of a currently unused block,
+ * and reports its dma address through the handle.
+ * If such a memory block can't be allocated, %NULL is returned.
+ */
+void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+		     dma_addr_t *handle)
+{
+	unsigned long flags;
+	struct dma_page *page;
+	size_t offset;
+	void *retval;
+
+	spin_lock_irqsave(&pool->lock, flags);
+ restart:
+	list_for_each_entry(page, &pool->page_list, page_list) {
+		if (page->offset < pool->allocation)
+			goto ready;
+	}
+	page = pool_alloc_page(pool, GFP_ATOMIC);
+	if (!page) {
+		if (mem_flags & __GFP_WAIT) {
+			DECLARE_WAITQUEUE(wait, current);
+
+			__set_current_state(TASK_INTERRUPTIBLE);
+			__add_wait_queue(&pool->waitq, &wait);
+			spin_unlock_irqrestore(&pool->lock, flags);
+
+			schedule_timeout(POOL_TIMEOUT_JIFFIES);
+
+			spin_lock_irqsave(&pool->lock, flags);
+			__remove_wait_queue(&pool->waitq, &wait);
+			goto restart;
+		}
+		retval = NULL;
+		goto done;
+	}
+
+ ready:
+	page->in_use++;
+	offset = page->offset;
+	page->offset = *(int *)(page->vaddr + offset);
+	retval = offset + page->vaddr;
+	*handle = offset + page->dma;
+#ifdef	CONFIG_DEBUG_SLAB
+	memset(retval, POOL_POISON_ALLOCATED, pool->size);
+#endif
+ done:
+	spin_unlock_irqrestore(&pool->lock, flags);
+	return retval;
+}
+EXPORT_SYMBOL(dma_pool_alloc);
+
+static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
+{
+	unsigned long flags;
+	struct dma_page *page;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	list_for_each_entry(page, &pool->page_list, page_list) {
+		if (dma < page->dma)
+			continue;
+		if (dma < (page->dma + pool->allocation))
+			goto done;
+	}
+	page = NULL;
+ done:
+	spin_unlock_irqrestore(&pool->lock, flags);
+	return page;
+}
+
+/**
+ * dma_pool_free - put block back into dma pool
+ * @pool: the dma pool holding the block
+ * @vaddr: virtual address of block
+ * @dma: dma address of block
+ *
+ * Caller promises neither device nor driver will again touch this block
+ * unless it is first re-allocated.
+ */
+void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
+{
+	struct dma_page *page;
+	unsigned long flags;
+	unsigned int offset;
+
+	page = pool_find_page(pool, dma);
+	if (!page) {
+		if (pool->dev)
+			dev_err(pool->dev,
+				"dma_pool_free %s, %p/%lx (bad dma)\n",
+				pool->name, vaddr, (unsigned long)dma);
+		else
+			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
+			       pool->name, vaddr, (unsigned long)dma);
+		return;
+	}
+
+	offset = vaddr - page->vaddr;
+#ifdef	CONFIG_DEBUG_SLAB
+	if ((dma - page->dma) != offset) {
+		if (pool->dev)
+			dev_err(pool->dev,
+				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
+				pool->name, vaddr, (unsigned long long)dma);
+		else
+			printk(KERN_ERR
+			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
+			       pool->name, vaddr, (unsigned long long)dma);
+		return;
+	}
+	{
+		unsigned int chain = page->offset;
+		while (chain < pool->allocation) {
+			if (chain != offset) {
+				chain = *(int *)(page->vaddr + chain);
+				continue;
+			}
+			if (pool->dev)
+				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
+					"already free\n", pool->name,
+					(unsigned long long)dma);
+			else
+				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
+				       "already free\n", pool->name,
+				       (unsigned long long)dma);
+			return;
+		}
+	}
+	memset(vaddr, POOL_POISON_FREED, pool->size);
+#endif
+
+	spin_lock_irqsave(&pool->lock, flags);
+	page->in_use--;
+	*(int *)vaddr = page->offset;
+	page->offset = offset;
+	if (waitqueue_active(&pool->waitq))
+		wake_up_locked(&pool->waitq);
+	/*
+	 * Resist a temptation to do
+	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
+	 * Better have a few empty pages hang around.
+	 */
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+EXPORT_SYMBOL(dma_pool_free);
+
+/*
+ * Managed DMA pool
+ */
+static void dmam_pool_release(struct device *dev, void *res)
+{
+	struct dma_pool *pool = *(struct dma_pool **)res;
+
+	dma_pool_destroy(pool);
+}
+
+static int dmam_pool_match(struct device *dev, void *res, void *match_data)
+{
+	return *(struct dma_pool **)res == match_data;
+}
+
+/**
+ * dmam_pool_create - Managed dma_pool_create()
+ * @name: name of pool, for diagnostics
+ * @dev: device that will be doing the DMA
+ * @size: size of the blocks in this pool.
+ * @align: alignment requirement for blocks; must be a power of two
+ * @allocation: returned blocks won't cross this boundary (or zero)
+ *
+ * Managed dma_pool_create().  DMA pool created with this function is
+ * automatically destroyed on driver detach.
+ */
+struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
+				  size_t size, size_t align, size_t allocation)
+{
+	struct dma_pool **ptr, *pool;
+
+	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
+	if (pool)
+		devres_add(dev, ptr);
+	else
+		devres_free(ptr);
+
+	return pool;
+}
+EXPORT_SYMBOL(dmam_pool_create);
+
+/**
+ * dmam_pool_destroy - Managed dma_pool_destroy()
+ * @pool: dma pool that will be destroyed
+ *
+ * Managed dma_pool_destroy().
+ */
+void dmam_pool_destroy(struct dma_pool *pool)
+{
+	struct device *dev = pool->dev;
+
+	dma_pool_destroy(pool);
+	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
+}
+EXPORT_SYMBOL(dmam_pool_destroy);
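
Taken together, mm/dmapool.c replaces the old per-page bitmap with a free list threaded through the free blocks themselves: pool_initialise_page() stores the offset of the next free block in the first bytes of each block (which is why block size is rounded up to at least 4 bytes, enough to hold an int), and page->offset always names the first free block. A minimal caller of the API documented above, for a hypothetical driver that already holds a struct device *dev; the pool name and sizes are illustrative:

/* hedged sketch: typical dma_pool usage from a driver */
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>

static int example_use_pool(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* 64-byte blocks, 8-byte aligned, never crossing a 4 KiB boundary */
	pool = dma_pool_create("example", dev, 64, 8, 4096);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* hand 'dma' to the device; the CPU side uses 'vaddr' */

	dma_pool_free(pool, vaddr, dma);
	dma_pool_destroy(pool);
	return 0;
}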