Diffstat (limited to 'arch/powerpc/lib/dma-noncoherent.c')
-rw-r--r--	arch/powerpc/lib/dma-noncoherent.c	237
1 file changed, 0 insertions(+), 237 deletions(-)
diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c
deleted file mode 100644
index 005a28d380af..000000000000
--- a/arch/powerpc/lib/dma-noncoherent.c
+++ /dev/null
@@ -1,237 +0,0 @@
/*
 * PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 * Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 * -- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
{
        struct page *page;
        unsigned long order;
        int i;
        unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned int array_size = nr_pages * sizeof(struct page *);
        struct page **pages;
        struct page *end;
        u64 mask = 0x00ffffff, limit; /* ISA default */
        struct vm_struct *area;

        BUG_ON(!mem_init_done);
        size = PAGE_ALIGN(size);
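        /*
         * limit is the first address beyond what the mask can reach
         * (mask + 1 for the usual 2^n - 1 masks); a zero limit means
         * the mask covers the whole address space, so no size check.
         */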
        limit = (mask + 1) & ~mask;
        if (limit && size >= limit) {
                printk(KERN_WARNING "coherent allocation too big (requested "
                       "%#zx mask %#Lx)\n", size, mask);
                return NULL;
        }

        order = get_order(size);

        if (mask != 0xffffffff)
                gfp |= GFP_DMA;

        page = alloc_pages(gfp, order);
        if (!page)
                goto no_page;

        end = page + (1 << order);

        /*
         * Invalidate any data that might be lurking in the
         * kernel direct-mapped region for device DMA.
         */
        {
                unsigned long kaddr = (unsigned long)page_address(page);
                memset(page_address(page), 0, size);
                flush_dcache_range(kaddr, kaddr + size);
        }

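        /*
         * split_page() turns the single order-N allocation into 1 << N
         * independent order-0 pages, so the unused tail pages beyond
         * 'size' can be handed back with __free_page() further down.
         */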
        split_page(page, order);

        /*
         * Set the "dma handle"
         */
        *handle = page_to_phys(page);

        area = get_vm_area_caller(size, VM_IOREMAP,
                                  __builtin_return_address(1));
        if (!area)
                goto out_free_pages;

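        /*
         * Describe the allocation with a pages[] array so map_vm_area()
         * can install a non-cached mapping over it; the array itself can
         * be larger than a page, hence the vmalloc() fallback.
         */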
        if (array_size > PAGE_SIZE) {
                pages = vmalloc(array_size);
                area->flags |= VM_VPAGES;
        } else {
                pages = kmalloc(array_size, GFP_KERNEL);
        }
        if (!pages)
                goto out_free_area;

        area->pages = pages;
        area->nr_pages = nr_pages;

        for (i = 0; i < nr_pages; i++)
                pages[i] = page + i;

        if (map_vm_area(area, pgprot_noncached(PAGE_KERNEL), &pages))
                goto out_unmap;

        /*
         * Free the otherwise unused pages.
         */
        page += nr_pages;
        while (page < end) {
                __free_page(page);
                page++;
        }

        return area->addr;
out_unmap:
        vunmap(area->addr);
        if (array_size > PAGE_SIZE)
                vfree(pages);
        else
                kfree(pages);
        goto out_free_pages;
out_free_area:
        free_vm_area(area);
out_free_pages:
        if (page)
                __free_pages(page, order);
no_page:
        return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);

/*
 * Free a region allocated by __dma_alloc_coherent() above; vfree()
 * removes the non-cached mapping and releases the underlying pages.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
        vfree(vaddr);
}
EXPORT_SYMBOL(__dma_free_coherent);

/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        switch (direction) {
        case DMA_NONE:
                BUG();
        case DMA_FROM_DEVICE:
                /*
                 * invalidate only when cache-line aligned otherwise there is
                 * the potential for discarding uncommitted data from the cache
                 */
                if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
                        flush_dcache_range(start, end);
                else
                        invalidate_dcache_range(start, end);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                clean_dcache_range(start, end);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                flush_dcache_range(start, end);
                break;
        }
}
EXPORT_SYMBOL(__dma_sync);

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
                unsigned long offset, size_t size, int direction)
{
        size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
        size_t cur_size = seg_size;
        unsigned long flags, start, seg_offset = offset;
        int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
        int seg_nr = 0;

        local_irq_save(flags);

        do {
                start = (unsigned long)kmap_atomic(page + seg_nr,
                                KM_PPC_SYNC_PAGE) + seg_offset;

                /* Sync this buffer segment */
                __dma_sync((void *)start, seg_size, direction);
                kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
                seg_nr++;

                /* Calculate next buffer segment size */
                seg_size = min((size_t)PAGE_SIZE, size - cur_size);

                /* Add the segment size to our running total */
                cur_size += seg_size;
                seg_offset = 0;
        } while (seg_nr < nr_segs);

        local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page makes memory consistent: identical to __dma_sync(),
 * but it takes a struct page instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
        size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
        __dma_sync_page_highmem(page, offset, size, direction);
#else
        unsigned long start = (unsigned long)page_address(page) + offset;
        __dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
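
For reference, a minimal sketch of how these helpers were reached on
non-coherent ppc32 (in-tree they were normally invoked through the DMA API
wrappers rather than called directly; the ring size, variable names, and
everything other than __dma_alloc_coherent/__dma_free_coherent/__dma_sync
below are hypothetical, shown only to illustrate the calling conventions):

        #define RING_BYTES      4096    /* hypothetical descriptor ring size */

        static void *ring_virt;         /* CPU pointer (non-cached mapping) */
        static dma_addr_t ring_dma;     /* bus address programmed into the device */

        static int ring_setup(void)
        {
                /* coherent allocation: the returned mapping is uncached,
                 * so the ring itself never needs __dma_sync() */
                ring_virt = __dma_alloc_coherent(RING_BYTES, &ring_dma, GFP_KERNEL);
                return ring_virt ? 0 : -ENOMEM;
        }

        static void rx_complete(void *buf, size_t len)
        {
                /* streaming buffer in ordinary cacheable memory: invalidate
                 * the cache lines before the CPU reads what the device wrote */
                __dma_sync(buf, len, DMA_FROM_DEVICE);
        }

        static void ring_teardown(void)
        {
                __dma_free_coherent(RING_BYTES, ring_virt);
        }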