author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2011-03-24 16:50:06 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2011-03-29 19:44:00 -0400
commit		6090912c4abcfc6c81b156cf2bb4cda23ae6e847 (patch)
tree		9bddd5b697883f706a53ef0413181845bb735250 /arch/powerpc
parent		15d260b36facc1aa769fb39b0efc41f4c8c44729 (diff)
powerpc: Implement dma_mmap_coherent()
This is used by ALSA to mmap buffers allocated with dma_alloc_coherent() into userspace. We need a special variant to handle machines with non-coherent DMA, as those buffers have "special" virtual addresses and require non-cacheable mappings.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
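For illustration only (not part of this commit): a driver exporting a buffer allocated with dma_alloc_coherent() could wire its .mmap file operation straight to the new helper. All names below (my_dev, my_buf, my_handle, my_size, my_drv_mmap) are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver state: buffer previously obtained with
 * dma_alloc_coherent(my_dev, my_size, &my_handle, GFP_KERNEL).
 */
static struct device *my_dev;
static void *my_buf;
static dma_addr_t my_handle;
static size_t my_size;

static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* The helper applies a non-cacheable protection itself on
	 * non-coherent machines, so the driver does nothing special.
	 */
	return dma_mmap_coherent(my_dev, vma, my_buf, my_handle, my_size);
}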
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/dma-mapping.h	|  6
-rw-r--r--	arch/powerpc/kernel/dma.c		| 18
-rw-r--r--	arch/powerpc/mm/dma-noncoherent.c	| 20
3 files changed, 44 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 6d2416a85709..dd70fac57ec8 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -42,6 +42,7 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
 			    size_t size, int direction);
+extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
 
 #else /* ! CONFIG_NOT_COHERENT_CACHE */
 /*
@@ -198,6 +199,11 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+			     void *, dma_addr_t, size_t);
+#define ARCH_HAS_DMA_MMAP_COHERENT
+
+
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction direction)
 {
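ARCH_HAS_DMA_MMAP_COHERENT is what generic code (such as the ALSA PCM mmap path this commit is aimed at) can test to know the architecture provides the helper. A minimal sketch of such a consumer, with an assumed -ENXIO fallback when the define is absent; the function name is made up:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

static int my_mmap_coherent_buffer(struct device *dev,
				   struct vm_area_struct *vma,
				   void *cpu_addr, dma_addr_t handle)
{
#ifdef ARCH_HAS_DMA_MMAP_COHERENT
	return dma_mmap_coherent(dev, vma, cpu_addr, handle,
				 vma->vm_end - vma->vm_start);
#else
	return -ENXIO;	/* architecture cannot mmap coherent memory */
#endif
}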
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index cf02cad62d9a..d238c082c3c5 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -179,3 +179,21 @@ static int __init dma_init(void)
 	return 0;
 }
 fs_initcall(dma_init);
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+		      void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+	pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+	return remap_pfn_range(vma, vma->vm_start,
+			       pfn + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(dma_mmap_coherent);
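From userspace the mapping is reached with a plain mmap() on the device file; note that remap_pfn_range() above honours vma->vm_pgoff, so a page-aligned file offset moves the window within the buffer. A hypothetical userspace counterpart to the driver sketch above ("/dev/mydrv" is an assumed device node):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/mydrv", O_RDWR);	/* hypothetical node */
	if (fd < 0)
		return 1;

	/* Map one page of the coherent buffer, starting at offset 0. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return 1;
	}

	((volatile unsigned char *)p)[0] = 0xaa;	/* touch the buffer */

	munmap(p, 4096);
	close(fd);
	return 0;
}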
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 757c0bed9a91..b42f76c4948d 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -399,3 +399,23 @@ void __dma_sync_page(struct page *page, unsigned long offset,
 #endif
 }
 EXPORT_SYMBOL(__dma_sync_page);
+
+/*
+ * Return the PFN for a given cpu virtual address returned by
+ * __dma_alloc_coherent. This is used by dma_mmap_coherent()
+ */
+unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
+{
+	/* This should always be populated, so we don't test every
+	 * level. If that fails, we'll have a nice crash which
+	 * will be as good as a BUG_ON()
+	 */
+	pgd_t *pgd = pgd_offset_k(cpu_addr);
+	pud_t *pud = pud_offset(pgd, cpu_addr);
+	pmd_t *pmd = pmd_offset(pud, cpu_addr);
+	pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);
+
+	if (pte_none(*ptep) || !pte_present(*ptep))
+		return 0;
+	return pte_pfn(*ptep);
+}