author		Lars-Peter Clausen <lars@metafoo.de>	2014-12-03 10:07:28 -0500
committer	Michal Simek <michal.simek@xilinx.com>	2014-12-17 06:59:59 -0500
commit		3a8e3265179b7e6394d7aab4d6df5651b49e7243 (patch)
tree		82194a886cb05fe27ba3ddf0dca4ec05c3977e4a /arch/microblaze
parent		b2776bf7149bddd1f4161f14f79520f17fc1d71d (diff)
microblaze: Fix mmap for cache coherent memory
When running in a non-cache-coherent configuration, the memory that was
allocated with dma_alloc_coherent() has a custom mapping, so there is no
1-to-1 relationship between the kernel virtual address and the PFN. This
means that virt_to_pfn() will not work correctly for those addresses, and
the default mmap implementation, in the form of dma_common_mmap(), will map
some random memory area rather than the requested one.

Fix this by providing a custom mmap implementation that looks up the PFN
from the page table instead of using virt_to_pfn().

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
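For context, a minimal driver-side sketch (not part of this patch; the device
structure, field names and file operation are hypothetical) of how the new
.mmap hook gets exercised: a driver hands its dma_alloc_coherent() buffer to
dma_mmap_coherent(), which dispatches to dma_direct_ops->mmap, i.e. the
dma_direct_mmap_coherent() added below.

/* Hypothetical driver sketch, not from this patch: export a buffer
 * obtained from dma_alloc_coherent() to userspace via the driver's
 * file_operations->mmap handler. */
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct mydrv {			/* hypothetical driver state */
	struct device *dev;
	void *buf_cpu;		/* CPU address from dma_alloc_coherent() */
	dma_addr_t buf_dma;	/* matching DMA handle */
	size_t buf_size;
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv *drv = file->private_data;

	/* Forwards to the architecture's dma_map_ops->mmap callback. */
	return dma_mmap_coherent(drv->dev, vma, drv->buf_cpu,
				 drv->buf_dma, drv->buf_size);
}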
Diffstat (limited to 'arch/microblaze')
-rw-r--r--	arch/microblaze/include/asm/pgtable.h	 1
-rw-r--r--	arch/microblaze/kernel/dma.c		27
-rw-r--r--	arch/microblaze/mm/consistent.c		25
3 files changed, 48 insertions, 5 deletions
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 95cef0b5f836..df19d0c47be8 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -565,6 +565,7 @@ void consistent_free(size_t size, void *vaddr);
 void consistent_sync(void *vaddr, size_t size, int direction);
 void consistent_sync_page(struct page *page, unsigned long offset,
 		size_t size, int direction);
+unsigned long consistent_virt_to_pfn(void *vaddr);
 
 void setup_memory(void);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 4633c36c1b32..ed7ba8a11822 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -154,9 +154,36 @@ dma_direct_sync_sg_for_device(struct device *dev,
 			__dma_sync(sg->dma_address, sg->length, direction);
 }
 
+int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+			     void *cpu_addr, dma_addr_t handle, size_t size,
+			     struct dma_attrs *attrs)
+{
+#ifdef CONFIG_MMU
+	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long off = vma->vm_pgoff;
+	unsigned long pfn;
+
+	if (off >= count || user_count > (count - off))
+		return -ENXIO;
+
+#ifdef NOT_COHERENT_CACHE
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = consistent_virt_to_pfn(cpu_addr);
+#else
+	pfn = virt_to_pfn(cpu_addr);
+#endif
+	return remap_pfn_range(vma, vma->vm_start, pfn + off,
+			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
+#else
+	return -ENXIO;
+#endif
+}
+
 struct dma_map_ops dma_direct_ops = {
 	.alloc		= dma_direct_alloc_coherent,
 	.free		= dma_direct_free_coherent,
+	.mmap		= dma_direct_mmap_coherent,
 	.map_sg		= dma_direct_map_sg,
 	.dma_supported	= dma_direct_dma_supported,
 	.map_page	= dma_direct_map_page,
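The off/count check above validates the mapping request against the buffer
size: the byte offset passed to mmap() in userspace arrives in the kernel as
vma->vm_pgoff, counted in pages. A userspace sketch, assuming the
hypothetical /dev/mydrv node exported by the driver sketch above:

/* Userspace sketch (hypothetical device node): map the coherent buffer
 * and touch it.  In the non-coherent configuration the mapping is
 * created uncached via pgprot_noncached(), so stores go straight to the
 * DMA buffer. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;	/* must fit within the driver's buffer */
	int fd = open("/dev/mydrv", O_RDWR);

	if (fd < 0)
		return 1;

	uint8_t *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		close(fd);
		return 1;
	}

	buf[0] = 0xaa;		/* lands directly in the DMA buffer */
	munmap(buf, len);
	close(fd);
	return 0;
}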
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index e10ad930895e..b06c3a7faf20 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -156,6 +156,25 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
 }
 EXPORT_SYMBOL(consistent_alloc);
 
+#ifdef CONFIG_MMU
+static pte_t *consistent_virt_to_pte(void *vaddr)
+{
+	unsigned long addr = (unsigned long)vaddr;
+
+	return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
+}
+
+unsigned long consistent_virt_to_pfn(void *vaddr)
+{
+	pte_t *ptep = consistent_virt_to_pte(vaddr);
+
+	if (pte_none(*ptep) || !pte_present(*ptep))
+		return 0;
+
+	return pte_pfn(*ptep);
+}
+#endif
+
 /*
  * free page(s) as defined by the above mapping.
  */
@@ -181,13 +200,9 @@ void consistent_free(size_t size, void *vaddr)
 	} while (size -= PAGE_SIZE);
 #else
 	do {
-		pte_t *ptep;
+		pte_t *ptep = consistent_virt_to_pte(vaddr);
 		unsigned long pfn;
 
-		ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
-						(unsigned int)vaddr),
-					(unsigned int)vaddr),
-				(unsigned int)vaddr);
 		if (!pte_none(*ptep) && pte_present(*ptep)) {
 			pfn = pte_pfn(*ptep);
 			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
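For contrast with the page-table walk used by consistent_virt_to_pfn(), a
sketch of the arithmetic conversion that the commit message says breaks here;
it assumes virt_to_pfn() has the usual linear __pa()-based definition, which
only holds for the direct-mapped region and not for the separate virtual
range that consistent_alloc() sets up.

/* Sketch, not from this patch: the linear conversion assumed here only
 * works where virtual and physical addresses differ by a constant
 * offset.  For the remapped consistent area the PFN has to come from
 * the page table instead, as consistent_virt_to_pfn() does above. */
#include <linux/mm.h>

static inline unsigned long linear_virt_to_pfn(void *vaddr)
{
	return __pa(vaddr) >> PAGE_SHIFT;	/* wrong for remapped buffers */
}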