author		GuanXuetao <gxt@mprc.pku.edu.cn>	2011-01-15 05:18:29 -0500
committer	GuanXuetao <gxt@mprc.pku.edu.cn>	2011-03-16 21:19:09 -0400
commit		10c9c10c31514564b09c153432a42ffaea3ce831 (patch)
tree		04a60b9a1e48eaa2d9346e265a1c2fe2db5ec670 /arch/unicore32/mm
parent		56372b0b2f533c9a25bd40a0577405f6ddb7cff2 (diff)
unicore32 core architecture: mm related: consistent device DMA handling
This patch implements consistent (coherent) device DMA handling for the memory management code; the DMA device operations are also added here.

Signed-off-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/unicore32/mm')
-rw-r--r--	arch/unicore32/mm/cache-ucv2.S	212
-rw-r--r--	arch/unicore32/mm/dma-swiotlb.c	34
-rw-r--r--	arch/unicore32/mm/flush.c	98
-rw-r--r--	arch/unicore32/mm/tlb-ucv2.S	89
4 files changed, 433 insertions(+), 0 deletions(-)
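As context for the diff below, the swiotlb-backed dma_map_ops added in dma-swiotlb.c serves the generic DMA mapping API. The following is a minimal, hypothetical driver-side sketch, not part of this patch; the device pointer, function name, and error handling are assumptions for illustration only.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical illustration: allocate and release a coherent DMA
 * buffer.  On unicore32 this request is expected to be serviced by the
 * swiotlb_dma_map_ops table added in this patch. */
static int example_coherent_alloc(struct device *dev)
{
	dma_addr_t bus_addr;
	void *cpu_addr;

	/* Coherent buffer: CPU and device views stay consistent without
	 * explicit cache maintenance by the driver. */
	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand bus_addr to the device, use cpu_addr from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus_addr);
	return 0;
}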
diff --git a/arch/unicore32/mm/cache-ucv2.S b/arch/unicore32/mm/cache-ucv2.S
new file mode 100644
index 00000000000..ecaa1727f90
--- /dev/null
+++ b/arch/unicore32/mm/cache-ucv2.S
@@ -0,0 +1,212 @@
/*
 * linux/arch/unicore32/mm/cache-ucv2.S
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This is the "shell" of the UniCore-v2 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>

#include "proc-macros.S"

/*
 * __cpuc_flush_icache_all()
 * __cpuc_flush_kern_all()
 * __cpuc_flush_user_all()
 *
 * Flush the entire cache.
 */
ENTRY(__cpuc_flush_icache_all)
	/*FALLTHROUGH*/
ENTRY(__cpuc_flush_kern_all)
	/*FALLTHROUGH*/
ENTRY(__cpuc_flush_user_all)
	mov	r0, #0
	movc	p0.c5, r0, #14			@ Dcache flush all
	nop8

	mov	r0, #0
	movc	p0.c5, r0, #20			@ Icache invalidate all
	nop8

	mov	pc, lr

/*
 * __cpuc_flush_user_range(start, end, flags)
 *
 * Flush a range of cache entries in the specified address space.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 * - flags - vm_area_struct flags describing address space
 */
ENTRY(__cpuc_flush_user_range)
	cxor.a	r2, #0
	beq	__cpuc_dma_flush_range

#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
	andn	r0, r0, #CACHE_LINESIZE - 1	@ Safety check
	sub	r1, r1, r0
	csub.a	r1, #MAX_AREA_SIZE
	bsg	2f

	andn	r1, r1, #CACHE_LINESIZE - 1
	add	r1, r1, #CACHE_LINESIZE

101:	dcacheline_flush	r0, r11, r12

	add	r0, r0, #CACHE_LINESIZE
	sub.a	r1, r1, #CACHE_LINESIZE
	bns	101b
	b	3f
#endif
2:	mov	ip, #0
	movc	p0.c5, ip, #14			@ Dcache flush all
	nop8

3:	mov	ip, #0
	movc	p0.c5, ip, #20			@ Icache invalidate all
	nop8

	mov	pc, lr

/*
 * __cpuc_coherent_kern_range(start,end)
 * __cpuc_coherent_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region.  This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(__cpuc_coherent_kern_range)
	/* FALLTHROUGH */
ENTRY(__cpuc_coherent_user_range)
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
	andn	r0, r0, #CACHE_LINESIZE - 1	@ Safety check
	sub	r1, r1, r0
	csub.a	r1, #MAX_AREA_SIZE
	bsg	2f

	andn	r1, r1, #CACHE_LINESIZE - 1
	add	r1, r1, #CACHE_LINESIZE

	@ r0 va2pa r10
	mov	r9, #PAGE_SZ
	sub	r9, r9, #1			@ PAGE_MASK
101:	va2pa	r0, r10, r11, r12, r13, 2f	@ r10 is PA
	b	103f
102:	cand.a	r0, r9
	beq	101b

103:	movc	p0.c5, r10, #11			@ Dcache clean line of R10
	nop8

	add	r0, r0, #CACHE_LINESIZE
	add	r10, r10, #CACHE_LINESIZE
	sub.a	r1, r1, #CACHE_LINESIZE
	bns	102b
	b	3f
#endif
2:	mov	ip, #0
	movc	p0.c5, ip, #10			@ Dcache clean all
	nop8

3:	mov	ip, #0
	movc	p0.c5, ip, #20			@ Icache invalidate all
	nop8

	mov	pc, lr

/*
 * __cpuc_flush_kern_dcache_area(void *addr, size_t size)
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(__cpuc_flush_kern_dcache_area)
	mov	ip, #0
	movc	p0.c5, ip, #14			@ Dcache flush all
	nop8
	mov	pc, lr

/*
 * __cpuc_dma_clean_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(__cpuc_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
	andn	r0, r0, #CACHE_LINESIZE - 1
	sub	r1, r1, r0
	andn	r1, r1, #CACHE_LINESIZE - 1
	add	r1, r1, #CACHE_LINESIZE

	csub.a	r1, #MAX_AREA_SIZE
	bsg	2f

	@ r0 va2pa r10
	mov	r9, #PAGE_SZ
	sub	r9, r9, #1			@ PAGE_MASK
101:	va2pa	r0, r10, r11, r12, r13, 2f	@ r10 is PA
	b	1f
102:	cand.a	r0, r9
	beq	101b

1:	movc	p0.c5, r10, #11			@ Dcache clean line of R10
	nop8
	add	r0, r0, #CACHE_LINESIZE
	add	r10, r10, #CACHE_LINESIZE
	sub.a	r1, r1, #CACHE_LINESIZE
	bns	102b
	mov	pc, lr
#endif
2:	mov	ip, #0
	movc	p0.c5, ip, #10			@ Dcache clean all
	nop8

	mov	pc, lr

/*
 * __cpuc_dma_inv_range(start,end)
 * __cpuc_dma_flush_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
__cpuc_dma_inv_range:
	/* FALLTHROUGH */
ENTRY(__cpuc_dma_flush_range)
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
	andn	r0, r0, #CACHE_LINESIZE - 1
	sub	r1, r1, r0
	andn	r1, r1, #CACHE_LINESIZE - 1
	add	r1, r1, #CACHE_LINESIZE

	csub.a	r1, #MAX_AREA_SIZE
	bsg	2f

	@ r0 va2pa r10
101:	dcacheline_flush	r0, r11, r12

	add	r0, r0, #CACHE_LINESIZE
	sub.a	r1, r1, #CACHE_LINESIZE
	bns	101b
	mov	pc, lr
#endif
2:	mov	ip, #0
	movc	p0.c5, ip, #14			@ Dcache flush all
	nop8

	mov	pc, lr
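The coherent-range routines above are the low-level half of I/D-cache maintenance for freshly written code. Below is a hedged sketch of a typical caller; it assumes the generic flush_icache_range() helper ultimately resolves to __cpuc_coherent_kern_range on this architecture, a wiring not shown in this hunk, and the function name is illustrative.

#include <linux/types.h>
#include <asm/cacheflush.h>

/* Illustrative only: after instructions have been written into a
 * kernel buffer, make the I-cache coherent with the D-cache before
 * executing them. */
static void example_sync_new_code(void *insns, size_t len)
{
	unsigned long start = (unsigned long)insns;

	/* ... store the new instructions into insns[0..len) ... */

	flush_icache_range(start, start + len);
}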
diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c
new file mode 100644
index 00000000000..bfa9fbb2bbb
--- /dev/null
+++ b/arch/unicore32/mm/dma-swiotlb.c
@@ -0,0 +1,34 @@
/*
 * Contains routines needed to support swiotlb for UniCore32.
 *
 * Copyright (C) 2010 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>

#include <asm/dma.h>

struct dma_map_ops swiotlb_dma_map_ops = {
	.alloc_coherent = swiotlb_alloc_coherent,
	.free_coherent = swiotlb_free_coherent,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.dma_supported = swiotlb_dma_supported,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(swiotlb_dma_map_ops);
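For streaming (non-coherent) buffers the same ops table is reached through dma_map_single()/dma_unmap_single(), which land in .map_page/.unmap_page above. A minimal sketch under those assumptions; the device pointer, buffer, DMA direction, and function name are illustrative, not part of this patch.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative streaming mapping: swiotlb bounce-buffers the transfer
 * when the buffer is not directly addressable by the device. */
static int example_stream_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -EIO;

	/* ... kick off the transfer and wait for it to complete ... */

	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}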
diff --git a/arch/unicore32/mm/flush.c b/arch/unicore32/mm/flush.c
new file mode 100644
index 00000000000..93478cc8b26
--- /dev/null
+++ b/arch/unicore32/mm/flush.c
@@ -0,0 +1,98 @@
/*
 * linux/arch/unicore32/mm/flush.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

void flush_cache_mm(struct mm_struct *mm)
{
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
		unsigned long pfn)
{
}

static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
		unsigned long uaddr, void *kaddr, unsigned long len)
{
	/* VIPT non-aliasing D-cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;

		__cpuc_coherent_kern_range(addr, addr + len);
	}
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	__cpuc_flush_kern_dcache_area(page_address(page), PAGE_SIZE);
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
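flush_dcache_page() above is what keeps a page's kernel and user mappings coherent after the kernel writes to the page. A hedged sketch of a typical caller follows; the helper name and the kmap-based copy are assumptions for illustration, not taken from this patch.

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: the kernel fills a page that may also be mapped
 * into user space, then calls flush_dcache_page() so the user mapping
 * observes the new data (handled above for this VIPT non-aliasing
 * D-cache). */
static void example_fill_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap(page);

	memcpy(dst, src, len);
	kunmap(page);
	flush_dcache_page(page);
}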
diff --git a/arch/unicore32/mm/tlb-ucv2.S b/arch/unicore32/mm/tlb-ucv2.S
new file mode 100644
index 00000000000..061d455f9a1
--- /dev/null
+++ b/arch/unicore32/mm/tlb-ucv2.S
@@ -0,0 +1,89 @@
/*
 * linux/arch/unicore32/mm/tlb-ucv2.S
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

/*
 * __cpu_flush_user_tlb_range(start, end, vma)
 *
 * Invalidate a range of TLB entries in the specified address space.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 * - vma   - vma_struct describing address range
 */
ENTRY(__cpu_flush_user_tlb_range)
#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
	mov	r0, r0 >> #PAGE_SHIFT		@ align address
	mov	r0, r0 << #PAGE_SHIFT
	vma_vm_flags r2, r2			@ get vma->vm_flags
1:
	movc	p0.c6, r0, #3
	nop8

	cand.a	r2, #VM_EXEC			@ Executable area ?
	beq	2f

	movc	p0.c6, r0, #5
	nop8
2:
	add	r0, r0, #PAGE_SZ
	csub.a	r0, r1
	beb	1b
#else
	movc	p0.c6, r0, #2
	nop8

	cand.a	r2, #VM_EXEC			@ Executable area ?
	beq	2f

	movc	p0.c6, r0, #4
	nop8
2:
#endif
	mov	pc, lr

/*
 * __cpu_flush_kern_tlb_range(start,end)
 *
 * Invalidate a range of kernel TLB entries.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 */
ENTRY(__cpu_flush_kern_tlb_range)
#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
	mov	r0, r0 >> #PAGE_SHIFT		@ align address
	mov	r0, r0 << #PAGE_SHIFT
1:
	movc	p0.c6, r0, #3
	nop8

	movc	p0.c6, r0, #5
	nop8

	add	r0, r0, #PAGE_SZ
	csub.a	r0, r1
	beb	1b
#else
	movc	p0.c6, r0, #2
	nop8

	movc	p0.c6, r0, #4
	nop8
#endif
	mov	pc, lr
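The kernel-range TLB flush above would normally be reached through the generic flush_tlb_kernel_range() helper; that wiring lives in the architecture's tlbflush.h and is assumed here rather than shown in this hunk. A minimal sketch with an illustrative function name:

#include <asm/tlbflush.h>

/* Illustrative only: after kernel page-table entries covering
 * [start, end) have been changed (e.g. a vmalloc/ioremap teardown),
 * drop the stale TLB entries for that range. */
static void example_kernel_remap_sync(unsigned long start, unsigned long end)
{
	/* ... modify the kernel PTEs for [start, end) ... */

	flush_tlb_kernel_range(start, end);
}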