Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig            |  17
-rw-r--r--  arch/arm/mm/Makefile           |   2
-rw-r--r--  arch/arm/mm/copypage-v6.c      |   6
-rw-r--r--  arch/arm/mm/copypage-xscale.S  | 113
-rw-r--r--  arch/arm/mm/copypage-xscale.c  | 131
-rw-r--r--  arch/arm/mm/fault-armv.c       |  31
-rw-r--r--  arch/arm/mm/flush.c            |  44
-rw-r--r--  arch/arm/mm/ioremap.c          |  47
-rw-r--r--  arch/arm/mm/minicache.c        |  73
9 files changed, 218 insertions(+), 246 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 48bac7da8c70..95606b4a3ba6 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -62,7 +62,7 @@ config CPU_ARM720T
 # ARM920T
 config CPU_ARM920T
 	bool "Support ARM920T processor" if !ARCH_S3C2410
-	depends on ARCH_INTEGRATOR || ARCH_S3C2410 || ARCH_IMX
+	depends on ARCH_INTEGRATOR || ARCH_S3C2410 || ARCH_IMX || ARCH_AAEC2000
 	default y if ARCH_S3C2410
 	select CPU_32v4
 	select CPU_ABRT_EV4T
@@ -228,7 +228,6 @@ config CPU_SA1100
 	select CPU_CACHE_V4WB
 	select CPU_CACHE_VIVT
 	select CPU_TLB_V4WB
-	select CPU_MINICACHE
 
 # XScale
 config CPU_XSCALE
@@ -239,7 +238,6 @@ config CPU_XSCALE
 	select CPU_ABRT_EV5T
 	select CPU_CACHE_VIVT
 	select CPU_TLB_V4WBI
-	select CPU_MINICACHE
 
 # ARMv6
 config CPU_V6
@@ -345,11 +343,6 @@ config CPU_TLB_V4WBI
 config CPU_TLB_V6
 	bool
 
-config CPU_MINICACHE
-	bool
-	help
-	  Processor has a minicache.
-
 comment "Processor Features"
 
 config ARM_THUMB
@@ -429,3 +422,11 @@ config HAS_TLS_REG
 	  assume directly accessing that register and always obtain the
 	  expected value only on ARMv7 and above.
 
+config NEEDS_SYSCALL_FOR_CMPXCHG
+	bool
+	default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
+	help
+	  SMP on a pre-ARMv6 processor? Well OK then.
+	  Forget about fast user space cmpxchg support.
+	  It is just not possible.
+
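
The new NEEDS_SYSCALL_FOR_CMPXCHG symbol reflects a hardware limit: a lock-free compare-and-exchange in user space relies on the exclusive-access instructions (ldrex/strex) that only ARMv6 and later provide, so an SMP kernel on ARMv3/v4/v5 has to route user-space cmpxchg through a kernel helper instead. Purely as an illustration (not part of this patch, and assuming GCC inline assembly built for an ARMv6+ target), the "fast" variant that older cores cannot offer looks roughly like this:

#include <stdio.h>

/* Sketch only: a user-space cmpxchg loop using the ARMv6 exclusive monitor.
 * Pre-v6 cores lack ldrex/strex, which is why the option above forces the
 * syscall-based fallback for SMP builds on those CPUs. */
static inline unsigned int cmpxchg_u32(volatile unsigned int *ptr,
				       unsigned int oldval, unsigned int newval)
{
	unsigned int prev, tmp;

	asm volatile(
	"1:	ldrex	%0, [%2]\n"		/* load-exclusive the current value  */
	"	teq	%0, %3\n"		/* matches the expected old value?   */
	"	bne	2f\n"
	"	strex	%1, %4, [%2]\n"		/* try to store the new value        */
	"	teq	%1, #0\n"		/* lost the exclusive monitor: retry */
	"	bne	1b\n"
	"2:"
	: "=&r" (prev), "=&r" (tmp)
	: "r" (ptr), "r" (oldval), "r" (newval)
	: "cc", "memory");

	return prev;
}

int main(void)
{
	volatile unsigned int v = 1;
	unsigned int prev = cmpxchg_u32(&v, 1, 2);

	printf("cmpxchg saw %u, v is now %u\n", prev, v);
	return 0;
}
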
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index ccf316c11e02..59f47d4c2dfe 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -31,8 +31,6 @@ obj-$(CONFIG_CPU_COPY_V6) += copypage-v6.o mmu.o
 obj-$(CONFIG_CPU_SA1100)	+= copypage-v4mc.o
 obj-$(CONFIG_CPU_XSCALE)	+= copypage-xscale.o
 
-obj-$(CONFIG_CPU_MINICACHE)	+= minicache.o
-
 obj-$(CONFIG_CPU_TLB_V3)	+= tlb-v3.o
 obj-$(CONFIG_CPU_TLB_V4WT)	+= tlb-v4.o
 obj-$(CONFIG_CPU_TLB_V4WB)	+= tlb-v4wb.o
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index a8c00236bd3d..27d041574ea7 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -30,8 +30,6 @@
 
 static DEFINE_SPINLOCK(v6_lock);
 
-#define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
-
 /*
  * Copy the user page. No aliasing to deal with so we can just
  * attack the kernel's existing mapping of these pages.
@@ -55,7 +53,7 @@ void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
  */
 void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
 {
-	unsigned int offset = DCACHE_COLOUR(vaddr);
+	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long from, to;
 
 	/*
@@ -95,7 +93,7 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
  */
 void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 {
-	unsigned int offset = DCACHE_COLOUR(vaddr);
+	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long to = to_address + (offset << PAGE_SHIFT);
 
 	/*
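
The two hunks above only swap the file-local DCACHE_COLOUR() macro for the generic CACHE_COLOUR() helper; the computation is identical: it selects which page-sized "colour" slot within an SHMLBA-sized aliasing window a user virtual address occupies, so the kernel can pick a congruent alias for its temporary mapping. A small stand-alone illustration follows; the constants are assumptions for a 4K-page, 16K-SHMLBA configuration and are not taken from this patch:

#include <stdio.h>

/* Assumed example values: 4K pages, 16K aliasing window => 4 colours. */
#define PAGE_SHIFT	12
#define SHMLBA		(4UL << PAGE_SHIFT)
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long vaddrs[] = { 0x0000, 0x1000, 0x2000, 0x3000, 0x4000, 0x5000 };
	unsigned int i;

	/* Addresses one page apart cycle through colours 0..3 and then repeat. */
	for (i = 0; i < sizeof(vaddrs) / sizeof(vaddrs[0]); i++)
		printf("vaddr 0x%05lx -> colour %lu\n",
		       vaddrs[i], CACHE_COLOUR(vaddrs[i]));
	return 0;
}
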
diff --git a/arch/arm/mm/copypage-xscale.S b/arch/arm/mm/copypage-xscale.S
deleted file mode 100644
index bb277316ef52..000000000000
--- a/arch/arm/mm/copypage-xscale.S
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * linux/arch/arm/lib/copypage-xscale.S
- *
- * Copyright (C) 2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/constants.h>
-
-/*
- * General note:
- *  We don't really want write-allocate cache behaviour for these functions
- *  since that will just eat through 8K of the cache.
- */
-
-	.text
-	.align	5
-/*
- * XScale optimised copy_user_page
- *  r0 = destination
- *  r1 = source
- *  r2 = virtual user address of ultimate destination page
- *
- * The source page may have some clean entries in the cache already, but we
- * can safely ignore them - break_cow() will flush them out of the cache
- * if we eventually end up using our copied page.
- *
- * What we could do is use the mini-cache to buffer reads from the source
- * page.  We rely on the mini-cache being smaller than one page, so we'll
- * cycle through the complete cache anyway.
- */
-ENTRY(xscale_mc_copy_user_page)
-	stmfd	sp!, {r4, r5, lr}
-	mov	r5, r0
-	mov	r0, r1
-	bl	map_page_minicache
-	mov	r1, r5
-	mov	lr, #PAGE_SZ/64-1
-
-	/*
-	 * Strangely enough, best performance is achieved
-	 * when prefetching destination as well.  (NP)
-	 */
-	pld	[r0, #0]
-	pld	[r0, #32]
-	pld	[r1, #0]
-	pld	[r1, #32]
-
-1:	pld	[r0, #64]
-	pld	[r0, #96]
-	pld	[r1, #64]
-	pld	[r1, #96]
-
-2:	ldrd	r2, [r0], #8
-	ldrd	r4, [r0], #8
-	mov	ip, r1
-	strd	r2, [r1], #8
-	ldrd	r2, [r0], #8
-	strd	r4, [r1], #8
-	ldrd	r4, [r0], #8
-	strd	r2, [r1], #8
-	strd	r4, [r1], #8
-	mcr	p15, 0, ip, c7, c10, 1		@ clean D line
-	ldrd	r2, [r0], #8
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line
-	ldrd	r4, [r0], #8
-	mov	ip, r1
-	strd	r2, [r1], #8
-	ldrd	r2, [r0], #8
-	strd	r4, [r1], #8
-	ldrd	r4, [r0], #8
-	strd	r2, [r1], #8
-	strd	r4, [r1], #8
-	mcr	p15, 0, ip, c7, c10, 1		@ clean D line
-	subs	lr, lr, #1
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line
-	bgt	1b
-	beq	2b
-
-	ldmfd	sp!, {r4, r5, pc}
-
-	.align	5
-/*
- * XScale optimised clear_user_page
- *  r0 = destination
- *  r1 = virtual user address of ultimate destination page
- */
-ENTRY(xscale_mc_clear_user_page)
-	mov	r1, #PAGE_SZ/32
-	mov	r2, #0
-	mov	r3, #0
-1:	mov	ip, r0
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	mcr	p15, 0, ip, c7, c10, 1		@ clean D line
-	subs	r1, r1, #1
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line
-	bne	1b
-	mov	pc, lr
-
-	__INITDATA
-
-	.type	xscale_mc_user_fns, #object
-ENTRY(xscale_mc_user_fns)
-	.long	xscale_mc_clear_user_page
-	.long	xscale_mc_copy_user_page
-	.size	xscale_mc_user_fns, . - xscale_mc_user_fns
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
new file mode 100644
index 000000000000..42a6ee255ce0
--- /dev/null
+++ b/arch/arm/mm/copypage-xscale.c
@@ -0,0 +1,131 @@
+/*
+ * linux/arch/arm/lib/copypage-xscale.S
+ *
+ * Copyright (C) 1995-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles the mini data cache, as found on SA11x0 and XScale
+ * processors.  When we copy a user page page, we map it in such a way
+ * that accesses to this page will not touch the main data cache, but
+ * will be cached in the mini data cache.  This prevents us thrashing
+ * the main data cache on page faults.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently.
+ */
+#define COPYPAGE_MINICACHE	0xffff8000
+
+#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
+				  L_PTE_CACHEABLE)
+
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
+static DEFINE_SPINLOCK(minicache_lock);
+
+/*
+ * XScale mini-dcache optimised copy_user_page
+ *
+ * We flush the destination cache lines just before we write the data into the
+ * corresponding address.  Since the Dcache is read-allocate, this removes the
+ * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
+ * and merged as appropriate.
+ */
+static void __attribute__((naked))
+mc_copy_user_page(void *from, void *to)
+{
+	/*
+	 * Strangely enough, best performance is achieved
+	 * when prefetching destination as well.  (NP)
+	 */
+	asm volatile(
+	"stmfd	sp!, {r4, r5, lr}		\n\
+	mov	lr, %2				\n\
+	pld	[r0, #0]			\n\
+	pld	[r0, #32]			\n\
+	pld	[r1, #0]			\n\
+	pld	[r1, #32]			\n\
+1:	pld	[r0, #64]			\n\
+	pld	[r0, #96]			\n\
+	pld	[r1, #64]			\n\
+	pld	[r1, #96]			\n\
+2:	ldrd	r2, [r0], #8			\n\
+	ldrd	r4, [r0], #8			\n\
+	mov	ip, r1				\n\
+	strd	r2, [r1], #8			\n\
+	ldrd	r2, [r0], #8			\n\
+	strd	r4, [r1], #8			\n\
+	ldrd	r4, [r0], #8			\n\
+	strd	r2, [r1], #8			\n\
+	strd	r4, [r1], #8			\n\
+	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
+	ldrd	r2, [r0], #8			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
+	ldrd	r4, [r0], #8			\n\
+	mov	ip, r1				\n\
+	strd	r2, [r1], #8			\n\
+	ldrd	r2, [r0], #8			\n\
+	strd	r4, [r1], #8			\n\
+	ldrd	r4, [r0], #8			\n\
+	strd	r2, [r1], #8			\n\
+	strd	r4, [r1], #8			\n\
+	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
+	subs	lr, lr, #1			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
+	bgt	1b				\n\
+	beq	2b				\n\
+	ldmfd	sp!, {r4, r5, pc} "
+	:
+	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
+}
+
+void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+{
+	spin_lock(&minicache_lock);
+
+	set_pte(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot));
+	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+
+	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
+
+	spin_unlock(&minicache_lock);
+}
+
+/*
+ * XScale optimised clear_user_page
+ */
+void __attribute__((naked))
+xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+{
+	asm volatile(
+	"mov	r1, %0				\n\
+	mov	r2, #0				\n\
+	mov	r3, #0				\n\
+1:	mov	ip, r0				\n\
+	strd	r2, [r0], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r2, [r0], #8			\n\
+	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
+	subs	r1, r1, #1			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
+	bne	1b				\n\
+	mov	pc, lr"
+	:
+	: "I" (PAGE_SIZE / 32));
+}
+
+struct cpu_user_fns xscale_mc_user_fns __initdata = {
+	.cpu_clear_user_page	= xscale_mc_clear_user_page,
+	.cpu_copy_user_page	= xscale_mc_copy_user_page,
+};
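
The xscale_mc_user_fns table at the end of the new file is what lets the rest of the kernel stay oblivious to the mini-cache trick: the generic user-page copy/clear paths call through a per-processor ops structure, and CPU setup simply points that structure at the XScale routines. A simplified, user-space-buildable model of that dispatch is sketched below; the wiring is illustrative and not lifted from the kernel sources:

#include <stdio.h>

/* Simplified model (not kernel code) of a cpu_user_fns-style ops table. */
struct cpu_user_fns {
	void (*cpu_clear_user_page)(void *kaddr, unsigned long vaddr);
	void (*cpu_copy_user_page)(void *kto, const void *kfrom,
				   unsigned long vaddr);
};

static void xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
{
	printf("mini-cache clear of %p (user vaddr %#lx)\n", kaddr, vaddr);
}

static void xscale_mc_copy_user_page(void *kto, const void *kfrom,
				     unsigned long vaddr)
{
	printf("mini-cache copy %p -> %p (user vaddr %#lx)\n", kfrom, kto, vaddr);
}

/* Chosen during CPU setup in the real kernel; hard-wired here. */
static struct cpu_user_fns cpu_user = {
	.cpu_clear_user_page	= xscale_mc_clear_user_page,
	.cpu_copy_user_page	= xscale_mc_copy_user_page,
};

int main(void)
{
	char src[4096], dst[4096];

	/* Generic code only ever calls through the table. */
	cpu_user.cpu_copy_user_page(dst, src, 0x40001000UL);
	cpu_user.cpu_clear_user_page(dst, 0x40001000UL);
	return 0;
}
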
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 01967ddeef53..be4ab3d73c91 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -77,9 +77,8 @@ no_pmd:
 }
 
 static void
-make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
+make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 {
-	struct address_space *mapping = page_mapping(page);
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *mpnt;
 	struct prio_tree_iter iter;
@@ -87,9 +86,6 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
 	pgoff_t pgoff;
 	int aliases = 0;
 
-	if (!mapping)
-		return;
-
 	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
 
 	/*
@@ -115,9 +111,11 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
 	if (aliases)
 		adjust_pte(vma, addr);
 	else
-		flush_cache_page(vma, addr, page_to_pfn(page));
+		flush_cache_page(vma, addr, pfn);
 }
 
+void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
 /*
  * Take care of architecture specific things when placing a new PTE into
  * a page table, or changing an existing PTE. Basically, there are two
@@ -134,29 +132,22 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
 	unsigned long pfn = pte_pfn(pte);
+	struct address_space *mapping;
 	struct page *page;
 
 	if (!pfn_valid(pfn))
 		return;
+
 	page = pfn_to_page(pfn);
-	if (page_mapping(page)) {
+	mapping = page_mapping(page);
+	if (mapping) {
 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
 
-		if (dirty) {
-			/*
-			 * This is our first userspace mapping of this page.
-			 * Ensure that the physical page is coherent with
-			 * the kernel mapping.
-			 *
-			 * FIXME: only need to do this on VIVT and aliasing
-			 *  VIPT cache architectures.  We can do that
-			 *  by choosing whether to set this bit...
-			 */
-			__cpuc_flush_dcache_page(page_address(page));
-		}
+		if (dirty)
+			__flush_dcache_page(mapping, page);
 
 		if (cache_is_vivt())
-			make_coherent(vma, addr, page, dirty);
+			make_coherent(mapping, vma, addr, pfn);
 	}
 }
 
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 4085ed983e46..191788fb18d1 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -37,13 +37,8 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 #define flush_pfn_alias(pfn,vaddr)	do { } while (0)
 #endif
 
-static void __flush_dcache_page(struct address_space *mapping, struct page *page)
+void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-	struct mm_struct *mm = current->active_mm;
-	struct vm_area_struct *mpnt;
-	struct prio_tree_iter iter;
-	pgoff_t pgoff;
-
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
@@ -52,24 +47,21 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
 	__cpuc_flush_dcache_page(page_address(page));
 
 	/*
-	 * If there's no mapping pointer here, then this page isn't
-	 * visible to userspace yet, so there are no cache lines
-	 * associated with any other aliases.
-	 */
-	if (!mapping)
-		return;
-
-	/*
-	 * This is a page cache page.  If we have a VIPT cache, we
-	 * only need to do one flush - which would be at the relevant
+	 * If this is a page cache page, and we have an aliasing VIPT cache,
+	 * we only need to do one flush - which would be at the relevant
 	 * userspace colour, which is congruent with page->index.
 	 */
-	if (cache_is_vipt()) {
-		if (cache_is_vipt_aliasing())
-			flush_pfn_alias(page_to_pfn(page),
-					page->index << PAGE_CACHE_SHIFT);
-		return;
-	}
+	if (mapping && cache_is_vipt_aliasing())
+		flush_pfn_alias(page_to_pfn(page),
+				page->index << PAGE_CACHE_SHIFT);
+}
+
+static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
+{
+	struct mm_struct *mm = current->active_mm;
+	struct vm_area_struct *mpnt;
+	struct prio_tree_iter iter;
+	pgoff_t pgoff;
 
 	/*
 	 * There are possible user space mappings of this page:
@@ -116,12 +108,12 @@ void flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
-	if (cache_is_vipt_nonaliasing())
-		return;
-
 	if (mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
-	else
+	else {
 		__flush_dcache_page(mapping, page);
+		if (mapping && cache_is_vivt())
+			__flush_dcache_aliases(mapping, page);
+	}
 }
 EXPORT_SYMBOL(flush_dcache_page);
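
Taken together, the fault-armv.c and flush.c changes keep the lazy D-cache flush scheme but tidy up who performs the flush: flush_dcache_page() merely marks a not-yet-mapped page-cache page with PG_dcache_dirty, and the deferred flush is carried out, now via the shared __flush_dcache_page(), when update_mmu_cache() installs the first user mapping. The following is a toy user-space model of that hand-off; the names mirror the patch but the types and print statements are invented, and this is not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the deferred D-cache flush protocol. */
struct toy_page {
	bool dcache_dirty;	/* stands in for PG_dcache_dirty         */
	bool user_mapped;	/* stands in for mapping_mapped(mapping) */
};

static void flush_hw_dcache(const char *why)
{
	printf("  flushing D-cache (%s)\n", why);
}

static void flush_dcache_page(struct toy_page *page)
{
	if (!page->user_mapped)
		page->dcache_dirty = true;	/* defer: no user view can see stale data yet */
	else
		flush_hw_dcache("page already visible to user space");
}

static void update_mmu_cache(struct toy_page *page)
{
	page->user_mapped = true;
	if (page->dcache_dirty) {
		page->dcache_dirty = false;
		flush_hw_dcache("first user mapping of a deferred page");
	}
}

int main(void)
{
	struct toy_page page = { 0 };

	printf("kernel writes the page through its own mapping:\n");
	flush_dcache_page(&page);		/* deferred, nothing flushed yet */

	printf("page fault installs the first user PTE:\n");
	update_mmu_cache(&page);		/* the deferred flush happens here */
	return 0;
}
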
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 00bb8fd37a59..7110e54182b1 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -170,3 +170,50 @@ void __iounmap(void __iomem *addr)
 	vfree((void *) (PAGE_MASK & (unsigned long) addr));
 }
 EXPORT_SYMBOL(__iounmap);
+
+#ifdef __io
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	return __io(port);
+}
+EXPORT_SYMBOL(ioport_map);
+
+void ioport_unmap(void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(ioport_unmap);
+#endif
+
+#ifdef CONFIG_PCI
+#include <linux/pci.h>
+#include <linux/ioport.h>
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	unsigned long start = pci_resource_start(dev, bar);
+	unsigned long len   = pci_resource_len(dev, bar);
+	unsigned long flags = pci_resource_flags(dev, bar);
+
+	if (!len || !start)
+		return NULL;
+	if (maxlen && len > maxlen)
+		len = maxlen;
+	if (flags & IORESOURCE_IO)
+		return ioport_map(start, len);
+	if (flags & IORESOURCE_MEM) {
+		if (flags & IORESOURCE_CACHEABLE)
+			return ioremap(start, len);
+		return ioremap_nocache(start, len);
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(pci_iomap);
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+	if ((unsigned long)addr >= VMALLOC_START &&
+	    (unsigned long)addr < VMALLOC_END)
+		iounmap(addr);
+}
+EXPORT_SYMBOL(pci_iounmap);
+#endif
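
The ioremap.c additions give ARM the generic iomap API, so a driver can map a BAR without caring whether it is I/O-port- or memory-backed. A hedged usage sketch follows; the register offset and probe wiring are invented for illustration and error handling is abbreviated:

#include <linux/pci.h>

#define EXAMPLE_REG_STATUS	0x10	/* hypothetical register offset */

static int example_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	/* Works for both IORESOURCE_IO and IORESOURCE_MEM BARs. */
	regs = pci_iomap(dev, 0, 0);
	if (!regs) {
		pci_disable_device(dev);
		return -ENOMEM;
	}

	printk(KERN_INFO "example: status %08x\n",
	       ioread32(regs + EXAMPLE_REG_STATUS));

	pci_iounmap(dev, regs);
	pci_disable_device(dev);
	return 0;
}
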
diff --git a/arch/arm/mm/minicache.c b/arch/arm/mm/minicache.c
deleted file mode 100644
index dedf2ab01b2a..000000000000
--- a/arch/arm/mm/minicache.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * linux/arch/arm/mm/minicache.c
- *
- * Copyright (C) 2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This handles the mini data cache, as found on SA11x0 and XScale
- * processors.  When we copy a user page page, we map it in such a way
- * that accesses to this page will not touch the main data cache, but
- * will be cached in the mini data cache.  This prevents us thrashing
- * the main data cache on page faults.
- */
-#include <linux/init.h>
-#include <linux/mm.h>
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
-#define minicache_address (0xffff8000)
-#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
-			    L_PTE_CACHEABLE)
-
-static pte_t *minicache_pte;
-
-/*
- * Note that this is intended to be called only from the copy_user_page
- * asm code; anything else will require special locking to prevent the
- * mini-cache space being re-used.  (Note: probably preempt unsafe).
- *
- * We rely on the fact that the minicache is 2K, and we'll be pushing
- * 4K of data through it, so we don't actually have to specifically
- * flush the minicache when we change the mapping.
- *
- * Note also: assert(PAGE_OFFSET <= virt < high_memory).
- * Unsafe: preempt, kmap.
- */
-unsigned long map_page_minicache(unsigned long virt)
-{
-	set_pte(minicache_pte, pfn_pte(__pa(virt) >> PAGE_SHIFT, minicache_pgprot));
-	flush_tlb_kernel_page(minicache_address);
-
-	return minicache_address;
-}
-
-static int __init minicache_init(void)
-{
-	pgd_t *pgd;
-	pmd_t *pmd;
-
-	spin_lock(&init_mm.page_table_lock);
-
-	pgd = pgd_offset_k(minicache_address);
-	pmd = pmd_alloc(&init_mm, pgd, minicache_address);
-	if (!pmd)
-		BUG();
-	minicache_pte = pte_alloc_kernel(&init_mm, pmd, minicache_address);
-	if (!minicache_pte)
-		BUG();
-
-	spin_unlock(&init_mm.page_table_lock);
-
-	return 0;
-}
-
-core_initcall(minicache_init);