path: root/arch/arm/mm
author		Ingo Molnar <mingo@kernel.org>	2012-03-26 11:18:44 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-03-26 11:19:03 -0400
commit		7fd52392c56361a40f0c630a82b36b95ca31eac6 (patch)
tree		14091de24c6b28ea4cae9826f98aeedb7be091f5 /arch/arm/mm
parent		b01c3a0010aabadf745f3e7fdb9cab682e0a28a2 (diff)
parent		e22057c8599373e5caef0bc42bdb95d2a361ab0d (diff)
Merge branch 'linus' into perf/urgent
Merge reason: we need to fix a non-trivial merge conflict.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/copypage-fa.c	12
-rw-r--r--	arch/arm/mm/copypage-feroceon.c	12
-rw-r--r--	arch/arm/mm/copypage-v3.c	12
-rw-r--r--	arch/arm/mm/copypage-v4mc.c	8
-rw-r--r--	arch/arm/mm/copypage-v4wb.c	12
-rw-r--r--	arch/arm/mm/copypage-v4wt.c	12
-rw-r--r--	arch/arm/mm/copypage-v6.c	12
-rw-r--r--	arch/arm/mm/copypage-xsc3.c	12
-rw-r--r--	arch/arm/mm/copypage-xscale.c	8
-rw-r--r--	arch/arm/mm/highmem.c	4
-rw-r--r--	arch/arm/mm/init.c	1
-rw-r--r--	arch/arm/mm/iomap.c	3
12 files changed, 52 insertions(+), 56 deletions(-)
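Most of the churn below comes from kmap_atomic()/kunmap_atomic() dropping their KM_USERx slot arguments, plus the __kmap_atomic() to kmap_atomic() rename in highmem.c. A rough sketch of the calling pattern the copypage-*.c files converge on after this merge (the helper name copy_one_highpage is hypothetical, not part of this tree):

	#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic() */
	#include <linux/mm.h>		/* struct page, copy_page() */

	/* Hypothetical illustration of the new single-argument interface. */
	static void copy_one_highpage(struct page *to, struct page *from)
	{
		void *kto, *kfrom;

		kto = kmap_atomic(to);		/* was: kmap_atomic(to, KM_USER0) */
		kfrom = kmap_atomic(from);	/* was: kmap_atomic(from, KM_USER1) */
		copy_page(kto, kfrom);
		kunmap_atomic(kfrom);		/* was: kunmap_atomic(kfrom, KM_USER1) */
		kunmap_atomic(kto);		/* was: kunmap_atomic(kto, KM_USER0) */
	}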
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index d2852e1635b1..d130a5ece5d5 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -44,11 +44,11 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	fa_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -58,7 +58,7 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
  */
 void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -77,7 +77,7 @@ void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns fa_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index ac163de7dc01..49ee0c1a7209 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -72,17 +72,17 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	feroceon_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile ("\
 	mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -102,7 +102,7 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns feroceon_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index f72303e1d804..3935bddd4769 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -42,11 +42,11 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	v3_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -56,7 +56,7 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
  */
 void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\n\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -72,7 +72,7 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v3_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7d0a8c230342..ec8c3befb9c8 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -71,7 +71,7 @@ mc_copy_user_page(void *from, void *to)
 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
-	void *kto = kmap_atomic(to, KM_USER1);
+	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
@@ -85,7 +85,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_unlock(&minicache_lock);
 
-	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -93,7 +93,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -111,7 +111,7 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index cb589cbb2b6c..067d0fdd630c 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -52,12 +52,12 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	v4wb_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -67,7 +67,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -86,7 +86,7 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4wb_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 30c7d048a324..b85c5da2e510 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -48,11 +48,11 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	v4wt_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -62,7 +62,7 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -79,7 +79,7 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4wt_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 3d9a1552cef6..8b03a5814d00 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -38,11 +38,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 {
 	void *kto, *kfrom;
 
-	kfrom = kmap_atomic(from, KM_USER0);
-	kto = kmap_atomic(to, KM_USER1);
+	kfrom = kmap_atomic(from);
+	kto = kmap_atomic(to);
 	copy_page(kto, kfrom);
-	kunmap_atomic(kto, KM_USER1);
-	kunmap_atomic(kfrom, KM_USER0);
+	kunmap_atomic(kto);
+	kunmap_atomic(kfrom);
 }
 
 /*
@@ -51,9 +51,9 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
  */
 static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
+	void *kaddr = kmap_atomic(page);
 	clear_page(kaddr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 /*
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index f9cde0702f1e..03a2042aced5 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -75,12 +75,12 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	xsc3_mc_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -90,7 +90,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
  */
 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile ("\
 	mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -105,7 +105,7 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns xsc3_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 610c24ced310..439d106ae638 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -93,7 +93,7 @@ mc_copy_user_page(void *from, void *to)
 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
-	void *kto = kmap_atomic(to, KM_USER1);
+	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
@@ -107,7 +107,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_unlock(&minicache_lock);
 
-	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -116,7 +116,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 void
 xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile(
 	"mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -133,7 +133,7 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "ip");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns xscale_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 807c0573abbe..5a21505d7550 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,7 +36,7 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr;
@@ -81,7 +81,7 @@ void *__kmap_atomic(struct page *page)
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 5dc7d127a40f..245a55a0a5bb 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -32,7 +32,6 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
-#include <asm/memblock.h>
 
 #include "mm.h"
 
diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c
index e62956e12030..4614208369f1 100644
--- a/arch/arm/mm/iomap.c
+++ b/arch/arm/mm/iomap.c
@@ -32,9 +32,6 @@ EXPORT_SYMBOL(pcibios_min_io);
 unsigned long pcibios_min_mem = 0x01000000;
 EXPORT_SYMBOL(pcibios_min_mem);
 
-unsigned int pci_flags = PCI_REASSIGN_ALL_RSRC;
-EXPORT_SYMBOL(pci_flags);
-
 void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
 {
 	if ((unsigned long)addr >= VMALLOC_START &&