| author    | Nicolas Pitre <nico@cam.org>                          | 2008-11-04 02:42:27 -0500 |
|-----------|-------------------------------------------------------|---------------------------|
| committer | Russell King <rmk+kernel@arm.linux.org.uk>            | 2008-11-28 10:36:43 -0500 |
| commit    | 43ae286b7d4d8c4983bc263ef2e3cccc10dedb2b (patch)      |                           |
| tree      | be10faffb48904e5bb962fbd45f7cb2ff395caea /arch/arm/mm |                           |
| parent    | 303c6443659bc1dc911356f5de149f48ff1d97b8 (diff)       |                           |
[ARM] fix a couple clear_user_highpage assembly constraints
In all cases kaddr is passed as an input operand even though the assembly
code modifies the register it is assigned to. Let's bind the modified value
to a new output variable and mark those inline asm statements volatile,
otherwise they get optimized away because that output variable is never used.

Also fix a few conversion errors in copypage-feroceon.c and
copypage-v4mc.c.
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
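For reference, here is a minimal sketch of the constraint pattern this fix applies; it is not taken from the patch and assumes an ARM target with GCC extended asm, and the function name and parameters are illustrative only. Because the asm advances its address operand, that operand is declared as an output into a dummy variable (`"=r" (ptr)`), the initial address is tied to it with the matching constraint `"0"`, and the statement is marked `volatile` so it is not discarded merely because the dummy output is never read.

```c
/*
 * Illustrative sketch only (not kernel code), assuming an ARM target and
 * GCC extended asm.  The loop advances %0, so %0 must be an output: the
 * dummy 'ptr' receives the final address, "0" ties the input 'kaddr' to
 * the same register, and 'volatile' keeps the asm from being dropped when
 * 'ptr' is never read.
 */
static void clear_64byte_lines(void *kaddr, int nlines)
{
	void *ptr;			/* receives the advanced address */

	asm volatile("\
	mov	r1, %2			@ line counter\n\
	mov	r2, #0			\n\
	mov	r3, #0			\n\
	mov	ip, #0			\n\
	mov	lr, #0			\n\
1:	stmia	%0!, {r2, r3, ip, lr}	@ 16 bytes, %0 post-incremented\n\
	stmia	%0!, {r2, r3, ip, lr}	\n\
	stmia	%0!, {r2, r3, ip, lr}	\n\
	stmia	%0!, {r2, r3, ip, lr}	\n\
	subs	r1, r1, #1		\n\
	bne	1b"
	: "=r" (ptr)			/* %0: modified address operand */
	: "0" (kaddr), "r" (nlines)	/* %1 shares %0's register      */
	: "r1", "r2", "r3", "ip", "lr", "cc", "memory");
}
```

Without the `"=r"`/`"0"` pair the compiler still believes the register holding kaddr is unchanged after the asm, and without `volatile` it may delete the statement entirely once it sees that no output is used, which is exactly the failure the patch description reports.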
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/copypage-feroceon.c | 12
-rw-r--r--  arch/arm/mm/copypage-v3.c       | 10
-rw-r--r--  arch/arm/mm/copypage-v4mc.c     |  8
-rw-r--r--  arch/arm/mm/copypage-v4wb.c     | 10
-rw-r--r--  arch/arm/mm/copypage-v4wt.c     | 10
-rw-r--r--  arch/arm/mm/copypage-xsc3.c     | 10
-rw-r--r--  arch/arm/mm/copypage-xscale.c   |  8

7 files changed, 34 insertions, 34 deletions
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index c3651b2939c7..c3ba6a94da0c 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -81,9 +81,9 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from,
 
 void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
-	asm("\
-	mov	r1, %1				\n\
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile ("\
+	mov	r1, %2				\n\
 	mov	r2, #0				\n\
 	mov	r3, #0				\n\
 	mov	r4, #0				\n\
@@ -95,11 +95,11 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 1:	stmia	%0, {r2-r7, ip, lr}		\n\
 	subs	r1, r1, #1			\n\
 	mcr	p15, 0, %0, c7, c14, 1		@ clean and invalidate D line\n\
-	add	r0, r0, #32			\n\
+	add	%0, %0, #32			\n\
 	bne	1b				\n\
 	mcr	p15, 0, r1, c7, c10, 4		@ drain WB"
-	:
-	: "r" (kaddr), "I" (PAGE_SIZE / 32)
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
 	kunmap_atomic(kaddr, KM_USER0);
 }
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 13ce0baa6ba5..70ed96c8af8e 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -56,9 +56,9 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
  */
 void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
-	asm("\n\
-	mov	r1, %1				@ 1\n\
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\n\
+	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
 	mov	r3, #0				@ 1\n\
 	mov	ip, #0				@ 1\n\
@@ -69,8 +69,8 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
 	subs	r1, r1, #1			@ 1\n\
 	bne	1b				@ 1"
-	:
-	: "r" (kaddr), "I" (PAGE_SIZE / 64)
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
 	kunmap_atomic(kaddr, KM_USER0);
 }
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index a5eae503a34f..bdb5fd983b15 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -93,9 +93,9 @@ void v4_mc_copy_user_highpage(struct page *from, struct page *to,
  */
 void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
 	asm volatile("\
-	mov	r1, %0				@ 1\n\
+	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
 	mov	r3, #0				@ 1\n\
 	mov	ip, #0				@ 1\n\
@@ -108,8 +108,8 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
 	subs	r1, r1, #1			@ 1\n\
 	bne	1b				@ 1"
-	:
-	: "r" (kaddr), "I" (PAGE_SIZE / 64)
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
 	kunmap_atomic(kaddr, KM_USER0);
 }
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 9144a96037bf..3ec93dab7656 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -66,9 +66,9 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
-	asm("\
-	mov	r1, %1				@ 1\n\
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
 	mov	r3, #0				@ 1\n\
 	mov	ip, #0				@ 1\n\
@@ -82,8 +82,8 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 	subs	r1, r1, #1			@ 1\n\
 	bne	1b				@ 1\n\
 	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB"
-	:
-	: "r" (kaddr), "I" (PAGE_SIZE / 64)
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
 	kunmap_atomic(kaddr, KM_USER0);
 }
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index b8a345d6e77e..0f1188efae45 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -62,9 +62,9 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
-	asm("\
-	mov	r1, %1				@ 1\n\
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
 	mov	r3, #0				@ 1\n\
 	mov	ip, #0				@ 1\n\
@@ -76,8 +76,8 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 	subs	r1, r1, #1			@ 1\n\
 	bne	1b				@ 1\n\
 	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache"
-	:
-	: "r" (kaddr), "I" (PAGE_SIZE / 64)
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
 	kunmap_atomic(kaddr, KM_USER0);
 }
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 0e7cb325ca4c..39a994542cad 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -89,9 +89,9 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
  */
 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
-	asm("\
-	mov	r1, %1				\n\
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile ("\
+	mov	r1, %2				\n\
 	mov	r2, #0				\n\
 	mov	r3, #0				\n\
 1:	mcr	p15, 0, %0, c7, c6, 1		@ invalidate line\n\
@@ -101,8 +101,8 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	strd	r2, [%0], #8			\n\
 	subs	r1, r1, #1			\n\
 	bne	1b"
-	:
-	: "r" (kaddr), "I" (PAGE_SIZE / 32)
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3");
 	kunmap_atomic(kaddr, KM_USER0);
 }
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index aa9f2ff9dce0..d18f2397ee2d 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -116,9 +116,9 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 void
 xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
 	asm volatile(
-	"mov	r1, %1				\n\
+	"mov	r1, %2				\n\
 	mov	r2, #0				\n\
 	mov	r3, #0				\n\
 1:	mov	ip, %0				\n\
@@ -130,8 +130,8 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	subs	r1, r1, #1			\n\
 	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
 	bne	1b"
-	:
-	: "r" (kaddr), "I" (PAGE_SIZE / 32)
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "ip");
 	kunmap_atomic(kaddr, KM_USER0);
 }