diff options
| author | Russell King <rmk+kernel@arm.linux.org.uk> | 2015-12-05 08:42:07 -0500 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2015-12-15 06:51:02 -0500 |
| commit | c014953d84ec21a4df9a43be2378861ea6e9246e (patch) | |
| tree | 72af8d8752c59bc9c59c96c70616a911f4a107a1 /arch/arm/lib | |
| parent | 77f1b959b0b6db7a7941b4b4f9d3d287c67d7c15 (diff) | |
ARM: fix uaccess_with_memcpy() with SW_DOMAIN_PAN
The uaccess_with_memcpy() code is currently incompatible with the SW
PAN code: it takes locks within the region that we've changed the DACR,
potentially sleeping as a result. As we do not save and restore the
DACR across co-operative sleep events, this can lead to an incorrect DACR
value later in this code path.
Reported-by: Peter Rosin <peda@axentia.se>
Tested-by: Peter Rosin <peda@axentia.se>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/lib')
| -rw-r--r-- | arch/arm/lib/uaccess_with_memcpy.c | 29 |
1 file changed, 23 insertions, 6 deletions
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index d72b90905132..588bbc288396 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c | |||
| @@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp) | |||
| 88 | static unsigned long noinline | 88 | static unsigned long noinline |
| 89 | __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) | 89 | __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) |
| 90 | { | 90 | { |
| 91 | unsigned long ua_flags; | ||
| 91 | int atomic; | 92 | int atomic; |
| 92 | 93 | ||
| 93 | if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { | 94 | if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { |
| @@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) | |||
| 118 | if (tocopy > n) | 119 | if (tocopy > n) |
| 119 | tocopy = n; | 120 | tocopy = n; |
| 120 | 121 | ||
| 122 | ua_flags = uaccess_save_and_enable(); | ||
| 121 | memcpy((void *)to, from, tocopy); | 123 | memcpy((void *)to, from, tocopy); |
| 124 | uaccess_restore(ua_flags); | ||
| 122 | to += tocopy; | 125 | to += tocopy; |
| 123 | from += tocopy; | 126 | from += tocopy; |
| 124 | n -= tocopy; | 127 | n -= tocopy; |
| @@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n) | |||
| 145 | * With frame pointer disabled, tail call optimization kicks in | 148 | * With frame pointer disabled, tail call optimization kicks in |
| 146 | * as well making this test almost invisible. | 149 | * as well making this test almost invisible. |
| 147 | */ | 150 | */ |
| 148 | if (n < 64) | 151 | if (n < 64) { |
| 149 | return __copy_to_user_std(to, from, n); | 152 | unsigned long ua_flags = uaccess_save_and_enable(); |
| 150 | return __copy_to_user_memcpy(to, from, n); | 153 | n = __copy_to_user_std(to, from, n); |
| 154 | uaccess_restore(ua_flags); | ||
| 155 | } else { | ||
| 156 | n = __copy_to_user_memcpy(to, from, n); | ||
| 157 | } | ||
| 158 | return n; | ||
| 151 | } | 159 | } |
| 152 | 160 | ||
| 153 | static unsigned long noinline | 161 | static unsigned long noinline |
| 154 | __clear_user_memset(void __user *addr, unsigned long n) | 162 | __clear_user_memset(void __user *addr, unsigned long n) |
| 155 | { | 163 | { |
| 164 | unsigned long ua_flags; | ||
| 165 | |||
| 156 | if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { | 166 | if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { |
| 157 | memset((void *)addr, 0, n); | 167 | memset((void *)addr, 0, n); |
| 158 | return 0; | 168 | return 0; |
| @@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n) | |||
| 175 | if (tocopy > n) | 185 | if (tocopy > n) |
| 176 | tocopy = n; | 186 | tocopy = n; |
| 177 | 187 | ||
| 188 | ua_flags = uaccess_save_and_enable(); | ||
| 178 | memset((void *)addr, 0, tocopy); | 189 | memset((void *)addr, 0, tocopy); |
| 190 | uaccess_restore(ua_flags); | ||
| 179 | addr += tocopy; | 191 | addr += tocopy; |
| 180 | n -= tocopy; | 192 | n -= tocopy; |
| 181 | 193 | ||
| @@ -193,9 +205,14 @@ out: | |||
| 193 | unsigned long arm_clear_user(void __user *addr, unsigned long n) | 205 | unsigned long arm_clear_user(void __user *addr, unsigned long n) |
| 194 | { | 206 | { |
| 195 | /* See rational for this in __copy_to_user() above. */ | 207 | /* See rational for this in __copy_to_user() above. */ |
| 196 | if (n < 64) | 208 | if (n < 64) { |
| 197 | return __clear_user_std(addr, n); | 209 | unsigned long ua_flags = uaccess_save_and_enable(); |
| 198 | return __clear_user_memset(addr, n); | 210 | n = __clear_user_std(addr, n); |
| 211 | uaccess_restore(ua_flags); | ||
| 212 | } else { | ||
| 213 | n = __clear_user_memset(addr, n); | ||
| 214 | } | ||
| 215 | return n; | ||
| 199 | } | 216 | } |
| 200 | 217 | ||
| 201 | #if 0 | 218 | #if 0 |
