| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-09-26 16:26:30 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-09-26 16:26:30 -0400 |
| commit | a2b49102daac7a1d90dc01bfc4350ef68aa1204d (patch) | |
| tree | 5f626f0a4c042f805b907a3177af292870f2ef6a | |
| parent | f9d81f61c84aca693bc353dfef4b8c36c2e5e1b5 (diff) | |
| parent | df77abcafc8dc881b6c9347548651777088e4b27 (diff) | |
Merge branch 'fixes' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm

* 'fixes' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm:
  * ARM: 7099/1: futex: preserve oldval in SMP __futex_atomic_op
  * ARM: dma-mapping: free allocated page if unable to map
  * ARM: fix vmlinux.lds.S discarding sections
  * ARM: nommu: fix warning with checksyscalls.sh
  * ARM: 7091/1: errata: D-cache line maintenance operation by MVA may not succeed
Diffstat:

```
-rw-r--r--  arch/arm/Kconfig               | 14
-rw-r--r--  arch/arm/include/asm/futex.h   | 34
-rw-r--r--  arch/arm/include/asm/unistd.h  |  4
-rw-r--r--  arch/arm/kernel/smp_scu.c      | 10
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S  | 15
-rw-r--r--  arch/arm/mm/cache-v7.S         | 20
-rw-r--r--  arch/arm/mm/dma-mapping.c      |  2

7 files changed, 77 insertions(+), 22 deletions(-)
```
```diff
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3269576dbfa8..3146ed3f6eca 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1283,6 +1283,20 @@ config ARM_ERRATA_364296
 	  processor into full low interrupt latency mode. ARM11MPCore
 	  is not affected.
 
+config ARM_ERRATA_764369
+	bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
+	depends on CPU_V7 && SMP
+	help
+	  This option enables the workaround for erratum 764369
+	  affecting Cortex-A9 MPCore with two or more processors (all
+	  current revisions). Under certain timing circumstances, a data
+	  cache line maintenance operation by MVA targeting an Inner
+	  Shareable memory region may fail to proceed up to either the
+	  Point of Coherency or to the Point of Unification of the
+	  system. This workaround adds a DSB instruction before the
+	  relevant cache maintenance functions and sets a specific bit
+	  in the diagnostic control register of the SCU.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
```
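The help text above describes a two-part workaround: a DSB before the affected cache maintenance operations (the cache-v7.S hunks further down) and one bit in the SCU Diagnostic Control register (the smp_scu.c hunk). As a rough standalone C sketch of the SCU half, mirroring that hunk: the 0x30 register offset and the Cortex-A9 MIDR check come from the patch itself, while the function and macro names here are purely illustrative.

```c
#include <linux/io.h>
#include <asm/cputype.h>

#define SCU_DIAG_CONTROL	0x30	/* offset written by the smp_scu.c hunk below */

/*
 * Illustrative helper (not a kernel API): on Cortex-A9 (MIDR 0x410fc090
 * with the revision bits masked off), set bit 0 of the SCU Diagnostic
 * Control register if it is not already set, as erratum 764369 requires.
 */
static void erratum_764369_scu_workaround(void __iomem *scu_base)
{
	u32 diag;

	if ((read_cpuid(CPUID_ID) & 0xff0ffff0) != 0x410fc090)
		return;

	diag = __raw_readl(scu_base + SCU_DIAG_CONTROL);
	if (!(diag & 1))
		__raw_writel(diag | 0x1, scu_base + SCU_DIAG_CONTROL);
}
```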
```diff
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 8c73900da9ed..253cc86318bf 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -25,17 +25,17 @@
 
 #ifdef CONFIG_SMP
 
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
+#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
 	smp_mb();						\
 	__asm__ __volatile__(					\
-	"1:	ldrex	%1, [%2]\n"				\
+	"1:	ldrex	%1, [%3]\n"				\
 	"	" insn "\n"					\
-	"2:	strex	%1, %0, [%2]\n"				\
-	"	teq	%1, #0\n"				\
+	"2:	strex	%2, %0, [%3]\n"				\
+	"	teq	%2, #0\n"				\
 	"	bne	1b\n"					\
 	"	mov	%0, #0\n"				\
-	__futex_atomic_ex_table("%4")				\
-	: "=&r" (ret), "=&r" (oldval)				\
+	__futex_atomic_ex_table("%5")				\
+	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
 	: "cc", "memory")
 
@@ -73,14 +73,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 #include <linux/preempt.h>
 #include <asm/domain.h>
 
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
+#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
 	__asm__ __volatile__(					\
-	"1:	" T(ldr) "	%1, [%2]\n"			\
+	"1:	" T(ldr) "	%1, [%3]\n"			\
 	"	" insn "\n"					\
-	"2:	" T(str) "	%0, [%2]\n"			\
+	"2:	" T(str) "	%0, [%3]\n"			\
 	"	mov	%0, #0\n"				\
-	__futex_atomic_ex_table("%4")				\
-	: "=&r" (ret), "=&r" (oldval)				\
+	__futex_atomic_ex_table("%5")				\
+	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
 	: "cc", "memory")
 
@@ -117,7 +117,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	int cmp = (encoded_op >> 24) & 15;
 	int oparg = (encoded_op << 8) >> 20;
 	int cmparg = (encoded_op << 20) >> 20;
-	int oldval = 0, ret;
+	int oldval = 0, ret, tmp;
 
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
@@ -129,19 +129,19 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op("mov	%0, %3", ret, oldval, uaddr, oparg);
+		__futex_atomic_op("mov	%0, %4", ret, oldval, tmp, uaddr, oparg);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op("add	%0, %1, %3", ret, oldval, uaddr, oparg);
+		__futex_atomic_op("add	%0, %1, %4", ret, oldval, tmp, uaddr, oparg);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op("orr	%0, %1, %3", ret, oldval, uaddr, oparg);
+		__futex_atomic_op("orr	%0, %1, %4", ret, oldval, tmp, uaddr, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and	%0, %1, %3", ret, oldval, uaddr, ~oparg);
+		__futex_atomic_op("and	%0, %1, %4", ret, oldval, tmp, uaddr, ~oparg);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op("eor	%0, %1, %3", ret, oldval, uaddr, oparg);
+		__futex_atomic_op("eor	%0, %1, %4", ret, oldval, tmp, uaddr, oparg);
 		break;
 	default:
 		ret = -ENOSYS;
```
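The point of the futex change: the SMP variant of the macro used %1 both to return oldval and as the strex status register, so the loaded value was overwritten by the store-exclusive result (0 or 1) before the caller could look at it; the new tmp operand separates the two roles. The value matters because futex_atomic_op_inuser() goes on to compare oldval against cmparg. A hedged sketch of that consumer, following the generic futex pattern of this kernel era rather than lines shown in this patch:

```c
	/*
	 * Sketch of the code that runs after the switch above: the caller's
	 * comparison operates on the *preserved* old value.  With the old
	 * SMP macro, oldval would have held the strex status instead.
	 */
	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
		case FUTEX_OP_CMP_LT: ret = (oldval <  cmparg); break;
		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
		case FUTEX_OP_CMP_GT: ret = (oldval >  cmparg); break;
		default: ret = -ENOSYS;
		}
	}
```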
```diff
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 2c04ed5efeb5..c60a2944f95b 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -478,8 +478,8 @@
 /*
  * Unimplemented (or alternatively implemented) syscalls
  */
-#define __IGNORE_fadvise64_64		1
-#define __IGNORE_migrate_pages		1
+#define __IGNORE_fadvise64_64
+#define __IGNORE_migrate_pages
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_UNISTD_H */
```
```diff
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 79ed5e7f204a..7fcddb75c877 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -13,6 +13,7 @@
 
 #include <asm/smp_scu.h>
 #include <asm/cacheflush.h>
+#include <asm/cputype.h>
 
 #define SCU_CTRL		0x00
 #define SCU_CONFIG		0x04
@@ -37,6 +38,15 @@ void __init scu_enable(void __iomem *scu_base)
 {
 	u32 scu_ctrl;
 
+#ifdef CONFIG_ARM_ERRATA_764369
+	/* Cortex-A9 only */
+	if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
+		scu_ctrl = __raw_readl(scu_base + 0x30);
+		if (!(scu_ctrl & 1))
+			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
+	}
+#endif
+
 	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
 	/* already enabled? */
 	if (scu_ctrl & 1)
```
```diff
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index bf977f8514f6..4e66f62b8d41 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -23,8 +23,10 @@
 
 #if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
 #define ARM_EXIT_KEEP(x)	x
+#define ARM_EXIT_DISCARD(x)
 #else
 #define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x)	x
 #endif
 
 OUTPUT_ARCH(arm)
@@ -39,6 +41,11 @@ jiffies = jiffies_64 + 4;
 SECTIONS
 {
 	/*
+	 * XXX: The linker does not define how output sections are
+	 * assigned to input sections when there are multiple statements
+	 * matching the same input section name.  There is no documented
+	 * order of matching.
+	 *
 	 * unwind exit sections must be discarded before the rest of the
 	 * unwind sections get included.
 	 */
@@ -47,6 +54,9 @@ SECTIONS
 		*(.ARM.extab.exit.text)
 		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
 		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
+		ARM_EXIT_DISCARD(EXIT_TEXT)
+		ARM_EXIT_DISCARD(EXIT_DATA)
+		EXIT_CALL
 #ifndef CONFIG_HOTPLUG
 		*(.ARM.exidx.devexit.text)
 		*(.ARM.extab.devexit.text)
@@ -58,6 +68,8 @@ SECTIONS
 #ifndef CONFIG_SMP_ON_UP
 		*(.alt.smp.init)
 #endif
+		*(.discard)
+		*(.discard.*)
 	}
 
 #ifdef CONFIG_XIP_KERNEL
@@ -279,9 +291,6 @@ SECTIONS
 
 	STABS_DEBUG
 	.comment 0 : { *(.comment) }
-
-	/* Default discards */
-	DISCARDS
 }
 
 /*
```
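The idea behind the linker-script change, per the new XXX comment: because input-section matching order is not guaranteed, everything that must be thrown away (exit text and data, the default discards) is now listed explicitly inside the /DISCARD/ statement instead of relying on a DISCARDS macro at the end of the script. A hypothetical C fragment, only to show the kind of code that lands in the sections EXIT_TEXT matches (the function and message are illustrative, not from this patch):

```c
#include <linux/init.h>
#include <linux/kernel.h>

/*
 * Hypothetical example: code marked __exit is emitted into .exit.text,
 * which EXIT_TEXT matches.  On builds where exit code can never run
 * (here: CONFIG_SMP_ON_UP disabled, or CONFIG_DEBUG_SPINLOCK enabled),
 * ARM_EXIT_DISCARD(EXIT_TEXT) drops that section in /DISCARD/ before any
 * later statement could pull it into the image.
 */
static void __exit example_cleanup(void)
{
	pr_info("discarded from the final vmlinux on those configs\n");
}
```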
```diff
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 3b24bfa3b828..07c4bc8ea0a4 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -174,6 +174,10 @@ ENTRY(v7_coherent_user_range)
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r12, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+	ALT_SMP(W(dsb))
+	ALT_UP(W(nop))
+#endif
 1:
  USER(	mcr	p15, 0, r12, c7, c11, 1	)	@ clean D line to the point of unification
 	add	r12, r12, r2
@@ -223,6 +227,10 @@ ENTRY(v7_flush_kern_dcache_area)
 	add	r1, r0, r1
 	sub	r3, r2, #1
 	bic	r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+	ALT_SMP(W(dsb))
+	ALT_UP(W(nop))
+#endif
 1:
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
 	add	r0, r0, r2
@@ -247,6 +255,10 @@ v7_dma_inv_range:
 	sub	r3, r2, #1
 	tst	r0, r3
 	bic	r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+	ALT_SMP(W(dsb))
+	ALT_UP(W(nop))
+#endif
 	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
 
 	tst	r1, r3
@@ -270,6 +282,10 @@ v7_dma_clean_range:
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+	ALT_SMP(W(dsb))
+	ALT_UP(W(nop))
+#endif
 1:
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D / U line
 	add	r0, r0, r2
@@ -288,6 +304,10 @@ ENTRY(v7_dma_flush_range)
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+	ALT_SMP(W(dsb))
+	ALT_UP(W(nop))
+#endif
 1:
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
 	add	r0, r0, r2
```
```diff
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 0a0a1e7c20d2..c3ff82f92d9c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -324,6 +324,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 
 	if (addr)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
+	else
+		__dma_free_buffer(page, size);
 
 	return addr;
 }
```
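The dma-mapping fix plugs a page leak: when the mapping step of __dma_alloc() fails, the buffer pages obtained earlier were previously just dropped; now they are returned with __dma_free_buffer(). The general shape of the fixed path, as a sketch in which the example_* helpers are hypothetical stand-ins for the surrounding code the hunk does not show:

```c
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical stand-ins for the allocate/map/free steps around the hunk
 * above; they are not real kernel APIs. */
static struct page *example_alloc_buffer(struct device *dev, size_t size, gfp_t gfp);
static void *example_map_buffer(struct page *page, size_t size);
static void example_free_buffer(struct page *page, size_t size);

static void *example_dma_alloc(struct device *dev, size_t size,
			       dma_addr_t *handle, gfp_t gfp)
{
	struct page *page = example_alloc_buffer(dev, size, gfp);
	void *addr;

	if (!page)
		return NULL;

	addr = example_map_buffer(page, size);
	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));
	else
		example_free_buffer(page, size);	/* the fix: do not leak the pages */

	return addr;
}
```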
