author | Ingo Molnar <mingo@kernel.org> | 2013-08-29 06:02:08 -0400
committer | Ingo Molnar <mingo@kernel.org> | 2013-08-29 06:02:08 -0400
commit | aee2bce3cfdcb9bf2c51c24496ee776e8202ed11 (patch)
tree | 66ff8e345cf693cfb39383f25ad796e2f59ab6ad /arch
parent | 5ec4c599a52362896c3e7c6a31ba6145dca9c6f5 (diff)
parent | c95389b4cd6a4b52af78bea706a274453e886251 (diff)
Merge branch 'linus' into perf/core
Pick up the latest upstream fixes.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
68 files changed, 477 insertions(+), 184 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig index 8d2ae24b9f4a..1feb169274fe 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -407,6 +407,12 @@ config CLONE_BACKWARDS2 | |||
407 | help | 407 | help |
408 | Architecture has the first two arguments of clone(2) swapped. | 408 | Architecture has the first two arguments of clone(2) swapped. |
409 | 409 | ||
410 | config CLONE_BACKWARDS3 | ||
411 | bool | ||
412 | help | ||
413 | Architecture has tls passed as the 3rd argument of clone(2), | ||
414 | not the 5th one. | ||
415 | |||
410 | config ODD_RT_SIGACTION | 416 | config ODD_RT_SIGACTION |
411 | bool | 417 | bool |
412 | help | 418 | help |
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S index 99c10475d477..9c548c7cf001 100644 --- a/arch/arc/lib/strchr-700.S +++ b/arch/arc/lib/strchr-700.S | |||
@@ -39,9 +39,18 @@ ARC_ENTRY strchr | |||
39 | ld.a r2,[r0,4] | 39 | ld.a r2,[r0,4] |
40 | sub r12,r6,r7 | 40 | sub r12,r6,r7 |
41 | bic r12,r12,r6 | 41 | bic r12,r12,r6 |
42 | #ifdef __LITTLE_ENDIAN__ | ||
42 | and r7,r12,r4 | 43 | and r7,r12,r4 |
43 | breq r7,0,.Loop ; For speed, we want this branch to be unaligned. | 44 | breq r7,0,.Loop ; For speed, we want this branch to be unaligned. |
44 | b .Lfound_char ; Likewise this one. | 45 | b .Lfound_char ; Likewise this one. |
46 | #else | ||
47 | and r12,r12,r4 | ||
48 | breq r12,0,.Loop ; For speed, we want this branch to be unaligned. | ||
49 | lsr_s r12,r12,7 | ||
50 | bic r2,r7,r6 | ||
51 | b.d .Lfound_char_b | ||
52 | and_s r2,r2,r12 | ||
53 | #endif | ||
45 | ; /* We require this code address to be unaligned for speed... */ | 54 | ; /* We require this code address to be unaligned for speed... */ |
46 | .Laligned: | 55 | .Laligned: |
47 | ld_s r2,[r0] | 56 | ld_s r2,[r0] |
@@ -95,6 +104,7 @@ ARC_ENTRY strchr | |||
95 | lsr r7,r7,7 | 104 | lsr r7,r7,7 |
96 | 105 | ||
97 | bic r2,r7,r6 | 106 | bic r2,r7,r6 |
107 | .Lfound_char_b: | ||
98 | norm r2,r2 | 108 | norm r2,r2 |
99 | sub_s r0,r0,4 | 109 | sub_s r0,r0,4 |
100 | asr_s r2,r2,3 | 110 | asr_s r2,r2,3 |
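
The big-endian path added above uses the same word-at-a-time byte-match detection as the existing little-endian code. A minimal C sketch of the classic bit trick this kind of strchr loop is built on (illustrative only, assuming the usual 0x01010101/0x80808080-style constants; it is not taken from the patch):

```c
#include <stdint.h>

/*
 * Classic "has zero byte" test for a 32-bit word: the result is nonzero
 * iff some byte of v is zero.  A strchr-style routine first XORs the word
 * with the repeated search character so a matching byte becomes zero,
 * then applies this test to find matches four bytes at a time.
 */
static uint32_t has_zero_byte(uint32_t v)
{
	return (v - 0x01010101u) & ~v & 0x80808080u;
}
```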
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts index d59b70c6a6a0..3d77dbe406f4 100644 --- a/arch/arm/boot/dts/at91sam9n12ek.dts +++ b/arch/arm/boot/dts/at91sam9n12ek.dts | |||
@@ -14,11 +14,11 @@ | |||
14 | compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9"; | 14 | compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9"; |
15 | 15 | ||
16 | chosen { | 16 | chosen { |
17 | bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; | 17 | bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; |
18 | }; | 18 | }; |
19 | 19 | ||
20 | memory { | 20 | memory { |
21 | reg = <0x20000000 0x10000000>; | 21 | reg = <0x20000000 0x8000000>; |
22 | }; | 22 | }; |
23 | 23 | ||
24 | clocks { | 24 | clocks { |
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi index b753855b2058..49e3c45818c2 100644 --- a/arch/arm/boot/dts/at91sam9x5ek.dtsi +++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi | |||
@@ -94,8 +94,9 @@ | |||
94 | 94 | ||
95 | usb0: ohci@00600000 { | 95 | usb0: ohci@00600000 { |
96 | status = "okay"; | 96 | status = "okay"; |
97 | num-ports = <2>; | 97 | num-ports = <3>; |
98 | atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW | 98 | atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */ |
99 | &pioD 19 GPIO_ACTIVE_LOW | ||
99 | &pioD 20 GPIO_ACTIVE_LOW | 100 | &pioD 20 GPIO_ACTIVE_LOW |
100 | >; | 101 | >; |
101 | }; | 102 | }; |
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts index 365760b33a26..40e6fb280333 100644 --- a/arch/arm/boot/dts/tegra20-seaboard.dts +++ b/arch/arm/boot/dts/tegra20-seaboard.dts | |||
@@ -830,6 +830,8 @@ | |||
830 | regulator-max-microvolt = <5000000>; | 830 | regulator-max-microvolt = <5000000>; |
831 | enable-active-high; | 831 | enable-active-high; |
832 | gpio = <&gpio 24 0>; /* PD0 */ | 832 | gpio = <&gpio 24 0>; /* PD0 */ |
833 | regulator-always-on; | ||
834 | regulator-boot-on; | ||
833 | }; | 835 | }; |
834 | }; | 836 | }; |
835 | 837 | ||
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts index ed4b901b0227..37c93d3c4812 100644 --- a/arch/arm/boot/dts/tegra20-trimslice.dts +++ b/arch/arm/boot/dts/tegra20-trimslice.dts | |||
@@ -412,6 +412,8 @@ | |||
412 | regulator-max-microvolt = <5000000>; | 412 | regulator-max-microvolt = <5000000>; |
413 | enable-active-high; | 413 | enable-active-high; |
414 | gpio = <&gpio 170 0>; /* PV2 */ | 414 | gpio = <&gpio 170 0>; /* PV2 */ |
415 | regulator-always-on; | ||
416 | regulator-boot-on; | ||
415 | }; | 417 | }; |
416 | }; | 418 | }; |
417 | 419 | ||
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts index ab67c94db280..a3d0ebad78a1 100644 --- a/arch/arm/boot/dts/tegra20-whistler.dts +++ b/arch/arm/boot/dts/tegra20-whistler.dts | |||
@@ -588,6 +588,8 @@ | |||
588 | regulator-max-microvolt = <5000000>; | 588 | regulator-max-microvolt = <5000000>; |
589 | enable-active-high; | 589 | enable-active-high; |
590 | gpio = <&tca6416 0 0>; /* GPIO_PMU0 */ | 590 | gpio = <&tca6416 0 0>; /* GPIO_PMU0 */ |
591 | regulator-always-on; | ||
592 | regulator-boot-on; | ||
591 | }; | 593 | }; |
592 | 594 | ||
593 | vbus3_reg: regulator@3 { | 595 | vbus3_reg: regulator@3 { |
@@ -598,6 +600,8 @@ | |||
598 | regulator-max-microvolt = <5000000>; | 600 | regulator-max-microvolt = <5000000>; |
599 | enable-active-high; | 601 | enable-active-high; |
600 | gpio = <&tca6416 1 0>; /* GPIO_PMU1 */ | 602 | gpio = <&tca6416 1 0>; /* GPIO_PMU1 */ |
603 | regulator-always-on; | ||
604 | regulator-boot-on; | ||
601 | }; | 605 | }; |
602 | }; | 606 | }; |
603 | 607 | ||
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index 6462a721ebd4..a252c0bfacf5 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h | |||
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void) | |||
88 | { | 88 | { |
89 | return 1 << mpidr_hash.bits; | 89 | return 1 << mpidr_hash.bits; |
90 | } | 90 | } |
91 | |||
92 | extern int platform_can_cpu_hotplug(void); | ||
93 | |||
91 | #endif | 94 | #endif |
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index f8b8965666e9..b07c09e5a0ac 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h | |||
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
107 | " subs %1, %0, %0, ror #16\n" | 107 | " subs %1, %0, %0, ror #16\n" |
108 | " addeq %0, %0, %4\n" | 108 | " addeq %0, %0, %4\n" |
109 | " strexeq %2, %0, [%3]" | 109 | " strexeq %2, %0, [%3]" |
110 | : "=&r" (slock), "=&r" (contended), "=r" (res) | 110 | : "=&r" (slock), "=&r" (contended), "=&r" (res) |
111 | : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) | 111 | : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) |
112 | : "cc"); | 112 | : "cc"); |
113 | } while (res); | 113 | } while (res); |
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw) | |||
168 | 168 | ||
169 | static inline int arch_write_trylock(arch_rwlock_t *rw) | 169 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
170 | { | 170 | { |
171 | unsigned long tmp; | 171 | unsigned long contended, res; |
172 | 172 | ||
173 | __asm__ __volatile__( | 173 | do { |
174 | " ldrex %0, [%1]\n" | 174 | __asm__ __volatile__( |
175 | " teq %0, #0\n" | 175 | " ldrex %0, [%2]\n" |
176 | " strexeq %0, %2, [%1]" | 176 | " mov %1, #0\n" |
177 | : "=&r" (tmp) | 177 | " teq %0, #0\n" |
178 | : "r" (&rw->lock), "r" (0x80000000) | 178 | " strexeq %1, %3, [%2]" |
179 | : "cc"); | 179 | : "=&r" (contended), "=&r" (res) |
180 | : "r" (&rw->lock), "r" (0x80000000) | ||
181 | : "cc"); | ||
182 | } while (res); | ||
180 | 183 | ||
181 | if (tmp == 0) { | 184 | if (!contended) { |
182 | smp_mb(); | 185 | smp_mb(); |
183 | return 1; | 186 | return 1; |
184 | } else { | 187 | } else { |
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) | |||
254 | 257 | ||
255 | static inline int arch_read_trylock(arch_rwlock_t *rw) | 258 | static inline int arch_read_trylock(arch_rwlock_t *rw) |
256 | { | 259 | { |
257 | unsigned long tmp, tmp2 = 1; | 260 | unsigned long contended, res; |
258 | 261 | ||
259 | __asm__ __volatile__( | 262 | do { |
260 | " ldrex %0, [%2]\n" | 263 | __asm__ __volatile__( |
261 | " adds %0, %0, #1\n" | 264 | " ldrex %0, [%2]\n" |
262 | " strexpl %1, %0, [%2]\n" | 265 | " mov %1, #0\n" |
263 | : "=&r" (tmp), "+r" (tmp2) | 266 | " adds %0, %0, #1\n" |
264 | : "r" (&rw->lock) | 267 | " strexpl %1, %0, [%2]" |
265 | : "cc"); | 268 | : "=&r" (contended), "=&r" (res) |
269 | : "r" (&rw->lock) | ||
270 | : "cc"); | ||
271 | } while (res); | ||
266 | 272 | ||
267 | smp_mb(); | 273 | /* If the lock is negative, then it is already held for write. */ |
268 | return tmp2 == 0; | 274 | if (contended < 0x80000000) { |
275 | smp_mb(); | ||
276 | return 1; | ||
277 | } else { | ||
278 | return 0; | ||
279 | } | ||
269 | } | 280 | } |
270 | 281 | ||
271 | /* read_can_lock - would read_trylock() succeed? */ | 282 | /* read_can_lock - would read_trylock() succeed? */ |
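
The trylock rewrites above retry when the store-exclusive fails (the exclusive monitor was lost) instead of reporting contention. A minimal user-space model of that control flow, using C11 weak compare-and-swap, which may likewise fail spuriously on LL/SC machines, in place of ldrex/strex (an illustrative sketch, not the kernel code):

```c
#include <stdatomic.h>
#include <stdbool.h>

/* 0x80000000 marks the rwlock word as "held for write", as in the hunk above. */
static bool write_trylock_model(atomic_ulong *lock)
{
	unsigned long expected;

	do {
		expected = 0;	/* only an unlocked word may be claimed */
		/*
		 * A weak CAS may fail spuriously, just as strex fails when the
		 * exclusive monitor is lost; that is not contention, so retry.
		 */
		if (atomic_compare_exchange_weak(lock, &expected, 0x80000000UL))
			return true;	/* lock acquired */
	} while (expected == 0);	/* word was still free: spurious failure, retry */

	return false;			/* genuinely contended */
}
```

The seq_cst CAS here already implies the ordering that the kernel code adds explicitly with smp_mb() on the success path.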
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 46e7cfb3e721..0baf7f0d9394 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h | |||
@@ -43,6 +43,7 @@ struct mmu_gather { | |||
43 | struct mm_struct *mm; | 43 | struct mm_struct *mm; |
44 | unsigned int fullmm; | 44 | unsigned int fullmm; |
45 | struct vm_area_struct *vma; | 45 | struct vm_area_struct *vma; |
46 | unsigned long start, end; | ||
46 | unsigned long range_start; | 47 | unsigned long range_start; |
47 | unsigned long range_end; | 48 | unsigned long range_end; |
48 | unsigned int nr; | 49 | unsigned int nr; |
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) | |||
107 | } | 108 | } |
108 | 109 | ||
109 | static inline void | 110 | static inline void |
110 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) | 111 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
111 | { | 112 | { |
112 | tlb->mm = mm; | 113 | tlb->mm = mm; |
113 | tlb->fullmm = fullmm; | 114 | tlb->fullmm = !(start | (end+1)); |
115 | tlb->start = start; | ||
116 | tlb->end = end; | ||
114 | tlb->vma = NULL; | 117 | tlb->vma = NULL; |
115 | tlb->max = ARRAY_SIZE(tlb->local); | 118 | tlb->max = ARRAY_SIZE(tlb->local); |
116 | tlb->pages = tlb->local; | 119 | tlb->pages = tlb->local; |
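
With the new signature, tlb_gather_mmu() derives fullmm from the range itself: a full-mm teardown is requested by passing start = 0 and end = ~0UL, so !(start | (end + 1)) is true only for that pair. A small stand-alone check of the encoding (illustrative only; the arm64 and ia64 hunks below use the same expression):

```c
#include <assert.h>

static int is_full_mm_flush(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));	/* true only for start == 0, end == ~0UL */
}

int main(void)
{
	assert(is_full_mm_flush(0UL, ~0UL));		/* whole address space */
	assert(!is_full_mm_flush(0x8000UL, 0xffffUL));	/* ranged unmap */
	return 0;
}
```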
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index d40d0ef389db..9cbe70c8b0ef 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S | |||
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc) | |||
357 | .endm | 357 | .endm |
358 | 358 | ||
359 | .macro kuser_cmpxchg_check | 359 | .macro kuser_cmpxchg_check |
360 | #if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) | 360 | #if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \ |
361 | !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) | ||
361 | #ifndef CONFIG_MMU | 362 | #ifndef CONFIG_MMU |
362 | #warning "NPTL on non MMU needs fixing" | 363 | #warning "NPTL on non MMU needs fixing" |
363 | #else | 364 | #else |
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index 25442f451148..918875d96d5d 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c | |||
@@ -84,17 +84,14 @@ int show_fiq_list(struct seq_file *p, int prec) | |||
84 | 84 | ||
85 | void set_fiq_handler(void *start, unsigned int length) | 85 | void set_fiq_handler(void *start, unsigned int length) |
86 | { | 86 | { |
87 | #if defined(CONFIG_CPU_USE_DOMAINS) | ||
88 | void *base = (void *)0xffff0000; | ||
89 | #else | ||
90 | void *base = vectors_page; | 87 | void *base = vectors_page; |
91 | #endif | ||
92 | unsigned offset = FIQ_OFFSET; | 88 | unsigned offset = FIQ_OFFSET; |
93 | 89 | ||
94 | memcpy(base + offset, start, length); | 90 | memcpy(base + offset, start, length); |
91 | if (!cache_is_vipt_nonaliasing()) | ||
92 | flush_icache_range((unsigned long)base + offset, offset + | ||
93 | length); | ||
95 | flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); | 94 | flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); |
96 | if (!vectors_high()) | ||
97 | flush_icache_range(offset, offset + length); | ||
98 | } | 95 | } |
99 | 96 | ||
100 | int claim_fiq(struct fiq_handler *f) | 97 | int claim_fiq(struct fiq_handler *f) |
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 4fb074c446bf..57221e349a7c 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/mmu_context.h> | 15 | #include <asm/mmu_context.h> |
16 | #include <asm/cacheflush.h> | 16 | #include <asm/cacheflush.h> |
17 | #include <asm/mach-types.h> | 17 | #include <asm/mach-types.h> |
18 | #include <asm/smp_plat.h> | ||
18 | #include <asm/system_misc.h> | 19 | #include <asm/system_misc.h> |
19 | 20 | ||
20 | extern const unsigned char relocate_new_kernel[]; | 21 | extern const unsigned char relocate_new_kernel[]; |
@@ -39,6 +40,14 @@ int machine_kexec_prepare(struct kimage *image) | |||
39 | int i, err; | 40 | int i, err; |
40 | 41 | ||
41 | /* | 42 | /* |
43 | * Validate that if the current HW supports SMP, then the SW supports | ||
44 | * and implements CPU hotplug for the current HW. If not, we won't be | ||
45 | * able to kexec reliably, so fail the prepare operation. | ||
46 | */ | ||
47 | if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug()) | ||
48 | return -EINVAL; | ||
49 | |||
50 | /* | ||
42 | * No segment at default ATAGs address. try to locate | 51 | * No segment at default ATAGs address. try to locate |
43 | * a dtb using magic. | 52 | * a dtb using magic. |
44 | */ | 53 | */ |
@@ -73,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused) | |||
73 | crash_save_cpu(®s, smp_processor_id()); | 82 | crash_save_cpu(®s, smp_processor_id()); |
74 | flush_cache_all(); | 83 | flush_cache_all(); |
75 | 84 | ||
85 | set_cpu_online(smp_processor_id(), false); | ||
76 | atomic_dec(&waiting_for_crash_ipi); | 86 | atomic_dec(&waiting_for_crash_ipi); |
77 | while (1) | 87 | while (1) |
78 | cpu_relax(); | 88 | cpu_relax(); |
@@ -134,10 +144,13 @@ void machine_kexec(struct kimage *image) | |||
134 | unsigned long reboot_code_buffer_phys; | 144 | unsigned long reboot_code_buffer_phys; |
135 | void *reboot_code_buffer; | 145 | void *reboot_code_buffer; |
136 | 146 | ||
137 | if (num_online_cpus() > 1) { | 147 | /* |
138 | pr_err("kexec: error: multiple CPUs still online\n"); | 148 | * This can only happen if machine_shutdown() failed to disable some |
139 | return; | 149 | * CPU, and that can only happen if the checks in |
140 | } | 150 | * machine_kexec_prepare() were not correct. If this fails, we can't |
151 | * reliably kexec anyway, so BUG_ON is appropriate. | ||
152 | */ | ||
153 | BUG_ON(num_online_cpus() > 1); | ||
141 | 154 | ||
142 | page_list = image->head & PAGE_MASK; | 155 | page_list = image->head & PAGE_MASK; |
143 | 156 | ||
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index d9f5cd4e533f..e186ee1e63f6 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map) | |||
53 | static int | 53 | static int |
54 | armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) | 54 | armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) |
55 | { | 55 | { |
56 | int mapping = (*event_map)[config]; | 56 | int mapping; |
57 | |||
58 | if (config >= PERF_COUNT_HW_MAX) | ||
59 | return -EINVAL; | ||
60 | |||
61 | mapping = (*event_map)[config]; | ||
57 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; | 62 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; |
58 | } | 63 | } |
59 | 64 | ||
@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events, | |||
253 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 258 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
254 | struct pmu *leader_pmu = event->group_leader->pmu; | 259 | struct pmu *leader_pmu = event->group_leader->pmu; |
255 | 260 | ||
261 | if (is_software_event(event)) | ||
262 | return 1; | ||
263 | |||
256 | if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) | 264 | if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) |
257 | return 1; | 265 | return 1; |
258 | 266 | ||
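
The armpmu_map_hw_event() change validates the user-supplied config value before using it as an index into the event map. A minimal sketch of the same check (array size and names are stand-ins, not the kernel's):

```c
#include <errno.h>
#include <stdint.h>

#define HW_EVENTS_MAX	10	/* stand-in for PERF_COUNT_HW_MAX */

static int map_hw_event(const unsigned int map[HW_EVENTS_MAX], uint64_t config)
{
	if (config >= HW_EVENTS_MAX)
		return -EINVAL;		/* reject out-of-range indices from user space */

	return (int)map[config];
}
```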
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 536c85fe72a8..94f6b05f9e24 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -462,7 +462,7 @@ int in_gate_area_no_mm(unsigned long addr) | |||
462 | { | 462 | { |
463 | return in_gate_area(NULL, addr); | 463 | return in_gate_area(NULL, addr); |
464 | } | 464 | } |
465 | #define is_gate_vma(vma) ((vma) = &gate_vma) | 465 | #define is_gate_vma(vma) ((vma) == &gate_vma) |
466 | #else | 466 | #else |
467 | #define is_gate_vma(vma) 0 | 467 | #define is_gate_vma(vma) 0 |
468 | #endif | 468 | #endif |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index c2b4f8f0be9a..2dc19349eb19 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle) | |||
145 | return -ENOSYS; | 145 | return -ENOSYS; |
146 | } | 146 | } |
147 | 147 | ||
148 | int platform_can_cpu_hotplug(void) | ||
149 | { | ||
150 | #ifdef CONFIG_HOTPLUG_CPU | ||
151 | if (smp_ops.cpu_kill) | ||
152 | return 1; | ||
153 | #endif | ||
154 | |||
155 | return 0; | ||
156 | } | ||
157 | |||
148 | #ifdef CONFIG_HOTPLUG_CPU | 158 | #ifdef CONFIG_HOTPLUG_CPU |
149 | static void percpu_timer_stop(void); | 159 | static void percpu_timer_stop(void); |
150 | 160 | ||
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 4a5199070430..db9cf692d4dd 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
@@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu, | |||
146 | #define access_pmintenclr pm_fake | 146 | #define access_pmintenclr pm_fake |
147 | 147 | ||
148 | /* Architected CP15 registers. | 148 | /* Architected CP15 registers. |
149 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 | 149 | * CRn denotes the primary register number, but is copied to the CRm in the |
150 | * user space API for 64-bit register access in line with the terminology used | ||
151 | * in the ARM ARM. | ||
152 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit | ||
153 | * registers preceding 32-bit ones. | ||
150 | */ | 154 | */ |
151 | static const struct coproc_reg cp15_regs[] = { | 155 | static const struct coproc_reg cp15_regs[] = { |
152 | /* CSSELR: swapped by interrupt.S. */ | 156 | /* CSSELR: swapped by interrupt.S. */ |
@@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = { | |||
154 | NULL, reset_unknown, c0_CSSELR }, | 158 | NULL, reset_unknown, c0_CSSELR }, |
155 | 159 | ||
156 | /* TTBR0/TTBR1: swapped by interrupt.S. */ | 160 | /* TTBR0/TTBR1: swapped by interrupt.S. */ |
157 | { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, | 161 | { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, |
158 | { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, | 162 | { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, |
159 | 163 | ||
160 | /* TTBCR: swapped by interrupt.S. */ | 164 | /* TTBCR: swapped by interrupt.S. */ |
161 | { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, | 165 | { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, |
@@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = { | |||
182 | NULL, reset_unknown, c6_IFAR }, | 186 | NULL, reset_unknown, c6_IFAR }, |
183 | 187 | ||
184 | /* PAR swapped by interrupt.S */ | 188 | /* PAR swapped by interrupt.S */ |
185 | { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, | 189 | { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, |
186 | 190 | ||
187 | /* | 191 | /* |
188 | * DC{C,I,CI}SW operations: | 192 | * DC{C,I,CI}SW operations: |
@@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params) | |||
399 | | KVM_REG_ARM_OPC1_MASK)) | 403 | | KVM_REG_ARM_OPC1_MASK)) |
400 | return false; | 404 | return false; |
401 | params->is_64bit = true; | 405 | params->is_64bit = true; |
402 | params->CRm = ((id & KVM_REG_ARM_CRM_MASK) | 406 | /* CRm to CRn: see cp15_to_index for details */ |
407 | params->CRn = ((id & KVM_REG_ARM_CRM_MASK) | ||
403 | >> KVM_REG_ARM_CRM_SHIFT); | 408 | >> KVM_REG_ARM_CRM_SHIFT); |
404 | params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) | 409 | params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) |
405 | >> KVM_REG_ARM_OPC1_SHIFT); | 410 | >> KVM_REG_ARM_OPC1_SHIFT); |
406 | params->Op2 = 0; | 411 | params->Op2 = 0; |
407 | params->CRn = 0; | 412 | params->CRm = 0; |
408 | return true; | 413 | return true; |
409 | default: | 414 | default: |
410 | return false; | 415 | return false; |
@@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg) | |||
898 | if (reg->is_64) { | 903 | if (reg->is_64) { |
899 | val |= KVM_REG_SIZE_U64; | 904 | val |= KVM_REG_SIZE_U64; |
900 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); | 905 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); |
901 | val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); | 906 | /* |
907 | * CRn always denotes the primary coproc. reg. nr. for the | ||
908 | * in-kernel representation, but the user space API uses the | ||
909 | * CRm for the encoding, because it is modelled after the | ||
910 | * MRRC/MCRR instructions: see the ARM ARM rev. c page | ||
911 | * B3-1445 | ||
912 | */ | ||
913 | val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT); | ||
902 | } else { | 914 | } else { |
903 | val |= KVM_REG_SIZE_U32; | 915 | val |= KVM_REG_SIZE_U32; |
904 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); | 916 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); |
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index b7301d3e4799..0461d5c8d3de 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h | |||
@@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1, | |||
135 | return -1; | 135 | return -1; |
136 | if (i1->CRn != i2->CRn) | 136 | if (i1->CRn != i2->CRn) |
137 | return i1->CRn - i2->CRn; | 137 | return i1->CRn - i2->CRn; |
138 | if (i1->is_64 != i2->is_64) | ||
139 | return i2->is_64 - i1->is_64; | ||
138 | if (i1->CRm != i2->CRm) | 140 | if (i1->CRm != i2->CRm) |
139 | return i1->CRm - i2->CRm; | 141 | return i1->CRm - i2->CRm; |
140 | if (i1->Op1 != i2->Op1) | 142 | if (i1->Op1 != i2->Op1) |
@@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1, | |||
145 | 147 | ||
146 | #define CRn(_x) .CRn = _x | 148 | #define CRn(_x) .CRn = _x |
147 | #define CRm(_x) .CRm = _x | 149 | #define CRm(_x) .CRm = _x |
150 | #define CRm64(_x) .CRn = _x, .CRm = 0 | ||
148 | #define Op1(_x) .Op1 = _x | 151 | #define Op1(_x) .Op1 = _x |
149 | #define Op2(_x) .Op2 = _x | 152 | #define Op2(_x) .Op2 = _x |
150 | #define is64 .is_64 = true | 153 | #define is64 .is_64 = true |
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c index 685063a6d0cf..cf93472b9dd6 100644 --- a/arch/arm/kvm/coproc_a15.c +++ b/arch/arm/kvm/coproc_a15.c | |||
@@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu, | |||
114 | 114 | ||
115 | /* | 115 | /* |
116 | * A15-specific CP15 registers. | 116 | * A15-specific CP15 registers. |
117 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 | 117 | * CRn denotes the primary register number, but is copied to the CRm in the |
118 | * user space API for 64-bit register access in line with the terminology used | ||
119 | * in the ARM ARM. | ||
120 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit | ||
121 | * registers preceding 32-bit ones. | ||
118 | */ | 122 | */ |
119 | static const struct coproc_reg a15_regs[] = { | 123 | static const struct coproc_reg a15_regs[] = { |
120 | /* MPIDR: we use VMPIDR for guest access. */ | 124 | /* MPIDR: we use VMPIDR for guest access. */ |
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index b8e06b7a2833..0c25d9487d53 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c | |||
@@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
63 | static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | 63 | static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
64 | struct kvm_exit_mmio *mmio) | 64 | struct kvm_exit_mmio *mmio) |
65 | { | 65 | { |
66 | unsigned long rt, len; | 66 | unsigned long rt; |
67 | int len; | ||
67 | bool is_write, sign_extend; | 68 | bool is_write, sign_extend; |
68 | 69 | ||
69 | if (kvm_vcpu_dabt_isextabt(vcpu)) { | 70 | if (kvm_vcpu_dabt_isextabt(vcpu)) { |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index ca6bea4859b4..0988d9e04dd4 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) | |||
85 | return p; | 85 | return p; |
86 | } | 86 | } |
87 | 87 | ||
88 | static bool page_empty(void *ptr) | ||
89 | { | ||
90 | struct page *ptr_page = virt_to_page(ptr); | ||
91 | return page_count(ptr_page) == 1; | ||
92 | } | ||
93 | |||
88 | static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) | 94 | static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) |
89 | { | 95 | { |
90 | pmd_t *pmd_table = pmd_offset(pud, 0); | 96 | pmd_t *pmd_table = pmd_offset(pud, 0); |
@@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) | |||
103 | put_page(virt_to_page(pmd)); | 109 | put_page(virt_to_page(pmd)); |
104 | } | 110 | } |
105 | 111 | ||
106 | static bool pmd_empty(pmd_t *pmd) | ||
107 | { | ||
108 | struct page *pmd_page = virt_to_page(pmd); | ||
109 | return page_count(pmd_page) == 1; | ||
110 | } | ||
111 | |||
112 | static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) | 112 | static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) |
113 | { | 113 | { |
114 | if (pte_present(*pte)) { | 114 | if (pte_present(*pte)) { |
@@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) | |||
118 | } | 118 | } |
119 | } | 119 | } |
120 | 120 | ||
121 | static bool pte_empty(pte_t *pte) | ||
122 | { | ||
123 | struct page *pte_page = virt_to_page(pte); | ||
124 | return page_count(pte_page) == 1; | ||
125 | } | ||
126 | |||
127 | static void unmap_range(struct kvm *kvm, pgd_t *pgdp, | 121 | static void unmap_range(struct kvm *kvm, pgd_t *pgdp, |
128 | unsigned long long start, u64 size) | 122 | unsigned long long start, u64 size) |
129 | { | 123 | { |
@@ -132,37 +126,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, | |||
132 | pmd_t *pmd; | 126 | pmd_t *pmd; |
133 | pte_t *pte; | 127 | pte_t *pte; |
134 | unsigned long long addr = start, end = start + size; | 128 | unsigned long long addr = start, end = start + size; |
135 | u64 range; | 129 | u64 next; |
136 | 130 | ||
137 | while (addr < end) { | 131 | while (addr < end) { |
138 | pgd = pgdp + pgd_index(addr); | 132 | pgd = pgdp + pgd_index(addr); |
139 | pud = pud_offset(pgd, addr); | 133 | pud = pud_offset(pgd, addr); |
140 | if (pud_none(*pud)) { | 134 | if (pud_none(*pud)) { |
141 | addr += PUD_SIZE; | 135 | addr = pud_addr_end(addr, end); |
142 | continue; | 136 | continue; |
143 | } | 137 | } |
144 | 138 | ||
145 | pmd = pmd_offset(pud, addr); | 139 | pmd = pmd_offset(pud, addr); |
146 | if (pmd_none(*pmd)) { | 140 | if (pmd_none(*pmd)) { |
147 | addr += PMD_SIZE; | 141 | addr = pmd_addr_end(addr, end); |
148 | continue; | 142 | continue; |
149 | } | 143 | } |
150 | 144 | ||
151 | pte = pte_offset_kernel(pmd, addr); | 145 | pte = pte_offset_kernel(pmd, addr); |
152 | clear_pte_entry(kvm, pte, addr); | 146 | clear_pte_entry(kvm, pte, addr); |
153 | range = PAGE_SIZE; | 147 | next = addr + PAGE_SIZE; |
154 | 148 | ||
155 | /* If we emptied the pte, walk back up the ladder */ | 149 | /* If we emptied the pte, walk back up the ladder */ |
156 | if (pte_empty(pte)) { | 150 | if (page_empty(pte)) { |
157 | clear_pmd_entry(kvm, pmd, addr); | 151 | clear_pmd_entry(kvm, pmd, addr); |
158 | range = PMD_SIZE; | 152 | next = pmd_addr_end(addr, end); |
159 | if (pmd_empty(pmd)) { | 153 | if (page_empty(pmd) && !page_empty(pud)) { |
160 | clear_pud_entry(kvm, pud, addr); | 154 | clear_pud_entry(kvm, pud, addr); |
161 | range = PUD_SIZE; | 155 | next = pud_addr_end(addr, end); |
162 | } | 156 | } |
163 | } | 157 | } |
164 | 158 | ||
165 | addr += range; | 159 | addr = next; |
166 | } | 160 | } |
167 | } | 161 | } |
168 | 162 | ||
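
The rewritten unmap_range() steps with pud_addr_end()/pmd_addr_end() rather than adding fixed PUD_SIZE/PMD_SIZE strides, so an unaligned start address cannot make the walk overshoot end. The kernel helpers behave roughly like this sketch (the boundary size is an illustrative assumption, not the real page-table geometry):

```c
#define SKETCH_PMD_SIZE	(1UL << 21)
#define SKETCH_PMD_MASK	(~(SKETCH_PMD_SIZE - 1))

/* Advance to the next PMD boundary, but never past 'end'. */
static unsigned long sketch_pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + SKETCH_PMD_SIZE) & SKETCH_PMD_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}
```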
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c index 2abee6626aac..916e5a142917 100644 --- a/arch/arm/mach-at91/at91sam9x5.c +++ b/arch/arm/mach-at91/at91sam9x5.c | |||
@@ -227,6 +227,8 @@ static struct clk_lookup periph_clocks_lookups[] = { | |||
227 | CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk), | 227 | CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk), |
228 | CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk), | 228 | CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk), |
229 | CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk), | 229 | CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk), |
230 | CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk), | ||
231 | CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk), | ||
230 | CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk), | 232 | CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk), |
231 | CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk), | 233 | CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk), |
232 | CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk), | 234 | CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk), |
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c index dff4ddc5ef81..139e42da25f0 100644 --- a/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/arch/arm/mach-davinci/board-dm355-leopard.c | |||
@@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = { | |||
75 | .parts = davinci_nand_partitions, | 75 | .parts = davinci_nand_partitions, |
76 | .nr_parts = ARRAY_SIZE(davinci_nand_partitions), | 76 | .nr_parts = ARRAY_SIZE(davinci_nand_partitions), |
77 | .ecc_mode = NAND_ECC_HW_SYNDROME, | 77 | .ecc_mode = NAND_ECC_HW_SYNDROME, |
78 | .ecc_bits = 4, | ||
78 | .bbt_options = NAND_BBT_USE_FLASH, | 79 | .bbt_options = NAND_BBT_USE_FLASH, |
79 | }; | 80 | }; |
80 | 81 | ||
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index a33686a6fbb2..fa4bfaf952d8 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c | |||
@@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = { | |||
153 | .parts = davinci_evm_nandflash_partition, | 153 | .parts = davinci_evm_nandflash_partition, |
154 | .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition), | 154 | .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition), |
155 | .ecc_mode = NAND_ECC_HW, | 155 | .ecc_mode = NAND_ECC_HW, |
156 | .ecc_bits = 1, | ||
156 | .bbt_options = NAND_BBT_USE_FLASH, | 157 | .bbt_options = NAND_BBT_USE_FLASH, |
157 | .timing = &davinci_evm_nandflash_timing, | 158 | .timing = &davinci_evm_nandflash_timing, |
158 | }; | 159 | }; |
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index fbb8e5ab1dc1..0c005e876cac 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c | |||
@@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = { | |||
90 | .parts = davinci_nand_partitions, | 90 | .parts = davinci_nand_partitions, |
91 | .nr_parts = ARRAY_SIZE(davinci_nand_partitions), | 91 | .nr_parts = ARRAY_SIZE(davinci_nand_partitions), |
92 | .ecc_mode = NAND_ECC_HW, | 92 | .ecc_mode = NAND_ECC_HW, |
93 | .ecc_bits = 1, | ||
93 | .options = 0, | 94 | .options = 0, |
94 | }; | 95 | }; |
95 | 96 | ||
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c index 2bc112adf565..808233b60e3d 100644 --- a/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/arch/arm/mach-davinci/board-neuros-osd2.c | |||
@@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = { | |||
88 | .parts = davinci_ntosd2_nandflash_partition, | 88 | .parts = davinci_ntosd2_nandflash_partition, |
89 | .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition), | 89 | .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition), |
90 | .ecc_mode = NAND_ECC_HW, | 90 | .ecc_mode = NAND_ECC_HW, |
91 | .ecc_bits = 1, | ||
91 | .bbt_options = NAND_BBT_USE_FLASH, | 92 | .bbt_options = NAND_BBT_USE_FLASH, |
92 | }; | 93 | }; |
93 | 94 | ||
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c index f6eeb87e4e95..827d15009a86 100644 --- a/arch/arm/mach-omap2/board-n8x0.c +++ b/arch/arm/mach-omap2/board-n8x0.c | |||
@@ -122,11 +122,7 @@ static struct musb_hdrc_config musb_config = { | |||
122 | }; | 122 | }; |
123 | 123 | ||
124 | static struct musb_hdrc_platform_data tusb_data = { | 124 | static struct musb_hdrc_platform_data tusb_data = { |
125 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
126 | .mode = MUSB_OTG, | 125 | .mode = MUSB_OTG, |
127 | #else | ||
128 | .mode = MUSB_HOST, | ||
129 | #endif | ||
130 | .set_power = tusb_set_power, | 126 | .set_power = tusb_set_power, |
131 | .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */ | 127 | .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */ |
132 | .power = 100, /* Max 100 mA VBUS for host mode */ | 128 | .power = 100, /* Max 100 mA VBUS for host mode */ |
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c index d2ea68ea678a..7735105561d8 100644 --- a/arch/arm/mach-omap2/board-rx51.c +++ b/arch/arm/mach-omap2/board-rx51.c | |||
@@ -85,7 +85,7 @@ static struct omap_board_mux board_mux[] __initdata = { | |||
85 | 85 | ||
86 | static struct omap_musb_board_data musb_board_data = { | 86 | static struct omap_musb_board_data musb_board_data = { |
87 | .interface_type = MUSB_INTERFACE_ULPI, | 87 | .interface_type = MUSB_INTERFACE_ULPI, |
88 | .mode = MUSB_PERIPHERAL, | 88 | .mode = MUSB_OTG, |
89 | .power = 0, | 89 | .power = 0, |
90 | }; | 90 | }; |
91 | 91 | ||
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index 8c4de2708cf2..bc897231bd10 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c | |||
@@ -38,11 +38,8 @@ static struct musb_hdrc_config musb_config = { | |||
38 | }; | 38 | }; |
39 | 39 | ||
40 | static struct musb_hdrc_platform_data musb_plat = { | 40 | static struct musb_hdrc_platform_data musb_plat = { |
41 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
42 | .mode = MUSB_OTG, | 41 | .mode = MUSB_OTG, |
43 | #else | 42 | |
44 | .mode = MUSB_HOST, | ||
45 | #endif | ||
46 | /* .clock is set dynamically */ | 43 | /* .clock is set dynamically */ |
47 | .config = &musb_config, | 44 | .config = &musb_config, |
48 | 45 | ||
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index db5c2cab8fda..cd2c88e7a8f7 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -809,15 +809,18 @@ config KUSER_HELPERS | |||
809 | the CPU type fitted to the system. This permits binaries to be | 809 | the CPU type fitted to the system. This permits binaries to be |
810 | run on ARMv4 through to ARMv7 without modification. | 810 | run on ARMv4 through to ARMv7 without modification. |
811 | 811 | ||
812 | See Documentation/arm/kernel_user_helpers.txt for details. | ||
813 | |||
812 | However, the fixed address nature of these helpers can be used | 814 | However, the fixed address nature of these helpers can be used |
813 | by ROP (return orientated programming) authors when creating | 815 | by ROP (return orientated programming) authors when creating |
814 | exploits. | 816 | exploits. |
815 | 817 | ||
816 | If all of the binaries and libraries which run on your platform | 818 | If all of the binaries and libraries which run on your platform |
817 | are built specifically for your platform, and make no use of | 819 | are built specifically for your platform, and make no use of |
818 | these helpers, then you can turn this option off. However, | 820 | these helpers, then you can turn this option off to hinder |
819 | when such an binary or library is run, it will receive a SIGILL | 821 | such exploits. However, in that case, if a binary or library |
820 | signal, which will terminate the program. | 822 | relying on those helpers is run, it will receive a SIGILL signal, |
823 | which will terminate the program. | ||
821 | 824 | ||
822 | Say N here only if you are absolutely certain that you do not | 825 | Say N here only if you are absolutely certain that you do not |
823 | need these helpers; otherwise, the safe option is to say Y. | 826 | need these helpers; otherwise, the safe option is to say Y. |
diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c index 3e5c4619caa5..50a3ea0037db 100644 --- a/arch/arm/plat-samsung/init.c +++ b/arch/arm/plat-samsung/init.c | |||
@@ -55,12 +55,13 @@ void __init s3c_init_cpu(unsigned long idcode, | |||
55 | 55 | ||
56 | printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode); | 56 | printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode); |
57 | 57 | ||
58 | if (cpu->map_io == NULL || cpu->init == NULL) { | 58 | if (cpu->init == NULL) { |
59 | printk(KERN_ERR "CPU %s support not enabled\n", cpu->name); | 59 | printk(KERN_ERR "CPU %s support not enabled\n", cpu->name); |
60 | panic("Unsupported Samsung CPU"); | 60 | panic("Unsupported Samsung CPU"); |
61 | } | 61 | } |
62 | 62 | ||
63 | cpu->map_io(); | 63 | if (cpu->map_io) |
64 | cpu->map_io(); | ||
64 | } | 65 | } |
65 | 66 | ||
66 | /* s3c24xx_init_clocks | 67 | /* s3c24xx_init_clocks |
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index c9770ba5c7df..8a6295c86209 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c | |||
@@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused) | |||
170 | per_cpu(xen_vcpu, cpu) = vcpup; | 170 | per_cpu(xen_vcpu, cpu) = vcpup; |
171 | 171 | ||
172 | enable_percpu_irq(xen_events_irq, 0); | 172 | enable_percpu_irq(xen_events_irq, 0); |
173 | put_cpu(); | ||
173 | } | 174 | } |
174 | 175 | ||
175 | static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) | 176 | static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) |
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index c92de4163eba..b25763bc0ec4 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h | |||
@@ -42,14 +42,15 @@ | |||
42 | #define TPIDR_EL1 18 /* Thread ID, Privileged */ | 42 | #define TPIDR_EL1 18 /* Thread ID, Privileged */ |
43 | #define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ | 43 | #define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ |
44 | #define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ | 44 | #define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ |
45 | #define PAR_EL1 21 /* Physical Address Register */ | ||
45 | /* 32bit specific registers. Keep them at the end of the range */ | 46 | /* 32bit specific registers. Keep them at the end of the range */ |
46 | #define DACR32_EL2 21 /* Domain Access Control Register */ | 47 | #define DACR32_EL2 22 /* Domain Access Control Register */ |
47 | #define IFSR32_EL2 22 /* Instruction Fault Status Register */ | 48 | #define IFSR32_EL2 23 /* Instruction Fault Status Register */ |
48 | #define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */ | 49 | #define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */ |
49 | #define DBGVCR32_EL2 24 /* Debug Vector Catch Register */ | 50 | #define DBGVCR32_EL2 25 /* Debug Vector Catch Register */ |
50 | #define TEECR32_EL1 25 /* ThumbEE Configuration Register */ | 51 | #define TEECR32_EL1 26 /* ThumbEE Configuration Register */ |
51 | #define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */ | 52 | #define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */ |
52 | #define NR_SYS_REGS 27 | 53 | #define NR_SYS_REGS 28 |
53 | 54 | ||
54 | /* 32bit mapping */ | 55 | /* 32bit mapping */ |
55 | #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ | 56 | #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ |
@@ -69,6 +70,8 @@ | |||
69 | #define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ | 70 | #define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ |
70 | #define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ | 71 | #define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ |
71 | #define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ | 72 | #define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ |
73 | #define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */ | ||
74 | #define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */ | ||
72 | #define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ | 75 | #define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ |
73 | #define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ | 76 | #define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ |
74 | #define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ | 77 | #define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 644d73956864..0859a4ddd1e7 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -129,7 +129,7 @@ struct kvm_vcpu_arch { | |||
129 | struct kvm_mmu_memory_cache mmu_page_cache; | 129 | struct kvm_mmu_memory_cache mmu_page_cache; |
130 | 130 | ||
131 | /* Target CPU and feature flags */ | 131 | /* Target CPU and feature flags */ |
132 | u32 target; | 132 | int target; |
133 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); | 133 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); |
134 | 134 | ||
135 | /* Detect first run of a vcpu */ | 135 | /* Detect first run of a vcpu */ |
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 46b3beb4b773..717031a762c2 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h | |||
@@ -35,6 +35,7 @@ struct mmu_gather { | |||
35 | struct mm_struct *mm; | 35 | struct mm_struct *mm; |
36 | unsigned int fullmm; | 36 | unsigned int fullmm; |
37 | struct vm_area_struct *vma; | 37 | struct vm_area_struct *vma; |
38 | unsigned long start, end; | ||
38 | unsigned long range_start; | 39 | unsigned long range_start; |
39 | unsigned long range_end; | 40 | unsigned long range_end; |
40 | unsigned int nr; | 41 | unsigned int nr; |
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) | |||
97 | } | 98 | } |
98 | 99 | ||
99 | static inline void | 100 | static inline void |
100 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) | 101 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
101 | { | 102 | { |
102 | tlb->mm = mm; | 103 | tlb->mm = mm; |
103 | tlb->fullmm = fullmm; | 104 | tlb->fullmm = !(start | (end+1)); |
105 | tlb->start = start; | ||
106 | tlb->end = end; | ||
104 | tlb->vma = NULL; | 107 | tlb->vma = NULL; |
105 | tlb->max = ARRAY_SIZE(tlb->local); | 108 | tlb->max = ARRAY_SIZE(tlb->local); |
106 | tlb->pages = tlb->local; | 109 | tlb->pages = tlb->local; |
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 9ba33c40cdf8..12e6ccb88691 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map) | |||
107 | static int | 107 | static int |
108 | armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) | 108 | armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) |
109 | { | 109 | { |
110 | int mapping = (*event_map)[config]; | 110 | int mapping; |
111 | |||
112 | if (config >= PERF_COUNT_HW_MAX) | ||
113 | return -EINVAL; | ||
114 | |||
115 | mapping = (*event_map)[config]; | ||
111 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; | 116 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; |
112 | } | 117 | } |
113 | 118 | ||
@@ -317,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events, | |||
317 | struct hw_perf_event fake_event = event->hw; | 322 | struct hw_perf_event fake_event = event->hw; |
318 | struct pmu *leader_pmu = event->group_leader->pmu; | 323 | struct pmu *leader_pmu = event->group_leader->pmu; |
319 | 324 | ||
325 | if (is_software_event(event)) | ||
326 | return 1; | ||
327 | |||
320 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) | 328 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) |
321 | return 1; | 329 | return 1; |
322 | 330 | ||
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index ff985e3d8b72..1ac0bbbdddb2 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S | |||
@@ -214,6 +214,7 @@ __kvm_hyp_code_start: | |||
214 | mrs x21, tpidr_el1 | 214 | mrs x21, tpidr_el1 |
215 | mrs x22, amair_el1 | 215 | mrs x22, amair_el1 |
216 | mrs x23, cntkctl_el1 | 216 | mrs x23, cntkctl_el1 |
217 | mrs x24, par_el1 | ||
217 | 218 | ||
218 | stp x4, x5, [x3] | 219 | stp x4, x5, [x3] |
219 | stp x6, x7, [x3, #16] | 220 | stp x6, x7, [x3, #16] |
@@ -225,6 +226,7 @@ __kvm_hyp_code_start: | |||
225 | stp x18, x19, [x3, #112] | 226 | stp x18, x19, [x3, #112] |
226 | stp x20, x21, [x3, #128] | 227 | stp x20, x21, [x3, #128] |
227 | stp x22, x23, [x3, #144] | 228 | stp x22, x23, [x3, #144] |
229 | str x24, [x3, #160] | ||
228 | .endm | 230 | .endm |
229 | 231 | ||
230 | .macro restore_sysregs | 232 | .macro restore_sysregs |
@@ -243,6 +245,7 @@ __kvm_hyp_code_start: | |||
243 | ldp x18, x19, [x3, #112] | 245 | ldp x18, x19, [x3, #112] |
244 | ldp x20, x21, [x3, #128] | 246 | ldp x20, x21, [x3, #128] |
245 | ldp x22, x23, [x3, #144] | 247 | ldp x22, x23, [x3, #144] |
248 | ldr x24, [x3, #160] | ||
246 | 249 | ||
247 | msr vmpidr_el2, x4 | 250 | msr vmpidr_el2, x4 |
248 | msr csselr_el1, x5 | 251 | msr csselr_el1, x5 |
@@ -264,6 +267,7 @@ __kvm_hyp_code_start: | |||
264 | msr tpidr_el1, x21 | 267 | msr tpidr_el1, x21 |
265 | msr amair_el1, x22 | 268 | msr amair_el1, x22 |
266 | msr cntkctl_el1, x23 | 269 | msr cntkctl_el1, x23 |
270 | msr par_el1, x24 | ||
267 | .endm | 271 | .endm |
268 | 272 | ||
269 | .macro skip_32bit_state tmp, target | 273 | .macro skip_32bit_state tmp, target |
@@ -600,6 +604,8 @@ END(__kvm_vcpu_run) | |||
600 | 604 | ||
601 | // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); | 605 | // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); |
602 | ENTRY(__kvm_tlb_flush_vmid_ipa) | 606 | ENTRY(__kvm_tlb_flush_vmid_ipa) |
607 | dsb ishst | ||
608 | |||
603 | kern_hyp_va x0 | 609 | kern_hyp_va x0 |
604 | ldr x2, [x0, #KVM_VTTBR] | 610 | ldr x2, [x0, #KVM_VTTBR] |
605 | msr vttbr_el2, x2 | 611 | msr vttbr_el2, x2 |
@@ -621,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa) | |||
621 | ENDPROC(__kvm_tlb_flush_vmid_ipa) | 627 | ENDPROC(__kvm_tlb_flush_vmid_ipa) |
622 | 628 | ||
623 | ENTRY(__kvm_flush_vm_context) | 629 | ENTRY(__kvm_flush_vm_context) |
630 | dsb ishst | ||
624 | tlbi alle1is | 631 | tlbi alle1is |
625 | ic ialluis | 632 | ic ialluis |
626 | dsb sy | 633 | dsb sy |
@@ -753,6 +760,10 @@ el1_trap: | |||
753 | */ | 760 | */ |
754 | tbnz x1, #7, 1f // S1PTW is set | 761 | tbnz x1, #7, 1f // S1PTW is set |
755 | 762 | ||
763 | /* Preserve PAR_EL1 */ | ||
764 | mrs x3, par_el1 | ||
765 | push x3, xzr | ||
766 | |||
756 | /* | 767 | /* |
757 | * Permission fault, HPFAR_EL2 is invalid. | 768 | * Permission fault, HPFAR_EL2 is invalid. |
758 | * Resolve the IPA the hard way using the guest VA. | 769 | * Resolve the IPA the hard way using the guest VA. |
@@ -766,6 +777,8 @@ el1_trap: | |||
766 | 777 | ||
767 | /* Read result */ | 778 | /* Read result */ |
768 | mrs x3, par_el1 | 779 | mrs x3, par_el1 |
780 | pop x0, xzr // Restore PAR_EL1 from the stack | ||
781 | msr par_el1, x0 | ||
769 | tbnz x3, #0, 3f // Bail out if we failed the translation | 782 | tbnz x3, #0, 3f // Bail out if we failed the translation |
770 | ubfx x3, x3, #12, #36 // Extract IPA | 783 | ubfx x3, x3, #12, #36 // Extract IPA |
771 | lsl x3, x3, #4 // and present it like HPFAR | 784 | lsl x3, x3, #4 // and present it like HPFAR |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 94923609753b..02e9d09e1d80 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
211 | /* FAR_EL1 */ | 211 | /* FAR_EL1 */ |
212 | { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), | 212 | { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), |
213 | NULL, reset_unknown, FAR_EL1 }, | 213 | NULL, reset_unknown, FAR_EL1 }, |
214 | /* PAR_EL1 */ | ||
215 | { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), | ||
216 | NULL, reset_unknown, PAR_EL1 }, | ||
214 | 217 | ||
215 | /* PMINTENSET_EL1 */ | 218 | /* PMINTENSET_EL1 */ |
216 | { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), | 219 | { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), |
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 33a97929d055..77d442ab28c8 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig | |||
@@ -158,6 +158,7 @@ source "kernel/Kconfig.hz" | |||
158 | endmenu | 158 | endmenu |
159 | 159 | ||
160 | source "init/Kconfig" | 160 | source "init/Kconfig" |
161 | source "kernel/Kconfig.freezer" | ||
161 | source "drivers/Kconfig" | 162 | source "drivers/Kconfig" |
162 | source "fs/Kconfig" | 163 | source "fs/Kconfig" |
163 | 164 | ||
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index ef3a9de01954..bc5efc7c3f3f 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h | |||
@@ -22,7 +22,7 @@ | |||
22 | * unmapping a portion of the virtual address space, these hooks are called according to | 22 | * unmapping a portion of the virtual address space, these hooks are called according to |
23 | * the following template: | 23 | * the following template: |
24 | * | 24 | * |
25 | * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM | 25 | * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM |
26 | * { | 26 | * { |
27 | * for each vma that needs a shootdown do { | 27 | * for each vma that needs a shootdown do { |
28 | * tlb_start_vma(tlb, vma); | 28 | * tlb_start_vma(tlb, vma); |
@@ -58,6 +58,7 @@ struct mmu_gather { | |||
58 | unsigned int max; | 58 | unsigned int max; |
59 | unsigned char fullmm; /* non-zero means full mm flush */ | 59 | unsigned char fullmm; /* non-zero means full mm flush */ |
60 | unsigned char need_flush; /* really unmapped some PTEs? */ | 60 | unsigned char need_flush; /* really unmapped some PTEs? */ |
61 | unsigned long start, end; | ||
61 | unsigned long start_addr; | 62 | unsigned long start_addr; |
62 | unsigned long end_addr; | 63 | unsigned long end_addr; |
63 | struct page **pages; | 64 | struct page **pages; |
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb) | |||
155 | 156 | ||
156 | 157 | ||
157 | static inline void | 158 | static inline void |
158 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) | 159 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
159 | { | 160 | { |
160 | tlb->mm = mm; | 161 | tlb->mm = mm; |
161 | tlb->max = ARRAY_SIZE(tlb->local); | 162 | tlb->max = ARRAY_SIZE(tlb->local); |
162 | tlb->pages = tlb->local; | 163 | tlb->pages = tlb->local; |
163 | tlb->nr = 0; | 164 | tlb->nr = 0; |
164 | tlb->fullmm = full_mm_flush; | 165 | tlb->fullmm = !(start | (end+1)); |
166 | tlb->start = start; | ||
167 | tlb->end = end; | ||
165 | tlb->start_addr = ~0UL; | 168 | tlb->start_addr = ~0UL; |
166 | } | 169 | } |
167 | 170 | ||
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c index 2291a7d69d49..fa277aecfb78 100644 --- a/arch/m68k/emu/natfeat.c +++ b/arch/m68k/emu/natfeat.c | |||
@@ -18,9 +18,11 @@ | |||
18 | #include <asm/machdep.h> | 18 | #include <asm/machdep.h> |
19 | #include <asm/natfeat.h> | 19 | #include <asm/natfeat.h> |
20 | 20 | ||
21 | extern long nf_get_id2(const char *feature_name); | ||
22 | |||
21 | asm("\n" | 23 | asm("\n" |
22 | " .global nf_get_id,nf_call\n" | 24 | " .global nf_get_id2,nf_call\n" |
23 | "nf_get_id:\n" | 25 | "nf_get_id2:\n" |
24 | " .short 0x7300\n" | 26 | " .short 0x7300\n" |
25 | " rts\n" | 27 | " rts\n" |
26 | "nf_call:\n" | 28 | "nf_call:\n" |
@@ -29,12 +31,25 @@ asm("\n" | |||
29 | "1: moveq.l #0,%d0\n" | 31 | "1: moveq.l #0,%d0\n" |
30 | " rts\n" | 32 | " rts\n" |
31 | " .section __ex_table,\"a\"\n" | 33 | " .section __ex_table,\"a\"\n" |
32 | " .long nf_get_id,1b\n" | 34 | " .long nf_get_id2,1b\n" |
33 | " .long nf_call,1b\n" | 35 | " .long nf_call,1b\n" |
34 | " .previous"); | 36 | " .previous"); |
35 | EXPORT_SYMBOL_GPL(nf_get_id); | ||
36 | EXPORT_SYMBOL_GPL(nf_call); | 37 | EXPORT_SYMBOL_GPL(nf_call); |
37 | 38 | ||
39 | long nf_get_id(const char *feature_name) | ||
40 | { | ||
41 | /* feature_name may be in vmalloc()ed memory, so make a copy */ | ||
42 | char name_copy[32]; | ||
43 | size_t n; | ||
44 | |||
45 | n = strlcpy(name_copy, feature_name, sizeof(name_copy)); | ||
46 | if (n >= sizeof(name_copy)) | ||
47 | return 0; | ||
48 | |||
49 | return nf_get_id2(name_copy); | ||
50 | } | ||
51 | EXPORT_SYMBOL_GPL(nf_get_id); | ||
52 | |||
38 | void nfprint(const char *fmt, ...) | 53 | void nfprint(const char *fmt, ...) |
39 | { | 54 | { |
40 | static char buf[256]; | 55 | static char buf[256]; |
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h index 444ea8a09e9f..ef881cfbbca9 100644 --- a/arch/m68k/include/asm/div64.h +++ b/arch/m68k/include/asm/div64.h | |||
@@ -15,16 +15,17 @@ | |||
15 | unsigned long long n64; \ | 15 | unsigned long long n64; \ |
16 | } __n; \ | 16 | } __n; \ |
17 | unsigned long __rem, __upper; \ | 17 | unsigned long __rem, __upper; \ |
18 | unsigned long __base = (base); \ | ||
18 | \ | 19 | \ |
19 | __n.n64 = (n); \ | 20 | __n.n64 = (n); \ |
20 | if ((__upper = __n.n32[0])) { \ | 21 | if ((__upper = __n.n32[0])) { \ |
21 | asm ("divul.l %2,%1:%0" \ | 22 | asm ("divul.l %2,%1:%0" \ |
22 | : "=d" (__n.n32[0]), "=d" (__upper) \ | 23 | : "=d" (__n.n32[0]), "=d" (__upper) \ |
23 | : "d" (base), "0" (__n.n32[0])); \ | 24 | : "d" (__base), "0" (__n.n32[0])); \ |
24 | } \ | 25 | } \ |
25 | asm ("divu.l %2,%1:%0" \ | 26 | asm ("divu.l %2,%1:%0" \ |
26 | : "=d" (__n.n32[1]), "=d" (__rem) \ | 27 | : "=d" (__n.n32[1]), "=d" (__rem) \ |
27 | : "d" (base), "1" (__upper), "0" (__n.n32[1])); \ | 28 | : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \ |
28 | (n) = __n.n64; \ | 29 | (n) = __n.n64; \ |
29 | __rem; \ | 30 | __rem; \ |
30 | }) | 31 | }) |
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index d22a4ecffff4..4fab52294d98 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -28,7 +28,7 @@ config MICROBLAZE | |||
28 | select GENERIC_CLOCKEVENTS | 28 | select GENERIC_CLOCKEVENTS |
29 | select GENERIC_IDLE_POLL_SETUP | 29 | select GENERIC_IDLE_POLL_SETUP |
30 | select MODULES_USE_ELF_RELA | 30 | select MODULES_USE_ELF_RELA |
31 | select CLONE_BACKWARDS | 31 | select CLONE_BACKWARDS3 |
32 | 32 | ||
33 | config SWAP | 33 | config SWAP |
34 | def_bool n | 34 | def_bool n |
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index e773659ccf9f..46048d24328c 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c | |||
@@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | |||
803 | dec_insn.next_pc_inc; | 803 | dec_insn.next_pc_inc; |
804 | return 1; | 804 | return 1; |
805 | break; | 805 | break; |
806 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
807 | case lwc2_op: /* This is bbit0 on Octeon */ | ||
808 | if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0) | ||
809 | *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); | ||
810 | else | ||
811 | *contpc = regs->cp0_epc + 8; | ||
812 | return 1; | ||
813 | case ldc2_op: /* This is bbit032 on Octeon */ | ||
814 | if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0) | ||
815 | *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); | ||
816 | else | ||
817 | *contpc = regs->cp0_epc + 8; | ||
818 | return 1; | ||
819 | case swc2_op: /* This is bbit1 on Octeon */ | ||
820 | if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) | ||
821 | *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); | ||
822 | else | ||
823 | *contpc = regs->cp0_epc + 8; | ||
824 | return 1; | ||
825 | case sdc2_op: /* This is bbit132 on Octeon */ | ||
826 | if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) | ||
827 | *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); | ||
828 | else | ||
829 | *contpc = regs->cp0_epc + 8; | ||
830 | return 1; | ||
831 | #endif | ||
806 | case cop0_op: | 832 | case cop0_op: |
807 | case cop1_op: | 833 | case cop1_op: |
808 | case cop2_op: | 834 | case cop2_op: |
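The Octeon cases added to isBranchInstr() use the standard MIPS branch arithmetic: a taken bbit branch continues at epc + 4 + (simm << 2), with the signed 16-bit offset counted in instruction words from the delay slot, while a not-taken branch resumes past the delay slot at epc + 8. A small self-contained sketch of that target computation; the function name and arguments are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/*
 * Taken bbit0/bbit1 branches land at epc + 4 + simm*4 (simm*4 == simm << 2),
 * not-taken branches skip the delay slot and resume at epc + 8.
 */
static uint64_t bbit_target(uint64_t epc, uint64_t rs, unsigned int bit,
                            int16_t simm, int branch_if_set)
{
        int set = (rs >> bit) & 1;

        if (set == branch_if_set)
                return epc + 4 + (int64_t)simm * 4;
        return epc + 8;
}

int main(void)
{
        /* bbit0 rs, 3, -2: branch back two instructions when bit 3 is clear */
        printf("%#llx\n", (unsigned long long)bbit_target(0x1000, 0x0, 3, -2, 0));
        /* bit 3 is set here, so the branch is not taken */
        printf("%#llx\n", (unsigned long long)bbit_target(0x1000, 0x8, 3, -2, 0));
        return 0;
}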
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 99dbab1c59ac..d60bf98fa5cf 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig | |||
@@ -55,6 +55,7 @@ config GENERIC_CSUM | |||
55 | 55 | ||
56 | source "init/Kconfig" | 56 | source "init/Kconfig" |
57 | 57 | ||
58 | source "kernel/Kconfig.freezer" | ||
58 | 59 | ||
59 | menu "Processor type and features" | 60 | menu "Processor type and features" |
60 | 61 | ||
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index dbd9d3c991e8..9cf59816d3e9 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -979,6 +979,7 @@ config RELOCATABLE | |||
979 | must live at a different physical address than the primary | 979 | must live at a different physical address than the primary |
980 | kernel. | 980 | kernel. |
981 | 981 | ||
982 | # This value must have zeroes in the bottom 60 bits otherwise lots will break | ||
982 | config PAGE_OFFSET | 983 | config PAGE_OFFSET |
983 | hex | 984 | hex |
984 | default "0xc000000000000000" | 985 | default "0xc000000000000000" |
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 988c812aab5b..b9f426212d3a 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h | |||
@@ -211,9 +211,19 @@ extern long long virt_phys_offset; | |||
211 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET)) | 211 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET)) |
212 | #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET) | 212 | #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET) |
213 | #else | 213 | #else |
214 | #ifdef CONFIG_PPC64 | ||
215 | /* | ||
216 | * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET | ||
217 | * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit. | ||
218 | */ | ||
219 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET)) | ||
220 | #define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL) | ||
221 | |||
222 | #else /* 32-bit, non book E */ | ||
214 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START)) | 223 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START)) |
215 | #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START) | 224 | #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START) |
216 | #endif | 225 | #endif |
226 | #endif | ||
217 | 227 | ||
218 | /* | 228 | /* |
219 | * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI, | 229 | * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI, |
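The new 64-bit __va()/__pa() definitions rely on PAGE_OFFSET having zeroes in its low 60 bits: as long as physical addresses stay below 2^60, adding the offset is identical to OR-ing it in and subtracting it is identical to masking off the top nibble, which sidesteps the -mcmodel=medium miscompilation mentioned in the comment. A short check of that equivalence, with constants copied from the hunk and an arbitrary sample address:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET 0xc000000000000000ULL   /* zeroes in the low 60 bits */
#define PA_MASK     0x0fffffffffffffffULL

int main(void)
{
        uint64_t phys = 0x0000000123456000ULL;  /* any address below 2^60 */

        /* add/sub and or/and agree while phys never touches the top nibble */
        uint64_t virt_add = phys + PAGE_OFFSET;
        uint64_t virt_or  = phys | PAGE_OFFSET;
        assert(virt_add == virt_or);

        assert((virt_or - PAGE_OFFSET) == (virt_or & PA_MASK));

        printf("virt=%#llx phys=%#llx\n",
               (unsigned long long)virt_or,
               (unsigned long long)(virt_or & PA_MASK));
        return 0;
}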
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index d92f3871e9cf..e2a0a162299b 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c | |||
@@ -35,7 +35,13 @@ | |||
35 | #include <asm/vdso_datapage.h> | 35 | #include <asm/vdso_datapage.h> |
36 | #include <asm/vio.h> | 36 | #include <asm/vio.h> |
37 | #include <asm/mmu.h> | 37 | #include <asm/mmu.h> |
38 | #include <asm/machdep.h> | ||
38 | 39 | ||
40 | |||
41 | /* | ||
42 | * This isn't a module but we expose that to userspace | ||
43 | * via /proc so leave the definitions here | ||
44 | */ | ||
39 | #define MODULE_VERS "1.9" | 45 | #define MODULE_VERS "1.9" |
40 | #define MODULE_NAME "lparcfg" | 46 | #define MODULE_NAME "lparcfg" |
41 | 47 | ||
@@ -418,7 +424,8 @@ static void parse_em_data(struct seq_file *m) | |||
418 | { | 424 | { |
419 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | 425 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; |
420 | 426 | ||
421 | if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS) | 427 | if (firmware_has_feature(FW_FEATURE_LPAR) && |
428 | plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS) | ||
422 | seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]); | 429 | seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]); |
423 | } | 430 | } |
424 | 431 | ||
@@ -677,7 +684,6 @@ static int lparcfg_open(struct inode *inode, struct file *file) | |||
677 | } | 684 | } |
678 | 685 | ||
679 | static const struct file_operations lparcfg_fops = { | 686 | static const struct file_operations lparcfg_fops = { |
680 | .owner = THIS_MODULE, | ||
681 | .read = seq_read, | 687 | .read = seq_read, |
682 | .write = lparcfg_write, | 688 | .write = lparcfg_write, |
683 | .open = lparcfg_open, | 689 | .open = lparcfg_open, |
@@ -699,14 +705,4 @@ static int __init lparcfg_init(void) | |||
699 | } | 705 | } |
700 | return 0; | 706 | return 0; |
701 | } | 707 | } |
702 | 708 | machine_device_initcall(pseries, lparcfg_init); | |
703 | static void __exit lparcfg_cleanup(void) | ||
704 | { | ||
705 | remove_proc_subtree("powerpc/lparcfg", NULL); | ||
706 | } | ||
707 | |||
708 | module_init(lparcfg_init); | ||
709 | module_exit(lparcfg_cleanup); | ||
710 | MODULE_DESCRIPTION("Interface for LPAR configuration data"); | ||
711 | MODULE_AUTHOR("Dave Engebretsen"); | ||
712 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index b75d7d686684..6d6d92b4ea11 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h | |||
@@ -32,6 +32,7 @@ struct mmu_gather { | |||
32 | struct mm_struct *mm; | 32 | struct mm_struct *mm; |
33 | struct mmu_table_batch *batch; | 33 | struct mmu_table_batch *batch; |
34 | unsigned int fullmm; | 34 | unsigned int fullmm; |
35 | unsigned long start, end; | ||
35 | }; | 36 | }; |
36 | 37 | ||
37 | struct mmu_table_batch { | 38 | struct mmu_table_batch { |
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table); | |||
48 | 49 | ||
49 | static inline void tlb_gather_mmu(struct mmu_gather *tlb, | 50 | static inline void tlb_gather_mmu(struct mmu_gather *tlb, |
50 | struct mm_struct *mm, | 51 | struct mm_struct *mm, |
51 | unsigned int full_mm_flush) | 52 | unsigned long start, |
53 | unsigned long end) | ||
52 | { | 54 | { |
53 | tlb->mm = mm; | 55 | tlb->mm = mm; |
54 | tlb->fullmm = full_mm_flush; | 56 | tlb->start = start; |
57 | tlb->end = end; | ||
58 | tlb->fullmm = !(start | (end+1)); | ||
55 | tlb->batch = NULL; | 59 | tlb->batch = NULL; |
56 | if (tlb->fullmm) | 60 | if (tlb->fullmm) |
57 | __tlb_flush_mm(mm); | 61 | __tlb_flush_mm(mm); |
diff --git a/arch/score/Kconfig b/arch/score/Kconfig index c8def8bc9020..5fc237581caf 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig | |||
@@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT | |||
87 | 87 | ||
88 | source "init/Kconfig" | 88 | source "init/Kconfig" |
89 | 89 | ||
90 | source "kernel/Kconfig.freezer" | ||
91 | |||
90 | config MMU | 92 | config MMU |
91 | def_bool y | 93 | def_bool y |
92 | 94 | ||
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index e61d43d9f689..362192ed12fe 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h | |||
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) | |||
36 | } | 36 | } |
37 | 37 | ||
38 | static inline void | 38 | static inline void |
39 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) | 39 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
40 | { | 40 | { |
41 | tlb->mm = mm; | 41 | tlb->mm = mm; |
42 | tlb->fullmm = full_mm_flush; | 42 | tlb->start = start; |
43 | tlb->end = end; | ||
44 | tlb->fullmm = !(start | (end+1)); | ||
43 | 45 | ||
44 | init_tlb_gather(tlb); | 46 | init_tlb_gather(tlb); |
45 | } | 47 | } |
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index 4febacd1a8a1..29b0301c18aa 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h | |||
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) | |||
45 | } | 45 | } |
46 | 46 | ||
47 | static inline void | 47 | static inline void |
48 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) | 48 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
49 | { | 49 | { |
50 | tlb->mm = mm; | 50 | tlb->mm = mm; |
51 | tlb->fullmm = full_mm_flush; | 51 | tlb->start = start; |
52 | tlb->end = end; | ||
53 | tlb->fullmm = !(start | (end+1)); | ||
52 | 54 | ||
53 | init_tlb_gather(tlb); | 55 | init_tlb_gather(tlb); |
54 | } | 56 | } |
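The s390, sh, and um tlb_gather_mmu() conversions above all derive fullmm from the new range arguments as !(start | (end + 1)): callers requesting a full address-space teardown pass start = 0 and end = ~0UL, so end + 1 wraps to zero and the OR collapses to zero, while any bounded range leaves at least one bit set. A tiny sketch of that encoding:

#include <assert.h>
#include <stdio.h>

/* full-mm teardown is encoded by the caller as start == 0, end == ~0UL */
static int is_fullmm(unsigned long start, unsigned long end)
{
        return !(start | (end + 1));
}

int main(void)
{
        assert(is_fullmm(0, ~0UL) == 1);            /* whole address space */
        assert(is_fullmm(0x1000, 0x2000) == 0);     /* ordinary unmap range */
        assert(is_fullmm(0, 0x2000) == 0);          /* starts at 0 but bounded */
        printf("ok\n");
        return 0;
}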
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index 653668d140f9..4a8cb8d7cbd5 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h | |||
@@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params) | |||
35 | */ | 35 | */ |
36 | if (boot_params->sentinel) { | 36 | if (boot_params->sentinel) { |
37 | /* fields in boot_params are left uninitialized, clear them */ | 37 | /* fields in boot_params are left uninitialized, clear them */ |
38 | memset(&boot_params->olpc_ofw_header, 0, | 38 | memset(&boot_params->ext_ramdisk_image, 0, |
39 | (char *)&boot_params->efi_info - | 39 | (char *)&boot_params->efi_info - |
40 | (char *)&boot_params->olpc_ofw_header); | 40 | (char *)&boot_params->ext_ramdisk_image); |
41 | memset(&boot_params->kbd_status, 0, | 41 | memset(&boot_params->kbd_status, 0, |
42 | (char *)&boot_params->hdr - | 42 | (char *)&boot_params->hdr - |
43 | (char *)&boot_params->kbd_status); | 43 | (char *)&boot_params->kbd_status); |
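sanitize_boot_params() clears the span between two struct members by pointer arithmetic, and the fix above moves the start of the first span so that loader-provided fields are no longer wiped. A hedged userspace sketch of the same between-fields clearing idiom, on a toy struct rather than the real boot_params layout:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* toy layout standing in for struct boot_params */
struct params {
        unsigned int keep_before;   /* filled in by the loader, must survive */
        unsigned int scratch1;      /* possibly uninitialized: clear */
        unsigned int scratch2;      /* possibly uninitialized: clear */
        unsigned int keep_after;    /* valid data, must survive */
};

/* clear every byte from member 'from' up to (but not including) member 'to' */
#define clear_between(p, from, to) \
        memset(&(p)->from, 0, \
               offsetof(struct params, to) - offsetof(struct params, from))

int main(void)
{
        struct params p = { 1, 0xdead, 0xbeef, 4 };

        clear_between(&p, scratch1, keep_after);
        printf("%u %u %u %u\n", p.keep_before, p.scratch1, p.scratch2, p.keep_after);
        return 0;
}

Starting the range one member too early, as the old code did with olpc_ofw_header, silently destroys data the boot loader already filled in.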
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 50e5c58ced23..4c019179a57d 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h | |||
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table, | |||
59 | 59 | ||
60 | extern int __apply_microcode_amd(struct microcode_amd *mc_amd); | 60 | extern int __apply_microcode_amd(struct microcode_amd *mc_amd); |
61 | extern int apply_microcode_amd(int cpu); | 61 | extern int apply_microcode_amd(int cpu); |
62 | extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size); | 62 | extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); |
63 | 63 | ||
64 | #ifdef CONFIG_MICROCODE_AMD_EARLY | 64 | #ifdef CONFIG_MICROCODE_AMD_EARLY |
65 | #ifdef CONFIG_X86_32 | 65 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index f2b489cf1602..3bf2dd0cf61f 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h | |||
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
55 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) | 55 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
59 | |||
60 | /* | ||
61 | * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and | ||
62 | * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset | ||
63 | * into this range. | ||
64 | */ | ||
65 | #define PTE_FILE_MAX_BITS 28 | ||
66 | #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) | ||
67 | #define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1) | ||
68 | #define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1) | ||
69 | #define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1) | ||
70 | #define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) | ||
71 | #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) | ||
72 | #define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) | ||
73 | |||
74 | #define pte_to_pgoff(pte) \ | ||
75 | ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \ | ||
76 | & ((1U << PTE_FILE_BITS1) - 1))) \ | ||
77 | + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \ | ||
78 | & ((1U << PTE_FILE_BITS2) - 1)) \ | ||
79 | << (PTE_FILE_BITS1)) \ | ||
80 | + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \ | ||
81 | & ((1U << PTE_FILE_BITS3) - 1)) \ | ||
82 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | ||
83 | + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \ | ||
84 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)) | ||
85 | |||
86 | #define pgoff_to_pte(off) \ | ||
87 | ((pte_t) { .pte_low = \ | ||
88 | ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ | ||
89 | + ((((off) >> PTE_FILE_BITS1) \ | ||
90 | & ((1U << PTE_FILE_BITS2) - 1)) \ | ||
91 | << PTE_FILE_SHIFT2) \ | ||
92 | + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | ||
93 | & ((1U << PTE_FILE_BITS3) - 1)) \ | ||
94 | << PTE_FILE_SHIFT3) \ | ||
95 | + ((((off) >> \ | ||
96 | (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \ | ||
97 | << PTE_FILE_SHIFT4) \ | ||
98 | + _PAGE_FILE }) | ||
99 | |||
100 | #else /* CONFIG_MEM_SOFT_DIRTY */ | ||
101 | |||
58 | /* | 102 | /* |
59 | * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, | 103 | * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, |
60 | * split up the 29 bits of offset into this range: | 104 | * split up the 29 bits of offset into this range. |
61 | */ | 105 | */ |
62 | #define PTE_FILE_MAX_BITS 29 | 106 | #define PTE_FILE_MAX_BITS 29 |
63 | #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) | 107 | #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) |
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
88 | << PTE_FILE_SHIFT3) \ | 132 | << PTE_FILE_SHIFT3) \ |
89 | + _PAGE_FILE }) | 133 | + _PAGE_FILE }) |
90 | 134 | ||
135 | #endif /* CONFIG_MEM_SOFT_DIRTY */ | ||
136 | |||
91 | /* Encode and de-code a swap entry */ | 137 | /* Encode and de-code a swap entry */ |
92 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE | 138 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE |
93 | #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) | 139 | #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) |
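The soft-dirty variant of pte_to_pgoff()/pgoff_to_pte() packs a 28-bit file offset into whatever pte_low bits remain once the present, file, protnone, and soft-dirty bits are reserved; each PTE_FILE_SHIFTn/PTE_FILE_BITSn pair describes one usable run of bits, and the two macros are exact inverses. A simplified round-trip demonstration with assumed reserved-bit positions (present = 0, file = 6, protnone = 8, soft-dirty = 11), not the authoritative x86 layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SHIFT1 1
#define SHIFT2 7
#define SHIFT3 9
#define SHIFT4 12
#define BITS1  (SHIFT2 - SHIFT1 - 1)   /* 5 bits */
#define BITS2  (SHIFT3 - SHIFT2 - 1)   /* 1 bit  */
#define BITS3  (SHIFT4 - SHIFT3 - 1)   /* 2 bits */

/* scatter the 28-bit offset into the free runs of a 32-bit pte_low */
static uint32_t pgoff_to_pte_low(uint32_t off)
{
        return ((off & ((1u << BITS1) - 1)) << SHIFT1)
             | (((off >> BITS1) & ((1u << BITS2) - 1)) << SHIFT2)
             | (((off >> (BITS1 + BITS2)) & ((1u << BITS3) - 1)) << SHIFT3)
             | ((off >> (BITS1 + BITS2 + BITS3)) << SHIFT4);
}

/* gather the runs back into a contiguous offset */
static uint32_t pte_low_to_pgoff(uint32_t pte)
{
        return ((pte >> SHIFT1) & ((1u << BITS1) - 1))
             | (((pte >> SHIFT2) & ((1u << BITS2) - 1)) << BITS1)
             | (((pte >> SHIFT3) & ((1u << BITS3) - 1)) << (BITS1 + BITS2))
             | ((pte >> SHIFT4) << (BITS1 + BITS2 + BITS3));
}

int main(void)
{
        uint32_t off;

        for (off = 0; off < (1u << 28); off += 1234567)   /* sample round trips */
                assert(pte_low_to_pgoff(pgoff_to_pte_low(off)) == off);

        printf("round trip ok\n");
        return 0;
}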
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 4cc9f2b7cdc3..81bb91b49a88 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h | |||
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp) | |||
179 | /* | 179 | /* |
180 | * Bits 0, 6 and 7 are taken in the low part of the pte, | 180 | * Bits 0, 6 and 7 are taken in the low part of the pte, |
181 | * put the 32 bits of offset into the high part. | 181 | * put the 32 bits of offset into the high part. |
182 | * | ||
183 | * For soft-dirty tracking 11 bit is taken from | ||
184 | * the low part of pte as well. | ||
182 | */ | 185 | */ |
183 | #define pte_to_pgoff(pte) ((pte).pte_high) | 186 | #define pte_to_pgoff(pte) ((pte).pte_high) |
184 | #define pgoff_to_pte(off) \ | 187 | #define pgoff_to_pte(off) \ |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 7dc305a46058..1c00631164c2 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -314,6 +314,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) | |||
314 | return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); | 314 | return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); |
315 | } | 315 | } |
316 | 316 | ||
317 | static inline pte_t pte_swp_mksoft_dirty(pte_t pte) | ||
318 | { | ||
319 | return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY); | ||
320 | } | ||
321 | |||
322 | static inline int pte_swp_soft_dirty(pte_t pte) | ||
323 | { | ||
324 | return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY; | ||
325 | } | ||
326 | |||
327 | static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) | ||
328 | { | ||
329 | return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY); | ||
330 | } | ||
331 | |||
332 | static inline pte_t pte_file_clear_soft_dirty(pte_t pte) | ||
333 | { | ||
334 | return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); | ||
335 | } | ||
336 | |||
337 | static inline pte_t pte_file_mksoft_dirty(pte_t pte) | ||
338 | { | ||
339 | return pte_set_flags(pte, _PAGE_SOFT_DIRTY); | ||
340 | } | ||
341 | |||
342 | static inline int pte_file_soft_dirty(pte_t pte) | ||
343 | { | ||
344 | return pte_flags(pte) & _PAGE_SOFT_DIRTY; | ||
345 | } | ||
346 | |||
317 | /* | 347 | /* |
318 | * Mask out unsupported bits in a present pgprot. Non-present pgprots | 348 | * Mask out unsupported bits in a present pgprot. Non-present pgprots |
319 | * can use those bits for other purposes, so leave them be. | 349 | * can use those bits for other purposes, so leave them be. |
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index c98ac63aae48..f4843e031131 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -61,12 +61,27 @@ | |||
61 | * they do not conflict with each other. | 61 | * they do not conflict with each other. |
62 | */ | 62 | */ |
63 | 63 | ||
64 | #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN | ||
65 | |||
64 | #ifdef CONFIG_MEM_SOFT_DIRTY | 66 | #ifdef CONFIG_MEM_SOFT_DIRTY |
65 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) | 67 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) |
66 | #else | 68 | #else |
67 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) | 69 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) |
68 | #endif | 70 | #endif |
69 | 71 | ||
72 | /* | ||
73 | * Tracking soft dirty bit when a page goes to a swap is tricky. | ||
74 | * We need a bit which can be stored in pte _and_ not conflict | ||
75 | * with swap entry format. On x86 bits 6 and 7 are *not* involved | ||
76 | * into swap entry computation, but bit 6 is used for nonlinear | ||
77 | * file mapping, so we borrow bit 7 for soft dirty tracking. | ||
78 | */ | ||
79 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
80 | #define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE | ||
81 | #else | ||
82 | #define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0)) | ||
83 | #endif | ||
84 | |||
70 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) | 85 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) |
71 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) | 86 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) |
72 | #else | 87 | #else |
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 33692eaabab5..e3ddd7db723f 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) | |||
233 | #define arch_read_relax(lock) cpu_relax() | 233 | #define arch_read_relax(lock) cpu_relax() |
234 | #define arch_write_relax(lock) cpu_relax() | 234 | #define arch_write_relax(lock) cpu_relax() |
235 | 235 | ||
236 | /* The {read|write|spin}_lock() on x86 are full memory barriers. */ | ||
237 | static inline void smp_mb__after_lock(void) { } | ||
238 | #define ARCH_HAS_SMP_MB_AFTER_LOCK | ||
239 | |||
240 | #endif /* _ASM_X86_SPINLOCK_H */ | 236 | #endif /* _ASM_X86_SPINLOCK_H */ |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index f654ecefea5b..08a089043ccf 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) | |||
512 | 512 | ||
513 | static const int amd_erratum_383[]; | 513 | static const int amd_erratum_383[]; |
514 | static const int amd_erratum_400[]; | 514 | static const int amd_erratum_400[]; |
515 | static bool cpu_has_amd_erratum(const int *erratum); | 515 | static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); |
516 | 516 | ||
517 | static void init_amd(struct cpuinfo_x86 *c) | 517 | static void init_amd(struct cpuinfo_x86 *c) |
518 | { | 518 | { |
@@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c) | |||
729 | value &= ~(1ULL << 24); | 729 | value &= ~(1ULL << 24); |
730 | wrmsrl_safe(MSR_AMD64_BU_CFG2, value); | 730 | wrmsrl_safe(MSR_AMD64_BU_CFG2, value); |
731 | 731 | ||
732 | if (cpu_has_amd_erratum(amd_erratum_383)) | 732 | if (cpu_has_amd_erratum(c, amd_erratum_383)) |
733 | set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); | 733 | set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); |
734 | } | 734 | } |
735 | 735 | ||
736 | if (cpu_has_amd_erratum(amd_erratum_400)) | 736 | if (cpu_has_amd_erratum(c, amd_erratum_400)) |
737 | set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); | 737 | set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); |
738 | 738 | ||
739 | rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); | 739 | rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); |
@@ -878,23 +878,13 @@ static const int amd_erratum_400[] = | |||
878 | static const int amd_erratum_383[] = | 878 | static const int amd_erratum_383[] = |
879 | AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); | 879 | AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); |
880 | 880 | ||
881 | static bool cpu_has_amd_erratum(const int *erratum) | 881 | |
882 | static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) | ||
882 | { | 883 | { |
883 | struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); | ||
884 | int osvw_id = *erratum++; | 884 | int osvw_id = *erratum++; |
885 | u32 range; | 885 | u32 range; |
886 | u32 ms; | 886 | u32 ms; |
887 | 887 | ||
888 | /* | ||
889 | * If called early enough that current_cpu_data hasn't been initialized | ||
890 | * yet, fall back to boot_cpu_data. | ||
891 | */ | ||
892 | if (cpu->x86 == 0) | ||
893 | cpu = &boot_cpu_data; | ||
894 | |||
895 | if (cpu->x86_vendor != X86_VENDOR_AMD) | ||
896 | return false; | ||
897 | |||
898 | if (osvw_id >= 0 && osvw_id < 65536 && | 888 | if (osvw_id >= 0 && osvw_id < 65536 && |
899 | cpu_has(cpu, X86_FEATURE_OSVW)) { | 889 | cpu_has(cpu, X86_FEATURE_OSVW)) { |
900 | u64 osvw_len; | 890 | u64 osvw_len; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index fbc9210b45bc..a45d8d4ace10 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -2270,6 +2270,7 @@ __init int intel_pmu_init(void) | |||
2270 | case 70: | 2270 | case 70: |
2271 | case 71: | 2271 | case 71: |
2272 | case 63: | 2272 | case 63: |
2273 | case 69: | ||
2273 | x86_pmu.late_ack = true; | 2274 | x86_pmu.late_ack = true; |
2274 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); | 2275 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
2275 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); | 2276 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index e9696d8269ba..34c11ae9bda3 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c | |||
@@ -352,8 +352,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = { | |||
352 | static struct uncore_event_desc snbep_uncore_qpi_events[] = { | 352 | static struct uncore_event_desc snbep_uncore_qpi_events[] = { |
353 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), | 353 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), |
354 | INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), | 354 | INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), |
355 | INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"), | 355 | INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), |
356 | INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"), | 356 | INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), |
357 | { /* end: all zeroes */ }, | 357 | { /* end: all zeroes */ }, |
358 | }; | 358 | }; |
359 | 359 | ||
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 7a0adb7ee433..7123b5df479d 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) | |||
145 | return 0; | 145 | return 0; |
146 | } | 146 | } |
147 | 147 | ||
148 | static unsigned int verify_patch_size(int cpu, u32 patch_size, | 148 | static unsigned int verify_patch_size(u8 family, u32 patch_size, |
149 | unsigned int size) | 149 | unsigned int size) |
150 | { | 150 | { |
151 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
152 | u32 max_size; | 151 | u32 max_size; |
153 | 152 | ||
154 | #define F1XH_MPB_MAX_SIZE 2048 | 153 | #define F1XH_MPB_MAX_SIZE 2048 |
@@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, | |||
156 | #define F15H_MPB_MAX_SIZE 4096 | 155 | #define F15H_MPB_MAX_SIZE 4096 |
157 | #define F16H_MPB_MAX_SIZE 3458 | 156 | #define F16H_MPB_MAX_SIZE 3458 |
158 | 157 | ||
159 | switch (c->x86) { | 158 | switch (family) { |
160 | case 0x14: | 159 | case 0x14: |
161 | max_size = F14H_MPB_MAX_SIZE; | 160 | max_size = F14H_MPB_MAX_SIZE; |
162 | break; | 161 | break; |
@@ -277,9 +276,8 @@ static void cleanup(void) | |||
277 | * driver cannot continue functioning normally. In such cases, we tear | 276 | * driver cannot continue functioning normally. In such cases, we tear |
278 | * down everything we've used up so far and exit. | 277 | * down everything we've used up so far and exit. |
279 | */ | 278 | */ |
280 | static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) | 279 | static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) |
281 | { | 280 | { |
282 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
283 | struct microcode_header_amd *mc_hdr; | 281 | struct microcode_header_amd *mc_hdr; |
284 | struct ucode_patch *patch; | 282 | struct ucode_patch *patch; |
285 | unsigned int patch_size, crnt_size, ret; | 283 | unsigned int patch_size, crnt_size, ret; |
@@ -299,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) | |||
299 | 297 | ||
300 | /* check if patch is for the current family */ | 298 | /* check if patch is for the current family */ |
301 | proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); | 299 | proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); |
302 | if (proc_fam != c->x86) | 300 | if (proc_fam != family) |
303 | return crnt_size; | 301 | return crnt_size; |
304 | 302 | ||
305 | if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { | 303 | if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { |
@@ -308,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) | |||
308 | return crnt_size; | 306 | return crnt_size; |
309 | } | 307 | } |
310 | 308 | ||
311 | ret = verify_patch_size(cpu, patch_size, leftover); | 309 | ret = verify_patch_size(family, patch_size, leftover); |
312 | if (!ret) { | 310 | if (!ret) { |
313 | pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); | 311 | pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); |
314 | return crnt_size; | 312 | return crnt_size; |
@@ -339,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) | |||
339 | return crnt_size; | 337 | return crnt_size; |
340 | } | 338 | } |
341 | 339 | ||
342 | static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size) | 340 | static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, |
341 | size_t size) | ||
343 | { | 342 | { |
344 | enum ucode_state ret = UCODE_ERROR; | 343 | enum ucode_state ret = UCODE_ERROR; |
345 | unsigned int leftover; | 344 | unsigned int leftover; |
@@ -362,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz | |||
362 | } | 361 | } |
363 | 362 | ||
364 | while (leftover) { | 363 | while (leftover) { |
365 | crnt_size = verify_and_add_patch(cpu, fw, leftover); | 364 | crnt_size = verify_and_add_patch(family, fw, leftover); |
366 | if (crnt_size < 0) | 365 | if (crnt_size < 0) |
367 | return ret; | 366 | return ret; |
368 | 367 | ||
@@ -373,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz | |||
373 | return UCODE_OK; | 372 | return UCODE_OK; |
374 | } | 373 | } |
375 | 374 | ||
376 | enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) | 375 | enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) |
377 | { | 376 | { |
378 | enum ucode_state ret; | 377 | enum ucode_state ret; |
379 | 378 | ||
380 | /* free old equiv table */ | 379 | /* free old equiv table */ |
381 | free_equiv_cpu_table(); | 380 | free_equiv_cpu_table(); |
382 | 381 | ||
383 | ret = __load_microcode_amd(cpu, data, size); | 382 | ret = __load_microcode_amd(family, data, size); |
384 | 383 | ||
385 | if (ret != UCODE_OK) | 384 | if (ret != UCODE_OK) |
386 | cleanup(); | 385 | cleanup(); |
387 | 386 | ||
388 | #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) | 387 | #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) |
389 | /* save BSP's matching patch for early load */ | 388 | /* save BSP's matching patch for early load */ |
390 | if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { | 389 | if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) { |
391 | struct ucode_patch *p = find_patch(cpu); | 390 | struct ucode_patch *p = find_patch(smp_processor_id()); |
392 | if (p) { | 391 | if (p) { |
393 | memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); | 392 | memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); |
394 | memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), | 393 | memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), |
@@ -441,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, | |||
441 | goto fw_release; | 440 | goto fw_release; |
442 | } | 441 | } |
443 | 442 | ||
444 | ret = load_microcode_amd(cpu, fw->data, fw->size); | 443 | ret = load_microcode_amd(c->x86, fw->data, fw->size); |
445 | 444 | ||
446 | fw_release: | 445 | fw_release: |
447 | release_firmware(fw); | 446 | release_firmware(fw); |
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c index 1d14ffee5749..6073104ccaa3 100644 --- a/arch/x86/kernel/microcode_amd_early.c +++ b/arch/x86/kernel/microcode_amd_early.c | |||
@@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg) | |||
238 | uci->cpu_sig.sig = cpuid_eax(0x00000001); | 238 | uci->cpu_sig.sig = cpuid_eax(0x00000001); |
239 | } | 239 | } |
240 | #else | 240 | #else |
241 | static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c, | 241 | void load_ucode_amd_ap(void) |
242 | struct ucode_cpu_info *uci) | ||
243 | { | 242 | { |
243 | unsigned int cpu = smp_processor_id(); | ||
244 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | ||
244 | u32 rev, eax; | 245 | u32 rev, eax; |
245 | 246 | ||
246 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); | 247 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); |
247 | eax = cpuid_eax(0x00000001); | 248 | eax = cpuid_eax(0x00000001); |
248 | 249 | ||
249 | uci->cpu_sig.sig = eax; | ||
250 | uci->cpu_sig.rev = rev; | 250 | uci->cpu_sig.rev = rev; |
251 | c->microcode = rev; | 251 | uci->cpu_sig.sig = eax; |
252 | c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); | ||
253 | } | ||
254 | |||
255 | void load_ucode_amd_ap(void) | ||
256 | { | ||
257 | unsigned int cpu = smp_processor_id(); | ||
258 | |||
259 | collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu); | ||
260 | 252 | ||
261 | if (cpu && !ucode_loaded) { | 253 | if (cpu && !ucode_loaded) { |
262 | void *ucode; | 254 | void *ucode; |
@@ -265,8 +257,10 @@ void load_ucode_amd_ap(void) | |||
265 | return; | 257 | return; |
266 | 258 | ||
267 | ucode = (void *)(initrd_start + ucode_offset); | 259 | ucode = (void *)(initrd_start + ucode_offset); |
268 | if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK) | 260 | eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); |
261 | if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK) | ||
269 | return; | 262 | return; |
263 | |||
270 | ucode_loaded = true; | 264 | ucode_loaded = true; |
271 | } | 265 | } |
272 | 266 | ||
@@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void) | |||
278 | { | 272 | { |
279 | enum ucode_state ret; | 273 | enum ucode_state ret; |
280 | void *ucode; | 274 | void *ucode; |
275 | u32 eax; | ||
276 | |||
281 | #ifdef CONFIG_X86_32 | 277 | #ifdef CONFIG_X86_32 |
282 | unsigned int bsp = boot_cpu_data.cpu_index; | 278 | unsigned int bsp = boot_cpu_data.cpu_index; |
283 | struct ucode_cpu_info *uci = ucode_cpu_info + bsp; | 279 | struct ucode_cpu_info *uci = ucode_cpu_info + bsp; |
@@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void) | |||
293 | return 0; | 289 | return 0; |
294 | 290 | ||
295 | ucode = (void *)(initrd_start + ucode_offset); | 291 | ucode = (void *)(initrd_start + ucode_offset); |
296 | ret = load_microcode_amd(0, ucode, ucode_size); | 292 | eax = cpuid_eax(0x00000001); |
293 | eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); | ||
294 | |||
295 | ret = load_microcode_amd(eax, ucode, ucode_size); | ||
297 | if (ret != UCODE_OK) | 296 | if (ret != UCODE_OK) |
298 | return -EINVAL; | 297 | return -EINVAL; |
299 | 298 | ||
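Several of the microcode-loader hunks replace a cpu number with the CPU family, computed directly from CPUID leaf 1 as ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff), so patch matching and the size check can run before cpu_data() is populated. A minimal illustration of that computation; the sample EAX value is just one example of a family-15h signature:

#include <stdint.h>
#include <stdio.h>

/* base family (bits 8-11) plus extended family (bits 20-27) */
static uint8_t cpuid_family(uint32_t eax)
{
        return ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
}

int main(void)
{
        /* 0x00600f20: base family 0xf, extended family 0x6 -> family 0x15 */
        printf("family %#x\n", cpuid_family(0x00600f20));
        return 0;
}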
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index dbded5aedb81..30277e27431a 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, | |||
101 | *begin = new_begin; | 101 | *begin = new_begin; |
102 | } | 102 | } |
103 | } else { | 103 | } else { |
104 | *begin = TASK_UNMAPPED_BASE; | 104 | *begin = current->mm->mmap_legacy_base; |
105 | *end = TASK_SIZE; | 105 | *end = TASK_SIZE; |
106 | } | 106 | } |
107 | } | 107 | } |
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 62c29a5bfe26..25e7e1372bb2 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c | |||
@@ -112,11 +112,13 @@ static unsigned long mmap_legacy_base(void) | |||
112 | */ | 112 | */ |
113 | void arch_pick_mmap_layout(struct mm_struct *mm) | 113 | void arch_pick_mmap_layout(struct mm_struct *mm) |
114 | { | 114 | { |
115 | mm->mmap_legacy_base = mmap_legacy_base(); | ||
116 | mm->mmap_base = mmap_base(); | ||
117 | |||
115 | if (mmap_is_legacy()) { | 118 | if (mmap_is_legacy()) { |
116 | mm->mmap_base = mmap_legacy_base(); | 119 | mm->mmap_base = mm->mmap_legacy_base; |
117 | mm->get_unmapped_area = arch_get_unmapped_area; | 120 | mm->get_unmapped_area = arch_get_unmapped_area; |
118 | } else { | 121 | } else { |
119 | mm->mmap_base = mmap_base(); | ||
120 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | 122 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; |
121 | } | 123 | } |
122 | } | 124 | } |
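arch_pick_mmap_layout() now computes and stores both the legacy bottom-up base and the top-down base, and only afterwards decides which one mmap_base should point at, so code such as find_start_end() can always reach the legacy base even in a top-down process. A rough sketch of that keep-both-bases pattern, using toy values and a toy struct rather than the real mm_struct:

#include <stdio.h>

struct mm_layout {
        unsigned long mmap_legacy_base;   /* bottom-up base, always valid */
        unsigned long mmap_base;          /* base the allocator actually uses */
        int topdown;
};

static void pick_layout(struct mm_layout *mm, int legacy,
                        unsigned long legacy_base, unsigned long topdown_base)
{
        /* record both bases up front so either allocator can consult them later */
        mm->mmap_legacy_base = legacy_base;
        mm->mmap_base = topdown_base;
        mm->topdown = !legacy;

        if (legacy)
                mm->mmap_base = mm->mmap_legacy_base;
}

int main(void)
{
        struct mm_layout mm;

        pick_layout(&mm, 0, 0x10000000UL, 0x7ffff0000000UL);
        printf("topdown=%d base=%#lx legacy_base=%#lx\n",
               mm.topdown, mm.mmap_base, mm.mmap_legacy_base);
        return 0;
}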
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 056d11faef21..8f3eea6b80c5 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type) | |||
313 | e820_add_region(start, end - start, type); | 313 | e820_add_region(start, end - start, type); |
314 | } | 314 | } |
315 | 315 | ||
316 | void xen_ignore_unusable(struct e820entry *list, size_t map_size) | ||
317 | { | ||
318 | struct e820entry *entry; | ||
319 | unsigned int i; | ||
320 | |||
321 | for (i = 0, entry = list; i < map_size; i++, entry++) { | ||
322 | if (entry->type == E820_UNUSABLE) | ||
323 | entry->type = E820_RAM; | ||
324 | } | ||
325 | } | ||
326 | |||
316 | /** | 327 | /** |
317 | * machine_specific_memory_setup - Hook for machine specific memory setup. | 328 | * machine_specific_memory_setup - Hook for machine specific memory setup. |
318 | **/ | 329 | **/ |
@@ -353,6 +364,17 @@ char * __init xen_memory_setup(void) | |||
353 | } | 364 | } |
354 | BUG_ON(rc); | 365 | BUG_ON(rc); |
355 | 366 | ||
367 | /* | ||
368 | * Xen won't allow a 1:1 mapping to be created to UNUSABLE | ||
369 | * regions, so if we're using the machine memory map leave the | ||
370 | * region as RAM as it is in the pseudo-physical map. | ||
371 | * | ||
372 | * UNUSABLE regions in domUs are not handled and will need | ||
373 | * a patch in the future. | ||
374 | */ | ||
375 | if (xen_initial_domain()) | ||
376 | xen_ignore_unusable(map, memmap.nr_entries); | ||
377 | |||
356 | /* Make sure the Xen-supplied memory map is well-ordered. */ | 378 | /* Make sure the Xen-supplied memory map is well-ordered. */ |
357 | sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); | 379 | sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); |
358 | 380 | ||
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index ca92754eb846..b81c88e51daa 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -694,8 +694,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) | |||
694 | static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) | 694 | static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) |
695 | { | 695 | { |
696 | int rc; | 696 | int rc; |
697 | rc = native_cpu_up(cpu, tidle); | 697 | /* |
698 | WARN_ON (xen_smp_intr_init(cpu)); | 698 | * xen_smp_intr_init() needs to run before native_cpu_up() |
699 | * so that IPI vectors are set up on the booting CPU before | ||
700 | * it is marked online in native_cpu_up(). | ||
701 | */ | ||
702 | rc = xen_smp_intr_init(cpu); | ||
703 | WARN_ON(rc); | ||
704 | if (!rc) | ||
705 | rc = native_cpu_up(cpu, tidle); | ||
699 | return rc; | 706 | return rc; |
700 | } | 707 | } |
701 | 708 | ||