author    Ingo Molnar <mingo@kernel.org>    2013-10-11 01:39:37 -0400
committer Ingo Molnar <mingo@kernel.org>    2013-10-11 01:39:37 -0400
commit    ec0ad3d01f99d5e5b56a99a58f7003b99250dc65 (patch)
tree      85066cbea1cf8da6d099019adefaca90aae39234 /arch
parent    3354781a2184380046c8dd19144628d3c33991e6 (diff)
parent    3f0116c3238a96bc18ad4b4acefe4e7be32fa861 (diff)
Merge branch 'core/urgent' into sched/core

Merge in asm goto fix, to be able to apply the asm/rmwcc.h fix.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
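
The second parent above is the "asm goto" miscompilation workaround (GCC bug
58670): it funnels every "asm goto" user through an asm_volatile_goto() wrapper,
which is why the jump_label.h, cpufeature.h and mutex_64.h hunks below are all
one-line conversions. A sketch of the quirk as it lands in
include/linux/compiler-gcc.h (quoted from memory, so the comment wording is
approximate):

/*
 * GCC 'asm goto' miscompiles certain code sequences:
 *     http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
 */
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)

The trailing empty asm("") acts as a compiler barrier after the jump targets,
which is enough to stop affected GCC versions from miscompiling the
surrounding code.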
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Makefile                        |  9
-rw-r--r--  arch/arm/boot/Makefile                   | 16
-rw-r--r--  arch/arm/boot/install.sh                 | 14
-rw-r--r--  arch/arm/include/asm/jump_label.h        |  2
-rw-r--r--  arch/mips/include/asm/jump_label.h       |  2
-rw-r--r--  arch/powerpc/include/asm/jump_label.h    |  2
-rw-r--r--  arch/powerpc/kernel/irq.c                |  5
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  |  2
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c         | 18
-rw-r--r--  arch/s390/include/asm/jump_label.h       |  2
-rw-r--r--  arch/s390/kernel/crash_dump.c            | 42
-rw-r--r--  arch/s390/kernel/entry.S                 |  1
-rw-r--r--  arch/s390/kernel/entry64.S               |  1
-rw-r--r--  arch/s390/kernel/kprobes.c               |  6
-rw-r--r--  arch/sparc/include/asm/jump_label.h      |  2
-rw-r--r--  arch/tile/include/asm/atomic.h           |  5
-rw-r--r--  arch/tile/include/asm/atomic_32.h        | 27
-rw-r--r--  arch/tile/include/asm/cmpxchg.h          | 28
-rw-r--r--  arch/tile/include/asm/percpu.h           | 34
-rw-r--r--  arch/tile/kernel/hardwall.c              |  6
-rw-r--r--  arch/tile/kernel/intvec_32.S             |  3
-rw-r--r--  arch/tile/kernel/intvec_64.S             |  3
-rw-r--r--  arch/tile/kernel/stack.c                 | 12
-rw-r--r--  arch/tile/lib/atomic_32.c                |  8
-rw-r--r--  arch/x86/include/asm/cpufeature.h        |  6
-rw-r--r--  arch/x86/include/asm/jump_label.h        |  2
-rw-r--r--  arch/x86/include/asm/mutex_64.h          |  4
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c         | 11
-rw-r--r--  arch/x86/kvm/vmx.c                       | 24
29 files changed, 187 insertions(+), 110 deletions(-)
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index a37a50f575a2..db50b626be98 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -296,10 +296,15 @@ archprepare:
 # Convert bzImage to zImage
 bzImage: zImage
 
-zImage Image xipImage bootpImage uImage: vmlinux
+BOOT_TARGETS = zImage Image xipImage bootpImage uImage
+INSTALL_TARGETS = zinstall uinstall install
+
+PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
+$(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-zinstall uinstall install: vmlinux
+$(INSTALL_TARGETS):
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
 %.dtb: | scripts
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 84aa2caf07ed..ec2f8065f955 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -95,24 +95,24 @@ initrd:
 	@test "$(INITRD)" != "" || \
 	(echo You must specify INITRD; exit -1)
 
-install: $(obj)/Image
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+install:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
 
-zinstall: $(obj)/zImage
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+zinstall:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"
 
-uinstall: $(obj)/uImage
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+uinstall:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/uImage System.map "$(INSTALL_PATH)"
 
 zi:
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"
 
 i:
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
 
 subdir- := bootp compressed dts
diff --git a/arch/arm/boot/install.sh b/arch/arm/boot/install.sh
index 06ea7d42ce8e..2a45092a40e3 100644
--- a/arch/arm/boot/install.sh
+++ b/arch/arm/boot/install.sh
@@ -20,6 +20,20 @@
 # $4 - default install path (blank if root directory)
 #
 
+verify () {
+	if [ ! -f "$1" ]; then
+		echo "" 1>&2
+		echo " *** Missing file: $1" 1>&2
+		echo ' *** You need to run "make" before "make install".' 1>&2
+		echo "" 1>&2
+		exit 1
+	fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
 # User may have a custom install script
 if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
 if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index bfc198c75913..863c892b4aaa 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -16,7 +16,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\n\t"
+	asm_volatile_goto("1:\n\t"
 		 JUMP_LABEL_NOP "\n\t"
 		 ".pushsection __jump_table, \"aw\"\n\t"
 		 ".word 1b, %l[l_yes], %c0\n\t"
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 4d6d77ed9b9d..e194f957ca8c 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -22,7 +22,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\tnop\n\t"
+	asm_volatile_goto("1:\tnop\n\t"
 		"nop\n\t"
 		".pushsection __jump_table, \"aw\"\n\t"
 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index ae098c438f00..f016bb699b5f 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -19,7 +19,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\n\t"
+	asm_volatile_goto("1:\n\t"
 		 "nop\n\t"
 		 ".pushsection __jump_table, \"aw\"\n\t"
 		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 57d286a78f86..c7cb8c232d2f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -495,14 +495,15 @@ void __do_irq(struct pt_regs *regs)
 void do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct thread_info *curtp, *irqtp;
+	struct thread_info *curtp, *irqtp, *sirqtp;
 
 	/* Switch to the irq stack to handle this */
 	curtp = current_thread_info();
 	irqtp = hardirq_ctx[raw_smp_processor_id()];
+	sirqtp = softirq_ctx[raw_smp_processor_id()];
 
 	/* Already there ? */
-	if (unlikely(curtp == irqtp)) {
+	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
 		__do_irq(regs);
 		set_irq_regs(old_regs);
 		return;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 294b7af28cdd..c71103b8a748 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1066,7 +1066,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 BEGIN_FTR_SECTION
 	mfspr	r8, SPRN_DSCR
 	ld	r7, HSTATE_DSCR(r13)
-	std	r8, VCPU_DSCR(r7)
+	std	r8, VCPU_DSCR(r9)
 	mtspr	SPRN_DSCR, r7
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d729df4..c65593abae8e 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -332,6 +332,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	unsigned long hva;
 	int pfnmap = 0;
 	int tsize = BOOK3E_PAGESZ_4K;
+	int ret = 0;
+	unsigned long mmu_seq;
+	struct kvm *kvm = vcpu_e500->vcpu.kvm;
+
+	/* used to check for invalidations in progress */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
 
 	/*
 	 * Translate guest physical to true physical, acquiring
@@ -449,6 +456,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 	}
 
+	spin_lock(&kvm->mmu_lock);
+	if (mmu_notifier_retry(kvm, mmu_seq)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -457,10 +470,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	/* Clear i-cache for new pages */
 	kvmppc_mmu_flush_icache(pfn);
 
+out:
+	spin_unlock(&kvm->mmu_lock);
+
 	/* Drop refcount on page, so that mmu notifiers can clear it */
 	kvm_release_pfn_clean(pfn);
 
-	return 0;
+	return ret;
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
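
The three e500_mmu_host.c hunks above are the usual KVM defence against a
concurrent mmu-notifier invalidation: snapshot kvm->mmu_notifier_seq before the
(possibly sleeping) pfn lookup, then re-check it under kvm->mmu_lock before
installing the mapping. Condensed into one place, the pattern looks roughly
like the sketch below (illustration only, with the e500-specific steps elided
as comments; the function name is hypothetical and it is not compilable on
its own):

static int shadow_map_sketch(struct kvm *kvm /* , gfn, tlbe, ... */)
{
	unsigned long mmu_seq;

	mmu_seq = kvm->mmu_notifier_seq;	/* snapshot before the lookup */
	smp_rmb();				/* pairs with the notifier's update */

	/* ... translate gfn to pfn; this may fault in pages and sleep ... */

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/* an invalidation ran meanwhile; drop the work and retry */
		spin_unlock(&kvm->mmu_lock);
		return -EAGAIN;
	}
	/* ... install the shadow TLB entry while holding mmu_lock ... */
	spin_unlock(&kvm->mmu_lock);
	return 0;
}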
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 6c32190dc73e..346b1c85ffb4 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -15,7 +15,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("0:	brcl 0,0\n"
+	asm_volatile_goto("0:	brcl 0,0\n"
 		".pushsection __jump_table, \"aw\"\n"
 		ASM_ALIGN "\n"
 		ASM_PTR " 0b, %l[label], %0\n"
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index c84f33d51f7b..7dd21720e5b0 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -40,28 +40,26 @@ static inline void *load_real_addr(void *addr)
 }
 
 /*
- * Copy up to one page to vmalloc or real memory
+ * Copy real to virtual or real memory
  */
-static ssize_t copy_page_real(void *buf, void *src, size_t csize)
+static int copy_from_realmem(void *dest, void *src, size_t count)
 {
-	size_t size;
+	unsigned long size;
+	int rc;
 
-	if (is_vmalloc_addr(buf)) {
-		BUG_ON(csize >= PAGE_SIZE);
-		/* If buf is not page aligned, copy first part */
-		size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
-		if (size) {
-			if (memcpy_real(load_real_addr(buf), src, size))
-				return -EFAULT;
-			buf += size;
-			src += size;
-		}
-		/* Copy second part */
-		size = csize - size;
-		return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
-	} else {
-		return memcpy_real(buf, src, csize);
-	}
+	if (!count)
+		return 0;
+	if (!is_vmalloc_or_module_addr(dest))
+		return memcpy_real(dest, src, count);
+	do {
+		size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
+		if (memcpy_real(load_real_addr(dest), src, size))
+			return -EFAULT;
+		count -= size;
+		dest += size;
+		src += size;
+	} while (count);
+	return 0;
 }
 
 /*
@@ -114,7 +112,7 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
 		rc = copy_to_user_real((void __force __user *) buf,
 				       (void *) src, csize);
 	else
-		rc = copy_page_real(buf, (void *) src, csize);
+		rc = copy_from_realmem(buf, (void *) src, csize);
 	return (rc == 0) ? rc : csize;
 }
 
@@ -210,7 +208,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
 	if (OLDMEM_BASE) {
 		if ((unsigned long) src < OLDMEM_SIZE) {
 			copied = min(count, OLDMEM_SIZE - (unsigned long) src);
-			rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
+			rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
 			if (rc)
 				return rc;
 		}
@@ -223,7 +221,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
 			return rc;
 		}
 	}
-	return memcpy_real(dest + copied, src + copied, count - copied);
+	return copy_from_realmem(dest + copied, src + copied, count - copied);
 }
 
 /*
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index cc30d1fb000c..0dc2b6d0a1ec 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -266,6 +266,7 @@ sysc_sigpending:
 	tm	__TI_flags+3(%r12),_TIF_SYSCALL
 	jno	sysc_return
 	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
+	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
 	xr	%r8,%r8			# svc 0 returns -ENOSYS
 	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
 	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2b2188b97c6a..e5b43c97a834 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -297,6 +297,7 @@ sysc_sigpending:
 	tm	__TI_flags+7(%r12),_TIF_SYSCALL
 	jno	sysc_return
 	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
+	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
 	lghi	%r8,0			# svc 0 returns -ENOSYS
 	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
 	cghi	%r1,NR_syscalls
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 0ce9fb245034..d86e64eddb42 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -67,6 +67,11 @@ static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
 	case 0xac:	/* stnsm */
 	case 0xad:	/* stosm */
 		return -EINVAL;
+	case 0xc6:
+		switch (insn[0] & 0x0f) {
+		case 0x00: /* exrl */
+			return -EINVAL;
+		}
 	}
 	switch (insn[0]) {
 	case 0x0101:	/* pr */
@@ -180,7 +185,6 @@ static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
 		break;
 	case 0xc6:
 		switch (insn[0] & 0x0f) {
-		case 0x00: /* exrl */
 		case 0x02: /* pfdrl */
 		case 0x04: /* cghrl */
 		case 0x05: /* chrl */
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 5080d16a832f..ec2e2e2aba7d 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -9,7 +9,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\n\t"
+	asm_volatile_goto("1:\n\t"
 		 "nop\n\t"
 		 "nop\n\t"
 		 ".pushsection __jump_table, \"aw\"\n\t"
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index d385eaadece7..709798460763 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
  *
  * Atomically sets @v to @i and returns old @v
  */
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
 {
 	return xchg64(&v->counter, n);
 }
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
  * Atomically checks if @v holds @o and replaces it with @n if so.
  * Returns the old value at @v.
  */
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
+					  long long n)
 {
 	return cmpxchg64(&v->counter, o, n);
 }
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 0d0395b1b152..1ad4a1f7d42b 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
 /* A 64bit atomic type */
 
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
  *
  * Atomically reads the value of @v.
  */
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	/*
 	 * Requires an atomic op to read both 32-bit parts consistently.
 	 * Casting away const is safe since the atomic support routines
 	 * do not write to memory if the value has not been modified.
 	 */
-	return _atomic64_xchg_add((u64 *)&v->counter, 0);
+	return _atomic64_xchg_add((long long *)&v->counter, 0);
 }
 
 /**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
 	_atomic64_xchg_add(&v->counter, i);
 }
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+					    long long u)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
  * atomic64_set() can't be just a raw store, since it would be lost if it
  * fell between the load and store of one of the other atomic ops.
  */
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
 {
 	_atomic64_xchg(&v->counter, n);
 }
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-				      int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+				    long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+				     long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+					    int *lock, long long o, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 4001d5eab4bb..0ccda3c425be 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
 int _atomic_xchg_add(int *v, int i);
 int _atomic_xchg_add_unless(int *v, int a, int u);
 int _atomic_cmpxchg(int *ptr, int o, int n);
-u64 _atomic64_xchg(u64 *v, u64 n);
-u64 _atomic64_xchg_add(u64 *v, u64 i);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+long long _atomic64_xchg(long long *v, long long n);
+long long _atomic64_xchg_add(long long *v, long long i);
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
+long long _atomic64_cmpxchg(long long *v, long long o, long long n);
 
 #define xchg(ptr, n) \
 	({ \
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		if (sizeof(*(ptr)) != 4) \
 			__cmpxchg_called_with_bad_pointer(); \
 		smp_mb(); \
-		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
+						(int)n); \
 	})
 
 #define xchg64(ptr, n) \
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		if (sizeof(*(ptr)) != 8) \
 			__xchg_called_with_bad_pointer(); \
 		smp_mb(); \
-		(typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
+		(typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
+					       (long long)(n)); \
 	})
 
 #define cmpxchg64(ptr, o, n) \
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		if (sizeof(*(ptr)) != 8) \
 			__cmpxchg_called_with_bad_pointer(); \
 		smp_mb(); \
-		(typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
+		(typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
+						  (long long)o, (long long)n); \
 	})
 
 #else
@@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	switch (sizeof(*(ptr))) { \
 	case 4: \
 		__x = (typeof(__x))(unsigned long) \
-			__insn_exch4((ptr), (u32)(unsigned long)(n)); \
+			__insn_exch4((ptr), \
+				     (u32)(unsigned long)(n)); \
 		break; \
 	case 8: \
 		__x = (typeof(__x)) \
 			__insn_exch((ptr), (unsigned long)(n)); \
 		break; \
 	default: \
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	switch (sizeof(*(ptr))) { \
 	case 4: \
 		__x = (typeof(__x))(unsigned long) \
-			__insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
+			__insn_cmpexch4((ptr), \
+					(u32)(unsigned long)(n)); \
 		break; \
 	case 8: \
-		__x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
+		__x = (typeof(__x))__insn_cmpexch((ptr), \
+						  (long long)(n)); \
 		break; \
 	default: \
 		__cmpxchg_called_with_bad_pointer(); \
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
index 63294f5a8efb..4f7ae39fa202 100644
--- a/arch/tile/include/asm/percpu.h
+++ b/arch/tile/include/asm/percpu.h
@@ -15,9 +15,37 @@
 #ifndef _ASM_TILE_PERCPU_H
 #define _ASM_TILE_PERCPU_H
 
-register unsigned long __my_cpu_offset __asm__("tp");
-#define __my_cpu_offset __my_cpu_offset
-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
+register unsigned long my_cpu_offset_reg asm("tp");
+
+#ifdef CONFIG_PREEMPT
+/*
+ * For full preemption, we can't just use the register variable
+ * directly, since we need barrier() to hazard against it, causing the
+ * compiler to reload anything computed from a previous "tp" value.
+ * But we also don't want to use volatile asm, since we'd like the
+ * compiler to be able to cache the value across multiple percpu reads.
+ * So we use a fake stack read as a hazard against barrier().
+ * The 'U' constraint is like 'm' but disallows postincrement.
+ */
+static inline unsigned long __my_cpu_offset(void)
+{
+	unsigned long tp;
+	register unsigned long *sp asm("sp");
+	asm("move %0, tp" : "=r" (tp) : "U" (*sp));
+	return tp;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+/*
+ * We don't need to hazard against barrier() since "tp" doesn't ever
+ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
+ * changes at function call points, at which we are already re-reading
+ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
+ */
+#define __my_cpu_offset my_cpu_offset_reg
+#endif
+
+#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
 
 #include <asm-generic/percpu.h>
 
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index df27a1fd94a3..531f4c365351 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = {
 		0,
 		"udn",
 		LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
-		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
+		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
 		NULL
 	},
 #ifndef __tilepro__
@@ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = {
 		1,  /* disabled pending hypervisor support */
 		"idn",
 		LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
-		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
+		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
 		NULL
 	},
 	{ /* access to user-space IPI */
@@ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = {
 		0,
 		"ipi",
 		LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
-		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
+		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
 		NULL
 	},
 #endif
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 088d5c141e68..2cbe6d5dd6b0 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -815,6 +815,9 @@ STD_ENTRY(interrupt_return)
 	}
 	bzt     r28, 1f
 	bnz     r29, 1f
+	/* Disable interrupts explicitly for preemption. */
+	IRQ_DISABLE(r20,r21)
+	TRACE_IRQS_OFF
 	jal     preempt_schedule_irq
 	FEEDBACK_REENTER(interrupt_return)
 1:
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index ec755d3f3734..b8fc497f2437 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -841,6 +841,9 @@ STD_ENTRY(interrupt_return)
 	}
 	beqzt   r28, 1f
 	bnez    r29, 1f
+	/* Disable interrupts explicitly for preemption. */
+	IRQ_DISABLE(r20,r21)
+	TRACE_IRQS_OFF
 	jal     preempt_schedule_irq
 	FEEDBACK_REENTER(interrupt_return)
 1:
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 362284af3afd..c93977a62116 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -23,6 +23,7 @@
 #include <linux/mmzone.h>
 #include <linux/dcache.h>
 #include <linux/fs.h>
+#include <linux/string.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
 #include <asm/ucontext.h>
@@ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt,
 	}
 
 	if (vma->vm_file) {
-		char *s;
 		p = d_path(&vma->vm_file->f_path, buf, bufsize);
 		if (IS_ERR(p))
 			p = "?";
-		s = strrchr(p, '/');
-		if (s)
-			p = s+1;
+		name = kbasename(p);
 	} else {
-		p = "anon";
+		name = "anon";
 	}
 
 	/* Generate a string description of the vma info. */
-	namelen = strlen(p);
+	namelen = strlen(name);
 	remaining = (bufsize - 1) - namelen;
-	memmove(buf, p, namelen);
+	memmove(buf, name, namelen);
 	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
 		 vma->vm_start, vma->vm_end - vma->vm_start);
 }
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 759efa337be8..c89b211fd9e7 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
 EXPORT_SYMBOL(_atomic_xor);
 
 
-u64 _atomic64_xchg(u64 *v, u64 n)
+long long _atomic64_xchg(long long *v, long long n)
 {
 	return __atomic64_xchg(v, __atomic_setup(v), n);
 }
 EXPORT_SYMBOL(_atomic64_xchg);
 
-u64 _atomic64_xchg_add(u64 *v, u64 i)
+long long _atomic64_xchg_add(long long *v, long long i)
 {
 	return __atomic64_xchg_add(v, __atomic_setup(v), i);
 }
 EXPORT_SYMBOL(_atomic64_xchg_add);
 
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
 {
 	/*
 	 * Note: argument order is switched here since it is easier
@@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
 }
 EXPORT_SYMBOL(_atomic64_xchg_add_unless);
 
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
+long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 {
 	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
 }
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index d3f5c63078d8..89270b4318db 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -374,7 +374,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 	 * Catch too early usage of this before alternatives
 	 * have run.
 	 */
-	asm goto("1: jmp %l[t_warn]\n"
+	asm_volatile_goto("1: jmp %l[t_warn]\n"
 		 "2:\n"
 		 ".section .altinstructions,\"a\"\n"
 		 " .long 1b - .\n"
@@ -388,7 +388,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 
 #endif
 
-	asm goto("1: jmp %l[t_no]\n"
+	asm_volatile_goto("1: jmp %l[t_no]\n"
 		 "2:\n"
 		 ".section .altinstructions,\"a\"\n"
 		 " .long 1b - .\n"
@@ -453,7 +453,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
 	 * have. Thus, we force the jump to the widest, 4-byte, signed relative
 	 * offset even though the last would often fit in less bytes.
 	 */
-	asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
+	asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
 		 "2:\n"
 		 ".section .altinstructions,\"a\"\n"
 		 " .long 1b - .\n"		/* src offset */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 64507f35800c..6a2cefb4395a 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -18,7 +18,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:"
+	asm_volatile_goto("1:"
 		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
 		".pushsection __jump_table, \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index e7e6751648ed..07537a44216e 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -20,7 +20,7 @@
 static inline void __mutex_fastpath_lock(atomic_t *v,
 					 void (*fail_fn)(atomic_t *))
 {
-	asm volatile goto(LOCK_PREFIX " decl %0\n"
+	asm_volatile_goto(LOCK_PREFIX " decl %0\n"
 			  " jns %l[exit]\n"
 			  : : "m" (v->counter)
 			  : "memory", "cc"
@@ -75,7 +75,7 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void __mutex_fastpath_unlock(atomic_t *v,
 					   void (*fail_fn)(atomic_t *))
 {
-	asm volatile goto(LOCK_PREFIX " incl %0\n"
+	asm_volatile_goto(LOCK_PREFIX " incl %0\n"
 			  " jg %l[exit]\n"
 			  : : "m" (v->counter)
 			  : "memory", "cc"
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 897783b3302a..9d8449158cf9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1888,10 +1888,7 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 	userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
 	userpg->pmc_width = x86_pmu.cntval_bits;
 
-	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
-		return;
-
-	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+	if (!sched_clock_stable)
 		return;
 
 	userpg->cap_user_time = 1;
@@ -1899,10 +1896,8 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 	userpg->time_shift = CYC2NS_SCALE_FACTOR;
 	userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
 
-	if (sched_clock_stable && !check_tsc_disabled()) {
-		userpg->cap_user_time_zero = 1;
-		userpg->time_zero = this_cpu_read(cyc2ns_offset);
-	}
+	userpg->cap_user_time_zero = 1;
+	userpg->time_zero = this_cpu_read(cyc2ns_offset);
 }
 
 /*
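
With cap_user_time and cap_user_time_zero now published whenever sched_clock
is stable, a user-space reader converts a raw TSC value to sched_clock time
using the shift/mult/offset fields of the mmapped perf_event_mmap_page. A
sketch of that conversion, following the algorithm documented in the perf
mmap ABI (include/uapi/linux/perf_event.h); the names are the ABI's fields,
not code from this patch:

	u64 cyc = rdtsc();	/* or the TSC value captured at sample time */
	u64 quot, rem, delta;

	quot  = cyc >> time_shift;
	rem   = cyc & (((u64)1 << time_shift) - 1);
	delta = time_offset + quot * time_mult +
		((rem * time_mult) >> time_shift);

The quotient/remainder split avoids overflowing the 64-bit multiply for large
cycle counts.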
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3b8e7459dd4d..2b2fce1b2009 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3255,25 +3255,29 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
 	if (!test_bit(VCPU_EXREG_PDPTR,
 		      (unsigned long *)&vcpu->arch.regs_dirty))
 		return;
 
 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
-		vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
-		vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
-		vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
+		vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
+		vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
+		vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
+		vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
 	}
 }
 
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
-		vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
-		vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
-		vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
+		mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
+		mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
+		mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
+		mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
 	}
 
 	__set_bit(VCPU_EXREG_PDPTR,
@@ -7777,10 +7781,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
 		vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
 		vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
-		__clear_bit(VCPU_EXREG_PDPTR,
-			    (unsigned long *)&vcpu->arch.regs_avail);
-		__clear_bit(VCPU_EXREG_PDPTR,
-			    (unsigned long *)&vcpu->arch.regs_dirty);
 	}
 
 	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);