author    David S. Miller <davem@davemloft.net>  2012-04-12 19:41:23 -0400
committer David S. Miller <davem@davemloft.net>  2012-04-12 19:41:23 -0400
commit    011e3c63251be832d23df9f0697626ab7b354d02 (patch)
tree      2cad5b58c274c93ae49d9b58fb15d784d4dfd78f /arch
parent    c1412fce7eccae62b4de22494f6ab3ff8a90c0c6 (diff)
parent    ecca5c3acc0d0933d89abc44e60afb0cc8170e35 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'arch')
-rw-r--r--  arch/c6x/include/asm/irq.h | 4
-rw-r--r--  arch/c6x/kernel/irq.c | 13
-rw-r--r--  arch/powerpc/include/asm/irq.h | 2
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 39
-rw-r--r--  arch/powerpc/kernel/irq.c | 6
-rw-r--r--  arch/powerpc/kernel/process.c | 4
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c | 9
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S | 8
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S | 7
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 9
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S | 7
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 2
-rw-r--r--  arch/powerpc/platforms/ps3/interrupt.c | 3
-rw-r--r--  arch/sh/Kconfig.debug | 1
-rw-r--r--  arch/sh/boards/board-sh7785lcr.c | 1
-rw-r--r--  arch/sh/boards/mach-hp6xx/pm.c | 1
-rw-r--r--  arch/sh/drivers/dma/dma-sysfs.c | 2
-rw-r--r--  arch/sh/kernel/cpu/fpu.c | 1
-rw-r--r--  arch/sh/kernel/cpu/sh2a/fpu.c | 1
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c | 1
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7757.c | 2
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm.c | 1
-rw-r--r--  arch/sh/kernel/idle.c | 2
-rw-r--r--  arch/sh/kernel/kgdb.c | 1
-rw-r--r--  arch/sh/kernel/process_32.c | 1
-rw-r--r--  arch/sh/kernel/smp.c | 1
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-sigreturn.S | 35
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-trapa.S | 23
-rw-r--r--  arch/sh/mm/cache-sh4.c | 1
-rw-r--r--  arch/sh/mm/flush-sh4.c | 1
-rw-r--r--  arch/sh/mm/sram.c | 1
-rw-r--r--  arch/sparc/kernel/leon_pci.c | 13
-rw-r--r--  arch/sparc/mm/fault_32.c | 37
-rw-r--r--  arch/sparc/mm/fault_64.c | 37
-rw-r--r--  arch/tile/Kconfig | 8
-rw-r--r--  arch/tile/Makefile | 5
-rw-r--r--  arch/tile/include/arch/spr_def.h | 4
-rw-r--r--  arch/tile/include/asm/atomic.h | 50
-rw-r--r--  arch/tile/include/asm/atomic_32.h | 2
-rw-r--r--  arch/tile/include/asm/bitops_64.h | 8
-rw-r--r--  arch/tile/include/asm/cmpxchg.h | 73
-rw-r--r--  arch/tile/include/asm/irq.h | 2
-rw-r--r--  arch/tile/include/asm/spinlock_64.h | 2
-rw-r--r--  arch/tile/include/asm/stack.h | 1
-rw-r--r--  arch/tile/include/asm/traps.h | 6
-rw-r--r--  arch/tile/kernel/entry.S | 2
-rw-r--r--  arch/tile/kernel/intvec_32.S | 24
-rw-r--r--  arch/tile/kernel/intvec_64.S | 52
-rw-r--r--  arch/tile/kernel/module.c | 2
-rw-r--r--  arch/tile/kernel/proc.c | 4
-rw-r--r--  arch/tile/kernel/process.c | 3
-rw-r--r--  arch/tile/kernel/setup.c | 28
-rw-r--r--  arch/tile/kernel/single_step.c | 31
-rw-r--r--  arch/tile/kernel/smp.c | 8
-rw-r--r--  arch/tile/kernel/smpboot.c | 2
-rw-r--r--  arch/tile/kernel/stack.c | 232
-rw-r--r--  arch/tile/kernel/traps.c | 15
-rw-r--r--  arch/tile/lib/Makefile | 1
-rw-r--r--  arch/tile/lib/cacheflush.c | 30
-rw-r--r--  arch/tile/lib/memcpy_user_64.c | 8
-rw-r--r--  arch/tile/lib/spinlock_common.h | 2
-rw-r--r--  arch/tile/mm/fault.c | 22
-rw-r--r--  arch/tile/mm/homecache.c | 1
-rw-r--r--  arch/tile/mm/init.c | 26
-rw-r--r--  arch/tile/mm/pgtable.c | 38
-rw-r--r--  arch/um/drivers/cow.h | 35
-rw-r--r--  arch/um/drivers/cow_user.c | 43
-rw-r--r--  arch/um/drivers/mconsole_kern.c | 1
-rw-r--r--  arch/um/include/asm/Kbuild | 3
-rw-r--r--  arch/um/kernel/Makefile | 7
-rw-r--r--  arch/um/kernel/process.c | 6
-rw-r--r--  arch/um/kernel/skas/mmu.c | 1
-rw-r--r--  arch/x86/Makefile.um | 3
-rw-r--r--  arch/x86/include/asm/uaccess.h | 2
-rw-r--r--  arch/x86/include/asm/uaccess_32.h | 5
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 4
-rw-r--r--  arch/x86/kernel/kvm.c | 4
-rw-r--r--  arch/x86/kvm/pmu.c | 2
-rw-r--r--  arch/x86/kvm/vmx.c | 2
-rw-r--r--  arch/x86/lib/usercopy.c | 103
-rw-r--r--  arch/x86/lib/usercopy_32.c | 87
-rw-r--r--  arch/x86/lib/usercopy_64.c | 49
-rw-r--r--  arch/x86/um/asm/barrier.h | 75
-rw-r--r--  arch/x86/um/asm/system.h | 135
-rw-r--r--  arch/x86/xen/mmu.c | 4
-rw-r--r--  arch/x86/xen/smp.c | 2
88 files changed, 832 insertions, 714 deletions
diff --git a/arch/c6x/include/asm/irq.h b/arch/c6x/include/asm/irq.h
index f13b78d5e1ca..ab4577f93d96 100644
--- a/arch/c6x/include/asm/irq.h
+++ b/arch/c6x/include/asm/irq.h
@@ -42,10 +42,6 @@
 /* This number is used when no interrupt has been assigned */
 #define NO_IRQ 0
 
-struct irq_data;
-extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
-extern irq_hw_number_t virq_to_hw(unsigned int virq);
-
 extern void __init init_pic_c64xplus(void);
 
 extern void init_IRQ(void);
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c
index 65b8ddf54b44..c90fb5e82ad7 100644
--- a/arch/c6x/kernel/irq.c
+++ b/arch/c6x/kernel/irq.c
@@ -130,16 +130,3 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;
 }
-
-irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
-{
-	return d->hwirq;
-}
-EXPORT_SYMBOL_GPL(irqd_to_hwirq);
-
-irq_hw_number_t virq_to_hw(unsigned int virq)
-{
-	struct irq_data *irq_data = irq_get_irq_data(virq);
-	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
-}
-EXPORT_SYMBOL_GPL(virq_to_hw);
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index cf417e510736..e648af92ced1 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -33,8 +33,6 @@ extern atomic_t ppc_n_lost_interrupts;
 /* Same thing, used by the generic IRQ code */
 #define NR_IRQS_LEGACY		NUM_ISA_INTERRUPTS
 
-struct irq_data;
-extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
 extern irq_hw_number_t virq_to_hw(unsigned int virq);
 
 /**
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 3e57a00b8cba..ba3aeb4bc06a 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -206,40 +206,43 @@ reenable_mmu: /* re-enable mmu so we can */
 	andi.	r10,r10,MSR_EE	/* Did EE change? */
 	beq	1f
 
-	/* Save handler and return address into the 2 unused words
-	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
-	 * else can be recovered from the pt_regs except r3 which for
-	 * normal interrupts has been set to pt_regs and for syscalls
-	 * is an argument, so we temporarily use ORIG_GPR3 to save it
-	 */
-	stw	r9,8(r1)
-	stw	r11,12(r1)
-	stw	r3,ORIG_GPR3(r1)
 	/*
 	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
 	 * If from user mode there is only one stack frame on the stack, and
 	 * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
 	 * stack frame to make trace_hardirqs_off happy.
+	 *
+	 * This is handy because we also need to save a bunch of GPRs,
+	 * r3 can be different from GPR3(r1) at this point, r9 and r11
+	 * contains the old MSR and handler address respectively,
+	 * r4 & r5 can contain page fault arguments that need to be passed
+	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
+	 * they aren't useful past this point (aren't syscall arguments),
+	 * the rest is restored from the exception frame.
 	 */
+	stwu	r1,-32(r1)
+	stw	r9,8(r1)
+	stw	r11,12(r1)
+	stw	r3,16(r1)
+	stw	r4,20(r1)
+	stw	r5,24(r1)
 	andi.	r12,r12,MSR_PR
-	beq	11f
-	stwu	r1,-16(r1)
+	b	11f
 	bl	trace_hardirqs_off
-	addi	r1,r1,16
 	b	12f
-
 11:
 	bl	trace_hardirqs_off
 12:
+	lwz	r5,24(r1)
+	lwz	r4,20(r1)
+	lwz	r3,16(r1)
+	lwz	r11,12(r1)
+	lwz	r9,8(r1)
+	addi	r1,r1,32
 	lwz	r0,GPR0(r1)
-	lwz	r3,ORIG_GPR3(r1)
-	lwz	r4,GPR4(r1)
-	lwz	r5,GPR5(r1)
 	lwz	r6,GPR6(r1)
 	lwz	r7,GPR7(r1)
 	lwz	r8,GPR8(r1)
-	lwz	r9,8(r1)
-	lwz	r11,12(r1)
 1:	mtctr	r11
 	mtlr	r9
 	bctr			/* jump to handler */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 243dbabfe74d..5ec1b2354ca6 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -560,12 +560,6 @@ void do_softirq(void)
 	local_irq_restore(flags);
 }
 
-irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
-{
-	return d->hwirq;
-}
-EXPORT_SYMBOL_GPL(irqd_to_hwirq);
-
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(virq);
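The c6x and powerpc hunks above delete their private copies of irqd_to_hwirq(). A minimal, hedged sketch of the generic helper these call sites are assumed to rely on instead (a static inline over struct irq_data provided by the core IRQ code of this era, not part of this diff):

	/* Sketch only -- assumed generic replacement for the removed arch copies. */
	static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
	{
		return d->hwirq;
	}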
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index f88698c0f332..4937c9690090 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1235,7 +1235,7 @@ void __ppc64_runlatch_on(void)
 	ctrl |= CTRL_RUNLATCH;
 	mtspr(SPRN_CTRLT, ctrl);
 
-	ti->local_flags |= TLF_RUNLATCH;
+	ti->local_flags |= _TLF_RUNLATCH;
 }
 
 /* Called with hard IRQs off */
@@ -1244,7 +1244,7 @@ void __ppc64_runlatch_off(void)
 	struct thread_info *ti = current_thread_info();
 	unsigned long ctrl;
 
-	ti->local_flags &= ~TLF_RUNLATCH;
+	ti->local_flags &= ~_TLF_RUNLATCH;
 
 	ctrl = mfspr(SPRN_CTRLF);
 	ctrl &= ~CTRL_RUNLATCH;
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index bed1279aa6a8..e1b60f56f2a1 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -173,9 +173,9 @@ static void __init kvm_linear_init_one(ulong size, int count, int type)
 
 static struct kvmppc_linear_info *kvm_alloc_linear(int type)
 {
-	struct kvmppc_linear_info *ri;
+	struct kvmppc_linear_info *ri, *ret;
 
-	ri = NULL;
+	ret = NULL;
 	spin_lock(&linear_lock);
 	list_for_each_entry(ri, &free_linears, list) {
 		if (ri->type != type)
@@ -183,11 +183,12 @@ static struct kvmppc_linear_info *kvm_alloc_linear(int type)
 
 		list_del(&ri->list);
 		atomic_inc(&ri->use_count);
+		memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
+		ret = ri;
 		break;
 	}
 	spin_unlock(&linear_lock);
-	memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
-	return ri;
+	return ret;
 }
 
 static void kvm_release_linear(struct kvmppc_linear_info *ri)
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 3f7b674dd4bf..d3fb4df02c41 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -46,8 +46,10 @@ _GLOBAL(__kvmppc_vcore_entry)
 	/* Save host state to the stack */
 	stdu	r1, -SWITCH_FRAME_SIZE(r1)
 
-	/* Save non-volatile registers (r14 - r31) */
+	/* Save non-volatile registers (r14 - r31) and CR */
 	SAVE_NVGPRS(r1)
+	mfcr	r3
+	std	r3, _CCR(r1)
 
 	/* Save host DSCR */
 BEGIN_FTR_SECTION
@@ -157,8 +159,10 @@ kvmppc_handler_highmem:
 	 * R13 = PACA
 	 */
 
-	/* Restore non-volatile host registers (r14 - r31) */
+	/* Restore non-volatile host registers (r14 - r31) and CR */
 	REST_NVGPRS(r1)
+	ld	r4, _CCR(r1)
+	mtcr	r4
 
 	addi	r1, r1, SWITCH_FRAME_SIZE
 	ld	r0, PPC_LR_STKOFF(r1)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 0a8515a5c042..3e35383bdb21 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -84,6 +84,10 @@ kvm_start_entry:
 	/* Save non-volatile registers (r14 - r31) */
 	SAVE_NVGPRS(r1)
 
+	/* Save CR */
+	mfcr	r14
+	stw	r14, _CCR(r1)
+
 	/* Save LR */
 	PPC_STL	r0, _LINK(r1)
 
@@ -165,6 +169,9 @@ kvm_exit_loop:
 	PPC_LL	r4, _LINK(r1)
 	mtlr	r4
 
+	lwz	r14, _CCR(r1)
+	mtcr	r14
+
 	/* Restore non-volatile host registers (r14 - r31) */
 	REST_NVGPRS(r1)
 
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 642d88574b07..7759053d391b 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -777,6 +777,7 @@ program_interrupt:
 		}
 	}
 
+	preempt_disable();
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if
 		 * we aren't already exiting to userspace for some other
@@ -798,8 +799,6 @@ program_interrupt:
 			run->exit_reason = KVM_EXIT_INTR;
 			r = -EINTR;
 		} else {
-			preempt_disable();
-
 			/* In case an interrupt came in that was triggered
 			 * from userspace (like DEC), we need to check what
 			 * to inject now! */
@@ -881,7 +880,8 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 
 	switch (reg->id) {
 	case KVM_REG_PPC_HIOR:
-		r = put_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+		r = copy_to_user((u64 __user *)(long)reg->addr,
+				 &to_book3s(vcpu)->hior, sizeof(u64));
 		break;
 	default:
 		break;
@@ -896,7 +896,8 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 
 	switch (reg->id) {
 	case KVM_REG_PPC_HIOR:
-		r = get_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+		r = copy_from_user(&to_book3s(vcpu)->hior,
+				   (u64 __user *)(long)reg->addr, sizeof(u64));
 		if (!r)
 			to_book3s(vcpu)->hior_explicit = true;
 		break;
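The one_reg accessors above stop using get_user()/put_user() for the 64-bit HIOR value, likely because an 8-byte get_user()/put_user() is not available on 32-bit hosts. A hedged sketch of the resulting access pattern (names taken from the hunk, error handling simplified relative to it; copy_to_user()/copy_from_user() return the number of bytes left uncopied, so nonzero means the access faulted):

	/* Sketch only -- illustrates the copy_* pattern, not the exact hunk. */
	u64 __user *uaddr = (u64 __user *)(long)reg->addr;

	if (copy_from_user(&to_book3s(vcpu)->hior, uaddr, sizeof(u64)))
		r = -EFAULT;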
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 10d8ef602e5c..c8c4b878795a 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -34,7 +34,8 @@
 /* r2 is special: it holds 'current', and it made nonvolatile in the
  * kernel with the -ffixed-r2 gcc option. */
 #define HOST_R2         12
-#define HOST_NV_GPRS    16
+#define HOST_CR         16
+#define HOST_NV_GPRS    20
 #define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
 #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
 #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
@@ -296,8 +297,10 @@ heavyweight_exit:
 
 	/* Return to kvm_vcpu_run(). */
 	lwz	r4, HOST_STACK_LR(r1)
+	lwz	r5, HOST_CR(r1)
 	addi	r1, r1, HOST_STACK_SIZE
 	mtlr	r4
+	mtcr	r5
 	/* r3 still contains the return code from kvmppc_handle_exit(). */
 	blr
 
@@ -314,6 +317,8 @@ _GLOBAL(__kvmppc_vcpu_run)
 	stw	r3, HOST_RUN(r1)
 	mflr	r3
 	stw	r3, HOST_STACK_LR(r1)
+	mfcr	r5
+	stw	r5, HOST_CR(r1)
 
 	/* Save host non-volatile register state to stack. */
 	stw	r14, HOST_NV_GPR(r14)(r1)
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index db360fc4cf0e..d09f3e8e6867 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -392,7 +392,7 @@ static int axon_msi_probe(struct platform_device *device)
 	}
 	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
-	msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic);
+	msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
 	if (!msic->irq_domain) {
 		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
 		       dn->full_name);
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index e5c3a2c6090d..f9a48af335cb 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -239,7 +239,7 @@ void __init beatic_init_IRQ(void)
 	ppc_md.get_irq = beatic_get_irq;
 
 	/* Allocate an irq host */
-	beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL);
+	beatic_host = irq_domain_add_nomap(NULL, 0, &beatic_pic_host_ops, NULL);
 	BUG_ON(beatic_host == NULL);
 	irq_set_default_host(beatic_host);
 }
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index a81e5a88fbdf..b4ddaa3fbb29 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -192,7 +192,7 @@ static int psurge_secondary_ipi_init(void)
 {
 	int rc = -ENOMEM;
 
-	psurge_host = irq_domain_add_nomap(NULL, &psurge_host_ops, NULL);
+	psurge_host = irq_domain_add_nomap(NULL, 0, &psurge_host_ops, NULL);
 
 	if (psurge_host)
 		psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 2a4ff86cc21f..5f3b23220b8e 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -753,9 +753,8 @@ void __init ps3_init_IRQ(void)
 	unsigned cpu;
 	struct irq_domain *host;
 
-	host = irq_domain_add_nomap(NULL, &ps3_host_ops, NULL);
+	host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
 	irq_set_default_host(host);
-	irq_set_virq_count(PS3_PLUG_MAX + 1);
 
 	for_each_possible_cpu(cpu) {
 		struct ps3_private *pd = &per_cpu(ps3_private, cpu);
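The four powerpc call sites above (axon_msi, beat, powermac, ps3) all gain a second argument, and the PS3 code drops its separate irq_set_virq_count() call. A hedged sketch of the irq_domain_add_nomap() prototype they appear to be adapting to, where max_irq bounds the number of direct mappings and 0 means "no limit":

	/* Assumed prototype of the irq_domain API of this era, for illustration only. */
	struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
						unsigned int max_irq,
						const struct irq_domain_ops *ops,
						void *host_data);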
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index c1d5a820b1aa..5f2bb4242c0f 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -61,6 +61,7 @@ config DUMP_CODE
 config DWARF_UNWINDER
 	bool "Enable the DWARF unwinder for stacktraces"
 	select FRAME_POINTER
+	depends on SUPERH32
 	default n
 	help
 	  Enabling this option will make stacktraces more accurate, at
diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c
index d879848f3cdd..d0d6221d7c2e 100644
--- a/arch/sh/boards/board-sh7785lcr.c
+++ b/arch/sh/boards/board-sh7785lcr.c
@@ -28,6 +28,7 @@
 #include <cpu/sh7785.h>
 #include <asm/heartbeat.h>
 #include <asm/clock.h>
+#include <asm/bl_bit.h>
 
 /*
  * NOTE: This board has 2 physical memory maps.
diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
index adc9b4bba828..8b50cf763c06 100644
--- a/arch/sh/boards/mach-hp6xx/pm.c
+++ b/arch/sh/boards/mach-hp6xx/pm.c
@@ -14,6 +14,7 @@
 #include <linux/gfp.h>
 #include <asm/io.h>
 #include <asm/hd64461.h>
+#include <asm/bl_bit.h>
 #include <mach/hp6xx.h>
 #include <cpu/dac.h>
 #include <asm/freq.h>
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
index b1cb2715ad6e..67ee95603813 100644
--- a/arch/sh/drivers/dma/dma-sysfs.c
+++ b/arch/sh/drivers/dma/dma-sysfs.c
@@ -54,7 +54,7 @@ static int __init dma_subsys_init(void)
 	if (unlikely(ret))
 		return ret;
 
-	return device_create_file(dma_subsys.dev_root, &dev_attr_devices.attr);
+	return device_create_file(dma_subsys.dev_root, &dev_attr_devices);
 }
 postcore_initcall(dma_subsys_init);
 
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
index 7f1b70cace35..f8f7af51c128 100644
--- a/arch/sh/kernel/cpu/fpu.c
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -2,6 +2,7 @@
 #include <linux/slab.h>
 #include <asm/processor.h>
 #include <asm/fpu.h>
+#include <asm/traps.h>
 
 int init_fpu(struct task_struct *tsk)
 {
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
index 488d24e0cdf0..98bbaa447c93 100644
--- a/arch/sh/kernel/cpu/sh2a/fpu.c
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -14,6 +14,7 @@
 #include <asm/processor.h>
 #include <asm/io.h>
 #include <asm/fpu.h>
+#include <asm/traps.h>
 
 /* The PR (precision) bit in the FP Status Register must be clear when
  * an frchg instruction is executed, otherwise the instruction is undefined.
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index e74cd6c0f10d..69ab4d3c8d41 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -16,6 +16,7 @@
 #include <cpu/fpu.h>
 #include <asm/processor.h>
 #include <asm/fpu.h>
+#include <asm/traps.h>
 
 /* The PR (precision) bit in the FP Status Register must be clear when
  * an frchg instruction is executed, otherwise the instruction is undefined.
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
index 5853989586ed..04ab5aeaf920 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -113,7 +113,7 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
 
 	/* MSTP32 clocks */
-	CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP004]),
+	CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP004]),
 	CLKDEV_CON_ID("riic0", &mstp_clks[MSTP000]),
 	CLKDEV_CON_ID("riic1", &mstp_clks[MSTP000]),
 	CLKDEV_CON_ID("riic2", &mstp_clks[MSTP000]),
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index a6f95ae4aae7..08d27fac8d08 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -16,6 +16,7 @@
 #include <asm/suspend.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
+#include <asm/bl_bit.h>
 
 /*
  * Notifier lists for pre/post sleep notification
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 64852ecc6881..ee226e20c20c 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -17,8 +17,8 @@
 #include <linux/irqflags.h>
 #include <linux/smp.h>
 #include <linux/cpuidle.h>
-#include <asm/pgalloc.h>
 #include <linux/atomic.h>
+#include <asm/pgalloc.h>
 #include <asm/smp.h>
 #include <asm/bl_bit.h>
 
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index efb6d398dec3..b117781bfea2 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -14,6 +14,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <asm/cacheflush.h>
+#include <asm/traps.h>
 
 /* Macros for single step instruction identification */
 #define OPCODE_BT(op)		(((op) & 0xff00) == 0x8900)
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index f72e3a951588..94273aaf78c1 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -26,6 +26,7 @@
 #include <asm/mmu_context.h>
 #include <asm/fpu.h>
 #include <asm/syscalls.h>
+#include <asm/switch_to.h>
 
 void show_regs(struct pt_regs * regs)
 {
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index a17a14d32340..eaebdf6a5c77 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -27,6 +27,7 @@
 #include <asm/smp.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 
 int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
 int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
diff --git a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
index 555a64f124ca..23af17584054 100644
--- a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
+++ b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
@@ -34,6 +34,41 @@ __kernel_rt_sigreturn:
 1:	.short	__NR_rt_sigreturn
 .LEND_rt_sigreturn:
 	.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
+	.previous
 
 	.section .eh_frame,"a",@progbits
+.LCIE1:
+	.ualong	.LCIE1_end - .LCIE1_start
+.LCIE1_start:
+	.ualong	0		/* CIE ID */
+	.byte	0x1		/* Version number */
+	.string	"zRS"		/* NUL-terminated augmentation string */
+	.uleb128	0x1	/* Code alignment factor */
+	.sleb128	-4	/* Data alignment factor */
+	.byte	0x11		/* Return address register column */
+	.uleb128	0x1	/* Augmentation length and data */
+	.byte	0x1b		/* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
+	.byte	0xc, 0xf, 0x0	/* DW_CFA_def_cfa: r15 ofs 0 */
+
+	.align 2
+.LCIE1_end:
+
+	.ualong	.LFDE0_end-.LFDE0_start	/* Length FDE0 */
+.LFDE0_start:
+	.ualong	.LFDE0_start-.LCIE1	/* CIE pointer */
+	.ualong	.LSTART_sigreturn-.	/* PC-relative start address */
+	.ualong	.LEND_sigreturn-.LSTART_sigreturn
+	.uleb128	0		/* Augmentation */
+	.align 2
+.LFDE0_end:
+
+	.ualong	.LFDE1_end-.LFDE1_start	/* Length FDE1 */
+.LFDE1_start:
+	.ualong	.LFDE1_start-.LCIE1	/* CIE pointer */
+	.ualong	.LSTART_rt_sigreturn-.	/* PC-relative start address */
+	.ualong	.LEND_rt_sigreturn-.LSTART_rt_sigreturn
+	.uleb128	0		/* Augmentation */
+	.align 2
+.LFDE1_end:
+
 	.previous
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
index 3e70f851cdc6..0eb74d00690a 100644
--- a/arch/sh/kernel/vsyscall/vsyscall-trapa.S
+++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
@@ -3,37 +3,34 @@
 	.type	__kernel_vsyscall,@function
 __kernel_vsyscall:
 .LSTART_vsyscall:
-	/* XXX: We'll have to do something here once we opt to use the vDSO
-	 * page for something other than the signal trampoline.. as well as
-	 * fill out .eh_frame -- PFM. */
+	trapa #0x10
+	nop
 .LEND_vsyscall:
 	.size __kernel_vsyscall,.-.LSTART_vsyscall
+	.previous
 
 	.section .eh_frame,"a",@progbits
-	.previous
 .LCIE:
 	.ualong	.LCIE_end - .LCIE_start
 .LCIE_start:
 	.ualong	0		/* CIE ID */
 	.byte	0x1		/* Version number */
-	.string	"zRS"		/* NUL-terminated augmentation string */
+	.string	"zR"		/* NUL-terminated augmentation string */
 	.uleb128	0x1	/* Code alignment factor */
 	.sleb128	-4	/* Data alignment factor */
 	.byte	0x11		/* Return address register column */
-	/* Augmentation length and data (none) */
-	.byte	0xc		/* DW_CFA_def_cfa */
-	.uleb128	0xf	/*   r15 */
-	.uleb128	0x0	/*   offset 0 */
-
+	.uleb128	0x1	/* Augmentation length and data */
+	.byte	0x1b		/* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
+	.byte	0xc,0xf,0x0	/* DW_CFA_def_cfa: r15 ofs 0 */
 	.align 2
 .LCIE_end:
 
 	.ualong	.LFDE_end-.LFDE_start	/* Length FDE */
 .LFDE_start:
-	.ualong	.LCIE			/* CIE pointer */
-	.ualong	.LSTART_vsyscall-.	/* start address */
+	.ualong	.LFDE_start-.LCIE	/* CIE pointer */
+	.ualong	.LSTART_vsyscall-.	/* PC-relative start address */
 	.ualong	.LEND_vsyscall-.LSTART_vsyscall
-	.uleb128 0
+	.uleb128 0			/* Augmentation */
 	.align 2
 .LFDE_end:
 	.previous
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 112fea12522a..0e529285b28d 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -18,6 +18,7 @@
 #include <linux/highmem.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
+#include <asm/cache_insns.h>
 #include <asm/cacheflush.h>
 
 /*
diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c
index 75a17f5bfa14..0b85dd9dd3a7 100644
--- a/arch/sh/mm/flush-sh4.c
+++ b/arch/sh/mm/flush-sh4.c
@@ -1,5 +1,6 @@
 #include <linux/mm.h>
 #include <asm/mmu_context.h>
+#include <asm/cache_insns.h>
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 
diff --git a/arch/sh/mm/sram.c b/arch/sh/mm/sram.c
index bc156ec4545e..2d8fa718d55e 100644
--- a/arch/sh/mm/sram.c
+++ b/arch/sh/mm/sram.c
@@ -9,6 +9,7 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/errno.h>
 #include <asm/sram.h>
 
 /*
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index aba6b958b2a5..19f56058742b 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -45,7 +45,6 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
 
 void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
 {
-	struct leon_pci_info *info = pbus->sysdata;
 	struct pci_dev *dev;
 	int i, has_io, has_mem;
 	u16 cmd;
@@ -111,18 +110,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 	return pci_enable_resources(dev, mask);
 }
 
-struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
-{
-	/*
-	 * Currently the OpenBoot nodes are not connected with the PCI device,
-	 * this is because the LEON PROM does not create PCI nodes. Eventually
-	 * this will change and the same approach as pcic.c can be used to
-	 * match PROM nodes with pci devices.
-	 */
-	return NULL;
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
 void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
 {
 #ifdef CONFIG_PCI_DEBUG
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 7705c6731e28..df3155a17991 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -225,6 +225,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	unsigned long g2;
 	int from_user = !(regs->psr & PSR_PS);
 	int fault, code;
+	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+			      (write ? FAULT_FLAG_WRITE : 0));
 
 	if(text_fault)
 		address = regs->pc;
@@ -251,6 +253,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
+retry:
 	down_read(&mm->mmap_sem);
 
 	/*
@@ -289,7 +292,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -297,13 +304,29 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
 	}
+
 	up_read(&mm->mmap_sem);
 	return;
 
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 504c0622f729..1fe0429b6314 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -279,6 +279,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	unsigned int insn = 0;
 	int si_code, fault_code, fault;
 	unsigned long address, mm_rss;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	fault_code = get_thread_fault_code();
 
@@ -333,6 +334,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 			insn = get_fault_insn(regs, insn);
 			goto handle_kernel_fault;
 		}
+
+retry:
 		down_read(&mm->mmap_sem);
 	}
 
@@ -423,7 +426,12 @@ good_area:
 			goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -431,12 +439,27 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
 	}
 	up_read(&mm->mmap_sem);
 
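Both sparc fault handlers above adopt the same mmap_sem retry protocol already used elsewhere in the tree. A condensed, hedged sketch of that control flow (generic variable names, VMA lookup elided):

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
	down_read(&mm->mmap_sem);
	/* ... locate and validate the VMA ... */
	fault = handle_mm_fault(mm, vma, address, flags);

	/* A fatally signalled task may come back without mmap_sem held. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if ((flags & FAULT_FLAG_ALLOW_RETRY) && (fault & VM_FAULT_RETRY)) {
		/* mmap_sem was dropped in __lock_page_or_retry(); retry once. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}
	up_read(&mm->mmap_sem);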
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 11270ca22c0a..96033e2d6845 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -12,7 +12,7 @@ config TILE
 	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_IRQ_SHOW
 	select SYS_HYPERVISOR
-	select ARCH_HAVE_NMI_SAFE_CMPXCHG if !M386
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 
 # FIXME: investigate whether we need/want these options.
 #	select HAVE_IOREMAP_PROT
@@ -69,6 +69,9 @@ config ARCH_PHYS_ADDR_T_64BIT
 config ARCH_DMA_ADDR_T_64BIT
 	def_bool y
 
+config NEED_DMA_MAP_STATE
+	def_bool y
+
 config LOCKDEP_SUPPORT
 	def_bool y
 
@@ -118,7 +121,7 @@ config 64BIT
 
 config ARCH_DEFCONFIG
 	string
-	default "arch/tile/configs/tile_defconfig" if !TILEGX
+	default "arch/tile/configs/tilepro_defconfig" if !TILEGX
 	default "arch/tile/configs/tilegx_defconfig" if TILEGX
 
 source "init/Kconfig"
@@ -240,6 +243,7 @@ endchoice
 
 config PAGE_OFFSET
 	hex
+	depends on !64BIT
 	default 0xF0000000 if VMSPLIT_3_75G
 	default 0xE0000000 if VMSPLIT_3_5G
 	default 0xB0000000 if VMSPLIT_2_75G
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index 17acce70569b..9520bc5a4b7f 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -30,7 +30,8 @@ ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
 KBUILD_CFLAGS   += $(CONFIG_DEBUG_EXTRA_FLAGS)
 endif
 
-LIBGCC_PATH     := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+LIBGCC_PATH     := \
+  $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
 
 # Provide the path to use for "make defconfig".
 KBUILD_DEFCONFIG := $(ARCH)_defconfig
@@ -53,8 +54,6 @@ libs-y += $(LIBGCC_PATH)
 # See arch/tile/Kbuild for content of core part of the kernel
 core-y += arch/tile/
 
-core-$(CONFIG_KVM) += arch/tile/kvm/
-
 ifdef TILERA_ROOT
 INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
 endif
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h
index f548efeb2de3..d6ba449b5363 100644
--- a/arch/tile/include/arch/spr_def.h
+++ b/arch/tile/include/arch/spr_def.h
@@ -60,8 +60,8 @@
 	_concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
 #define SPR_IPI_EVENT_RESET_K \
 	_concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
-#define SPR_IPI_MASK_SET_K \
-	_concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_SET_K \
+	_concat4(SPR_IPI_EVENT_SET_, CONFIG_KERNEL_PL,,)
 #define INT_IPI_K \
 	_concat4(INT_IPI_, CONFIG_KERNEL_PL,,)
 
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index bb696da5d7cd..f2461429a4a4 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -17,6 +17,8 @@
 #ifndef _ASM_TILE_ATOMIC_H
 #define _ASM_TILE_ATOMIC_H
 
+#include <asm/cmpxchg.h>
+
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
@@ -121,54 +123,6 @@ static inline int atomic_read(const atomic_t *v)
  */
 #define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
 
-/* Nonexistent functions intended to cause link errors. */
-extern unsigned long __xchg_called_with_bad_pointer(void);
-extern unsigned long __cmpxchg_called_with_bad_pointer(void);
-
-#define xchg(ptr, x)						\
-	({							\
-		typeof(*(ptr)) __x;				\
-		switch (sizeof(*(ptr))) {			\
-		case 4:						\
-			__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
-				(atomic_t *)(ptr),		\
-				(u32)(typeof((x)-(x)))(x));	\
-			break;					\
-		case 8:						\
-			__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
-				(atomic64_t *)(ptr),		\
-				(u64)(typeof((x)-(x)))(x));	\
-			break;					\
-		default:					\
-			__xchg_called_with_bad_pointer();	\
-		}						\
-		__x;						\
-	})
-
-#define cmpxchg(ptr, o, n)					\
-	({							\
-		typeof(*(ptr)) __x;				\
-		switch (sizeof(*(ptr))) {			\
-		case 4:						\
-			__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
-				(atomic_t *)(ptr),		\
-				(u32)(typeof((o)-(o)))(o),	\
-				(u32)(typeof((n)-(n)))(n));	\
-			break;					\
-		case 8:						\
-			__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
-				(atomic64_t *)(ptr),		\
-				(u64)(typeof((o)-(o)))(o),	\
-				(u64)(typeof((n)-(n)))(n));	\
-			break;					\
-		default:					\
-			__cmpxchg_called_with_bad_pointer();	\
-		}						\
-		__x;						\
-	})
-
-#define tas(ptr) (xchg((ptr), 1))
-
 #endif /* __ASSEMBLY__ */
 
 #ifndef __tilegx__
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 466dc4a39a4f..54d1da826f93 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -200,7 +200,7 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the old value of @v.
+ * Returns non-zero if @v was not @u, and zero otherwise.
  */
 static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 {
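The comment fix above matters to callers that test the result. A small, hedged usage sketch consistent with the corrected semantics (the object and field names are hypothetical):

	/* Take a reference only if the count has not already dropped to zero;
	 * a zero return means no addition was performed. */
	if (!atomic64_add_unless(&obj->refcount, 1, 0))
		return -ENOENT;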
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index 58d021a9834f..60b87ee54fb8 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -38,10 +38,10 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 
 static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 {
-	unsigned long old, mask = (1UL << (nr % BITS_PER_LONG));
-	long guess, oldval;
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	unsigned long guess, oldval;
 	addr += nr / BITS_PER_LONG;
-	old = *addr;
+	oldval = *addr;
 	do {
 		guess = oldval;
 		oldval = atomic64_cmpxchg((atomic64_t *)addr,
@@ -85,7 +85,7 @@ static inline int test_and_change_bit(unsigned nr,
 				   volatile unsigned long *addr)
 {
 	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
-	long guess, oldval = *addr;
+	unsigned long guess, oldval;
 	addr += nr / BITS_PER_LONG;
 	oldval = *addr;
 	do {
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..276f067e3640
--- /dev/null
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -0,0 +1,73 @@
+/*
+ * cmpxchg.h -- forked from asm/atomic.h with this copyright:
+ *
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _ASM_TILE_CMPXCHG_H
+#define _ASM_TILE_CMPXCHG_H
+
+#ifndef __ASSEMBLY__
+
+/* Nonexistent functions intended to cause link errors. */
+extern unsigned long __xchg_called_with_bad_pointer(void);
+extern unsigned long __cmpxchg_called_with_bad_pointer(void);
+
+#define xchg(ptr, x)						\
+	({							\
+		typeof(*(ptr)) __x;				\
+		switch (sizeof(*(ptr))) {			\
+		case 4:						\
+			__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
+				(atomic_t *)(ptr),		\
+				(u32)(typeof((x)-(x)))(x));	\
+			break;					\
+		case 8:						\
+			__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
+				(atomic64_t *)(ptr),		\
+				(u64)(typeof((x)-(x)))(x));	\
+			break;					\
+		default:					\
+			__xchg_called_with_bad_pointer();	\
+		}						\
+		__x;						\
+	})
+
+#define cmpxchg(ptr, o, n)					\
+	({							\
+		typeof(*(ptr)) __x;				\
+		switch (sizeof(*(ptr))) {			\
+		case 4:						\
+			__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
+				(atomic_t *)(ptr),		\
+				(u32)(typeof((o)-(o)))(o),	\
+				(u32)(typeof((n)-(n)))(n));	\
+			break;					\
+		case 8:						\
+			__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
+				(atomic64_t *)(ptr),		\
+				(u64)(typeof((o)-(o)))(o),	\
+				(u64)(typeof((n)-(n)))(n));	\
+			break;					\
+		default:					\
+			__cmpxchg_called_with_bad_pointer();	\
+		}						\
+		__x;						\
+	})
+
+#define tas(ptr) (xchg((ptr), 1))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_TILE_CMPXCHG_H */
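The new header dispatches xchg()/cmpxchg() on sizeof(*(ptr)), so it only accepts 4- and 8-byte objects. A brief, hedged sketch of the classic compare-and-swap retry loop these macros are meant to support (the variable v is hypothetical):

	/* Lock-free increment built on cmpxchg(); v must be 4 or 8 bytes wide. */
	u64 old, new;

	do {
		old = v;
		new = old + 1;
	} while (cmpxchg(&v, old, new) != old);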
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index f80f8ceabc67..33cff9a3058b 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -21,7 +21,7 @@
 #define NR_IRQS 32
 
 /* IRQ numbers used for linux IPIs. */
-#define IRQ_RESCHEDULE 1
+#define IRQ_RESCHEDULE 0
 
 #define irq_canonicalize(irq)   (irq)
 
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 72be5904e020..5f8b6a095fd8 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -137,7 +137,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__insn_mf();
-	rw->lock = 0;
+	__insn_exch4(&rw->lock, 0);  /* Avoid waiting in the write buffer. */
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h
index 4d97a2db932e..0e9d382a2d45 100644
--- a/arch/tile/include/asm/stack.h
+++ b/arch/tile/include/asm/stack.h
@@ -25,7 +25,6 @@
 struct KBacktraceIterator {
 	BacktraceIterator it;
 	struct task_struct *task;     /* task we are backtracing */
-	pte_t *pgtable;               /* page table for user space access */
 	int end;                      /* iteration complete. */
 	int new_context;              /* new context is starting */
 	int profile;                  /* profiling, so stop on async intrpt */
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index 5f20f920f932..e28c3df4176a 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -64,7 +64,11 @@ void do_breakpoint(struct pt_regs *, int fault_num);
 
 
 #ifdef __tilegx__
+/* kernel/single_step.c */
 void gx_singlestep_handle(struct pt_regs *, int fault_num);
+
+/* kernel/intvec_64.S */
+void fill_ra_stack(void);
 #endif
 
-#endif /* _ASM_TILE_SYSCALLS_H */
+#endif /* _ASM_TILE_TRAPS_H */
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 431e9ae60488..ec91568df880 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -85,6 +85,7 @@ STD_ENTRY(cpu_idle_on_new_stack)
 /* Loop forever on a nap during SMP boot. */
 STD_ENTRY(smp_nap)
 	nap
+	nop       /* avoid provoking the icache prefetch with a jump */
 	j smp_nap /* we are not architecturally guaranteed not to exit nap */
 	jrp lr    /* clue in the backtracer */
 	STD_ENDPROC(smp_nap)
@@ -105,5 +106,6 @@ STD_ENTRY(_cpu_idle)
 	.global _cpu_idle_nap
 _cpu_idle_nap:
 	nap
+	nop       /* avoid provoking the icache prefetch with a jump */
 	jrp lr
 	STD_ENDPROC(_cpu_idle)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index aecc8ed5f39b..5d56a1ef5ba5 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -799,6 +799,10 @@ handle_interrupt:
  * This routine takes a boolean in r30 indicating if this is an NMI.
  * If so, we also expect a boolean in r31 indicating whether to
  * re-enable the oprofile interrupts.
+ *
+ * Note that .Lresume_userspace is jumped to directly in several
+ * places, and we need to make sure r30 is set correctly in those
+ * callers as well.
  */
 STD_ENTRY(interrupt_return)
 	/* If we're resuming to kernel space, don't check thread flags. */
@@ -1237,7 +1241,10 @@ handle_syscall:
 	bzt     r30, 1f
 	jal     do_syscall_trace
 	FEEDBACK_REENTER(handle_syscall)
-1:	j       .Lresume_userspace	/* jump into middle of interrupt_return */
+1:	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 
 .Linvalid_syscall:
 	/* Report an invalid syscall back to the user program */
@@ -1246,7 +1253,10 @@ handle_syscall:
 	 movei  r28, -ENOSYS
 	}
 	sw      r29, r28
-	j       .Lresume_userspace	/* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(handle_syscall)
 
 	/* Return the address for oprofile to suppress in backtraces. */
@@ -1262,7 +1272,10 @@ STD_ENTRY(ret_from_fork)
 	jal     sim_notify_fork
 	jal     schedule_tail
 	FEEDBACK_REENTER(ret_from_fork)
-	j       .Lresume_userspace	/* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(ret_from_fork)
 
 	/*
@@ -1376,7 +1389,10 @@ handle_ill:
 
 	jal     send_sigtrap    /* issue a SIGTRAP */
 	FEEDBACK_REENTER(handle_ill)
-	j       .Lresume_userspace	/* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 
 .Ldispatch_normal_ill:
 	{
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 79c93e10ba27..49d9d6621682 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -22,6 +22,7 @@
 #include <asm/irqflags.h>
 #include <asm/asm-offsets.h>
 #include <asm/types.h>
+#include <asm/signal.h>
 #include <hv/hypervisor.h>
 #include <arch/abi.h>
 #include <arch/interrupts.h>
@@ -605,6 +606,10 @@ handle_interrupt:
  * This routine takes a boolean in r30 indicating if this is an NMI.
  * If so, we also expect a boolean in r31 indicating whether to
  * re-enable the oprofile interrupts.
+ *
+ * Note that .Lresume_userspace is jumped to directly in several
+ * places, and we need to make sure r30 is set correctly in those
+ * callers as well.
  */
 STD_ENTRY(interrupt_return)
 	/* If we're resuming to kernel space, don't check thread flags. */
@@ -1039,11 +1044,28 @@ handle_syscall:
 
 	/* Do syscall trace again, if requested. */
 	ld      r30, r31
-	andi    r30, r30, _TIF_SYSCALL_TRACE
-	beqzt   r30, 1f
+	andi    r0, r30, _TIF_SYSCALL_TRACE
+	{
+	 andi   r0, r30, _TIF_SINGLESTEP
+	 beqzt  r0, 1f
+	}
 	jal     do_syscall_trace
 	FEEDBACK_REENTER(handle_syscall)
-1:	j       .Lresume_userspace	/* jump into middle of interrupt_return */
+	andi    r0, r30, _TIF_SINGLESTEP
+
+1:	beqzt   r0, 2f
+
+	/* Single stepping -- notify ptrace. */
+	{
+	 movei  r0, SIGTRAP
+	 jal    ptrace_notify
+	}
+	FEEDBACK_REENTER(handle_syscall)
+
+2:	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 
 .Lcompat_syscall:
 	/*
@@ -1077,7 +1099,10 @@ handle_syscall:
 	 movei  r28, -ENOSYS
 	}
 	st      r29, r28
-	j       .Lresume_userspace	/* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(handle_syscall)
 
 	/* Return the address for oprofile to suppress in backtraces. */
@@ -1093,7 +1118,10 @@ STD_ENTRY(ret_from_fork)
 	jal     sim_notify_fork
 	jal     schedule_tail
 	FEEDBACK_REENTER(ret_from_fork)
-	j       .Lresume_userspace
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(ret_from_fork)
 
 /* Various stub interrupt handlers and syscall handlers */
@@ -1156,6 +1184,18 @@ int_unalign:
 	push_extra_callee_saves r0
 	j       do_trap
 
+/* Fill the return address stack with nonzero entries. */
+STD_ENTRY(fill_ra_stack)
+	{
+	 move   r0, lr
+	 jal    1f
+	}
+1:	jal     2f
+2:	jal     3f
+3:	jal     4f
+4:	jrp     r0
+	STD_ENDPROC(fill_ra_stack)
+
 /* Include .intrpt1 array of interrupt vectors */
 	.section ".intrpt1", "ax"
 
@@ -1166,7 +1206,7 @@ int_unalign:
 #define do_hardwall_trap bad_intr
 #endif
 
-	int_hand     INT_MEM_ERROR, MEM_ERROR, bad_intr
+	int_hand     INT_MEM_ERROR, MEM_ERROR, do_trap
1170 int_hand INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr 1210 int_hand INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
1171#if CONFIG_KERNEL_PL == 2 1211#if CONFIG_KERNEL_PL == 2
1172 int_hand INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle 1212 int_hand INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle
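
Three separate fixes land in intvec_64.S: handle_syscall now tests _TIF_SINGLESTEP as well as _TIF_SYSCALL_TRACE and calls ptrace_notify(SIGTRAP) before resuming, fill_ra_stack() is added (used from the ILL_TRANS path in traps.c below to work around a return-address-stack erratum), and INT_MEM_ERROR is routed to do_trap instead of bad_intr. The single-step change preserves the behaviour a debugger relies on: each stepped instruction, including one that enters the kernel, is reported back as a SIGTRAP stop. A small self-contained Linux tracer that observes exactly that (generic ptrace usage, nothing tile-specific):

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    pid_t pid = fork();
    if (pid == 0) {
        /* Child: request tracing, stop so the parent can take control,
         * then run a few instructions including a syscall. */
        ptrace(PTRACE_TRACEME, 0, NULL, NULL);
        raise(SIGSTOP);
        write(1, "child ran\n", 10);
        _exit(0);
    }

    int status, steps = 0;
    waitpid(pid, &status, 0);                      /* initial SIGSTOP */
    while (steps < 200 && !WIFEXITED(status)) {
        if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) < 0)
            break;
        waitpid(pid, &status, 0);
        if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
            steps++;                               /* each step reports SIGTRAP */
    }
    fprintf(stderr, "observed %d SIGTRAP stops\n", steps);
    ptrace(PTRACE_CONT, pid, NULL, NULL);          /* harmless if already exited */
    waitpid(pid, &status, 0);
    return 0;
}
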
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index b90ab9925674..98d476920106 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -67,6 +67,8 @@ void *module_alloc(unsigned long size)
67 area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END); 67 area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
68 if (!area) 68 if (!area)
69 goto error; 69 goto error;
70 area->nr_pages = npages;
71 area->pages = pages;
70 72
71 if (map_vm_area(area, prot_rwx, &pages)) { 73 if (map_vm_area(area, prot_rwx, &pages)) {
72 vunmap(area->addr); 74 vunmap(area->addr);
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 7a9327046404..446a7f52cc11 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -146,7 +146,6 @@ static ctl_table unaligned_table[] = {
146 }, 146 },
147 {} 147 {}
148}; 148};
149#endif
150 149
151static struct ctl_path tile_path[] = { 150static struct ctl_path tile_path[] = {
152 { .procname = "tile" }, 151 { .procname = "tile" },
@@ -155,10 +154,9 @@ static struct ctl_path tile_path[] = {
155 154
156static int __init proc_sys_tile_init(void) 155static int __init proc_sys_tile_init(void)
157{ 156{
158#ifndef __tilegx__ /* FIXME: GX: no support for unaligned access yet */
159 register_sysctl_paths(tile_path, unaligned_table); 157 register_sysctl_paths(tile_path, unaligned_table);
160#endif
161 return 0; 158 return 0;
162} 159}
163 160
164arch_initcall(proc_sys_tile_init); 161arch_initcall(proc_sys_tile_init);
162#endif
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 30caecac94dc..2d5ef617bb39 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -28,6 +28,7 @@
28#include <linux/tracehook.h> 28#include <linux/tracehook.h>
29#include <linux/signal.h> 29#include <linux/signal.h>
30#include <asm/stack.h> 30#include <asm/stack.h>
31#include <asm/switch_to.h>
31#include <asm/homecache.h> 32#include <asm/homecache.h>
32#include <asm/syscalls.h> 33#include <asm/syscalls.h>
33#include <asm/traps.h> 34#include <asm/traps.h>
@@ -285,7 +286,7 @@ struct task_struct *validate_current(void)
285 static struct task_struct corrupt = { .comm = "<corrupt>" }; 286 static struct task_struct corrupt = { .comm = "<corrupt>" };
286 struct task_struct *tsk = current; 287 struct task_struct *tsk = current;
287 if (unlikely((unsigned long)tsk < PAGE_OFFSET || 288 if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
288 (void *)tsk > high_memory || 289 (high_memory && (void *)tsk > high_memory) ||
289 ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) { 290 ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
290 pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer); 291 pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
291 tsk = &corrupt; 292 tsk = &corrupt;
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 92a94f4920ad..bff23f476110 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -103,13 +103,11 @@ unsigned long __initdata pci_reserve_end_pfn = -1U;
103 103
104static int __init setup_maxmem(char *str) 104static int __init setup_maxmem(char *str)
105{ 105{
106 long maxmem_mb; 106 unsigned long long maxmem;
107 if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 || 107 if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
108 maxmem_mb == 0)
109 return -EINVAL; 108 return -EINVAL;
110 109
111 maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) << 110 maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
112 (HPAGE_SHIFT - PAGE_SHIFT);
113 pr_info("Forcing RAM used to no more than %dMB\n", 111 pr_info("Forcing RAM used to no more than %dMB\n",
114 maxmem_pfn >> (20 - PAGE_SHIFT)); 112 maxmem_pfn >> (20 - PAGE_SHIFT));
115 return 0; 113 return 0;
@@ -119,14 +117,15 @@ early_param("maxmem", setup_maxmem);
119static int __init setup_maxnodemem(char *str) 117static int __init setup_maxnodemem(char *str)
120{ 118{
121 char *endp; 119 char *endp;
122 long maxnodemem_mb, node; 120 unsigned long long maxnodemem;
121 long node;
123 122
124 node = str ? simple_strtoul(str, &endp, 0) : INT_MAX; 123 node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
125 if (node >= MAX_NUMNODES || *endp != ':' || 124 if (node >= MAX_NUMNODES || *endp != ':')
126 strict_strtol(endp+1, 0, &maxnodemem_mb) != 0)
127 return -EINVAL; 125 return -EINVAL;
128 126
129 maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) << 127 maxnodemem = memparse(endp+1, NULL);
128 maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
130 (HPAGE_SHIFT - PAGE_SHIFT); 129 (HPAGE_SHIFT - PAGE_SHIFT);
131 pr_info("Forcing RAM used on node %ld to no more than %dMB\n", 130 pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
132 node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); 131 node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
@@ -913,6 +912,13 @@ void __cpuinit setup_cpu(int boot)
913 912
914#ifdef CONFIG_BLK_DEV_INITRD 913#ifdef CONFIG_BLK_DEV_INITRD
915 914
915/*
 916 * Note that the kernel can potentially support compression formats
 917 * other than gzip, though we don't do so by default. If we ever
 918 * decide to, we can either look for other filename extensions, or
 919 * just allow a file with this name to be compressed with an
 920 * arbitrary compressor (somewhat counterintuitively).
 921 */
916static int __initdata set_initramfs_file; 922static int __initdata set_initramfs_file;
917static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; 923static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
918 924
@@ -928,9 +934,9 @@ static int __init setup_initramfs_file(char *str)
928early_param("initramfs_file", setup_initramfs_file); 934early_param("initramfs_file", setup_initramfs_file);
929 935
930/* 936/*
931 * We look for an additional "initramfs.cpio.gz" file in the hvfs. 937 * We look for an "initramfs.cpio.gz" file in the hvfs.
932 * If there is one, we allocate some memory for it and it will be 938 * If there is one, we allocate some memory for it and it will be
933 * unpacked to the initramfs after any built-in initramfs_data. 939 * unpacked to the initramfs.
934 */ 940 */
935static void __init load_hv_initrd(void) 941static void __init load_hv_initrd(void)
936{ 942{
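
The setup.c hunks replace the hand-rolled "megabytes" parsing with memparse(), so maxmem= and maxnodemem= now accept the usual K/M/G suffixes, and the result is rounded down to whole huge pages before being converted to a small-page pfn count. A userspace sketch of the same arithmetic (parse_mem() is a stand-in for memparse(), and the page-size constants are example values, not the tile defaults):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the kernel's memparse(): accept an optional
 * K/M/G suffix and return a byte count. */
static unsigned long long parse_mem(const char *s)
{
    char *end;
    unsigned long long v = strtoull(s, &end, 0);
    switch (*end) {
    case 'G': case 'g': v <<= 30; break;
    case 'M': case 'm': v <<= 20; break;
    case 'K': case 'k': v <<= 10; break;
    }
    return v;
}

int main(void)
{
    const unsigned int PAGE_SHIFT = 12, HPAGE_SHIFT = 24; /* assumed example values */
    unsigned long long maxmem = parse_mem("512M");
    /* Round down to whole huge pages, then express the limit in small pages. */
    unsigned long maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
    printf("maxmem=%lluMB -> %lu pages\n", maxmem >> 20, maxmem_pfn);
    return 0;
}
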
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index bc1eb586e24d..9efbc1391b3c 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -153,6 +153,25 @@ static tile_bundle_bits rewrite_load_store_unaligned(
153 if (((unsigned long)addr % size) == 0) 153 if (((unsigned long)addr % size) == 0)
154 return bundle; 154 return bundle;
155 155
156 /*
157 * Return SIGBUS with the unaligned address, if requested.
158 * Note that we return SIGBUS even for completely invalid addresses
159 * as long as they are in fact unaligned; this matches what the
160 * tilepro hardware would be doing, if it could provide us with the
161 * actual bad address in an SPR, which it doesn't.
162 */
163 if (unaligned_fixup == 0) {
164 siginfo_t info = {
165 .si_signo = SIGBUS,
166 .si_code = BUS_ADRALN,
167 .si_addr = addr
168 };
169 trace_unhandled_signal("unaligned trap", regs,
170 (unsigned long)addr, SIGBUS);
171 force_sig_info(info.si_signo, &info, current);
172 return (tilepro_bundle_bits) 0;
173 }
174
156#ifndef __LITTLE_ENDIAN 175#ifndef __LITTLE_ENDIAN
157# error We assume little-endian representation with copy_xx_user size 2 here 176# error We assume little-endian representation with copy_xx_user size 2 here
158#endif 177#endif
@@ -192,18 +211,6 @@ static tile_bundle_bits rewrite_load_store_unaligned(
192 return (tile_bundle_bits) 0; 211 return (tile_bundle_bits) 0;
193 } 212 }
194 213
195 if (unaligned_fixup == 0) {
196 siginfo_t info = {
197 .si_signo = SIGBUS,
198 .si_code = BUS_ADRALN,
199 .si_addr = addr
200 };
201 trace_unhandled_signal("unaligned trap", regs,
202 (unsigned long)addr, SIGBUS);
203 force_sig_info(info.si_signo, &info, current);
204 return (tile_bundle_bits) 0;
205 }
206
207 if (unaligned_printk || unaligned_fixup_count == 0) { 214 if (unaligned_printk || unaligned_fixup_count == 0) {
208 pr_info("Process %d/%s: PC %#lx: Fixup of" 215 pr_info("Process %d/%s: PC %#lx: Fixup of"
209 " unaligned %s at %#lx.\n", 216 " unaligned %s at %#lx.\n",
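
In single_step.c the unaligned_fixup == 0 check moves ahead of the user-space copy of the memory operand, so an unaligned address now yields SIGBUS/BUS_ADRALN even when the address is not mapped at all, matching what the hardware would report if it could. What a process sees in that case is an ordinary SIGBUS with si_code == BUS_ADRALN; the handler below is a plain Linux illustration of catching it (on hardware that fixes up unaligned loads transparently it simply never fires):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void on_sigbus(int sig, siginfo_t *info, void *ctx)
{
    (void)sig; (void)ctx;
    fprintf(stderr, "SIGBUS at %p, si_code=%d (BUS_ADRALN=%d)\n",
            info->si_addr, info->si_code, BUS_ADRALN);
    _exit(1);
}

int main(void)
{
    struct sigaction sa = { 0 };
    sa.sa_sigaction = on_sigbus;
    sa.sa_flags = SA_SIGINFO;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGBUS, &sa, NULL);

    char buf[16] = { 0 };
    volatile int *p = (int *)(buf + 1);   /* deliberately misaligned */
    printf("unaligned read: %d\n", *p);   /* may fault, depending on the CPU */
    return 0;
}
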
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index a44e103c5a63..91da0f721958 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -103,7 +103,7 @@ static void smp_stop_cpu_interrupt(void)
103 set_cpu_online(smp_processor_id(), 0); 103 set_cpu_online(smp_processor_id(), 0);
104 arch_local_irq_disable_all(); 104 arch_local_irq_disable_all();
105 for (;;) 105 for (;;)
106 asm("nap"); 106 asm("nap; nop");
107} 107}
108 108
109/* This function calls the 'stop' function on all other CPUs in the system. */ 109/* This function calls the 'stop' function on all other CPUs in the system. */
@@ -113,6 +113,12 @@ void smp_send_stop(void)
113 send_IPI_allbutself(MSG_TAG_STOP_CPU); 113 send_IPI_allbutself(MSG_TAG_STOP_CPU);
114} 114}
115 115
116/* On panic, just wait; we may get an smp_send_stop() later on. */
117void panic_smp_self_stop(void)
118{
119 while (1)
120 asm("nap; nop");
121}
116 122
117/* 123/*
118 * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages. 124 * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index b949edcec200..172aef7d3159 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -196,6 +196,8 @@ void __cpuinit online_secondary(void)
196 /* This must be done before setting cpu_online_mask */ 196 /* This must be done before setting cpu_online_mask */
197 wmb(); 197 wmb();
198 198
199 notify_cpu_starting(smp_processor_id());
200
199 /* 201 /*
200 * We need to hold call_lock, so there is no inconsistency 202 * We need to hold call_lock, so there is no inconsistency
201 * between the time smp_call_function() determines number of 203 * between the time smp_call_function() determines number of
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 37ee4d037e0b..b2f44c28dda6 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -21,10 +21,12 @@
21#include <linux/stacktrace.h> 21#include <linux/stacktrace.h>
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23#include <linux/mmzone.h> 23#include <linux/mmzone.h>
24#include <linux/dcache.h>
25#include <linux/fs.h>
24#include <asm/backtrace.h> 26#include <asm/backtrace.h>
25#include <asm/page.h> 27#include <asm/page.h>
26#include <asm/tlbflush.h>
27#include <asm/ucontext.h> 28#include <asm/ucontext.h>
29#include <asm/switch_to.h>
28#include <asm/sigframe.h> 30#include <asm/sigframe.h>
29#include <asm/stack.h> 31#include <asm/stack.h>
30#include <arch/abi.h> 32#include <arch/abi.h>
@@ -44,72 +46,23 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
44 return sp >= kstack_base && sp < kstack_base + THREAD_SIZE; 46 return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
45} 47}
46 48
47/* Is address valid for reading? */
48static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
49{
50 HV_PTE *l1_pgtable = kbt->pgtable;
51 HV_PTE *l2_pgtable;
52 unsigned long pfn;
53 HV_PTE pte;
54 struct page *page;
55
56 if (l1_pgtable == NULL)
57 return 0; /* can't read user space in other tasks */
58
59#ifdef CONFIG_64BIT
60 /* Find the real l1_pgtable by looking in the l0_pgtable. */
61 pte = l1_pgtable[HV_L0_INDEX(address)];
62 if (!hv_pte_get_present(pte))
63 return 0;
64 pfn = hv_pte_get_pfn(pte);
65 if (pte_huge(pte)) {
66 if (!pfn_valid(pfn)) {
67 pr_err("L0 huge page has bad pfn %#lx\n", pfn);
68 return 0;
69 }
70 return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
71 }
72 page = pfn_to_page(pfn);
73 BUG_ON(PageHighMem(page)); /* No HIGHMEM on 64-bit. */
74 l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
75#endif
76 pte = l1_pgtable[HV_L1_INDEX(address)];
77 if (!hv_pte_get_present(pte))
78 return 0;
79 pfn = hv_pte_get_pfn(pte);
80 if (pte_huge(pte)) {
81 if (!pfn_valid(pfn)) {
82 pr_err("huge page has bad pfn %#lx\n", pfn);
83 return 0;
84 }
85 return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
86 }
87
88 page = pfn_to_page(pfn);
89 if (PageHighMem(page)) {
90 pr_err("L2 page table not in LOWMEM (%#llx)\n",
91 HV_PFN_TO_CPA(pfn));
92 return 0;
93 }
94 l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
95 pte = l2_pgtable[HV_L2_INDEX(address)];
96 return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
97}
98
99/* Callback for backtracer; basically a glorified memcpy */ 49/* Callback for backtracer; basically a glorified memcpy */
100static bool read_memory_func(void *result, unsigned long address, 50static bool read_memory_func(void *result, unsigned long address,
101 unsigned int size, void *vkbt) 51 unsigned int size, void *vkbt)
102{ 52{
103 int retval; 53 int retval;
104 struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt; 54 struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
55
56 if (address == 0)
57 return 0;
105 if (__kernel_text_address(address)) { 58 if (__kernel_text_address(address)) {
106 /* OK to read kernel code. */ 59 /* OK to read kernel code. */
107 } else if (address >= PAGE_OFFSET) { 60 } else if (address >= PAGE_OFFSET) {
108 /* We only tolerate kernel-space reads of this task's stack */ 61 /* We only tolerate kernel-space reads of this task's stack */
109 if (!in_kernel_stack(kbt, address)) 62 if (!in_kernel_stack(kbt, address))
110 return 0; 63 return 0;
111 } else if (!valid_address(kbt, address)) { 64 } else if (!kbt->is_current) {
112 return 0; /* invalid user-space address */ 65 return 0; /* can't read from other user address spaces */
113 } 66 }
114 pagefault_disable(); 67 pagefault_disable();
115 retval = __copy_from_user_inatomic(result, 68 retval = __copy_from_user_inatomic(result,
@@ -127,6 +80,8 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
127 unsigned long sp = kbt->it.sp; 80 unsigned long sp = kbt->it.sp;
128 struct pt_regs *p; 81 struct pt_regs *p;
129 82
83 if (sp % sizeof(long) != 0)
84 return NULL;
130 if (!in_kernel_stack(kbt, sp)) 85 if (!in_kernel_stack(kbt, sp))
131 return NULL; 86 return NULL;
132 if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1)) 87 if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
@@ -169,27 +124,27 @@ static int is_sigreturn(unsigned long pc)
169} 124}
170 125
171/* Return a pt_regs pointer for a valid signal handler frame */ 126/* Return a pt_regs pointer for a valid signal handler frame */
172static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt) 127static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
128 struct rt_sigframe* kframe)
173{ 129{
174 BacktraceIterator *b = &kbt->it; 130 BacktraceIterator *b = &kbt->it;
175 131
176 if (b->pc == VDSO_BASE) { 132 if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET &&
177 struct rt_sigframe *frame; 133 b->sp % sizeof(long) == 0) {
178 unsigned long sigframe_top = 134 int retval;
179 b->sp + sizeof(struct rt_sigframe) - 1; 135 pagefault_disable();
180 if (!valid_address(kbt, b->sp) || 136 retval = __copy_from_user_inatomic(
181 !valid_address(kbt, sigframe_top)) { 137 kframe, (void __user __force *)b->sp,
182 if (kbt->verbose) 138 sizeof(*kframe));
183 pr_err(" (odd signal: sp %#lx?)\n", 139 pagefault_enable();
184 (unsigned long)(b->sp)); 140 if (retval != 0 ||
141 (unsigned int)(kframe->info.si_signo) >= _NSIG)
185 return NULL; 142 return NULL;
186 }
187 frame = (struct rt_sigframe *)b->sp;
188 if (kbt->verbose) { 143 if (kbt->verbose) {
189 pr_err(" <received signal %d>\n", 144 pr_err(" <received signal %d>\n",
190 frame->info.si_signo); 145 kframe->info.si_signo);
191 } 146 }
192 return (struct pt_regs *)&frame->uc.uc_mcontext; 147 return (struct pt_regs *)&kframe->uc.uc_mcontext;
193 } 148 }
194 return NULL; 149 return NULL;
195} 150}
@@ -202,10 +157,11 @@ static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
202static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt) 157static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
203{ 158{
204 struct pt_regs *p; 159 struct pt_regs *p;
160 struct rt_sigframe kframe;
205 161
206 p = valid_fault_handler(kbt); 162 p = valid_fault_handler(kbt);
207 if (p == NULL) 163 if (p == NULL)
208 p = valid_sigframe(kbt); 164 p = valid_sigframe(kbt, &kframe);
209 if (p == NULL) 165 if (p == NULL)
210 return 0; 166 return 0;
211 backtrace_init(&kbt->it, read_memory_func, kbt, 167 backtrace_init(&kbt->it, read_memory_func, kbt,
@@ -265,41 +221,19 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
265 221
266 /* 222 /*
267 * Set up callback information. We grab the kernel stack base 223 * Set up callback information. We grab the kernel stack base
268 * so we will allow reads of that address range, and if we're 224 * so we will allow reads of that address range.
269 * asking about the current process we grab the page table
270 * so we can check user accesses before trying to read them.
271 * We flush the TLB to avoid any weird skew issues.
272 */ 225 */
273 is_current = (t == NULL); 226 is_current = (t == NULL || t == current);
274 kbt->is_current = is_current; 227 kbt->is_current = is_current;
275 if (is_current) 228 if (is_current)
276 t = validate_current(); 229 t = validate_current();
277 kbt->task = t; 230 kbt->task = t;
278 kbt->pgtable = NULL;
279 kbt->verbose = 0; /* override in caller if desired */ 231 kbt->verbose = 0; /* override in caller if desired */
280 kbt->profile = 0; /* override in caller if desired */ 232 kbt->profile = 0; /* override in caller if desired */
281 kbt->end = KBT_ONGOING; 233 kbt->end = KBT_ONGOING;
282 kbt->new_context = 0; 234 kbt->new_context = 1;
283 if (is_current) { 235 if (is_current)
284 HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
285 if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
286 /*
287 * Not just an optimization: this also allows
288 * this to work at all before va/pa mappings
289 * are set up.
290 */
291 kbt->pgtable = swapper_pg_dir;
292 } else {
293 struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
294 if (!PageHighMem(page))
295 kbt->pgtable = __va(pgdir_pa);
296 else
297 pr_err("page table not in LOWMEM"
298 " (%#llx)\n", pgdir_pa);
299 }
300 local_flush_tlb_all();
301 validate_stack(regs); 236 validate_stack(regs);
302 }
303 237
304 if (regs == NULL) { 238 if (regs == NULL) {
305 if (is_current || t->state == TASK_RUNNING) { 239 if (is_current || t->state == TASK_RUNNING) {
@@ -345,6 +279,78 @@ void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
345} 279}
346EXPORT_SYMBOL(KBacktraceIterator_next); 280EXPORT_SYMBOL(KBacktraceIterator_next);
347 281
282static void describe_addr(struct KBacktraceIterator *kbt,
283 unsigned long address,
284 int have_mmap_sem, char *buf, size_t bufsize)
285{
286 struct vm_area_struct *vma;
287 size_t namelen, remaining;
288 unsigned long size, offset, adjust;
289 char *p, *modname;
290 const char *name;
291 int rc;
292
293 /*
294 * Look one byte back for every caller frame (i.e. those that
295 * aren't a new context) so we look up symbol data for the
296 * call itself, not the following instruction, which may be on
297 * a different line (or in a different function).
298 */
299 adjust = !kbt->new_context;
300 address -= adjust;
301
302 if (address >= PAGE_OFFSET) {
303 /* Handle kernel symbols. */
304 BUG_ON(bufsize < KSYM_NAME_LEN);
305 name = kallsyms_lookup(address, &size, &offset,
306 &modname, buf);
307 if (name == NULL) {
308 buf[0] = '\0';
309 return;
310 }
311 namelen = strlen(buf);
312 remaining = (bufsize - 1) - namelen;
313 p = buf + namelen;
314 rc = snprintf(p, remaining, "+%#lx/%#lx ",
315 offset + adjust, size);
316 if (modname && rc < remaining)
317 snprintf(p + rc, remaining - rc, "[%s] ", modname);
318 buf[bufsize-1] = '\0';
319 return;
320 }
321
322 /* If we don't have the mmap_sem, we can't show any more info. */
323 buf[0] = '\0';
324 if (!have_mmap_sem)
325 return;
326
327 /* Find vma info. */
328 vma = find_vma(kbt->task->mm, address);
329 if (vma == NULL || address < vma->vm_start) {
330 snprintf(buf, bufsize, "[unmapped address] ");
331 return;
332 }
333
334 if (vma->vm_file) {
335 char *s;
336 p = d_path(&vma->vm_file->f_path, buf, bufsize);
337 if (IS_ERR(p))
338 p = "?";
339 s = strrchr(p, '/');
340 if (s)
341 p = s+1;
342 } else {
343 p = "anon";
344 }
345
346 /* Generate a string description of the vma info. */
347 namelen = strlen(p);
348 remaining = (bufsize - 1) - namelen;
349 memmove(buf, p, namelen);
350 snprintf(buf + namelen, remaining, "[%lx+%lx] ",
351 vma->vm_start, vma->vm_end - vma->vm_start);
352}
353
348/* 354/*
349 * This method wraps the backtracer's more generic support. 355 * This method wraps the backtracer's more generic support.
350 * It is only invoked from the architecture-specific code; show_stack() 356 * It is only invoked from the architecture-specific code; show_stack()
@@ -353,6 +359,7 @@ EXPORT_SYMBOL(KBacktraceIterator_next);
353void tile_show_stack(struct KBacktraceIterator *kbt, int headers) 359void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
354{ 360{
355 int i; 361 int i;
362 int have_mmap_sem = 0;
356 363
357 if (headers) { 364 if (headers) {
358 /* 365 /*
@@ -369,31 +376,16 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
369 kbt->verbose = 1; 376 kbt->verbose = 1;
370 i = 0; 377 i = 0;
371 for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) { 378 for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
372 char *modname;
373 const char *name;
374 unsigned long address = kbt->it.pc;
375 unsigned long offset, size;
376 char namebuf[KSYM_NAME_LEN+100]; 379 char namebuf[KSYM_NAME_LEN+100];
380 unsigned long address = kbt->it.pc;
377 381
378 if (address >= PAGE_OFFSET) 382 /* Try to acquire the mmap_sem as we pass into userspace. */
379 name = kallsyms_lookup(address, &size, &offset, 383 if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
380 &modname, namebuf); 384 have_mmap_sem =
381 else 385 down_read_trylock(&kbt->task->mm->mmap_sem);
382 name = NULL; 386
383 387 describe_addr(kbt, address, have_mmap_sem,
384 if (!name) 388 namebuf, sizeof(namebuf));
385 namebuf[0] = '\0';
386 else {
387 size_t namelen = strlen(namebuf);
388 size_t remaining = (sizeof(namebuf) - 1) - namelen;
389 char *p = namebuf + namelen;
390 int rc = snprintf(p, remaining, "+%#lx/%#lx ",
391 offset, size);
392 if (modname && rc < remaining)
393 snprintf(p + rc, remaining - rc,
394 "[%s] ", modname);
395 namebuf[sizeof(namebuf)-1] = '\0';
396 }
397 389
398 pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n", 390 pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n",
399 i++, address, namebuf, (unsigned long)(kbt->it.sp)); 391 i++, address, namebuf, (unsigned long)(kbt->it.sp));
@@ -408,6 +400,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
408 pr_err("Stack dump stopped; next frame identical to this one\n"); 400 pr_err("Stack dump stopped; next frame identical to this one\n");
409 if (headers) 401 if (headers)
410 pr_err("Stack dump complete\n"); 402 pr_err("Stack dump complete\n");
403 if (have_mmap_sem)
404 up_read(&kbt->task->mm->mmap_sem);
411} 405}
412EXPORT_SYMBOL(tile_show_stack); 406EXPORT_SYMBOL(tile_show_stack);
413 407
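
The stack.c rewrite drops the old page-table walker (valid_address() and the pgtable field) in favour of plain __copy_from_user_inatomic() guarded by is_current, and adds describe_addr(), which symbolizes kernel PCs with kallsyms and user PCs by taking mmap_sem with down_read_trylock() and printing the backing file plus "[start+size]". The user-space half of that output can be reproduced from /proc/<pid>/maps; a rough, self-contained analogue (not the kernel code):

#include <stdio.h>
#include <string.h>

/* Find the mapping containing addr in /proc/self/maps and print its file
 * basename plus "[start+size]", similar to what describe_addr() emits for
 * user-space frames. Purely illustrative. */
static void describe(unsigned long addr)
{
    FILE *f = fopen("/proc/self/maps", "r");
    char line[512];
    if (!f)
        return;
    while (fgets(line, sizeof(line), f)) {
        unsigned long start, end;
        char perms[8], path[256] = "anon";
        sscanf(line, "%lx-%lx %7s %*s %*s %*s %255s", &start, &end, perms, path);
        if (addr >= start && addr < end) {
            const char *base = strrchr(path, '/');
            printf("%#lx: %s[%lx+%lx]\n", addr,
                   base ? base + 1 : path, start, end - start);
            break;
        }
    }
    fclose(f);
}

static int marker;

int main(void)
{
    describe((unsigned long)&marker);   /* lands in this executable's data mapping */
    return 0;
}
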
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 2bb6602a1ee7..73cff814ac57 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -200,7 +200,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
200{ 200{
201 siginfo_t info = { 0 }; 201 siginfo_t info = { 0 };
202 int signo, code; 202 int signo, code;
203 unsigned long address; 203 unsigned long address = 0;
204 bundle_bits instr; 204 bundle_bits instr;
205 205
206 /* Re-enable interrupts. */ 206 /* Re-enable interrupts. */
@@ -223,6 +223,10 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
223 } 223 }
224 224
225 switch (fault_num) { 225 switch (fault_num) {
226 case INT_MEM_ERROR:
227 signo = SIGBUS;
228 code = BUS_OBJERR;
229 break;
226 case INT_ILL: 230 case INT_ILL:
227 if (copy_from_user(&instr, (void __user *)regs->pc, 231 if (copy_from_user(&instr, (void __user *)regs->pc,
228 sizeof(instr))) { 232 sizeof(instr))) {
@@ -289,7 +293,10 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
289 address = regs->pc; 293 address = regs->pc;
290 break; 294 break;
291#ifdef __tilegx__ 295#ifdef __tilegx__
292 case INT_ILL_TRANS: 296 case INT_ILL_TRANS: {
297 /* Avoid a hardware erratum with the return address stack. */
298 fill_ra_stack();
299
293 signo = SIGSEGV; 300 signo = SIGSEGV;
294 code = SEGV_MAPERR; 301 code = SEGV_MAPERR;
295 if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK) 302 if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK)
@@ -297,6 +304,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
297 else 304 else
298 address = 0; /* FIXME: GX: single-step for address */ 305 address = 0; /* FIXME: GX: single-step for address */
299 break; 306 break;
307 }
300#endif 308#endif
301 default: 309 default:
302 panic("Unexpected do_trap interrupt number %d", fault_num); 310 panic("Unexpected do_trap interrupt number %d", fault_num);
@@ -308,7 +316,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
308 info.si_addr = (void __user *)address; 316 info.si_addr = (void __user *)address;
309 if (signo == SIGILL) 317 if (signo == SIGILL)
310 info.si_trapno = fault_num; 318 info.si_trapno = fault_num;
311 trace_unhandled_signal("trap", regs, address, signo); 319 if (signo != SIGTRAP)
320 trace_unhandled_signal("trap", regs, address, signo);
312 force_sig_info(signo, &info, current); 321 force_sig_info(signo, &info, current);
313} 322}
314 323
diff --git a/arch/tile/lib/Makefile b/arch/tile/lib/Makefile
index 0c26086ecbef..985f59858234 100644
--- a/arch/tile/lib/Makefile
+++ b/arch/tile/lib/Makefile
@@ -7,6 +7,7 @@ lib-y = cacheflush.o checksum.o cpumask.o delay.o uaccess.o \
7 strchr_$(BITS).o strlen_$(BITS).o 7 strchr_$(BITS).o strlen_$(BITS).o
8 8
9ifeq ($(CONFIG_TILEGX),y) 9ifeq ($(CONFIG_TILEGX),y)
10CFLAGS_REMOVE_memcpy_user_64.o = -fno-omit-frame-pointer
10lib-y += memcpy_user_64.o 11lib-y += memcpy_user_64.o
11else 12else
12lib-y += atomic_32.o atomic_asm_32.o memcpy_tile64.o 13lib-y += atomic_32.o atomic_asm_32.o memcpy_tile64.o
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
index 8928aace7a64..db4fb89e12d8 100644
--- a/arch/tile/lib/cacheflush.c
+++ b/arch/tile/lib/cacheflush.c
@@ -39,7 +39,21 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
39{ 39{
40 char *p, *base; 40 char *p, *base;
41 size_t step_size, load_count; 41 size_t step_size, load_count;
42
43 /*
44 * On TILEPro the striping granularity is a fixed 8KB; on
45 * TILE-Gx it is configurable, and we rely on the fact that
46 * the hypervisor always configures maximum striping, so that
47 * bits 9 and 10 of the PA are part of the stripe function and
48 * every 512 bytes we hit a striping boundary.
49 *
50 */
51#ifdef __tilegx__
52 const unsigned long STRIPE_WIDTH = 512;
53#else
42 const unsigned long STRIPE_WIDTH = 8192; 54 const unsigned long STRIPE_WIDTH = 8192;
55#endif
56
43#ifdef __tilegx__ 57#ifdef __tilegx__
44 /* 58 /*
45 * On TILE-Gx, we must disable the dstream prefetcher before doing 59 * On TILE-Gx, we must disable the dstream prefetcher before doing
@@ -74,7 +88,7 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
74 * memory, that one load would be sufficient, but since we may 88 * memory, that one load would be sufficient, but since we may
75 * be, we also need to back up to the last load issued to 89 * be, we also need to back up to the last load issued to
76 * another memory controller, which would be the point where 90 * another memory controller, which would be the point where
77 * we crossed an 8KB boundary (the granularity of striping 91 * we crossed a "striping" boundary (the granularity of striping
78 * across memory controllers). Keep backing up and doing this 92 * across memory controllers). Keep backing up and doing this
79 * until we are before the beginning of the buffer, or have 93 * until we are before the beginning of the buffer, or have
80 * hit all the controllers. 94 * hit all the controllers.
@@ -88,12 +102,22 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
88 * every cache line on a full memory stripe on each 102 * every cache line on a full memory stripe on each
89 * controller" that we simply do that, to simplify the logic. 103 * controller" that we simply do that, to simplify the logic.
90 * 104 *
91 * FIXME: See bug 9535 for some issues with this code. 105 * On TILE-Gx the hash-for-home function is much more complex,
106 * with the upshot being we can't readily guarantee we have
107 * hit both entries in the 128-entry AMT that were hit by any
108 * load in the entire range, so we just re-load them all.
109 * With larger buffers, we may want to consider using a hypervisor
110 * trap to issue loads directly to each hash-for-home tile for
111 * each controller (doing it from Linux would trash the TLB).
92 */ 112 */
93 if (hfh) { 113 if (hfh) {
94 step_size = L2_CACHE_BYTES; 114 step_size = L2_CACHE_BYTES;
115#ifdef __tilegx__
116 load_count = (size + L2_CACHE_BYTES - 1) / L2_CACHE_BYTES;
117#else
95 load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) * 118 load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) *
96 (1 << CHIP_LOG_NUM_MSHIMS()); 119 (1 << CHIP_LOG_NUM_MSHIMS());
120#endif
97 } else { 121 } else {
98 step_size = STRIPE_WIDTH; 122 step_size = STRIPE_WIDTH;
99 load_count = (1 << CHIP_LOG_NUM_MSHIMS()); 123 load_count = (1 << CHIP_LOG_NUM_MSHIMS());
@@ -109,7 +133,7 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
109 133
110 /* Figure out how far back we need to go. */ 134 /* Figure out how far back we need to go. */
111 base = p - (step_size * (load_count - 2)); 135 base = p - (step_size * (load_count - 2));
112 if ((long)base < (long)buffer) 136 if ((unsigned long)base < (unsigned long)buffer)
113 base = buffer; 137 base = buffer;
114 138
115 /* 139 /*
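
finv_buffer_remote() now picks the stripe width per chip (8KB on TILEPro, 512 bytes on TILE-Gx) and, for hash-for-home buffers on TILE-Gx, falls back to reloading every cache line of the buffer because the hash function no longer guarantees a small bounded set of lines. The resulting load counts are easy to see with example numbers (all constants below are illustrative stand-ins for the chip headers):

#include <stdio.h>

int main(void)
{
    const unsigned long L2_CACHE_BYTES = 64;
    const unsigned long STRIPE_WIDTH_PRO = 8192, STRIPE_WIDTH_GX = 512;
    const unsigned long LOG_NUM_MSHIMS = 2;    /* assume 4 memory controllers */
    const unsigned long size = 64 * 1024;      /* buffer being flushed */

    /* TILEPro, hash-for-home: one full stripe of lines per controller. */
    unsigned long pro_hfh = (STRIPE_WIDTH_PRO / L2_CACHE_BYTES) * (1UL << LOG_NUM_MSHIMS);
    /* TILE-Gx, hash-for-home: no such guarantee, so reload the whole buffer. */
    unsigned long gx_hfh = (size + L2_CACHE_BYTES - 1) / L2_CACHE_BYTES;
    /* Not hash-for-home: one load per controller, one stripe apart. */
    unsigned long plain = 1UL << LOG_NUM_MSHIMS;

    printf("pro hfh=%lu loads, gx hfh=%lu loads, plain=%lu loads (stripes %lu/%lu)\n",
           pro_hfh, gx_hfh, plain, STRIPE_WIDTH_PRO, STRIPE_WIDTH_GX);
    return 0;
}
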
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
index 4763b3aff1cc..37440caa7370 100644
--- a/arch/tile/lib/memcpy_user_64.c
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -14,7 +14,13 @@
14 * Do memcpy(), but trap and return "n" when a load or store faults. 14 * Do memcpy(), but trap and return "n" when a load or store faults.
15 * 15 *
16 * Note: this idiom only works when memcpy() compiles to a leaf function. 16 * Note: this idiom only works when memcpy() compiles to a leaf function.
17 * If "sp" is updated during memcpy, the "jrp lr" will be incorrect. 17 * Here "leaf function" not only means it makes no calls, but also
 18 * that it performs no stack operations (sp, stack frame pointer) and
 19 * uses no callee-saved registers; otherwise "jrp lr" would be incorrect,
 20 * since stack-frame unwinding is bypassed. Because memcpy() is simple,
 21 * these conditions are satisfied here, but we need to be careful when
 22 * modifying this file. This is not a clean solution but is the best
 23 * one so far.
18 * 24 *
19 * Also note that we are capturing "n" from the containing scope here. 25 * Also note that we are capturing "n" from the containing scope here.
20 */ 26 */
diff --git a/arch/tile/lib/spinlock_common.h b/arch/tile/lib/spinlock_common.h
index c10109809132..6ac37509faca 100644
--- a/arch/tile/lib/spinlock_common.h
+++ b/arch/tile/lib/spinlock_common.h
@@ -60,5 +60,5 @@ static void delay_backoff(int iterations)
60 loops += __insn_crc32_32(stack_pointer, get_cycles_low()) & 60 loops += __insn_crc32_32(stack_pointer, get_cycles_low()) &
61 (loops - 1); 61 (loops - 1);
62 62
63 relax(1 << exponent); 63 relax(loops);
64} 64}
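
The spinlock_common.h change is a one-liner: delay_backoff() computed a jittered loop count but then called relax(1 << exponent), throwing the jitter away; it now passes loops. A standalone sketch of the intended capped-exponential-backoff-with-jitter shape (the constants and the relax() stand-in are made up for the example):

#include <stdio.h>
#include <stdlib.h>

static void relax(unsigned int cycles)
{
    /* Stand-in for the cycle-burning delay. */
    for (volatile unsigned int i = 0; i < cycles; i++)
        ;
}

static void delay_backoff(int iterations)
{
    unsigned int exponent = iterations + 1;
    const unsigned int max_exponent = 10;   /* assumed cap for the example */
    if (exponent > max_exponent)
        exponent = max_exponent;

    unsigned int loops = 1u << exponent;
    loops += rand() & (loops - 1);          /* jitter in [0, loops) */
    relax(loops);                           /* the jittered value must be the one used */
}

int main(void)
{
    for (int i = 0; i < 5; i++)
        delay_backoff(i);
    printf("done\n");
    return 0;
}
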
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index cba30e9547b4..22e58f51ed23 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -130,7 +130,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
130} 130}
131 131
132/* 132/*
133 * Handle a fault on the vmalloc or module mapping area 133 * Handle a fault on the vmalloc area.
134 */ 134 */
135static inline int vmalloc_fault(pgd_t *pgd, unsigned long address) 135static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
136{ 136{
@@ -203,9 +203,14 @@ static pgd_t *get_current_pgd(void)
203 * interrupt or a critical region, and must do as little as possible. 203 * interrupt or a critical region, and must do as little as possible.
204 * Similarly, we can't use atomic ops here, since we may be handling a 204 * Similarly, we can't use atomic ops here, since we may be handling a
205 * fault caused by an atomic op access. 205 * fault caused by an atomic op access.
206 *
207 * If we find a migrating PTE while we're in an NMI context, and we're
208 * at a PC that has a registered exception handler, we don't wait,
209 * since this thread may (e.g.) have been interrupted while migrating
210 * its own stack, which would then cause us to self-deadlock.
206 */ 211 */
207static int handle_migrating_pte(pgd_t *pgd, int fault_num, 212static int handle_migrating_pte(pgd_t *pgd, int fault_num,
208 unsigned long address, 213 unsigned long address, unsigned long pc,
209 int is_kernel_mode, int write) 214 int is_kernel_mode, int write)
210{ 215{
211 pud_t *pud; 216 pud_t *pud;
@@ -227,6 +232,8 @@ static int handle_migrating_pte(pgd_t *pgd, int fault_num,
227 pte_offset_kernel(pmd, address); 232 pte_offset_kernel(pmd, address);
228 pteval = *pte; 233 pteval = *pte;
229 if (pte_migrating(pteval)) { 234 if (pte_migrating(pteval)) {
235 if (in_nmi() && search_exception_tables(pc))
236 return 0;
230 wait_for_migration(pte); 237 wait_for_migration(pte);
231 return 1; 238 return 1;
232 } 239 }
@@ -300,7 +307,7 @@ static int handle_page_fault(struct pt_regs *regs,
300 * rather than trying to patch up the existing PTE. 307 * rather than trying to patch up the existing PTE.
301 */ 308 */
302 pgd = get_current_pgd(); 309 pgd = get_current_pgd();
303 if (handle_migrating_pte(pgd, fault_num, address, 310 if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
304 is_kernel_mode, write)) 311 is_kernel_mode, write))
305 return 1; 312 return 1;
306 313
@@ -335,9 +342,12 @@ static int handle_page_fault(struct pt_regs *regs,
335 /* 342 /*
336 * If we're trying to touch user-space addresses, we must 343 * If we're trying to touch user-space addresses, we must
337 * be either at PL0, or else with interrupts enabled in the 344 * be either at PL0, or else with interrupts enabled in the
338 * kernel, so either way we can re-enable interrupts here. 345 * kernel, so either way we can re-enable interrupts here
346 * unless we are doing atomic access to user space with
347 * interrupts disabled.
339 */ 348 */
340 local_irq_enable(); 349 if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
350 local_irq_enable();
341 351
342 mm = tsk->mm; 352 mm = tsk->mm;
343 353
@@ -665,7 +675,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
665 */ 675 */
666 if (fault_num == INT_DTLB_ACCESS) 676 if (fault_num == INT_DTLB_ACCESS)
667 write = 1; 677 write = 1;
668 if (handle_migrating_pte(pgd, fault_num, address, 1, write)) 678 if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
669 return state; 679 return state;
670 680
671 /* Return zero so that we continue on with normal fault handling. */ 681 /* Return zero so that we continue on with normal fault handling. */
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 1cc6ae477c98..499f73770b05 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -394,6 +394,7 @@ int page_home(struct page *page)
394 return pte_to_home(*virt_to_pte(NULL, kva)); 394 return pte_to_home(*virt_to_pte(NULL, kva));
395 } 395 }
396} 396}
397EXPORT_SYMBOL(page_home);
397 398
398void homecache_change_page_home(struct page *page, int order, int home) 399void homecache_change_page_home(struct page *page, int order, int home)
399{ 400{
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 830c4908ea76..6a9d20ddc34f 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -254,11 +254,6 @@ static pgprot_t __init init_pgprot(ulong address)
254 return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE); 254 return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
255 } 255 }
256 256
257 /* As a performance optimization, keep the boot init stack here. */
258 if (address >= (ulong)&init_thread_union &&
259 address < (ulong)&init_thread_union + THREAD_SIZE)
260 return construct_pgprot(PAGE_KERNEL, smp_processor_id());
261
262#ifndef __tilegx__ 257#ifndef __tilegx__
263#if !ATOMIC_LOCKS_FOUND_VIA_TABLE() 258#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
264 /* Force the atomic_locks[] array page to be hash-for-home. */ 259 /* Force the atomic_locks[] array page to be hash-for-home. */
@@ -557,6 +552,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
557 552
558 address = MEM_SV_INTRPT; 553 address = MEM_SV_INTRPT;
559 pmd = get_pmd(pgtables, address); 554 pmd = get_pmd(pgtables, address);
555 pfn = 0; /* code starts at PA 0 */
560 if (ktext_small) { 556 if (ktext_small) {
561 /* Allocate an L2 PTE for the kernel text */ 557 /* Allocate an L2 PTE for the kernel text */
562 int cpu = 0; 558 int cpu = 0;
@@ -579,10 +575,15 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
579 } 575 }
580 576
581 BUG_ON(address != (unsigned long)_stext); 577 BUG_ON(address != (unsigned long)_stext);
582 pfn = 0; /* code starts at PA 0 */ 578 pte = NULL;
583 pte = alloc_pte(); 579 for (; address < (unsigned long)_einittext;
584 for (pte_ofs = 0; address < (unsigned long)_einittext; 580 pfn++, address += PAGE_SIZE) {
585 pfn++, pte_ofs++, address += PAGE_SIZE) { 581 pte_ofs = pte_index(address);
582 if (pte_ofs == 0) {
583 if (pte)
584 assign_pte(pmd++, pte);
585 pte = alloc_pte();
586 }
586 if (!ktext_local) { 587 if (!ktext_local) {
587 prot = set_remote_cache_cpu(prot, cpu); 588 prot = set_remote_cache_cpu(prot, cpu);
588 cpu = cpumask_next(cpu, &ktext_mask); 589 cpu = cpumask_next(cpu, &ktext_mask);
@@ -591,7 +592,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
591 } 592 }
592 pte[pte_ofs] = pfn_pte(pfn, prot); 593 pte[pte_ofs] = pfn_pte(pfn, prot);
593 } 594 }
594 assign_pte(pmd, pte); 595 if (pte)
596 assign_pte(pmd, pte);
595 } else { 597 } else {
596 pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC); 598 pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
597 pteval = pte_mkhuge(pteval); 599 pteval = pte_mkhuge(pteval);
@@ -614,7 +616,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
614 else 616 else
615 pteval = hv_pte_set_mode(pteval, 617 pteval = hv_pte_set_mode(pteval,
616 HV_PTE_MODE_CACHE_NO_L3); 618 HV_PTE_MODE_CACHE_NO_L3);
617 *(pte_t *)pmd = pteval; 619 for (; address < (unsigned long)_einittext;
620 pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
621 *(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
618 } 622 }
619 623
620 /* Set swapper_pgprot here so it is flushed to memory right away. */ 624 /* Set swapper_pgprot here so it is flushed to memory right away. */
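
The init.c hunk fixes kernel_physical_mapping_init() for kernel text that spans more than one huge page: instead of allocating a single L2 page table up front, the loop now allocates a fresh one each time pte_index(address) wraps to zero, and the huge-page branch likewise writes one PMD entry per huge page covered. The sizing itself is simple arithmetic; with assumed example geometry (not necessarily the tile defaults):

#include <stdio.h>

int main(void)
{
    const unsigned long PAGE_SIZE = 64UL * 1024;          /* assumed small page */
    const unsigned long HPAGE_SIZE = 16UL * 1024 * 1024;  /* assumed huge page / PMD span */
    const unsigned long text_bytes = 40UL * 1024 * 1024;  /* _stext .. _einittext */

    unsigned long ptes = (text_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
    unsigned long l2_tables = (text_bytes + HPAGE_SIZE - 1) / HPAGE_SIZE;
    /* The old code allocated exactly one L2 table; text this large needs three. */
    printf("%lu PTEs spread across %lu L2 page tables\n", ptes, l2_tables);
    return 0;
}
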
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 87303693a072..2410aa899b3e 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -177,14 +177,10 @@ void shatter_huge_page(unsigned long addr)
177 if (!pmd_huge_page(*pmd)) 177 if (!pmd_huge_page(*pmd))
178 return; 178 return;
179 179
180 /* 180 spin_lock_irqsave(&init_mm.page_table_lock, flags);
181 * Grab the pgd_lock, since we may need it to walk the pgd_list,
182 * and since we need some kind of lock here to avoid races.
183 */
184 spin_lock_irqsave(&pgd_lock, flags);
185 if (!pmd_huge_page(*pmd)) { 181 if (!pmd_huge_page(*pmd)) {
186 /* Lost the race to convert the huge page. */ 182 /* Lost the race to convert the huge page. */
187 spin_unlock_irqrestore(&pgd_lock, flags); 183 spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
188 return; 184 return;
189 } 185 }
190 186
@@ -194,6 +190,7 @@ void shatter_huge_page(unsigned long addr)
194 190
195#ifdef __PAGETABLE_PMD_FOLDED 191#ifdef __PAGETABLE_PMD_FOLDED
196 /* Walk every pgd on the system and update the pmd there. */ 192 /* Walk every pgd on the system and update the pmd there. */
193 spin_lock(&pgd_lock);
197 list_for_each(pos, &pgd_list) { 194 list_for_each(pos, &pgd_list) {
198 pmd_t *copy_pmd; 195 pmd_t *copy_pmd;
199 pgd = list_to_pgd(pos) + pgd_index(addr); 196 pgd = list_to_pgd(pos) + pgd_index(addr);
@@ -201,6 +198,7 @@ void shatter_huge_page(unsigned long addr)
201 copy_pmd = pmd_offset(pud, addr); 198 copy_pmd = pmd_offset(pud, addr);
202 __set_pmd(copy_pmd, *pmd); 199 __set_pmd(copy_pmd, *pmd);
203 } 200 }
201 spin_unlock(&pgd_lock);
204#endif 202#endif
205 203
206 /* Tell every cpu to notice the change. */ 204 /* Tell every cpu to notice the change. */
@@ -208,7 +206,7 @@ void shatter_huge_page(unsigned long addr)
208 cpu_possible_mask, NULL, 0); 206 cpu_possible_mask, NULL, 0);
209 207
210 /* Hold the lock until the TLB flush is finished to avoid races. */ 208 /* Hold the lock until the TLB flush is finished to avoid races. */
211 spin_unlock_irqrestore(&pgd_lock, flags); 209 spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
212} 210}
213 211
214/* 212/*
@@ -217,9 +215,13 @@ void shatter_huge_page(unsigned long addr)
217 * against pageattr.c; it is the unique case in which a valid change 215 * against pageattr.c; it is the unique case in which a valid change
218 * of kernel pagetables can't be lazily synchronized by vmalloc faults. 216 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
219 * vmalloc faults work because attached pagetables are never freed. 217 * vmalloc faults work because attached pagetables are never freed.
220 * The locking scheme was chosen on the basis of manfred's 218 *
221 * recommendations and having no core impact whatsoever. 219 * The lock is always taken with interrupts disabled, unlike on x86
222 * -- wli 220 * and other platforms, because we need to take the lock in
221 * shatter_huge_page(), which may be called from an interrupt context.
222 * We are not at risk from the tlbflush IPI deadlock that was seen on
223 * x86, since we use the flush_remote() API to have the hypervisor do
224 * the TLB flushes regardless of irq disabling.
223 */ 225 */
224DEFINE_SPINLOCK(pgd_lock); 226DEFINE_SPINLOCK(pgd_lock);
225LIST_HEAD(pgd_list); 227LIST_HEAD(pgd_list);
@@ -469,10 +471,18 @@ void __set_pte(pte_t *ptep, pte_t pte)
469 471
470void set_pte(pte_t *ptep, pte_t pte) 472void set_pte(pte_t *ptep, pte_t pte)
471{ 473{
472 struct page *page = pfn_to_page(pte_pfn(pte)); 474 if (pte_present(pte) &&
473 475 (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
474 /* Update the home of a PTE if necessary */ 476 /* The PTE actually references physical memory. */
475 pte = pte_set_home(pte, page_home(page)); 477 unsigned long pfn = pte_pfn(pte);
478 if (pfn_valid(pfn)) {
479 /* Update the home of the PTE from the struct page. */
480 pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
481 } else if (hv_pte_get_mode(pte) == 0) {
482 /* remap_pfn_range(), etc, must supply PTE mode. */
483 panic("set_pte(): out-of-range PFN and mode 0\n");
484 }
485 }
476 486
477 __set_pte(ptep, pte); 487 __set_pte(ptep, pte);
478} 488}
diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h
index dc36b222100b..6673508f3426 100644
--- a/arch/um/drivers/cow.h
+++ b/arch/um/drivers/cow.h
@@ -3,41 +3,6 @@
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6#if defined(__KERNEL__)
7
8# include <asm/byteorder.h>
9
10# if defined(__BIG_ENDIAN)
11# define ntohll(x) (x)
12# define htonll(x) (x)
13# elif defined(__LITTLE_ENDIAN)
14# define ntohll(x) be64_to_cpu(x)
15# define htonll(x) cpu_to_be64(x)
16# else
17# error "Could not determine byte order"
18# endif
19
20#else
21/* For the definition of ntohl, htonl and __BYTE_ORDER */
22#include <endian.h>
23#include <netinet/in.h>
24#if defined(__BYTE_ORDER)
25
26# if __BYTE_ORDER == __BIG_ENDIAN
27# define ntohll(x) (x)
28# define htonll(x) (x)
29# elif __BYTE_ORDER == __LITTLE_ENDIAN
30# define ntohll(x) bswap_64(x)
31# define htonll(x) bswap_64(x)
32# else
33# error "Could not determine byte order: __BYTE_ORDER uncorrectly defined"
34# endif
35
36#else /* ! defined(__BYTE_ORDER) */
37# error "Could not determine byte order: __BYTE_ORDER not defined"
38#endif
39#endif /* ! defined(__KERNEL__) */
40
41extern int init_cow_file(int fd, char *cow_file, char *backing_file, 6extern int init_cow_file(int fd, char *cow_file, char *backing_file,
42 int sectorsize, int alignment, int *bitmap_offset_out, 7 int sectorsize, int alignment, int *bitmap_offset_out,
43 unsigned long *bitmap_len_out, int *data_offset_out); 8 unsigned long *bitmap_len_out, int *data_offset_out);
diff --git a/arch/um/drivers/cow_user.c b/arch/um/drivers/cow_user.c
index 9cbb426c0b91..0ee9cc6cc4c7 100644
--- a/arch/um/drivers/cow_user.c
+++ b/arch/um/drivers/cow_user.c
@@ -8,11 +8,10 @@
8 * that. 8 * that.
9 */ 9 */
10#include <unistd.h> 10#include <unistd.h>
11#include <byteswap.h>
12#include <errno.h> 11#include <errno.h>
13#include <string.h> 12#include <string.h>
14#include <arpa/inet.h> 13#include <arpa/inet.h>
15#include <asm/types.h> 14#include <endian.h>
16#include "cow.h" 15#include "cow.h"
17#include "cow_sys.h" 16#include "cow_sys.h"
18 17
@@ -214,8 +213,8 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
214 "header\n"); 213 "header\n");
215 goto out; 214 goto out;
216 } 215 }
217 header->magic = htonl(COW_MAGIC); 216 header->magic = htobe32(COW_MAGIC);
218 header->version = htonl(COW_VERSION); 217 header->version = htobe32(COW_VERSION);
219 218
220 err = -EINVAL; 219 err = -EINVAL;
221 if (strlen(backing_file) > sizeof(header->backing_file) - 1) { 220 if (strlen(backing_file) > sizeof(header->backing_file) - 1) {
@@ -246,10 +245,10 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
246 goto out_free; 245 goto out_free;
247 } 246 }
248 247
249 header->mtime = htonl(modtime); 248 header->mtime = htobe32(modtime);
250 header->size = htonll(*size); 249 header->size = htobe64(*size);
251 header->sectorsize = htonl(sectorsize); 250 header->sectorsize = htobe32(sectorsize);
252 header->alignment = htonl(alignment); 251 header->alignment = htobe32(alignment);
253 header->cow_format = COW_BITMAP; 252 header->cow_format = COW_BITMAP;
254 253
255 err = cow_write_file(fd, header, sizeof(*header)); 254 err = cow_write_file(fd, header, sizeof(*header));
@@ -301,8 +300,8 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
301 magic = header->v1.magic; 300 magic = header->v1.magic;
302 if (magic == COW_MAGIC) 301 if (magic == COW_MAGIC)
303 version = header->v1.version; 302 version = header->v1.version;
304 else if (magic == ntohl(COW_MAGIC)) 303 else if (magic == be32toh(COW_MAGIC))
305 version = ntohl(header->v1.version); 304 version = be32toh(header->v1.version);
306 /* No error printed because the non-COW case comes through here */ 305 /* No error printed because the non-COW case comes through here */
307 else goto out; 306 else goto out;
308 307
@@ -327,9 +326,9 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
327 "header\n"); 326 "header\n");
328 goto out; 327 goto out;
329 } 328 }
330 *mtime_out = ntohl(header->v2.mtime); 329 *mtime_out = be32toh(header->v2.mtime);
331 *size_out = ntohll(header->v2.size); 330 *size_out = be64toh(header->v2.size);
332 *sectorsize_out = ntohl(header->v2.sectorsize); 331 *sectorsize_out = be32toh(header->v2.sectorsize);
333 *bitmap_offset_out = sizeof(header->v2); 332 *bitmap_offset_out = sizeof(header->v2);
334 *align_out = *sectorsize_out; 333 *align_out = *sectorsize_out;
335 file = header->v2.backing_file; 334 file = header->v2.backing_file;
@@ -341,10 +340,10 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
341 "header\n"); 340 "header\n");
342 goto out; 341 goto out;
343 } 342 }
344 *mtime_out = ntohl(header->v3.mtime); 343 *mtime_out = be32toh(header->v3.mtime);
345 *size_out = ntohll(header->v3.size); 344 *size_out = be64toh(header->v3.size);
346 *sectorsize_out = ntohl(header->v3.sectorsize); 345 *sectorsize_out = be32toh(header->v3.sectorsize);
347 *align_out = ntohl(header->v3.alignment); 346 *align_out = be32toh(header->v3.alignment);
348 if (*align_out == 0) { 347 if (*align_out == 0) {
349 cow_printf("read_cow_header - invalid COW header, " 348 cow_printf("read_cow_header - invalid COW header, "
350 "align == 0\n"); 349 "align == 0\n");
@@ -366,16 +365,16 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
366 * this was used until Dec2005 - 64bits are needed to represent 365 * this was used until Dec2005 - 64bits are needed to represent
367 * 2038+. I.e. we can safely do this truncating cast. 366 * 2038+. I.e. we can safely do this truncating cast.
368 * 367 *
369 * Additionally, we must use ntohl() instead of ntohll(), since 368 * Additionally, we must use be32toh() instead of be64toh(), since
370 * the program used to use the former (tested - I got mtime 369 * the program used to use the former (tested - I got mtime
371 * mismatch "0 vs whatever"). 370 * mismatch "0 vs whatever").
372 * 371 *
373 * Ever heard about bug-to-bug-compatibility ? ;-) */ 372 * Ever heard about bug-to-bug-compatibility ? ;-) */
374 *mtime_out = (time32_t) ntohl(header->v3_b.mtime); 373 *mtime_out = (time32_t) be32toh(header->v3_b.mtime);
375 374
376 *size_out = ntohll(header->v3_b.size); 375 *size_out = be64toh(header->v3_b.size);
377 *sectorsize_out = ntohl(header->v3_b.sectorsize); 376 *sectorsize_out = be32toh(header->v3_b.sectorsize);
378 *align_out = ntohl(header->v3_b.alignment); 377 *align_out = be32toh(header->v3_b.alignment);
379 if (*align_out == 0) { 378 if (*align_out == 0) {
380 cow_printf("read_cow_header - invalid COW header, " 379 cow_printf("read_cow_header - invalid COW header, "
381 "align == 0\n"); 380 "align == 0\n");
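
cow.h and cow_user.c drop the hand-rolled ntohll()/htonll() macros and the __KERNEL__/__BYTE_ORDER maze in favour of the htobe32/htobe64/be32toh/be64toh family from <endian.h>, which covers the 64-bit header fields that htonl()/ntohl() cannot. A minimal round-trip with the same calls (the values are arbitrary examples):

#include <endian.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t size = 0x0000000123456789ULL;   /* e.g. a 64-bit COW "size" field */
    uint64_t wire = htobe64(size);           /* big-endian on-disk representation */
    uint32_t mtime = htobe32(1334275200);    /* 32-bit fields use the 32-bit form */

    printf("host %#" PRIx64 " -> wire %#" PRIx64 " -> host %#" PRIx64 "\n",
           size, wire, be64toh(wire));
    printf("mtime round-trips: %d\n", be32toh(mtime) == 1334275200);
    return 0;
}
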
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index e672bd6d43e3..43b39d61b538 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -22,6 +22,7 @@
22#include <linux/workqueue.h> 22#include <linux/workqueue.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <asm/uaccess.h> 24#include <asm/uaccess.h>
25#include <asm/switch_to.h>
25 26
26#include "init.h" 27#include "init.h"
27#include "irq_kern.h" 28#include "irq_kern.h"
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 8419f5cf2ac7..fff24352255d 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -1,3 +1,4 @@
1generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h 1generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
2generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h 2generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
3generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h 3generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
4generic-y += switch_to.h
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 492bc4c1b62b..65a1c3d690ea 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -3,9 +3,10 @@
3# Licensed under the GPL 3# Licensed under the GPL
4# 4#
5 5
6CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \ 6CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \
7 -DELF_ARCH=$(LDS_ELF_ARCH) \ 7 -DELF_ARCH=$(LDS_ELF_ARCH) \
8 -DELF_FORMAT=$(LDS_ELF_FORMAT) 8 -DELF_FORMAT=$(LDS_ELF_FORMAT) \
9 $(LDS_EXTRA)
9extra-y := vmlinux.lds 10extra-y := vmlinux.lds
10clean-files := 11clean-files :=
11 12
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index f386d04a84a5..2b73dedb44ca 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -88,11 +88,8 @@ static inline void set_current(struct task_struct *task)
88 88
89extern void arch_switch_to(struct task_struct *to); 89extern void arch_switch_to(struct task_struct *to);
90 90
91void *_switch_to(void *prev, void *next, void *last) 91void *__switch_to(struct task_struct *from, struct task_struct *to)
92{ 92{
93 struct task_struct *from = prev;
94 struct task_struct *to = next;
95
96 to->thread.prev_sched = from; 93 to->thread.prev_sched = from;
97 set_current(to); 94 set_current(to);
98 95
@@ -111,7 +108,6 @@ void *_switch_to(void *prev, void *next, void *last)
 	} while (current->thread.saved_task);
 
 	return current->thread.prev_sched;
-
 }
 
 void interrupt_end(void)
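
The new __switch_to() signature matches the wrapper that asm-generic/switch_to.h supplies once "generic-y += switch_to.h" (see the Kbuild hunk above) pulls it in; the old _switch_to() wrapper lived in arch/x86/um/asm/system.h, which is deleted later in this diff. Roughly, the generic wrapper looks like the sketch below (paraphrased from memory, not part of this patch):

extern struct task_struct *__switch_to(struct task_struct *prev,
				       struct task_struct *next);

#define switch_to(prev, next, last)			\
	do {						\
		((last) = __switch_to((prev), (next)));	\
	} while (0)
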
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 4947b319f53a..0a49ef0c2bf4 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -103,7 +103,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 
 void uml_setup_stubs(struct mm_struct *mm)
 {
-	struct page **pages;
 	int err, ret;
 
 	if (!skas_needs_stub)
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 4be406abeefd..36b62bc52638 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -14,6 +14,9 @@ LINK-y += $(call cc-option,-m32)
 
 export LDFLAGS
 
+LDS_EXTRA := -Ui386
+export LDS_EXTRA
+
 # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
 include $(srctree)/arch/x86/Makefile_32.cpu
 
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8be5f54d9360..e0544597cfe7 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -557,6 +557,8 @@ struct __large_struct { unsigned long buf[100]; };
 
 extern unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
+extern __must_check long
+strncpy_from_user(char *dst, const char __user *src, long count);
 
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 566e803cc602..8084bc73b18c 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -213,11 +213,6 @@ static inline unsigned long __must_check copy_from_user(void *to,
 	return n;
 }
 
-long __must_check strncpy_from_user(char *dst, const char __user *src,
-				    long count);
-long __must_check __strncpy_from_user(char *dst,
-				      const char __user *src, long count);
-
 /**
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 1c66d30971ad..fcd4b6f3ef02 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -208,10 +208,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 	}
 }
 
-__must_check long
-strncpy_from_user(char *dst, const char __user *src, long count);
-__must_check long
-__strncpy_from_user(char *dst, const char __user *src, long count);
 __must_check long strnlen_user(const char __user *str, long n);
 __must_check long __strnlen_user(const char __user *str, long n);
 __must_check long strlen_user(const char __user *str);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 694d801bf606..b8ba6e4a27e4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -38,6 +38,7 @@
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/tlbflush.h>
+#include <asm/idle.h>
 
 static int kvmapf = 1;
 
@@ -253,7 +254,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
 		kvm_async_pf_task_wait((u32)read_cr2());
 		break;
 	case KVM_PV_REASON_PAGE_READY:
+		rcu_irq_enter();
+		exit_idle();
 		kvm_async_pf_task_wake((u32)read_cr2());
+		rcu_irq_exit();
 		break;
 	}
 }
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index a73f0c104813..173df38dbda5 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -369,7 +369,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 		if (pmu->fixed_ctr_ctrl == data)
 			return 0;
-		if (!(data & 0xfffffffffffff444)) {
+		if (!(data & 0xfffffffffffff444ull)) {
 			reprogram_fixed_counters(pmu, data);
 			return 0;
 		}
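
Only the ull suffix changes here: it makes the 64-bit, unsigned reserved-bit mask explicit so the constant has the intended type on 32-bit builds as well. A self-contained illustration of the same reserved-bit check pattern (hypothetical names, not kernel code):

#include <stdbool.h>
#include <stdint.h>

#define FIXED_CTR_CTRL_RESERVED	0xfffffffffffff444ull	/* bits the guest may not set */

static bool fixed_ctr_ctrl_is_valid(uint64_t data)
{
	/* the write is accepted only if no reserved bit is set */
	return (data & FIXED_CTR_CTRL_RESERVED) == 0;
}
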
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 280751c84724..ad85adfef843 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3906,7 +3906,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
 	vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	vmx_set_cr4(&vmx->vcpu, 0);
 	vmx_set_efer(&vmx->vcpu, 0);
 	vmx_fpu_activate(&vmx->vcpu);
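
srcu_read_lock()/srcu_read_unlock() bracket an SRCU read-side critical section, presumably because the real-mode setup triggered by vmx_set_cr0() can reach SRCU-protected data such as kvm->memslots. The read-side pattern, sketched with a hypothetical kvm pointer in scope:

	int idx;

	idx = srcu_read_lock(&kvm->srcu);	/* enter read-side critical section */
	/* ... code that may dereference SRCU-protected structures ... */
	srcu_read_unlock(&kvm->srcu, idx);	/* pass back the index from srcu_read_lock() */
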
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index 97be9cb54483..57252c928f56 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -7,6 +7,8 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 
+#include <asm/word-at-a-time.h>
+
 /*
  * best effort, GUP based copy_from_user() that is NMI-safe
  */
@@ -41,3 +43,104 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	return len;
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
+
+static inline unsigned long count_bytes(unsigned long mask)
+{
+	mask = (mask - 1) & ~mask;
+	mask >>= 7;
+	return count_masked_bytes(mask);
+}
+
+/*
+ * Do a strncpy, return length of string without final '\0'.
+ * 'count' is the user-supplied count (return 'count' if we
+ * hit it), 'max' is the address space maximum (and we return
+ * -EFAULT if we hit it).
+ */
+static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, long max)
+{
+	long res = 0;
+
+	/*
+	 * Truncate 'max' to the user-specified limit, so that
+	 * we only have one limit we need to check in the loop
+	 */
+	if (max > count)
+		max = count;
+
+	while (max >= sizeof(unsigned long)) {
+		unsigned long c;
+
+		/* Fall back to byte-at-a-time if we get a page fault */
+		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+			break;
+		/* This can write a few bytes past the NUL character, but that's ok */
+		*(unsigned long *)(dst+res) = c;
+		c = has_zero(c);
+		if (c)
+			return res + count_bytes(c);
+		res += sizeof(unsigned long);
+		max -= sizeof(unsigned long);
+	}
+
+	while (max) {
+		char c;
+
+		if (unlikely(__get_user(c,src+res)))
+			return -EFAULT;
+		dst[res] = c;
+		if (!c)
+			return res;
+		res++;
+		max--;
+	}
+
+	/*
+	 * Uhhuh. We hit 'max'. But was that the user-specified maximum
+	 * too? If so, that's ok - we got as much as the user asked for.
+	 */
+	if (res >= count)
+		return count;
+
+	/*
+	 * Nope: we hit the address space limit, and we still had more
+	 * characters the caller would have wanted. That's an EFAULT.
+	 */
+	return -EFAULT;
+}
+
+/**
+ * strncpy_from_user: - Copy a NUL terminated string from userspace.
+ * @dst: Destination address, in kernel space. This buffer must be at
+ *       least @count bytes long.
+ * @src: Source address, in user space.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from userspace to kernel space.
+ *
+ * On success, returns the length of the string (not including the trailing
+ * NUL).
+ *
+ * If access to userspace fails, returns -EFAULT (some data may have been
+ * copied).
+ *
+ * If @count is smaller than the length of the string, copies @count bytes
+ * and returns @count.
+ */
+long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	unsigned long max_addr, src_addr;
+
+	if (unlikely(count <= 0))
+		return 0;
+
+	max_addr = current_thread_info()->addr_limit.seg;
+	src_addr = (unsigned long)src;
+	if (likely(src_addr < max_addr)) {
+		unsigned long max = max_addr - src_addr;
+		return do_strncpy_from_user(dst, src, count, max);
+	}
+	return -EFAULT;
+}
+EXPORT_SYMBOL(strncpy_from_user);
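
The new generic strncpy_from_user() copies a word at a time and relies on has_zero() plus count_masked_bytes() from <asm/word-at-a-time.h> to locate the terminating NUL within a word. A standalone little-endian userspace sketch of that zero-byte trick (hypothetical helper names, not the kernel's header):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONES	0x0101010101010101ull
#define HIGHS	0x8080808080808080ull

/* Non-zero iff some byte of v is zero; the lowest set marker bit
 * belongs to the first zero byte (little-endian). */
static unsigned long long has_zero(unsigned long long v)
{
	return (v - ONES) & ~v & HIGHS;
}

static unsigned int first_zero_byte(unsigned long long mask)
{
	/* the marker sits in bit 7 of the zero byte, so ctz/8 is its index */
	return __builtin_ctzll(mask) / 8;
}

int main(void)
{
	const char s[8] = "abc";	/* 'a' 'b' 'c' '\0' '\0' ... */
	unsigned long long w;

	memcpy(&w, s, sizeof(w));
	if (has_zero(w))
		printf("first NUL at byte %u\n", first_zero_byte(has_zero(w)));
	return 0;
}

The kernel's count_bytes() above derives the same byte index from the marker mask without a branch; the byte-at-a-time loop remains as the fallback for faults and the tail of the buffer.
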
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index d9b094ca7aaa..ef2a6a5d78e3 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -33,93 +33,6 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
 	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
 
 /*
- * Copy a null terminated string from userspace.
- */
-
-#define __do_strncpy_from_user(dst, src, count, res) \
-do { \
-	int __d0, __d1, __d2; \
-	might_fault(); \
-	__asm__ __volatile__( \
-		"	testl %1,%1\n" \
-		"	jz 2f\n" \
-		"0:	lodsb\n" \
-		"	stosb\n" \
-		"	testb %%al,%%al\n" \
-		"	jz 1f\n" \
-		"	decl %1\n" \
-		"	jnz 0b\n" \
-		"1:	subl %1,%0\n" \
-		"2:\n" \
-		".section .fixup,\"ax\"\n" \
-		"3:	movl %5,%0\n" \
-		"	jmp 2b\n" \
-		".previous\n" \
-		_ASM_EXTABLE(0b,3b) \
-		: "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
-		  "=&D" (__d2) \
-		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
-		: "memory"); \
-} while (0)
-
-/**
- * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
- * @dst: Destination address, in kernel space. This buffer must be at
- *       least @count bytes long.
- * @src: Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- * Caller must check the specified block with access_ok() before calling
- * this function.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res;
-	__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-EXPORT_SYMBOL(__strncpy_from_user);
-
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst: Destination address, in kernel space. This buffer must be at
- *       least @count bytes long.
- * @src: Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-EXPORT_SYMBOL(strncpy_from_user);
-
-/*
  * Zero Userspace
  */
 
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index b7c2849ffb66..0d0326f388c0 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -9,55 +9,6 @@
 #include <asm/uaccess.h>
 
 /*
- * Copy a null terminated string from userspace.
- */
-
-#define __do_strncpy_from_user(dst,src,count,res) \
-do { \
-	long __d0, __d1, __d2; \
-	might_fault(); \
-	__asm__ __volatile__( \
-		"	testq %1,%1\n" \
-		"	jz 2f\n" \
-		"0:	lodsb\n" \
-		"	stosb\n" \
-		"	testb %%al,%%al\n" \
-		"	jz 1f\n" \
-		"	decq %1\n" \
-		"	jnz 0b\n" \
-		"1:	subq %1,%0\n" \
-		"2:\n" \
-		".section .fixup,\"ax\"\n" \
-		"3:	movq %5,%0\n" \
-		"	jmp 2b\n" \
-		".previous\n" \
-		_ASM_EXTABLE(0b,3b) \
-		: "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
-		  "=&D" (__d2) \
-		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
-		: "memory"); \
-} while (0)
-
-long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res;
-	__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-EXPORT_SYMBOL(__strncpy_from_user);
-
-long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		return __strncpy_from_user(dst, src, count);
-	return res;
-}
-EXPORT_SYMBOL(strncpy_from_user);
-
-/*
  * Zero Userspace
  */
 
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
new file mode 100644
index 000000000000..7d01b8c56c00
--- /dev/null
+++ b/arch/x86/um/asm/barrier.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_UM_BARRIER_H_
+#define _ASM_UM_BARRIER_H_
+
+#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+
+#else /* CONFIG_X86_32 */
+
+#define mb() asm volatile("mfence" : : : "memory")
+#define rmb() asm volatile("lfence" : : : "memory")
+#define wmb() asm volatile("sfence" : : : "memory")
+
+#endif /* CONFIG_X86_32 */
+
+#define read_barrier_depends() do { } while (0)
+
+#ifdef CONFIG_SMP
+
+#define smp_mb() mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+#define smp_rmb() rmb()
+#else /* CONFIG_X86_PPRO_FENCE */
+#define smp_rmb() barrier()
+#endif /* CONFIG_X86_PPRO_FENCE */
+
+#ifdef CONFIG_X86_OOSTORE
+#define smp_wmb() wmb()
+#else /* CONFIG_X86_OOSTORE */
+#define smp_wmb() barrier()
+#endif /* CONFIG_X86_OOSTORE */
+
+#define smp_read_barrier_depends() read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
+#else /* CONFIG_SMP */
+
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+static inline void rdtsc_barrier(void)
+{
+	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif
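
The new header only carries over the barrier definitions UML needs from the old asm/system.h (removed below). As a reminder of what they pair up for, a minimal producer/consumer sketch assuming the smp_wmb()/smp_rmb() macros defined above (illustration only, not from this patch):

static int payload;
static int ready;

static void producer(void)
{
	payload = 42;	/* publish the data ...                 */
	smp_wmb();	/* ... before the flag can become visible */
	ready = 1;
}

static int consumer(void)
{
	if (!ready)
		return -1;
	smp_rmb();	/* order the flag read before the payload read */
	return payload;
}

On !CONFIG_SMP builds both macros collapse to plain compiler barriers, which is exactly the distinction the header encodes.
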
diff --git a/arch/x86/um/asm/system.h b/arch/x86/um/asm/system.h
deleted file mode 100644
index a459fd9b7598..000000000000
--- a/arch/x86/um/asm/system.h
+++ /dev/null
@@ -1,135 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_H_
-#define _ASM_X86_SYSTEM_H_
-
-#include <asm/asm.h>
-#include <asm/segment.h>
-#include <asm/cpufeature.h>
-#include <asm/cmpxchg.h>
-#include <asm/nops.h>
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-
-/* entries in ARCH_DLINFO: */
-#ifdef CONFIG_IA32_EMULATION
-# define AT_VECTOR_SIZE_ARCH 2
-#else
-# define AT_VECTOR_SIZE_ARCH 1
-#endif
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-void default_idle(void);
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- */
-#ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
-#endif
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb() rmb()
-#else
-# define smp_rmb() barrier()
-#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() wmb()
-#else
-# define smp_wmb() barrier()
-#endif
-#define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-/*
- * Stop RDTSC speculation. This is needed when you need to use RDTSC
- * (or get_cycles or vread that possibly accesses the TSC) in a defined
- * code region.
- *
- * (Could use an alternative three way for this if there was one.)
- */
-static inline void rdtsc_barrier(void)
-{
-	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
-	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
-}
-
-extern void *_switch_to(void *prev, void *next, void *last);
-#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
-
-#endif
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 988828b479ed..b8e279479a6b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1859,6 +1859,7 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 #endif /* CONFIG_X86_64 */
 
 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
+static unsigned char fake_ioapic_mapping[PAGE_SIZE] __page_aligned_bss;
 
 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 {
@@ -1899,7 +1900,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 	 * We just don't map the IO APIC - all access is via
 	 * hypercalls. Keep the address in the pte for reference.
 	 */
-	pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
+	pte = pfn_pte(PFN_DOWN(__pa(fake_ioapic_mapping)), PAGE_KERNEL);
 	break;
 #endif
 
@@ -2064,6 +2065,7 @@ void __init xen_init_mmu_ops(void)
 	pv_mmu_ops = xen_mmu_ops;
 
 	memset(dummy_mapping, 0xff, PAGE_SIZE);
+	memset(fake_ioapic_mapping, 0xfd, PAGE_SIZE);
 }
 
 /* Protected by xen_reservation_lock. */
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 02900e8ce26c..5fac6919b957 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -59,7 +59,7 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 
 static void __cpuinit cpu_bringup(void)
 {
-	int cpu = smp_processor_id();
+	int cpu;
 
 	cpu_init();
 	touch_softlockup_watchdog();