author    Linus Torvalds <torvalds@linux-foundation.org>  2013-02-24 19:06:13 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-02-24 19:18:31 -0500
commit    77be36de8b07027a70fbc8f02703ccd76cd16d09 (patch)
tree      e2310718e39cb0dcfe93942d4c990df9154a7ecd /arch
parent    89f883372fa60f604d136924baf3e89ff1870e9e (diff)
parent    c81611c4e96f595a80d8be9367c385d2c116428b (diff)
Merge tag 'stable/for-linus-3.9-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
Pull Xen update from Konrad Rzeszutek Wilk:
 "This has two new ACPI drivers for Xen - a physical CPU offline/online
  driver and a memory hotplug driver.  The way this works is that ACPI
  kicks the drivers and they make the appropriate hypercall to the
  hypervisor to tell it that there is a new CPU or memory.  There are
  also some changes to the Xen ARM ABIs and a couple of fixes.  One
  particularly nasty bug in the Xen PV spinlock code was fixed by
  Stefan Bader - and has been there since 2.6.32!

  Features:
   - Xen ACPI memory and CPU hotplug drivers - allowing the Xen
     hypervisor to be aware of new CPUs and new DIMMs
   - Cleanups

  Bug-fixes:
   - Fixes a long-standing bug in the PV spinlock wherein we did not
     kick VCPUs that were in a tight loop.
   - Fixes in the error paths for the event channel machinery"

Fix up a few semantic conflicts with the ACPI interface changes in
drivers/xen/xen-acpi-{cpu,mem}hotplug.c.

* tag 'stable/for-linus-3.9-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen: event channel arrays are xen_ulong_t and not unsigned long
  xen: Send spinlock IPI to all waiters
  xen: introduce xen_remap, use it instead of ioremap
  xen: close evtchn port if binding to irq fails
  xen-evtchn: correct comment and error output
  xen/tmem: Add missing %s in the printk statement.
  xen/acpi: move xen_acpi_get_pxm under CONFIG_XEN_DOM0
  xen/acpi: ACPI cpu hotplug
  xen/acpi: Move xen_acpi_get_pxm to Xen's acpi.h
  xen/stub: driver for CPU hotplug
  xen/acpi: ACPI memory hotplug
  xen/stub: driver for memory hotplug
  xen: implement updated XENMEM_add_to_physmap_range ABI
  xen/smp: Move the common CPU init code a bit to prep for PVH patch.
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/xen/events.h | 22
-rw-r--r--  arch/arm/include/asm/xen/page.h   |  4
-rw-r--r--  arch/arm/xen/enlighten.c          |  8
-rw-r--r--  arch/x86/include/asm/xen/events.h |  3
-rw-r--r--  arch/x86/include/asm/xen/page.h   |  2
-rw-r--r--  arch/x86/xen/smp.c                | 42
-rw-r--r--  arch/x86/xen/spinlock.c           |  1
7 files changed, 59 insertions(+), 23 deletions(-)
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 94b4e9020b02..5c27696de14f 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -15,4 +15,26 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
         return raw_irqs_disabled_flags(regs->ARM_cpsr);
 }
 
+/*
+ * We cannot use xchg because it does not support 8-byte
+ * values. However it is safe to use {ldr,str}exd directly because all
+ * platforms which Xen can run on support those instructions.
+ */
+static inline xen_ulong_t xchg_xen_ulong(xen_ulong_t *ptr, xen_ulong_t val)
+{
+        xen_ulong_t oldval;
+        unsigned int tmp;
+
+        wmb();
+        asm volatile("@ xchg_xen_ulong\n"
+                "1: ldrexd %0, %H0, [%3]\n"
+                "   strexd %1, %2, %H2, [%3]\n"
+                "   teq %1, #0\n"
+                "   bne 1b"
+                : "=&r" (oldval), "=&r" (tmp)
+                : "r" (val), "r" (ptr)
+                : "memory", "cc");
+        return oldval;
+}
+
 #endif /* _ASM_ARM_XEN_EVENTS_H */
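
A note on how this new helper gets used: the event-channel core (in drivers/xen, outside this arch-limited diffstat) snapshots and clears a whole word of pending-event bits in one atomic operation, and on ARM that word is 64-bit wide. A minimal usage sketch, assuming the 3.9-era upcall path; treat it as illustrative rather than a quote from the tree:

    /* Illustrative sketch: atomically fetch and clear the per-VCPU
     * selector word of pending event channels.  xen_ulong_t is 64-bit
     * on ARM, which is why a plain xchg() cannot be used there. */
    xen_ulong_t pending_words =
            xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
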
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index c6b9096cef95..30cdacb675af 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_ARM_XEN_PAGE_H
 #define _ASM_ARM_XEN_PAGE_H
 
+#include <asm/mach/map.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
@@ -86,4 +87,7 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
         return __set_phys_to_machine(pfn, mfn);
 }
+
+#define xen_remap(cookie, size) __arm_ioremap((cookie), (size), MT_MEMORY);
+
 #endif /* _ASM_ARM_XEN_PAGE_H */
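
The reason this is not a plain ioremap(): the regions Xen shares with an ARM guest (grant table, shared info page) are ordinary RAM, so they want normal-memory attributes (MT_MEMORY) rather than the device-type mapping ioremap() would create. A usage sketch in the spirit of the "xen: introduce xen_remap" patch; the variable names here are illustrative, not taken from the tree:

    /* Illustrative sketch: map the frames Xen handed us for the grant
     * table.  xen_remap() picks suitable attributes per architecture:
     * __arm_ioremap(..., MT_MEMORY) on ARM, plain ioremap() on x86. */
    void *gnttab = xen_remap(grant_frames_phys, PAGE_SIZE * nr_grant_frames);
    if (!gnttab)
            return -ENOMEM;
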
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 7a32976fa2a3..8dc0605a9ce9 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -59,14 +59,16 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
         };
         xen_ulong_t idx = fgmfn;
         xen_pfn_t gpfn = lpfn;
+        int err = 0;
 
         set_xen_guest_handle(xatp.idxs, &idx);
         set_xen_guest_handle(xatp.gpfns, &gpfn);
+        set_xen_guest_handle(xatp.errs, &err);
 
         rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
-        if (rc) {
-                pr_warn("Failed to map pfn to mfn rc:%d pfn:%lx mfn:%lx\n",
-                        rc, lpfn, fgmfn);
+        if (rc || err) {
+                pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
+                        rc, err, lpfn, fgmfn);
                 return 1;
         }
         return 0;
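
For readers cross-referencing the "xen: implement updated XENMEM_add_to_physmap_range ABI" commit in the shortlog: the updated hypercall reports a per-entry status through an errs[] guest handle, which is why map_foreign_page() now passes &err and checks it alongside the hypercall's own return value. An abridged sketch of the argument structure, paraphrased from the Xen public headers; take the exact layout as an approximation, since only the three handles are visible in the hunk above:

    struct xen_add_to_physmap_range {
            /* IN */
            domid_t domid;                    /* domain whose p2m is changed */
            uint16_t space;                   /* XENMAPSPACE_* source space */
            uint16_t size;                    /* number of entries */
            domid_t foreign_domid;            /* for foreign (grant) mappings */
            GUEST_HANDLE(xen_ulong_t) idxs;   /* indexes into the source space */
            GUEST_HANDLE(xen_pfn_t) gpfns;    /* guest pfns to populate */
            /* OUT */
            GUEST_HANDLE(int) errs;           /* per-entry error code */
    };
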
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index cc146d51449e..ca842f2769ef 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -16,4 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
         return raw_irqs_disabled_flags(regs->flags);
 }
 
+/* No need for a barrier -- XCHG is a barrier on x86. */
+#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
+
 #endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 472b9b783019..6aef9fbc09b7 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -212,4 +212,6 @@ unsigned long arbitrary_virt_to_mfn(void *vaddr);
 void make_lowmem_page_readonly(void *vaddr);
 void make_lowmem_page_readwrite(void *vaddr);
 
+#define xen_remap(cookie, size) ioremap((cookie), (size));
+
 #endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 34bc4cee8887..09ea61d2e02f 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -300,8 +300,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
         gdt = get_cpu_gdt_table(cpu);
 
         ctxt->flags = VGCF_IN_KERNEL;
-        ctxt->user_regs.ds = __USER_DS;
-        ctxt->user_regs.es = __USER_DS;
         ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
         ctxt->user_regs.fs = __KERNEL_PERCPU;
@@ -310,35 +308,41 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
         ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
         ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
-        ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
 
         memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
 
-        xen_copy_trap_info(ctxt->trap_ctxt);
+        {
+                ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
+                ctxt->user_regs.ds = __USER_DS;
+                ctxt->user_regs.es = __USER_DS;
 
-        ctxt->ldt_ents = 0;
+                xen_copy_trap_info(ctxt->trap_ctxt);
 
-        BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+                ctxt->ldt_ents = 0;
 
-        gdt_mfn = arbitrary_virt_to_mfn(gdt);
-        make_lowmem_page_readonly(gdt);
-        make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
+                BUG_ON((unsigned long)gdt & ~PAGE_MASK);
 
-        ctxt->gdt_frames[0] = gdt_mfn;
-        ctxt->gdt_ents = GDT_ENTRIES;
+                gdt_mfn = arbitrary_virt_to_mfn(gdt);
+                make_lowmem_page_readonly(gdt);
+                make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
 
-        ctxt->user_regs.cs = __KERNEL_CS;
-        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
+                ctxt->gdt_frames[0] = gdt_mfn;
+                ctxt->gdt_ents = GDT_ENTRIES;
 
-        ctxt->kernel_ss = __KERNEL_DS;
-        ctxt->kernel_sp = idle->thread.sp0;
+                ctxt->kernel_ss = __KERNEL_DS;
+                ctxt->kernel_sp = idle->thread.sp0;
 
 #ifdef CONFIG_X86_32
-        ctxt->event_callback_cs = __KERNEL_CS;
-        ctxt->failsafe_callback_cs = __KERNEL_CS;
+                ctxt->event_callback_cs = __KERNEL_CS;
+                ctxt->failsafe_callback_cs = __KERNEL_CS;
 #endif
-        ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
-        ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;
+                ctxt->event_callback_eip =
+                        (unsigned long)xen_hypervisor_callback;
+                ctxt->failsafe_callback_eip =
+                        (unsigned long)xen_failsafe_callback;
+        }
+        ctxt->user_regs.cs = __KERNEL_CS;
+        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
 
         per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
         ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 83e866d714ce..f7a080ef0354 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -328,7 +328,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
                 if (per_cpu(lock_spinners, cpu) == xl) {
                         ADD_STATS(released_slow_kicked, 1);
                         xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
-                        break;
                 }
         }
 }
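
The single removed break above is the "xen: Send spinlock IPI to all waiters" fix called out in the pull message: the slow unlock path now kicks every CPU still registered as spinning on this lock rather than only the first one it finds, so a waiter that cannot make progress (for example one spinning in a tight loop with interrupts disabled) can no longer absorb the only wakeup. An abridged sketch of the resulting function, with the stats bookkeeping elided:

    /* Abridged sketch of xen_spin_unlock_slow() after this change. */
    static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
    {
            int cpu;

            for_each_online_cpu(cpu) {
                    if (per_cpu(lock_spinners, cpu) == xl)
                            xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
            }
    }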