aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-09-10 23:07:04 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-10 23:07:04 -0400
commita60d4b9874dc62cf0fd0d42b247baaaef75d30f8 (patch)
treefad38260717e11130b506055f2561d7cab20bc8d /arch/x86
parentfa1586a7e43760f0e25e72b2e3f97ee18b2be967 (diff)
parentc3b7cb1fd8c1513be99fb3cfb7f39c5116d80dac (diff)
Merge tag 'stable/for-linus-3.12-rc0-tag-two' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull Xen bug-fixes from Konrad Rzeszutek Wilk: "This pull I usually do after rc1 is out but because we have a nice amount of fixes, some bootup related fixes for ARM, and it is early in the cycle we figured to do it now to help with tracking of potential regressions. The simple ones are the ARM ones - one of the patches fell through the cracks, other fixes a bootup issue (unconditionally using Xen functions). Then a fix for a regression causing preempt count being off (patch causing this went in v3.12). Lastly are the fixes to make Xen PVHVM guests use PV ticketlocks (Xen PV already does). The enablement of that was supposed to be part of the x86 spinlock merge in commit 816434ec4a67 ("The biggest change here are paravirtualized ticket spinlocks (PV spinlocks), which bring a nice speedup on various benchmarks...") but unfortunately it would cause hang when booting Xen PVHVM guests. Yours truly got all of the bugs fixed last week and they (six of them) are included in this pull. Bug-fixes: - Boot on ARM without using Xen unconditionally - On Xen ARM don't run cpuidle/cpufreq - Fix regression in balloon driver, preempt count warnings - Fixes to make PVHVM able to use pv ticketlock. - Revert Xen PVHVM disabling pv ticketlock (aka, re-enable pv ticketlocks)" * tag 'stable/for-linus-3.12-rc0-tag-two' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: xen/spinlock: Don't use __initdate for xen_pv_spin Revert "xen/spinlock: Disable IRQ spinlock (PV) allocation on PVHVM" xen/spinlock: Don't setup xen spinlock IPI kicker if disabled. xen/smp: Update pv_lock_ops functions before alternative code starts under PVHVM xen/spinlock: We don't need the old structure anymore xen/spinlock: Fix locking path engaging too soon under PVHVM. xen/arm: disable cpuidle and cpufreq when linux is running as dom0 xen/p2m: Don't call get_balloon_scratch_page() twice, keep interrupts disabled for multicalls ARM: xen: only set pm function ptrs for Xen guests
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/xen/enlighten.c1
-rw-r--r--arch/x86/xen/p2m.c10
-rw-r--r--arch/x86/xen/smp.c28
-rw-r--r--arch/x86/xen/spinlock.c45
4 files changed, 37 insertions, 47 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2fc216dfbd9c..fa6ade76ef3f 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1692,7 +1692,6 @@ static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
1692 case CPU_UP_PREPARE: 1692 case CPU_UP_PREPARE:
1693 xen_vcpu_setup(cpu); 1693 xen_vcpu_setup(cpu);
1694 if (xen_have_vector_callback) { 1694 if (xen_have_vector_callback) {
1695 xen_init_lock_cpu(cpu);
1696 if (xen_feature(XENFEAT_hvm_safe_pvclock)) 1695 if (xen_feature(XENFEAT_hvm_safe_pvclock))
1697 xen_setup_timer(cpu); 1696 xen_setup_timer(cpu);
1698 } 1697 }
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 0d4ec35895d4..8b901e8d782d 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -990,10 +990,13 @@ int m2p_remove_override(struct page *page,
990 printk(KERN_WARNING "m2p_remove_override: " 990 printk(KERN_WARNING "m2p_remove_override: "
991 "pfn %lx mfn %lx, failed to modify kernel mappings", 991 "pfn %lx mfn %lx, failed to modify kernel mappings",
992 pfn, mfn); 992 pfn, mfn);
993 put_balloon_scratch_page();
993 return -1; 994 return -1;
994 } 995 }
995 996
996 mcs = xen_mc_entry( 997 xen_mc_batch();
998
999 mcs = __xen_mc_entry(
997 sizeof(struct gnttab_unmap_and_replace)); 1000 sizeof(struct gnttab_unmap_and_replace));
998 unmap_op = mcs.args; 1001 unmap_op = mcs.args;
999 unmap_op->host_addr = kmap_op->host_addr; 1002 unmap_op->host_addr = kmap_op->host_addr;
@@ -1003,12 +1006,11 @@ int m2p_remove_override(struct page *page,
1003 MULTI_grant_table_op(mcs.mc, 1006 MULTI_grant_table_op(mcs.mc,
1004 GNTTABOP_unmap_and_replace, unmap_op, 1); 1007 GNTTABOP_unmap_and_replace, unmap_op, 1);
1005 1008
1006 xen_mc_issue(PARAVIRT_LAZY_MMU);
1007
1008 mcs = __xen_mc_entry(0); 1009 mcs = __xen_mc_entry(0);
1009 MULTI_update_va_mapping(mcs.mc, scratch_page_address, 1010 MULTI_update_va_mapping(mcs.mc, scratch_page_address,
1010 pfn_pte(page_to_pfn(get_balloon_scratch_page()), 1011 pfn_pte(page_to_pfn(scratch_page),
1011 PAGE_KERNEL_RO), 0); 1012 PAGE_KERNEL_RO), 0);
1013
1012 xen_mc_issue(PARAVIRT_LAZY_MMU); 1014 xen_mc_issue(PARAVIRT_LAZY_MMU);
1013 1015
1014 kmap_op->host_addr = 0; 1016 kmap_op->host_addr = 0;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 9235842cd76a..d1e4777b4e75 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -273,12 +273,20 @@ static void __init xen_smp_prepare_boot_cpu(void)
273 BUG_ON(smp_processor_id() != 0); 273 BUG_ON(smp_processor_id() != 0);
274 native_smp_prepare_boot_cpu(); 274 native_smp_prepare_boot_cpu();
275 275
276 /* We've switched to the "real" per-cpu gdt, so make sure the 276 if (xen_pv_domain()) {
277 old memory can be recycled */ 277 /* We've switched to the "real" per-cpu gdt, so make sure the
278 make_lowmem_page_readwrite(xen_initial_gdt); 278 old memory can be recycled */
279 make_lowmem_page_readwrite(xen_initial_gdt);
279 280
280 xen_filter_cpu_maps(); 281 xen_filter_cpu_maps();
281 xen_setup_vcpu_info_placement(); 282 xen_setup_vcpu_info_placement();
283 }
284 /*
285 * The alternative logic (which patches the unlock/lock) runs before
286 * the smp bootup up code is activated. Hence we need to set this up
287 * the core kernel is being patched. Otherwise we will have only
288 * modules patched but not core code.
289 */
282 xen_init_spinlocks(); 290 xen_init_spinlocks();
283} 291}
284 292
@@ -709,6 +717,15 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
709 WARN_ON(rc); 717 WARN_ON(rc);
710 if (!rc) 718 if (!rc)
711 rc = native_cpu_up(cpu, tidle); 719 rc = native_cpu_up(cpu, tidle);
720
721 /*
722 * We must initialize the slowpath CPU kicker _after_ the native
723 * path has executed. If we initialized it before none of the
724 * unlocker IPI kicks would reach the booting CPU as the booting
725 * CPU had not set itself 'online' in cpu_online_mask. That mask
726 * is checked when IPIs are sent (on HVM at least).
727 */
728 xen_init_lock_cpu(cpu);
712 return rc; 729 return rc;
713} 730}
714 731
@@ -728,4 +745,5 @@ void __init xen_hvm_smp_init(void)
728 smp_ops.cpu_die = xen_hvm_cpu_die; 745 smp_ops.cpu_die = xen_hvm_cpu_die;
729 smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi; 746 smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
730 smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi; 747 smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
748 smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
731} 749}
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 0438b9324a72..253f63fceea1 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -81,7 +81,6 @@ static inline void spin_time_accum_blocked(u64 start)
81 spinlock_stats.time_blocked += delta; 81 spinlock_stats.time_blocked += delta;
82} 82}
83#else /* !CONFIG_XEN_DEBUG_FS */ 83#else /* !CONFIG_XEN_DEBUG_FS */
84#define TIMEOUT (1 << 10)
85static inline void add_stats(enum xen_contention_stat var, u32 val) 84static inline void add_stats(enum xen_contention_stat var, u32 val)
86{ 85{
87} 86}
@@ -96,23 +95,6 @@ static inline void spin_time_accum_blocked(u64 start)
96} 95}
97#endif /* CONFIG_XEN_DEBUG_FS */ 96#endif /* CONFIG_XEN_DEBUG_FS */
98 97
99/*
100 * Size struct xen_spinlock so it's the same as arch_spinlock_t.
101 */
102#if NR_CPUS < 256
103typedef u8 xen_spinners_t;
104# define inc_spinners(xl) \
105 asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
106# define dec_spinners(xl) \
107 asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
108#else
109typedef u16 xen_spinners_t;
110# define inc_spinners(xl) \
111 asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
112# define dec_spinners(xl) \
113 asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
114#endif
115
116struct xen_lock_waiting { 98struct xen_lock_waiting {
117 struct arch_spinlock *lock; 99 struct arch_spinlock *lock;
118 __ticket_t want; 100 __ticket_t want;
@@ -123,6 +105,7 @@ static DEFINE_PER_CPU(char *, irq_name);
123static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting); 105static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
124static cpumask_t waiting_cpus; 106static cpumask_t waiting_cpus;
125 107
108static bool xen_pvspin = true;
126static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) 109static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
127{ 110{
128 int irq = __this_cpu_read(lock_kicker_irq); 111 int irq = __this_cpu_read(lock_kicker_irq);
@@ -241,16 +224,12 @@ void xen_init_lock_cpu(int cpu)
241 int irq; 224 int irq;
242 char *name; 225 char *name;
243 226
227 if (!xen_pvspin)
228 return;
229
244 WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", 230 WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
245 cpu, per_cpu(lock_kicker_irq, cpu)); 231 cpu, per_cpu(lock_kicker_irq, cpu));
246 232
247 /*
248 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
249 * (xen: disable PV spinlocks on HVM)
250 */
251 if (xen_hvm_domain())
252 return;
253
254 name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); 233 name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
255 irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, 234 irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
256 cpu, 235 cpu,
@@ -270,11 +249,7 @@ void xen_init_lock_cpu(int cpu)
270 249
271void xen_uninit_lock_cpu(int cpu) 250void xen_uninit_lock_cpu(int cpu)
272{ 251{
273 /* 252 if (!xen_pvspin)
274 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
275 * (xen: disable PV spinlocks on HVM)
276 */
277 if (xen_hvm_domain())
278 return; 253 return;
279 254
280 unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); 255 unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
@@ -283,16 +258,9 @@ void xen_uninit_lock_cpu(int cpu)
283 per_cpu(irq_name, cpu) = NULL; 258 per_cpu(irq_name, cpu) = NULL;
284} 259}
285 260
286static bool xen_pvspin __initdata = true;
287 261
288void __init xen_init_spinlocks(void) 262void __init xen_init_spinlocks(void)
289{ 263{
290 /*
291 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
292 * (xen: disable PV spinlocks on HVM)
293 */
294 if (xen_hvm_domain())
295 return;
296 264
297 if (!xen_pvspin) { 265 if (!xen_pvspin) {
298 printk(KERN_DEBUG "xen: PV spinlocks disabled\n"); 266 printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
@@ -323,6 +291,9 @@ static int __init xen_spinlock_debugfs(void)
323 if (d_xen == NULL) 291 if (d_xen == NULL)
324 return -ENOMEM; 292 return -ENOMEM;
325 293
294 if (!xen_pvspin)
295 return 0;
296
326 d_spin_debug = debugfs_create_dir("spinlocks", d_xen); 297 d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
327 298
328 debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats); 299 debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);