author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-22 22:59:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-22 23:16:14 -0400
commit		d4c6fa73fe984e504d52f3d6bba291fd76fe49f7 (patch)
tree		47842ddebb2a48cc1513b36fba18835678e2b94e
parent		aab008db8063364dc3c8ccf4981c21124866b395 (diff)
parent		4bc25af79ec54b79266148f8c1b84bb1e7ff2621 (diff)
Merge tag 'stable/for-linus-3.4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
Pull xen updates from Konrad Rzeszutek Wilk:
"which has three neat features:
- PV multiconsole support, so that there can be hvc1, hvc2, etc. This
can be used in HVM and in PV mode.
- P-state and C-state power management driver that uploads said power
management data to the hypervisor. It also prevents cpufreq
scaling drivers from loading, so that only the hypervisor can make
power management decisions - fixing a weird perf bug.
There is one thing in the Kconfig that you won't like: "default y
if (X86_ACPI_CPUFREQ = y || X86_POWERNOW_K8 = y)" (note that it
all depends on CONFIG_XEN which depends on CONFIG_PARAVIRT which by
default is off). I've a fix to convert that boolean expression
into "default m" which I am going to post after the cpufreq git
pull - as the two patches to make this work depend on a fix in Dave
Jones's tree.
- Function Level Reset (FLR) support in the Xen PCI backend.
Fixes:
- Kconfig dependencies for Xen PV keyboard and video
- Compile warnings and constify fixes
- Change over to use this_cpu_xxx instead of percpu_xxx"
Fix up trivial conflicts in drivers/tty/hvc/hvc_xen.c due to changes to
a removed commit.
* tag 'stable/for-linus-3.4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
xen kconfig: relax INPUT_XEN_KBDDEV_FRONTEND deps
xen/acpi-processor: C and P-state driver that uploads said data to hypervisor.
xen: constify all instances of "struct attribute_group"
xen/xenbus: ignore console/0
hvc_xen: introduce HVC_XEN_FRONTEND
hvc_xen: implement multiconsole support
hvc_xen: support PV on HVM consoles
xenbus: don't free other end details too early
xen/enlighten: Expose MWAIT and MWAIT_LEAF if hypervisor OKs it.
xen/setup/pm/acpi: Remove the call to boot_option_idle_override.
xenbus: address compiler warnings
xen: use this_cpu_xxx replace percpu_xxx funcs
xen/pciback: Support pci_reset_function, aka FLR or D3 support.
pci: Introduce __pci_reset_function_locked to be used when holding device_lock.
xen: Utilize the restore_msi_irqs hook.
29 files changed, 1264 insertions, 80 deletions
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
index fbb519828aa1..09d5f7fd9db1 100644
--- a/arch/ia64/include/asm/xen/interface.h
+++ b/arch/ia64/include/asm/xen/interface.h
@@ -77,6 +77,7 @@ DEFINE_GUEST_HANDLE(int);
 DEFINE_GUEST_HANDLE(long);
 DEFINE_GUEST_HANDLE(void);
 DEFINE_GUEST_HANDLE(uint64_t);
+DEFINE_GUEST_HANDLE(uint32_t);
 
 typedef unsigned long xen_pfn_t;
 DEFINE_GUEST_HANDLE(xen_pfn_t);
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index a1f2db5f1170..cbf0c9d50b92 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -56,6 +56,7 @@ DEFINE_GUEST_HANDLE(int);
 DEFINE_GUEST_HANDLE(long);
 DEFINE_GUEST_HANDLE(void);
 DEFINE_GUEST_HANDLE(uint64_t);
+DEFINE_GUEST_HANDLE(uint32_t);
 #endif
 
 #ifndef HYPERVISOR_VIRT_START
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index d99346ea8fdb..7415aa927913 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -324,6 +324,32 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 out:
 	return ret;
 }
+
+static void xen_initdom_restore_msi_irqs(struct pci_dev *dev, int irq)
+{
+	int ret = 0;
+
+	if (pci_seg_supported) {
+		struct physdev_pci_device restore_ext;
+
+		restore_ext.seg = pci_domain_nr(dev->bus);
+		restore_ext.bus = dev->bus->number;
+		restore_ext.devfn = dev->devfn;
+		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi_ext,
+					&restore_ext);
+		if (ret == -ENOSYS)
+			pci_seg_supported = false;
+		WARN(ret && ret != -ENOSYS, "restore_msi_ext -> %d\n", ret);
+	}
+	if (!pci_seg_supported) {
+		struct physdev_restore_msi restore;
+
+		restore.bus = dev->bus->number;
+		restore.devfn = dev->devfn;
+		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore);
+		WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
+	}
+}
 #endif
 
 static void xen_teardown_msi_irqs(struct pci_dev *dev)
@@ -446,6 +472,7 @@ int __init pci_xen_initial_domain(void)
 #ifdef CONFIG_PCI_MSI
 	x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
 	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+	x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
 #endif
 	xen_setup_acpi_sci();
 	__acpi_register_gsi = acpi_register_gsi_xen;
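The hunk above hooks xen_initdom_restore_msi_irqs() into the x86_msi ops table and falls back from the segment-aware hypercall to the legacy one when the hypervisor is too old. Below is a standalone, user-space sketch of that ops-override-plus-fallback pattern; all names here (fake_dev, msi_ops, the pretend hypercalls) are invented for the illustration and are not from the patch.

/*
 * Standalone illustration (not kernel code): an ops table whose default
 * handler can be overridden at init time, plus a "try the extended call,
 * fall back to the legacy one" probe. All names are made up for the sketch.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_dev { int seg, bus, devfn; };

/* Pretend hypercalls: the extended one may be unsupported (-38, i.e. ENOSYS). */
static int extended_restore(struct fake_dev *d) { (void)d; return -38; }
static int legacy_restore(struct fake_dev *d)   { (void)d; return 0; }

static bool seg_supported = true;

static void default_restore_msi(struct fake_dev *d)
{
	printf("default restore for %d:%d.%d\n", d->seg, d->bus, d->devfn);
}

static void xenlike_restore_msi(struct fake_dev *d)
{
	int ret;

	if (seg_supported) {
		ret = extended_restore(d);
		if (ret == -38)		/* backend too old: remember and fall back */
			seg_supported = false;
	}
	if (!seg_supported)
		legacy_restore(d);
}

/* The ops table: generic code calls through the pointer, never the override directly. */
static struct { void (*restore_msi)(struct fake_dev *); } msi_ops = {
	.restore_msi = default_restore_msi,
};

int main(void)
{
	struct fake_dev d = { 0, 3, 0 };

	msi_ops.restore_msi = xenlike_restore_msi;	/* done once at platform init */
	msi_ops.restore_msi(&d);			/* later callers are unchanged */
	return 0;
}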
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 4172af8ceeb3..b132ade26f77 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -62,6 +62,15 @@
 #include <asm/reboot.h>
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
+#include <asm/mwait.h>
+
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#include <asm/acpi.h>
+#include <acpi/pdc_intel.h>
+#include <acpi/processor.h>
+#include <xen/interface/platform.h>
+#endif
 
 #include "xen-ops.h"
 #include "mmu.h"
@@ -200,13 +209,17 @@ static void __init xen_banner(void)
 static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
 static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
 
+static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask;
+static __read_mostly unsigned int cpuid_leaf5_ecx_val;
+static __read_mostly unsigned int cpuid_leaf5_edx_val;
+
 static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
 {
 	unsigned maskebx = ~0;
 	unsigned maskecx = ~0;
 	unsigned maskedx = ~0;
-
+	unsigned setecx = 0;
 	/*
 	 * Mask out inconvenient features, to try and disable as many
 	 * unsupported kernel subsystems as possible.
@@ -214,9 +227,18 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
 	switch (*ax) {
 	case 1:
 		maskecx = cpuid_leaf1_ecx_mask;
+		setecx = cpuid_leaf1_ecx_set_mask;
 		maskedx = cpuid_leaf1_edx_mask;
 		break;
 
+	case CPUID_MWAIT_LEAF:
+		/* Synthesize the values.. */
+		*ax = 0;
+		*bx = 0;
+		*cx = cpuid_leaf5_ecx_val;
+		*dx = cpuid_leaf5_edx_val;
+		return;
+
 	case 0xb:
 		/* Suppress extended topology stuff */
 		maskebx = 0;
@@ -232,9 +254,75 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
 
 	*bx &= maskebx;
 	*cx &= maskecx;
+	*cx |= setecx;
 	*dx &= maskedx;
+
 }
 
+static bool __init xen_check_mwait(void)
+{
+#ifdef CONFIG_ACPI
+	struct xen_platform_op op = {
+		.cmd = XENPF_set_processor_pminfo,
+		.u.set_pminfo.id = -1,
+		.u.set_pminfo.type = XEN_PM_PDC,
+	};
+	uint32_t buf[3];
+	unsigned int ax, bx, cx, dx;
+	unsigned int mwait_mask;
+
+	/* We need to determine whether it is OK to expose the MWAIT
+	 * capability to the kernel to harvest deeper than C3 states from ACPI
+	 * _CST using the processor_harvest_xen.c module. For this to work, we
+	 * need to gather the MWAIT_LEAF values (which the cstate.c code
+	 * checks against). The hypervisor won't expose the MWAIT flag because
+	 * it would break backwards compatibility; so we will find out directly
+	 * from the hardware and hypercall.
+	 */
+	if (!xen_initial_domain())
+		return false;
+
+	ax = 1;
+	cx = 0;
+
+	native_cpuid(&ax, &bx, &cx, &dx);
+
+	mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
+		     (1 << (X86_FEATURE_MWAIT % 32));
+
+	if ((cx & mwait_mask) != mwait_mask)
+		return false;
+
+	/* We need to emulate the MWAIT_LEAF and for that we need both
+	 * ecx and edx. The hypercall provides only partial information.
+	 */
+
+	ax = CPUID_MWAIT_LEAF;
+	bx = 0;
+	cx = 0;
+	dx = 0;
+
+	native_cpuid(&ax, &bx, &cx, &dx);
+
+	/* Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so,
+	 * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
+	 */
+	buf[0] = ACPI_PDC_REVISION_ID;
+	buf[1] = 1;
+	buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);
+
+	set_xen_guest_handle(op.u.set_pminfo.pdc, buf);
+
+	if ((HYPERVISOR_dom0_op(&op) == 0) &&
+	    (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
+		cpuid_leaf5_ecx_val = cx;
+		cpuid_leaf5_edx_val = dx;
+	}
+	return true;
+#else
+	return false;
+#endif
+}
 static void __init xen_init_cpuid_mask(void)
 {
 	unsigned int ax, bx, cx, dx;
@@ -261,6 +349,9 @@ static void __init xen_init_cpuid_mask(void)
 	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
 	if ((cx & xsave_mask) != xsave_mask)
 		cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
+
+	if (xen_check_mwait())
+		cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
 }
 
 static void xen_set_debugreg(int reg, unsigned long val)
@@ -777,11 +868,11 @@ static DEFINE_PER_CPU(unsigned long, xen_cr0_value);
 
 static unsigned long xen_read_cr0(void)
 {
-	unsigned long cr0 = percpu_read(xen_cr0_value);
+	unsigned long cr0 = this_cpu_read(xen_cr0_value);
 
 	if (unlikely(cr0 == 0)) {
 		cr0 = native_read_cr0();
-		percpu_write(xen_cr0_value, cr0);
+		this_cpu_write(xen_cr0_value, cr0);
 	}
 
 	return cr0;
@@ -791,7 +882,7 @@ static void xen_write_cr0(unsigned long cr0)
 {
 	struct multicall_space mcs;
 
-	percpu_write(xen_cr0_value, cr0);
+	this_cpu_write(xen_cr0_value, cr0);
 
 	/* Only pay attention to cr0.TS; everything else is
 	   ignored. */
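xen_cpuid() above applies a clear-mask and then a set-mask to the leaf-1 ECX value, which is how MWAIT can be advertised to the kernel even though the hypervisor normally hides it. The user-space sketch below shows the same mask-then-set idea on a real CPUID leaf; the "policy" it applies is made up for the demo, and only the MWAIT/EST bit positions (ECX bits 3 and 7) are architectural.

/*
 * Userspace sketch of the mask/set idea in xen_cpuid(): read leaf 1, clear a
 * mask of unwanted ECX bits, then OR in a set mask, the same way
 * cpuid_leaf1_ecx_mask / cpuid_leaf1_ecx_set_mask are applied above.
 */
#include <cpuid.h>
#include <stdio.h>

#define ECX_MWAIT (1u << 3)	/* CPUID.1:ECX bit 3 = MONITOR/MWAIT */
#define ECX_EST   (1u << 7)	/* CPUID.1:ECX bit 7 = Enhanced SpeedStep */

int main(void)
{
	unsigned int ax, bx, cx, dx;
	unsigned int ecx_mask = ~0u;	/* bits to keep */
	unsigned int ecx_set  = 0;	/* bits to force on */

	if (!__get_cpuid(1, &ax, &bx, &cx, &dx))
		return 1;

	printf("raw    ECX = %#010x (mwait=%d est=%d)\n", cx,
	       !!(cx & ECX_MWAIT), !!(cx & ECX_EST));

	/* Pretend policy for the demo: hide EST, force-advertise MWAIT. */
	ecx_mask &= ~ECX_EST;
	ecx_set  |= ECX_MWAIT;

	cx &= ecx_mask;
	cx |= ecx_set;

	printf("masked ECX = %#010x (mwait=%d est=%d)\n", cx,
	       !!(cx & ECX_MWAIT), !!(cx & ECX_EST));
	return 0;
}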
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 8bbb465b6f0a..157337657971 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -26,7 +26,7 @@ static unsigned long xen_save_fl(void)
 	struct vcpu_info *vcpu;
 	unsigned long flags;
 
-	vcpu = percpu_read(xen_vcpu);
+	vcpu = this_cpu_read(xen_vcpu);
 
 	/* flag has opposite sense of mask */
 	flags = !vcpu->evtchn_upcall_mask;
@@ -50,7 +50,7 @@ static void xen_restore_fl(unsigned long flags)
 	   make sure we're don't switch CPUs between getting the vcpu
 	   pointer and updating the mask. */
 	preempt_disable();
-	vcpu = percpu_read(xen_vcpu);
+	vcpu = this_cpu_read(xen_vcpu);
 	vcpu->evtchn_upcall_mask = flags;
 	preempt_enable_no_resched();
 
@@ -72,7 +72,7 @@ static void xen_irq_disable(void)
 	   make sure we're don't switch CPUs between getting the vcpu
 	   pointer and updating the mask. */
 	preempt_disable();
-	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
+	this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
 	preempt_enable_no_resched();
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
@@ -86,7 +86,7 @@ static void xen_irq_enable(void)
 	   the caller is confused and is trying to re-enable interrupts
 	   on an indeterminate processor. */
 
-	vcpu = percpu_read(xen_vcpu);
+	vcpu = this_cpu_read(xen_vcpu);
 	vcpu->evtchn_upcall_mask = 0;
 
 	/* Doesn't matter if we get preempted here, because any
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 95c1cf60c669..988828b479ed 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1071,14 +1071,14 @@ static void drop_other_mm_ref(void *info)
 	struct mm_struct *mm = info;
 	struct mm_struct *active_mm;
 
-	active_mm = percpu_read(cpu_tlbstate.active_mm);
+	active_mm = this_cpu_read(cpu_tlbstate.active_mm);
 
-	if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
+	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
 		leave_mm(smp_processor_id());
 
 	/* If this cpu still has a stale cr3 reference, then make sure
 	   it has been flushed. */
-	if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
+	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
 		load_cr3(swapper_pg_dir);
 }
 
@@ -1185,17 +1185,17 @@ static void __init xen_pagetable_setup_done(pgd_t *base)
 
 static void xen_write_cr2(unsigned long cr2)
 {
-	percpu_read(xen_vcpu)->arch.cr2 = cr2;
+	this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
 }
 
 static unsigned long xen_read_cr2(void)
 {
-	return percpu_read(xen_vcpu)->arch.cr2;
+	return this_cpu_read(xen_vcpu)->arch.cr2;
 }
 
 unsigned long xen_read_cr2_direct(void)
 {
-	return percpu_read(xen_vcpu_info.arch.cr2);
+	return this_cpu_read(xen_vcpu_info.arch.cr2);
 }
 
 static void xen_flush_tlb(void)
@@ -1278,12 +1278,12 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 
 static unsigned long xen_read_cr3(void)
 {
-	return percpu_read(xen_cr3);
+	return this_cpu_read(xen_cr3);
 }
 
 static void set_current_cr3(void *v)
 {
-	percpu_write(xen_current_cr3, (unsigned long)v);
+	this_cpu_write(xen_current_cr3, (unsigned long)v);
 }
 
 static void __xen_write_cr3(bool kernel, unsigned long cr3)
@@ -1306,7 +1306,7 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3)
 	xen_extend_mmuext_op(&op);
 
 	if (kernel) {
-		percpu_write(xen_cr3, cr3);
+		this_cpu_write(xen_cr3, cr3);
 
 		/* Update xen_current_cr3 once the batch has actually
 		   been submitted. */
@@ -1322,7 +1322,7 @@ static void xen_write_cr3(unsigned long cr3)
 
 	/* Update while interrupts are disabled, so its atomic with
 	   respect to ipis */
-	percpu_write(xen_cr3, cr3);
+	this_cpu_write(xen_cr3, cr3);
 
 	__xen_write_cr3(true, cr3);
 
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index dee79b78a90f..9c2e74f9096c 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -47,7 +47,7 @@ static inline void xen_mc_issue(unsigned mode)
 		xen_mc_flush();
 
 	/* restore flags saved in xen_mc_batch */
-	local_irq_restore(percpu_read(xen_mc_irq_flags));
+	local_irq_restore(this_cpu_read(xen_mc_irq_flags));
 }
 
 /* Set up a callback to be called when the current batch is flushed */
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index e03c63692176..12366238d07d 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -420,7 +420,6 @@ void __init xen_arch_setup(void)
 	boot_cpu_data.hlt_works_ok = 1;
 #endif
 	disable_cpuidle();
-	boot_option_idle_override = IDLE_HALT;
 	WARN_ON(set_pm_idle_to_default());
 	fiddle_vdso();
 }
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 501d4e0244ba..315d8fa0c8fb 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -76,7 +76,7 @@ static void __cpuinit cpu_bringup(void)
 	xen_setup_cpu_clockevents();
 
 	set_cpu_online(cpu, true);
-	percpu_write(cpu_state, CPU_ONLINE);
+	this_cpu_write(cpu_state, CPU_ONLINE);
 	wmb();
 
 	/* We can take interrupts now: we're officially "up". */
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7b46781c30c9..8f675ae20916 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -558,7 +558,7 @@ config INPUT_CMA3000_I2C
 
 config INPUT_XEN_KBDDEV_FRONTEND
 	tristate "Xen virtual keyboard and mouse support"
-	depends on XEN_FBDEV_FRONTEND
+	depends on XEN
 	default y
 	select XEN_XENBUS_FRONTEND
 	help
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index af295bb21d62..053670e09e2b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3163,6 +3163,31 @@ int __pci_reset_function(struct pci_dev *dev)
 EXPORT_SYMBOL_GPL(__pci_reset_function);
 
 /**
+ * __pci_reset_function_locked - reset a PCI device function while holding
+ * the @dev mutex lock.
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * The device function is presumed to be unused and the caller is holding
+ * the device mutex lock when this function is called.
+ * Resetting the device will make the contents of PCI configuration space
+ * random, so any caller of this must be prepared to reinitialise the
+ * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
+ * etc.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int __pci_reset_function_locked(struct pci_dev *dev)
+{
+	return pci_dev_reset(dev, 1);
+}
+EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
+
+/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
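The kernel-doc above says the caller of __pci_reset_function_locked() must already hold the device lock and must be ready to reinitialise config space afterwards. A minimal caller sketch along those lines, not taken from the patch, could look like this:

/*
 * Minimal caller sketch for the new export: the device lock is held around
 * the reset, and config state is saved and restored by the caller.
 */
#include <linux/device.h>
#include <linux/pci.h>

static int reset_fn_under_lock(struct pci_dev *pdev)
{
	int ret;

	device_lock(&pdev->dev);	/* __pci_reset_function_locked() expects this held */
	pci_save_state(pdev);		/* config space is undefined after the reset */

	ret = __pci_reset_function_locked(pdev);
	if (!ret)
		pci_restore_state(pdev);	/* bring back BARs, MSI, bus mastering, ... */

	device_unlock(&pdev->dev);
	return ret;
}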
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 48cb8d3d1758..0282a83f51fb 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -66,6 +66,14 @@ config HVC_XEN
 	help
 	  Xen virtual console device driver
 
+config HVC_XEN_FRONTEND
+	bool "Xen Hypervisor Multiple Consoles support"
+	depends on HVC_XEN
+	select XEN_XENBUS_FRONTEND
+	default y
+	help
+	  Xen driver for secondary virtual consoles
+
 config HVC_UDBG
 	bool "udbg based fake hypervisor console"
 	depends on PPC && EXPERIMENTAL
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index a1b0a75c3eae..83d5c88e7165 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -23,44 +23,74 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/types.h>
+#include <linux/list.h>
 
+#include <asm/io.h>
 #include <asm/xen/hypervisor.h>
 
 #include <xen/xen.h>
+#include <xen/interface/xen.h>
+#include <xen/hvm.h>
+#include <xen/grant_table.h>
 #include <xen/page.h>
 #include <xen/events.h>
 #include <xen/interface/io/console.h>
 #include <xen/hvc-console.h>
+#include <xen/xenbus.h>
 
 #include "hvc_console.h"
 
 #define HVC_COOKIE 0x58656e /* "Xen" in hex */
 
-static struct hvc_struct *hvc;
-static int xencons_irq;
+struct xencons_info {
+	struct list_head list;
+	struct xenbus_device *xbdev;
+	struct xencons_interface *intf;
+	unsigned int evtchn;
+	struct hvc_struct *hvc;
+	int irq;
+	int vtermno;
+	grant_ref_t gntref;
+};
+
+static LIST_HEAD(xenconsoles);
+static DEFINE_SPINLOCK(xencons_lock);
 
 /* ------------------------------------------------------------------ */
 
-static unsigned long console_pfn = ~0ul;
+static struct xencons_info *vtermno_to_xencons(int vtermno)
+{
+	struct xencons_info *entry, *n, *ret = NULL;
+
+	if (list_empty(&xenconsoles))
+		return NULL;
+
+	list_for_each_entry_safe(entry, n, &xenconsoles, list) {
+		if (entry->vtermno == vtermno) {
+			ret = entry;
+			break;
+		}
+	}
+
+	return ret;
+}
 
-static inline struct xencons_interface *xencons_interface(void)
+static inline int xenbus_devid_to_vtermno(int devid)
 {
-	if (console_pfn == ~0ul)
-		return mfn_to_virt(xen_start_info->console.domU.mfn);
-	else
-		return __va(console_pfn << PAGE_SHIFT);
+	return devid + HVC_COOKIE;
 }
 
-static inline void notify_daemon(void)
+static inline void notify_daemon(struct xencons_info *cons)
 {
 	/* Use evtchn: this is called early, before irq is set up. */
-	notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
+	notify_remote_via_evtchn(cons->evtchn);
 }
 
-static int __write_console(const char *data, int len)
+static int __write_console(struct xencons_info *xencons,
+			   const char *data, int len)
 {
-	struct xencons_interface *intf = xencons_interface();
 	XENCONS_RING_IDX cons, prod;
+	struct xencons_interface *intf = xencons->intf;
 	int sent = 0;
 
 	cons = intf->out_cons;
@@ -75,13 +105,16 @@ static int __write_console(const char *data, int len)
 	intf->out_prod = prod;
 
 	if (sent)
-		notify_daemon();
+		notify_daemon(xencons);
 	return sent;
 }
 
 static int domU_write_console(uint32_t vtermno, const char *data, int len)
 {
 	int ret = len;
+	struct xencons_info *cons = vtermno_to_xencons(vtermno);
+	if (cons == NULL)
+		return -EINVAL;
 
 	/*
 	 * Make sure the whole buffer is emitted, polling if
@@ -90,7 +123,7 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
 	 * kernel is crippled.
 	 */
 	while (len) {
-		int sent = __write_console(data, len);
+		int sent = __write_console(cons, data, len);
 
 		data += sent;
 		len -= sent;
@@ -104,9 +137,13 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
 
 static int domU_read_console(uint32_t vtermno, char *buf, int len)
 {
-	struct xencons_interface *intf = xencons_interface();
+	struct xencons_interface *intf;
 	XENCONS_RING_IDX cons, prod;
 	int recv = 0;
+	struct xencons_info *xencons = vtermno_to_xencons(vtermno);
+	if (xencons == NULL)
+		return -EINVAL;
+	intf = xencons->intf;
 
 	cons = intf->in_cons;
 	prod = intf->in_prod;
@@ -119,7 +156,7 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
 	mb(); /* read ring before consuming */
 	intf->in_cons = cons;
 
-	notify_daemon();
+	notify_daemon(xencons);
 	return recv;
 }
 
@@ -157,68 +194,407 @@ static struct hv_ops dom0_hvc_ops = {
 	.notifier_hangup = notifier_hangup_irq,
 };
 
-static int __init xen_hvc_init(void)
+static int xen_hvm_console_init(void)
+{
+	int r;
+	uint64_t v = 0;
+	unsigned long mfn;
+	struct xencons_info *info;
+
+	if (!xen_hvm_domain())
+		return -ENODEV;
+
+	info = vtermno_to_xencons(HVC_COOKIE);
+	if (!info) {
+		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO);
+		if (!info)
+			return -ENOMEM;
+	}
+
+	/* already configured */
+	if (info->intf != NULL)
+		return 0;
+
+	r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
+	if (r < 0) {
+		kfree(info);
+		return -ENODEV;
+	}
+	info->evtchn = v;
+	hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
+	if (r < 0) {
+		kfree(info);
+		return -ENODEV;
+	}
+	mfn = v;
+	info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE);
+	if (info->intf == NULL) {
+		kfree(info);
+		return -ENODEV;
+	}
+	info->vtermno = HVC_COOKIE;
+
+	spin_lock(&xencons_lock);
+	list_add_tail(&info->list, &xenconsoles);
+	spin_unlock(&xencons_lock);
+
+	return 0;
+}
+
+static int xen_pv_console_init(void)
 {
-	struct hvc_struct *hp;
-	struct hv_ops *ops;
+	struct xencons_info *info;
 
 	if (!xen_pv_domain())
 		return -ENODEV;
 
-	if (xen_initial_domain()) {
-		ops = &dom0_hvc_ops;
-		xencons_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
-	} else {
-		if (!xen_start_info->console.domU.evtchn)
-			return -ENODEV;
+	if (!xen_start_info->console.domU.evtchn)
+		return -ENODEV;
 
-		ops = &domU_hvc_ops;
-		xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
+	info = vtermno_to_xencons(HVC_COOKIE);
+	if (!info) {
+		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO);
+		if (!info)
+			return -ENOMEM;
 	}
-	if (xencons_irq < 0)
-		xencons_irq = 0;
-	else
-		irq_set_noprobe(xencons_irq);
 
-	hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256);
-	if (IS_ERR(hp))
-		return PTR_ERR(hp);
+	/* already configured */
+	if (info->intf != NULL)
+		return 0;
+
+	info->evtchn = xen_start_info->console.domU.evtchn;
+	info->intf = mfn_to_virt(xen_start_info->console.domU.mfn);
+	info->vtermno = HVC_COOKIE;
+
+	spin_lock(&xencons_lock);
+	list_add_tail(&info->list, &xenconsoles);
+	spin_unlock(&xencons_lock);
+
+	return 0;
+}
+
+static int xen_initial_domain_console_init(void)
+{
+	struct xencons_info *info;
+
+	if (!xen_initial_domain())
+		return -ENODEV;
+
+	info = vtermno_to_xencons(HVC_COOKIE);
+	if (!info) {
+		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO);
+		if (!info)
+			return -ENOMEM;
+	}
 
-	hvc = hp;
+	info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
+	info->vtermno = HVC_COOKIE;
 
-	console_pfn = mfn_to_pfn(xen_start_info->console.domU.mfn);
+	spin_lock(&xencons_lock);
+	list_add_tail(&info->list, &xenconsoles);
+	spin_unlock(&xencons_lock);
 
 	return 0;
 }
 
 void xen_console_resume(void)
 {
-	if (xencons_irq)
-		rebind_evtchn_irq(xen_start_info->console.domU.evtchn, xencons_irq);
+	struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
+	if (info != NULL && info->irq)
+		rebind_evtchn_irq(info->evtchn, info->irq);
+}
+
+static void xencons_disconnect_backend(struct xencons_info *info)
+{
+	if (info->irq > 0)
+		unbind_from_irqhandler(info->irq, NULL);
+	info->irq = 0;
+	if (info->evtchn > 0)
+		xenbus_free_evtchn(info->xbdev, info->evtchn);
+	info->evtchn = 0;
+	if (info->gntref > 0)
+		gnttab_free_grant_references(info->gntref);
+	info->gntref = 0;
+	if (info->hvc != NULL)
+		hvc_remove(info->hvc);
+	info->hvc = NULL;
+}
+
+static void xencons_free(struct xencons_info *info)
+{
+	free_page((unsigned long)info->intf);
+	info->intf = NULL;
+	info->vtermno = 0;
+	kfree(info);
+}
+
+static int xen_console_remove(struct xencons_info *info)
+{
+	xencons_disconnect_backend(info);
+	spin_lock(&xencons_lock);
+	list_del(&info->list);
+	spin_unlock(&xencons_lock);
+	if (info->xbdev != NULL)
+		xencons_free(info);
+	else {
+		if (xen_hvm_domain())
+			iounmap(info->intf);
+		kfree(info);
+	}
+	return 0;
+}
+
+#ifdef CONFIG_HVC_XEN_FRONTEND
+static struct xenbus_driver xencons_driver;
+
+static int xencons_remove(struct xenbus_device *dev)
+{
+	return xen_console_remove(dev_get_drvdata(&dev->dev));
+}
+
+static int xencons_connect_backend(struct xenbus_device *dev,
+				   struct xencons_info *info)
+{
+	int ret, evtchn, devid, ref, irq;
+	struct xenbus_transaction xbt;
+	grant_ref_t gref_head;
+	unsigned long mfn;
+
+	ret = xenbus_alloc_evtchn(dev, &evtchn);
+	if (ret)
+		return ret;
+	info->evtchn = evtchn;
+	irq = bind_evtchn_to_irq(evtchn);
+	if (irq < 0)
+		return irq;
+	info->irq = irq;
+	devid = dev->nodename[strlen(dev->nodename) - 1] - '0';
+	info->hvc = hvc_alloc(xenbus_devid_to_vtermno(devid),
+			irq, &domU_hvc_ops, 256);
+	if (IS_ERR(info->hvc))
+		return PTR_ERR(info->hvc);
+	if (xen_pv_domain())
+		mfn = virt_to_mfn(info->intf);
+	else
+		mfn = __pa(info->intf) >> PAGE_SHIFT;
+	ret = gnttab_alloc_grant_references(1, &gref_head);
+	if (ret < 0)
+		return ret;
+	info->gntref = gref_head;
+	ref = gnttab_claim_grant_reference(&gref_head);
+	if (ref < 0)
+		return ref;
+	gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
+					mfn, 0);
+
+again:
+	ret = xenbus_transaction_start(&xbt);
+	if (ret) {
+		xenbus_dev_fatal(dev, ret, "starting transaction");
+		return ret;
+	}
+	ret = xenbus_printf(xbt, dev->nodename, "ring-ref", "%d", ref);
+	if (ret)
+		goto error_xenbus;
+	ret = xenbus_printf(xbt, dev->nodename, "port", "%u",
+			    evtchn);
+	if (ret)
+		goto error_xenbus;
+	ret = xenbus_printf(xbt, dev->nodename, "type", "ioemu");
+	if (ret)
+		goto error_xenbus;
+	ret = xenbus_transaction_end(xbt, 0);
+	if (ret) {
+		if (ret == -EAGAIN)
+			goto again;
+		xenbus_dev_fatal(dev, ret, "completing transaction");
+		return ret;
+	}
+
+	xenbus_switch_state(dev, XenbusStateInitialised);
+	return 0;
+
+error_xenbus:
+	xenbus_transaction_end(xbt, 1);
+	xenbus_dev_fatal(dev, ret, "writing xenstore");
+	return ret;
+}
+
+static int __devinit xencons_probe(struct xenbus_device *dev,
+				   const struct xenbus_device_id *id)
+{
+	int ret, devid;
+	struct xencons_info *info;
+
+	devid = dev->nodename[strlen(dev->nodename) - 1] - '0';
+	if (devid == 0)
+		return -ENODEV;
+
+	info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO);
+	if (!info)
+		goto error_nomem;
+	dev_set_drvdata(&dev->dev, info);
+	info->xbdev = dev;
+	info->vtermno = xenbus_devid_to_vtermno(devid);
+	info->intf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	if (!info->intf)
+		goto error_nomem;
+
+	ret = xencons_connect_backend(dev, info);
+	if (ret < 0)
+		goto error;
+	spin_lock(&xencons_lock);
+	list_add_tail(&info->list, &xenconsoles);
+	spin_unlock(&xencons_lock);
+
+	return 0;
+
+error_nomem:
+	ret = -ENOMEM;
+	xenbus_dev_fatal(dev, ret, "allocating device memory");
+error:
+	xencons_disconnect_backend(info);
+	xencons_free(info);
+	return ret;
+}
+
+static int xencons_resume(struct xenbus_device *dev)
+{
+	struct xencons_info *info = dev_get_drvdata(&dev->dev);
+
+	xencons_disconnect_backend(info);
+	memset(info->intf, 0, PAGE_SIZE);
+	return xencons_connect_backend(dev, info);
+}
+
+static void xencons_backend_changed(struct xenbus_device *dev,
+				    enum xenbus_state backend_state)
+{
+	switch (backend_state) {
+	case XenbusStateReconfiguring:
+	case XenbusStateReconfigured:
+	case XenbusStateInitialising:
+	case XenbusStateInitialised:
+	case XenbusStateUnknown:
+	case XenbusStateClosed:
+		break;
+
+	case XenbusStateInitWait:
+		break;
+
+	case XenbusStateConnected:
+		xenbus_switch_state(dev, XenbusStateConnected);
+		break;
+
+	case XenbusStateClosing:
+		xenbus_frontend_closed(dev);
+		break;
+	}
+}
+
+static const struct xenbus_device_id xencons_ids[] = {
+	{ "console" },
+	{ "" }
+};
+
+
+static DEFINE_XENBUS_DRIVER(xencons, "xenconsole",
+	.probe = xencons_probe,
+	.remove = xencons_remove,
+	.resume = xencons_resume,
+	.otherend_changed = xencons_backend_changed,
+);
+#endif /* CONFIG_HVC_XEN_FRONTEND */
+
+static int __init xen_hvc_init(void)
+{
+	int r;
+	struct xencons_info *info;
+	const struct hv_ops *ops;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	if (xen_initial_domain()) {
+		ops = &dom0_hvc_ops;
+		r = xen_initial_domain_console_init();
+		if (r < 0)
+			return r;
+		info = vtermno_to_xencons(HVC_COOKIE);
+	} else {
+		ops = &domU_hvc_ops;
+		if (xen_hvm_domain())
+			r = xen_hvm_console_init();
+		else
+			r = xen_pv_console_init();
+		if (r < 0)
+			return r;
+
+		info = vtermno_to_xencons(HVC_COOKIE);
+		info->irq = bind_evtchn_to_irq(info->evtchn);
+	}
+	if (info->irq < 0)
+		info->irq = 0; /* NO_IRQ */
+	else
+		irq_set_noprobe(info->irq);
+
+	info->hvc = hvc_alloc(HVC_COOKIE, info->irq, ops, 256);
+	if (IS_ERR(info->hvc)) {
+		r = PTR_ERR(info->hvc);
+		spin_lock(&xencons_lock);
+		list_del(&info->list);
+		spin_unlock(&xencons_lock);
+		if (info->irq)
+			unbind_from_irqhandler(info->irq, NULL);
+		kfree(info);
+		return r;
+	}
+
+	r = 0;
+#ifdef CONFIG_HVC_XEN_FRONTEND
+	r = xenbus_register_frontend(&xencons_driver);
+#endif
+	return r;
 }
 
 static void __exit xen_hvc_fini(void)
 {
-	if (hvc)
-		hvc_remove(hvc);
+	struct xencons_info *entry, *next;
+
+	if (list_empty(&xenconsoles))
+		return;
+
+	list_for_each_entry_safe(entry, next, &xenconsoles, list) {
+		xen_console_remove(entry);
+	}
 }
 
 static int xen_cons_init(void)
 {
-	struct hv_ops *ops;
+	const struct hv_ops *ops;
 
-	if (!xen_pv_domain())
+	if (!xen_domain())
 		return 0;
 
 	if (xen_initial_domain())
 		ops = &dom0_hvc_ops;
-	else
+	else {
+		int r;
 		ops = &domU_hvc_ops;
 
+		if (xen_hvm_domain())
+			r = xen_hvm_console_init();
+		else
+			r = xen_pv_console_init();
+		if (r < 0)
+			return r;
+	}
+
 	hvc_instantiate(HVC_COOKIE, 0, ops);
 	return 0;
 }
 
+
 module_init(xen_hvc_init);
 module_exit(xen_hvc_fini);
 console_initcall(xen_cons_init);
@@ -230,6 +606,9 @@ static void xenboot_write_console(struct console *console, const char *string,
 	unsigned int linelen, off = 0;
 	const char *pos;
 
+	if (!xen_pv_domain())
+		return;
+
 	dom0_write_console(0, string, len);
 
 	if (xen_initial_domain())
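__write_console() above follows the usual Xen shared-ring producer protocol: fill the space between cons and prod, issue a write barrier, then publish the new producer index. The standalone demo below walks through the same protocol on an invented 16-byte ring; the names and sizes are made up, only the free-running index arithmetic mirrors the driver.

/*
 * Standalone sketch of the shared-ring write used by the Xen console: data
 * lands in the buffer before the producer index is published (wmb() in the
 * kernel), and the writer never overtakes the consumer.
 */
#include <stdio.h>
#include <string.h>

#define RING_SIZE 16u				/* power of two, like the Xen console ring */
#define MASK_IDX(idx) ((idx) & (RING_SIZE - 1))

struct demo_ring {
	char buf[RING_SIZE];
	unsigned int cons;	/* advanced by the reader */
	unsigned int prod;	/* advanced by the writer */
};

static int ring_write(struct demo_ring *r, const char *data, int len)
{
	unsigned int cons = r->cons;
	unsigned int prod = r->prod;
	int sent = 0;

	while (sent < len && (prod - cons) < RING_SIZE)
		r->buf[MASK_IDX(prod++)] = data[sent++];

	__sync_synchronize();	/* stand-in for wmb(): data lands before the index */
	r->prod = prod;
	return sent;		/* caller loops/polls if the ring was full */
}

int main(void)
{
	struct demo_ring r = { .cons = 0, .prod = 0 };
	int n = ring_write(&r, "hello, hvc1\n", 12);

	printf("queued %d bytes, prod=%u cons=%u\n", n, r.prod, r.cons);
	return 0;
}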
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6ca0c407c144..eca60c73ef1f 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2269,6 +2269,7 @@ config XEN_FBDEV_FRONTEND
 	select FB_SYS_IMAGEBLIT
 	select FB_SYS_FOPS
 	select FB_DEFERRED_IO
+	select INPUT_XEN_KBDDEV_FRONTEND
 	select XEN_XENBUS_FRONTEND
 	default y
 	help
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index a1ced521cf74..648bcd4195c5 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -178,4 +178,21 @@ config XEN_PRIVCMD
 	depends on XEN
 	default m
 
+config XEN_ACPI_PROCESSOR
+	tristate "Xen ACPI processor"
+	depends on XEN && X86 && ACPI_PROCESSOR
+	default y if (X86_ACPI_CPUFREQ = y || X86_POWERNOW_K8 = y)
+	default m if (X86_ACPI_CPUFREQ = m || X86_POWERNOW_K8 = m)
+	help
+	  This ACPI processor uploads Power Management information to the Xen hypervisor.
+
+	  To do that the driver parses the Power Management data and uploads said
+	  information to the Xen hypervisor. Then the Xen hypervisor can select the
+	  proper Cx and Pxx states. It also registers itslef as the SMM so that
+	  other drivers (such as ACPI cpufreq scaling driver) will not load.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called xen_acpi_processor  If you do not know what to choose,
+	  select M here. If the CPUFREQ drivers are built in, select Y here.
+
 endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index aa31337192cc..9adc5be57b13 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
 obj-$(CONFIG_XEN_DOM0) += pci.o
 obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
 obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
-
+obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
 xen-evtchn-y := evtchn.o
 xen-gntdev-y := gntdev.o
 xen-gntalloc-y := gntalloc.o
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index 1e0fe01eb670..fdb6d229c9bb 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -97,7 +97,7 @@ static struct attribute *version_attrs[] = {
 	NULL
 };
 
-static struct attribute_group version_group = {
+static const struct attribute_group version_group = {
 	.name = "version",
 	.attrs = version_attrs,
 };
@@ -210,7 +210,7 @@ static struct attribute *xen_compile_attrs[] = {
 	NULL
 };
 
-static struct attribute_group xen_compilation_group = {
+static const struct attribute_group xen_compilation_group = {
 	.name = "compilation",
 	.attrs = xen_compile_attrs,
 };
@@ -340,7 +340,7 @@ static struct attribute *xen_properties_attrs[] = {
 	NULL
 };
 
-static struct attribute_group xen_properties_group = {
+static const struct attribute_group xen_properties_group = {
 	.name = "properties",
 	.attrs = xen_properties_attrs,
 };
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c new file mode 100644 index 000000000000..5c2be963aa18 --- /dev/null +++ b/drivers/xen/xen-acpi-processor.c | |||
@@ -0,0 +1,562 @@ | |||
1 | /* | ||
2 | * Copyright 2012 by Oracle Inc | ||
3 | * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | ||
4 | * | ||
5 | * This code borrows ideas from https://lkml.org/lkml/2011/11/30/249 | ||
6 | * so many thanks go to Kevin Tian <kevin.tian@intel.com> | ||
7 | * and Yu Ke <ke.yu@intel.com>. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms and conditions of the GNU General Public License, | ||
11 | * version 2, as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/cpumask.h> | ||
21 | #include <linux/cpufreq.h> | ||
22 | #include <linux/freezer.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/kthread.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <acpi/acpi_bus.h> | ||
29 | #include <acpi/acpi_drivers.h> | ||
30 | #include <acpi/processor.h> | ||
31 | |||
32 | #include <xen/interface/platform.h> | ||
33 | #include <asm/xen/hypercall.h> | ||
34 | |||
35 | #define DRV_NAME "xen-acpi-processor: " | ||
36 | |||
37 | static int no_hypercall; | ||
38 | MODULE_PARM_DESC(off, "Inhibit the hypercall."); | ||
39 | module_param_named(off, no_hypercall, int, 0400); | ||
40 | |||
41 | /* | ||
42 | * Note: Do not convert the acpi_id* below to cpumask_var_t or use cpumask_bit | ||
43 | * - as those shrink to nr_cpu_bits (which is dependent on possible_cpu), which | ||
44 | * can be less than what we want to put in. Instead use the 'nr_acpi_bits' | ||
45 | * which is dynamically computed based on the MADT or x2APIC table. | ||
46 | */ | ||
47 | static unsigned int nr_acpi_bits; | ||
48 | /* Mutex to protect the acpi_ids_done - for CPU hotplug use. */ | ||
49 | static DEFINE_MUTEX(acpi_ids_mutex); | ||
50 | /* Which ACPI ID we have processed from 'struct acpi_processor'. */ | ||
51 | static unsigned long *acpi_ids_done; | ||
52 | /* Which ACPI ID exist in the SSDT/DSDT processor definitions. */ | ||
53 | static unsigned long __initdata *acpi_id_present; | ||
54 | /* And if there is an _CST definition (or a PBLK) for the ACPI IDs */ | ||
55 | static unsigned long __initdata *acpi_id_cst_present; | ||
56 | |||
57 | static int push_cxx_to_hypervisor(struct acpi_processor *_pr) | ||
58 | { | ||
59 | struct xen_platform_op op = { | ||
60 | .cmd = XENPF_set_processor_pminfo, | ||
61 | .interface_version = XENPF_INTERFACE_VERSION, | ||
62 | .u.set_pminfo.id = _pr->acpi_id, | ||
63 | .u.set_pminfo.type = XEN_PM_CX, | ||
64 | }; | ||
65 | struct xen_processor_cx *dst_cx, *dst_cx_states = NULL; | ||
66 | struct acpi_processor_cx *cx; | ||
67 | unsigned int i, ok; | ||
68 | int ret = 0; | ||
69 | |||
70 | dst_cx_states = kcalloc(_pr->power.count, | ||
71 | sizeof(struct xen_processor_cx), GFP_KERNEL); | ||
72 | if (!dst_cx_states) | ||
73 | return -ENOMEM; | ||
74 | |||
75 | for (ok = 0, i = 1; i <= _pr->power.count; i++) { | ||
76 | cx = &_pr->power.states[i]; | ||
77 | if (!cx->valid) | ||
78 | continue; | ||
79 | |||
80 | dst_cx = &(dst_cx_states[ok++]); | ||
81 | |||
82 | dst_cx->reg.space_id = ACPI_ADR_SPACE_SYSTEM_IO; | ||
83 | if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { | ||
84 | dst_cx->reg.bit_width = 8; | ||
85 | dst_cx->reg.bit_offset = 0; | ||
86 | dst_cx->reg.access_size = 1; | ||
87 | } else { | ||
88 | dst_cx->reg.space_id = ACPI_ADR_SPACE_FIXED_HARDWARE; | ||
89 | if (cx->entry_method == ACPI_CSTATE_FFH) { | ||
90 | /* NATIVE_CSTATE_BEYOND_HALT */ | ||
91 | dst_cx->reg.bit_offset = 2; | ||
92 | dst_cx->reg.bit_width = 1; /* VENDOR_INTEL */ | ||
93 | } | ||
94 | dst_cx->reg.access_size = 0; | ||
95 | } | ||
96 | dst_cx->reg.address = cx->address; | ||
97 | |||
98 | dst_cx->type = cx->type; | ||
99 | dst_cx->latency = cx->latency; | ||
100 | dst_cx->power = cx->power; | ||
101 | |||
102 | dst_cx->dpcnt = 0; | ||
103 | set_xen_guest_handle(dst_cx->dp, NULL); | ||
104 | } | ||
105 | if (!ok) { | ||
106 | pr_debug(DRV_NAME "No _Cx for ACPI CPU %u\n", _pr->acpi_id); | ||
107 | kfree(dst_cx_states); | ||
108 | return -EINVAL; | ||
109 | } | ||
110 | op.u.set_pminfo.power.count = ok; | ||
111 | op.u.set_pminfo.power.flags.bm_control = _pr->flags.bm_control; | ||
112 | op.u.set_pminfo.power.flags.bm_check = _pr->flags.bm_check; | ||
113 | op.u.set_pminfo.power.flags.has_cst = _pr->flags.has_cst; | ||
114 | op.u.set_pminfo.power.flags.power_setup_done = | ||
115 | _pr->flags.power_setup_done; | ||
116 | |||
117 | set_xen_guest_handle(op.u.set_pminfo.power.states, dst_cx_states); | ||
118 | |||
119 | if (!no_hypercall) | ||
120 | ret = HYPERVISOR_dom0_op(&op); | ||
121 | |||
122 | if (!ret) { | ||
123 | pr_debug("ACPI CPU%u - C-states uploaded.\n", _pr->acpi_id); | ||
124 | for (i = 1; i <= _pr->power.count; i++) { | ||
125 | cx = &_pr->power.states[i]; | ||
126 | if (!cx->valid) | ||
127 | continue; | ||
128 | pr_debug(" C%d: %s %d uS\n", | ||
129 | cx->type, cx->desc, (u32)cx->latency); | ||
130 | } | ||
131 | } else | ||
132 | pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n", | ||
133 | ret, _pr->acpi_id); | ||
134 | |||
135 | kfree(dst_cx_states); | ||
136 | |||
137 | return ret; | ||
138 | } | ||
139 | static struct xen_processor_px * | ||
140 | xen_copy_pss_data(struct acpi_processor *_pr, | ||
141 | struct xen_processor_performance *dst_perf) | ||
142 | { | ||
143 | struct xen_processor_px *dst_states = NULL; | ||
144 | unsigned int i; | ||
145 | |||
146 | BUILD_BUG_ON(sizeof(struct xen_processor_px) != | ||
147 | sizeof(struct acpi_processor_px)); | ||
148 | |||
149 | dst_states = kcalloc(_pr->performance->state_count, | ||
150 | sizeof(struct xen_processor_px), GFP_KERNEL); | ||
151 | if (!dst_states) | ||
152 | return ERR_PTR(-ENOMEM); | ||
153 | |||
154 | dst_perf->state_count = _pr->performance->state_count; | ||
155 | for (i = 0; i < _pr->performance->state_count; i++) { | ||
156 | /* Fortunatly for us, they are both the same size */ | ||
157 | memcpy(&(dst_states[i]), &(_pr->performance->states[i]), | ||
158 | sizeof(struct acpi_processor_px)); | ||
159 | } | ||
160 | return dst_states; | ||
161 | } | ||
162 | static int xen_copy_psd_data(struct acpi_processor *_pr, | ||
163 | struct xen_processor_performance *dst) | ||
164 | { | ||
165 | struct acpi_psd_package *pdomain; | ||
166 | |||
167 | BUILD_BUG_ON(sizeof(struct xen_psd_package) != | ||
168 | sizeof(struct acpi_psd_package)); | ||
169 | |||
170 | /* This information is enumerated only if acpi_processor_preregister_performance | ||
171 | * has been called. | ||
172 | */ | ||
173 | dst->shared_type = _pr->performance->shared_type; | ||
174 | |||
175 | pdomain = &(_pr->performance->domain_info); | ||
176 | |||
177 | /* 'acpi_processor_preregister_performance' does not parse _PSD if | ||
178 | * num_processors <= 1, but Xen still requires it. Do it manually here. | ||
179 | */ | ||
180 | if (pdomain->num_processors <= 1) { | ||
181 | if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL) | ||
182 | dst->shared_type = CPUFREQ_SHARED_TYPE_ALL; | ||
183 | else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) | ||
184 | dst->shared_type = CPUFREQ_SHARED_TYPE_HW; | ||
185 | else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) | ||
186 | dst->shared_type = CPUFREQ_SHARED_TYPE_ANY; | ||
187 | |||
188 | } | ||
189 | memcpy(&(dst->domain_info), pdomain, sizeof(struct acpi_psd_package)); | ||
190 | return 0; | ||
191 | } | ||
192 | static int xen_copy_pct_data(struct acpi_pct_register *pct, | ||
193 | struct xen_pct_register *dst_pct) | ||
194 | { | ||
195 | /* It would be nice if you could just do 'memcpy(dst_pct, pct)', but | ||
196 | * sadly the Xen structure does not have the same padding, so the | ||
197 | * descriptor field takes two bytes (dst_pct) instead of one (pct). | ||
198 | */ | ||
199 | dst_pct->descriptor = pct->descriptor; | ||
200 | dst_pct->length = pct->length; | ||
201 | dst_pct->space_id = pct->space_id; | ||
202 | dst_pct->bit_width = pct->bit_width; | ||
203 | dst_pct->bit_offset = pct->bit_offset; | ||
204 | dst_pct->reserved = pct->reserved; | ||
205 | dst_pct->address = pct->address; | ||
206 | return 0; | ||
207 | } | ||
208 | static int push_pxx_to_hypervisor(struct acpi_processor *_pr) | ||
209 | { | ||
210 | int ret = 0; | ||
211 | struct xen_platform_op op = { | ||
212 | .cmd = XENPF_set_processor_pminfo, | ||
213 | .interface_version = XENPF_INTERFACE_VERSION, | ||
214 | .u.set_pminfo.id = _pr->acpi_id, | ||
215 | .u.set_pminfo.type = XEN_PM_PX, | ||
216 | }; | ||
217 | struct xen_processor_performance *dst_perf; | ||
218 | struct xen_processor_px *dst_states = NULL; | ||
219 | |||
220 | dst_perf = &op.u.set_pminfo.perf; | ||
221 | |||
222 | dst_perf->platform_limit = _pr->performance_platform_limit; | ||
223 | dst_perf->flags |= XEN_PX_PPC; | ||
224 | xen_copy_pct_data(&(_pr->performance->control_register), | ||
225 | &dst_perf->control_register); | ||
226 | xen_copy_pct_data(&(_pr->performance->status_register), | ||
227 | &dst_perf->status_register); | ||
228 | dst_perf->flags |= XEN_PX_PCT; | ||
229 | dst_states = xen_copy_pss_data(_pr, dst_perf); | ||
230 | if (!IS_ERR_OR_NULL(dst_states)) { | ||
231 | set_xen_guest_handle(dst_perf->states, dst_states); | ||
232 | dst_perf->flags |= XEN_PX_PSS; | ||
233 | } | ||
234 | if (!xen_copy_psd_data(_pr, dst_perf)) | ||
235 | dst_perf->flags |= XEN_PX_PSD; | ||
236 | |||
237 | if (dst_perf->flags != (XEN_PX_PSD | XEN_PX_PSS | XEN_PX_PCT | XEN_PX_PPC)) { | ||
238 | pr_warn(DRV_NAME "ACPI CPU%u missing some P-state data (%x), skipping.\n", | ||
239 | _pr->acpi_id, dst_perf->flags); | ||
240 | ret = -ENODEV; | ||
241 | goto err_free; | ||
242 | } | ||
243 | |||
244 | if (!no_hypercall) | ||
245 | ret = HYPERVISOR_dom0_op(&op); | ||
246 | |||
247 | if (!ret) { | ||
248 | struct acpi_processor_performance *perf; | ||
249 | unsigned int i; | ||
250 | |||
251 | perf = _pr->performance; | ||
252 | pr_debug("ACPI CPU%u - P-states uploaded.\n", _pr->acpi_id); | ||
253 | for (i = 0; i < perf->state_count; i++) { | ||
254 | pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", | ||
255 | (i == perf->state ? '*' : ' '), i, | ||
256 | (u32) perf->states[i].core_frequency, | ||
257 | (u32) perf->states[i].power, | ||
258 | (u32) perf->states[i].transition_latency); | ||
259 | } | ||
260 | } else if (ret != -EINVAL) | ||
261 | /* EINVAL means the ACPI ID is incorrect - meaning the ACPI | ||
262 | * table references a non-existent CPU - which can happen | ||
263 | * with broken ACPI tables. */ | ||
264 | pr_warn(DRV_NAME "(_PXX): Hypervisor error (%d) for ACPI CPU%u\n", | ||
265 | ret, _pr->acpi_id); | ||
266 | err_free: | ||
267 | if (!IS_ERR_OR_NULL(dst_states)) | ||
268 | kfree(dst_states); | ||
269 | |||
270 | return ret; | ||
271 | } | ||
272 | static int upload_pm_data(struct acpi_processor *_pr) | ||
273 | { | ||
274 | int err = 0; | ||
275 | |||
276 | mutex_lock(&acpi_ids_mutex); | ||
277 | if (__test_and_set_bit(_pr->acpi_id, acpi_ids_done)) { | ||
278 | mutex_unlock(&acpi_ids_mutex); | ||
279 | return -EBUSY; | ||
280 | } | ||
281 | if (_pr->flags.power) | ||
282 | err = push_cxx_to_hypervisor(_pr); | ||
283 | |||
284 | if (_pr->performance && _pr->performance->states) | ||
285 | err |= push_pxx_to_hypervisor(_pr); | ||
286 | |||
287 | mutex_unlock(&acpi_ids_mutex); | ||
288 | return err; | ||
289 | } | ||
290 | static unsigned int __init get_max_acpi_id(void) | ||
291 | { | ||
292 | struct xenpf_pcpuinfo *info; | ||
293 | struct xen_platform_op op = { | ||
294 | .cmd = XENPF_get_cpuinfo, | ||
295 | .interface_version = XENPF_INTERFACE_VERSION, | ||
296 | }; | ||
297 | int ret = 0; | ||
298 | unsigned int i, last_cpu, max_acpi_id = 0; | ||
299 | |||
300 | info = &op.u.pcpu_info; | ||
301 | info->xen_cpuid = 0; | ||
302 | |||
303 | ret = HYPERVISOR_dom0_op(&op); | ||
304 | if (ret) | ||
305 | return NR_CPUS; | ||
306 | |||
307 | /* The max_present is the same regardless of the xen_cpuid */ | ||
308 | last_cpu = op.u.pcpu_info.max_present; | ||
309 | for (i = 0; i <= last_cpu; i++) { | ||
310 | info->xen_cpuid = i; | ||
311 | ret = HYPERVISOR_dom0_op(&op); | ||
312 | if (ret) | ||
313 | continue; | ||
314 | max_acpi_id = max(info->acpi_id, max_acpi_id); | ||
315 | } | ||
316 | max_acpi_id *= 2; /* Slack for CPU hotplug support. */ | ||
317 | pr_debug(DRV_NAME "Max ACPI ID: %u\n", max_acpi_id); | ||
318 | return max_acpi_id; | ||
319 | } | ||
320 | /* | ||
321 | * The read_acpi_id and check_acpi_ids are there to support the Xen | ||
322 | * oddity of virtual CPUs != physical CPUs in the initial domain. | ||
323 | * The user can supply 'xen_max_vcpus=X' on the Xen hypervisor command | ||
324 | * line, which caps the number of CPUs the initial domain can see. | ||
325 | * In general that is OK, except it plays havoc with any of the | ||
326 | * for_each_[present|online]_cpu macros, which are bounded by the | ||
327 | * virtual CPU count. | ||
328 | */ | ||
329 | static acpi_status __init | ||
330 | read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv) | ||
331 | { | ||
332 | u32 acpi_id; | ||
333 | acpi_status status; | ||
334 | acpi_object_type acpi_type; | ||
335 | unsigned long long tmp; | ||
336 | union acpi_object object = { 0 }; | ||
337 | struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; | ||
338 | acpi_io_address pblk = 0; | ||
339 | |||
340 | status = acpi_get_type(handle, &acpi_type); | ||
341 | if (ACPI_FAILURE(status)) | ||
342 | return AE_OK; | ||
343 | |||
344 | switch (acpi_type) { | ||
345 | case ACPI_TYPE_PROCESSOR: | ||
346 | status = acpi_evaluate_object(handle, NULL, NULL, &buffer); | ||
347 | if (ACPI_FAILURE(status)) | ||
348 | return AE_OK; | ||
349 | acpi_id = object.processor.proc_id; | ||
350 | pblk = object.processor.pblk_address; | ||
351 | break; | ||
352 | case ACPI_TYPE_DEVICE: | ||
353 | status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp); | ||
354 | if (ACPI_FAILURE(status)) | ||
355 | return AE_OK; | ||
356 | acpi_id = tmp; | ||
357 | break; | ||
358 | default: | ||
359 | return AE_OK; | ||
360 | } | ||
361 | /* There can be more ACPI Processor objects than x2APIC or MADT | ||
362 | * entries. This can happen with incorrect ACPI SSDT declarations. */ | ||
363 | if (acpi_id > nr_acpi_bits) { | ||
364 | pr_debug(DRV_NAME "We only have %u, trying to set %u\n", | ||
365 | nr_acpi_bits, acpi_id); | ||
366 | return AE_OK; | ||
367 | } | ||
368 | /* OK, there is an ACPI Processor object */ | ||
369 | __set_bit(acpi_id, acpi_id_present); | ||
370 | |||
371 | pr_debug(DRV_NAME "ACPI CPU%u w/ PBLK:0x%lx\n", acpi_id, | ||
372 | (unsigned long)pblk); | ||
373 | |||
374 | status = acpi_evaluate_object(handle, "_CST", NULL, &buffer); | ||
375 | if (ACPI_FAILURE(status)) { | ||
376 | if (!pblk) | ||
377 | return AE_OK; | ||
378 | } | ||
379 | /* .. and it has a C-state */ | ||
380 | __set_bit(acpi_id, acpi_id_cst_present); | ||
381 | |||
382 | return AE_OK; | ||
383 | } | ||
384 | static int __init check_acpi_ids(struct acpi_processor *pr_backup) | ||
385 | { | ||
386 | |||
387 | if (!pr_backup) | ||
388 | return -ENODEV; | ||
389 | |||
390 | /* All online CPUs have been processed at this stage. Now verify | ||
391 | * whether in fact "online CPUs" == physical CPUs. | ||
392 | */ | ||
393 | acpi_id_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL); | ||
394 | if (!acpi_id_present) | ||
395 | return -ENOMEM; | ||
396 | |||
397 | acpi_id_cst_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL); | ||
398 | if (!acpi_id_cst_present) { | ||
399 | kfree(acpi_id_present); | ||
400 | return -ENOMEM; | ||
401 | } | ||
402 | |||
403 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, | ||
404 | ACPI_UINT32_MAX, | ||
405 | read_acpi_id, NULL, NULL, NULL); | ||
406 | acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL); | ||
407 | |||
408 | if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) { | ||
409 | unsigned int i; | ||
410 | for_each_set_bit(i, acpi_id_present, nr_acpi_bits) { | ||
411 | pr_backup->acpi_id = i; | ||
412 | /* Mask out C-states if there is no _CST or PBLK. */ | ||
413 | pr_backup->flags.power = test_bit(i, acpi_id_cst_present); | ||
414 | (void)upload_pm_data(pr_backup); | ||
415 | } | ||
416 | } | ||
417 | kfree(acpi_id_present); | ||
418 | acpi_id_present = NULL; | ||
419 | kfree(acpi_id_cst_present); | ||
420 | acpi_id_cst_present = NULL; | ||
421 | return 0; | ||
422 | } | ||
423 | static int __init check_prereq(void) | ||
424 | { | ||
425 | struct cpuinfo_x86 *c = &cpu_data(0); | ||
426 | |||
427 | if (!xen_initial_domain()) | ||
428 | return -ENODEV; | ||
429 | |||
430 | if (!acpi_gbl_FADT.smi_command) | ||
431 | return -ENODEV; | ||
432 | |||
433 | if (c->x86_vendor == X86_VENDOR_INTEL) { | ||
434 | if (!cpu_has(c, X86_FEATURE_EST)) | ||
435 | return -ENODEV; | ||
436 | |||
437 | return 0; | ||
438 | } | ||
439 | if (c->x86_vendor == X86_VENDOR_AMD) { | ||
440 | /* Copied from powernow-k8.h, can't include ../cpufreq/powernow | ||
441 | * as we get compile warnings for the static functions. | ||
442 | */ | ||
443 | #define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 | ||
444 | #define USE_HW_PSTATE 0x00000080 | ||
445 | u32 eax, ebx, ecx, edx; | ||
446 | cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); | ||
447 | if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE) | ||
448 | return -ENODEV; | ||
449 | return 0; | ||
450 | } | ||
451 | return -ENODEV; | ||
452 | } | ||
453 | /* acpi_perf_data is a pointer to percpu data. */ | ||
454 | static struct acpi_processor_performance __percpu *acpi_perf_data; | ||
455 | |||
456 | static void free_acpi_perf_data(void) | ||
457 | { | ||
458 | unsigned int i; | ||
459 | |||
460 | /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ | ||
461 | for_each_possible_cpu(i) | ||
462 | free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) | ||
463 | ->shared_cpu_map); | ||
464 | free_percpu(acpi_perf_data); | ||
465 | } | ||
466 | |||
467 | static int __init xen_acpi_processor_init(void) | ||
468 | { | ||
469 | struct acpi_processor *pr_backup = NULL; | ||
470 | unsigned int i; | ||
471 | int rc = check_prereq(); | ||
472 | |||
473 | if (rc) | ||
474 | return rc; | ||
475 | |||
476 | nr_acpi_bits = get_max_acpi_id() + 1; | ||
477 | acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL); | ||
478 | if (!acpi_ids_done) | ||
479 | return -ENOMEM; | ||
480 | |||
481 | acpi_perf_data = alloc_percpu(struct acpi_processor_performance); | ||
482 | if (!acpi_perf_data) { | ||
483 | pr_debug(DRV_NAME "Memory allocation error for acpi_perf_data.\n"); | ||
484 | kfree(acpi_ids_done); | ||
485 | return -ENOMEM; | ||
486 | } | ||
487 | for_each_possible_cpu(i) { | ||
488 | if (!zalloc_cpumask_var_node( | ||
489 | &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, | ||
490 | GFP_KERNEL, cpu_to_node(i))) { | ||
491 | rc = -ENOMEM; | ||
492 | goto err_out; | ||
493 | } | ||
494 | } | ||
495 | |||
496 | /* Do initialization in ACPI core. It is OK to fail here. */ | ||
497 | (void)acpi_processor_preregister_performance(acpi_perf_data); | ||
498 | |||
499 | for_each_possible_cpu(i) { | ||
500 | struct acpi_processor_performance *perf; | ||
501 | |||
502 | perf = per_cpu_ptr(acpi_perf_data, i); | ||
503 | rc = acpi_processor_register_performance(perf, i); | ||
504 | if (WARN_ON(rc)) | ||
505 | goto err_out; | ||
506 | } | ||
507 | rc = acpi_processor_notify_smm(THIS_MODULE); | ||
508 | if (WARN_ON(rc)) | ||
509 | goto err_unregister; | ||
510 | |||
511 | for_each_possible_cpu(i) { | ||
512 | struct acpi_processor *_pr; | ||
513 | _pr = per_cpu(processors, i /* APIC ID */); | ||
514 | if (!_pr) | ||
515 | continue; | ||
516 | |||
517 | if (!pr_backup) { | ||
518 | pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL); | ||
519 | if (pr_backup) memcpy(pr_backup, _pr, sizeof(struct acpi_processor)); | ||
520 | } | ||
521 | (void)upload_pm_data(_pr); | ||
522 | } | ||
523 | rc = check_acpi_ids(pr_backup); | ||
524 | if (rc) | ||
525 | goto err_unregister; | ||
526 | |||
527 | kfree(pr_backup); | ||
528 | |||
529 | return 0; | ||
530 | err_unregister: | ||
531 | for_each_possible_cpu(i) { | ||
532 | struct acpi_processor_performance *perf; | ||
533 | perf = per_cpu_ptr(acpi_perf_data, i); | ||
534 | acpi_processor_unregister_performance(perf, i); | ||
535 | } | ||
536 | err_out: | ||
537 | /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ | ||
538 | free_acpi_perf_data(); | ||
539 | kfree(acpi_ids_done); | ||
540 | return rc; | ||
541 | } | ||
542 | static void __exit xen_acpi_processor_exit(void) | ||
543 | { | ||
544 | int i; | ||
545 | |||
546 | kfree(acpi_ids_done); | ||
547 | for_each_possible_cpu(i) { | ||
548 | struct acpi_processor_performance *perf; | ||
549 | perf = per_cpu_ptr(acpi_perf_data, i); | ||
550 | acpi_processor_unregister_performance(perf, i); | ||
551 | } | ||
552 | free_acpi_perf_data(); | ||
553 | } | ||
554 | |||
555 | MODULE_AUTHOR("Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>"); | ||
556 | MODULE_DESCRIPTION("Xen ACPI Processor P-states (and Cx) driver which uploads PM data to Xen hypervisor"); | ||
557 | MODULE_LICENSE("GPL"); | ||
558 | |||
559 | /* We want to load before the cpufreq scaling drivers, which are | ||
560 | * loaded at late_initcall time. */ | ||
561 | device_initcall(xen_acpi_processor_init); | ||
562 | module_exit(xen_acpi_processor_exit); | ||
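The upload paths above consult a no_hypercall flag before calling HYPERVISOR_dom0_op; its declaration sits earlier in this file and is outside this hunk. A typical form is sketched below purely for orientation; the parameter name "off", its permissions, and the description text are assumptions, not a quote of the driver.

    #include <linux/module.h>

    /* Sketch only; the driver's real declaration may differ. */
    static bool no_hypercall;
    module_param_named(off, no_hypercall, bool, 0400);
    MODULE_PARM_DESC(off, "Inhibit the hypercall to the hypervisor.");

With such a knob set, the driver would still parse the ACPI data but skip the XENPF_set_processor_pminfo hypercalls, which matches how no_hypercall is tested in push_cxx_to_hypervisor() and push_pxx_to_hypervisor() above.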
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c index 596e6a7b17d6..8f37e23f6d13 100644 --- a/drivers/xen/xen-balloon.c +++ b/drivers/xen/xen-balloon.c | |||
@@ -207,7 +207,7 @@ static struct attribute *balloon_info_attrs[] = { | |||
207 | NULL | 207 | NULL |
208 | }; | 208 | }; |
209 | 209 | ||
210 | static struct attribute_group balloon_info_group = { | 210 | static const struct attribute_group balloon_info_group = { |
211 | .name = "info", | 211 | .name = "info", |
212 | .attrs = balloon_info_attrs | 212 | .attrs = balloon_info_attrs |
213 | }; | 213 | }; |
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 19834d1c7c36..097e536e8672 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c | |||
@@ -85,19 +85,34 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev) | |||
85 | static void pcistub_device_release(struct kref *kref) | 85 | static void pcistub_device_release(struct kref *kref) |
86 | { | 86 | { |
87 | struct pcistub_device *psdev; | 87 | struct pcistub_device *psdev; |
88 | struct xen_pcibk_dev_data *dev_data; | ||
88 | 89 | ||
89 | psdev = container_of(kref, struct pcistub_device, kref); | 90 | psdev = container_of(kref, struct pcistub_device, kref); |
91 | dev_data = pci_get_drvdata(psdev->dev); | ||
90 | 92 | ||
91 | dev_dbg(&psdev->dev->dev, "pcistub_device_release\n"); | 93 | dev_dbg(&psdev->dev->dev, "pcistub_device_release\n"); |
92 | 94 | ||
93 | xen_unregister_device_domain_owner(psdev->dev); | 95 | xen_unregister_device_domain_owner(psdev->dev); |
94 | 96 | ||
95 | /* Clean-up the device */ | 97 | /* Call the reset variant that does not take the device lock, as |
98 | * this is called from "unbind", which already holds the device_lock mutex. | ||
99 | */ | ||
100 | __pci_reset_function_locked(psdev->dev); | ||
101 | if (pci_load_and_free_saved_state(psdev->dev, | ||
102 | &dev_data->pci_saved_state)) { | ||
103 | dev_dbg(&psdev->dev->dev, "Could not reload PCI state\n"); | ||
104 | } else | ||
105 | pci_restore_state(psdev->dev); | ||
106 | |||
107 | /* Disable the device */ | ||
96 | xen_pcibk_reset_device(psdev->dev); | 108 | xen_pcibk_reset_device(psdev->dev); |
109 | |||
110 | kfree(dev_data); | ||
111 | pci_set_drvdata(psdev->dev, NULL); | ||
112 | |||
113 | /* Clean-up the device */ | ||
97 | xen_pcibk_config_free_dyn_fields(psdev->dev); | 114 | xen_pcibk_config_free_dyn_fields(psdev->dev); |
98 | xen_pcibk_config_free_dev(psdev->dev); | 115 | xen_pcibk_config_free_dev(psdev->dev); |
99 | kfree(pci_get_drvdata(psdev->dev)); | ||
100 | pci_set_drvdata(psdev->dev, NULL); | ||
101 | 116 | ||
102 | psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; | 117 | psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; |
103 | pci_dev_put(psdev->dev); | 118 | pci_dev_put(psdev->dev); |
@@ -231,7 +246,17 @@ void pcistub_put_pci_dev(struct pci_dev *dev) | |||
231 | /* Cleanup our device | 246 | /* Cleanup our device |
232 | * (so it's ready for the next domain) | 247 | * (so it's ready for the next domain) |
233 | */ | 248 | */ |
249 | |||
250 | /* This is OK - we are running from workqueue context | ||
251 | * and want to inhibit the user from fiddling with 'reset' | ||
252 | */ | ||
253 | pci_reset_function(dev); | ||
254 | pci_restore_state(psdev->dev); | ||
255 | |||
256 | /* This disables the device. */ | ||
234 | xen_pcibk_reset_device(found_psdev->dev); | 257 | xen_pcibk_reset_device(found_psdev->dev); |
258 | |||
259 | /* And cleanup up our emulated fields. */ | ||
235 | xen_pcibk_config_free_dyn_fields(found_psdev->dev); | 260 | xen_pcibk_config_free_dyn_fields(found_psdev->dev); |
236 | xen_pcibk_config_reset_dev(found_psdev->dev); | 261 | xen_pcibk_config_reset_dev(found_psdev->dev); |
237 | 262 | ||
@@ -328,6 +353,16 @@ static int __devinit pcistub_init_device(struct pci_dev *dev) | |||
328 | if (err) | 353 | if (err) |
329 | goto config_release; | 354 | goto config_release; |
330 | 355 | ||
356 | dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); | ||
357 | __pci_reset_function_locked(dev); | ||
358 | |||
359 | /* We need the device active to save the state. */ | ||
360 | dev_dbg(&dev->dev, "save state of device\n"); | ||
361 | pci_save_state(dev); | ||
362 | dev_data->pci_saved_state = pci_store_saved_state(dev); | ||
363 | if (!dev_data->pci_saved_state) | ||
364 | dev_err(&dev->dev, "Could not store PCI conf saved state!\n"); | ||
365 | |||
331 | /* Now disable the device (this also ensures some private device | 366 | /* Now disable the device (this also ensures some private device |
332 | * data is setup before we export) | 367 | * data is setup before we export) |
333 | */ | 368 | */ |
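The pci_stub changes pair a configuration-space snapshot at bind time with a reload when the stub device is released. The sketch below pulls that pairing out of the pciback context to make the PCI core API flow easier to follow; the function names and the global holding the saved state are purely illustrative, and error handling is trimmed (the real driver keeps the state in its per-device xen_pcibk_dev_data).

    #include <linux/pci.h>

    static struct pci_saved_state *saved;	/* illustrative storage only */

    static void example_bind(struct pci_dev *dev)
    {
    	__pci_reset_function_locked(dev);	/* caller holds device_lock   */
    	pci_save_state(dev);			/* snapshot config space      */
    	saved = pci_store_saved_state(dev);	/* keep a private copy of it  */
    }

    static void example_release(struct pci_dev *dev)
    {
    	__pci_reset_function_locked(dev);
    	/* Reload the snapshot (it is freed by the call) and apply it. */
    	if (pci_load_and_free_saved_state(dev, &saved) == 0)
    		pci_restore_state(dev);
    }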
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h index e9b4011c5f9a..a7def010eba3 100644 --- a/drivers/xen/xen-pciback/pciback.h +++ b/drivers/xen/xen-pciback/pciback.h | |||
@@ -41,6 +41,7 @@ struct xen_pcibk_device { | |||
41 | 41 | ||
42 | struct xen_pcibk_dev_data { | 42 | struct xen_pcibk_dev_data { |
43 | struct list_head config_fields; | 43 | struct list_head config_fields; |
44 | struct pci_saved_state *pci_saved_state; | ||
44 | unsigned int permissive:1; | 45 | unsigned int permissive:1; |
45 | unsigned int warned_on_write:1; | 46 | unsigned int warned_on_write:1; |
46 | unsigned int enable_intx:1; | 47 | unsigned int enable_intx:1; |
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index 767ff656d5a7..146c94897016 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c | |||
@@ -488,7 +488,7 @@ static struct attribute *selfballoon_attrs[] = { | |||
488 | NULL | 488 | NULL |
489 | }; | 489 | }; |
490 | 490 | ||
491 | static struct attribute_group selfballoon_group = { | 491 | static const struct attribute_group selfballoon_group = { |
492 | .name = "selfballoon", | 492 | .name = "selfballoon", |
493 | .attrs = selfballoon_attrs | 493 | .attrs = selfballoon_attrs |
494 | }; | 494 | }; |
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 566d2adbd6ea..b3e146edb51d 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c | |||
@@ -569,7 +569,7 @@ int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, | |||
569 | { | 569 | { |
570 | struct gnttab_map_grant_ref op; | 570 | struct gnttab_map_grant_ref op; |
571 | 571 | ||
572 | gnttab_set_map_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, gnt_ref, | 572 | gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref, |
573 | dev->otherend_id); | 573 | dev->otherend_id); |
574 | 574 | ||
575 | if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) | 575 | if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) |
@@ -662,7 +662,7 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) | |||
662 | goto found; | 662 | goto found; |
663 | } | 663 | } |
664 | } | 664 | } |
665 | node = NULL; | 665 | node = addr = NULL; |
666 | found: | 666 | found: |
667 | spin_unlock(&xenbus_valloc_lock); | 667 | spin_unlock(&xenbus_valloc_lock); |
668 | 668 | ||
@@ -698,7 +698,7 @@ int xenbus_unmap_ring(struct xenbus_device *dev, | |||
698 | { | 698 | { |
699 | struct gnttab_unmap_grant_ref op; | 699 | struct gnttab_unmap_grant_ref op; |
700 | 700 | ||
701 | gnttab_set_unmap_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, handle); | 701 | gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle); |
702 | 702 | ||
703 | if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) | 703 | if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) |
704 | BUG(); | 704 | BUG(); |
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 3864967202b5..b793723e724d 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -257,11 +257,12 @@ int xenbus_dev_remove(struct device *_dev) | |||
257 | DPRINTK("%s", dev->nodename); | 257 | DPRINTK("%s", dev->nodename); |
258 | 258 | ||
259 | free_otherend_watch(dev); | 259 | free_otherend_watch(dev); |
260 | free_otherend_details(dev); | ||
261 | 260 | ||
262 | if (drv->remove) | 261 | if (drv->remove) |
263 | drv->remove(dev); | 262 | drv->remove(dev); |
264 | 263 | ||
264 | free_otherend_details(dev); | ||
265 | |||
265 | xenbus_switch_state(dev, XenbusStateClosed); | 266 | xenbus_switch_state(dev, XenbusStateClosed); |
266 | return 0; | 267 | return 0; |
267 | } | 268 | } |
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index 9c57819df51a..f20c5f178b40 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c | |||
@@ -53,6 +53,12 @@ static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type, | |||
53 | char *nodename; | 53 | char *nodename; |
54 | int err; | 54 | int err; |
55 | 55 | ||
56 | /* ignore console/0 */ | ||
57 | if (!strncmp(type, "console", 7) && !strncmp(name, "0", 1)) { | ||
58 | DPRINTK("Ignoring buggy device entry console/0"); | ||
59 | return 0; | ||
60 | } | ||
61 | |||
56 | nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); | 62 | nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); |
57 | if (!nodename) | 63 | if (!nodename) |
58 | return -ENOMEM; | 64 | return -ENOMEM; |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 27bf521bcebd..900da5db60ee 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -817,6 +817,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq); | |||
817 | int pcie_get_mps(struct pci_dev *dev); | 817 | int pcie_get_mps(struct pci_dev *dev); |
818 | int pcie_set_mps(struct pci_dev *dev, int mps); | 818 | int pcie_set_mps(struct pci_dev *dev, int mps); |
819 | int __pci_reset_function(struct pci_dev *dev); | 819 | int __pci_reset_function(struct pci_dev *dev); |
820 | int __pci_reset_function_locked(struct pci_dev *dev); | ||
820 | int pci_reset_function(struct pci_dev *dev); | 821 | int pci_reset_function(struct pci_dev *dev); |
821 | void pci_update_resource(struct pci_dev *dev, int resno); | 822 | void pci_update_resource(struct pci_dev *dev, int resno); |
822 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); | 823 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); |
diff --git a/include/xen/interface/hvm/params.h b/include/xen/interface/hvm/params.h index 1888d8c157e6..1b4f923d7086 100644 --- a/include/xen/interface/hvm/params.h +++ b/include/xen/interface/hvm/params.h | |||
@@ -90,6 +90,10 @@ | |||
90 | /* Boolean: Enable aligning all periodic vpts to reduce interrupts */ | 90 | /* Boolean: Enable aligning all periodic vpts to reduce interrupts */ |
91 | #define HVM_PARAM_VPT_ALIGN 16 | 91 | #define HVM_PARAM_VPT_ALIGN 16 |
92 | 92 | ||
93 | #define HVM_NR_PARAMS 17 | 93 | /* Console debug shared memory ring and event channel */ |
94 | #define HVM_PARAM_CONSOLE_PFN 17 | ||
95 | #define HVM_PARAM_CONSOLE_EVTCHN 18 | ||
96 | |||
97 | #define HVM_NR_PARAMS 19 | ||
94 | 98 | ||
95 | #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ | 99 | #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ |
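HVM_PARAM_CONSOLE_PFN and HVM_PARAM_CONSOLE_EVTCHN let a PV-on-HVM guest discover the shared console ring page and its event channel. A minimal sketch of fetching them is shown below; it assumes the in-kernel hvm_get_parameter() helper (a thin wrapper around HVMOP_get_param) and abbreviates error handling, so treat it as illustrative rather than a copy of the hvc_xen code in this series.

    #include <xen/hvm.h>
    #include <xen/interface/hvm/params.h>

    static int example_get_console_params(unsigned long *gpfn, int *evtchn)
    {
    	uint64_t v;
    	int ret;

    	ret = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
    	if (ret)
    		return ret;
    	*evtchn = v;

    	ret = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
    	if (ret)
    		return ret;
    	*gpfn = v;	/* guest frame holding the console ring */
    	return 0;
    }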
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h index c1080d9c705d..0c28989007fb 100644 --- a/include/xen/interface/physdev.h +++ b/include/xen/interface/physdev.h | |||
@@ -145,6 +145,13 @@ struct physdev_manage_pci { | |||
145 | uint8_t devfn; | 145 | uint8_t devfn; |
146 | }; | 146 | }; |
147 | 147 | ||
148 | #define PHYSDEVOP_restore_msi 19 | ||
149 | struct physdev_restore_msi { | ||
150 | /* IN */ | ||
151 | uint8_t bus; | ||
152 | uint8_t devfn; | ||
153 | }; | ||
154 | |||
148 | #define PHYSDEVOP_manage_pci_add_ext 20 | 155 | #define PHYSDEVOP_manage_pci_add_ext 20 |
149 | struct physdev_manage_pci_ext { | 156 | struct physdev_manage_pci_ext { |
150 | /* IN */ | 157 | /* IN */ |
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h index c1684680431b..486653f0dd8f 100644 --- a/include/xen/interface/platform.h +++ b/include/xen/interface/platform.h | |||
@@ -200,7 +200,7 @@ DEFINE_GUEST_HANDLE_STRUCT(xenpf_getidletime_t); | |||
200 | #define XEN_PM_CX 0 | 200 | #define XEN_PM_CX 0 |
201 | #define XEN_PM_PX 1 | 201 | #define XEN_PM_PX 1 |
202 | #define XEN_PM_TX 2 | 202 | #define XEN_PM_TX 2 |
203 | | 203 | #define XEN_PM_PDC 3 |
204 | /* Px sub info type */ | 204 | /* Px sub info type */ |
205 | #define XEN_PX_PCT 1 | 205 | #define XEN_PX_PCT 1 |
206 | #define XEN_PX_PSS 2 | 206 | #define XEN_PX_PSS 2 |
@@ -293,10 +293,27 @@ struct xenpf_set_processor_pminfo { | |||
293 | union { | 293 | union { |
294 | struct xen_processor_power power;/* Cx: _CST/_CSD */ | 294 | struct xen_processor_power power;/* Cx: _CST/_CSD */ |
295 | struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */ | 295 | struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */ |
296 | GUEST_HANDLE(uint32_t) pdc; | ||
296 | }; | 297 | }; |
297 | }; | 298 | }; |
298 | DEFINE_GUEST_HANDLE_STRUCT(xenpf_set_processor_pminfo); | 299 | DEFINE_GUEST_HANDLE_STRUCT(xenpf_set_processor_pminfo); |
299 | 300 | ||
301 | #define XENPF_get_cpuinfo 55 | ||
302 | struct xenpf_pcpuinfo { | ||
303 | /* IN */ | ||
304 | uint32_t xen_cpuid; | ||
305 | /* OUT */ | ||
306 | /* The maximum cpu_id that is present */ | ||
307 | uint32_t max_present; | ||
308 | #define XEN_PCPU_FLAGS_ONLINE 1 | ||
309 | /* Corresponding xen_cpuid is not present */ | ||
310 | #define XEN_PCPU_FLAGS_INVALID 2 | ||
311 | uint32_t flags; | ||
312 | uint32_t apic_id; | ||
313 | uint32_t acpi_id; | ||
314 | }; | ||
315 | DEFINE_GUEST_HANDLE_STRUCT(xenpf_pcpuinfo); | ||
316 | |||
300 | struct xen_platform_op { | 317 | struct xen_platform_op { |
301 | uint32_t cmd; | 318 | uint32_t cmd; |
302 | uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ | 319 | uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ |
@@ -312,6 +329,7 @@ struct xen_platform_op { | |||
312 | struct xenpf_change_freq change_freq; | 329 | struct xenpf_change_freq change_freq; |
313 | struct xenpf_getidletime getidletime; | 330 | struct xenpf_getidletime getidletime; |
314 | struct xenpf_set_processor_pminfo set_pminfo; | 331 | struct xenpf_set_processor_pminfo set_pminfo; |
332 | struct xenpf_pcpuinfo pcpu_info; | ||
315 | uint8_t pad[128]; | 333 | uint8_t pad[128]; |
316 | } u; | 334 | } u; |
317 | }; | 335 | }; |