 46 files changed, 411 insertions(+), 146 deletions(-)
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 7bb0d934b6d8..dbea4f95fc85 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -139,6 +139,7 @@ Code	Seq#	Include File		Comments
 'm'	all	linux/synclink.h	conflict!
 'm'	00-1F	net/irda/irmod.h	conflict!
 'n'	00-7F	linux/ncp_fs.h
+'n'	80-8F	linux/nilfs2_fs.h	NILFS2
 'n'	E0-FF	video/matrox.h		matroxfb
 'o'	00-1F	fs/ocfs2/ocfs2_fs.h	OCFS2
 'o'	00-03	include/mtd/ubi-user.h	conflict! (OCFS2 and UBI overlaps)
diff --git a/Documentation/lockdep-design.txt b/Documentation/lockdep-design.txt
index e20d913d5914..abf768c681e2 100644
--- a/Documentation/lockdep-design.txt
+++ b/Documentation/lockdep-design.txt
@@ -30,9 +30,9 @@ State
 The validator tracks lock-class usage history into 4n + 1 separate state bits:
 
 - 'ever held in STATE context'
-- 'ever head as readlock in STATE context'
-- 'ever head with STATE enabled'
-- 'ever head as readlock with STATE enabled'
+- 'ever held as readlock in STATE context'
+- 'ever held with STATE enabled'
+- 'ever held as readlock with STATE enabled'
 
 Where STATE can be either one of (kernel/lockdep_states.h)
 - hardirq
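
To make the 4n + 1 bookkeeping concrete (an aside; the exact state list is an assumption drawn from the lockdep sources of this era): kernel/lockdep_states.h enumerates three states — hardirq, softirq and reclaim_fs — so n = 3, and each lock class carries 4 x 3 = 12 usage bits plus the single 'ever used' bit, 13 state bits in total.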
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index 21f63fffc379..9bf55afd08d0 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -247,7 +247,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
 		vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
 		/* Write high word. FIXME: this is a kludge! */
 		v.u.bits[1] &= 0x3ffff;
-		mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
+		mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8,
+				ma, IOREQ_WRITE);
 		data = v.u.bits[0];
 		size = 3;
 	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
@@ -265,7 +266,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
 
 		/* Write high word.FIXME: this is a kludge! */
 		v.u.bits[1] &= 0x3ffff;
-		mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
+		mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1],
+				8, ma, IOREQ_WRITE);
 		data = v.u.bits[0];
 		size = 3;
 	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index 46b02cbcc874..cc406d064a09 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -461,7 +461,7 @@ void setreg(unsigned long regnum, unsigned long val,
 u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
 {
 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-	u64 val;
+	unsigned long val;
 
 	if (!reg)
 		return 0;
@@ -469,7 +469,7 @@ u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
 	return val;
 }
 
-void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
+void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat)
 {
 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
 	long sof = (regs->cr_ifs) & 0x7f;
@@ -1072,7 +1072,7 @@ void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
 }
 
-int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
+int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr)
 {
 	struct thash_data *data;
 	union ia64_isr visr, pt_isr;
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 042af92ced83..360724d3ae69 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -686,14 +686,15 @@ static inline int highest_inservice_irq(struct kvm_vcpu *vcpu)
 	return highest_bits((int *)&(VMX(vcpu, insvc[0])));
 }
 
-extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, u64 reg,
+extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
 		struct ia64_fpreg *val);
-extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, u64 reg,
+extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
 		struct ia64_fpreg *val);
-extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, u64 reg);
-extern void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 val, int nat);
-extern u64 vcpu_get_psr(struct kvm_vcpu *vcpu);
-extern void vcpu_set_psr(struct kvm_vcpu *vcpu, u64 val);
+extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg);
+extern void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg,
+		u64 val, int nat);
+extern unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu);
+extern void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val);
 extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr);
 extern void vcpu_bsw0(struct kvm_vcpu *vcpu);
 extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte,
diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h
index 35d2ed6396f6..19aecc90f7a4 100644
--- a/arch/mn10300/include/asm/pci.h
+++ b/arch/mn10300/include/asm/pci.h
@@ -59,7 +59,6 @@ void pcibios_penalize_isa_irq(int irq);
 #include <linux/slab.h>
 #include <asm/scatterlist.h>
 #include <linux/string.h>
-#include <linux/mm.h>
 #include <asm/io.h>
 
 struct pci_dev;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index dfdf13c9fefd..fddc3ed715fa 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -34,7 +34,7 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
 /* We don't currently support large pages. */
-#define KVM_PAGES_PER_HPAGE (1<<31)
+#define KVM_PAGES_PER_HPAGE (1UL << 31)
 
 struct kvm;
 struct kvm_run;
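
The one-character change above is a classic integer-promotion fix: `1 << 31` is evaluated in plain `int`, where the shift hits the sign bit before the value is ever widened to the unsigned type the macro is compared against. A minimal stand-alone illustration of the difference (hypothetical test program, not part of the patch; behavior shown for a typical LP64 target):

	#include <stdio.h>

	int main(void)
	{
		/* The shift happens in int: it reaches the sign bit (undefined
		 * behavior, in practice INT_MIN), and then sign-extends when
		 * widened to unsigned long. */
		unsigned long bad  = 1 << 31;
		unsigned long good = 1UL << 31;	/* shift done in unsigned long */

		printf("bad  = %#lx\n", bad);	/* commonly 0xffffffff80000000 */
		printf("good = %#lx\n", good);	/* 0x80000000 */
		return 0;
	}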
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 20a60d661ba8..ccf129d47d84 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -7,6 +7,7 @@
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/lmb.h>
 #include <asm/bug.h>
 #include <asm/abs_addr.h>
 
@@ -90,11 +91,10 @@ static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
 static int dma_direct_dma_supported(struct device *dev, u64 mask)
 {
 #ifdef CONFIG_PPC64
-	/* Could be improved to check for memory though it better be
-	 * done via some global so platforms can set the limit in case
+	/* Could be improved so platforms can set the limit in case
 	 * they have limited DMA windows
 	 */
-	return mask >= DMA_BIT_MASK(32);
+	return mask >= (lmb_end_of_DRAM() - 1);
 #else
 	return 1;
 #endif
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index f04f5301b1b4..4d613415c435 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -386,7 +386,7 @@ no_timer:
 	}
 	__unset_cpu_idle(vcpu);
 	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&vcpu->wq, &wait);
+	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
 	spin_unlock_bh(&vcpu->arch.local_int.lock);
 	spin_unlock(&vcpu->arch.local_int.float_int->lock);
 	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 2ed4e2bb3b32..a5371ec36776 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -17,11 +17,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return x2apic_enabled();
 }
 
-/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-
+/*
+ * need to use more than cpu 0, because we need more vectors when
+ * MSI-X are used.
+ */
 static const struct cpumask *x2apic_target_cpus(void)
 {
-	return cpumask_of(0);
+	return cpu_online_mask;
 }
 
 /*
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 0b631c6a2e00..a8989aadc99a 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -27,11 +27,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-
+/*
+ * need to use more than cpu 0, because we need more vectors when
+ * MSI-X are used.
+ */
 static const struct cpumask *x2apic_target_cpus(void)
 {
-	return cpumask_of(0);
+	return cpu_online_mask;
 }
 
 static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 19ccf6d0dccf..fe26ba3e3451 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -354,7 +354,7 @@ void __init efi_init(void)
 	 */
 	c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2);
 	if (c16) {
-		for (i = 0; i < sizeof(vendor) && *c16; ++i)
+		for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
 			vendor[i] = *c16++;
 		vendor[i] = '\0';
 	} else
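
The fix above closes an off-by-one: when the firmware vendor string filled the whole buffer, the loop exited with `i == sizeof(vendor)` and the terminating NUL landed one byte past the array. Bounding the loop at `sizeof(vendor) - 1` reserves the last slot. The general copy-then-terminate shape, as a stand-alone sketch (hypothetical names; assumes size >= 1):

	#include <stddef.h>

	static void copy_bounded(char *dst, size_t size, const char *src)
	{
		size_t i;

		/* Stop one short of the end so the store below stays in bounds. */
		for (i = 0; i < size - 1 && *src; ++i)
			dst[i] = *src++;
		dst[i] = '\0';	/* i <= size - 1 here, never past the array */
	}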
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 834c9da8bf9d..9eb897603705 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -405,7 +405,7 @@ EXPORT_SYMBOL(machine_real_restart);
 #endif /* CONFIG_X86_32 */
 
 /*
- * Apple MacBook5,2 (2009 MacBook) needs reboot=p
+ * Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
  */
 static int __init set_pci_reboot(const struct dmi_system_id *d)
 {
@@ -426,6 +426,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
 		},
 	},
+	{	/* Handle problems with rebooting on Apple MacBookPro5,1 */
+		.callback = set_pci_reboot,
+		.ident = "Apple MacBookPro5,1",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"),
+		},
+	},
 	{ }
 };
 
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 6e1a368d21d4..71f4368b357e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -275,15 +275,20 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
  * use the TSC value at the transitions to calculate a pretty
  * good value for the TSC frequencty.
  */
+static inline int pit_verify_msb(unsigned char val)
+{
+	/* Ignore LSB */
+	inb(0x42);
+	return inb(0x42) == val;
+}
+
 static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
 {
 	int count;
 	u64 tsc = 0;
 
 	for (count = 0; count < 50000; count++) {
-		/* Ignore LSB */
-		inb(0x42);
-		if (inb(0x42) != val)
+		if (!pit_verify_msb(val))
 			break;
 		tsc = get_cycles();
 	}
@@ -336,8 +341,7 @@ static unsigned long quick_pit_calibrate(void)
 	 * to do that is to just read back the 16-bit counter
 	 * once from the PIT.
 	 */
-	inb(0x42);
-	inb(0x42);
+	pit_verify_msb(0);
 
 	if (pit_expect_msb(0xff, &tsc, &d1)) {
 		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
@@ -348,8 +352,19 @@ static unsigned long quick_pit_calibrate(void)
 			 * Iterate until the error is less than 500 ppm
 			 */
 			delta -= tsc;
-			if (d1+d2 < delta >> 11)
-				goto success;
+			if (d1+d2 >= delta >> 11)
+				continue;
+
+			/*
+			 * Check the PIT one more time to verify that
+			 * all TSC reads were stable wrt the PIT.
+			 *
+			 * This also guarantees serialization of the
+			 * last cycle read ('d2') in pit_expect_msb.
+			 */
+			if (!pit_verify_msb(0xfe - i))
+				break;
+			goto success;
 		}
 	}
 	printk("Fast TSC calibration failed\n");
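
On the 500 ppm figure in the loop above: the acceptance test compares the combined read uncertainty d1 + d2 against delta >> 11, and

	delta >> 11 = delta / 2048, with 1/2048 ~ 0.000488 ~ 488 ppm,

so the calibration is accepted only when the PIT-read jitter is below roughly 488 ppm of the measured TSC interval, which the in-code comment rounds to 500 ppm.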
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index b263423fbe2a..95a7289e4b0c 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -441,7 +441,7 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 	ap.ds = __USER_DS;
 	ap.es = __USER_DS;
 	ap.fs = __KERNEL_PERCPU;
-	ap.gs = 0;
+	ap.gs = __KERNEL_STACK_CANARY;
 
 	ap.eflags = 0;
 
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 4d6f0d293ee2..21f68e00524f 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -104,6 +104,9 @@ static s64 __kpit_elapsed(struct kvm *kvm)
 	ktime_t remaining;
 	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
 
+	if (!ps->pit_timer.period)
+		return 0;
+
 	/*
 	 * The Counter does not stop when it reaches zero. In
 	 * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7030b5f911bf..0ef5bb2b4043 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -489,16 +489,20 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
  *
  * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
  * containing more mappings.
+ *
+ * Returns the number of rmap entries before the spte was added or zero if
+ * the spte was not added.
+ *
  */
-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
+static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 {
 	struct kvm_mmu_page *sp;
 	struct kvm_rmap_desc *desc;
 	unsigned long *rmapp;
-	int i;
+	int i, count = 0;
 
 	if (!is_rmap_pte(*spte))
-		return;
+		return count;
 	gfn = unalias_gfn(vcpu->kvm, gfn);
 	sp = page_header(__pa(spte));
 	sp->gfns[spte - sp->spt] = gfn;
@@ -515,8 +519,10 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 	} else {
 		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
 		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
+		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) {
 			desc = desc->more;
+			count += RMAP_EXT;
+		}
 		if (desc->shadow_ptes[RMAP_EXT-1]) {
 			desc->more = mmu_alloc_rmap_desc(vcpu);
 			desc = desc->more;
@@ -525,6 +531,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 			;
 		desc->shadow_ptes[i] = spte;
 	}
+	return count;
 }
 
 static void rmap_desc_remove_entry(unsigned long *rmapp,
@@ -754,6 +761,19 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
 	return young;
 }
 
+#define RMAP_RECYCLE_THRESHOLD 1000
+
+static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage)
+{
+	unsigned long *rmapp;
+
+	gfn = unalias_gfn(vcpu->kvm, gfn);
+	rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
+
+	kvm_unmap_rmapp(vcpu->kvm, rmapp);
+	kvm_flush_remote_tlbs(vcpu->kvm);
+}
+
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
 	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
@@ -1407,24 +1427,25 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
  */
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 {
+	int used_pages;
+
+	used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
+	used_pages = max(0, used_pages);
+
 	/*
 	 * If we set the number of mmu pages to be smaller be than the
 	 * number of actived pages , we must to free some mmu pages before we
 	 * change the value
 	 */
 
-	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
-	    kvm_nr_mmu_pages) {
-		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
-				       - kvm->arch.n_free_mmu_pages;
-
-		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
+	if (used_pages > kvm_nr_mmu_pages) {
+		while (used_pages > kvm_nr_mmu_pages) {
 			struct kvm_mmu_page *page;
 
 			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
 			kvm_mmu_zap_page(kvm, page);
-			n_used_mmu_pages--;
+			used_pages--;
 		}
 		kvm->arch.n_free_mmu_pages = 0;
 	}
@@ -1740,6 +1761,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 {
 	int was_rmapped = 0;
 	int was_writeble = is_writeble_pte(*shadow_pte);
+	int rmap_count;
 
 	pgprintk("%s: spte %llx access %x write_fault %d"
 		 " user_fault %d gfn %lx\n",
@@ -1781,9 +1803,11 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 
 	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
 	if (!was_rmapped) {
-		rmap_add(vcpu, shadow_pte, gfn, largepage);
+		rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage);
 		if (!is_rmap_pte(*shadow_pte))
 			kvm_release_pfn_clean(pfn);
+		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
+			rmap_recycle(vcpu, gfn, largepage);
 	} else {
 		if (was_writeble)
 			kvm_release_pfn_dirty(pfn);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 71510e07e69e..b1f658ad2f06 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -711,6 +711,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		svm->vmcb->control.tsc_offset += delta;
 		vcpu->cpu = cpu;
 		kvm_migrate_timers(vcpu);
+		svm->asid_generation = 0;
 	}
 
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
@@ -1031,7 +1032,6 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
 		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
 	}
 
-	svm->vcpu.cpu = svm_data->cpu;
 	svm->asid_generation = svm_data->asid_generation;
 	svm->vmcb->control.asid = svm_data->next_asid++;
 }
@@ -2300,8 +2300,8 @@ static void pre_svm_run(struct vcpu_svm *svm)
 	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
 
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
-	if (svm->vcpu.cpu != cpu ||
-	    svm->asid_generation != svm_data->asid_generation)
+	/* FIXME: handle wraparound of asid_generation */
+	if (svm->asid_generation != svm_data->asid_generation)
 		new_asid(svm, svm_data);
 }
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 356a0ce85c68..29f912927a58 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3157,8 +3157,8 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	enum emulation_result err = EMULATE_DONE;
 
-	preempt_enable();
 	local_irq_enable();
+	preempt_enable();
 
 	while (!guest_state_valid(vcpu)) {
 		err = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
@@ -3168,7 +3168,7 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
 
 		if (err != EMULATE_DONE) {
 			kvm_report_emulation_failure(vcpu, "emulation failure");
-			return;
+			break;
 		}
 
 		if (signal_pending(current))
@@ -3177,8 +3177,8 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
 			schedule();
 	}
 
-	local_irq_disable();
 	preempt_disable();
+	local_irq_disable();
 
 	vmx->invalid_state_emulation_result = err;
 }
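
The two reorderings above restore the usual nesting discipline: preemption control is the outer guard and interrupt control the inner one, so the enable path must run inner-to-outer. In particular, preempt_enable() is a potential reschedule point and should only run once interrupts are back on. A generic kernel-style sketch of the pattern (not code from this patch):

	static void nesting_example(void)
	{
		preempt_disable();	/* outer guard first */
		local_irq_disable();	/* inner guard second */

		/* ... section that must run without preemption or interrupts ... */

		local_irq_enable();	/* release inner guard first */
		preempt_enable();	/* may reschedule; safe now that IRQs are on */
	}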
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fe5474aec41a..3d4529011828 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -704,11 +704,48 @@ static bool msr_mtrr_valid(unsigned msr)
 	return false;
 }
 
+static bool valid_pat_type(unsigned t)
+{
+	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
+}
+
+static bool valid_mtrr_type(unsigned t)
+{
+	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
+}
+
+static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+	int i;
+
+	if (!msr_mtrr_valid(msr))
+		return false;
+
+	if (msr == MSR_IA32_CR_PAT) {
+		for (i = 0; i < 8; i++)
+			if (!valid_pat_type((data >> (i * 8)) & 0xff))
+				return false;
+		return true;
+	} else if (msr == MSR_MTRRdefType) {
+		if (data & ~0xcff)
+			return false;
+		return valid_mtrr_type(data & 0xff);
+	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
+		for (i = 0; i < 8 ; i++)
+			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
+				return false;
+		return true;
+	}
+
+	/* variable MTRRs */
+	return valid_mtrr_type(data & 0xff);
+}
+
 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
 
-	if (!msr_mtrr_valid(msr))
+	if (!mtrr_valid(vcpu, msr, data))
 		return 1;
 
 	if (msr == MSR_MTRRdefType) {
@@ -1079,14 +1116,13 @@ long kvm_arch_dev_ioctl(struct file *filp,
 		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
 			goto out;
 		r = -E2BIG;
-		if (n < num_msrs_to_save)
+		if (n < msr_list.nmsrs)
 			goto out;
 		r = -EFAULT;
 		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
 				 num_msrs_to_save * sizeof(u32)))
 			goto out;
-		if (copy_to_user(user_msr_list->indices
-				 + num_msrs_to_save * sizeof(u32),
+		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
 				 &emulated_msrs,
 				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
 			goto out;
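
The valid_pat_type()/valid_mtrr_type() helpers added above use a bitmask as a small set: bit t of the mask is set exactly when memory type t is allowed, so 0xf3 (binary 11110011) encodes {0, 1, 4, 5, 6, 7} and 0x73 (binary 01110011) encodes {0, 1, 4, 5, 6}. A stand-alone demonstration of the idiom (hypothetical test program, not from the patch):

	#include <stdbool.h>
	#include <stdio.h>

	/* Membership test: is type t in the set encoded by mask? */
	static bool in_set(unsigned t, unsigned mask)
	{
		return t < 8 && ((1u << t) & mask);
	}

	int main(void)
	{
		unsigned t;

		for (t = 0; t < 8; t++)		/* prints: 0 1 4 5 6 7 */
			if (in_set(t, 0xf3))
				printf("%u ", t);
		printf("\n");
		return 0;
	}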
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index b4a3dbcebe9b..f85aaf21e783 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -566,7 +566,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 
 	ret = drm_vblank_get(dev, crtc);
 	if (ret) {
-		DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
+		DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
 		return ret;
 	}
 	seq = drm_vblank_count(dev, crtc);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 54f492a488a9..7914097b09c6 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -566,6 +566,8 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
 				found_it = 1;
 				/* if equal delete the probed mode */
 				mode->status = pmode->status;
+				/* Merge type bits together */
+				mode->type |= pmode->type;
 				list_del(&pmode->head);
 				drm_mode_destroy(connector->dev, pmode);
 				break;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 83aee80e77a6..7ebc84c2881e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -190,7 +190,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
-		DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
+		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
 		return 0;
 	}
 
@@ -219,7 +219,7 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
-		DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
+		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
 		return 0;
 	}
 
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 0f2034c3ed2f..e4d9ef0c965a 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1254,6 +1254,7 @@ out_free:
 		if (!ubi->volumes[i])
 			continue;
 		kfree(ubi->volumes[i]->eba_tbl);
+		ubi->volumes[i]->eba_tbl = NULL;
 	}
 	return err;
 }
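
The added assignment is the kfree-then-NULL idiom: kfree(NULL) is defined to be a no-op, so clearing the pointer right after freeing makes it harmless if a later cleanup pass walks the same volumes again. A generic kernel-style sketch (hypothetical structure, not the driver's types):

	#include <linux/slab.h>

	struct vol {
		int *tbl;
	};

	static void release_tbl(struct vol *v)
	{
		kfree(v->tbl);
		v->tbl = NULL;	/* a second release_tbl(v) is now a safe no-op */
	}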
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index a423131b6171..b847745394b4 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -781,11 +781,22 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
 		return -EINVAL;
 	}
 
+	/*
+	 * Make sure that all PEBs have the same image sequence number.
+	 * This allows us to detect situations when users flash UBI
+	 * images incorrectly, so that the flash has the new UBI image
+	 * and leftovers from the old one. This feature was added
+	 * relatively recently, and the sequence number was always
+	 * zero, because old UBI implementations always set it to zero.
+	 * For this reasons, we do not panic if some PEBs have zero
+	 * sequence number, while other PEBs have non-zero sequence
+	 * number.
+	 */
 	image_seq = be32_to_cpu(ech->image_seq);
 	if (!si->image_seq_set) {
 		ubi->image_seq = image_seq;
 		si->image_seq_set = 1;
-	} else if (ubi->image_seq != image_seq) {
+	} else if (ubi->image_seq && ubi->image_seq != image_seq) {
 		ubi_err("bad image sequence number %d in PEB %d, "
 			"expected %d", image_seq, pnum, ubi->image_seq);
 		ubi_dbg_dump_ec_hdr(ech);
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index a4494d78e7c2..8aebe1e9d3d6 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -90,11 +90,10 @@ static struct hotplug_slot_ops sn_hotplug_slot_ops = {
 
 static DEFINE_MUTEX(sn_hotplug_mutex);
 
-static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot,
-			  char *buf)
+static ssize_t path_show(struct pci_slot *pci_slot, char *buf)
 {
 	int retval = -ENOENT;
-	struct slot *slot = bss_hotplug_slot->private;
+	struct slot *slot = pci_slot->hotplug->private;
 
 	if (!slot)
 		return retval;
@@ -103,7 +102,7 @@ static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot,
 	return retval;
 }
 
-static struct hotplug_slot_attribute sn_slot_path_attr = __ATTR_RO(path);
+static struct pci_slot_attribute sn_slot_path_attr = __ATTR_RO(path);
 
 static int sn_pci_slot_valid(struct pci_bus *pci_bus, int device)
 {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3ce5ae9e3d2d..175db258942f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -234,23 +234,20 @@ static int check_mem_permission(struct task_struct *task)
 
 struct mm_struct *mm_for_maps(struct task_struct *task)
 {
-	struct mm_struct *mm = get_task_mm(task);
-	if (!mm)
+	struct mm_struct *mm;
+
+	if (mutex_lock_killable(&task->cred_guard_mutex))
 		return NULL;
-	down_read(&mm->mmap_sem);
-	task_lock(task);
-	if (task->mm != mm)
-		goto out;
-	if (task->mm != current->mm &&
-	    __ptrace_may_access(task, PTRACE_MODE_READ) < 0)
-		goto out;
-	task_unlock(task);
+
+	mm = get_task_mm(task);
+	if (mm && mm != current->mm &&
+	    !ptrace_may_access(task, PTRACE_MODE_READ)) {
+		mmput(mm);
+		mm = NULL;
+	}
+	mutex_unlock(&task->cred_guard_mutex);
+
 	return mm;
-out:
-	task_unlock(task);
-	up_read(&mm->mmap_sem);
-	mmput(mm);
-	return NULL;
 }
 
 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 6f61b7cc32e0..9bd8be1d235c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -119,6 +119,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 	mm = mm_for_maps(priv->task);
 	if (!mm)
 		return NULL;
+	down_read(&mm->mmap_sem);
 
 	tail_vma = get_gate_vma(priv->task);
 	priv->tail_vma = tail_vma;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 64a72e2e7650..8f5c05d3dbd3 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -189,6 +189,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 		priv->task = NULL;
 		return NULL;
 	}
+	down_read(&mm->mmap_sem);
 
 	/* start from the Nth VMA */
 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index d7cd193c2277..a81170de7f6b 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -89,7 +89,9 @@ enum print_line_t {
 	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };
 
-
+void tracing_generic_entry_update(struct trace_entry *entry,
+				  unsigned long flags,
+				  int pc);
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(int type, unsigned long len,
 				  unsigned long flags, int pc);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 16713dc672e4..3060bdc35ffe 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -110,6 +110,7 @@ struct kvm_memory_slot {
 
 struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
+	u32 type;
 	int (*set)(struct kvm_kernel_irq_routing_entry *e,
 		   struct kvm *kvm, int level);
 	union {
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index e604e6ef72dd..a67dd5c5b6d3 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -121,8 +121,9 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_CPU				= 1U << 7,
 	PERF_SAMPLE_PERIOD			= 1U << 8,
 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
+	PERF_SAMPLE_TP_RECORD			= 1U << 10,
 
-	PERF_SAMPLE_MAX = 1U << 10,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
 };
 
 /*
@@ -413,6 +414,11 @@ struct perf_callchain_entry {
 	__u64				ip[PERF_MAX_STACK_DEPTH];
 };
 
+struct perf_tracepoint_record {
+	int				size;
+	char				*record;
+};
+
 struct task_struct;
 
 /**
@@ -681,6 +687,7 @@ struct perf_sample_data {
 	struct pt_regs			*regs;
 	u64				addr;
 	u64				period;
+	void				*private;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1867553c61e5..7fb16d90e7b1 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -144,6 +144,9 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
 static int								\
@@ -345,6 +348,56 @@ static inline int ftrace_get_offsets_##call(			\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ * 	int ret = 0;
+ *
+ * 	if (!atomic_inc_return(&event_call->profile_count))
+ * 		ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ * 	return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ * 	if (atomic_add_negative(-1, &event->call->profile_count))
+ * 		unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+									\
+static void ftrace_profile_##call(proto);				\
+									\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{									\
+	int ret = 0;							\
+									\
+	if (!atomic_inc_return(&event_call->profile_count))		\
+		ret = register_trace_##call(ftrace_profile_##call);	\
+									\
+	return ret;							\
+}									\
+									\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{									\
+	if (atomic_add_negative(-1, &event_call->profile_count))	\
+		unregister_trace_##call(ftrace_profile_##call);		\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
+
 /*
  * Stage 4 of the trace events.
  *
@@ -447,28 +500,6 @@
 #define TP_FMT(fmt, args...)	fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args)				\
-static void ftrace_profile_##call(proto)				\
-{									\
-	extern void perf_tpcounter_event(int);				\
-	perf_tpcounter_event(event_##call.id);				\
-}									\
-									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
-}									\
-									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
-}
 
 #define _TRACE_PROFILE_INIT(call)					\
 	.profile_count = ATOMIC_INIT(-1),				\
@@ -476,7 +507,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 	.profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
 
@@ -502,7 +532,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
 									\
 static struct ftrace_event_call event_##call;				\
 									\
@@ -586,6 +615,99 @@ __attribute__((section("_ftrace_events"))) event_##call = {	\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert in the ring buffer but in a perf counter.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	struct ftrace_event_call *event_call = &event_<call>;
+ *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *	struct ftrace_raw_##call *entry;
+ *	u64 __addr = 0, __count = 1;
+ *	unsigned long irq_flags;
+ *	int __entry_size;
+ *	int __data_size;
+ *	int pc;
+ *
+ *	local_save_flags(irq_flags);
+ *	pc = preempt_count();
+ *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *	__entry_size = __data_size + sizeof(*entry);
+ *
+ *	do {
+ *		char raw_data[__entry_size]; <- allocate our sample in the stack
+ *		struct trace_entry *ent;
+ *
+ *		entry = (struct ftrace_raw_<call> *)raw_data;
+ *		ent = &entry->ent;
+ *		tracing_generic_entry_update(ent, irq_flags, pc);
+ *		ent->type = event_call->id;
+ *
+ *		<tstruct> <- do some jobs with dynamic arrays
+ *
+ *		<assign>  <- affect our values
+ *
+ *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *			     __entry_size);  <- submit them to perf counter
+ *	} while (0);
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+static void ftrace_profile_##call(proto)				\
+{									\
+	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+	struct ftrace_event_call *event_call = &event_##call;		\
+	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
+	struct ftrace_raw_##call *entry;				\
+	u64 __addr = 0, __count = 1;					\
+	unsigned long irq_flags;					\
+	int __entry_size;						\
+	int __data_size;						\
+	int pc;								\
+									\
+	local_save_flags(irq_flags);					\
+	pc = preempt_count();						\
+									\
+	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+	__entry_size = ALIGN(__data_size + sizeof(*entry), sizeof(u64));\
+									\
+	do {								\
+		char raw_data[__entry_size];				\
+		struct trace_entry *ent;				\
+									\
+		entry = (struct ftrace_raw_##call *)raw_data;		\
+		ent = &entry->ent;					\
+		tracing_generic_entry_update(ent, irq_flags, pc);	\
+		ent->type = event_call->id;				\
+									\
+		tstruct							\
+									\
+		{ assign; }						\
+									\
+		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+			     __entry_size);				\
+	} while (0);							\
+									\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
 #undef _TRACE_PROFILE_INIT
 
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index d7135aa2d2c4..e94caa666dba 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -758,7 +758,8 @@ static int __init lockdep_proc_init(void)
 		    &proc_lockdep_stats_operations);
 
 #ifdef CONFIG_LOCK_STAT
-	proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations);
+	proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
+		    &proc_lock_stat_operations);
 #endif
 
 	return 0;
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 673c1aaf7332..868102172aa4 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
| @@ -2646,6 +2646,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
| 2646 | u64 counter; | 2646 | u64 counter; |
| 2647 | } group_entry; | 2647 | } group_entry; |
| 2648 | struct perf_callchain_entry *callchain = NULL; | 2648 | struct perf_callchain_entry *callchain = NULL; |
| 2649 | struct perf_tracepoint_record *tp; | ||
| 2649 | int callchain_size = 0; | 2650 | int callchain_size = 0; |
| 2650 | u64 time; | 2651 | u64 time; |
| 2651 | struct { | 2652 | struct { |
| @@ -2714,6 +2715,11 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
| 2714 | header.size += sizeof(u64); | 2715 | header.size += sizeof(u64); |
| 2715 | } | 2716 | } |
| 2716 | 2717 | ||
| 2718 | if (sample_type & PERF_SAMPLE_TP_RECORD) { | ||
| 2719 | tp = data->private; | ||
| 2720 | header.size += tp->size; | ||
| 2721 | } | ||
| 2722 | |||
| 2717 | ret = perf_output_begin(&handle, counter, header.size, nmi, 1); | 2723 | ret = perf_output_begin(&handle, counter, header.size, nmi, 1); |
| 2718 | if (ret) | 2724 | if (ret) |
| 2719 | return; | 2725 | return; |
| @@ -2777,6 +2783,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
| 2777 | } | 2783 | } |
| 2778 | } | 2784 | } |
| 2779 | 2785 | ||
| 2786 | if (sample_type & PERF_SAMPLE_TP_RECORD) | ||
| 2787 | perf_output_copy(&handle, tp->record, tp->size); | ||
| 2788 | |||
| 2780 | perf_output_end(&handle); | 2789 | perf_output_end(&handle); |
| 2781 | } | 2790 | } |
| 2782 | 2791 | ||
| @@ -3703,17 +3712,24 @@ static const struct pmu perf_ops_task_clock = { | |||
| 3703 | }; | 3712 | }; |
| 3704 | 3713 | ||
| 3705 | #ifdef CONFIG_EVENT_PROFILE | 3714 | #ifdef CONFIG_EVENT_PROFILE |
| 3706 | void perf_tpcounter_event(int event_id) | 3715 | void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record, |
| 3716 | int entry_size) | ||
| 3707 | { | 3717 | { |
| 3718 | struct perf_tracepoint_record tp = { | ||
| 3719 | .size = entry_size, | ||
| 3720 | .record = record, | ||
| 3721 | }; | ||
| 3722 | |||
| 3708 | struct perf_sample_data data = { | 3723 | struct perf_sample_data data = { |
| 3709 | .regs = get_irq_regs(), | 3724 | .regs = get_irq_regs(), |
| 3710 | .addr = 0, | 3725 | .addr = addr, |
| 3726 | .private = &tp, | ||
| 3711 | }; | 3727 | }; |
| 3712 | 3728 | ||
| 3713 | if (!data.regs) | 3729 | if (!data.regs) |
| 3714 | data.regs = task_pt_regs(current); | 3730 | data.regs = task_pt_regs(current); |
| 3715 | 3731 | ||
| 3716 | do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data); | 3732 | do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data); |
| 3717 | } | 3733 | } |
| 3718 | EXPORT_SYMBOL_GPL(perf_tpcounter_event); | 3734 | EXPORT_SYMBOL_GPL(perf_tpcounter_event); |
| 3719 | 3735 | ||
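The new PERF_SAMPLE_TP_RECORD plumbing passes the raw tracepoint record through perf_sample_data::private: perf_tpcounter_event() wraps it on the stack, and perf_counter_output() grows the sample header by its size and appends the payload verbatim with perf_output_copy(). The carrier looks like this (layout inferred from the hunks above):

    struct perf_tracepoint_record {
            int     size;           /* bytes of raw tracepoint data */
            char    *record;        /* the generated ftrace_raw_* entry */
    };

Since the record lives on the caller's stack, it is only valid for the duration of the perf_tpcounter_event() call; the output path copies it into the ring buffer before returning.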
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index bece7c0b67b2..e33a21cb9407 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
| @@ -521,11 +521,12 @@ void posix_cpu_timers_exit(struct task_struct *tsk) | |||
| 521 | } | 521 | } |
| 522 | void posix_cpu_timers_exit_group(struct task_struct *tsk) | 522 | void posix_cpu_timers_exit_group(struct task_struct *tsk) |
| 523 | { | 523 | { |
| 524 | struct task_cputime cputime; | 524 | struct signal_struct *const sig = tsk->signal; |
| 525 | 525 | ||
| 526 | thread_group_cputimer(tsk, &cputime); | ||
| 527 | cleanup_timers(tsk->signal->cpu_timers, | 526 | cleanup_timers(tsk->signal->cpu_timers, |
| 528 | cputime.utime, cputime.stime, cputime.sum_exec_runtime); | 527 | cputime_add(tsk->utime, sig->utime), |
| 528 | cputime_add(tsk->stime, sig->stime), | ||
| 529 | tsk->se.sum_exec_runtime + sig->sum_sched_runtime); | ||
| 529 | } | 530 | } |
| 530 | 531 | ||
| 531 | static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) | 532 | static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) |
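posix_cpu_timers_exit_group() no longer samples the group cputimer: by the time the group leader exits, every other thread has already folded its times into signal_struct, so the dead task's own counters plus the signal totals give the full group times without touching the cputimer. The identity being relied on, as a hypothetical helper:

    static void group_times_at_exit(struct task_struct *tsk,
                                    cputime_t *ut, cputime_t *st, u64 *rt)
    {
            struct signal_struct *const sig = tsk->signal;

            /* All other threads are gone; their times are in sig->*. */
            *ut = cputime_add(tsk->utime, sig->utime);
            *st = cputime_add(tsk->stime, sig->stime);
            *rt = tsk->se.sum_exec_runtime + sig->sum_sched_runtime;
    }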
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index fcd107a78c5a..29bd4baf9e75 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c | |||
| @@ -1039,16 +1039,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, | |||
| 1039 | if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) { | 1039 | if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) { |
| 1040 | /* We got the lock for task. */ | 1040 | /* We got the lock for task. */ |
| 1041 | debug_rt_mutex_lock(lock); | 1041 | debug_rt_mutex_lock(lock); |
| 1042 | |||
| 1043 | rt_mutex_set_owner(lock, task, 0); | 1042 | rt_mutex_set_owner(lock, task, 0); |
| 1044 | 1043 | spin_unlock(&lock->wait_lock); | |
| 1045 | rt_mutex_deadlock_account_lock(lock, task); | 1044 | rt_mutex_deadlock_account_lock(lock, task); |
| 1046 | return 1; | 1045 | return 1; |
| 1047 | } | 1046 | } |
| 1048 | 1047 | ||
| 1049 | ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); | 1048 | ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); |
| 1050 | 1049 | ||
| 1051 | |||
| 1052 | if (ret && !waiter->task) { | 1050 | if (ret && !waiter->task) { |
| 1053 | /* | 1051 | /* |
| 1054 | * Reset the return value. We might have | 1052 | * Reset the return value. We might have |
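The early "we got the lock" return in rt_mutex_start_proxy_lock() previously left lock->wait_lock held; the fix releases it before the deadlock accounting. The invariant, sketched:

    spin_lock(&lock->wait_lock);
    if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
            debug_rt_mutex_lock(lock);
            rt_mutex_set_owner(lock, task, 0);
            spin_unlock(&lock->wait_lock);  /* must drop on every exit path */
            rt_mutex_deadlock_account_lock(lock, task);
            return 1;
    }
    /* slow path continues with wait_lock held, released before return */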
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index bf27bb7a63e2..a330513d96ce 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -735,6 +735,7 @@ ring_buffer_free(struct ring_buffer *buffer) | |||
| 735 | 735 | ||
| 736 | put_online_cpus(); | 736 | put_online_cpus(); |
| 737 | 737 | ||
| 738 | kfree(buffer->buffers); | ||
| 738 | free_cpumask_var(buffer->cpumask); | 739 | free_cpumask_var(buffer->cpumask); |
| 739 | 740 | ||
| 740 | kfree(buffer); | 741 | kfree(buffer); |
| @@ -1785,7 +1786,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer, | |||
| 1785 | */ | 1786 | */ |
| 1786 | RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); | 1787 | RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); |
| 1787 | 1788 | ||
| 1788 | if (!rb_try_to_discard(cpu_buffer, event)) | 1789 | if (rb_try_to_discard(cpu_buffer, event)) |
| 1789 | goto out; | 1790 | goto out; |
| 1790 | 1791 | ||
| 1791 | /* | 1792 | /* |
| @@ -2383,7 +2384,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
| 2383 | * the box. Return the padding, and we will release | 2384 | * the box. Return the padding, and we will release |
| 2384 | * the current locks, and try again. | 2385 | * the current locks, and try again. |
| 2385 | */ | 2386 | */ |
| 2386 | rb_advance_reader(cpu_buffer); | ||
| 2387 | return event; | 2387 | return event; |
| 2388 | 2388 | ||
| 2389 | case RINGBUF_TYPE_TIME_EXTEND: | 2389 | case RINGBUF_TYPE_TIME_EXTEND: |
| @@ -2486,7 +2486,7 @@ static inline int rb_ok_to_lock(void) | |||
| 2486 | * buffer too. A one time deal is all you get from reading | 2486 | * buffer too. A one time deal is all you get from reading |
| 2487 | * the ring buffer from an NMI. | 2487 | * the ring buffer from an NMI. |
| 2488 | */ | 2488 | */ |
| 2489 | if (likely(!in_nmi() && !oops_in_progress)) | 2489 | if (likely(!in_nmi())) |
| 2490 | return 1; | 2490 | return 1; |
| 2491 | 2491 | ||
| 2492 | tracing_off_permanent(); | 2492 | tracing_off_permanent(); |
| @@ -2519,6 +2519,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
| 2519 | if (dolock) | 2519 | if (dolock) |
| 2520 | spin_lock(&cpu_buffer->reader_lock); | 2520 | spin_lock(&cpu_buffer->reader_lock); |
| 2521 | event = rb_buffer_peek(buffer, cpu, ts); | 2521 | event = rb_buffer_peek(buffer, cpu, ts); |
| 2522 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | ||
| 2523 | rb_advance_reader(cpu_buffer); | ||
| 2522 | if (dolock) | 2524 | if (dolock) |
| 2523 | spin_unlock(&cpu_buffer->reader_lock); | 2525 | spin_unlock(&cpu_buffer->reader_lock); |
| 2524 | local_irq_restore(flags); | 2526 | local_irq_restore(flags); |
| @@ -2590,12 +2592,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
| 2590 | spin_lock(&cpu_buffer->reader_lock); | 2592 | spin_lock(&cpu_buffer->reader_lock); |
| 2591 | 2593 | ||
| 2592 | event = rb_buffer_peek(buffer, cpu, ts); | 2594 | event = rb_buffer_peek(buffer, cpu, ts); |
| 2593 | if (!event) | 2595 | if (event) |
| 2594 | goto out_unlock; | 2596 | rb_advance_reader(cpu_buffer); |
| 2595 | |||
| 2596 | rb_advance_reader(cpu_buffer); | ||
| 2597 | 2597 | ||
| 2598 | out_unlock: | ||
| 2599 | if (dolock) | 2598 | if (dolock) |
| 2600 | spin_unlock(&cpu_buffer->reader_lock); | 2599 | spin_unlock(&cpu_buffer->reader_lock); |
| 2601 | local_irq_restore(flags); | 2600 | local_irq_restore(flags); |
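Three independent ring-buffer fixes land here: ring_buffer_free() was leaking the per-cpu buffer array (hence the kfree(buffer->buffers)); ring_buffer_discard_commit() had the rb_try_to_discard() test inverted, so successful discards fell through to the manual path and failures skipped it; and rb_buffer_peek() no longer advances the reader as a side effect, the callers now do it explicitly. The resulting pattern, sketched:

    /* Consume path: peek, then explicitly advance under reader_lock. */
    spin_lock(&cpu_buffer->reader_lock);
    event = rb_buffer_peek(buffer, cpu, ts);
    if (event)
            rb_advance_reader(cpu_buffer);
    spin_unlock(&cpu_buffer->reader_lock);

    /* Pure peek path: advance only past padding, so repeated peeks make
     * progress without consuming a real event. */
    spin_lock(&cpu_buffer->reader_lock);
    event = rb_buffer_peek(buffer, cpu, ts);
    if (event && event->type_len == RINGBUF_TYPE_PADDING)
            rb_advance_reader(cpu_buffer);
    spin_unlock(&cpu_buffer->reader_lock);

The rb_ok_to_lock() change narrows the lockless fallback to true NMI context; oops_in_progress alone no longer turns the ring buffer off permanently.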
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8930e39b9d8c..c22b40f8f576 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -848,6 +848,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, | |||
| 848 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | | 848 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | |
| 849 | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); | 849 | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); |
| 850 | } | 850 | } |
| 851 | EXPORT_SYMBOL_GPL(tracing_generic_entry_update); | ||
| 851 | 852 | ||
| 852 | struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, | 853 | struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, |
| 853 | int type, | 854 | int type, |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 3548ae5cc780..8b9f4f6e9559 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -438,10 +438,6 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, | |||
| 438 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | 438 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
| 439 | int *ent_cpu, u64 *ent_ts); | 439 | int *ent_cpu, u64 *ent_ts); |
| 440 | 440 | ||
| 441 | void tracing_generic_entry_update(struct trace_entry *entry, | ||
| 442 | unsigned long flags, | ||
| 443 | int pc); | ||
| 444 | |||
| 445 | void default_wait_pipe(struct trace_iterator *iter); | 441 | void default_wait_pipe(struct trace_iterator *iter); |
| 446 | void poll_wait_pipe(struct trace_iterator *iter); | 442 | void poll_wait_pipe(struct trace_iterator *iter); |
| 447 | 443 | ||
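tracing_generic_entry_update() is exported because the generated profile probes above can now live in modules; with the symbol public, its prototype also moves out of the private kernel/trace/trace.h (presumably into an already-public header such as linux/ftrace_event.h). A hypothetical module-side use:

    struct trace_entry ent;
    unsigned long flags;

    local_save_flags(flags);
    tracing_generic_entry_update(&ent, flags, preempt_count());
    ent.type = my_event_id;         /* hypothetical event id */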
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 936c621bbf46..f32dc9d1ea7b 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
| @@ -624,9 +624,6 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps, | |||
| 624 | return -ENOSPC; | 624 | return -ENOSPC; |
| 625 | } | 625 | } |
| 626 | 626 | ||
| 627 | filter->preds[filter->n_preds] = pred; | ||
| 628 | filter->n_preds++; | ||
| 629 | |||
| 630 | list_for_each_entry(call, &ftrace_events, list) { | 627 | list_for_each_entry(call, &ftrace_events, list) { |
| 631 | 628 | ||
| 632 | if (!call->define_fields) | 629 | if (!call->define_fields) |
| @@ -643,6 +640,9 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps, | |||
| 643 | } | 640 | } |
| 644 | replace_filter_string(call->filter, filter_string); | 641 | replace_filter_string(call->filter, filter_string); |
| 645 | } | 642 | } |
| 643 | |||
| 644 | filter->preds[filter->n_preds] = pred; | ||
| 645 | filter->n_preds++; | ||
| 646 | out: | 646 | out: |
| 647 | return err; | 647 | return err; |
| 648 | } | 648 | } |
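filter_add_subsystem_pred() used to publish the predicate into filter->preds[] before walking the subsystem's events, so a failure partway through the loop left a half-applied predicate already counted in n_preds. The reorder publishes last. Roughly, with the per-event checks elided:

    list_for_each_entry(call, &ftrace_events, list) {
            if (!call->define_fields)
                    continue;
            err = filter_add_pred(ps, call, pred);  /* validate first */
            if (err)
                    goto out;                       /* nothing published */
            replace_filter_string(call->filter, filter_string);
    }
    filter->preds[filter->n_preds++] = pred;        /* publish last */
    out:
            return err;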
| @@ -1029,12 +1029,17 @@ static int replace_preds(struct event_subsystem *system, | |||
| 1029 | 1029 | ||
| 1030 | if (elt->op == OP_AND || elt->op == OP_OR) { | 1030 | if (elt->op == OP_AND || elt->op == OP_OR) { |
| 1031 | pred = create_logical_pred(elt->op); | 1031 | pred = create_logical_pred(elt->op); |
| 1032 | if (!pred) | ||
| 1033 | return -ENOMEM; | ||
| 1032 | if (call) { | 1034 | if (call) { |
| 1033 | err = filter_add_pred(ps, call, pred); | 1035 | err = filter_add_pred(ps, call, pred); |
| 1034 | filter_free_pred(pred); | 1036 | filter_free_pred(pred); |
| 1035 | } else | 1037 | } else { |
| 1036 | err = filter_add_subsystem_pred(ps, system, | 1038 | err = filter_add_subsystem_pred(ps, system, |
| 1037 | pred, filter_string); | 1039 | pred, filter_string); |
| 1040 | if (err) | ||
| 1041 | filter_free_pred(pred); | ||
| 1042 | } | ||
| 1038 | if (err) | 1043 | if (err) |
| 1039 | return err; | 1044 | return err; |
| 1040 | 1045 | ||
| @@ -1048,12 +1053,17 @@ static int replace_preds(struct event_subsystem *system, | |||
| 1048 | } | 1053 | } |
| 1049 | 1054 | ||
| 1050 | pred = create_pred(elt->op, operand1, operand2); | 1055 | pred = create_pred(elt->op, operand1, operand2); |
| 1056 | if (!pred) | ||
| 1057 | return -ENOMEM; | ||
| 1051 | if (call) { | 1058 | if (call) { |
| 1052 | err = filter_add_pred(ps, call, pred); | 1059 | err = filter_add_pred(ps, call, pred); |
| 1053 | filter_free_pred(pred); | 1060 | filter_free_pred(pred); |
| 1054 | } else | 1061 | } else { |
| 1055 | err = filter_add_subsystem_pred(ps, system, pred, | 1062 | err = filter_add_subsystem_pred(ps, system, pred, |
| 1056 | filter_string); | 1063 | filter_string); |
| 1064 | if (err) | ||
| 1065 | filter_free_pred(pred); | ||
| 1066 | } | ||
| 1057 | if (err) | 1067 | if (err) |
| 1058 | return err; | 1068 | return err; |
| 1059 | 1069 | ||
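Both predicate constructors can return NULL, and the subsystem path leaked the predicate on failure (only the per-call path freed it). The ownership rule after the fix, consolidated:

    pred = create_pred(elt->op, operand1, operand2);
    if (!pred)
            return -ENOMEM;
    if (call) {
            err = filter_add_pred(ps, call, pred);
            filter_free_pred(pred);         /* per-call path copies; always free */
    } else {
            err = filter_add_subsystem_pred(ps, system, pred, filter_string);
            if (err)
                    filter_free_pred(pred); /* subsystem keeps pred on success */
    }
    if (err)
            return err;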
diff --git a/mm/mempool.c b/mm/mempool.c index a46eb1b4bb66..32e75d400503 100644 --- a/mm/mempool.c +++ b/mm/mempool.c | |||
| @@ -303,14 +303,14 @@ EXPORT_SYMBOL(mempool_free_slab); | |||
| 303 | */ | 303 | */ |
| 304 | void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) | 304 | void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) |
| 305 | { | 305 | { |
| 306 | size_t size = (size_t)(long)pool_data; | 306 | size_t size = (size_t)pool_data; |
| 307 | return kmalloc(size, gfp_mask); | 307 | return kmalloc(size, gfp_mask); |
| 308 | } | 308 | } |
| 309 | EXPORT_SYMBOL(mempool_kmalloc); | 309 | EXPORT_SYMBOL(mempool_kmalloc); |
| 310 | 310 | ||
| 311 | void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data) | 311 | void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data) |
| 312 | { | 312 | { |
| 313 | size_t size = (size_t) pool_data; | 313 | size_t size = (size_t)pool_data; |
| 314 | return kzalloc(size, gfp_mask); | 314 | return kzalloc(size, gfp_mask); |
| 315 | } | 315 | } |
| 316 | EXPORT_SYMBOL(mempool_kzalloc); | 316 | EXPORT_SYMBOL(mempool_kzalloc); |
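mempool_kmalloc() and mempool_kzalloc() smuggle the element size through the opaque pool_data cookie; the double cast through long was redundant, a single (size_t) cast of the pointer suffices. Typical usage, sketched with a hypothetical element size and count:

    #include <linux/mempool.h>
    #include <linux/slab.h>

    static int example_init(void)
    {
            mempool_t *pool;
            void *buf;

            /* 16 preallocated elements of 256 bytes each; the size rides
             * in pool_data and comes back out via the (size_t) cast. */
            pool = mempool_create(16, mempool_kmalloc, mempool_kfree,
                                  (void *)(size_t)256);
            if (!pool)
                    return -ENOMEM;
            buf = mempool_alloc(pool, GFP_KERNEL);
            if (buf)
                    mempool_free(buf, pool);
            mempool_destroy(pool);
            return 0;
    }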
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index d29baa2e063a..911ba7ffab84 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl | |||
| @@ -393,7 +393,7 @@ while (<IN>) { | |||
| 393 | $read_function = 0; | 393 | $read_function = 0; |
| 394 | } | 394 | } |
| 395 | # print out any recorded offsets | 395 | # print out any recorded offsets |
| 396 | update_funcs() if ($text_found); | 396 | update_funcs() if (defined($ref_func)); |
| 397 | 397 | ||
| 398 | # reset all markers and arrays | 398 | # reset all markers and arrays |
| 399 | $text_found = 0; | 399 | $text_found = 0; |
| @@ -414,7 +414,10 @@ while (<IN>) { | |||
| 414 | $offset = hex $1; | 414 | $offset = hex $1; |
| 415 | } else { | 415 | } else { |
| 416 | # if we already have a function, and this is weak, skip it | 416 | # if we already have a function, and this is weak, skip it |
| 417 | if (!defined($ref_func) && !defined($weak{$text})) { | 417 | if (!defined($ref_func) && !defined($weak{$text}) && |
| 418 | # PPC64 can have symbols that start with .L and | ||
| 419 | # gcc considers these special. Don't use them! | ||
| 420 | $text !~ /^\.L/) { | ||
| 418 | $ref_func = $text; | 421 | $ref_func = $text; |
| 419 | $offset = hex $1; | 422 | $offset = hex $1; |
| 420 | } | 423 | } |
| @@ -441,7 +444,7 @@ while (<IN>) { | |||
| 441 | } | 444 | } |
| 442 | 445 | ||
| 443 | # dump out anymore offsets that may have been found | 446 | # dump out anymore offsets that may have been found |
| 444 | update_funcs() if ($text_found); | 447 | update_funcs() if (defined($ref_func)); |
| 445 | 448 | ||
| 446 | # If we did not find any mcount callers, we are done (do nothing). | 449 | # If we did not find any mcount callers, we are done (do nothing). |
| 447 | if (!$opened) { | 450 | if (!$opened) { |
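Two recordmcount.pl fixes: recorded offsets are now flushed only when a usable reference symbol exists (defined($ref_func)) rather than whenever any text was seen, and gcc's compiler-local .L labels are never chosen as that reference on PPC64. The filter, rendered in C for illustration (the script itself is Perl; the helper name is hypothetical):

    #include <string.h>

    static int usable_ref_symbol(const char *name, int is_weak)
    {
            if (is_weak)
                    return 0;               /* weak symbols may be replaced */
            if (strncmp(name, ".L", 2) == 0)
                    return 0;               /* gcc-internal local label */
            return 1;
    }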
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 6da09928130f..90c98082af10 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
| @@ -412,6 +412,7 @@ static void create_counter(int counter, int cpu, pid_t pid) | |||
| 412 | if (call_graph) | 412 | if (call_graph) |
| 413 | attr->sample_type |= PERF_SAMPLE_CALLCHAIN; | 413 | attr->sample_type |= PERF_SAMPLE_CALLCHAIN; |
| 414 | 414 | ||
| 415 | |||
| 415 | attr->mmap = track; | 416 | attr->mmap = track; |
| 416 | attr->comm = track; | 417 | attr->comm = track; |
| 417 | attr->inherit = (cpu < 0) && inherit; | 418 | attr->inherit = (cpu < 0) && inherit; |
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 1eddae94bab3..1150c6d5c7b8 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c | |||
| @@ -95,8 +95,6 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx) | |||
| 95 | if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG) | 95 | if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG) |
| 96 | pent->fields.remote_irr = 1; | 96 | pent->fields.remote_irr = 1; |
| 97 | } | 97 | } |
| 98 | if (!pent->fields.trig_mode) | ||
| 99 | ioapic->irr &= ~(1 << idx); | ||
| 100 | 98 | ||
| 101 | return injected; | 99 | return injected; |
| 102 | } | 100 | } |
| @@ -136,7 +134,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) | |||
| 136 | mask_after = ioapic->redirtbl[index].fields.mask; | 134 | mask_after = ioapic->redirtbl[index].fields.mask; |
| 137 | if (mask_before != mask_after) | 135 | if (mask_before != mask_after) |
| 138 | kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after); | 136 | kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after); |
| 139 | if (ioapic->irr & (1 << index)) | 137 | if (ioapic->redirtbl[index].fields.trig_mode == IOAPIC_LEVEL_TRIG |
| 138 | && ioapic->irr & (1 << index)) | ||
| 140 | ioapic_service(ioapic, index); | 139 | ioapic_service(ioapic, index); |
| 141 | break; | 140 | break; |
| 142 | } | 141 | } |
| @@ -184,9 +183,10 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) | |||
| 184 | if (!level) | 183 | if (!level) |
| 185 | ioapic->irr &= ~mask; | 184 | ioapic->irr &= ~mask; |
| 186 | else { | 185 | else { |
| 186 | int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG); | ||
| 187 | ioapic->irr |= mask; | 187 | ioapic->irr |= mask; |
| 188 | if ((!entry.fields.trig_mode && old_irr != ioapic->irr) | 188 | if ((edge && old_irr != ioapic->irr) || |
| 189 | || !entry.fields.remote_irr) | 189 | (!edge && !entry.fields.remote_irr)) |
| 190 | ret = ioapic_service(ioapic, irq); | 190 | ret = ioapic_service(ioapic, irq); |
| 191 | } | 191 | } |
| 192 | } | 192 | } |
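The ioapic changes separate edge from level semantics: ioapic_service() no longer clears irr for edge-triggered pins, kvm_ioapic_set_irq() instead injects an edge interrupt only on a 0-to-1 irr transition and a level interrupt whenever remote_irr is clear, and rewriting a redirection entry re-services only pending level-triggered lines. The injection decision, annotated (variables as in kvm_ioapic_set_irq()):

    int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

    ioapic->irr |= mask;
    if ((edge && old_irr != ioapic->irr) ||         /* fresh edge seen */
        (!edge && !entry.fields.remote_irr))        /* level not yet acked */
            ret = ioapic_service(ioapic, irq);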
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index a8bd466d00cc..ddc17f0e2f35 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c | |||
| @@ -160,7 +160,8 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) | |||
| 160 | unsigned gsi = pin; | 160 | unsigned gsi = pin; |
| 161 | 161 | ||
| 162 | list_for_each_entry(e, &kvm->irq_routing, link) | 162 | list_for_each_entry(e, &kvm->irq_routing, link) |
| 163 | if (e->irqchip.irqchip == irqchip && | 163 | if (e->type == KVM_IRQ_ROUTING_IRQCHIP && |
| 164 | e->irqchip.irqchip == irqchip && | ||
| 164 | e->irqchip.pin == pin) { | 165 | e->irqchip.pin == pin) { |
| 165 | gsi = e->gsi; | 166 | gsi = e->gsi; |
| 166 | break; | 167 | break; |
| @@ -259,6 +260,7 @@ static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e, | |||
| 259 | int delta; | 260 | int delta; |
| 260 | 261 | ||
| 261 | e->gsi = ue->gsi; | 262 | e->gsi = ue->gsi; |
| 263 | e->type = ue->type; | ||
| 262 | switch (ue->type) { | 264 | switch (ue->type) { |
| 263 | case KVM_IRQ_ROUTING_IRQCHIP: | 265 | case KVM_IRQ_ROUTING_IRQCHIP: |
| 264 | delta = 0; | 266 | delta = 0; |
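Routing entries of different types share one list, and the union fields only mean irqchip/pin for KVM_IRQ_ROUTING_IRQCHIP entries; without the type check, an MSI entry could be misread and yield a bogus GSI match. setup_routing_entry() therefore records ue->type, and the ack-notifier lookup filters on it:

    list_for_each_entry(e, &kvm->irq_routing, link)
            if (e->type == KVM_IRQ_ROUTING_IRQCHIP &&   /* union is valid */
                e->irqchip.irqchip == irqchip &&
                e->irqchip.pin == pin) {
                    gsi = e->gsi;
                    break;
            }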
