31 files changed, 243 insertions(+), 170 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 73c5c2b05f64..7f3c0a2e60cd 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1802,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	long n, base;
+	long base;
+	unsigned long n;
 	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
 			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
@@ -1815,7 +1816,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 	base = memslot->base_gfn / BITS_PER_LONG;
 
 	for (i = 0; i < n/sizeof(long); ++i) {
@@ -1831,7 +1832,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_dirty_log *log)
 {
 	int r;
-	int n;
+	unsigned long n;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
@@ -1850,7 +1851,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (is_dirty) {
 		kvm_flush_remote_tlbs(kvm);
 		memslot = &kvm->memslots->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 	r = 0;
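
The change repeated across the KVM dirty-log paths above (and in the powerpc and x86 hunks below) replaces the open-coded bitmap size computation with a kvm_dirty_bitmap_bytes() helper and widens n from int to unsigned long, so the byte count cannot overflow for very large memory slots. A user-space sketch of what the helper presumably computes, inferred from the expression it replaces (the names here are illustrative, not the kernel source):

	#include <stdio.h>

	#define BITS_PER_LONG	(8 * sizeof(unsigned long))
	/* round x up to a multiple of a (a must be a power of two) */
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	/* one dirty bit per page, padded out to whole longs */
	static unsigned long dirty_bitmap_bytes(unsigned long npages)
	{
		return ALIGN(npages, BITS_PER_LONG) / 8;
	}

	int main(void)
	{
		/* a 1000-page slot needs 128 bytes on a 64-bit build */
		printf("%lu\n", dirty_bitmap_bytes(1000));
		return 0;
	}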
diff --git a/arch/m68k/include/asm/mcfuart.h b/arch/m68k/include/asm/mcfuart.h
index ef2293873612..01a8716c5fc5 100644
--- a/arch/m68k/include/asm/mcfuart.h
+++ b/arch/m68k/include/asm/mcfuart.h
@@ -212,5 +212,10 @@ struct mcf_platform_uart {
 #define	MCFUART_URF_RXS		0xc0		/* Receiver status */
 #endif
 
+#if defined(CONFIG_M5272)
+#define MCFUART_TXFIFOSIZE	25
+#else
+#define MCFUART_TXFIFOSIZE	1
+#endif
 /****************************************************************************/
 #endif	/* mcfuart_h */
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index ce404bc9ccbd..14042574ac21 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -94,7 +94,7 @@ cflags-$(CONFIG_M520x)		:= $(call cc-option,-mcpu=5208,-m5200)
 cflags-$(CONFIG_M523x)		:= $(call cc-option,-mcpu=523x,-m5307)
 cflags-$(CONFIG_M5249)		:= $(call cc-option,-mcpu=5249,-m5200)
 cflags-$(CONFIG_M5271)		:= $(call cc-option,-mcpu=5271,-m5307)
-cflags-$(CONFIG_M5272)		:= $(call cc-option,-mcpu=5271,-m5200)
+cflags-$(CONFIG_M5272)		:= $(call cc-option,-mcpu=5272,-m5307)
 cflags-$(CONFIG_M5275)		:= $(call cc-option,-mcpu=5275,-m5307)
 cflags-$(CONFIG_M528x)		:= $(call cc-option,-m528x,-m5307)
 cflags-$(CONFIG_M5307)		:= $(call cc-option,-m5307,-m5200)
diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S
index 56043ade3941..aff6f57ef8b5 100644
--- a/arch/m68knommu/kernel/entry.S
+++ b/arch/m68knommu/kernel/entry.S
@@ -145,6 +145,6 @@ ENTRY(ret_from_user_signal)
 	trap	#0
 
 ENTRY(ret_from_user_rt_signal)
-	move	#__NR_rt_sigreturn,%d0
+	movel	#__NR_rt_sigreturn,%d0
 	trap	#0
 
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index 1143f77caca4..6f22970d8c20 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -107,7 +107,6 @@ void init_IRQ(void)
 	_ramvec[vba+CPMVEC_PIO_PC7]	= inthandler;	/* pio - pc7 */
 	_ramvec[vba+CPMVEC_PIO_PC6]	= inthandler;	/* pio - pc6 */
 	_ramvec[vba+CPMVEC_TIMER3]	= inthandler;	/* timer 3 */
-	_ramvec[vba+CPMVEC_RISCTIMER]	= inthandler;	/* reserved */
 	_ramvec[vba+CPMVEC_PIO_PC5]	= inthandler;	/* pio - pc5 */
 	_ramvec[vba+CPMVEC_PIO_PC4]	= inthandler;	/* pio - pc4 */
 	_ramvec[vba+CPMVEC_RESERVED2]	= inthandler;	/* reserved */
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 25da07fd9f77..604af29b71ed 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1004,7 +1004,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_vcpu *vcpu;
 	ulong ga, ga_end;
 	int is_dirty = 0;
-	int r, n;
+	int r;
+	unsigned long n;
 
 	mutex_lock(&kvm->slots_lock);
 
@@ -1022,7 +1023,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		kvm_for_each_vcpu(n, vcpu, kvm)
 			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
 
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 48aeee8eefb0..19a8906bcaa2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1490,8 +1490,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 		for_each_sp(pages, sp, parents, i) {
 			kvm_mmu_zap_page(kvm, sp);
 			mmu_pages_clear_parents(&parents);
+			zapped++;
 		}
-		zapped += pages.nr;
 		kvm_mmu_pages_init(parent, &parents, &pages);
 	}
 
@@ -1542,14 +1542,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 	 */
 
 	if (used_pages > kvm_nr_mmu_pages) {
-		while (used_pages > kvm_nr_mmu_pages) {
+		while (used_pages > kvm_nr_mmu_pages &&
+			!list_empty(&kvm->arch.active_mmu_pages)) {
 			struct kvm_mmu_page *page;
 
 			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
-			kvm_mmu_zap_page(kvm, page);
+			used_pages -= kvm_mmu_zap_page(kvm, page);
 			used_pages--;
 		}
+		kvm_nr_mmu_pages = used_pages;
 		kvm->arch.n_free_mmu_pages = 0;
 	}
 	else
@@ -1596,7 +1598,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 		    && !sp->role.invalid) {
 			pgprintk("%s: zap %lx %x\n",
				 __func__, gfn, sp->role.word);
-			kvm_mmu_zap_page(kvm, sp);
+			if (kvm_mmu_zap_page(kvm, sp))
+				nn = bucket->first;
 		}
 	}
 }
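
Two things matter in the mmu.c hunks: kvm_mmu_zap_page() can free more than the one page it is handed (the per-child zapped++ above feeds its return value), and the victim list can drain before the target is reached. kvm_mmu_change_mmu_pages() therefore subtracts the return value instead of assuming one page per iteration and guards the loop with list_empty(), while mmu_unshadow() restarts its bucket scan when children were zapped, since the hash chain may have changed under it. The corrected shrink loop, reduced to its essentials (a sketch of the logic, not the exact source):

	while (used_pages > kvm_nr_mmu_pages &&
	       !list_empty(&kvm->arch.active_mmu_pages)) {
		struct kvm_mmu_page *page;

		page = container_of(kvm->arch.active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
		/* returns how many unsync children it zapped along the way */
		used_pages -= kvm_mmu_zap_page(kvm, page);
		used_pages--;	/* ...and one more for the page itself */
	}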
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 445c59411ed0..2ba58206812a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -706,29 +706,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_svm;
 
+	err = -ENOMEM;
 	page = alloc_page(GFP_KERNEL);
-	if (!page) {
-		err = -ENOMEM;
+	if (!page)
 		goto uninit;
-	}
 
-	err = -ENOMEM;
 	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
 	if (!msrpm_pages)
-		goto uninit;
+		goto free_page1;
 
 	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
 	if (!nested_msrpm_pages)
-		goto uninit;
-
-	svm->msrpm = page_address(msrpm_pages);
-	svm_vcpu_init_msrpm(svm->msrpm);
+		goto free_page2;
 
 	hsave_page = alloc_page(GFP_KERNEL);
 	if (!hsave_page)
-		goto uninit;
+		goto free_page3;
+
 	svm->nested.hsave = page_address(hsave_page);
 
+	svm->msrpm = page_address(msrpm_pages);
+	svm_vcpu_init_msrpm(svm->msrpm);
+
 	svm->nested.msrpm = page_address(nested_msrpm_pages);
 
 	svm->vmcb = page_address(page);
@@ -744,6 +743,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	return &svm->vcpu;
 
+free_page3:
+	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page2:
+	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page1:
+	__free_page(page);
 uninit:
 	kvm_vcpu_uninit(&svm->vcpu);
 free_svm:
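
The svm_create_vcpu() rework fixes leaks: previously, four failure paths all jumped to uninit, which freed none of the pages allocated so far. The replacement is the conventional kernel unwind ladder, where each allocation gets a label and a failure jumps to the label that releases exactly what already exists, in reverse order. The shape of the pattern (a generic sketch; alloc_a/free_a and friends are placeholders, not kernel APIs):

	err = -ENOMEM;
	a = alloc_a();
	if (!a)
		goto out;	/* nothing to undo yet */
	b = alloc_b();
	if (!b)
		goto undo_a;
	c = alloc_c();
	if (!c)
		goto undo_b;
	return 0;

undo_b:
	free_b(b);
undo_a:
	free_a(a);
out:
	return err;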
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 686492ed3079..bc933cfb4e66 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -77,6 +77,8 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
 
+#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
+
 /*
  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
  * ple_gap:    upper bound on the amount of time between two successive
@@ -131,7 +133,7 @@ struct vcpu_vmx {
 	} host_state;
 	struct {
 		int vm86_active;
-		u8 save_iopl;
+		ulong save_rflags;
 		struct kvm_save_segment {
 			u16 selector;
 			unsigned long base;
@@ -818,18 +820,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
-	unsigned long rflags;
+	unsigned long rflags, save_rflags;
 
 	rflags = vmcs_readl(GUEST_RFLAGS);
-	if (to_vmx(vcpu)->rmode.vm86_active)
-		rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+	if (to_vmx(vcpu)->rmode.vm86_active) {
+		rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+		save_rflags = to_vmx(vcpu)->rmode.save_rflags;
+		rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+	}
 	return rflags;
 }
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-	if (to_vmx(vcpu)->rmode.vm86_active)
+	if (to_vmx(vcpu)->rmode.vm86_active) {
+		to_vmx(vcpu)->rmode.save_rflags = rflags;
 		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+	}
 	vmcs_writel(GUEST_RFLAGS, rflags);
 }
 
@@ -1483,8 +1490,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 	vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
 
 	flags = vmcs_readl(GUEST_RFLAGS);
-	flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
-	flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
+	flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+	flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
 	vmcs_writel(GUEST_RFLAGS, flags);
 
 	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1557,8 +1564,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
 	flags = vmcs_readl(GUEST_RFLAGS);
-	vmx->rmode.save_iopl
-		= (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+	vmx->rmode.save_rflags = flags;
 
 	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
 
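
The vmx hunks fix a real-mode emulation bug: while rmode.vm86_active is set, the host forces X86_EFLAGS_IOPL and X86_EFLAGS_VM into the guest's RFLAGS, and the old code stashed only the IOPL field, so the guest's own values of the forced bits (notably VM) were lost on the round trip. Saving the complete RFLAGS and rebuilding the guest-visible value on read is the standard masked-merge idiom, sketched here as a standalone helper (illustrative, not the kernel's):

	/* Take 'live' where the guest owns the bit, 'saved' elsewhere.
	 * In the patch the mask is RMODE_GUEST_OWNED_EFLAGS_BITS, i.e.
	 * everything except X86_EFLAGS_IOPL | X86_EFLAGS_VM. */
	static unsigned long merge_rflags(unsigned long live,
					  unsigned long saved,
					  unsigned long owned_mask)
	{
		return (live & owned_mask) | (saved & ~owned_mask);
	}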
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 24cd0ee896e9..3c4ca98ad27f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -433,8 +433,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 #ifdef CONFIG_X86_64
 	if (cr0 & 0xffffffff00000000UL) {
-		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-		       cr0, kvm_read_cr0(vcpu));
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -443,14 +441,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	cr0 &= ~CR0_RESERVED_BITS;
 
 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
-		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
-		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
-		       "and a clear PE flag\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -461,15 +456,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 			int cs_db, cs_l;
 
 			if (!is_pae(vcpu)) {
-				printk(KERN_DEBUG "set_cr0: #GP, start paging "
-				       "in long mode while PAE is disabled\n");
 				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 			if (cs_l) {
-				printk(KERN_DEBUG "set_cr0: #GP, start paging "
-				       "in long mode while CS.L == 1\n");
 				kvm_inject_gp(vcpu, 0);
 				return;
 
@@ -477,8 +468,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	} else
 #endif
 	if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-		printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
-		       "reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -505,28 +494,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
 
 	if (cr4 & CR4_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (is_long_mode(vcpu)) {
 		if (!(cr4 & X86_CR4_PAE)) {
-			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
-			       "in long mode\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & pdptr_bits)
 		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (cr4 & X86_CR4_VMXE) {
-		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -547,21 +531,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	if (is_long_mode(vcpu)) {
 		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
-			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else {
 		if (is_pae(vcpu)) {
 			if (cr3 & CR3_PAE_RESERVED_BITS) {
-				printk(KERN_DEBUG
-				       "set_cr3: #GP, reserved bits\n");
 				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
-				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
-				       "reserved bits\n");
 				kvm_inject_gp(vcpu, 0);
 				return;
 			}
@@ -593,7 +572,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -649,15 +627,12 @@ static u32 emulated_msrs[] = {
 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	if (efer & efer_reserved_bits) {
-		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
-		       efer);
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (is_paging(vcpu)
 	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
-		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -667,7 +642,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
-			printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
@@ -678,7 +652,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
-			printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
@@ -967,9 +940,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		if (msr >= MSR_IA32_MC0_CTL &&
 		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
 			u32 offset = msr - MSR_IA32_MC0_CTL;
-			/* only 0 or all 1s can be written to IA32_MCi_CTL */
+			/* only 0 or all 1s can be written to IA32_MCi_CTL
+			 * some Linux kernels though clear bit 10 in bank 4 to
+			 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
+			 * this to avoid an uncatched #GP in the guest
+			 */
 			if ((offset & 0x3) == 0 &&
-			    data != 0 && data != ~(u64)0)
+			    data != 0 && (data | (1 << 10)) != ~(u64)0)
 				return -1;
 			vcpu->arch.mce_banks[offset] = data;
 			break;
@@ -2635,8 +2612,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 				      struct kvm_dirty_log *log)
 {
-	int r, n, i;
+	int r, i;
 	struct kvm_memory_slot *memslot;
+	unsigned long n;
 	unsigned long is_dirty = 0;
 	unsigned long *dirty_bitmap = NULL;
 
@@ -2651,7 +2629,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 
 	r = -ENOMEM;
 	dirty_bitmap = vmalloc(n);
@@ -4483,7 +4461,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		kvm_set_cr8(vcpu, kvm_run->cr8);
 
 	if (vcpu->arch.pio.cur_count) {
+		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = complete_pio(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 		if (r)
 			goto out;
 	}
@@ -5146,6 +5126,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	int ret = 0;
 	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
 	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
+	u32 desc_limit;
 
 	old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
 
@@ -5168,7 +5149,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 		}
 	}
 
-	if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
+	desc_limit = get_desc_limit(&nseg_desc);
+	if (!nseg_desc.p ||
+	    ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
+	     desc_limit < 0x2b)) {
 		kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
 		return 1;
 	}
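
Beyond removing guest-triggerable printk noise from the control-register and EFER paths, the x86.c hunks carry two behavioral fixes. The MCE change tolerates guests that clear bit 10 of an IA32_MCi_CTL bank (a Linux workaround for a K8 GART erratum) instead of failing the write and injecting #GP. The task-switch change corrects an over-strict TSS limit test: only a 32-bit TSS (descriptor type with bit 3 set) must be at least 0x67 bytes, while 0x2b suffices for a 16-bit TSS, so the old unconditional "< 0x67" check broke 16-bit task switches. The rule, restated as a standalone predicate (hypothetical helper name):

	/* TSS descriptor limit validity, as applied in the hunk above */
	static int tss_limit_ok(unsigned int limit, unsigned int type)
	{
		unsigned int min = (type & 8) ? 0x67 : 0x2b; /* 32-bit : 16-bit */

		return limit >= min;
	}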
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e3e9a36ea3b7..20e48401910e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1650,8 +1650,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 			     int previous, int *dd_idx,
 			     struct stripe_head *sh)
 {
-	long stripe;
-	unsigned long chunk_number;
+	sector_t stripe;
+	sector_t chunk_number;
 	unsigned int chunk_offset;
 	int pd_idx, qd_idx;
 	int ddf_layout = 0;
@@ -1671,17 +1671,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 	 */
 	chunk_offset = sector_div(r_sector, sectors_per_chunk);
 	chunk_number = r_sector;
-	BUG_ON(r_sector != chunk_number);
 
 	/*
 	 * Compute the stripe number
 	 */
-	stripe = chunk_number / data_disks;
-
-	/*
-	 * Compute the data disk and parity disk indexes inside the stripe
-	 */
-	*dd_idx = chunk_number % data_disks;
+	stripe = chunk_number;
+	*dd_idx = sector_div(stripe, data_disks);
 
 	/*
 	 * Select the parity disk based on the user selected algorithm.
@@ -1870,14 +1865,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
 					 : conf->algorithm;
 	sector_t stripe;
 	int chunk_offset;
-	int chunk_number, dummy1, dd_idx = i;
+	sector_t chunk_number;
+	int dummy1, dd_idx = i;
 	sector_t r_sector;
 	struct stripe_head sh2;
 
 
 	chunk_offset = sector_div(new_sector, sectors_per_chunk);
 	stripe = new_sector;
-	BUG_ON(new_sector != stripe);
 
 	if (i == sh->pd_idx)
 		return 0;
@@ -1970,7 +1965,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
 	}
 
 	chunk_number = stripe * data_disks + i;
-	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
+	r_sector = chunk_number * sectors_per_chunk + chunk_offset;
 
 	check = raid5_compute_sector(conf, r_sector,
 				     previous, &dummy1, &sh2);
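
The raid5 changes are an overflow fix: stripe and chunk_number could exceed what long/unsigned long hold on 32-bit hosts with sufficiently large arrays (the removed BUG_ONs fired exactly when that happened), so both become sector_t, and the separate division and modulo are replaced by sector_div(). sector_div(x, n) divides x in place and evaluates to the remainder, which yields quotient and remainder in one step and avoids open-coded 64-bit division on 32-bit builds (usage as in the hunk above):

	sector_t stripe = chunk_number;

	/* stripe becomes chunk_number / data_disks; the expression's
	 * value is chunk_number % data_disks */
	*dd_idx = sector_div(stripe, data_disks);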
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index a681f5e8f786..ad036dd8da13 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -618,9 +618,12 @@ static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
 		dev_get_platdata(&pdev->dev);
 	int i;
 
+	platform_set_drvdata(pdev, NULL);
+
 	for (i = 0; i < pdata->num_regulators; i++)
 		regulator_unregister(priv->regulators[i]);
 
+	kfree(priv);
 	return 0;
 }
 
diff --git a/drivers/serial/mcf.c b/drivers/serial/mcf.c
index 7bb5fee639e3..b5aaef965f24 100644
--- a/drivers/serial/mcf.c
+++ b/drivers/serial/mcf.c
@@ -263,6 +263,7 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
 	}
 
 	spin_lock_irqsave(&port->lock, flags);
+	uart_update_timeout(port, termios->c_cflag, baud);
 	writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
 	writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
 	writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR);
@@ -379,6 +380,7 @@ static irqreturn_t mcf_interrupt(int irq, void *data)
 static void mcf_config_port(struct uart_port *port, int flags)
 {
 	port->type = PORT_MCF;
+	port->fifosize = MCFUART_TXFIFOSIZE;
 
 	/* Clear mask, so no surprise interrupts. */
 	writeb(0, port->membase + MCFUART_UIMR);
@@ -424,7 +426,7 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
 /*
 *	Define the basic serial functions we support.
 */
-static struct uart_ops mcf_uart_ops = {
+static const struct uart_ops mcf_uart_ops = {
 	.tx_empty	= mcf_tx_empty,
 	.get_mctrl	= mcf_get_mctrl,
 	.set_mctrl	= mcf_set_mctrl,
@@ -443,7 +445,7 @@ static struct uart_ops mcf_uart_ops = {
 	.verify_port	= mcf_verify_port,
 };
 
-static struct mcf_uart mcf_ports[3];
+static struct mcf_uart mcf_ports[4];
 
 #define	MCF_MAXPORTS	ARRAY_SIZE(mcf_ports)
 
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 5e813a816ce4..b3feddc4f7d6 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -138,9 +138,9 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 {
 	struct afs_super_info *super;
 	struct vfsmount *mnt;
-	struct page *page = NULL;
+	struct page *page;
 	size_t size;
-	char *buf, *devname = NULL, *options = NULL;
+	char *buf, *devname, *options;
 	int ret;
 
 	_enter("{%s}", mntpt->d_name.name);
@@ -150,22 +150,22 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 	ret = -EINVAL;
 	size = mntpt->d_inode->i_size;
 	if (size > PAGE_SIZE - 1)
-		goto error;
+		goto error_no_devname;
 
 	ret = -ENOMEM;
 	devname = (char *) get_zeroed_page(GFP_KERNEL);
 	if (!devname)
-		goto error;
+		goto error_no_devname;
 
 	options = (char *) get_zeroed_page(GFP_KERNEL);
 	if (!options)
-		goto error;
+		goto error_no_options;
 
 	/* read the contents of the AFS special symlink */
 	page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
 	if (IS_ERR(page)) {
 		ret = PTR_ERR(page);
-		goto error;
+		goto error_no_page;
 	}
 
 	ret = -EIO;
@@ -196,12 +196,12 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 	return mnt;
 
 error:
-	if (page)
-		page_cache_release(page);
-	if (devname)
-		free_page((unsigned long) devname);
-	if (options)
-		free_page((unsigned long) options);
+	page_cache_release(page);
+error_no_page:
+	free_page((unsigned long) options);
+error_no_options:
+	free_page((unsigned long) devname);
+error_no_devname:
 	_leave(" = %d", ret);
 	return ERR_PTR(ret);
 }
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index e0e769bdca59..49566c1687d8 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -355,7 +355,7 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
 
 	if (!flat_reloc_valid(r, start_brk - start_data + text_len)) {
 		printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)",
-		       (int) r,(int)(start_brk-start_code),(int)text_len);
+		       (int) r,(int)(start_brk-start_data+text_len),(int)text_len);
 		goto failed;
 	}
 
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 9dd126276c9f..ed9ba6fe04f5 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -61,7 +61,7 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
 			inode->i_op = &page_symlink_inode_operations;
 			inode->i_mapping->a_ops = &jfs_aops;
 		} else {
-			inode->i_op = &jfs_symlink_inode_operations;
+			inode->i_op = &jfs_fast_symlink_inode_operations;
 			/*
 			 * The inline data should be null-terminated, but
 			 * don't let on-disk corruption crash the kernel
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 6c4dfcbf3f55..9e2f6a721668 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -196,7 +196,7 @@ int dbMount(struct inode *ipbmap)
 	bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
 	bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
 	bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
-	bmp->db_agheigth = le32_to_cpu(dbmp_le->dn_agheigth);
+	bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
 	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
 	bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
 	bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
@@ -288,7 +288,7 @@ int dbSync(struct inode *ipbmap)
 	dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
 	dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
 	dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
-	dbmp_le->dn_agheigth = cpu_to_le32(bmp->db_agheigth);
+	dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight);
 	dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
 	dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
 	dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
@@ -1441,7 +1441,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
 	 * tree index of this allocation group within the control page.
 	 */
 	agperlev =
-	    (1 << (L2LPERCTL - (bmp->db_agheigth << 1))) / bmp->db_agwidth;
+	    (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
 	ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
 
 	/* dmap control page trees fan-out by 4 and a single allocation
@@ -1460,7 +1460,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
 	 * the subtree to find the leftmost leaf that describes this
 	 * free space.
 	 */
-	for (k = bmp->db_agheigth; k > 0; k--) {
+	for (k = bmp->db_agheight; k > 0; k--) {
 		for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
 			if (l2nb <= dcp->stree[m + n]) {
 				ti = m + n;
@@ -3607,7 +3607,7 @@ void dbFinalizeBmap(struct inode *ipbmap)
 	}
 
 	/*
-	 * compute db_aglevel, db_agheigth, db_width, db_agstart:
+	 * compute db_aglevel, db_agheight, db_width, db_agstart:
 	 *	an ag is covered in aglevel dmapctl summary tree,
 	 *	at agheight level height (from leaf) with agwidth number of nodes
 	 *	each, which starts at agstart index node of the smmary tree node
@@ -3616,9 +3616,9 @@ void dbFinalizeBmap(struct inode *ipbmap)
 	bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize);
 	l2nl =
 	    bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL);
-	bmp->db_agheigth = l2nl >> 1;
-	bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheigth << 1));
-	for (i = 5 - bmp->db_agheigth, bmp->db_agstart = 0, n = 1; i > 0;
+	bmp->db_agheight = l2nl >> 1;
+	bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1));
+	for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0;
 	     i--) {
 		bmp->db_agstart += n;
 		n <<= 2;
diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h
index 1a6eb41569bc..6dcb906c55d8 100644
--- a/fs/jfs/jfs_dmap.h
+++ b/fs/jfs/jfs_dmap.h
@@ -210,7 +210,7 @@ struct dbmap_disk {
 	__le32 dn_maxag;	/* 4: max active alloc group number	*/
 	__le32 dn_agpref;	/* 4: preferred alloc group (hint)	*/
 	__le32 dn_aglevel;	/* 4: dmapctl level holding the AG	*/
-	__le32 dn_agheigth;	/* 4: height in dmapctl of the AG	*/
+	__le32 dn_agheight;	/* 4: height in dmapctl of the AG	*/
 	__le32 dn_agwidth;	/* 4: width in dmapctl of the AG	*/
 	__le32 dn_agstart;	/* 4: start tree index at AG height	*/
 	__le32 dn_agl2size;	/* 4: l2 num of blks per alloc group	*/
@@ -229,7 +229,7 @@ struct dbmap {
 	int dn_maxag;		/* max active alloc group number	*/
 	int dn_agpref;		/* preferred alloc group (hint)		*/
 	int dn_aglevel;		/* dmapctl level holding the AG		*/
-	int dn_agheigth;	/* height in dmapctl of the AG		*/
+	int dn_agheight;	/* height in dmapctl of the AG		*/
 	int dn_agwidth;		/* width in dmapctl of the AG		*/
 	int dn_agstart;		/* start tree index at AG height	*/
 	int dn_agl2size;	/* l2 num of blks per alloc group	*/
@@ -255,7 +255,7 @@ struct bmap {
 #define db_agsize	db_bmap.dn_agsize
 #define db_agl2size	db_bmap.dn_agl2size
 #define db_agwidth	db_bmap.dn_agwidth
-#define db_agheigth	db_bmap.dn_agheigth
+#define db_agheight	db_bmap.dn_agheight
 #define db_agstart	db_bmap.dn_agstart
 #define db_numag	db_bmap.dn_numag
 #define db_maxlevel	db_bmap.dn_maxlevel
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 79e2c79661df..9e6bda30a6e8 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -48,5 +48,6 @@ extern const struct file_operations jfs_dir_operations;
 extern const struct inode_operations jfs_file_inode_operations;
 extern const struct file_operations jfs_file_operations;
 extern const struct inode_operations jfs_symlink_inode_operations;
+extern const struct inode_operations jfs_fast_symlink_inode_operations;
 extern const struct dentry_operations jfs_ci_dentry_operations;
 #endif				/* _H_JFS_INODE */
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 4a3e9f39c21d..a9cf8e8675be 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -956,7 +956,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
 	 */
 
 	if (ssize <= IDATASIZE) {
-		ip->i_op = &jfs_symlink_inode_operations;
+		ip->i_op = &jfs_fast_symlink_inode_operations;
 
 		i_fastsymlink = JFS_IP(ip)->i_inline;
 		memcpy(i_fastsymlink, name, ssize);
@@ -978,7 +978,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
 	else {
 		jfs_info("jfs_symlink: allocate extent ip:0x%p", ip);
 
-		ip->i_op = &page_symlink_inode_operations;
+		ip->i_op = &jfs_symlink_inode_operations;
 		ip->i_mapping->a_ops = &jfs_aops;
 
 		/*
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 7f24a0bb08ca..1aba0039f1c9 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -81,6 +81,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
 	struct inode *iplist[1];
 	struct jfs_superblock *j_sb, *j_sb2;
 	uint old_agsize;
+	int agsizechanged = 0;
 	struct buffer_head *bh, *bh2;
 
 	/* If the volume hasn't grown, get out now */
@@ -333,6 +334,9 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
 	 */
 	if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
 		goto error_out;
+
+	agsizechanged |= (bmp->db_agsize != old_agsize);
+
 	/*
 	 * the map now has extended to cover additional nblocks:
 	 * dn_mapsize = oldMapsize + nblocks;
@@ -432,7 +436,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
 	 *    will correctly identify the new ag);
 	 */
 	/* if new AG size the same as old AG size, done! */
-	if (bmp->db_agsize != old_agsize) {
+	if (agsizechanged) {
 		if ((rc = diExtendFS(ipimap, ipbmap)))
 			goto error_out;
 
diff --git a/fs/jfs/symlink.c b/fs/jfs/symlink.c
index 4af1a05aad0a..205b946d8e0d 100644
--- a/fs/jfs/symlink.c
+++ b/fs/jfs/symlink.c
@@ -29,9 +29,21 @@ static void *jfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 	return NULL;
 }
 
-const struct inode_operations jfs_symlink_inode_operations = {
+const struct inode_operations jfs_fast_symlink_inode_operations = {
 	.readlink	= generic_readlink,
 	.follow_link	= jfs_follow_link,
+	.setattr	= jfs_setattr,
+	.setxattr	= jfs_setxattr,
+	.getxattr	= jfs_getxattr,
+	.listxattr	= jfs_listxattr,
+	.removexattr	= jfs_removexattr,
+};
+
+const struct inode_operations jfs_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= page_follow_link_light,
+	.put_link	= page_put_link,
+	.setattr	= jfs_setattr,
 	.setxattr	= jfs_setxattr,
 	.getxattr	= jfs_getxattr,
 	.listxattr	= jfs_listxattr,
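
The jfs symlink commits split one shared operations table in two. Short targets that fit in the inode's inline data keep jfs_follow_link(), which hands back the inline string directly; long targets stored in an extent now get a jfs-owned table built on the generic page-cache helpers page_follow_link_light/page_put_link instead of the bare page_symlink_inode_operations, so the jfs setattr/xattr methods are no longer lost on long symlinks. The selection happens in jfs_symlink(), as the namei.c hunks above show:

	if (ssize <= IDATASIZE)
		ip->i_op = &jfs_fast_symlink_inode_operations;	/* inline target */
	else
		ip->i_op = &jfs_symlink_inode_operations;	/* page-cache target */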
diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c
index 84e36f52fe95..76c242fbe1b0 100644
--- a/fs/logfs/gc.c
+++ b/fs/logfs/gc.c
@@ -459,6 +459,14 @@ static void __logfs_gc_pass(struct super_block *sb, int target)
 	struct logfs_block *block;
 	int round, progress, last_progress = 0;
 
+	/*
+	 * Doing too many changes to the segfile at once would result
+	 * in a large number of aliases.  Write the journal before
+	 * things get out of hand.
+	 */
+	if (super->s_shadow_tree.no_shadowed_segments >= MAX_OBJ_ALIASES)
+		logfs_write_anchor(sb);
+
 	if (no_free_segments(sb) >= target &&
 			super->s_no_object_aliases < MAX_OBJ_ALIASES)
 		return;
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c index 33bd260b8309..fb0a613f885b 100644 --- a/fs/logfs/journal.c +++ b/fs/logfs/journal.c | |||
| @@ -389,7 +389,10 @@ static void journal_get_erase_count(struct logfs_area *area) | |||
| 389 | static int journal_erase_segment(struct logfs_area *area) | 389 | static int journal_erase_segment(struct logfs_area *area) |
| 390 | { | 390 | { |
| 391 | struct super_block *sb = area->a_sb; | 391 | struct super_block *sb = area->a_sb; |
| 392 | struct logfs_segment_header sh; | 392 | union { |
| 393 | struct logfs_segment_header sh; | ||
| 394 | unsigned char c[ALIGN(sizeof(struct logfs_segment_header), 16)]; | ||
| 395 | } u; | ||
| 393 | u64 ofs; | 396 | u64 ofs; |
| 394 | int err; | 397 | int err; |
| 395 | 398 | ||
| @@ -397,20 +400,21 @@ static int journal_erase_segment(struct logfs_area *area) | |||
| 397 | if (err) | 400 | if (err) |
| 398 | return err; | 401 | return err; |
| 399 | 402 | ||
| 400 | sh.pad = 0; | 403 | memset(&u, 0, sizeof(u)); |
| 401 | sh.type = SEG_JOURNAL; | 404 | u.sh.pad = 0; |
| 402 | sh.level = 0; | 405 | u.sh.type = SEG_JOURNAL; |
| 403 | sh.segno = cpu_to_be32(area->a_segno); | 406 | u.sh.level = 0; |
| 404 | sh.ec = cpu_to_be32(area->a_erase_count); | 407 | u.sh.segno = cpu_to_be32(area->a_segno); |
| 405 | sh.gec = cpu_to_be64(logfs_super(sb)->s_gec); | 408 | u.sh.ec = cpu_to_be32(area->a_erase_count); |
| 406 | sh.crc = logfs_crc32(&sh, sizeof(sh), 4); | 409 | u.sh.gec = cpu_to_be64(logfs_super(sb)->s_gec); |
| 410 | u.sh.crc = logfs_crc32(&u.sh, sizeof(u.sh), 4); | ||
| 407 | 411 | ||
| 408 | /* This causes a bug in segment.c. Not yet. */ | 412 | /* This causes a bug in segment.c. Not yet. */ |
| 409 | //logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0); | 413 | //logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0); |
| 410 | 414 | ||
| 411 | ofs = dev_ofs(sb, area->a_segno, 0); | 415 | ofs = dev_ofs(sb, area->a_segno, 0); |
| 412 | area->a_used_bytes = ALIGN(sizeof(sh), 16); | 416 | area->a_used_bytes = sizeof(u); |
| 413 | logfs_buf_write(area, ofs, &sh, sizeof(sh)); | 417 | logfs_buf_write(area, ofs, &u, sizeof(u)); |
| 414 | return 0; | 418 | return 0; |
| 415 | } | 419 | } |
| 416 | 420 | ||
| @@ -494,6 +498,8 @@ static void account_shadows(struct super_block *sb) | |||
| 494 | 498 | ||
| 495 | btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow); | 499 | btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow); |
| 496 | btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow); | 500 | btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow); |
| 501 | btree_grim_visitor32(&tree->segment_map, 0, NULL); | ||
| 502 | tree->no_shadowed_segments = 0; | ||
| 497 | 503 | ||
| 498 | if (li->li_block) { | 504 | if (li->li_block) { |
| 499 | /* | 505 | /* |
| @@ -607,9 +613,9 @@ static size_t __logfs_write_je(struct super_block *sb, void *buf, u16 type, | |||
| 607 | if (len == 0) | 613 | if (len == 0) |
| 608 | return logfs_write_header(super, header, 0, type); | 614 | return logfs_write_header(super, header, 0, type); |
| 609 | 615 | ||
| 616 | BUG_ON(len > sb->s_blocksize); | ||
| 610 | compr_len = logfs_compress(buf, data, len, sb->s_blocksize); | 617 | compr_len = logfs_compress(buf, data, len, sb->s_blocksize); |
| 611 | if (compr_len < 0 || type == JE_ANCHOR) { | 618 | if (compr_len < 0 || type == JE_ANCHOR) { |
| 612 | BUG_ON(len > sb->s_blocksize); | ||
| 613 | memcpy(data, buf, len); | 619 | memcpy(data, buf, len); |
| 614 | compr_len = len; | 620 | compr_len = len; |
| 615 | compr = COMPR_NONE; | 621 | compr = COMPR_NONE; |
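
Hoisting the BUG_ON() above logfs_compress() makes the invariant unconditional: previously an oversized entry that happened to compress below the block size slipped through, because the check only fired on the uncompressed fallback path. A hedged userspace sketch of that ordering; compress_stub() only models the contract (negative return means "store uncompressed"), not the real logfs_compress() signature:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    static long compress_stub(const void *in, void *out,
                              size_t inlen, size_t outlen)
    {
            if (inlen > outlen)
                    return -1;      /* treat as incompressible */
            memcpy(out, in, inlen); /* stand-in for real compression */
            return (long)inlen;
    }

    size_t write_entry(void *out, const void *buf, size_t len,
                       size_t blocksize)
    {
            long compr_len;

            assert(len <= blocksize);       /* check on every path */
            compr_len = compress_stub(buf, out, len, blocksize);
            if (compr_len < 0) {            /* fallback: raw copy */
                    memcpy(out, buf, len);
                    compr_len = (long)len;
            }
            return (size_t)compr_len;
    }
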
| @@ -661,6 +667,7 @@ static int logfs_write_je_buf(struct super_block *sb, void *buf, u16 type, | |||
| 661 | if (ofs < 0) | 667 | if (ofs < 0) |
| 662 | return ofs; | 668 | return ofs; |
| 663 | logfs_buf_write(area, ofs, super->s_compressed_je, len); | 669 | logfs_buf_write(area, ofs, super->s_compressed_je, len); |
| 670 | BUG_ON(super->s_no_je >= MAX_JOURNAL_ENTRIES); | ||
| 664 | super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs); | 671 | super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs); |
| 665 | return 0; | 672 | return 0; |
| 666 | } | 673 | } |
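
The new BUG_ON() pairs with the array growth in logfs.h below: an append into a fixed-size array has to be bounds-checked before the post-increment store, otherwise the 257th journal entry would silently overwrite whatever follows s_je_array. The guarded-append pattern, reduced to a sketch with a simplified struct:

    #include <assert.h>
    #include <stdint.h>

    #define MAX_JOURNAL_ENTRIES 256

    struct journal_state {
            uint64_t je_array[MAX_JOURNAL_ENTRIES];
            int no_je;              /* next free slot */
    };

    static void je_append(struct journal_state *s, uint64_t ofs)
    {
            /* BUG_ON() in the kernel; assert() here */
            assert(s->no_je < MAX_JOURNAL_ENTRIES);
            s->je_array[s->no_je++] = ofs;
    }
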
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h index b84b0eec6024..0a3df1a0c936 100644 --- a/fs/logfs/logfs.h +++ b/fs/logfs/logfs.h | |||
| @@ -257,10 +257,14 @@ struct logfs_shadow { | |||
| 257 | * struct shadow_tree | 257 | * struct shadow_tree |
| 258 | * @new: shadows where old_ofs==0, indexed by new_ofs | 258 | * @new: shadows where old_ofs==0, indexed by new_ofs |
| 259 | * @old: shadows where old_ofs!=0, indexed by old_ofs | 259 | * @old: shadows where old_ofs!=0, indexed by old_ofs |
| 260 | * @segment_map: set of segments containing shadows | ||
| 261 | * @no_shadowed_segments: number of segments containing shadows | ||
| 260 | */ | 262 | */ |
| 261 | struct shadow_tree { | 263 | struct shadow_tree { |
| 262 | struct btree_head64 new; | 264 | struct btree_head64 new; |
| 263 | struct btree_head64 old; | 265 | struct btree_head64 old; |
| 266 | struct btree_head32 segment_map; | ||
| 267 | int no_shadowed_segments; | ||
| 264 | }; | 268 | }; |
| 265 | 269 | ||
| 266 | struct object_alias_item { | 270 | struct object_alias_item { |
| @@ -305,13 +309,14 @@ typedef int write_alias_t(struct super_block *sb, u64 ino, u64 bix, | |||
| 305 | level_t level, int child_no, __be64 val); | 309 | level_t level, int child_no, __be64 val); |
| 306 | struct logfs_block_ops { | 310 | struct logfs_block_ops { |
| 307 | void (*write_block)(struct logfs_block *block); | 311 | void (*write_block)(struct logfs_block *block); |
| 308 | gc_level_t (*block_level)(struct logfs_block *block); | ||
| 309 | void (*free_block)(struct super_block *sb, struct logfs_block*block); | 312 | void (*free_block)(struct super_block *sb, struct logfs_block*block); |
| 310 | int (*write_alias)(struct super_block *sb, | 313 | int (*write_alias)(struct super_block *sb, |
| 311 | struct logfs_block *block, | 314 | struct logfs_block *block, |
| 312 | write_alias_t *write_one_alias); | 315 | write_alias_t *write_one_alias); |
| 313 | }; | 316 | }; |
| 314 | 317 | ||
| 318 | #define MAX_JOURNAL_ENTRIES 256 | ||
| 319 | |||
| 315 | struct logfs_super { | 320 | struct logfs_super { |
| 316 | struct mtd_info *s_mtd; /* underlying device */ | 321 | struct mtd_info *s_mtd; /* underlying device */ |
| 317 | struct block_device *s_bdev; /* underlying device */ | 322 | struct block_device *s_bdev; /* underlying device */ |
| @@ -378,7 +383,7 @@ struct logfs_super { | |||
| 378 | u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */ | 383 | u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */ |
| 379 | u64 s_last_version; | 384 | u64 s_last_version; |
| 380 | struct logfs_area *s_journal_area; /* open journal segment */ | 385 | struct logfs_area *s_journal_area; /* open journal segment */ |
| 381 | __be64 s_je_array[64]; | 386 | __be64 s_je_array[MAX_JOURNAL_ENTRIES]; |
| 382 | int s_no_je; | 387 | int s_no_je; |
| 383 | 388 | ||
| 384 | int s_sum_index; /* for the 12 summaries */ | 389 | int s_sum_index; /* for the 12 summaries */ |
| @@ -722,4 +727,10 @@ static inline struct logfs_area *get_area(struct super_block *sb, | |||
| 722 | return logfs_super(sb)->s_area[(__force u8)gc_level]; | 727 | return logfs_super(sb)->s_area[(__force u8)gc_level]; |
| 723 | } | 728 | } |
| 724 | 729 | ||
| 730 | static inline void logfs_mempool_destroy(mempool_t *pool) | ||
| 731 | { | ||
| 732 | if (pool) | ||
| 733 | mempool_destroy(pool); | ||
| 734 | } | ||
| 735 | |||
| 725 | #endif | 736 | #endif |
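
logfs_mempool_destroy() exists because mempool_destroy() at this point in the kernel's history dereferences the pool unconditionally, so calling it with a pool that was never allocated oopses. Centralizing the NULL check lets error paths unwind without tracking which pools were created. A hedged sketch of the unwind this enables; it assumes the logfs_super was zero-initialized, the pool sizes are illustrative, and item_alloc/item_free are hypothetical callbacks:

    static int logfs_pools_init(struct logfs_super *super)
    {
            super->s_btree_pool = mempool_create(32, btree_alloc,
                                                 btree_free, NULL);
            super->s_alias_pool = mempool_create(600, item_alloc,
                                                 item_free, NULL);
            if (!super->s_btree_pool || !super->s_alias_pool) {
                    /* NULL-safe: destroys only what was created */
                    logfs_mempool_destroy(super->s_btree_pool);
                    logfs_mempool_destroy(super->s_alias_pool);
                    return -ENOMEM;
            }
            return 0;
    }
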
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index bff40253dfb2..3159db6958e5 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c | |||
| @@ -430,25 +430,6 @@ static void inode_write_block(struct logfs_block *block) | |||
| 430 | } | 430 | } |
| 431 | } | 431 | } |
| 432 | 432 | ||
| 433 | static gc_level_t inode_block_level(struct logfs_block *block) | ||
| 434 | { | ||
| 435 | BUG_ON(block->inode->i_ino == LOGFS_INO_MASTER); | ||
| 436 | return GC_LEVEL(LOGFS_MAX_LEVELS); | ||
| 437 | } | ||
| 438 | |||
| 439 | static gc_level_t indirect_block_level(struct logfs_block *block) | ||
| 440 | { | ||
| 441 | struct page *page; | ||
| 442 | struct inode *inode; | ||
| 443 | u64 bix; | ||
| 444 | level_t level; | ||
| 445 | |||
| 446 | page = block->page; | ||
| 447 | inode = page->mapping->host; | ||
| 448 | logfs_unpack_index(page->index, &bix, &level); | ||
| 449 | return expand_level(inode->i_ino, level); | ||
| 450 | } | ||
| 451 | |||
| 452 | /* | 433 | /* |
| 453 | * This silences a false, yet annoying gcc warning. I hate it when my editor | 434 | * This silences a false, yet annoying gcc warning. I hate it when my editor |
| 454 | * jumps into bitops.h each time I recompile this file. | 435 | * jumps into bitops.h each time I recompile this file. |
| @@ -587,14 +568,12 @@ static void indirect_free_block(struct super_block *sb, | |||
| 587 | 568 | ||
| 588 | static struct logfs_block_ops inode_block_ops = { | 569 | static struct logfs_block_ops inode_block_ops = { |
| 589 | .write_block = inode_write_block, | 570 | .write_block = inode_write_block, |
| 590 | .block_level = inode_block_level, | ||
| 591 | .free_block = inode_free_block, | 571 | .free_block = inode_free_block, |
| 592 | .write_alias = inode_write_alias, | 572 | .write_alias = inode_write_alias, |
| 593 | }; | 573 | }; |
| 594 | 574 | ||
| 595 | struct logfs_block_ops indirect_block_ops = { | 575 | struct logfs_block_ops indirect_block_ops = { |
| 596 | .write_block = indirect_write_block, | 576 | .write_block = indirect_write_block, |
| 597 | .block_level = indirect_block_level, | ||
| 598 | .free_block = indirect_free_block, | 577 | .free_block = indirect_free_block, |
| 599 | .write_alias = indirect_write_alias, | 578 | .write_alias = indirect_write_alias, |
| 600 | }; | 579 | }; |
| @@ -1241,6 +1220,18 @@ static void free_shadow(struct inode *inode, struct logfs_shadow *shadow) | |||
| 1241 | mempool_free(shadow, super->s_shadow_pool); | 1220 | mempool_free(shadow, super->s_shadow_pool); |
| 1242 | } | 1221 | } |
| 1243 | 1222 | ||
| 1223 | static void mark_segment(struct shadow_tree *tree, u32 segno) | ||
| 1224 | { | ||
| 1225 | int err; | ||
| 1226 | |||
| 1227 | if (!btree_lookup32(&tree->segment_map, segno)) { | ||
| 1228 | err = btree_insert32(&tree->segment_map, segno, (void *)1, | ||
| 1229 | GFP_NOFS); | ||
| 1230 | BUG_ON(err); | ||
| 1231 | tree->no_shadowed_segments++; | ||
| 1232 | } | ||
| 1233 | } | ||
| 1234 | |||
| 1244 | /** | 1235 | /** |
| 1245 | * fill_shadow_tree - Propagate shadow tree changes due to a write | 1236 | * fill_shadow_tree - Propagate shadow tree changes due to a write |
| 1246 | * @inode: Inode owning the page | 1237 | * @inode: Inode owning the page |
| @@ -1288,6 +1279,8 @@ static void fill_shadow_tree(struct inode *inode, struct page *page, | |||
| 1288 | 1279 | ||
| 1289 | super->s_dirty_used_bytes += shadow->new_len; | 1280 | super->s_dirty_used_bytes += shadow->new_len; |
| 1290 | super->s_dirty_free_bytes += shadow->old_len; | 1281 | super->s_dirty_free_bytes += shadow->old_len; |
| 1282 | mark_segment(tree, shadow->old_ofs >> super->s_segshift); | ||
| 1283 | mark_segment(tree, shadow->new_ofs >> super->s_segshift); | ||
| 1291 | } | 1284 | } |
| 1292 | } | 1285 | } |
| 1293 | 1286 | ||
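
mark_segment() uses the new segment_map btree as a plain set: insert only on first sighting and count distinct segments, so no_shadowed_segments gives the journal an upper bound on the segment-accounting entries a commit may need (account_shadows() above then empties the set with btree_grim_visitor32()). Shifting old_ofs/new_ofs right by s_segshift converts a byte offset into a segment number. A userspace sketch of the "insert if absent, count distinct" pattern, with a flat bool array standing in for the btree:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSEGS 1024              /* sketch-sized segment space */

    struct shadow_tree {
            bool segment_map[NSEGS]; /* btree_head32 in the kernel */
            int no_shadowed_segments;
    };

    static void mark_segment(struct shadow_tree *tree, uint32_t segno)
    {
            if (!tree->segment_map[segno]) {
                    tree->segment_map[segno] = true;
                    tree->no_shadowed_segments++;
            }
    }

    int main(void)
    {
            struct shadow_tree tree = { { false }, 0 };
            unsigned segshift = 17;         /* 128KiB segments, say */
            uint64_t old_ofs = 3ULL << segshift;
            uint64_t new_ofs = 9ULL << segshift;

            mark_segment(&tree, old_ofs >> segshift);
            mark_segment(&tree, new_ofs >> segshift);
            mark_segment(&tree, old_ofs >> segshift);  /* duplicate */
            printf("%d shadowed segments\n",
                   tree.no_shadowed_segments);          /* prints 2 */
            return 0;
    }
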
| @@ -1845,19 +1838,37 @@ static int __logfs_truncate(struct inode *inode, u64 size) | |||
| 1845 | return logfs_truncate_direct(inode, size); | 1838 | return logfs_truncate_direct(inode, size); |
| 1846 | } | 1839 | } |
| 1847 | 1840 | ||
| 1848 | int logfs_truncate(struct inode *inode, u64 size) | 1841 | /* |
| 1842 | * Truncate, by changing the segment file, can consume a fair amount | ||
| 1843 | * of resources. So back off from time to time and do some GC. | ||
| 1844 | * 8MiB, or 2048 4KiB blocks, should be well within safety limits even if | ||
| 1845 | * every single block resided in a different segment. | ||
| 1846 | */ | ||
| 1847 | #define TRUNCATE_STEP (8 * 1024 * 1024) | ||
| 1848 | int logfs_truncate(struct inode *inode, u64 target) | ||
| 1849 | { | 1849 | { |
| 1850 | struct super_block *sb = inode->i_sb; | 1850 | struct super_block *sb = inode->i_sb; |
| 1851 | int err; | 1851 | u64 size = i_size_read(inode); |
| 1852 | int err = 0; | ||
| 1852 | 1853 | ||
| 1853 | logfs_get_wblocks(sb, NULL, 1); | 1854 | size = ALIGN(size, TRUNCATE_STEP); |
| 1854 | err = __logfs_truncate(inode, size); | 1855 | while (size > target) { |
| 1855 | if (!err) | 1856 | if (size > TRUNCATE_STEP) |
| 1856 | err = __logfs_write_inode(inode, 0); | 1857 | size -= TRUNCATE_STEP; |
| 1857 | logfs_put_wblocks(sb, NULL, 1); | 1858 | else |
| 1859 | size = 0; | ||
| 1860 | if (size < target) | ||
| 1861 | size = target; | ||
| 1862 | |||
| 1863 | logfs_get_wblocks(sb, NULL, 1); | ||
| 1864 | err = __logfs_truncate(inode, size); | ||
| 1865 | if (!err) | ||
| 1866 | err = __logfs_write_inode(inode, 0); | ||
| 1867 | logfs_put_wblocks(sb, NULL, 1); | ||
| 1868 | } | ||
| 1858 | 1869 | ||
| 1859 | if (!err) | 1870 | if (!err) |
| 1860 | err = vmtruncate(inode, size); | 1871 | err = vmtruncate(inode, target); |
| 1861 | 1872 | ||
| 1862 | /* I don't trust error recovery yet. */ | 1873 | /* I don't trust error recovery yet. */ |
| 1863 | WARN_ON(err); | 1874 | WARN_ON(err); |
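
The loop above replaces one long lock hold with many short ones: i_size is first rounded up to a TRUNCATE_STEP boundary, then lowered one step per iteration, and the wblock lock is dropped between steps so GC can reclaim segments mid-truncate. A userspace sketch of the step-down arithmetic; do_truncate_step() stands in for the locked __logfs_truncate() plus __logfs_write_inode() pair:

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN(x, a)   (((x) + (a) - 1) & ~(uint64_t)((a) - 1))
    #define TRUNCATE_STEP (8 * 1024 * 1024)   /* 8MiB per pass */

    static void do_truncate_step(uint64_t to)
    {
            printf("truncate down to %llu\n", (unsigned long long)to);
    }

    int main(void)
    {
            uint64_t target = 1000;
            uint64_t size = 50ULL * 1024 * 1024;  /* current i_size */

            size = ALIGN(size, TRUNCATE_STEP);    /* rounds up to 56MiB */
            while (size > target) {
                    if (size > TRUNCATE_STEP)
                            size -= TRUNCATE_STEP;
                    else
                            size = 0;
                    if (size < target)
                            size = target;
                    /* lock, truncate one step, write inode, unlock */
                    do_truncate_step(size);
            }
            return 0;
    }
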
| @@ -2251,8 +2262,6 @@ void logfs_cleanup_rw(struct super_block *sb) | |||
| 2251 | struct logfs_super *super = logfs_super(sb); | 2262 | struct logfs_super *super = logfs_super(sb); |
| 2252 | 2263 | ||
| 2253 | destroy_meta_inode(super->s_segfile_inode); | 2264 | destroy_meta_inode(super->s_segfile_inode); |
| 2254 | if (super->s_block_pool) | 2265 | logfs_mempool_destroy(super->s_block_pool); |
| 2255 | mempool_destroy(super->s_block_pool); | 2266 | logfs_mempool_destroy(super->s_shadow_pool); |
| 2256 | if (super->s_shadow_pool) | ||
| 2257 | mempool_destroy(super->s_shadow_pool); | ||
| 2258 | } | 2267 | } |
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index 801a3a141625..f77ce2b470ba 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c | |||
| @@ -183,14 +183,8 @@ static int btree_write_alias(struct super_block *sb, struct logfs_block *block, | |||
| 183 | return 0; | 183 | return 0; |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | static gc_level_t btree_block_level(struct logfs_block *block) | ||
| 187 | { | ||
| 188 | return expand_level(block->ino, block->level); | ||
| 189 | } | ||
| 190 | |||
| 191 | static struct logfs_block_ops btree_block_ops = { | 186 | static struct logfs_block_ops btree_block_ops = { |
| 192 | .write_block = btree_write_block, | 187 | .write_block = btree_write_block, |
| 193 | .block_level = btree_block_level, | ||
| 194 | .free_block = __free_block, | 188 | .free_block = __free_block, |
| 195 | .write_alias = btree_write_alias, | 189 | .write_alias = btree_write_alias, |
| 196 | }; | 190 | }; |
| @@ -919,7 +913,7 @@ err: | |||
| 919 | for (i--; i >= 0; i--) | 913 | for (i--; i >= 0; i--) |
| 920 | free_area(super->s_area[i]); | 914 | free_area(super->s_area[i]); |
| 921 | free_area(super->s_journal_area); | 915 | free_area(super->s_journal_area); |
| 922 | mempool_destroy(super->s_alias_pool); | 916 | logfs_mempool_destroy(super->s_alias_pool); |
| 923 | return -ENOMEM; | 917 | return -ENOMEM; |
| 924 | } | 918 | } |
| 925 | 919 | ||
diff --git a/fs/logfs/super.c b/fs/logfs/super.c index b60bfac3263c..5866ee6e1327 100644 --- a/fs/logfs/super.c +++ b/fs/logfs/super.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include "logfs.h" | 12 | #include "logfs.h" |
| 13 | #include <linux/bio.h> | 13 | #include <linux/bio.h> |
| 14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
| 15 | #include <linux/blkdev.h> | ||
| 15 | #include <linux/mtd/mtd.h> | 16 | #include <linux/mtd/mtd.h> |
| 16 | #include <linux/statfs.h> | 17 | #include <linux/statfs.h> |
| 17 | #include <linux/buffer_head.h> | 18 | #include <linux/buffer_head.h> |
| @@ -137,6 +138,10 @@ static int logfs_sb_set(struct super_block *sb, void *_super) | |||
| 137 | sb->s_fs_info = super; | 138 | sb->s_fs_info = super; |
| 138 | sb->s_mtd = super->s_mtd; | 139 | sb->s_mtd = super->s_mtd; |
| 139 | sb->s_bdev = super->s_bdev; | 140 | sb->s_bdev = super->s_bdev; |
| 141 | if (sb->s_bdev) | ||
| 142 | sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; | ||
| 143 | if (sb->s_mtd) | ||
| 144 | sb->s_bdi = sb->s_mtd->backing_dev_info; | ||
| 140 | return 0; | 145 | return 0; |
| 141 | } | 146 | } |
| 142 | 147 | ||
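
Assigning sb->s_bdi matters because per-superblock writeback is driven through the backing_dev_info the superblock points at; a logfs mount sits on either a block device or an MTD, so whichever backend exists supplies the BDI, and with the MTD assignment last it wins in the hypothetical case that both are set. The same selection, restated as a hedged standalone helper (logfs does this inline in logfs_sb_set()):

    static void logfs_pick_bdi(struct super_block *sb)
    {
            if (sb->s_bdev)     /* block-device backend */
                    sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
            if (sb->s_mtd)      /* MTD backend overrides if present */
                    sb->s_bdi = sb->s_mtd->backing_dev_info;
    }
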
| @@ -452,6 +457,8 @@ static int logfs_read_sb(struct super_block *sb, int read_only) | |||
| 452 | 457 | ||
| 453 | btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool); | 458 | btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool); |
| 454 | btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool); | 459 | btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool); |
| 460 | btree_init_mempool32(&super->s_shadow_tree.segment_map, | ||
| 461 | super->s_btree_pool); | ||
| 455 | 462 | ||
| 456 | ret = logfs_init_mapping(sb); | 463 | ret = logfs_init_mapping(sb); |
| 457 | if (ret) | 464 | if (ret) |
| @@ -516,8 +523,8 @@ static void logfs_kill_sb(struct super_block *sb) | |||
| 516 | if (super->s_erase_page) | 523 | if (super->s_erase_page) |
| 517 | __free_page(super->s_erase_page); | 524 | __free_page(super->s_erase_page); |
| 518 | super->s_devops->put_device(sb); | 525 | super->s_devops->put_device(sb); |
| 519 | mempool_destroy(super->s_btree_pool); | 526 | logfs_mempool_destroy(super->s_btree_pool); |
| 520 | mempool_destroy(super->s_alias_pool); | 527 | logfs_mempool_destroy(super->s_alias_pool); |
| 521 | kfree(super); | 528 | kfree(super); |
| 522 | log_super("LogFS: Finished unmounting\n"); | 529 | log_super("LogFS: Finished unmounting\n"); |
| 523 | } | 530 | } |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a3fd0f91d943..169d07758ee5 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -54,7 +54,7 @@ extern struct kmem_cache *kvm_vcpu_cache; | |||
| 54 | */ | 54 | */ |
| 55 | struct kvm_io_bus { | 55 | struct kvm_io_bus { |
| 56 | int dev_count; | 56 | int dev_count; |
| 57 | #define NR_IOBUS_DEVS 6 | 57 | #define NR_IOBUS_DEVS 200 |
| 58 | struct kvm_io_device *devs[NR_IOBUS_DEVS]; | 58 | struct kvm_io_device *devs[NR_IOBUS_DEVS]; |
| 59 | }; | 59 | }; |
| 60 | 60 | ||
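
Bumping NR_IOBUS_DEVS from 6 to 200 leaves room for guests that register many in-kernel I/O devices, ioeventfd-style users consuming one slot each. The array stays fixed-size, so registration still has to fail gracefully when it fills; a sketch of the bounded registration this limit governs, simplified from the kvm_io_bus code and assuming -ENOSPC as the error value:

    #include <linux/errno.h>

    struct kvm_io_device;           /* opaque for this sketch */

    #define NR_IOBUS_DEVS 200

    struct kvm_io_bus {
            int dev_count;
            struct kvm_io_device *devs[NR_IOBUS_DEVS];
    };

    static int bus_register_dev(struct kvm_io_bus *bus,
                                struct kvm_io_device *dev)
    {
            if (bus->dev_count >= NR_IOBUS_DEVS)
                    return -ENOSPC;         /* bus is full */
            bus->devs[bus->dev_count++] = dev;
            return 0;
    }
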
| @@ -119,6 +119,11 @@ struct kvm_memory_slot { | |||
| 119 | int user_alloc; | 119 | int user_alloc; |
| 120 | }; | 120 | }; |
| 121 | 121 | ||
| 122 | static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) | ||
| 123 | { | ||
| 124 | return ALIGN(memslot->npages, BITS_PER_LONG) / 8; | ||
| 125 | } | ||
| 126 | |||
| 122 | struct kvm_kernel_irq_routing_entry { | 127 | struct kvm_kernel_irq_routing_entry { |
| 123 | u32 gsi; | 128 | u32 gsi; |
| 124 | u32 type; | 129 | u32 type; |
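
kvm_dirty_bitmap_bytes() replaces the open-coded ALIGN(npages, BITS_PER_LONG) / 8 at every call site and returns unsigned long, so very large memory slots no longer truncate through an int. Worked example: 1000 pages round up to 1024 bits on a 64-bit host, i.e. a 128-byte bitmap. As a runnable userspace check:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(long))
    /* division form of ALIGN suffices for this round-up */
    #define ALIGN(x, a)   (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
            unsigned long npages = 1000;
            unsigned long bytes = ALIGN(npages, BITS_PER_LONG) / 8;

            /* 1000 pages -> 1024 bits -> 128 bytes on a 64-bit host */
            printf("%lu pages need %lu bitmap bytes\n", npages, bytes);
            return 0;
    }
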
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 28c9fd020d39..ebd747265294 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h | |||
| @@ -183,9 +183,13 @@ static inline struct regulator *__must_check regulator_get(struct device *dev, | |||
| 183 | { | 183 | { |
| 184 | /* Nothing except the stubbed out regulator API should be | 184 | /* Nothing except the stubbed out regulator API should be |
| 185 | * looking at the value except to check if it is an error | 185 | * looking at the value except to check if it is an error |
| 186 | * value so the actual return value doesn't matter. | 186 | * value. Drivers are free to handle NULL specifically by |
| 187 | * skipping all regulator API calls, but they don't have to. | ||
| 188 | * Drivers which don't should make sure they properly handle | ||
| 189 | * corner cases of the API, such as regulator_get_voltage() | ||
| 190 | * returning 0. | ||
| 187 | */ | 191 | */ |
| 188 | return (struct regulator *)id; | 192 | return NULL; |
| 189 | } | 193 | } |
| 190 | static inline void regulator_put(struct regulator *regulator) | 194 | static inline void regulator_put(struct regulator *regulator) |
| 191 | { | 195 | { |
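
Returning NULL from the stubbed regulator_get() keeps IS_ERR() reporting success while giving drivers a recognizable sentinel; the old (struct regulator *)id cookie looked like a live handle and could never be told apart from one. A hedged consumer-side sketch of the optional NULL-skip the new comment describes; the "vdd" supply name and probe shape are illustrative:

    static int example_probe(struct device *dev)
    {
            struct regulator *vdd = regulator_get(dev, "vdd");
            int err = 0;

            if (IS_ERR(vdd))
                    return PTR_ERR(vdd);  /* real failure, real API */
            if (vdd)                      /* NULL means stubbed API */
                    err = regulator_enable(vdd);
            return err;
    }
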
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5a0cd194dce0..c82ae2492634 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -341,7 +341,11 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn, | |||
| 341 | struct mm_struct *mm) | 341 | struct mm_struct *mm) |
| 342 | { | 342 | { |
| 343 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | 343 | struct kvm *kvm = mmu_notifier_to_kvm(mn); |
| 344 | int idx; | ||
| 345 | |||
| 346 | idx = srcu_read_lock(&kvm->srcu); | ||
| 344 | kvm_arch_flush_shadow(kvm); | 347 | kvm_arch_flush_shadow(kvm); |
| 348 | srcu_read_unlock(&kvm->srcu, idx); | ||
| 345 | } | 349 | } |
| 346 | 350 | ||
| 347 | static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { | 351 | static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { |
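
After KVM's memslots were converted to SRCU protection, kvm_arch_flush_shadow() may only run inside an SRCU read-side critical section because it walks kvm->memslots; the release notifier was a caller that never took it. SRCU rather than plain RCU is used because flushing shadow page tables can sleep. The read-side pattern, as used throughout kvm_main.c:

    int idx;

    idx = srcu_read_lock(&kvm->srcu);     /* pin the current memslots */
    kvm_arch_flush_shadow(kvm);           /* may sleep; SRCU allows it */
    srcu_read_unlock(&kvm->srcu, idx);
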
| @@ -648,7 +652,7 @@ skip_lpage: | |||
| 648 | 652 | ||
| 649 | /* Allocate page dirty bitmap if needed */ | 653 | /* Allocate page dirty bitmap if needed */ |
| 650 | if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { | 654 | if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { |
| 651 | unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8; | 655 | unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new); |
| 652 | 656 | ||
| 653 | new.dirty_bitmap = vmalloc(dirty_bytes); | 657 | new.dirty_bitmap = vmalloc(dirty_bytes); |
| 654 | if (!new.dirty_bitmap) | 658 | if (!new.dirty_bitmap) |
| @@ -768,7 +772,7 @@ int kvm_get_dirty_log(struct kvm *kvm, | |||
| 768 | { | 772 | { |
| 769 | struct kvm_memory_slot *memslot; | 773 | struct kvm_memory_slot *memslot; |
| 770 | int r, i; | 774 | int r, i; |
| 771 | int n; | 775 | unsigned long n; |
| 772 | unsigned long any = 0; | 776 | unsigned long any = 0; |
| 773 | 777 | ||
| 774 | r = -EINVAL; | 778 | r = -EINVAL; |
| @@ -780,7 +784,7 @@ int kvm_get_dirty_log(struct kvm *kvm, | |||
| 780 | if (!memslot->dirty_bitmap) | 784 | if (!memslot->dirty_bitmap) |
| 781 | goto out; | 785 | goto out; |
| 782 | 786 | ||
| 783 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | 787 | n = kvm_dirty_bitmap_bytes(memslot); |
| 784 | 788 | ||
| 785 | for (i = 0; !any && i < n/sizeof(long); ++i) | 789 | for (i = 0; !any && i < n/sizeof(long); ++i) |
| 786 | any = memslot->dirty_bitmap[i]; | 790 | any = memslot->dirty_bitmap[i]; |
| @@ -1186,10 +1190,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn) | |||
| 1186 | memslot = gfn_to_memslot_unaliased(kvm, gfn); | 1190 | memslot = gfn_to_memslot_unaliased(kvm, gfn); |
| 1187 | if (memslot && memslot->dirty_bitmap) { | 1191 | if (memslot && memslot->dirty_bitmap) { |
| 1188 | unsigned long rel_gfn = gfn - memslot->base_gfn; | 1192 | unsigned long rel_gfn = gfn - memslot->base_gfn; |
| 1193 | unsigned long *p = memslot->dirty_bitmap + | ||
| 1194 | rel_gfn / BITS_PER_LONG; | ||
| 1195 | int offset = rel_gfn % BITS_PER_LONG; | ||
| 1189 | 1196 | ||
| 1190 | /* avoid RMW */ | 1197 | /* avoid RMW */ |
| 1191 | if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap)) | 1198 | if (!generic_test_le_bit(offset, p)) |
| 1192 | generic___set_le_bit(rel_gfn, memslot->dirty_bitmap); | 1199 | generic___set_le_bit(offset, p); |
| 1193 | } | 1200 | } |
| 1194 | } | 1201 | } |
| 1195 | 1202 | ||
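
The mark_page_dirty() change is an overflow fix, not a refactor: the generic little-endian bit helpers take the bit number as int, so passing rel_gfn directly breaks once a slot spans 2^31 or more pages. Advancing the word pointer first keeps the residual offset below BITS_PER_LONG. A userspace sketch using native bit order (KVM uses the *_le_bit variants):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static void set_bit_safe(unsigned long rel_gfn, unsigned long *bitmap)
    {
            unsigned long *p = bitmap + rel_gfn / BITS_PER_LONG;
            int offset = (int)(rel_gfn % BITS_PER_LONG); /* always fits */

            if (!(*p & (1UL << offset)))  /* avoid RMW if already set */
                    *p |= 1UL << offset;
    }

    int main(void)
    {
            unsigned long bitmap[8] = { 0 };
            unsigned long gfn = 130;

            set_bit_safe(gfn, bitmap);
            printf("word %lu = %#lx\n", gfn / BITS_PER_LONG,
                   bitmap[gfn / BITS_PER_LONG]);  /* bit 2 set: 0x4 */
            return 0;
    }
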
