146 files changed, 2195 insertions, 588 deletions
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index f5395af88a41..40ada93b820a 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -234,7 +234,7 @@ process is as follows:
 Linus, usually the patches that have already been included in the
 -next kernel for a few weeks. The preferred way to submit big changes
 is using git (the kernel's source management tool, more information
-can be found at http://git.or.cz/) but plain patches are also just
+can be found at http://git-scm.com/) but plain patches are also just
 fine.
 - After two weeks a -rc1 kernel is released it is now possible to push
 only patches that do not include new features that could affect the
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index fd588ff0e296..a1ca5924faff 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -235,8 +235,7 @@ containing the following files describing that cgroup:
 - cgroup.procs: list of tgids in the cgroup. This list is not
 guaranteed to be sorted or free of duplicate tgids, and userspace
 should sort/uniquify the list if this property is required.
-Writing a tgid into this file moves all threads with that tgid into
-this cgroup.
+This is a read-only file, for now.
 - notify_on_release flag: run the release agent on exit?
 - release_agent: the path to use for release notifications (this file
 exists in the top cgroup only)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e2202e93b148..839b21b0699a 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1194,7 +1194,7 @@ and is between 256 and 4096 characters. It is defined in the file
 
 libata.force=	[LIBATA] Force configurations. The format is comma
 separated list of "[ID:]VAL" where ID is
-PORT[:DEVICE]. PORT and DEVICE are decimal numbers
+PORT[.DEVICE]. PORT and DEVICE are decimal numbers
 matching port, link or device. Basically, it matches
 the ATA ID string printed on console by libata. If
 the whole ID part is omitted, the last PORT and DEVICE
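To illustrate the corrected ID syntax above (the values are hypothetical, picked from the VAL names this entry goes on to document, and are not part of this change): "3" addresses port 3 as a whole, while "3.00" addresses device 0 on port 3, so a boot line such as

    libata.force=3.00:noncq,4:1.5Gbps

would disable NCQ on port 3 device 0 and limit port 4 to 1.5 Gbps.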
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index 5effa5bd993b..e213f45cf9d7 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -18,16 +18,15 @@ Rules on what kind of patches are accepted, and which ones are not, into the
 - It cannot contain any "trivial" fixes in it (spelling changes,
 whitespace cleanups, etc).
 - It must follow the Documentation/SubmittingPatches rules.
-- It or an equivalent fix must already exist in Linus' tree. Quote the
-respective commit ID in Linus' tree in your patch submission to -stable.
+- It or an equivalent fix must already exist in Linus' tree (upstream).
 
 
 Procedure for submitting patches to the -stable tree:
 
 - Send the patch, after verifying that it follows the above rules, to
-stable@kernel.org.
-- To have the patch automatically included in the stable tree, add the
-the tag
+stable@kernel.org. You must note the upstream commit ID in the changelog
+of your submission.
+- To have the patch automatically included in the stable tree, add the tag
 Cc: stable@kernel.org
 in the sign-off area. Once the patch is merged it will be applied to
 the stable tree without anything else needing to be done by the author
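In practice a -stable submission following the two changes above carries the upstream reference near the top of its changelog (commonly written as "commit <sha1> upstream.") and ends with a tag block along these lines; the name and address are placeholders, not taken from this patch:

    commit <sha1 of the corresponding upstream commit> upstream.

    Signed-off-by: Some Developer <developer@example.com>
    Cc: stable@kernel.org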
diff --git a/MAINTAINERS b/MAINTAINERS
index a0e3c3a47a51..183887518fe3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1960,7 +1960,7 @@ F: lib/kobj*
 
 DRM DRIVERS
 M: David Airlie <airlied@linux.ie>
-L: dri-devel@lists.sourceforge.net
+L: dri-devel@lists.freedesktop.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
 S: Maintained
 F: drivers/gpu/drm/
@@ -4791,12 +4791,11 @@ F: drivers/s390/crypto/
 
 S390 ZFCP DRIVER
 M: Christof Schmitt <christof.schmitt@de.ibm.com>
-M: Martin Peschke <mp3@de.ibm.com>
+M: Swen Schillig <swen@vnet.ibm.com>
 M: linux390@de.ibm.com
 L: linux-s390@vger.kernel.org
 W: http://www.ibm.com/developerworks/linux/linux390/
 S: Supported
-F: Documentation/s390/zfcpdump.txt
 F: drivers/s390/scsi/zfcp_*
 
 S390 IUCV NETWORK LAYER
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 73c5c2b05f64..7f3c0a2e60cd 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1802,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 {
 struct kvm_memory_slot *memslot;
 int r, i;
-long n, base;
+long base;
+unsigned long n;
 unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
 offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
@@ -1815,7 +1816,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 if (!memslot->dirty_bitmap)
 goto out;
 
-n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+n = kvm_dirty_bitmap_bytes(memslot);
 base = memslot->base_gfn / BITS_PER_LONG;
 
 for (i = 0; i < n/sizeof(long); ++i) {
@@ -1831,7 +1832,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 struct kvm_dirty_log *log)
 {
 int r;
-int n;
+unsigned long n;
 struct kvm_memory_slot *memslot;
 int is_dirty = 0;
 
@@ -1850,7 +1851,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 if (is_dirty) {
 kvm_flush_remote_tlbs(kvm);
 memslot = &kvm->memslots->memslots[log->slot];
-n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+n = kvm_dirty_bitmap_bytes(memslot);
 memset(memslot->dirty_bitmap, 0, n);
 }
 r = 0;
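kvm_dirty_bitmap_bytes() itself is not visible in this excerpt; judging strictly from the expression it replaces at every call site, its definition is presumably a one-line helper along these lines (a sketch, not the patch's actual code):

    /* bytes needed for a memslot's dirty bitmap, sketched from the old open-coded form */
    static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
    {
            return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
    }

Widening n to unsigned long at the call sites goes with it, so the byte count cannot wrap for very large memory slots.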
diff --git a/arch/m68k/include/asm/mcfuart.h b/arch/m68k/include/asm/mcfuart.h
index ef2293873612..01a8716c5fc5 100644
--- a/arch/m68k/include/asm/mcfuart.h
+++ b/arch/m68k/include/asm/mcfuart.h
@@ -212,5 +212,10 @@ struct mcf_platform_uart {
 #define MCFUART_URF_RXS 0xc0 /* Receiver status */
 #endif
 
+#if defined(CONFIG_M5272)
+#define MCFUART_TXFIFOSIZE 25
+#else
+#define MCFUART_TXFIFOSIZE 1
+#endif
 /****************************************************************************/
 #endif /* mcfuart_h */
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index ce404bc9ccbd..14042574ac21 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -94,7 +94,7 @@ cflags-$(CONFIG_M520x) := $(call cc-option,-mcpu=5208,-m5200)
 cflags-$(CONFIG_M523x) := $(call cc-option,-mcpu=523x,-m5307)
 cflags-$(CONFIG_M5249) := $(call cc-option,-mcpu=5249,-m5200)
 cflags-$(CONFIG_M5271) := $(call cc-option,-mcpu=5271,-m5307)
-cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5271,-m5200)
+cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5272,-m5307)
 cflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307)
 cflags-$(CONFIG_M528x) := $(call cc-option,-m528x,-m5307)
 cflags-$(CONFIG_M5307) := $(call cc-option,-m5307,-m5200)
diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S
index 56043ade3941..aff6f57ef8b5 100644
--- a/arch/m68knommu/kernel/entry.S
+++ b/arch/m68knommu/kernel/entry.S
@@ -145,6 +145,6 @@ ENTRY(ret_from_user_signal)
 trap #0
 
 ENTRY(ret_from_user_rt_signal)
-move #__NR_rt_sigreturn,%d0
+movel #__NR_rt_sigreturn,%d0
 trap #0
 
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index 1143f77caca4..6f22970d8c20 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -107,7 +107,6 @@ void init_IRQ(void)
 _ramvec[vba+CPMVEC_PIO_PC7] = inthandler;  /* pio - pc7 */
 _ramvec[vba+CPMVEC_PIO_PC6] = inthandler;  /* pio - pc6 */
 _ramvec[vba+CPMVEC_TIMER3] = inthandler;  /* timer 3 */
-_ramvec[vba+CPMVEC_RISCTIMER] = inthandler;  /* reserved */
 _ramvec[vba+CPMVEC_PIO_PC5] = inthandler;  /* pio - pc5 */
 _ramvec[vba+CPMVEC_PIO_PC4] = inthandler;  /* pio - pc4 */
 _ramvec[vba+CPMVEC_RESERVED2] = inthandler;  /* reserved */
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 25da07fd9f77..604af29b71ed 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1004,7 +1004,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 struct kvm_vcpu *vcpu;
 ulong ga, ga_end;
 int is_dirty = 0;
-int r, n;
+int r;
+unsigned long n;
 
 mutex_lock(&kvm->slots_lock);
 
@@ -1022,7 +1023,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 kvm_for_each_vcpu(n, vcpu, kvm)
 kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
 
-n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+n = kvm_dirty_bitmap_bytes(memslot);
 memset(memslot->dirty_bitmap, 0, n);
 }
 
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 4a76d9480cce..533f35751aeb 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -29,6 +29,7 @@ struct vdso_data {
 __u32 tz_minuteswest;	/* Minutes west of Greenwich	0x30 */
 __u32 tz_dsttime;	/* Type of dst correction	0x34 */
 __u32 ectg_available;
+__u32 ntp_mult;	/* NTP adjusted multiplier	0x3C */
 };
 
 struct vdso_per_cpu_data {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 08db736dded0..a09408952ed0 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -61,6 +61,7 @@ int main(void)
 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
+DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
 DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
 DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
 /* constants used by the vdso */
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index b354427e03b7..c56d3f56d020 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -256,6 +256,9 @@ restore_registers:
 lghi %r2,0
 brasl %r14,arch_set_page_states
 
+/* Reinitialize the channel subsystem */
+brasl %r14,channel_subsystem_reinit
+
 /* Return 0 */
 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
 lghi %r2,0
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index fba6dec156bf..d906bf19c14a 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -221,6 +221,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
 vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+vdso_data->ntp_mult = mult;
 smp_wmb();
 ++vdso_data->tb_update_count;
 }
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 4a98909a8310..969643954273 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -38,13 +38,13 @@ __kernel_clock_gettime:
 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
 brc 3,2f
 ahi %r0,-1
-2: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
+2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
 lr %r2,%r0
-lhi %r0,1000
+l %r0,__VDSO_NTP_MULT(%r5)
 ltr %r1,%r1
 mr %r0,%r0
 jnm 3f
-ahi %r0,1000
+a %r0,__VDSO_NTP_MULT(%r5)
 3: alr %r0,%r2
 srdl %r0,12
 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
@@ -86,13 +86,13 @@ __kernel_clock_gettime:
 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
 brc 3,12f
 ahi %r0,-1
-12: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
+12: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
 lr %r2,%r0
-lhi %r0,1000
+l %r0,__VDSO_NTP_MULT(%r5)
 ltr %r1,%r1
 mr %r0,%r0
 jnm 13f
-ahi %r0,1000
+a %r0,__VDSO_NTP_MULT(%r5)
 13: alr %r0,%r2
 srdl %r0,12
 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index ad8acfc949fb..2d3633175e3b 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -35,13 +35,13 @@ __kernel_gettimeofday:
 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
 brc 3,3f
 ahi %r0,-1
-3: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
+3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
 st %r0,24(%r15)
-lhi %r0,1000
+l %r0,__VDSO_NTP_MULT(%r5)
 ltr %r1,%r1
 mr %r0,%r0
 jnm 4f
-ahi %r0,1000
+a %r0,__VDSO_NTP_MULT(%r5)
 4: al %r0,24(%r15)
 srdl %r0,12
 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 49106c6e6f88..f40467884a03 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -36,7 +36,7 @@ __kernel_clock_gettime:
 stck 48(%r15) /* Store TOD clock */
 lg %r1,48(%r15)
 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
-mghi %r1,1000
+msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
 lg %r0,__VDSO_XTIME_SEC(%r5)
@@ -64,7 +64,7 @@ __kernel_clock_gettime:
 stck 48(%r15) /* Store TOD clock */
 lg %r1,48(%r15)
 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
-mghi %r1,1000
+msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
 lg %r0,__VDSO_XTIME_SEC(%r5)
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index f873e75634e1..36ee674722ec 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,7 +31,7 @@ __kernel_gettimeofday:
 stck 48(%r15) /* Store TOD clock */
 lg %r1,48(%r15)
 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
-mghi %r1,1000
+msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
 lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 64cda95f59ca..7a656bd8bd3c 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -6,6 +6,7 @@
 #include "linux/irqreturn.h"
 #include "linux/kd.h"
 #include "linux/sched.h"
+#include "linux/slab.h"
 #include "chan_kern.h"
 #include "irq_kern.h"
 #include "irq_user.h"
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index 06d6ccf0e444..b6b1096152aa 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -8,7 +8,6 @@
 #include <errno.h>
 #include <sched.h>
 #include <linux/limits.h>
-#include <linux/slab.h>
 #include <sys/socket.h>
 #include <sys/wait.h>
 #include "kern_constants.h"
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 1cbed97b59cf..dfdb4dba2320 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -22,6 +22,7 @@
 */
 
 #include <linux/dmi.h>
+#include <linux/module.h>
 #include <asm/div64.h>
 #include <asm/vmware.h>
 #include <asm/x86_init.h>
@@ -101,6 +102,7 @@ int vmware_platform(void)
 
 return 0;
 }
+EXPORT_SYMBOL(vmware_platform);
 
 /*
 * VMware hypervisor takes care of exporting a reliable TSC to the guest.
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 48aeee8eefb0..19a8906bcaa2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1490,8 +1490,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 for_each_sp(pages, sp, parents, i) {
 kvm_mmu_zap_page(kvm, sp);
 mmu_pages_clear_parents(&parents);
+zapped++;
 }
-zapped += pages.nr;
 kvm_mmu_pages_init(parent, &parents, &pages);
 }
 
@@ -1542,14 +1542,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 */
 
 if (used_pages > kvm_nr_mmu_pages) {
-while (used_pages > kvm_nr_mmu_pages) {
+while (used_pages > kvm_nr_mmu_pages &&
+!list_empty(&kvm->arch.active_mmu_pages)) {
 struct kvm_mmu_page *page;
 
 page = container_of(kvm->arch.active_mmu_pages.prev,
 struct kvm_mmu_page, link);
-kvm_mmu_zap_page(kvm, page);
+used_pages -= kvm_mmu_zap_page(kvm, page);
 used_pages--;
 }
+kvm_nr_mmu_pages = used_pages;
 kvm->arch.n_free_mmu_pages = 0;
 }
 else
@@ -1596,7 +1598,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 && !sp->role.invalid) {
 pgprintk("%s: zap %lx %x\n",
 __func__, gfn, sp->role.word);
-kvm_mmu_zap_page(kvm, sp);
+if (kvm_mmu_zap_page(kvm, sp))
+nn = bucket->first;
 }
 }
 }
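A note on the accounting above, inferred from how the return value is consumed rather than from code shown here: kvm_mmu_zap_page() evidently reports how many additional shadow pages it tore down (the unsync children now counted by the zapped++ added in the first hunk), so the eviction loop charges for those and then for the page it passed in:

    /* illustrative only: a zap that drags two unsync children with it */
    used_pages -= kvm_mmu_zap_page(kvm, page);  /* children: used_pages -= 2 */
    used_pages--;                               /* ...and the page itself    */

The added !list_empty() guard and the final kvm_nr_mmu_pages = used_pages keep the limit consistent when a single iteration frees more than one page.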
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 445c59411ed0..2ba58206812a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -706,29 +706,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 if (err)
 goto free_svm;
 
+err = -ENOMEM;
 page = alloc_page(GFP_KERNEL);
-if (!page) {
-err = -ENOMEM;
+if (!page)
 goto uninit;
-}
 
-err = -ENOMEM;
 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
 if (!msrpm_pages)
-goto uninit;
+goto free_page1;
 
 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
 if (!nested_msrpm_pages)
-goto uninit;
-
-svm->msrpm = page_address(msrpm_pages);
-svm_vcpu_init_msrpm(svm->msrpm);
+goto free_page2;
 
 hsave_page = alloc_page(GFP_KERNEL);
 if (!hsave_page)
-goto uninit;
+goto free_page3;
+
 svm->nested.hsave = page_address(hsave_page);
 
+svm->msrpm = page_address(msrpm_pages);
+svm_vcpu_init_msrpm(svm->msrpm);
+
 svm->nested.msrpm = page_address(nested_msrpm_pages);
 
 svm->vmcb = page_address(page);
@@ -744,6 +743,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
 return &svm->vcpu;
 
+free_page3:
+__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page2:
+__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page1:
+__free_page(page);
 uninit:
 kvm_vcpu_uninit(&svm->vcpu);
 free_svm:
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 686492ed3079..bc933cfb4e66 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -77,6 +77,8 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
 
+#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
+
 /*
 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
 * ple_gap: upper bound on the amount of time between two successive
@@ -131,7 +133,7 @@ struct vcpu_vmx {
 } host_state;
 struct {
 int vm86_active;
-u8 save_iopl;
+ulong save_rflags;
 struct kvm_save_segment {
 u16 selector;
 unsigned long base;
@@ -818,18 +820,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
-unsigned long rflags;
+unsigned long rflags, save_rflags;
 
 rflags = vmcs_readl(GUEST_RFLAGS);
-if (to_vmx(vcpu)->rmode.vm86_active)
-rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+if (to_vmx(vcpu)->rmode.vm86_active) {
+rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+save_rflags = to_vmx(vcpu)->rmode.save_rflags;
+rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+}
 return rflags;
 }
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-if (to_vmx(vcpu)->rmode.vm86_active)
+if (to_vmx(vcpu)->rmode.vm86_active) {
+to_vmx(vcpu)->rmode.save_rflags = rflags;
 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+}
 vmcs_writel(GUEST_RFLAGS, rflags);
 }
 
@@ -1483,8 +1490,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
 
 flags = vmcs_readl(GUEST_RFLAGS);
-flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
-flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
+flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
 vmcs_writel(GUEST_RFLAGS, flags);
 
 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1557,8 +1564,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
 flags = vmcs_readl(GUEST_RFLAGS);
-vmx->rmode.save_iopl
-= (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+vmx->rmode.save_rflags = flags;
 
 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 24cd0ee896e9..3c4ca98ad27f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -433,8 +433,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 #ifdef CONFIG_X86_64
 if (cr0 & 0xffffffff00000000UL) {
-printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-cr0, kvm_read_cr0(vcpu));
 kvm_inject_gp(vcpu, 0);
 return;
 }
@@ -443,14 +441,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 cr0 &= ~CR0_RESERVED_BITS;
 
 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
-printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
 
 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
-printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
-"and a clear PE flag\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
@@ -461,15 +456,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 int cs_db, cs_l;
 
 if (!is_pae(vcpu)) {
-printk(KERN_DEBUG "set_cr0: #GP, start paging "
-"in long mode while PAE is disabled\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 if (cs_l) {
-printk(KERN_DEBUG "set_cr0: #GP, start paging "
-"in long mode while CS.L == 1\n");
 kvm_inject_gp(vcpu, 0);
 return;
 
@@ -477,8 +468,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 } else
 #endif
 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
-"reserved bits\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
@@ -505,28 +494,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
 
 if (cr4 & CR4_RESERVED_BITS) {
-printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
 
 if (is_long_mode(vcpu)) {
 if (!(cr4 & X86_CR4_PAE)) {
-printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
-"in long mode\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 && ((cr4 ^ old_cr4) & pdptr_bits)
 && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
 
 if (cr4 & X86_CR4_VMXE) {
-printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
@@ -547,21 +531,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 if (is_long_mode(vcpu)) {
 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
-printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
 } else {
 if (is_pae(vcpu)) {
 if (cr3 & CR3_PAE_RESERVED_BITS) {
-printk(KERN_DEBUG
-"set_cr3: #GP, reserved bits\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
-printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
-"reserved bits\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
@@ -593,7 +572,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 if (cr8 & CR8_RESERVED_BITS) {
-printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
 kvm_inject_gp(vcpu, 0);
 return;
 }
@@ -649,15 +627,12 @@ static u32 emulated_msrs[] = {
 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 if (efer & efer_reserved_bits) {
-printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
-efer);
 kvm_inject_gp(vcpu, 0);
 return;
 }
 
 if (is_paging(vcpu)
 && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
-printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
@@ -667,7 +642,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
-printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
@@ -678,7 +652,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
-printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
 kvm_inject_gp(vcpu, 0);
 return;
 }
@@ -967,9 +940,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 if (msr >= MSR_IA32_MC0_CTL &&
 msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
 u32 offset = msr - MSR_IA32_MC0_CTL;
-/* only 0 or all 1s can be written to IA32_MCi_CTL */
+/* only 0 or all 1s can be written to IA32_MCi_CTL
+ * some Linux kernels though clear bit 10 in bank 4 to
+ * workaround a BIOS/GART TBL issue on AMD K8s, ignore
+ * this to avoid an uncatched #GP in the guest
+ */
 if ((offset & 0x3) == 0 &&
-data != 0 && data != ~(u64)0)
+data != 0 && (data | (1 << 10)) != ~(u64)0)
 return -1;
 vcpu->arch.mce_banks[offset] = data;
 break;
@@ -2635,8 +2612,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 struct kvm_dirty_log *log)
 {
-int r, n, i;
+int r, i;
 struct kvm_memory_slot *memslot;
+unsigned long n;
 unsigned long is_dirty = 0;
 unsigned long *dirty_bitmap = NULL;
 
@@ -2651,7 +2629,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 if (!memslot->dirty_bitmap)
 goto out;
 
-n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+n = kvm_dirty_bitmap_bytes(memslot);
 
 r = -ENOMEM;
 dirty_bitmap = vmalloc(n);
@@ -4483,7 +4461,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 kvm_set_cr8(vcpu, kvm_run->cr8);
 
 if (vcpu->arch.pio.cur_count) {
+vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 r = complete_pio(vcpu);
+srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 if (r)
 goto out;
 }
@@ -5146,6 +5126,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 int ret = 0;
 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
+u32 desc_limit;
 
 old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
 
@@ -5168,7 +5149,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 }
 }
 
-if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
+desc_limit = get_desc_limit(&nseg_desc);
+if (!nseg_desc.p ||
+((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
+desc_limit < 0x2b)) {
 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
 return 1;
 }
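To make the relaxed IA32_MCi_CTL check in set_msr_mce() concrete (a worked example, not code from the patch): writing 0xfffffffffffffbff, i.e. all ones with only bit 10 cleared, is now accepted because OR-ing bit 10 back in yields ~0, while any other value that is neither 0 nor all ones is still rejected. The predicate can be restated as:

    /* accept 0, ~0, or ~0 with only bit 10 cleared; reject everything else */
    bool valid = (data == 0) || ((data | (1ULL << 10)) == ~(u64)0);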
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index c7b1ebfb7da7..44f83ce02470 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -66,14 +66,44 @@ resource_to_addr(struct acpi_resource *resource,
 struct acpi_resource_address64 *addr)
 {
 acpi_status status;
+struct acpi_resource_memory24 *memory24;
+struct acpi_resource_memory32 *memory32;
+struct acpi_resource_fixed_memory32 *fixed_memory32;
 
-status = acpi_resource_to_address64(resource, addr);
-if (ACPI_SUCCESS(status) &&
-(addr->resource_type == ACPI_MEMORY_RANGE ||
-addr->resource_type == ACPI_IO_RANGE) &&
-addr->address_length > 0 &&
-addr->producer_consumer == ACPI_PRODUCER) {
+memset(addr, 0, sizeof(*addr));
+switch (resource->type) {
+case ACPI_RESOURCE_TYPE_MEMORY24:
+memory24 = &resource->data.memory24;
+addr->resource_type = ACPI_MEMORY_RANGE;
+addr->minimum = memory24->minimum;
+addr->address_length = memory24->address_length;
+addr->maximum = addr->minimum + addr->address_length - 1;
+return AE_OK;
+case ACPI_RESOURCE_TYPE_MEMORY32:
+memory32 = &resource->data.memory32;
+addr->resource_type = ACPI_MEMORY_RANGE;
+addr->minimum = memory32->minimum;
+addr->address_length = memory32->address_length;
+addr->maximum = addr->minimum + addr->address_length - 1;
 return AE_OK;
+case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+fixed_memory32 = &resource->data.fixed_memory32;
+addr->resource_type = ACPI_MEMORY_RANGE;
+addr->minimum = fixed_memory32->address;
+addr->address_length = fixed_memory32->address_length;
+addr->maximum = addr->minimum + addr->address_length - 1;
+return AE_OK;
+case ACPI_RESOURCE_TYPE_ADDRESS16:
+case ACPI_RESOURCE_TYPE_ADDRESS32:
+case ACPI_RESOURCE_TYPE_ADDRESS64:
+status = acpi_resource_to_address64(resource, addr);
+if (ACPI_SUCCESS(status) &&
+(addr->resource_type == ACPI_MEMORY_RANGE ||
+addr->resource_type == ACPI_IO_RANGE) &&
+addr->address_length > 0) {
+return AE_OK;
+}
+break;
 }
 return AE_ERROR;
 }
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 9f6cfac0f2cc..228740f356c9 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 {
 struct ata_port *ap = qc->ap;
+struct request_queue *q = qc->scsicmd->device->request_queue;
+unsigned long flags;
 
 WARN_ON(!ap->ops->error_handler);
 
@@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 * Note that ATA_QCFLAG_FAILED is unconditionally set after
 * this function completes.
 */
+spin_lock_irqsave(q->queue_lock, flags);
 blk_abort_request(qc->scsicmd->request);
+spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /**
@@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
 }
 
 /* okay, this error is ours */
+memset(&tf, 0, sizeof(tf));
 rc = ata_eh_read_log_10h(dev, &tag, &tf);
 if (rc) {
 ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 3c3172d3c34e..4164dd244dd0 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -424,6 +424,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
 PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17),
+PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
@@ -444,6 +446,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
+PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d),
+PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 3784a47865b7..8f5aebfb29df 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -190,7 +190,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
 for (try = 0; try < 5; try++) {
 new = allocate ? old - bandwidth : old + bandwidth;
 if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
-break;
+return -EBUSY;
 
 data[0] = cpu_to_be32(old);
 data[1] = cpu_to_be32(new);
@@ -218,7 +218,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
 {
 __be32 c, all, old;
-int i, retry = 5;
+int i, ret = -EIO, retry = 5;
 
 old = all = allocate ? cpu_to_be32(~0) : 0;
 
@@ -226,6 +226,8 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 if (!(channels_mask & 1 << i))
 continue;
 
+ret = -EBUSY;
+
 c = cpu_to_be32(1 << (31 - i));
 if ((old & c) != (all & c))
 continue;
@@ -251,12 +253,16 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 
 /* 1394-1995 IRM, fall through to retry. */
 default:
-if (retry--)
+if (retry) {
+retry--;
 i--;
+} else {
+ret = -EIO;
+}
 }
 }
 
-return -EIO;
+return ret;
 }
 
 static void deallocate_channel(struct fw_card *card, int irm_id,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 0cf4d7f562c5..94b16e0340ae 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
| @@ -1158,7 +1158,7 @@ static void handle_local_lock(struct fw_ohci *ohci, | |||
| 1158 | struct fw_packet *packet, u32 csr) | 1158 | struct fw_packet *packet, u32 csr) |
| 1159 | { | 1159 | { |
| 1160 | struct fw_packet response; | 1160 | struct fw_packet response; |
| 1161 | int tcode, length, ext_tcode, sel; | 1161 | int tcode, length, ext_tcode, sel, try; |
| 1162 | __be32 *payload, lock_old; | 1162 | __be32 *payload, lock_old; |
| 1163 | u32 lock_arg, lock_data; | 1163 | u32 lock_arg, lock_data; |
| 1164 | 1164 | ||
| @@ -1185,21 +1185,26 @@ static void handle_local_lock(struct fw_ohci *ohci, | |||
| 1185 | reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); | 1185 | reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); |
| 1186 | reg_write(ohci, OHCI1394_CSRControl, sel); | 1186 | reg_write(ohci, OHCI1394_CSRControl, sel); |
| 1187 | 1187 | ||
| 1188 | if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) | 1188 | for (try = 0; try < 20; try++) |
| 1189 | lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData)); | 1189 | if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) { |
| 1190 | else | 1190 | lock_old = cpu_to_be32(reg_read(ohci, |
| 1191 | fw_notify("swap not done yet\n"); | 1191 | OHCI1394_CSRData)); |
| 1192 | fw_fill_response(&response, packet->header, | ||
| 1193 | RCODE_COMPLETE, | ||
| 1194 | &lock_old, sizeof(lock_old)); | ||
| 1195 | goto out; | ||
| 1196 | } | ||
| 1197 | |||
| 1198 | fw_error("swap not done (CSR lock timeout)\n"); | ||
| 1199 | fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); | ||
| 1192 | 1200 | ||
| 1193 | fw_fill_response(&response, packet->header, | ||
| 1194 | RCODE_COMPLETE, &lock_old, sizeof(lock_old)); | ||
| 1195 | out: | 1201 | out: |
| 1196 | fw_core_handle_response(&ohci->card, &response); | 1202 | fw_core_handle_response(&ohci->card, &response); |
| 1197 | } | 1203 | } |
| 1198 | 1204 | ||
| 1199 | static void handle_local_request(struct context *ctx, struct fw_packet *packet) | 1205 | static void handle_local_request(struct context *ctx, struct fw_packet *packet) |
| 1200 | { | 1206 | { |
| 1201 | u64 offset; | 1207 | u64 offset, csr; |
| 1202 | u32 csr; | ||
| 1203 | 1208 | ||
| 1204 | if (ctx == &ctx->ohci->at_request_ctx) { | 1209 | if (ctx == &ctx->ohci->at_request_ctx) { |
| 1205 | packet->ack = ACK_PENDING; | 1210 | packet->ack = ACK_PENDING; |
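In handle_local_lock() the single test of the CSRControl done bit becomes a bounded poll of up to 20 reads, and the response is RCODE_BUSY if the compare-swap never completes. A small sketch of the same bounded-polling shape, with read_csr_control() as a stand-in for the register read and the completion point chosen arbitrarily:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical register read; bit 31 becomes set once the swap has completed. */
    static unsigned int read_csr_control(int call)
    {
            return call >= 5 ? 0x80000000u : 0;   /* pretend the lock completes on the 6th poll */
    }

    /* Poll a "done" bit a bounded number of times instead of checking it once.
     * Returns true and stores the result when the operation completed,
     * false when the caller should answer with a busy status instead. */
    static bool wait_for_lock(unsigned int *result)
    {
            int try;

            for (try = 0; try < 20; try++) {
                    if (read_csr_control(try) & 0x80000000u) {
                            *result = 42;   /* would be the CSRData register here */
                            return true;
                    }
            }
            return false;                   /* caller reports busy / logs a timeout */
    }

    int main(void)
    {
            unsigned int old;

            if (wait_for_lock(&old))
                    printf("lock done, old value %u\n", old);
            else
                    printf("swap not done, reporting busy\n");
            return 0;
    }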
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2dc93939507d..c3cfafcbfe7d 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
| @@ -1357,6 +1357,8 @@ static void i915_setup_compression(struct drm_device *dev, int size) | |||
| 1357 | 1357 | ||
| 1358 | dev_priv->cfb_size = size; | 1358 | dev_priv->cfb_size = size; |
| 1359 | 1359 | ||
| 1360 | dev_priv->compressed_fb = compressed_fb; | ||
| 1361 | |||
| 1360 | if (IS_GM45(dev)) { | 1362 | if (IS_GM45(dev)) { |
| 1361 | g4x_disable_fbc(dev); | 1363 | g4x_disable_fbc(dev); |
| 1362 | I915_WRITE(DPFC_CB_BASE, compressed_fb->start); | 1364 | I915_WRITE(DPFC_CB_BASE, compressed_fb->start); |
| @@ -1364,12 +1366,22 @@ static void i915_setup_compression(struct drm_device *dev, int size) | |||
| 1364 | i8xx_disable_fbc(dev); | 1366 | i8xx_disable_fbc(dev); |
| 1365 | I915_WRITE(FBC_CFB_BASE, cfb_base); | 1367 | I915_WRITE(FBC_CFB_BASE, cfb_base); |
| 1366 | I915_WRITE(FBC_LL_BASE, ll_base); | 1368 | I915_WRITE(FBC_LL_BASE, ll_base); |
| 1369 | dev_priv->compressed_llb = compressed_llb; | ||
| 1367 | } | 1370 | } |
| 1368 | 1371 | ||
| 1369 | DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, | 1372 | DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, |
| 1370 | ll_base, size >> 20); | 1373 | ll_base, size >> 20); |
| 1371 | } | 1374 | } |
| 1372 | 1375 | ||
| 1376 | static void i915_cleanup_compression(struct drm_device *dev) | ||
| 1377 | { | ||
| 1378 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 1379 | |||
| 1380 | drm_mm_put_block(dev_priv->compressed_fb); | ||
| 1381 | if (!IS_GM45(dev)) | ||
| 1382 | drm_mm_put_block(dev_priv->compressed_llb); | ||
| 1383 | } | ||
| 1384 | |||
| 1373 | /* true = enable decode, false = disable decoder */ | 1385 | /* true = enable decode, false = disable decoder */ |
| 1374 | static unsigned int i915_vga_set_decode(void *cookie, bool state) | 1386 | static unsigned int i915_vga_set_decode(void *cookie, bool state) |
| 1375 | { | 1387 | { |
| @@ -1787,6 +1799,8 @@ int i915_driver_unload(struct drm_device *dev) | |||
| 1787 | mutex_lock(&dev->struct_mutex); | 1799 | mutex_lock(&dev->struct_mutex); |
| 1788 | i915_gem_cleanup_ringbuffer(dev); | 1800 | i915_gem_cleanup_ringbuffer(dev); |
| 1789 | mutex_unlock(&dev->struct_mutex); | 1801 | mutex_unlock(&dev->struct_mutex); |
| 1802 | if (I915_HAS_FBC(dev) && i915_powersave) | ||
| 1803 | i915_cleanup_compression(dev); | ||
| 1790 | drm_mm_takedown(&dev_priv->vram); | 1804 | drm_mm_takedown(&dev_priv->vram); |
| 1791 | i915_gem_lastclose(dev); | 1805 | i915_gem_lastclose(dev); |
| 1792 | 1806 | ||
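The i915_dma.c hunks record the compressed framebuffer block (and, on the older non-GM45 path, the LLB block) in the driver-private structure so a matching i915_cleanup_compression() can release them at unload. A compressed sketch of that record-then-release bookkeeping, with malloc/free standing in for the drm_mm block allocator:

    #include <stdio.h>
    #include <stdlib.h>

    struct block { size_t size; };

    /* Simplified driver-private structure holding the blocks the patch tracks. */
    struct drv_priv {
            struct block *compressed_fb;
            struct block *compressed_llb;   /* only on the older hardware path */
            int has_second_block;
    };

    static struct block *block_alloc(size_t size)
    {
            struct block *b = malloc(sizeof(*b));
            if (b)
                    b->size = size;
            return b;
    }

    /* Setup remembers every block it allocates in the private struct. */
    static void setup_compression(struct drv_priv *p, int newer_hw)
    {
            p->compressed_fb = block_alloc(4096);
            p->has_second_block = !newer_hw;
            if (p->has_second_block)
                    p->compressed_llb = block_alloc(4096);
    }

    /* Teardown mirrors setup: free exactly what was recorded. */
    static void cleanup_compression(struct drv_priv *p)
    {
            free(p->compressed_fb);
            if (p->has_second_block)
                    free(p->compressed_llb);
    }

    int main(void)
    {
            struct drv_priv p = { 0 };

            setup_compression(&p, 0);
            cleanup_compression(&p);
            printf("compression buffers released\n");
            return 0;
    }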
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0af3dcc85ce9..cc03537bb883 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -69,7 +69,8 @@ const static struct intel_device_info intel_845g_info = { | |||
| 69 | }; | 69 | }; |
| 70 | 70 | ||
| 71 | const static struct intel_device_info intel_i85x_info = { | 71 | const static struct intel_device_info intel_i85x_info = { |
| 72 | .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, | 72 | .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, |
| 73 | .cursor_needs_physical = 1, | ||
| 73 | }; | 74 | }; |
| 74 | 75 | ||
| 75 | const static struct intel_device_info intel_i865g_info = { | 76 | const static struct intel_device_info intel_i865g_info = { |
| @@ -151,7 +152,7 @@ const static struct pci_device_id pciidlist[] = { | |||
| 151 | INTEL_VGA_DEVICE(0x3577, &intel_i830_info), | 152 | INTEL_VGA_DEVICE(0x3577, &intel_i830_info), |
| 152 | INTEL_VGA_DEVICE(0x2562, &intel_845g_info), | 153 | INTEL_VGA_DEVICE(0x2562, &intel_845g_info), |
| 153 | INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), | 154 | INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), |
| 154 | INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info), | 155 | INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), |
| 155 | INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), | 156 | INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), |
| 156 | INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), | 157 | INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), |
| 157 | INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), | 158 | INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6960849522f8..6e4790065d9e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -195,6 +195,7 @@ struct intel_overlay; | |||
| 195 | struct intel_device_info { | 195 | struct intel_device_info { |
| 196 | u8 is_mobile : 1; | 196 | u8 is_mobile : 1; |
| 197 | u8 is_i8xx : 1; | 197 | u8 is_i8xx : 1; |
| 198 | u8 is_i85x : 1; | ||
| 198 | u8 is_i915g : 1; | 199 | u8 is_i915g : 1; |
| 199 | u8 is_i9xx : 1; | 200 | u8 is_i9xx : 1; |
| 200 | u8 is_i945gm : 1; | 201 | u8 is_i945gm : 1; |
| @@ -235,11 +236,14 @@ typedef struct drm_i915_private { | |||
| 235 | 236 | ||
| 236 | drm_dma_handle_t *status_page_dmah; | 237 | drm_dma_handle_t *status_page_dmah; |
| 237 | void *hw_status_page; | 238 | void *hw_status_page; |
| 239 | void *seqno_page; | ||
| 238 | dma_addr_t dma_status_page; | 240 | dma_addr_t dma_status_page; |
| 239 | uint32_t counter; | 241 | uint32_t counter; |
| 240 | unsigned int status_gfx_addr; | 242 | unsigned int status_gfx_addr; |
| 243 | unsigned int seqno_gfx_addr; | ||
| 241 | drm_local_map_t hws_map; | 244 | drm_local_map_t hws_map; |
| 242 | struct drm_gem_object *hws_obj; | 245 | struct drm_gem_object *hws_obj; |
| 246 | struct drm_gem_object *seqno_obj; | ||
| 243 | struct drm_gem_object *pwrctx; | 247 | struct drm_gem_object *pwrctx; |
| 244 | 248 | ||
| 245 | struct resource mch_res; | 249 | struct resource mch_res; |
| @@ -630,6 +634,9 @@ typedef struct drm_i915_private { | |||
| 630 | u8 max_delay; | 634 | u8 max_delay; |
| 631 | 635 | ||
| 632 | enum no_fbc_reason no_fbc_reason; | 636 | enum no_fbc_reason no_fbc_reason; |
| 637 | |||
| 638 | struct drm_mm_node *compressed_fb; | ||
| 639 | struct drm_mm_node *compressed_llb; | ||
| 633 | } drm_i915_private_t; | 640 | } drm_i915_private_t; |
| 634 | 641 | ||
| 635 | /** driver private structure attached to each drm_gem_object */ | 642 | /** driver private structure attached to each drm_gem_object */ |
| @@ -1070,7 +1077,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
| 1070 | 1077 | ||
| 1071 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) | 1078 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) |
| 1072 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) | 1079 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) |
| 1073 | #define IS_I85X(dev) ((dev)->pci_device == 0x3582) | 1080 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) |
| 1074 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) | 1081 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) |
| 1075 | #define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) | 1082 | #define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) |
| 1076 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) | 1083 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) |
| @@ -1135,6 +1142,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
| 1135 | 1142 | ||
| 1136 | #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ | 1143 | #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ |
| 1137 | IS_GEN6(dev)) | 1144 | IS_GEN6(dev)) |
| 1145 | #define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev)) | ||
| 1138 | 1146 | ||
| 1139 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 1147 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
| 1140 | 1148 | ||
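The header change moves the i85x check from a raw PCI-ID compare to a per-device feature bit and adds the HAS_PIPE_CONTROL() capability macro. A toy illustration of capability flags kept as one-bit bitfields in a static device-info table; the condition used for HAS_PIPE_CONTROL here is a placeholder, not the real Ironlake/Gen6 test:

    #include <stdio.h>

    /* One-bit feature flags per device, in the spirit of intel_device_info. */
    struct device_info {
            unsigned char is_mobile : 1;
            unsigned char is_i85x   : 1;
            unsigned char is_gen6   : 1;
    };

    struct device {
            const struct device_info *info;
    };

    /* Capability checks read the per-device table instead of comparing
     * raw PCI IDs scattered around the code. */
    #define INTEL_INFO(d)        ((d)->info)
    #define IS_I85X(d)           (INTEL_INFO(d)->is_i85x)
    #define HAS_PIPE_CONTROL(d)  (INTEL_INFO(d)->is_gen6)   /* placeholder condition */

    int main(void)
    {
            static const struct device_info i85x = { .is_mobile = 1, .is_i85x = 1 };
            struct device dev = { &i85x };

            printf("IS_I85X=%d HAS_PIPE_CONTROL=%d\n",
                   IS_I85X(&dev), HAS_PIPE_CONTROL(&dev));
            return 0;
    }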
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 80871c62a571..ef3d91dda71a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -1588,6 +1588,13 @@ i915_gem_process_flushing_list(struct drm_device *dev, | |||
| 1588 | } | 1588 | } |
| 1589 | } | 1589 | } |
| 1590 | 1590 | ||
| 1591 | #define PIPE_CONTROL_FLUSH(addr) \ | ||
| 1592 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ | ||
| 1593 | PIPE_CONTROL_DEPTH_STALL); \ | ||
| 1594 | OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ | ||
| 1595 | OUT_RING(0); \ | ||
| 1596 | OUT_RING(0); \ | ||
| 1597 | |||
| 1591 | /** | 1598 | /** |
| 1592 | * Creates a new sequence number, emitting a write of it to the status page | 1599 | * Creates a new sequence number, emitting a write of it to the status page |
| 1593 | * plus an interrupt, which will trigger i915_user_interrupt_handler. | 1600 | * plus an interrupt, which will trigger i915_user_interrupt_handler. |
| @@ -1622,13 +1629,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
| 1622 | if (dev_priv->mm.next_gem_seqno == 0) | 1629 | if (dev_priv->mm.next_gem_seqno == 0) |
| 1623 | dev_priv->mm.next_gem_seqno++; | 1630 | dev_priv->mm.next_gem_seqno++; |
| 1624 | 1631 | ||
| 1625 | BEGIN_LP_RING(4); | 1632 | if (HAS_PIPE_CONTROL(dev)) { |
| 1626 | OUT_RING(MI_STORE_DWORD_INDEX); | 1633 | u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; |
| 1627 | OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
| 1628 | OUT_RING(seqno); | ||
| 1629 | 1634 | ||
| 1630 | OUT_RING(MI_USER_INTERRUPT); | 1635 | /* |
| 1631 | ADVANCE_LP_RING(); | 1636 | * Workaround qword write incoherence by flushing the |
| 1637 | * PIPE_NOTIFY buffers out to memory before requesting | ||
| 1638 | * an interrupt. | ||
| 1639 | */ | ||
| 1640 | BEGIN_LP_RING(32); | ||
| 1641 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
| 1642 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); | ||
| 1643 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
| 1644 | OUT_RING(seqno); | ||
| 1645 | OUT_RING(0); | ||
| 1646 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 1647 | scratch_addr += 128; /* write to separate cachelines */ | ||
| 1648 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 1649 | scratch_addr += 128; | ||
| 1650 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 1651 | scratch_addr += 128; | ||
| 1652 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 1653 | scratch_addr += 128; | ||
| 1654 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 1655 | scratch_addr += 128; | ||
| 1656 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
| 1657 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
| 1658 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | | ||
| 1659 | PIPE_CONTROL_NOTIFY); | ||
| 1660 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
| 1661 | OUT_RING(seqno); | ||
| 1662 | OUT_RING(0); | ||
| 1663 | ADVANCE_LP_RING(); | ||
| 1664 | } else { | ||
| 1665 | BEGIN_LP_RING(4); | ||
| 1666 | OUT_RING(MI_STORE_DWORD_INDEX); | ||
| 1667 | OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
| 1668 | OUT_RING(seqno); | ||
| 1669 | |||
| 1670 | OUT_RING(MI_USER_INTERRUPT); | ||
| 1671 | ADVANCE_LP_RING(); | ||
| 1672 | } | ||
| 1632 | 1673 | ||
| 1633 | DRM_DEBUG_DRIVER("%d\n", seqno); | 1674 | DRM_DEBUG_DRIVER("%d\n", seqno); |
| 1634 | 1675 | ||
| @@ -1752,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev) | |||
| 1752 | { | 1793 | { |
| 1753 | drm_i915_private_t *dev_priv = dev->dev_private; | 1794 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1754 | 1795 | ||
| 1755 | return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); | 1796 | if (HAS_PIPE_CONTROL(dev)) |
| 1797 | return ((volatile u32 *)(dev_priv->seqno_page))[0]; | ||
| 1798 | else | ||
| 1799 | return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); | ||
| 1756 | } | 1800 | } |
| 1757 | 1801 | ||
| 1758 | /** | 1802 | /** |
| @@ -2362,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
| 2362 | pitch_val = obj_priv->stride / tile_width; | 2406 | pitch_val = obj_priv->stride / tile_width; |
| 2363 | pitch_val = ffs(pitch_val) - 1; | 2407 | pitch_val = ffs(pitch_val) - 1; |
| 2364 | 2408 | ||
| 2409 | if (obj_priv->tiling_mode == I915_TILING_Y && | ||
| 2410 | HAS_128_BYTE_Y_TILING(dev)) | ||
| 2411 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | ||
| 2412 | else | ||
| 2413 | WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); | ||
| 2414 | |||
| 2365 | val = obj_priv->gtt_offset; | 2415 | val = obj_priv->gtt_offset; |
| 2366 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2416 | if (obj_priv->tiling_mode == I915_TILING_Y) |
| 2367 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 2417 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
| @@ -4546,6 +4596,49 @@ i915_gem_idle(struct drm_device *dev) | |||
| 4546 | return 0; | 4596 | return 0; |
| 4547 | } | 4597 | } |
| 4548 | 4598 | ||
| 4599 | /* | ||
| 4600 | * 965+ support PIPE_CONTROL commands, which provide finer grained control | ||
| 4601 | * over cache flushing. | ||
| 4602 | */ | ||
| 4603 | static int | ||
| 4604 | i915_gem_init_pipe_control(struct drm_device *dev) | ||
| 4605 | { | ||
| 4606 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 4607 | struct drm_gem_object *obj; | ||
| 4608 | struct drm_i915_gem_object *obj_priv; | ||
| 4609 | int ret; | ||
| 4610 | |||
| 4611 | obj = drm_gem_object_alloc(dev, 4096); | ||
| 4612 | if (obj == NULL) { | ||
| 4613 | DRM_ERROR("Failed to allocate seqno page\n"); | ||
| 4614 | ret = -ENOMEM; | ||
| 4615 | goto err; | ||
| 4616 | } | ||
| 4617 | obj_priv = to_intel_bo(obj); | ||
| 4618 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | ||
| 4619 | |||
| 4620 | ret = i915_gem_object_pin(obj, 4096); | ||
| 4621 | if (ret) | ||
| 4622 | goto err_unref; | ||
| 4623 | |||
| 4624 | dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; | ||
| 4625 | dev_priv->seqno_page = kmap(obj_priv->pages[0]); | ||
| 4626 | if (dev_priv->seqno_page == NULL) | ||
| 4627 | goto err_unpin; | ||
| 4628 | |||
| 4629 | dev_priv->seqno_obj = obj; | ||
| 4630 | memset(dev_priv->seqno_page, 0, PAGE_SIZE); | ||
| 4631 | |||
| 4632 | return 0; | ||
| 4633 | |||
| 4634 | err_unpin: | ||
| 4635 | i915_gem_object_unpin(obj); | ||
| 4636 | err_unref: | ||
| 4637 | drm_gem_object_unreference(obj); | ||
| 4638 | err: | ||
| 4639 | return ret; | ||
| 4640 | } | ||
| 4641 | |||
| 4549 | static int | 4642 | static int |
| 4550 | i915_gem_init_hws(struct drm_device *dev) | 4643 | i915_gem_init_hws(struct drm_device *dev) |
| 4551 | { | 4644 | { |
| @@ -4563,7 +4656,8 @@ i915_gem_init_hws(struct drm_device *dev) | |||
| 4563 | obj = drm_gem_object_alloc(dev, 4096); | 4656 | obj = drm_gem_object_alloc(dev, 4096); |
| 4564 | if (obj == NULL) { | 4657 | if (obj == NULL) { |
| 4565 | DRM_ERROR("Failed to allocate status page\n"); | 4658 | DRM_ERROR("Failed to allocate status page\n"); |
| 4566 | return -ENOMEM; | 4659 | ret = -ENOMEM; |
| 4660 | goto err; | ||
| 4567 | } | 4661 | } |
| 4568 | obj_priv = to_intel_bo(obj); | 4662 | obj_priv = to_intel_bo(obj); |
| 4569 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | 4663 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; |
| @@ -4571,7 +4665,7 @@ i915_gem_init_hws(struct drm_device *dev) | |||
| 4571 | ret = i915_gem_object_pin(obj, 4096); | 4665 | ret = i915_gem_object_pin(obj, 4096); |
| 4572 | if (ret != 0) { | 4666 | if (ret != 0) { |
| 4573 | drm_gem_object_unreference(obj); | 4667 | drm_gem_object_unreference(obj); |
| 4574 | return ret; | 4668 | goto err_unref; |
| 4575 | } | 4669 | } |
| 4576 | 4670 | ||
| 4577 | dev_priv->status_gfx_addr = obj_priv->gtt_offset; | 4671 | dev_priv->status_gfx_addr = obj_priv->gtt_offset; |
| @@ -4580,10 +4674,16 @@ i915_gem_init_hws(struct drm_device *dev) | |||
| 4580 | if (dev_priv->hw_status_page == NULL) { | 4674 | if (dev_priv->hw_status_page == NULL) { |
| 4581 | DRM_ERROR("Failed to map status page.\n"); | 4675 | DRM_ERROR("Failed to map status page.\n"); |
| 4582 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 4676 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
| 4583 | i915_gem_object_unpin(obj); | 4677 | ret = -EINVAL; |
| 4584 | drm_gem_object_unreference(obj); | 4678 | goto err_unpin; |
| 4585 | return -EINVAL; | ||
| 4586 | } | 4679 | } |
| 4680 | |||
| 4681 | if (HAS_PIPE_CONTROL(dev)) { | ||
| 4682 | ret = i915_gem_init_pipe_control(dev); | ||
| 4683 | if (ret) | ||
| 4684 | goto err_unpin; | ||
| 4685 | } | ||
| 4686 | |||
| 4587 | dev_priv->hws_obj = obj; | 4687 | dev_priv->hws_obj = obj; |
| 4588 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | 4688 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); |
| 4589 | if (IS_GEN6(dev)) { | 4689 | if (IS_GEN6(dev)) { |
| @@ -4596,6 +4696,30 @@ i915_gem_init_hws(struct drm_device *dev) | |||
| 4596 | DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); | 4696 | DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); |
| 4597 | 4697 | ||
| 4598 | return 0; | 4698 | return 0; |
| 4699 | |||
| 4700 | err_unpin: | ||
| 4701 | i915_gem_object_unpin(obj); | ||
| 4702 | err_unref: | ||
| 4703 | drm_gem_object_unreference(obj); | ||
| 4704 | err: | ||
| 4705 | return 0; | ||
| 4706 | } | ||
| 4707 | |||
| 4708 | static void | ||
| 4709 | i915_gem_cleanup_pipe_control(struct drm_device *dev) | ||
| 4710 | { | ||
| 4711 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 4712 | struct drm_gem_object *obj; | ||
| 4713 | struct drm_i915_gem_object *obj_priv; | ||
| 4714 | |||
| 4715 | obj = dev_priv->seqno_obj; | ||
| 4716 | obj_priv = to_intel_bo(obj); | ||
| 4717 | kunmap(obj_priv->pages[0]); | ||
| 4718 | i915_gem_object_unpin(obj); | ||
| 4719 | drm_gem_object_unreference(obj); | ||
| 4720 | dev_priv->seqno_obj = NULL; | ||
| 4721 | |||
| 4722 | dev_priv->seqno_page = NULL; | ||
| 4599 | } | 4723 | } |
| 4600 | 4724 | ||
| 4601 | static void | 4725 | static void |
| @@ -4619,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev) | |||
| 4619 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 4743 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
| 4620 | dev_priv->hw_status_page = NULL; | 4744 | dev_priv->hw_status_page = NULL; |
| 4621 | 4745 | ||
| 4746 | if (HAS_PIPE_CONTROL(dev)) | ||
| 4747 | i915_gem_cleanup_pipe_control(dev); | ||
| 4748 | |||
| 4622 | /* Write high address into HWS_PGA when disabling. */ | 4749 | /* Write high address into HWS_PGA when disabling. */ |
| 4623 | I915_WRITE(HWS_PGA, 0x1ffff000); | 4750 | I915_WRITE(HWS_PGA, 0x1ffff000); |
| 4624 | } | 4751 | } |
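On hardware with PIPE_CONTROL, i915_add_request() now writes the sequence number through a PIPE_CONTROL command preceded by a series of dummy flushes, each aimed at a different 128-byte scratch slot so the writes land in separate cachelines. The sketch below only mimics the shape of that command stream by collecting dwords into an array; the opcode value and addresses are illustrative, not the real register encoding:

    #include <stdint.h>
    #include <stdio.h>

    /* Schematic emitter: collects the dwords that would go into the ring. */
    static uint32_t ring[64];
    static int ring_len;

    static void out_ring(uint32_t dw)
    {
            ring[ring_len++] = dw;
    }

    /* One dummy flush to a scratch address, as in the PIPE_CONTROL_FLUSH()
     * macro added by the patch (opcode value is a stand-in only). */
    static void pipe_control_flush(uint32_t addr)
    {
            out_ring(0x7a000002u);   /* stand-in for GFX_OP_PIPE_CONTROL | flags */
            out_ring(addr);
            out_ring(0);
            out_ring(0);
    }

    int main(void)
    {
            uint32_t seqno_gfx_addr = 0x10000;       /* arbitrary sample offset */
            uint32_t scratch = seqno_gfx_addr + 128;
            int i;

            /* Six flushes, each to a different 128-byte slot, i.e. separate
             * cachelines, before the final notifying seqno write. */
            for (i = 0; i < 6; i++) {
                    pipe_control_flush(scratch);
                    scratch += 128;
            }
            printf("emitted %d dwords, last scratch at +%u\n",
                   ring_len, scratch - seqno_gfx_addr);
            return 0;
    }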
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 449157f71610..4bdccefcf2cf 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
| @@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
| 202 | * reg, so dont bother to check the size */ | 202 | * reg, so dont bother to check the size */ |
| 203 | if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) | 203 | if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) |
| 204 | return false; | 204 | return false; |
| 205 | } else if (IS_I9XX(dev)) { | 205 | } else if (IS_GEN3(dev) || IS_GEN2(dev)) { |
| 206 | uint32_t pitch_val = ffs(stride / tile_width) - 1; | 206 | if (stride > 8192) |
| 207 | |||
| 208 | /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB) | ||
| 209 | * instead of 4 (2KB) on 945s. | ||
| 210 | */ | ||
| 211 | if (pitch_val > I915_FENCE_MAX_PITCH_VAL || | ||
| 212 | size > (I830_FENCE_MAX_SIZE_VAL << 20)) | ||
| 213 | return false; | 207 | return false; |
| 214 | } else { | ||
| 215 | uint32_t pitch_val = ffs(stride / tile_width) - 1; | ||
| 216 | 208 | ||
| 217 | if (pitch_val > I830_FENCE_MAX_PITCH_VAL || | 209 | if (IS_GEN3(dev)) { |
| 218 | size > (I830_FENCE_MAX_SIZE_VAL << 19)) | 210 | if (size > I830_FENCE_MAX_SIZE_VAL << 20) |
| 219 | return false; | 211 | return false; |
| 212 | } else { | ||
| 213 | if (size > I830_FENCE_MAX_SIZE_VAL << 19) | ||
| 214 | return false; | ||
| 215 | } | ||
| 220 | } | 216 | } |
| 221 | 217 | ||
| 222 | /* 965+ just needs multiples of tile width */ | 218 | /* 965+ just needs multiples of tile width */ |
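The tiling check now rejects gen2/gen3 strides above 8192 bytes outright and compares the object size against I830_FENCE_MAX_SIZE_VAL shifted by 20 (gen3) or 19 (gen2). A standalone sketch of that validation, reusing the (1<<8) maximum from the register header above; the generation handling is simplified to just 2 and 3:

    #include <stdbool.h>
    #include <stdio.h>

    #define FENCE_MAX_SIZE_VAL (1 << 8)   /* I830_FENCE_MAX_SIZE_VAL from the header */

    /* Simplified stride/size validation after the patch; gen is assumed 2 or 3. */
    static bool tiling_ok(int gen, unsigned int stride, unsigned int size)
    {
            if (stride > 8192)
                    return false;

            if (gen == 3)
                    return size <= ((unsigned int)FENCE_MAX_SIZE_VAL << 20);
            else
                    return size <= ((unsigned int)FENCE_MAX_SIZE_VAL << 19);
    }

    int main(void)
    {
            printf("%d\n", tiling_ok(3, 4096, 64u << 20));   /* 1: within gen3 limits */
            printf("%d\n", tiling_ok(2, 16384, 1u << 20));   /* 0: stride too large */
            return 0;
    }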
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6421481d6222..2b8b969d0c15 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -349,7 +349,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
| 349 | READ_BREADCRUMB(dev_priv); | 349 | READ_BREADCRUMB(dev_priv); |
| 350 | } | 350 | } |
| 351 | 351 | ||
| 352 | if (gt_iir & GT_USER_INTERRUPT) { | 352 | if (gt_iir & GT_PIPE_NOTIFY) { |
| 353 | u32 seqno = i915_get_gem_seqno(dev); | 353 | u32 seqno = i915_get_gem_seqno(dev); |
| 354 | dev_priv->mm.irq_gem_seqno = seqno; | 354 | dev_priv->mm.irq_gem_seqno = seqno; |
| 355 | trace_i915_gem_request_complete(dev, seqno); | 355 | trace_i915_gem_request_complete(dev, seqno); |
| @@ -1005,7 +1005,7 @@ void i915_user_irq_get(struct drm_device *dev) | |||
| 1005 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1005 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
| 1006 | if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { | 1006 | if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { |
| 1007 | if (HAS_PCH_SPLIT(dev)) | 1007 | if (HAS_PCH_SPLIT(dev)) |
| 1008 | ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); | 1008 | ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); |
| 1009 | else | 1009 | else |
| 1010 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); | 1010 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); |
| 1011 | } | 1011 | } |
| @@ -1021,7 +1021,7 @@ void i915_user_irq_put(struct drm_device *dev) | |||
| 1021 | BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); | 1021 | BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); |
| 1022 | if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { | 1022 | if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { |
| 1023 | if (HAS_PCH_SPLIT(dev)) | 1023 | if (HAS_PCH_SPLIT(dev)) |
| 1024 | ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); | 1024 | ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); |
| 1025 | else | 1025 | else |
| 1026 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); | 1026 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); |
| 1027 | } | 1027 | } |
| @@ -1305,7 +1305,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
| 1305 | /* enable kind of interrupts always enabled */ | 1305 | /* enable kind of interrupts always enabled */ |
| 1306 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 1306 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
| 1307 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | 1307 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; |
| 1308 | u32 render_mask = GT_USER_INTERRUPT; | 1308 | u32 render_mask = GT_PIPE_NOTIFY; |
| 1309 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1309 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
| 1310 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1310 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
| 1311 | 1311 | ||
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index 7cc8410239cb..8fcc75c1aa28 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c | |||
| @@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev) | |||
| 382 | struct drm_i915_private *dev_priv = dev->dev_private; | 382 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 383 | struct intel_opregion *opregion = &dev_priv->opregion; | 383 | struct intel_opregion *opregion = &dev_priv->opregion; |
| 384 | struct drm_connector *connector; | 384 | struct drm_connector *connector; |
| 385 | acpi_handle handle; | ||
| 386 | struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; | ||
| 387 | unsigned long long device_id; | ||
| 388 | acpi_status status; | ||
| 385 | int i = 0; | 389 | int i = 0; |
| 386 | 390 | ||
| 391 | handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); | ||
| 392 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) | ||
| 393 | return; | ||
| 394 | |||
| 395 | if (acpi_is_video_device(acpi_dev)) | ||
| 396 | acpi_video_bus = acpi_dev; | ||
| 397 | else { | ||
| 398 | list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { | ||
| 399 | if (acpi_is_video_device(acpi_cdev)) { | ||
| 400 | acpi_video_bus = acpi_cdev; | ||
| 401 | break; | ||
| 402 | } | ||
| 403 | } | ||
| 404 | } | ||
| 405 | |||
| 406 | if (!acpi_video_bus) { | ||
| 407 | printk(KERN_WARNING "No ACPI video bus found\n"); | ||
| 408 | return; | ||
| 409 | } | ||
| 410 | |||
| 411 | list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { | ||
| 412 | if (i >= 8) { | ||
| 413 | dev_printk (KERN_ERR, &dev->pdev->dev, | ||
| 414 | "More than 8 outputs detected\n"); | ||
| 415 | return; | ||
| 416 | } | ||
| 417 | status = | ||
| 418 | acpi_evaluate_integer(acpi_cdev->handle, "_ADR", | ||
| 419 | NULL, &device_id); | ||
| 420 | if (ACPI_SUCCESS(status)) { | ||
| 421 | if (!device_id) | ||
| 422 | goto blind_set; | ||
| 423 | opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f); | ||
| 424 | i++; | ||
| 425 | } | ||
| 426 | } | ||
| 427 | |||
| 428 | end: | ||
| 429 | /* If fewer than 8 outputs, the list must be null terminated */ | ||
| 430 | if (i < 8) | ||
| 431 | opregion->acpi->didl[i] = 0; | ||
| 432 | return; | ||
| 433 | |||
| 434 | blind_set: | ||
| 435 | i = 0; | ||
| 387 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 436 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 388 | int output_type = ACPI_OTHER_OUTPUT; | 437 | int output_type = ACPI_OTHER_OUTPUT; |
| 389 | if (i >= 8) { | 438 | if (i >= 8) { |
| @@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev) | |||
| 416 | opregion->acpi->didl[i] |= (1<<31) | output_type | i; | 465 | opregion->acpi->didl[i] |= (1<<31) | output_type | i; |
| 417 | i++; | 466 | i++; |
| 418 | } | 467 | } |
| 419 | 468 | goto end; | |
| 420 | /* If fewer than 8 outputs, the list must be null terminated */ | ||
| 421 | if (i < 8) | ||
| 422 | opregion->acpi->didl[i] = 0; | ||
| 423 | } | 469 | } |
| 424 | 470 | ||
| 425 | int intel_opregion_init(struct drm_device *dev, int resume) | 471 | int intel_opregion_init(struct drm_device *dev, int resume) |
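intel_didl_outputs() now asks ACPI for up to eight _ADR device IDs, falls back to the old connector-based list when an ID is zero, and in either case NULL-terminates didl[] if fewer than eight entries were filled. A tiny sketch of that fill-and-terminate handling with arbitrary sample IDs:

    #include <stdint.h>
    #include <stdio.h>

    /* Fill up to 8 output IDs and NULL-terminate the list when shorter,
     * mirroring the didl[] handling in the hunk above. */
    static void fill_didl(uint32_t didl[8], const uint32_t *ids, int count)
    {
            int i;

            for (i = 0; i < count && i < 8; i++)
                    didl[i] = ids[i] & 0x0f0f;    /* keep only the type/index fields */

            if (i < 8)
                    didl[i] = 0;                  /* fewer than 8 outputs: terminate */
    }

    int main(void)
    {
            uint32_t didl[8];
            uint32_t sample[] = { 0x0100, 0x0200, 0x0301 };   /* arbitrary sample IDs */

            fill_didl(didl, sample, 3);
            printf("didl[3] terminator = %u\n", didl[3]);
            return 0;
    }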
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index cbbf59f56dfa..4cbc5210fd30 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -230,6 +230,16 @@ | |||
| 230 | #define ASYNC_FLIP (1<<22) | 230 | #define ASYNC_FLIP (1<<22) |
| 231 | #define DISPLAY_PLANE_A (0<<20) | 231 | #define DISPLAY_PLANE_A (0<<20) |
| 232 | #define DISPLAY_PLANE_B (1<<20) | 232 | #define DISPLAY_PLANE_B (1<<20) |
| 233 | #define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2) | ||
| 234 | #define PIPE_CONTROL_QW_WRITE (1<<14) | ||
| 235 | #define PIPE_CONTROL_DEPTH_STALL (1<<13) | ||
| 236 | #define PIPE_CONTROL_WC_FLUSH (1<<12) | ||
| 237 | #define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */ | ||
| 238 | #define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */ | ||
| 239 | #define PIPE_CONTROL_ISP_DIS (1<<9) | ||
| 240 | #define PIPE_CONTROL_NOTIFY (1<<8) | ||
| 241 | #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ | ||
| 242 | #define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ | ||
| 233 | 243 | ||
| 234 | /* | 244 | /* |
| 235 | * Fence registers | 245 | * Fence registers |
| @@ -241,7 +251,7 @@ | |||
| 241 | #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) | 251 | #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) |
| 242 | #define I830_FENCE_PITCH_SHIFT 4 | 252 | #define I830_FENCE_PITCH_SHIFT 4 |
| 243 | #define I830_FENCE_REG_VALID (1<<0) | 253 | #define I830_FENCE_REG_VALID (1<<0) |
| 244 | #define I915_FENCE_MAX_PITCH_VAL 0x10 | 254 | #define I915_FENCE_MAX_PITCH_VAL 4 |
| 245 | #define I830_FENCE_MAX_PITCH_VAL 6 | 255 | #define I830_FENCE_MAX_PITCH_VAL 6 |
| 246 | #define I830_FENCE_MAX_SIZE_VAL (1<<8) | 256 | #define I830_FENCE_MAX_SIZE_VAL (1<<8) |
| 247 | 257 | ||
| @@ -2285,6 +2295,7 @@ | |||
| 2285 | #define DEIER 0x4400c | 2295 | #define DEIER 0x4400c |
| 2286 | 2296 | ||
| 2287 | /* GT interrupt */ | 2297 | /* GT interrupt */ |
| 2298 | #define GT_PIPE_NOTIFY (1 << 4) | ||
| 2288 | #define GT_SYNC_STATUS (1 << 2) | 2299 | #define GT_SYNC_STATUS (1 << 2) |
| 2289 | #define GT_USER_INTERRUPT (1 << 0) | 2300 | #define GT_USER_INTERRUPT (1 << 0) |
| 2290 | 2301 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e7356fb6c918..c7502b6b1600 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -4853,17 +4853,18 @@ static void intel_init_display(struct drm_device *dev) | |||
| 4853 | dev_priv->display.update_wm = g4x_update_wm; | 4853 | dev_priv->display.update_wm = g4x_update_wm; |
| 4854 | else if (IS_I965G(dev)) | 4854 | else if (IS_I965G(dev)) |
| 4855 | dev_priv->display.update_wm = i965_update_wm; | 4855 | dev_priv->display.update_wm = i965_update_wm; |
| 4856 | else if (IS_I9XX(dev) || IS_MOBILE(dev)) { | 4856 | else if (IS_I9XX(dev)) { |
| 4857 | dev_priv->display.update_wm = i9xx_update_wm; | 4857 | dev_priv->display.update_wm = i9xx_update_wm; |
| 4858 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; | 4858 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; |
| 4859 | } else if (IS_I85X(dev)) { | ||
| 4860 | dev_priv->display.update_wm = i9xx_update_wm; | ||
| 4861 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; | ||
| 4859 | } else { | 4862 | } else { |
| 4860 | if (IS_I85X(dev)) | 4863 | dev_priv->display.update_wm = i830_update_wm; |
| 4861 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; | 4864 | if (IS_845G(dev)) |
| 4862 | else if (IS_845G(dev)) | ||
| 4863 | dev_priv->display.get_fifo_size = i845_get_fifo_size; | 4865 | dev_priv->display.get_fifo_size = i845_get_fifo_size; |
| 4864 | else | 4866 | else |
| 4865 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | 4867 | dev_priv->display.get_fifo_size = i830_get_fifo_size; |
| 4866 | dev_priv->display.update_wm = i830_update_wm; | ||
| 4867 | } | 4868 | } |
| 4868 | } | 4869 | } |
| 4869 | 4870 | ||
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 75f3fa55663d..16c420240724 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c | |||
| @@ -1169,15 +1169,19 @@ static int atk_create_files(struct atk_data *data) | |||
| 1169 | int err; | 1169 | int err; |
| 1170 | 1170 | ||
| 1171 | list_for_each_entry(s, &data->sensor_list, list) { | 1171 | list_for_each_entry(s, &data->sensor_list, list) { |
| 1172 | sysfs_attr_init(&s->input_attr.attr); | ||
| 1172 | err = device_create_file(data->hwmon_dev, &s->input_attr); | 1173 | err = device_create_file(data->hwmon_dev, &s->input_attr); |
| 1173 | if (err) | 1174 | if (err) |
| 1174 | return err; | 1175 | return err; |
| 1176 | sysfs_attr_init(&s->label_attr.attr); | ||
| 1175 | err = device_create_file(data->hwmon_dev, &s->label_attr); | 1177 | err = device_create_file(data->hwmon_dev, &s->label_attr); |
| 1176 | if (err) | 1178 | if (err) |
| 1177 | return err; | 1179 | return err; |
| 1180 | sysfs_attr_init(&s->limit1_attr.attr); | ||
| 1178 | err = device_create_file(data->hwmon_dev, &s->limit1_attr); | 1181 | err = device_create_file(data->hwmon_dev, &s->limit1_attr); |
| 1179 | if (err) | 1182 | if (err) |
| 1180 | return err; | 1183 | return err; |
| 1184 | sysfs_attr_init(&s->limit2_attr.attr); | ||
| 1181 | err = device_create_file(data->hwmon_dev, &s->limit2_attr); | 1185 | err = device_create_file(data->hwmon_dev, &s->limit2_attr); |
| 1182 | if (err) | 1186 | if (err) |
| 1183 | return err; | 1187 | return err; |
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c index be475e844c2a..c8ab50516672 100644 --- a/drivers/hwmon/hp_accel.c +++ b/drivers/hwmon/hp_accel.c | |||
| @@ -217,6 +217,10 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = { | |||
| 217 | AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted), | 217 | AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted), |
| 218 | AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted), | 218 | AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted), |
| 219 | AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), | 219 | AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), |
| 220 | AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left), | ||
| 221 | AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), | ||
| 222 | AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), | ||
| 223 | AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), | ||
| 220 | { NULL, } | 224 | { NULL, } |
| 221 | /* Laptop models without axis info (yet): | 225 | /* Laptop models without axis info (yet): |
| 222 | * "NC6910" "HP Compaq 6910" | 226 | * "NC6910" "HP Compaq 6910" |
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index f7e27b702375..d1ff9408dc1f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
| @@ -146,10 +146,10 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) | |||
| 146 | "<%s> I2C Interrupted\n", __func__); | 146 | "<%s> I2C Interrupted\n", __func__); |
| 147 | return -EINTR; | 147 | return -EINTR; |
| 148 | } | 148 | } |
| 149 | if (time_after(jiffies, orig_jiffies + HZ / 1000)) { | 149 | if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { |
| 150 | dev_dbg(&i2c_imx->adapter.dev, | 150 | dev_dbg(&i2c_imx->adapter.dev, |
| 151 | "<%s> I2C bus is busy\n", __func__); | 151 | "<%s> I2C bus is busy\n", __func__); |
| 152 | return -EIO; | 152 | return -ETIMEDOUT; |
| 153 | } | 153 | } |
| 154 | schedule(); | 154 | schedule(); |
| 155 | } | 155 | } |
| @@ -444,6 +444,8 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, | |||
| 444 | result = i2c_imx_read(i2c_imx, &msgs[i]); | 444 | result = i2c_imx_read(i2c_imx, &msgs[i]); |
| 445 | else | 445 | else |
| 446 | result = i2c_imx_write(i2c_imx, &msgs[i]); | 446 | result = i2c_imx_write(i2c_imx, &msgs[i]); |
| 447 | if (result) | ||
| 448 | goto fail0; | ||
| 447 | } | 449 | } |
| 448 | 450 | ||
| 449 | fail0: | 451 | fail0: |
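The i2c-imx fix matters because HZ / 1000 rounds to zero jiffies on common HZ settings, so the busy-wait could expire immediately; the patch waits msecs_to_jiffies(500) instead and reports -ETIMEDOUT, and the transfer loop now stops at the first failed message. A userspace analogue of the bounded wait using a monotonic clock, with bus_busy() as a hypothetical probe that never clears:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical bus-busy probe. */
    static bool bus_busy(void)
    {
            return true;    /* pretend the bus never becomes free */
    }

    /* Wait up to timeout_ms for the bus to go idle; -ETIMEDOUT on expiry.
     * This mirrors the kernel's time_after(jiffies, start + msecs_to_jiffies(500))
     * pattern with a monotonic clock. */
    static int wait_bus_idle(long timeout_ms)
    {
            struct timespec start, now;
            long elapsed_ms;

            clock_gettime(CLOCK_MONOTONIC, &start);
            while (bus_busy()) {
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
                                 (now.tv_nsec - start.tv_nsec) / 1000000;
                    if (elapsed_ms > timeout_ms)
                            return -ETIMEDOUT;
            }
            return 0;
    }

    int main(void)
    {
            printf("%d\n", wait_bus_idle(50));   /* -ETIMEDOUT after ~50 ms */
            return 0;
    }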
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 6bd0f19cd451..389ac6032a7b 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
| @@ -903,6 +903,11 @@ omap_i2c_probe(struct platform_device *pdev) | |||
| 903 | 903 | ||
| 904 | platform_set_drvdata(pdev, dev); | 904 | platform_set_drvdata(pdev, dev); |
| 905 | 905 | ||
| 906 | if (cpu_is_omap7xx()) | ||
| 907 | dev->reg_shift = 1; | ||
| 908 | else | ||
| 909 | dev->reg_shift = 2; | ||
| 910 | |||
| 906 | if ((r = omap_i2c_get_clocks(dev)) != 0) | 911 | if ((r = omap_i2c_get_clocks(dev)) != 0) |
| 907 | goto err_iounmap; | 912 | goto err_iounmap; |
| 908 | 913 | ||
| @@ -926,11 +931,6 @@ omap_i2c_probe(struct platform_device *pdev) | |||
| 926 | dev->b_hw = 1; /* Enable hardware fixes */ | 931 | dev->b_hw = 1; /* Enable hardware fixes */ |
| 927 | } | 932 | } |
| 928 | 933 | ||
| 929 | if (cpu_is_omap7xx()) | ||
| 930 | dev->reg_shift = 1; | ||
| 931 | else | ||
| 932 | dev->reg_shift = 2; | ||
| 933 | |||
| 934 | /* reset ASAP, clearing any IRQs */ | 934 | /* reset ASAP, clearing any IRQs */ |
| 935 | omap_i2c_init(dev); | 935 | omap_i2c_init(dev); |
| 936 | 936 | ||
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 247103372a06..a97e3fec8148 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c | |||
| @@ -173,6 +173,9 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data) | |||
| 173 | /* We still have something to talk about... */ | 173 | /* We still have something to talk about... */ |
| 174 | val = *alg_data->mif.buf++; | 174 | val = *alg_data->mif.buf++; |
| 175 | 175 | ||
| 176 | if (alg_data->mif.len == 1) | ||
| 177 | val |= stop_bit; | ||
| 178 | |||
| 176 | alg_data->mif.len--; | 179 | alg_data->mif.len--; |
| 177 | iowrite32(val, I2C_REG_TX(alg_data)); | 180 | iowrite32(val, I2C_REG_TX(alg_data)); |
| 178 | 181 | ||
| @@ -246,6 +249,9 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data) | |||
| 246 | __func__); | 249 | __func__); |
| 247 | 250 | ||
| 248 | if (alg_data->mif.len == 1) { | 251 | if (alg_data->mif.len == 1) { |
| 252 | /* Last byte, do not acknowledge next rcv. */ | ||
| 253 | val |= stop_bit; | ||
| 254 | |||
| 249 | /* | 255 | /* |
| 250 | * Enable interrupt RFDAIE (data in Rx fifo), | 256 | * Enable interrupt RFDAIE (data in Rx fifo), |
| 251 | * and disable DRMIE (need data for Tx) | 257 | * and disable DRMIE (need data for Tx) |
| @@ -633,6 +639,8 @@ static int __devinit i2c_pnx_probe(struct platform_device *pdev) | |||
| 633 | */ | 639 | */ |
| 634 | 640 | ||
| 635 | tmp = ((freq / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2; | 641 | tmp = ((freq / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2; |
| 642 | if (tmp > 0x3FF) | ||
| 643 | tmp = 0x3FF; | ||
| 636 | iowrite32(tmp, I2C_REG_CKH(alg_data)); | 644 | iowrite32(tmp, I2C_REG_CKH(alg_data)); |
| 637 | iowrite32(tmp, I2C_REG_CKL(alg_data)); | 645 | iowrite32(tmp, I2C_REG_CKL(alg_data)); |
| 638 | 646 | ||
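The i2c-pnx changes set the stop bit when the last byte is queued (and stop acknowledging before the final received byte), and clamp the computed clock divider so it cannot overflow the 10-bit CKH/CKL field. A sketch of the divider clamp; the I2C_PNX_SPEED_KHZ value here is only an assumption for the example:

    #include <stdio.h>

    #define I2C_PNX_SPEED_KHZ 100   /* illustrative bus speed */

    /* Clock-divider calculation as in the probe path above, with the
     * new clamp to the 10-bit register maximum (0x3FF). */
    static unsigned int clock_divider(unsigned long freq_hz)
    {
            unsigned long tmp = ((freq_hz / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2;

            if (tmp > 0x3FF)
                    tmp = 0x3FF;    /* never overflow the CKH/CKL register */
            return (unsigned int)tmp;
    }

    int main(void)
    {
            printf("13 MHz  -> %u\n", clock_divider(13000000));   /* 63 */
            printf("480 MHz -> %u\n", clock_divider(480000000));  /* clamped to 1023 */
            return 0;
    }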
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index 1f5b38be73bc..495be451d326 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c | |||
| @@ -498,7 +498,7 @@ static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate) | |||
| 498 | int i = 0; | 498 | int i = 0; |
| 499 | 499 | ||
| 500 | /* Locate the appropriate clock setting */ | 500 | /* Locate the appropriate clock setting */ |
| 501 | while (i < ARRAY_SIZE(stu300_clktable) && | 501 | while (i < ARRAY_SIZE(stu300_clktable) - 1 && |
| 502 | stu300_clktable[i].rate < clkrate) | 502 | stu300_clktable[i].rate < clkrate) |
| 503 | i++; | 503 | i++; |
| 504 | 504 | ||
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index ab87e4f7cec9..defce2877eef 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c | |||
| @@ -409,6 +409,8 @@ static struct pcmcia_device_id ide_ids[] = { | |||
| 409 | PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), | 409 | PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), |
| 410 | PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), | 410 | PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), |
| 411 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), | 411 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), |
| 412 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17), | ||
| 413 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10), | ||
| 412 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), | 414 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), |
| 413 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), | 415 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), |
| 414 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), | 416 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), |
| @@ -429,6 +431,8 @@ static struct pcmcia_device_id ide_ids[] = { | |||
| 429 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), | 431 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), |
| 430 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), | 432 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), |
| 431 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), | 433 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), |
| 434 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d), | ||
| 435 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47), | ||
| 432 | PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), | 436 | PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), |
| 433 | PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), | 437 | PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), |
| 434 | PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), | 438 | PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e3e9a36ea3b7..58ea0ecae7c3 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -1650,8 +1650,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
| 1650 | int previous, int *dd_idx, | 1650 | int previous, int *dd_idx, |
| 1651 | struct stripe_head *sh) | 1651 | struct stripe_head *sh) |
| 1652 | { | 1652 | { |
| 1653 | long stripe; | 1653 | sector_t stripe, stripe2; |
| 1654 | unsigned long chunk_number; | 1654 | sector_t chunk_number; |
| 1655 | unsigned int chunk_offset; | 1655 | unsigned int chunk_offset; |
| 1656 | int pd_idx, qd_idx; | 1656 | int pd_idx, qd_idx; |
| 1657 | int ddf_layout = 0; | 1657 | int ddf_layout = 0; |
| @@ -1671,18 +1671,13 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
| 1671 | */ | 1671 | */ |
| 1672 | chunk_offset = sector_div(r_sector, sectors_per_chunk); | 1672 | chunk_offset = sector_div(r_sector, sectors_per_chunk); |
| 1673 | chunk_number = r_sector; | 1673 | chunk_number = r_sector; |
| 1674 | BUG_ON(r_sector != chunk_number); | ||
| 1675 | 1674 | ||
| 1676 | /* | 1675 | /* |
| 1677 | * Compute the stripe number | 1676 | * Compute the stripe number |
| 1678 | */ | 1677 | */ |
| 1679 | stripe = chunk_number / data_disks; | 1678 | stripe = chunk_number; |
| 1680 | 1679 | *dd_idx = sector_div(stripe, data_disks); | |
| 1681 | /* | 1680 | stripe2 = stripe; |
| 1682 | * Compute the data disk and parity disk indexes inside the stripe | ||
| 1683 | */ | ||
| 1684 | *dd_idx = chunk_number % data_disks; | ||
| 1685 | |||
| 1686 | /* | 1681 | /* |
| 1687 | * Select the parity disk based on the user selected algorithm. | 1682 | * Select the parity disk based on the user selected algorithm. |
| 1688 | */ | 1683 | */ |
| @@ -1694,21 +1689,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
| 1694 | case 5: | 1689 | case 5: |
| 1695 | switch (algorithm) { | 1690 | switch (algorithm) { |
| 1696 | case ALGORITHM_LEFT_ASYMMETRIC: | 1691 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 1697 | pd_idx = data_disks - stripe % raid_disks; | 1692 | pd_idx = data_disks - sector_div(stripe2, raid_disks); |
| 1698 | if (*dd_idx >= pd_idx) | 1693 | if (*dd_idx >= pd_idx) |
| 1699 | (*dd_idx)++; | 1694 | (*dd_idx)++; |
| 1700 | break; | 1695 | break; |
| 1701 | case ALGORITHM_RIGHT_ASYMMETRIC: | 1696 | case ALGORITHM_RIGHT_ASYMMETRIC: |
| 1702 | pd_idx = stripe % raid_disks; | 1697 | pd_idx = sector_div(stripe2, raid_disks); |
| 1703 | if (*dd_idx >= pd_idx) | 1698 | if (*dd_idx >= pd_idx) |
| 1704 | (*dd_idx)++; | 1699 | (*dd_idx)++; |
| 1705 | break; | 1700 | break; |
| 1706 | case ALGORITHM_LEFT_SYMMETRIC: | 1701 | case ALGORITHM_LEFT_SYMMETRIC: |
| 1707 | pd_idx = data_disks - stripe % raid_disks; | 1702 | pd_idx = data_disks - sector_div(stripe2, raid_disks); |
| 1708 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; | 1703 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
| 1709 | break; | 1704 | break; |
| 1710 | case ALGORITHM_RIGHT_SYMMETRIC: | 1705 | case ALGORITHM_RIGHT_SYMMETRIC: |
| 1711 | pd_idx = stripe % raid_disks; | 1706 | pd_idx = sector_div(stripe2, raid_disks); |
| 1712 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; | 1707 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
| 1713 | break; | 1708 | break; |
| 1714 | case ALGORITHM_PARITY_0: | 1709 | case ALGORITHM_PARITY_0: |
| @@ -1728,7 +1723,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
| 1728 | 1723 | ||
| 1729 | switch (algorithm) { | 1724 | switch (algorithm) { |
| 1730 | case ALGORITHM_LEFT_ASYMMETRIC: | 1725 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 1731 | pd_idx = raid_disks - 1 - (stripe % raid_disks); | 1726 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
| 1732 | qd_idx = pd_idx + 1; | 1727 | qd_idx = pd_idx + 1; |
| 1733 | if (pd_idx == raid_disks-1) { | 1728 | if (pd_idx == raid_disks-1) { |
| 1734 | (*dd_idx)++; /* Q D D D P */ | 1729 | (*dd_idx)++; /* Q D D D P */ |
| @@ -1737,7 +1732,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
| 1737 | (*dd_idx) += 2; /* D D P Q D */ | 1732 | (*dd_idx) += 2; /* D D P Q D */ |
| 1738 | break; | 1733 | break; |
| 1739 | case ALGORITHM_RIGHT_ASYMMETRIC: | 1734 | case ALGORITHM_RIGHT_ASYMMETRIC: |
| 1740 | pd_idx = stripe % raid_disks; | 1735 | pd_idx = sector_div(stripe2, raid_disks); |
| 1741 | qd_idx = pd_idx + 1; | 1736 | qd_idx = pd_idx + 1; |
| 1742 | if (pd_idx == raid_disks-1) { | 1737 | if (pd_idx == raid_disks-1) { |
| 1743 | (*dd_idx)++; /* Q D D D P */ | 1738 | (*dd_idx)++; /* Q D D D P */ |
| @@ -1746,12 +1741,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
| 1746 | (*dd_idx) += 2; /* D D P Q D */ | 1741 | (*dd_idx) += 2; /* D D P Q D */ |
| 1747 | break; | 1742 | break; |
| 1748 | case ALGORITHM_LEFT_SYMMETRIC: | 1743 | case ALGORITHM_LEFT_SYMMETRIC: |
| 1749 | pd_idx = raid_disks - 1 - (stripe % raid_disks); | 1744 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
| 1750 | qd_idx = (pd_idx + 1) % raid_disks; | 1745 | qd_idx = (pd_idx + 1) % raid_disks; |
| 1751 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; | 1746 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; |
| 1752 | break; | 1747 | break; |
| 1753 | case ALGORITHM_RIGHT_SYMMETRIC: | 1748 | case ALGORITHM_RIGHT_SYMMETRIC: |
| 1754 | pd_idx = stripe % raid_disks; | 1749 | pd_idx = sector_div(stripe2, raid_disks); |
| 1755 | qd_idx = (pd_idx + 1) % raid_disks; | 1750 | qd_idx = (pd_idx + 1) % raid_disks; |
| 1756 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; | 1751 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; |
| 1757 | break; | 1752 | break; |
| @@ -1770,7 +1765,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
| 1770 | /* Exactly the same as RIGHT_ASYMMETRIC, but order | 1765 | /* Exactly the same as RIGHT_ASYMMETRIC, but order |
| 1771 | * of blocks for computing Q is different. | 1766 | * of blocks for computing Q is different. |
| 1772 | */ | 1767 | */ |
| 1773 | pd_idx = stripe % raid_disks; | 1768 | pd_idx = sector_div(stripe2, raid_disks); |
| 1774 | qd_idx = pd_idx + 1; | 1769 | qd_idx = pd_idx + 1; |
| 1775 | if (pd_idx == raid_disks-1) { | 1770 | if (pd_idx == raid_disks-1) { |
| 1776 | (*dd_idx)++; /* Q D D D P */ | 1771 | (*dd_idx)++; /* Q D D D P */ |
| @@ -1785,7 +1780,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
| 1785 | * D D D P Q rather than | 1780 | * D D D P Q rather than |
| 1786 | * Q D D D P | 1781 | * Q D D D P |
| 1787 | */ | 1782 | */ |
| 1788 | pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); | 1783 | stripe2 += 1; |
| 1784 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); | ||
| 1789 | qd_idx = pd_idx + 1; | 1785 | qd_idx = pd_idx + 1; |
| 1790 | if (pd_idx == raid_disks-1) { | 1786 | if (pd_idx == raid_disks-1) { |
| 1791 | (*dd_idx)++; /* Q D D D P */ | 1787 | (*dd_idx)++; /* Q D D D P */ |
| @@ -1797,7 +1793,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
| 1797 | 1793 | ||
| 1798 | case ALGORITHM_ROTATING_N_CONTINUE: | 1794 | case ALGORITHM_ROTATING_N_CONTINUE: |
| 1799 | /* Same as left_symmetric but Q is before P */ | 1795 | /* Same as left_symmetric but Q is before P */ |
| 1800 | pd_idx = raid_disks - 1 - (stripe % raid_disks); | 1796 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
| 1801 | qd_idx = (pd_idx + raid_disks - 1) % raid_disks; | 1797 | qd_idx = (pd_idx + raid_disks - 1) % raid_disks; |
| 1802 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; | 1798 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
| 1803 | ddf_layout = 1; | 1799 | ddf_layout = 1; |
| @@ -1805,27 +1801,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
| 1805 | 1801 | ||
| 1806 | case ALGORITHM_LEFT_ASYMMETRIC_6: | 1802 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
| 1807 | /* RAID5 left_asymmetric, with Q on last device */ | 1803 | /* RAID5 left_asymmetric, with Q on last device */ |
| 1808 | pd_idx = data_disks - stripe % (raid_disks-1); | 1804 | pd_idx = data_disks - sector_div(stripe2, raid_disks-1); |
| 1809 | if (*dd_idx >= pd_idx) | 1805 | if (*dd_idx >= pd_idx) |
| 1810 | (*dd_idx)++; | 1806 | (*dd_idx)++; |
| 1811 | qd_idx = raid_disks - 1; | 1807 | qd_idx = raid_disks - 1; |
| 1812 | break; | 1808 | break; |
| 1813 | 1809 | ||
| 1814 | case ALGORITHM_RIGHT_ASYMMETRIC_6: | 1810 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
| 1815 | pd_idx = stripe % (raid_disks-1); | 1811 | pd_idx = sector_div(stripe2, raid_disks-1); |
| 1816 | if (*dd_idx >= pd_idx) | 1812 | if (*dd_idx >= pd_idx) |
| 1817 | (*dd_idx)++; | 1813 | (*dd_idx)++; |
| 1818 | qd_idx = raid_disks - 1; | 1814 | qd_idx = raid_disks - 1; |
| 1819 | break; | 1815 | break; |
| 1820 | 1816 | ||
| 1821 | case ALGORITHM_LEFT_SYMMETRIC_6: | 1817 | case ALGORITHM_LEFT_SYMMETRIC_6: |
| 1822 | pd_idx = data_disks - stripe % (raid_disks-1); | 1818 | pd_idx = data_disks - sector_div(stripe2, raid_disks-1); |
| 1823 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); | 1819 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); |
| 1824 | qd_idx = raid_disks - 1; | 1820 | qd_idx = raid_disks - 1; |
| 1825 | break; | 1821 | break; |
| 1826 | 1822 | ||
| 1827 | case ALGORITHM_RIGHT_SYMMETRIC_6: | 1823 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
| 1828 | pd_idx = stripe % (raid_disks-1); | 1824 | pd_idx = sector_div(stripe2, raid_disks-1); |
| 1829 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); | 1825 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); |
| 1830 | qd_idx = raid_disks - 1; | 1826 | qd_idx = raid_disks - 1; |
| 1831 | break; | 1827 | break; |
| @@ -1870,14 +1866,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) | |||
| 1870 | : conf->algorithm; | 1866 | : conf->algorithm; |
| 1871 | sector_t stripe; | 1867 | sector_t stripe; |
| 1872 | int chunk_offset; | 1868 | int chunk_offset; |
| 1873 | int chunk_number, dummy1, dd_idx = i; | 1869 | sector_t chunk_number; |
| 1870 | int dummy1, dd_idx = i; | ||
| 1874 | sector_t r_sector; | 1871 | sector_t r_sector; |
| 1875 | struct stripe_head sh2; | 1872 | struct stripe_head sh2; |
| 1876 | 1873 | ||
| 1877 | 1874 | ||
| 1878 | chunk_offset = sector_div(new_sector, sectors_per_chunk); | 1875 | chunk_offset = sector_div(new_sector, sectors_per_chunk); |
| 1879 | stripe = new_sector; | 1876 | stripe = new_sector; |
| 1880 | BUG_ON(new_sector != stripe); | ||
| 1881 | 1877 | ||
| 1882 | if (i == sh->pd_idx) | 1878 | if (i == sh->pd_idx) |
| 1883 | return 0; | 1879 | return 0; |
| @@ -1970,7 +1966,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) | |||
| 1970 | } | 1966 | } |
| 1971 | 1967 | ||
| 1972 | chunk_number = stripe * data_disks + i; | 1968 | chunk_number = stripe * data_disks + i; |
| 1973 | r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; | 1969 | r_sector = chunk_number * sectors_per_chunk + chunk_offset; |
| 1974 | 1970 | ||
| 1975 | check = raid5_compute_sector(conf, r_sector, | 1971 | check = raid5_compute_sector(conf, r_sector, |
| 1976 | previous, &dummy1, &sh2); | 1972 | previous, &dummy1, &sh2); |
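The raid5 rework keeps stripe arithmetic in sector_t and replaces '%' and '/' with sector_div(), which 32-bit kernels can do without 64-bit modulo; stripe2 is a scratch copy because sector_div() divides its argument in place. A userspace analogue of the helper and of the ALGORITHM_LEFT_ASYMMETRIC parity placement, with arbitrary sample geometry:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's sector_div(): divides *n by base
     * in place and returns the remainder. In the kernel this avoids 64-bit
     * modulo on 32-bit platforms. */
    static uint32_t my_sector_div(uint64_t *n, uint32_t base)
    {
            uint32_t rem = (uint32_t)(*n % base);

            *n /= base;
            return rem;
    }

    int main(void)
    {
            uint64_t r_sector = 123456789012ULL;   /* arbitrary sample sector */
            unsigned int sectors_per_chunk = 128, data_disks = 4, raid_disks = 5;
            uint64_t stripe, stripe2;
            uint32_t chunk_offset, dd_idx, pd_idx;

            chunk_offset = my_sector_div(&r_sector, sectors_per_chunk);
            stripe  = r_sector;                    /* chunk number */
            dd_idx  = my_sector_div(&stripe, data_disks);
            stripe2 = stripe;                      /* scratch copy: sector_div consumes it */

            /* ALGORITHM_LEFT_ASYMMETRIC parity placement, as in the hunk above. */
            pd_idx = data_disks - my_sector_div(&stripe2, raid_disks);
            if (dd_idx >= pd_idx)
                    dd_idx++;

            printf("chunk_offset=%u dd_idx=%u pd_idx=%u\n",
                   chunk_offset, dd_idx, pd_idx);
            return 0;
    }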
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 2191c8d896a0..0d0d625fece2 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
| @@ -311,6 +311,22 @@ config TI_DAC7512 | |||
| 311 | This driver can also be built as a module. If so, the module | 311 | This driver can also be built as a module. If so, the module |
| 312 | will be called ti_dac7512. | 312 | will be called ti_dac7512. |
| 313 | 313 | ||
| 314 | config VMWARE_BALLOON | ||
| 315 | tristate "VMware Balloon Driver" | ||
| 316 | depends on X86 | ||
| 317 | help | ||
| 318 | This is VMware physical memory management driver which acts | ||
| 319 | like a "balloon" that can be inflated to reclaim physical pages | ||
| 320 | by reserving them in the guest and invalidating them in the | ||
| 321 | monitor, freeing up the underlying machine pages so they can | ||
| 322 | be allocated to other guests. The balloon can also be deflated | ||
| 323 | to allow the guest to use more physical memory. | ||
| 324 | |||
| 325 | If unsure, say N. | ||
| 326 | |||
| 327 | To compile this driver as a module, choose M here: the | ||
| 328 | module will be called vmware_balloon. | ||
| 329 | |||
| 314 | source "drivers/misc/c2port/Kconfig" | 330 | source "drivers/misc/c2port/Kconfig" |
| 315 | source "drivers/misc/eeprom/Kconfig" | 331 | source "drivers/misc/eeprom/Kconfig" |
| 316 | source "drivers/misc/cb710/Kconfig" | 332 | source "drivers/misc/cb710/Kconfig" |
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 27c484355414..7b6f7eefdf8d 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile | |||
| @@ -29,3 +29,4 @@ obj-$(CONFIG_C2PORT) += c2port/ | |||
| 29 | obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ | 29 | obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ |
| 30 | obj-y += eeprom/ | 30 | obj-y += eeprom/ |
| 31 | obj-y += cb710/ | 31 | obj-y += cb710/ |
| 32 | obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o | ||
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c new file mode 100644 index 000000000000..e7161c4e3798 --- /dev/null +++ b/drivers/misc/vmware_balloon.c | |||
| @@ -0,0 +1,832 @@ | |||
| 1 | /* | ||
| 2 | * VMware Balloon driver. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License as published by the | ||
| 8 | * Free Software Foundation; version 2 of the License and no later version. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
| 13 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
| 14 | * details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 19 | * | ||
| 20 | * Maintained by: Dmitry Torokhov <dtor@vmware.com> | ||
| 21 | */ | ||
| 22 | |||
| 23 | /* | ||
| 24 | * This is VMware physical memory management driver for Linux. The driver | ||
| 25 | * acts like a "balloon" that can be inflated to reclaim physical pages by | ||
| 26 | * reserving them in the guest and invalidating them in the monitor, | ||
| 27 | * freeing up the underlying machine pages so they can be allocated to | ||
| 28 | * other guests. The balloon can also be deflated to allow the guest to | ||
| 29 | * use more physical memory. Higher level policies can control the sizes | ||
| 30 | * of balloons in VMs in order to manage physical memory resources. | ||
| 31 | */ | ||
| 32 | |||
| 33 | //#define DEBUG | ||
| 34 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 35 | |||
| 36 | #include <linux/types.h> | ||
| 37 | #include <linux/kernel.h> | ||
| 38 | #include <linux/mm.h> | ||
| 39 | #include <linux/sched.h> | ||
| 40 | #include <linux/module.h> | ||
| 41 | #include <linux/workqueue.h> | ||
| 42 | #include <linux/debugfs.h> | ||
| 43 | #include <linux/seq_file.h> | ||
| 44 | #include <asm/vmware.h> | ||
| 45 | |||
| 46 | MODULE_AUTHOR("VMware, Inc."); | ||
| 47 | MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver"); | ||
| 48 | MODULE_VERSION("1.2.1.0-K"); | ||
| 49 | MODULE_ALIAS("dmi:*:svnVMware*:*"); | ||
| 50 | MODULE_ALIAS("vmware_vmmemctl"); | ||
| 51 | MODULE_LICENSE("GPL"); | ||
| 52 | |||
| 53 | /* | ||
| 54 | * Various constants controlling the rate of inflating/deflating the balloon, | ||
| 55 | * measured in pages. | ||
| 56 | */ | ||
| 57 | |||
| 58 | /* | ||
| 59 | * Rate of allocating memory when there is no memory pressure | ||
| 60 | * (driver performs non-sleeping allocations). | ||
| 61 | */ | ||
| 62 | #define VMW_BALLOON_NOSLEEP_ALLOC_MAX 16384U | ||
| 63 | |||
| 64 | /* | ||
| 65 | * Rates of memory allocation when the guest experiences memory pressure | ||
| 66 | * (driver performs sleeping allocations). | ||
| 67 | */ | ||
| 68 | #define VMW_BALLOON_RATE_ALLOC_MIN 512U | ||
| 69 | #define VMW_BALLOON_RATE_ALLOC_MAX 2048U | ||
| 70 | #define VMW_BALLOON_RATE_ALLOC_INC 16U | ||
| 71 | |||
| 72 | /* | ||
| 73 | * Rates for releasing pages while deflating balloon. | ||
| 74 | */ | ||
| 75 | #define VMW_BALLOON_RATE_FREE_MIN 512U | ||
| 76 | #define VMW_BALLOON_RATE_FREE_MAX 16384U | ||
| 77 | #define VMW_BALLOON_RATE_FREE_INC 16U | ||
| 78 | |||
| 79 | /* | ||
| 80 | * When guest is under memory pressure, use a reduced page allocation | ||
| 81 | * rate for next several cycles. | ||
| 82 | */ | ||
| 83 | #define VMW_BALLOON_SLOW_CYCLES 4 | ||
| 84 | |||
| 85 | /* | ||
| 86 | * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't | ||
| 87 | * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use | ||
| 88 | * __GFP_NOWARN to suppress page allocation failure warnings. | ||
| 89 | */ | ||
| 90 | #define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN) | ||
| 91 | |||
| 92 | /* | ||
| 93 | * Use GFP_HIGHUSER when executing in a separate kernel thread | ||
| 94 | * context and allocation can sleep. This is less stressful to | ||
| 95 | * the guest memory system, since it allows the thread to block | ||
| 96 | * while memory is reclaimed, and won't take pages from emergency | ||
| 97 | * low-memory pools. | ||
| 98 | */ | ||
| 99 | #define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER) | ||
| 100 | |||
| 101 | /* Maximum number of page allocations without yielding processor */ | ||
| 102 | #define VMW_BALLOON_YIELD_THRESHOLD 1024 | ||
| 103 | |||
| 104 | |||
| 105 | /* | ||
| 106 | * Hypervisor communication port definitions. | ||
| 107 | */ | ||
| 108 | #define VMW_BALLOON_HV_PORT 0x5670 | ||
| 109 | #define VMW_BALLOON_HV_MAGIC 0x456c6d6f | ||
| 110 | #define VMW_BALLOON_PROTOCOL_VERSION 2 | ||
| 111 | #define VMW_BALLOON_GUEST_ID 1 /* Linux */ | ||
| 112 | |||
| 113 | #define VMW_BALLOON_CMD_START 0 | ||
| 114 | #define VMW_BALLOON_CMD_GET_TARGET 1 | ||
| 115 | #define VMW_BALLOON_CMD_LOCK 2 | ||
| 116 | #define VMW_BALLOON_CMD_UNLOCK 3 | ||
| 117 | #define VMW_BALLOON_CMD_GUEST_ID 4 | ||
| 118 | |||
| 119 | /* error codes */ | ||
| 120 | #define VMW_BALLOON_SUCCESS 0 | ||
| 121 | #define VMW_BALLOON_FAILURE -1 | ||
| 122 | #define VMW_BALLOON_ERROR_CMD_INVALID 1 | ||
| 123 | #define VMW_BALLOON_ERROR_PPN_INVALID 2 | ||
| 124 | #define VMW_BALLOON_ERROR_PPN_LOCKED 3 | ||
| 125 | #define VMW_BALLOON_ERROR_PPN_UNLOCKED 4 | ||
| 126 | #define VMW_BALLOON_ERROR_PPN_PINNED 5 | ||
| 127 | #define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6 | ||
| 128 | #define VMW_BALLOON_ERROR_RESET 7 | ||
| 129 | #define VMW_BALLOON_ERROR_BUSY 8 | ||
| 130 | |||
| 131 | #define VMWARE_BALLOON_CMD(cmd, data, result) \ | ||
| 132 | ({ \ | ||
| 133 | unsigned long __stat, __dummy1, __dummy2; \ | ||
| 134 | __asm__ __volatile__ ("inl (%%dx)" : \ | ||
| 135 | "=a"(__stat), \ | ||
| 136 | "=c"(__dummy1), \ | ||
| 137 | "=d"(__dummy2), \ | ||
| 138 | "=b"(result) : \ | ||
| 139 | "0"(VMW_BALLOON_HV_MAGIC), \ | ||
| 140 | "1"(VMW_BALLOON_CMD_##cmd), \ | ||
| 141 | "2"(VMW_BALLOON_HV_PORT), \ | ||
| 142 | "3"(data) : \ | ||
| 143 | "memory"); \ | ||
| 144 | result &= -1UL; \ | ||
| 145 | __stat & -1UL; \ | ||
| 146 | }) | ||
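The macro above is the driver's entire hypervisor interface: the inl on port 0x5670 traps to the monitor with the magic value in EAX, the command number in ECX and the command argument in EBX; the status comes back in EAX and any result value in EBX. A minimal usage sketch, mirroring the calls made further down in the driver ('guest_ram_pages' is a hypothetical placeholder for the guest memory size in pages):

	/* Sketch only -- see vmballoon_send_get_target() below for the real caller. */
	unsigned long status, target;
	unsigned long limit = guest_ram_pages;	/* hypothetical: guest RAM size in pages */

	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
	if (status == VMW_BALLOON_SUCCESS)
		pr_debug("host wants a %lu page balloon\n", target);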
| 147 | |||
| 148 | #ifdef CONFIG_DEBUG_FS | ||
| 149 | struct vmballoon_stats { | ||
| 150 | unsigned int timer; | ||
| 151 | |||
| 152 | /* allocation statistics */ | ||
| 153 | unsigned int alloc; | ||
| 154 | unsigned int alloc_fail; | ||
| 155 | unsigned int sleep_alloc; | ||
| 156 | unsigned int sleep_alloc_fail; | ||
| 157 | unsigned int refused_alloc; | ||
| 158 | unsigned int refused_free; | ||
| 159 | unsigned int free; | ||
| 160 | |||
| 161 | /* monitor operations */ | ||
| 162 | unsigned int lock; | ||
| 163 | unsigned int lock_fail; | ||
| 164 | unsigned int unlock; | ||
| 165 | unsigned int unlock_fail; | ||
| 166 | unsigned int target; | ||
| 167 | unsigned int target_fail; | ||
| 168 | unsigned int start; | ||
| 169 | unsigned int start_fail; | ||
| 170 | unsigned int guest_type; | ||
| 171 | unsigned int guest_type_fail; | ||
| 172 | }; | ||
| 173 | |||
| 174 | #define STATS_INC(stat) (stat)++ | ||
| 175 | #else | ||
| 176 | #define STATS_INC(stat) | ||
| 177 | #endif | ||
| 178 | |||
| 179 | struct vmballoon { | ||
| 180 | |||
| 181 | /* list of reserved physical pages */ | ||
| 182 | struct list_head pages; | ||
| 183 | |||
| 184 | /* transient list of non-balloonable pages */ | ||
| 185 | struct list_head refused_pages; | ||
| 186 | |||
| 187 | /* balloon size in pages */ | ||
| 188 | unsigned int size; | ||
| 189 | unsigned int target; | ||
| 190 | |||
| 191 | /* reset flag */ | ||
| 192 | bool reset_required; | ||
| 193 | |||
| 194 | /* adjustment rates (pages per second) */ | ||
| 195 | unsigned int rate_alloc; | ||
| 196 | unsigned int rate_free; | ||
| 197 | |||
| 198 | /* slow down page allocations for the next few cycles */ | ||
| 199 | unsigned int slow_allocation_cycles; | ||
| 200 | |||
| 201 | #ifdef CONFIG_DEBUG_FS | ||
| 202 | /* statistics */ | ||
| 203 | struct vmballoon_stats stats; | ||
| 204 | |||
| 205 | /* debugfs file exporting statistics */ | ||
| 206 | struct dentry *dbg_entry; | ||
| 207 | #endif | ||
| 208 | |||
| 209 | struct sysinfo sysinfo; | ||
| 210 | |||
| 211 | struct delayed_work dwork; | ||
| 212 | }; | ||
| 213 | |||
| 214 | static struct vmballoon balloon; | ||
| 215 | static struct workqueue_struct *vmballoon_wq; | ||
| 216 | |||
| 217 | /* | ||
| 218 | * Send "start" command to the host, communicating supported version | ||
| 219 | * of the protocol. | ||
| 220 | */ | ||
| 221 | static bool vmballoon_send_start(struct vmballoon *b) | ||
| 222 | { | ||
| 223 | unsigned long status, dummy; | ||
| 224 | |||
| 225 | STATS_INC(b->stats.start); | ||
| 226 | |||
| 227 | status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy); | ||
| 228 | if (status == VMW_BALLOON_SUCCESS) | ||
| 229 | return true; | ||
| 230 | |||
| 231 | pr_debug("%s - failed, hv returns %ld\n", __func__, status); | ||
| 232 | STATS_INC(b->stats.start_fail); | ||
| 233 | return false; | ||
| 234 | } | ||
| 235 | |||
| 236 | static bool vmballoon_check_status(struct vmballoon *b, unsigned long status) | ||
| 237 | { | ||
| 238 | switch (status) { | ||
| 239 | case VMW_BALLOON_SUCCESS: | ||
| 240 | return true; | ||
| 241 | |||
| 242 | case VMW_BALLOON_ERROR_RESET: | ||
| 243 | b->reset_required = true; | ||
| 244 | /* fall through */ | ||
| 245 | |||
| 246 | default: | ||
| 247 | return false; | ||
| 248 | } | ||
| 249 | } | ||
| 250 | |||
| 251 | /* | ||
| 252 | * Communicate guest type to the host so that it can adjust ballooning | ||
| 253 | * algorithm to the one most appropriate for the guest. This command | ||
| 254 | * is normally issued after sending "start" command and is part of | ||
| 255 | * standard reset sequence. | ||
| 256 | */ | ||
| 257 | static bool vmballoon_send_guest_id(struct vmballoon *b) | ||
| 258 | { | ||
| 259 | unsigned long status, dummy; | ||
| 260 | |||
| 261 | status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy); | ||
| 262 | |||
| 263 | STATS_INC(b->stats.guest_type); | ||
| 264 | |||
| 265 | if (vmballoon_check_status(b, status)) | ||
| 266 | return true; | ||
| 267 | |||
| 268 | pr_debug("%s - failed, hv returns %ld\n", __func__, status); | ||
| 269 | STATS_INC(b->stats.guest_type_fail); | ||
| 270 | return false; | ||
| 271 | } | ||
| 272 | |||
| 273 | /* | ||
| 274 | * Retrieve desired balloon size from the host. | ||
| 275 | */ | ||
| 276 | static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target) | ||
| 277 | { | ||
| 278 | unsigned long status; | ||
| 279 | unsigned long target; | ||
| 280 | unsigned long limit; | ||
| 281 | u32 limit32; | ||
| 282 | |||
| 283 | /* | ||
| 284 | * si_meminfo() is cheap. Moreover, we want to provide dynamic | ||
| 285 | * max balloon size later. So let us call si_meminfo() every | ||
| 286 | * iteration. | ||
| 287 | */ | ||
| 288 | si_meminfo(&b->sysinfo); | ||
| 289 | limit = b->sysinfo.totalram; | ||
| 290 | |||
| 291 | /* Ensure limit fits in 32-bits */ | ||
| 292 | limit32 = (u32)limit; | ||
| 293 | if (limit != limit32) | ||
| 294 | return false; | ||
| 295 | |||
| 296 | /* update stats */ | ||
| 297 | STATS_INC(b->stats.target); | ||
| 298 | |||
| 299 | status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target); | ||
| 300 | if (vmballoon_check_status(b, status)) { | ||
| 301 | *new_target = target; | ||
| 302 | return true; | ||
| 303 | } | ||
| 304 | |||
| 305 | pr_debug("%s - failed, hv returns %ld\n", __func__, status); | ||
| 306 | STATS_INC(b->stats.target_fail); | ||
| 307 | return false; | ||
| 308 | } | ||
| 309 | |||
| 310 | /* | ||
| 311 | * Notify the host about an allocated page so that the host can use it without | ||
| 312 | * fear that the guest will need it. The host may reject some pages; we need to | ||
| 313 | * check the return value and maybe submit a different page. | ||
| 314 | */ | ||
| 315 | static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn) | ||
| 316 | { | ||
| 317 | unsigned long status, dummy; | ||
| 318 | u32 pfn32; | ||
| 319 | |||
| 320 | pfn32 = (u32)pfn; | ||
| 321 | if (pfn32 != pfn) | ||
| 322 | return false; | ||
| 323 | |||
| 324 | STATS_INC(b->stats.lock); | ||
| 325 | |||
| 326 | status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy); | ||
| 327 | if (vmballoon_check_status(b, status)) | ||
| 328 | return true; | ||
| 329 | |||
| 330 | pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); | ||
| 331 | STATS_INC(b->stats.lock_fail); | ||
| 332 | return false; | ||
| 333 | } | ||
| 334 | |||
| 335 | /* | ||
| 336 | * Notify the host that guest intends to release given page back into | ||
| 337 | * the pool of available (to the guest) pages. | ||
| 338 | */ | ||
| 339 | static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn) | ||
| 340 | { | ||
| 341 | unsigned long status, dummy; | ||
| 342 | u32 pfn32; | ||
| 343 | |||
| 344 | pfn32 = (u32)pfn; | ||
| 345 | if (pfn32 != pfn) | ||
| 346 | return false; | ||
| 347 | |||
| 348 | STATS_INC(b->stats.unlock); | ||
| 349 | |||
| 350 | status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy); | ||
| 351 | if (vmballoon_check_status(b, status)) | ||
| 352 | return true; | ||
| 353 | |||
| 354 | pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); | ||
| 355 | STATS_INC(b->stats.unlock_fail); | ||
| 356 | return false; | ||
| 357 | } | ||
| 358 | |||
| 359 | /* | ||
| 360 | * Quickly release all pages allocated for the balloon. This function is | ||
| 361 | * called when host decides to "reset" balloon for one reason or another. | ||
| 362 | * Unlike normal "deflate" we do not (shall not) notify host of the pages | ||
| 363 | * being released. | ||
| 364 | */ | ||
| 365 | static void vmballoon_pop(struct vmballoon *b) | ||
| 366 | { | ||
| 367 | struct page *page, *next; | ||
| 368 | unsigned int count = 0; | ||
| 369 | |||
| 370 | list_for_each_entry_safe(page, next, &b->pages, lru) { | ||
| 371 | list_del(&page->lru); | ||
| 372 | __free_page(page); | ||
| 373 | STATS_INC(b->stats.free); | ||
| 374 | b->size--; | ||
| 375 | |||
| 376 | if (++count >= b->rate_free) { | ||
| 377 | count = 0; | ||
| 378 | cond_resched(); | ||
| 379 | } | ||
| 380 | } | ||
| 381 | } | ||
| 382 | |||
| 383 | /* | ||
| 384 | * Perform standard reset sequence by popping the balloon (in case it | ||
| 385 | * is not empty) and then restarting protocol. This operation normally | ||
| 386 | * happens when host responds with VMW_BALLOON_ERROR_RESET to a command. | ||
| 387 | */ | ||
| 388 | static void vmballoon_reset(struct vmballoon *b) | ||
| 389 | { | ||
| 390 | /* free all pages, skipping monitor unlock */ | ||
| 391 | vmballoon_pop(b); | ||
| 392 | |||
| 393 | if (vmballoon_send_start(b)) { | ||
| 394 | b->reset_required = false; | ||
| 395 | if (!vmballoon_send_guest_id(b)) | ||
| 396 | pr_err("failed to send guest ID to the host\n"); | ||
| 397 | } | ||
| 398 | } | ||
| 399 | |||
| 400 | /* | ||
| 401 | * Allocate (or reserve) a page for the balloon and notify the host. If host | ||
| 402 | * refuses the page, put it on the "refused" list and allocate another one until the host | ||
| 403 | * is satisfied. "Refused" pages are released at the end of inflation cycle | ||
| 404 | * (when we allocate b->rate_alloc pages). | ||
| 405 | */ | ||
| 406 | static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep) | ||
| 407 | { | ||
| 408 | struct page *page; | ||
| 409 | gfp_t flags; | ||
| 410 | bool locked = false; | ||
| 411 | |||
| 412 | do { | ||
| 413 | if (!can_sleep) | ||
| 414 | STATS_INC(b->stats.alloc); | ||
| 415 | else | ||
| 416 | STATS_INC(b->stats.sleep_alloc); | ||
| 417 | |||
| 418 | flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP; | ||
| 419 | page = alloc_page(flags); | ||
| 420 | if (!page) { | ||
| 421 | if (!can_sleep) | ||
| 422 | STATS_INC(b->stats.alloc_fail); | ||
| 423 | else | ||
| 424 | STATS_INC(b->stats.sleep_alloc_fail); | ||
| 425 | return -ENOMEM; | ||
| 426 | } | ||
| 427 | |||
| 428 | /* inform monitor */ | ||
| 429 | locked = vmballoon_send_lock_page(b, page_to_pfn(page)); | ||
| 430 | if (!locked) { | ||
| 431 | if (b->reset_required) { | ||
| 432 | __free_page(page); | ||
| 433 | return -EIO; | ||
| 434 | } | ||
| 435 | |||
| 436 | /* place on list of non-balloonable pages, retry allocation */ | ||
| 437 | list_add(&page->lru, &b->refused_pages); | ||
| 438 | STATS_INC(b->stats.refused_alloc); | ||
| 439 | } | ||
| 440 | } while (!locked); | ||
| 441 | |||
| 442 | /* track allocated page */ | ||
| 443 | list_add(&page->lru, &b->pages); | ||
| 444 | |||
| 445 | /* update balloon size */ | ||
| 446 | b->size++; | ||
| 447 | |||
| 448 | return 0; | ||
| 449 | } | ||
| 450 | |||
| 451 | /* | ||
| 452 | * Release the page allocated for the balloon. Note that we first notify | ||
| 453 | * the host so it can make sure the page will be available for the guest | ||
| 454 | * to use, if needed. | ||
| 455 | */ | ||
| 456 | static int vmballoon_release_page(struct vmballoon *b, struct page *page) | ||
| 457 | { | ||
| 458 | if (!vmballoon_send_unlock_page(b, page_to_pfn(page))) | ||
| 459 | return -EIO; | ||
| 460 | |||
| 461 | list_del(&page->lru); | ||
| 462 | |||
| 463 | /* deallocate page */ | ||
| 464 | __free_page(page); | ||
| 465 | STATS_INC(b->stats.free); | ||
| 466 | |||
| 467 | /* update balloon size */ | ||
| 468 | b->size--; | ||
| 469 | |||
| 470 | return 0; | ||
| 471 | } | ||
| 472 | |||
| 473 | /* | ||
| 474 | * Release pages that were allocated while attempting to inflate the | ||
| 475 | * balloon but were refused by the host for one reason or another. | ||
| 476 | */ | ||
| 477 | static void vmballoon_release_refused_pages(struct vmballoon *b) | ||
| 478 | { | ||
| 479 | struct page *page, *next; | ||
| 480 | |||
| 481 | list_for_each_entry_safe(page, next, &b->refused_pages, lru) { | ||
| 482 | list_del(&page->lru); | ||
| 483 | __free_page(page); | ||
| 484 | STATS_INC(b->stats.refused_free); | ||
| 485 | } | ||
| 486 | } | ||
| 487 | |||
| 488 | /* | ||
| 489 | * Inflate the balloon towards its target size. Note that we try to limit | ||
| 490 | * the rate of allocation to make sure we are not choking the rest of the | ||
| 491 | * system. | ||
| 492 | */ | ||
| 493 | static void vmballoon_inflate(struct vmballoon *b) | ||
| 494 | { | ||
| 495 | unsigned int goal; | ||
| 496 | unsigned int rate; | ||
| 497 | unsigned int i; | ||
| 498 | unsigned int allocations = 0; | ||
| 499 | int error = 0; | ||
| 500 | bool alloc_can_sleep = false; | ||
| 501 | |||
| 502 | pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); | ||
| 503 | |||
| 504 | /* | ||
| 505 | * First try NOSLEEP page allocations to inflate balloon. | ||
| 506 | * | ||
| 507 | * If we do not throttle nosleep allocations, we can drain all | ||
| 508 | * free pages in the guest quickly (if the balloon target is high). | ||
| 509 | * As a side-effect, draining free pages helps to inform (force) | ||
| 510 | * the guest to start swapping if balloon target is not met yet, | ||
| 511 | * which is a desired behavior. However, balloon driver can consume | ||
| 512 | * all available CPU cycles if too many pages are allocated in a | ||
| 513 | * second. Therefore, we throttle nosleep allocations even when | ||
| 514 | * the guest is not under memory pressure. OTOH, if we have already | ||
| 515 | * predicted that the guest is under memory pressure, then we | ||
| 516 | * slow down page allocations considerably. | ||
| 517 | */ | ||
| 518 | |||
| 519 | goal = b->target - b->size; | ||
| 520 | /* | ||
| 521 | * Start with the no-sleep allocation rate, which may be higher | ||
| 522 | * than the sleeping allocation rate. | ||
| 523 | */ | ||
| 524 | rate = b->slow_allocation_cycles ? | ||
| 525 | b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX; | ||
| 526 | |||
| 527 | pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n", | ||
| 528 | __func__, goal, rate, b->rate_alloc); | ||
| 529 | |||
| 530 | for (i = 0; i < goal; i++) { | ||
| 531 | |||
| 532 | error = vmballoon_reserve_page(b, alloc_can_sleep); | ||
| 533 | if (error) { | ||
| 534 | if (error != -ENOMEM) { | ||
| 535 | /* | ||
| 536 | * Not a page allocation failure, stop this | ||
| 537 | * cycle. Maybe we'll get new target from | ||
| 538 | * the host soon. | ||
| 539 | */ | ||
| 540 | break; | ||
| 541 | } | ||
| 542 | |||
| 543 | if (alloc_can_sleep) { | ||
| 544 | /* | ||
| 545 | * CANSLEEP page allocation failed, so guest | ||
| 546 | * is under severe memory pressure. Quickly | ||
| 547 | * decrease allocation rate. | ||
| 548 | */ | ||
| 549 | b->rate_alloc = max(b->rate_alloc / 2, | ||
| 550 | VMW_BALLOON_RATE_ALLOC_MIN); | ||
| 551 | break; | ||
| 552 | } | ||
| 553 | |||
| 554 | /* | ||
| 555 | * NOSLEEP page allocation failed, so the guest is | ||
| 556 | * under memory pressure. Let us slow down page | ||
| 557 | * allocations for next few cycles so that the guest | ||
| 558 | * gets out of memory pressure. Also, if we already | ||
| 559 | * allocated b->rate_alloc pages, let's pause, | ||
| 560 | * otherwise switch to sleeping allocations. | ||
| 561 | */ | ||
| 562 | b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES; | ||
| 563 | |||
| 564 | if (i >= b->rate_alloc) | ||
| 565 | break; | ||
| 566 | |||
| 567 | alloc_can_sleep = true; | ||
| 568 | /* Lower rate for sleeping allocations. */ | ||
| 569 | rate = b->rate_alloc; | ||
| 570 | } | ||
| 571 | |||
| 572 | if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) { | ||
| 573 | cond_resched(); | ||
| 574 | allocations = 0; | ||
| 575 | } | ||
| 576 | |||
| 577 | if (i >= rate) { | ||
| 578 | /* We allocated enough pages, let's take a break. */ | ||
| 579 | break; | ||
| 580 | } | ||
| 581 | } | ||
| 582 | |||
| 583 | /* | ||
| 584 | * We reached our goal without failures so try increasing | ||
| 585 | * allocation rate. | ||
| 586 | */ | ||
| 587 | if (error == 0 && i >= b->rate_alloc) { | ||
| 588 | unsigned int mult = i / b->rate_alloc; | ||
| 589 | |||
| 590 | b->rate_alloc = | ||
| 591 | min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC, | ||
| 592 | VMW_BALLOON_RATE_ALLOC_MAX); | ||
| 593 | } | ||
| 594 | |||
| 595 | vmballoon_release_refused_pages(b); | ||
| 596 | } | ||
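Two of the branches above implement an adaptive rate: a failed CANSLEEP allocation halves b->rate_alloc (bounded below), while reaching the goal without errors raises it additively (bounded above). Isolated for clarity, the policy amounts to the following sketch; the function name is the editor's, the constants and the min()/max() helpers are the driver's and the kernel's:

	static unsigned int vmballoon_adapt_alloc_rate(unsigned int rate,
						       bool sleep_alloc_failed,
						       unsigned int allocated)
	{
		if (sleep_alloc_failed)
			return max(rate / 2, VMW_BALLOON_RATE_ALLOC_MIN);

		if (allocated >= rate) {
			unsigned int mult = allocated / rate;

			rate = min(rate + mult * VMW_BALLOON_RATE_ALLOC_INC,
				   VMW_BALLOON_RATE_ALLOC_MAX);
		}
		return rate;
	}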
| 597 | |||
| 598 | /* | ||
| 599 | * Decrease the size of the balloon allowing guest to use more memory. | ||
| 600 | */ | ||
| 601 | static void vmballoon_deflate(struct vmballoon *b) | ||
| 602 | { | ||
| 603 | struct page *page, *next; | ||
| 604 | unsigned int i = 0; | ||
| 605 | unsigned int goal; | ||
| 606 | int error; | ||
| 607 | |||
| 608 | pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); | ||
| 609 | |||
| 610 | /* limit deallocation rate */ | ||
| 611 | goal = min(b->size - b->target, b->rate_free); | ||
| 612 | |||
| 613 | pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free); | ||
| 614 | |||
| 615 | /* free pages to reach target */ | ||
| 616 | list_for_each_entry_safe(page, next, &b->pages, lru) { | ||
| 617 | error = vmballoon_release_page(b, page); | ||
| 618 | if (error) { | ||
| 619 | /* quickly decrease rate in case of error */ | ||
| 620 | b->rate_free = max(b->rate_free / 2, | ||
| 621 | VMW_BALLOON_RATE_FREE_MIN); | ||
| 622 | return; | ||
| 623 | } | ||
| 624 | |||
| 625 | if (++i >= goal) | ||
| 626 | break; | ||
| 627 | } | ||
| 628 | |||
| 629 | /* slowly increase rate if there were no errors */ | ||
| 630 | b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC, | ||
| 631 | VMW_BALLOON_RATE_FREE_MAX); | ||
| 632 | } | ||
| 633 | |||
| 634 | /* | ||
| 635 | * Balloon work function: reset protocol, if needed, get the new size and | ||
| 636 | * adjust balloon as needed. Repeat in 1 sec. | ||
| 637 | */ | ||
| 638 | static void vmballoon_work(struct work_struct *work) | ||
| 639 | { | ||
| 640 | struct delayed_work *dwork = to_delayed_work(work); | ||
| 641 | struct vmballoon *b = container_of(dwork, struct vmballoon, dwork); | ||
| 642 | unsigned int target; | ||
| 643 | |||
| 644 | STATS_INC(b->stats.timer); | ||
| 645 | |||
| 646 | if (b->reset_required) | ||
| 647 | vmballoon_reset(b); | ||
| 648 | |||
| 649 | if (b->slow_allocation_cycles > 0) | ||
| 650 | b->slow_allocation_cycles--; | ||
| 651 | |||
| 652 | if (vmballoon_send_get_target(b, &target)) { | ||
| 653 | /* update target, adjust size */ | ||
| 654 | b->target = target; | ||
| 655 | |||
| 656 | if (b->size < target) | ||
| 657 | vmballoon_inflate(b); | ||
| 658 | else if (b->size > target) | ||
| 659 | vmballoon_deflate(b); | ||
| 660 | } | ||
| 661 | |||
| 662 | queue_delayed_work(vmballoon_wq, dwork, round_jiffies_relative(HZ)); | ||
| 663 | } | ||
| 664 | |||
| 665 | /* | ||
| 666 | * DEBUGFS Interface | ||
| 667 | */ | ||
| 668 | #ifdef CONFIG_DEBUG_FS | ||
| 669 | |||
| 670 | static int vmballoon_debug_show(struct seq_file *f, void *offset) | ||
| 671 | { | ||
| 672 | struct vmballoon *b = f->private; | ||
| 673 | struct vmballoon_stats *stats = &b->stats; | ||
| 674 | |||
| 675 | /* format size info */ | ||
| 676 | seq_printf(f, | ||
| 677 | "target: %8d pages\n" | ||
| 678 | "current: %8d pages\n", | ||
| 679 | b->target, b->size); | ||
| 680 | |||
| 681 | /* format rate info */ | ||
| 682 | seq_printf(f, | ||
| 683 | "rateNoSleepAlloc: %8d pages/sec\n" | ||
| 684 | "rateSleepAlloc: %8d pages/sec\n" | ||
| 685 | "rateFree: %8d pages/sec\n", | ||
| 686 | VMW_BALLOON_NOSLEEP_ALLOC_MAX, | ||
| 687 | b->rate_alloc, b->rate_free); | ||
| 688 | |||
| 689 | seq_printf(f, | ||
| 690 | "\n" | ||
| 691 | "timer: %8u\n" | ||
| 692 | "start: %8u (%4u failed)\n" | ||
| 693 | "guestType: %8u (%4u failed)\n" | ||
| 694 | "lock: %8u (%4u failed)\n" | ||
| 695 | "unlock: %8u (%4u failed)\n" | ||
| 696 | "target: %8u (%4u failed)\n" | ||
| 697 | "primNoSleepAlloc: %8u (%4u failed)\n" | ||
| 698 | "primCanSleepAlloc: %8u (%4u failed)\n" | ||
| 699 | "primFree: %8u\n" | ||
| 700 | "errAlloc: %8u\n" | ||
| 701 | "errFree: %8u\n", | ||
| 702 | stats->timer, | ||
| 703 | stats->start, stats->start_fail, | ||
| 704 | stats->guest_type, stats->guest_type_fail, | ||
| 705 | stats->lock, stats->lock_fail, | ||
| 706 | stats->unlock, stats->unlock_fail, | ||
| 707 | stats->target, stats->target_fail, | ||
| 708 | stats->alloc, stats->alloc_fail, | ||
| 709 | stats->sleep_alloc, stats->sleep_alloc_fail, | ||
| 710 | stats->free, | ||
| 711 | stats->refused_alloc, stats->refused_free); | ||
| 712 | |||
| 713 | return 0; | ||
| 714 | } | ||
| 715 | |||
| 716 | static int vmballoon_debug_open(struct inode *inode, struct file *file) | ||
| 717 | { | ||
| 718 | return single_open(file, vmballoon_debug_show, inode->i_private); | ||
| 719 | } | ||
| 720 | |||
| 721 | static const struct file_operations vmballoon_debug_fops = { | ||
| 722 | .owner = THIS_MODULE, | ||
| 723 | .open = vmballoon_debug_open, | ||
| 724 | .read = seq_read, | ||
| 725 | .llseek = seq_lseek, | ||
| 726 | .release = single_release, | ||
| 727 | }; | ||
| 728 | |||
| 729 | static int __init vmballoon_debugfs_init(struct vmballoon *b) | ||
| 730 | { | ||
| 731 | int error; | ||
| 732 | |||
| 733 | b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b, | ||
| 734 | &vmballoon_debug_fops); | ||
| 735 | if (IS_ERR(b->dbg_entry)) { | ||
| 736 | error = PTR_ERR(b->dbg_entry); | ||
| 737 | pr_err("failed to create debugfs entry, error: %d\n", error); | ||
| 738 | return error; | ||
| 739 | } | ||
| 740 | |||
| 741 | return 0; | ||
| 742 | } | ||
| 743 | |||
| 744 | static void __exit vmballoon_debugfs_exit(struct vmballoon *b) | ||
| 745 | { | ||
| 746 | debugfs_remove(b->dbg_entry); | ||
| 747 | } | ||
| 748 | |||
| 749 | #else | ||
| 750 | |||
| 751 | static inline int vmballoon_debugfs_init(struct vmballoon *b) | ||
| 752 | { | ||
| 753 | return 0; | ||
| 754 | } | ||
| 755 | |||
| 756 | static inline void vmballoon_debugfs_exit(struct vmballoon *b) | ||
| 757 | { | ||
| 758 | } | ||
| 759 | |||
| 760 | #endif /* CONFIG_DEBUG_FS */ | ||
| 761 | |||
| 762 | static int __init vmballoon_init(void) | ||
| 763 | { | ||
| 764 | int error; | ||
| 765 | |||
| 766 | /* | ||
| 767 | * Check if we are running on VMware's hypervisor and bail out | ||
| 768 | * if we are not. | ||
| 769 | */ | ||
| 770 | if (!vmware_platform()) | ||
| 771 | return -ENODEV; | ||
| 772 | |||
| 773 | vmballoon_wq = create_freezeable_workqueue("vmmemctl"); | ||
| 774 | if (!vmballoon_wq) { | ||
| 775 | pr_err("failed to create workqueue\n"); | ||
| 776 | return -ENOMEM; | ||
| 777 | } | ||
| 778 | |||
| 779 | INIT_LIST_HEAD(&balloon.pages); | ||
| 780 | INIT_LIST_HEAD(&balloon.refused_pages); | ||
| 781 | |||
| 782 | /* initialize rates */ | ||
| 783 | balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX; | ||
| 784 | balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX; | ||
| 785 | |||
| 786 | INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work); | ||
| 787 | |||
| 788 | /* | ||
| 789 | * Start balloon. | ||
| 790 | */ | ||
| 791 | if (!vmballoon_send_start(&balloon)) { | ||
| 792 | pr_err("failed to send start command to the host\n"); | ||
| 793 | error = -EIO; | ||
| 794 | goto fail; | ||
| 795 | } | ||
| 796 | |||
| 797 | if (!vmballoon_send_guest_id(&balloon)) { | ||
| 798 | pr_err("failed to send guest ID to the host\n"); | ||
| 799 | error = -EIO; | ||
| 800 | goto fail; | ||
| 801 | } | ||
| 802 | |||
| 803 | error = vmballoon_debugfs_init(&balloon); | ||
| 804 | if (error) | ||
| 805 | goto fail; | ||
| 806 | |||
| 807 | queue_delayed_work(vmballoon_wq, &balloon.dwork, 0); | ||
| 808 | |||
| 809 | return 0; | ||
| 810 | |||
| 811 | fail: | ||
| 812 | destroy_workqueue(vmballoon_wq); | ||
| 813 | return error; | ||
| 814 | } | ||
| 815 | module_init(vmballoon_init); | ||
| 816 | |||
| 817 | static void __exit vmballoon_exit(void) | ||
| 818 | { | ||
| 819 | cancel_delayed_work_sync(&balloon.dwork); | ||
| 820 | destroy_workqueue(vmballoon_wq); | ||
| 821 | |||
| 822 | vmballoon_debugfs_exit(&balloon); | ||
| 823 | |||
| 824 | /* | ||
| 825 | * Deallocate all reserved memory, and reset connection with monitor. | ||
| 826 | * Reset connection before deallocating memory to avoid potential for | ||
| 827 | * additional spurious resets from guest touching deallocated pages. | ||
| 828 | */ | ||
| 829 | vmballoon_send_start(&balloon); | ||
| 830 | vmballoon_pop(&balloon); | ||
| 831 | } | ||
| 832 | module_exit(vmballoon_exit); | ||
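Once the module is loaded in a VMware guest, the statistics formatted by vmballoon_debug_show() can be read from userspace; the file is created at the debugfs root, so with the usual mount point it appears as /sys/kernel/debug/vmmemctl (path assumed, not part of the patch). A hypothetical userspace reader:

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/sys/kernel/debug/vmmemctl", "r");	/* assumes default debugfs mount */

		if (!f) {
			perror("vmmemctl");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}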
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index f59c07427af3..d60fc5719fef 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c | |||
| @@ -60,7 +60,13 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | |||
| 60 | } | 60 | } |
| 61 | buf64 = (uint64_t *)buf; | 61 | buf64 = (uint64_t *)buf; |
| 62 | while (i < len/8) { | 62 | while (i < len/8) { |
| 63 | uint64_t x; | 63 | /* |
| 64 | * Since GCC has no proper constraint (PR 43518), | ||
| 65 | * force the x variable into the r2/r3 registers, as the ldrd | ||
| 66 | * instruction requires its first register to be even. | ||
| 67 | */ | ||
| 68 | register uint64_t x asm ("r2"); | ||
| 69 | |||
| 64 | asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base)); | 70 | asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base)); |
| 65 | buf64[i++] = x; | 71 | buf64[i++] = x; |
| 66 | } | 72 | } |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 5ea587e59e48..37499127c801 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -679,7 +679,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) | |||
| 679 | */ | 679 | */ |
| 680 | int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) | 680 | int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) |
| 681 | { | 681 | { |
| 682 | return state > PCI_D0 ? | 682 | return state >= PCI_D0 ? |
| 683 | pci_platform_power_transition(dev, state) : -EINVAL; | 683 | pci_platform_power_transition(dev, state) : -EINVAL; |
| 684 | } | 684 | } |
| 685 | EXPORT_SYMBOL_GPL(__pci_complete_power_transition); | 685 | EXPORT_SYMBOL_GPL(__pci_complete_power_transition); |
| @@ -716,10 +716,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
| 716 | */ | 716 | */ |
| 717 | return 0; | 717 | return 0; |
| 718 | 718 | ||
| 719 | /* Check if we're already there */ | ||
| 720 | if (dev->current_state == state) | ||
| 721 | return 0; | ||
| 722 | |||
| 723 | __pci_start_power_transition(dev, state); | 719 | __pci_start_power_transition(dev, state); |
| 724 | 720 | ||
| 725 | /* This device is quirked not to be put into D3, so | 721 | /* This device is quirked not to be put into D3, so |
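The comparison change matters because pci_power_t values are ordered and PCI_D0 is the lowest; with the old '>' a request to return a device to D0 was rejected with -EINVAL before the platform code ever saw it. For reference, a simplified excerpt of the state values this relies on (from include/linux/pci.h, __bitwise annotations dropped):

	typedef int pci_power_t;

	#define PCI_D0		((pci_power_t) 0)
	#define PCI_D1		((pci_power_t) 1)
	#define PCI_D2		((pci_power_t) 2)
	#define PCI_D3hot	((pci_power_t) 3)
	#define PCI_D3cold	((pci_power_t) 4)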
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index aa495ad9bbd4..7a711ee314b7 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
| @@ -244,11 +244,17 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) | |||
| 244 | 244 | ||
| 245 | /* Assert Secondary Bus Reset */ | 245 | /* Assert Secondary Bus Reset */ |
| 246 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl); | 246 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl); |
| 247 | p2p_ctrl |= PCI_CB_BRIDGE_CTL_CB_RESET; | 247 | p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET; |
| 248 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); | 248 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); |
| 249 | 249 | ||
| 250 | /* | ||
| 251 | * we should send hot reset message for 2ms to allow it time to | ||
| 252 | * propagate to all downstream ports | ||
| 253 | */ | ||
| 254 | msleep(2); | ||
| 255 | |||
| 250 | /* De-assert Secondary Bus Reset */ | 256 | /* De-assert Secondary Bus Reset */ |
| 251 | p2p_ctrl &= ~PCI_CB_BRIDGE_CTL_CB_RESET; | 257 | p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; |
| 252 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); | 258 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); |
| 253 | 259 | ||
| 254 | /* | 260 | /* |
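The hunk swaps the CardBus constant for the generic PCI_BRIDGE_CTL_BUS_RESET bit and inserts a 2 ms delay so the hot reset has time to reach every downstream port. Consolidated out of the diff, the reset pulse now reads roughly as follows (sketch, error handling omitted):

	u16 p2p_ctrl;

	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
	p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;		/* assert secondary bus reset */
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);

	msleep(2);					/* let the hot reset propagate */

	p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;		/* de-assert */
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);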
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 882bd8d29fe3..c82548afcd5c 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -174,19 +174,14 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
| 174 | pci_read_config_dword(dev, pos, &sz); | 174 | pci_read_config_dword(dev, pos, &sz); |
| 175 | pci_write_config_dword(dev, pos, l); | 175 | pci_write_config_dword(dev, pos, l); |
| 176 | 176 | ||
| 177 | if (!sz) | ||
| 178 | goto fail; /* BAR not implemented */ | ||
| 179 | |||
| 180 | /* | 177 | /* |
| 181 | * All bits set in sz means the device isn't working properly. | 178 | * All bits set in sz means the device isn't working properly. |
| 182 | * If it's a memory BAR or a ROM, bit 0 must be clear; if it's | 179 | * If the BAR isn't implemented, all bits must be 0. If it's a |
| 183 | * an io BAR, bit 1 must be clear. | 180 | * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit |
| 181 | * 1 must be clear. | ||
| 184 | */ | 182 | */ |
| 185 | if (sz == 0xffffffff) { | 183 | if (!sz || sz == 0xffffffff) |
| 186 | dev_err(&dev->dev, "reg %x: invalid size %#x; broken device?\n", | ||
| 187 | pos, sz); | ||
| 188 | goto fail; | 184 | goto fail; |
| 189 | } | ||
| 190 | 185 | ||
| 191 | /* | 186 | /* |
| 192 | * I don't know how l can have all bits set. Copied from old code. | 187 | * I don't know how l can have all bits set. Copied from old code. |
| @@ -249,17 +244,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
| 249 | pos, res); | 244 | pos, res); |
| 250 | } | 245 | } |
| 251 | } else { | 246 | } else { |
| 252 | u32 size = pci_size(l, sz, mask); | 247 | sz = pci_size(l, sz, mask); |
| 253 | 248 | ||
| 254 | if (!size) { | 249 | if (!sz) |
| 255 | dev_err(&dev->dev, "reg %x: invalid size " | ||
| 256 | "(l %#x sz %#x mask %#x); broken device?", | ||
| 257 | pos, l, sz, mask); | ||
| 258 | goto fail; | 250 | goto fail; |
| 259 | } | ||
| 260 | 251 | ||
| 261 | res->start = l; | 252 | res->start = l; |
| 262 | res->end = l + size; | 253 | res->end = l + sz; |
| 263 | 254 | ||
| 264 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); | 255 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); |
| 265 | } | 256 | } |
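The sizing code above depends on the standard BAR probe: write all 1s to the register, read it back, mask off the low flag bits, and the least significant address bit still set gives the decode size; all zeros means the BAR is unimplemented, all ones means the device is broken. A worked example under those rules (values illustrative):

	/* A 32-bit memory BAR reading back 0xfffff000 decodes 4 KB. */
	u32 sz   = 0xfffff000;			/* read back after writing ~0 */
	u32 mask = ~0xfU;			/* memory BARs: bits 3:0 are flags */
	u32 bits = sz & mask;			/* 0xfffff000 */
	u32 size = bits & ~(bits - 1);		/* lowest set bit = 0x1000 = 4 KB */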
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 7bec4588c268..6c3320d75055 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
| @@ -390,6 +390,7 @@ config EEEPC_WMI | |||
| 390 | depends on ACPI_WMI | 390 | depends on ACPI_WMI |
| 391 | depends on INPUT | 391 | depends on INPUT |
| 392 | depends on EXPERIMENTAL | 392 | depends on EXPERIMENTAL |
| 393 | select INPUT_SPARSEKMAP | ||
| 393 | ---help--- | 394 | ---help--- |
| 394 | Say Y here if you want to support WMI-based hotkeys on Eee PC laptops. | 395 | Say Y here if you want to support WMI-based hotkeys on Eee PC laptops. |
| 395 | 396 | ||
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 52262b012abb..efe8f6388906 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c | |||
| @@ -79,15 +79,15 @@ static uint wapf = 1; | |||
| 79 | module_param(wapf, uint, 0644); | 79 | module_param(wapf, uint, 0644); |
| 80 | MODULE_PARM_DESC(wapf, "WAPF value"); | 80 | MODULE_PARM_DESC(wapf, "WAPF value"); |
| 81 | 81 | ||
| 82 | static uint wlan_status = 1; | 82 | static int wlan_status = 1; |
| 83 | static uint bluetooth_status = 1; | 83 | static int bluetooth_status = 1; |
| 84 | 84 | ||
| 85 | module_param(wlan_status, uint, 0644); | 85 | module_param(wlan_status, int, 0644); |
| 86 | MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot " | 86 | MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot " |
| 87 | "(0 = disabled, 1 = enabled, -1 = don't do anything). " | 87 | "(0 = disabled, 1 = enabled, -1 = don't do anything). " |
| 88 | "default is 1"); | 88 | "default is 1"); |
| 89 | 89 | ||
| 90 | module_param(bluetooth_status, uint, 0644); | 90 | module_param(bluetooth_status, int, 0644); |
| 91 | MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " | 91 | MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " |
| 92 | "(0 = disabled, 1 = enabled, -1 = don't do anything). " | 92 | "(0 = disabled, 1 = enabled, -1 = don't do anything). " |
| 93 | "default is 1"); | 93 | "default is 1"); |
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 6ba6c30e5bb6..66f53c3c35e8 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c | |||
| @@ -217,6 +217,7 @@ static void dell_wmi_notify(u32 value, void *context) | |||
| 217 | if (dell_new_hk_type && (buffer_entry[1] != 0x10)) { | 217 | if (dell_new_hk_type && (buffer_entry[1] != 0x10)) { |
| 218 | printk(KERN_INFO "dell-wmi: Received unknown WMI event" | 218 | printk(KERN_INFO "dell-wmi: Received unknown WMI event" |
| 219 | " (0x%x)\n", buffer_entry[1]); | 219 | " (0x%x)\n", buffer_entry[1]); |
| 220 | kfree(obj); | ||
| 220 | return; | 221 | return; |
| 221 | } | 222 | } |
| 222 | 223 | ||
| @@ -234,7 +235,7 @@ static void dell_wmi_notify(u32 value, void *context) | |||
| 234 | key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) { | 235 | key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) { |
| 235 | /* Don't report brightness notifications that will also | 236 | /* Don't report brightness notifications that will also |
| 236 | * come via ACPI */ | 237 | * come via ACPI */ |
| 237 | return; | 238 | ; |
| 238 | } else { | 239 | } else { |
| 239 | input_report_key(dell_wmi_input_dev, key->keycode, 1); | 240 | input_report_key(dell_wmi_input_dev, key->keycode, 1); |
| 240 | input_sync(dell_wmi_input_dev); | 241 | input_sync(dell_wmi_input_dev); |
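Both hunks concern the lifetime of the ACPI object returned by wmi_get_event_data(): the first adds the kfree(obj) that the unknown-event early return was skipping, and the second replaces an early return for brightness keys (already delivered via ACPI video) with a fall-through so the trailing kfree(obj) is reached. A sketch of the brightness path after the change, with the key-release reporting that follows in the full source elided:

	if ((key->keycode == KEY_BRIGHTNESSUP ||
	     key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) {
		/* deliberately report nothing -- ACPI video sends the event */
		;
	} else {
		input_report_key(dell_wmi_input_dev, key->keycode, 1);
		input_sync(dell_wmi_input_dev);
		/* ... matching release event elided ... */
	}
	kfree(obj);	/* now reached on every path through the handler */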
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 54a015785ca8..0306174ba875 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c | |||
| @@ -169,7 +169,6 @@ struct eeepc_laptop { | |||
| 169 | struct backlight_device *backlight_device; | 169 | struct backlight_device *backlight_device; |
| 170 | 170 | ||
| 171 | struct input_dev *inputdev; | 171 | struct input_dev *inputdev; |
| 172 | struct key_entry *keymap; | ||
| 173 | 172 | ||
| 174 | struct rfkill *wlan_rfkill; | 173 | struct rfkill *wlan_rfkill; |
| 175 | struct rfkill *bluetooth_rfkill; | 174 | struct rfkill *bluetooth_rfkill; |
| @@ -1204,8 +1203,8 @@ static int eeepc_input_init(struct eeepc_laptop *eeepc) | |||
| 1204 | static void eeepc_input_exit(struct eeepc_laptop *eeepc) | 1203 | static void eeepc_input_exit(struct eeepc_laptop *eeepc) |
| 1205 | { | 1204 | { |
| 1206 | if (eeepc->inputdev) { | 1205 | if (eeepc->inputdev) { |
| 1206 | sparse_keymap_free(eeepc->inputdev); | ||
| 1207 | input_unregister_device(eeepc->inputdev); | 1207 | input_unregister_device(eeepc->inputdev); |
| 1208 | kfree(eeepc->keymap); | ||
| 1209 | } | 1208 | } |
| 1210 | } | 1209 | } |
| 1211 | 1210 | ||
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 9f8822658fd7..b227eb469f49 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c | |||
| @@ -23,6 +23,8 @@ | |||
| 23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| 24 | */ | 24 | */ |
| 25 | 25 | ||
| 26 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 27 | |||
| 26 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
| 27 | #include <linux/module.h> | 29 | #include <linux/module.h> |
| 28 | #include <linux/init.h> | 30 | #include <linux/init.h> |
| @@ -30,22 +32,34 @@ | |||
| 30 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
| 31 | #include <linux/input.h> | 33 | #include <linux/input.h> |
| 32 | #include <linux/input/sparse-keymap.h> | 34 | #include <linux/input/sparse-keymap.h> |
| 35 | #include <linux/fb.h> | ||
| 36 | #include <linux/backlight.h> | ||
| 37 | #include <linux/platform_device.h> | ||
| 33 | #include <acpi/acpi_bus.h> | 38 | #include <acpi/acpi_bus.h> |
| 34 | #include <acpi/acpi_drivers.h> | 39 | #include <acpi/acpi_drivers.h> |
| 35 | 40 | ||
| 41 | #define EEEPC_WMI_FILE "eeepc-wmi" | ||
| 42 | |||
| 36 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | 43 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); |
| 37 | MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); | 44 | MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); |
| 38 | MODULE_LICENSE("GPL"); | 45 | MODULE_LICENSE("GPL"); |
| 39 | 46 | ||
| 40 | #define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" | 47 | #define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" |
| 48 | #define EEEPC_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66" | ||
| 41 | 49 | ||
| 42 | MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); | 50 | MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); |
| 51 | MODULE_ALIAS("wmi:"EEEPC_WMI_MGMT_GUID); | ||
| 43 | 52 | ||
| 44 | #define NOTIFY_BRNUP_MIN 0x11 | 53 | #define NOTIFY_BRNUP_MIN 0x11 |
| 45 | #define NOTIFY_BRNUP_MAX 0x1f | 54 | #define NOTIFY_BRNUP_MAX 0x1f |
| 46 | #define NOTIFY_BRNDOWN_MIN 0x20 | 55 | #define NOTIFY_BRNDOWN_MIN 0x20 |
| 47 | #define NOTIFY_BRNDOWN_MAX 0x2e | 56 | #define NOTIFY_BRNDOWN_MAX 0x2e |
| 48 | 57 | ||
| 58 | #define EEEPC_WMI_METHODID_DEVS 0x53564544 | ||
| 59 | #define EEEPC_WMI_METHODID_DSTS 0x53544344 | ||
| 60 | |||
| 61 | #define EEEPC_WMI_DEVID_BACKLIGHT 0x00050012 | ||
| 62 | |||
| 49 | static const struct key_entry eeepc_wmi_keymap[] = { | 63 | static const struct key_entry eeepc_wmi_keymap[] = { |
| 50 | /* Sleep already handled via generic ACPI code */ | 64 | /* Sleep already handled via generic ACPI code */ |
| 51 | { KE_KEY, 0x5d, { KEY_WLAN } }, | 65 | { KE_KEY, 0x5d, { KEY_WLAN } }, |
| @@ -58,18 +72,198 @@ static const struct key_entry eeepc_wmi_keymap[] = { | |||
| 58 | { KE_END, 0}, | 72 | { KE_END, 0}, |
| 59 | }; | 73 | }; |
| 60 | 74 | ||
| 61 | static struct input_dev *eeepc_wmi_input_dev; | 75 | struct bios_args { |
| 76 | u32 dev_id; | ||
| 77 | u32 ctrl_param; | ||
| 78 | }; | ||
| 79 | |||
| 80 | struct eeepc_wmi { | ||
| 81 | struct input_dev *inputdev; | ||
| 82 | struct backlight_device *backlight_device; | ||
| 83 | }; | ||
| 84 | |||
| 85 | static struct platform_device *platform_device; | ||
| 86 | |||
| 87 | static int eeepc_wmi_input_init(struct eeepc_wmi *eeepc) | ||
| 88 | { | ||
| 89 | int err; | ||
| 90 | |||
| 91 | eeepc->inputdev = input_allocate_device(); | ||
| 92 | if (!eeepc->inputdev) | ||
| 93 | return -ENOMEM; | ||
| 94 | |||
| 95 | eeepc->inputdev->name = "Eee PC WMI hotkeys"; | ||
| 96 | eeepc->inputdev->phys = EEEPC_WMI_FILE "/input0"; | ||
| 97 | eeepc->inputdev->id.bustype = BUS_HOST; | ||
| 98 | eeepc->inputdev->dev.parent = &platform_device->dev; | ||
| 99 | |||
| 100 | err = sparse_keymap_setup(eeepc->inputdev, eeepc_wmi_keymap, NULL); | ||
| 101 | if (err) | ||
| 102 | goto err_free_dev; | ||
| 103 | |||
| 104 | err = input_register_device(eeepc->inputdev); | ||
| 105 | if (err) | ||
| 106 | goto err_free_keymap; | ||
| 107 | |||
| 108 | return 0; | ||
| 109 | |||
| 110 | err_free_keymap: | ||
| 111 | sparse_keymap_free(eeepc->inputdev); | ||
| 112 | err_free_dev: | ||
| 113 | input_free_device(eeepc->inputdev); | ||
| 114 | return err; | ||
| 115 | } | ||
| 116 | |||
| 117 | static void eeepc_wmi_input_exit(struct eeepc_wmi *eeepc) | ||
| 118 | { | ||
| 119 | if (eeepc->inputdev) { | ||
| 120 | sparse_keymap_free(eeepc->inputdev); | ||
| 121 | input_unregister_device(eeepc->inputdev); | ||
| 122 | } | ||
| 123 | |||
| 124 | eeepc->inputdev = NULL; | ||
| 125 | } | ||
| 126 | |||
| 127 | static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *ctrl_param) | ||
| 128 | { | ||
| 129 | struct acpi_buffer input = { (acpi_size)sizeof(u32), &dev_id }; | ||
| 130 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
| 131 | union acpi_object *obj; | ||
| 132 | acpi_status status; | ||
| 133 | u32 tmp; | ||
| 134 | |||
| 135 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, | ||
| 136 | 1, EEEPC_WMI_METHODID_DSTS, &input, &output); | ||
| 137 | |||
| 138 | if (ACPI_FAILURE(status)) | ||
| 139 | return status; | ||
| 140 | |||
| 141 | obj = (union acpi_object *)output.pointer; | ||
| 142 | if (obj && obj->type == ACPI_TYPE_INTEGER) | ||
| 143 | tmp = (u32)obj->integer.value; | ||
| 144 | else | ||
| 145 | tmp = 0; | ||
| 146 | |||
| 147 | if (ctrl_param) | ||
| 148 | *ctrl_param = tmp; | ||
| 149 | |||
| 150 | kfree(obj); | ||
| 151 | |||
| 152 | return status; | ||
| 153 | |||
| 154 | } | ||
| 155 | |||
| 156 | static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param) | ||
| 157 | { | ||
| 158 | struct bios_args args = { | ||
| 159 | .dev_id = dev_id, | ||
| 160 | .ctrl_param = ctrl_param, | ||
| 161 | }; | ||
| 162 | struct acpi_buffer input = { (acpi_size)sizeof(args), &args }; | ||
| 163 | acpi_status status; | ||
| 164 | |||
| 165 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, | ||
| 166 | 1, EEEPC_WMI_METHODID_DEVS, &input, NULL); | ||
| 167 | |||
| 168 | return status; | ||
| 169 | } | ||
| 170 | |||
| 171 | static int read_brightness(struct backlight_device *bd) | ||
| 172 | { | ||
| 173 | static u32 ctrl_param; | ||
| 174 | acpi_status status; | ||
| 175 | |||
| 176 | status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &ctrl_param); | ||
| 177 | |||
| 178 | if (ACPI_FAILURE(status)) | ||
| 179 | return -1; | ||
| 180 | else | ||
| 181 | return ctrl_param & 0xFF; | ||
| 182 | } | ||
| 183 | |||
| 184 | static int update_bl_status(struct backlight_device *bd) | ||
| 185 | { | ||
| 186 | |||
| 187 | static u32 ctrl_param; | ||
| 188 | acpi_status status; | ||
| 189 | |||
| 190 | ctrl_param = bd->props.brightness; | ||
| 191 | |||
| 192 | status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT, ctrl_param); | ||
| 193 | |||
| 194 | if (ACPI_FAILURE(status)) | ||
| 195 | return -1; | ||
| 196 | else | ||
| 197 | return 0; | ||
| 198 | } | ||
| 199 | |||
| 200 | static const struct backlight_ops eeepc_wmi_bl_ops = { | ||
| 201 | .get_brightness = read_brightness, | ||
| 202 | .update_status = update_bl_status, | ||
| 203 | }; | ||
| 204 | |||
| 205 | static int eeepc_wmi_backlight_notify(struct eeepc_wmi *eeepc, int code) | ||
| 206 | { | ||
| 207 | struct backlight_device *bd = eeepc->backlight_device; | ||
| 208 | int old = bd->props.brightness; | ||
| 209 | int new; | ||
| 210 | |||
| 211 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | ||
| 212 | new = code - NOTIFY_BRNUP_MIN + 1; | ||
| 213 | else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) | ||
| 214 | new = code - NOTIFY_BRNDOWN_MIN; | ||
| 215 | |||
| 216 | bd->props.brightness = new; | ||
| 217 | backlight_update_status(bd); | ||
| 218 | backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY); | ||
| 219 | |||
| 220 | return old; | ||
| 221 | } | ||
| 222 | |||
| 223 | static int eeepc_wmi_backlight_init(struct eeepc_wmi *eeepc) | ||
| 224 | { | ||
| 225 | struct backlight_device *bd; | ||
| 226 | struct backlight_properties props; | ||
| 227 | |||
| 228 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
| 229 | props.max_brightness = 15; | ||
| 230 | bd = backlight_device_register(EEEPC_WMI_FILE, | ||
| 231 | &platform_device->dev, eeepc, | ||
| 232 | &eeepc_wmi_bl_ops, &props); | ||
| 233 | if (IS_ERR(bd)) { | ||
| 234 | pr_err("Could not register backlight device\n"); | ||
| 235 | return PTR_ERR(bd); | ||
| 236 | } | ||
| 237 | |||
| 238 | eeepc->backlight_device = bd; | ||
| 239 | |||
| 240 | bd->props.brightness = read_brightness(bd); | ||
| 241 | bd->props.power = FB_BLANK_UNBLANK; | ||
| 242 | backlight_update_status(bd); | ||
| 243 | |||
| 244 | return 0; | ||
| 245 | } | ||
| 246 | |||
| 247 | static void eeepc_wmi_backlight_exit(struct eeepc_wmi *eeepc) | ||
| 248 | { | ||
| 249 | if (eeepc->backlight_device) | ||
| 250 | backlight_device_unregister(eeepc->backlight_device); | ||
| 251 | |||
| 252 | eeepc->backlight_device = NULL; | ||
| 253 | } | ||
| 62 | 254 | ||
| 63 | static void eeepc_wmi_notify(u32 value, void *context) | 255 | static void eeepc_wmi_notify(u32 value, void *context) |
| 64 | { | 256 | { |
| 257 | struct eeepc_wmi *eeepc = context; | ||
| 65 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; | 258 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 66 | union acpi_object *obj; | 259 | union acpi_object *obj; |
| 67 | acpi_status status; | 260 | acpi_status status; |
| 68 | int code; | 261 | int code; |
| 262 | int orig_code; | ||
| 69 | 263 | ||
| 70 | status = wmi_get_event_data(value, &response); | 264 | status = wmi_get_event_data(value, &response); |
| 71 | if (status != AE_OK) { | 265 | if (status != AE_OK) { |
| 72 | pr_err("EEEPC WMI: bad event status 0x%x\n", status); | 266 | pr_err("bad event status 0x%x\n", status); |
| 73 | return; | 267 | return; |
| 74 | } | 268 | } |
| 75 | 269 | ||
| @@ -77,81 +271,142 @@ static void eeepc_wmi_notify(u32 value, void *context) | |||
| 77 | 271 | ||
| 78 | if (obj && obj->type == ACPI_TYPE_INTEGER) { | 272 | if (obj && obj->type == ACPI_TYPE_INTEGER) { |
| 79 | code = obj->integer.value; | 273 | code = obj->integer.value; |
| 274 | orig_code = code; | ||
| 80 | 275 | ||
| 81 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | 276 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) |
| 82 | code = NOTIFY_BRNUP_MIN; | 277 | code = NOTIFY_BRNUP_MIN; |
| 83 | else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) | 278 | else if (code >= NOTIFY_BRNDOWN_MIN && |
| 279 | code <= NOTIFY_BRNDOWN_MAX) | ||
| 84 | code = NOTIFY_BRNDOWN_MIN; | 280 | code = NOTIFY_BRNDOWN_MIN; |
| 85 | 281 | ||
| 86 | if (!sparse_keymap_report_event(eeepc_wmi_input_dev, | 282 | if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) { |
| 283 | if (!acpi_video_backlight_support()) | ||
| 284 | eeepc_wmi_backlight_notify(eeepc, orig_code); | ||
| 285 | } | ||
| 286 | |||
| 287 | if (!sparse_keymap_report_event(eeepc->inputdev, | ||
| 87 | code, 1, true)) | 288 | code, 1, true)) |
| 88 | pr_info("EEEPC WMI: Unknown key %x pressed\n", code); | 289 | pr_info("Unknown key %x pressed\n", code); |
| 89 | } | 290 | } |
| 90 | 291 | ||
| 91 | kfree(obj); | 292 | kfree(obj); |
| 92 | } | 293 | } |
| 93 | 294 | ||
| 94 | static int eeepc_wmi_input_setup(void) | 295 | static int __devinit eeepc_wmi_platform_probe(struct platform_device *device) |
| 95 | { | 296 | { |
| 297 | struct eeepc_wmi *eeepc; | ||
| 96 | int err; | 298 | int err; |
| 299 | acpi_status status; | ||
| 97 | 300 | ||
| 98 | eeepc_wmi_input_dev = input_allocate_device(); | 301 | eeepc = platform_get_drvdata(device); |
| 99 | if (!eeepc_wmi_input_dev) | ||
| 100 | return -ENOMEM; | ||
| 101 | |||
| 102 | eeepc_wmi_input_dev->name = "Eee PC WMI hotkeys"; | ||
| 103 | eeepc_wmi_input_dev->phys = "wmi/input0"; | ||
| 104 | eeepc_wmi_input_dev->id.bustype = BUS_HOST; | ||
| 105 | 302 | ||
| 106 | err = sparse_keymap_setup(eeepc_wmi_input_dev, eeepc_wmi_keymap, NULL); | 303 | err = eeepc_wmi_input_init(eeepc); |
| 107 | if (err) | 304 | if (err) |
| 108 | goto err_free_dev; | 305 | goto error_input; |
| 109 | 306 | ||
| 110 | err = input_register_device(eeepc_wmi_input_dev); | 307 | if (!acpi_video_backlight_support()) { |
| 111 | if (err) | 308 | err = eeepc_wmi_backlight_init(eeepc); |
| 112 | goto err_free_keymap; | 309 | if (err) |
| 310 | goto error_backlight; | ||
| 311 | } else | ||
| 312 | pr_info("Backlight controlled by ACPI video driver\n"); | ||
| 313 | |||
| 314 | status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID, | ||
| 315 | eeepc_wmi_notify, eeepc); | ||
| 316 | if (ACPI_FAILURE(status)) { | ||
| 317 | pr_err("Unable to register notify handler - %d\n", | ||
| 318 | status); | ||
| 319 | err = -ENODEV; | ||
| 320 | goto error_wmi; | ||
| 321 | } | ||
| 113 | 322 | ||
| 114 | return 0; | 323 | return 0; |
| 115 | 324 | ||
| 116 | err_free_keymap: | 325 | error_wmi: |
| 117 | sparse_keymap_free(eeepc_wmi_input_dev); | 326 | eeepc_wmi_backlight_exit(eeepc); |
| 118 | err_free_dev: | 327 | error_backlight: |
| 119 | input_free_device(eeepc_wmi_input_dev); | 328 | eeepc_wmi_input_exit(eeepc); |
| 329 | error_input: | ||
| 120 | return err; | 330 | return err; |
| 121 | } | 331 | } |
| 122 | 332 | ||
| 333 | static int __devexit eeepc_wmi_platform_remove(struct platform_device *device) | ||
| 334 | { | ||
| 335 | struct eeepc_wmi *eeepc; | ||
| 336 | |||
| 337 | eeepc = platform_get_drvdata(device); | ||
| 338 | wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID); | ||
| 339 | eeepc_wmi_backlight_exit(eeepc); | ||
| 340 | eeepc_wmi_input_exit(eeepc); | ||
| 341 | |||
| 342 | return 0; | ||
| 343 | } | ||
| 344 | |||
| 345 | static struct platform_driver platform_driver = { | ||
| 346 | .driver = { | ||
| 347 | .name = EEEPC_WMI_FILE, | ||
| 348 | .owner = THIS_MODULE, | ||
| 349 | }, | ||
| 350 | .probe = eeepc_wmi_platform_probe, | ||
| 351 | .remove = __devexit_p(eeepc_wmi_platform_remove), | ||
| 352 | }; | ||
| 353 | |||
| 123 | static int __init eeepc_wmi_init(void) | 354 | static int __init eeepc_wmi_init(void) |
| 124 | { | 355 | { |
| 356 | struct eeepc_wmi *eeepc; | ||
| 125 | int err; | 357 | int err; |
| 126 | acpi_status status; | ||
| 127 | 358 | ||
| 128 | if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID)) { | 359 | if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID) || |
| 129 | pr_warning("EEEPC WMI: No known WMI GUID found\n"); | 360 | !wmi_has_guid(EEEPC_WMI_MGMT_GUID)) { |
| 361 | pr_warning("No known WMI GUID found\n"); | ||
| 130 | return -ENODEV; | 362 | return -ENODEV; |
| 131 | } | 363 | } |
| 132 | 364 | ||
| 133 | err = eeepc_wmi_input_setup(); | 365 | eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL); |
| 134 | if (err) | 366 | if (!eeepc) |
| 135 | return err; | 367 | return -ENOMEM; |
| 136 | 368 | ||
| 137 | status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID, | 369 | platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1); |
| 138 | eeepc_wmi_notify, NULL); | 370 | if (!platform_device) { |
| 139 | if (ACPI_FAILURE(status)) { | 371 | pr_warning("Unable to allocate platform device\n"); |
| 140 | sparse_keymap_free(eeepc_wmi_input_dev); | 372 | err = -ENOMEM; |
| 141 | input_unregister_device(eeepc_wmi_input_dev); | 373 | goto fail_platform; |
| 142 | pr_err("EEEPC WMI: Unable to register notify handler - %d\n", | 374 | } |
| 143 | status); | 375 | |
| 144 | return -ENODEV; | 376 | err = platform_device_add(platform_device); |
| 377 | if (err) { | ||
| 378 | pr_warning("Unable to add platform device\n"); | ||
| 379 | goto put_dev; | ||
| 380 | } | ||
| 381 | |||
| 382 | platform_set_drvdata(platform_device, eeepc); | ||
| 383 | |||
| 384 | err = platform_driver_register(&platform_driver); | ||
| 385 | if (err) { | ||
| 386 | pr_warning("Unable to register platform driver\n"); | ||
| 387 | goto del_dev; | ||
| 145 | } | 388 | } |
| 146 | 389 | ||
| 147 | return 0; | 390 | return 0; |
| 391 | |||
| 392 | del_dev: | ||
| 393 | platform_device_del(platform_device); | ||
| 394 | put_dev: | ||
| 395 | platform_device_put(platform_device); | ||
| 396 | fail_platform: | ||
| 397 | kfree(eeepc); | ||
| 398 | |||
| 399 | return err; | ||
| 148 | } | 400 | } |
| 149 | 401 | ||
| 150 | static void __exit eeepc_wmi_exit(void) | 402 | static void __exit eeepc_wmi_exit(void) |
| 151 | { | 403 | { |
| 152 | wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID); | 404 | struct eeepc_wmi *eeepc; |
| 153 | sparse_keymap_free(eeepc_wmi_input_dev); | 405 | |
| 154 | input_unregister_device(eeepc_wmi_input_dev); | 406 | eeepc = platform_get_drvdata(platform_device); |
| 407 | platform_driver_unregister(&platform_driver); | ||
| 408 | platform_device_unregister(platform_device); | ||
| 409 | kfree(eeepc); | ||
| 155 | } | 410 | } |
| 156 | 411 | ||
| 157 | module_init(eeepc_wmi_init); | 412 | module_init(eeepc_wmi_init); |
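The new init path has a deliberate ordering: the platform device is allocated and added, its drvdata is set to the freshly allocated eeepc_wmi context, and only then is the platform driver registered, because eeepc_wmi_platform_probe() runs from that registration and immediately fetches the context with platform_get_drvdata(). Stripped of error handling, the sequence is (sketch):

	eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL);
	platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1);
	platform_device_add(platform_device);
	platform_set_drvdata(platform_device, eeepc);	/* must precede ... */
	platform_driver_register(&platform_driver);	/* ... probe(), which reads it */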
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c index a681f5e8f786..ad036dd8da13 100644 --- a/drivers/regulator/mc13783-regulator.c +++ b/drivers/regulator/mc13783-regulator.c | |||
| @@ -618,9 +618,12 @@ static int __devexit mc13783_regulator_remove(struct platform_device *pdev) | |||
| 618 | dev_get_platdata(&pdev->dev); | 618 | dev_get_platdata(&pdev->dev); |
| 619 | int i; | 619 | int i; |
| 620 | 620 | ||
| 621 | platform_set_drvdata(pdev, NULL); | ||
| 622 | |||
| 621 | for (i = 0; i < pdata->num_regulators; i++) | 623 | for (i = 0; i < pdata->num_regulators; i++) |
| 622 | regulator_unregister(priv->regulators[i]); | 624 | regulator_unregister(priv->regulators[i]); |
| 623 | 625 | ||
| 626 | kfree(priv); | ||
| 624 | return 0; | 627 | return 0; |
| 625 | } | 628 | } |
| 626 | 629 | ||
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index bbea90baf98f..acf222f91f5a 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
| @@ -1899,7 +1899,8 @@ restart: | |||
| 1899 | /* Process requests that may be recovered */ | 1899 | /* Process requests that may be recovered */ |
| 1900 | if (cqr->status == DASD_CQR_NEED_ERP) { | 1900 | if (cqr->status == DASD_CQR_NEED_ERP) { |
| 1901 | erp_fn = base->discipline->erp_action(cqr); | 1901 | erp_fn = base->discipline->erp_action(cqr); |
| 1902 | erp_fn(cqr); | 1902 | if (IS_ERR(erp_fn(cqr))) |
| 1903 | continue; | ||
| 1903 | goto restart; | 1904 | goto restart; |
| 1904 | } | 1905 | } |
| 1905 | 1906 | ||
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 6927e751ce3e..6632649dd6aa 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
| @@ -2309,7 +2309,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) | |||
| 2309 | cqr->retries); | 2309 | cqr->retries); |
| 2310 | dasd_block_set_timer(device->block, (HZ << 3)); | 2310 | dasd_block_set_timer(device->block, (HZ << 3)); |
| 2311 | } | 2311 | } |
| 2312 | return cqr; | 2312 | return erp; |
| 2313 | } | 2313 | } |
| 2314 | 2314 | ||
| 2315 | ccw = cqr->cpaddr; | 2315 | ccw = cqr->cpaddr; |
| @@ -2372,6 +2372,9 @@ dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr) | |||
| 2372 | /* add erp and initialize with default TIC */ | 2372 | /* add erp and initialize with default TIC */ |
| 2373 | erp = dasd_3990_erp_add_erp(cqr); | 2373 | erp = dasd_3990_erp_add_erp(cqr); |
| 2374 | 2374 | ||
| 2375 | if (IS_ERR(erp)) | ||
| 2376 | return erp; | ||
| 2377 | |||
| 2375 | /* inspect sense, determine specific ERP if possible */ | 2378 | /* inspect sense, determine specific ERP if possible */ |
| 2376 | if (erp != cqr) { | 2379 | if (erp != cqr) { |
| 2377 | 2380 | ||
| @@ -2711,6 +2714,8 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) | |||
| 2711 | if (erp == NULL) { | 2714 | if (erp == NULL) { |
| 2712 | /* no matching erp found - set up erp */ | 2715 | /* no matching erp found - set up erp */ |
| 2713 | erp = dasd_3990_erp_additional_erp(cqr); | 2716 | erp = dasd_3990_erp_additional_erp(cqr); |
| 2717 | if (IS_ERR(erp)) | ||
| 2718 | return erp; | ||
| 2714 | } else { | 2719 | } else { |
| 2715 | /* matching erp found - set all leading erp's to DONE */ | 2720 | /* matching erp found - set all leading erp's to DONE */ |
| 2716 | erp = dasd_3990_erp_handle_match_erp(cqr, erp); | 2721 | erp = dasd_3990_erp_handle_match_erp(cqr, erp); |
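The two dasd hunks above make the ERP setup path propagate failures with ERR_PTR/IS_ERR instead of letting callers dereference a request that was never built. A minimal, hypothetical sketch of that convention (build_request() and request_stub are invented names):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct request_stub { int data; };           /* stand-in for the real request */

    /* On failure, return an encoded errno rather than NULL or a half-built object. */
    static struct request_stub *build_request(gfp_t gfp)
    {
            struct request_stub *req = kzalloc(sizeof(*req), gfp);

            if (!req)
                    return ERR_PTR(-ENOMEM);
            return req;
    }

    static int use_request(void)
    {
            struct request_stub *req = build_request(GFP_ATOMIC);

            if (IS_ERR(req))
                    return PTR_ERR(req);         /* bail out, nothing to free */
            /* ... work with req ... */
            kfree(req);
            return 0;
    }

Every caller in the chain checks IS_ERR() before touching the pointer, which is what the added checks in dasd_3990_erp_additional_erp() and dasd_3990_erp_action() do.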
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 18daf16aa357..7217966f7d31 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
| @@ -638,11 +638,7 @@ static int __init zcore_reipl_init(void) | |||
| 638 | rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); | 638 | rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); |
| 639 | else | 639 | else |
| 640 | rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); | 640 | rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); |
| 641 | if (rc) { | 641 | if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) != |
| 642 | free_page((unsigned long) ipl_block); | ||
| 643 | return rc; | ||
| 644 | } | ||
| 645 | if (csum_partial(ipl_block, ipl_block->hdr.len, 0) != | ||
| 646 | ipib_info.checksum) { | 642 | ipib_info.checksum) { |
| 647 | TRACE("Checksum does not match\n"); | 643 | TRACE("Checksum does not match\n"); |
| 648 | free_page((unsigned long) ipl_block); | 644 | free_page((unsigned long) ipl_block); |
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 4038f5b4f144..ce7cb87479fe 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include "chsc.h" | 29 | #include "chsc.h" |
| 30 | 30 | ||
| 31 | static void *sei_page; | 31 | static void *sei_page; |
| 32 | static DEFINE_SPINLOCK(sda_lock); | ||
| 32 | 33 | ||
| 33 | /** | 34 | /** |
| 34 | * chsc_error_from_response() - convert a chsc response to an error | 35 | * chsc_error_from_response() - convert a chsc response to an error |
| @@ -832,11 +833,10 @@ void __init chsc_free_sei_area(void) | |||
| 832 | kfree(sei_page); | 833 | kfree(sei_page); |
| 833 | } | 834 | } |
| 834 | 835 | ||
| 835 | int __init | 836 | int chsc_enable_facility(int operation_code) |
| 836 | chsc_enable_facility(int operation_code) | ||
| 837 | { | 837 | { |
| 838 | int ret; | 838 | int ret; |
| 839 | struct { | 839 | static struct { |
| 840 | struct chsc_header request; | 840 | struct chsc_header request; |
| 841 | u8 reserved1:4; | 841 | u8 reserved1:4; |
| 842 | u8 format:4; | 842 | u8 format:4; |
| @@ -849,33 +849,32 @@ chsc_enable_facility(int operation_code) | |||
| 849 | u32 reserved5:4; | 849 | u32 reserved5:4; |
| 850 | u32 format2:4; | 850 | u32 format2:4; |
| 851 | u32 reserved6:24; | 851 | u32 reserved6:24; |
| 852 | } __attribute__ ((packed)) *sda_area; | 852 | } __attribute__ ((packed, aligned(4096))) sda_area; |
| 853 | 853 | ||
| 854 | sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); | 854 | spin_lock(&sda_lock); |
| 855 | if (!sda_area) | 855 | memset(&sda_area, 0, sizeof(sda_area)); |
| 856 | return -ENOMEM; | 856 | sda_area.request.length = 0x0400; |
| 857 | sda_area->request.length = 0x0400; | 857 | sda_area.request.code = 0x0031; |
| 858 | sda_area->request.code = 0x0031; | 858 | sda_area.operation_code = operation_code; |
| 859 | sda_area->operation_code = operation_code; | ||
| 860 | 859 | ||
| 861 | ret = chsc(sda_area); | 860 | ret = chsc(&sda_area); |
| 862 | if (ret > 0) { | 861 | if (ret > 0) { |
| 863 | ret = (ret == 3) ? -ENODEV : -EBUSY; | 862 | ret = (ret == 3) ? -ENODEV : -EBUSY; |
| 864 | goto out; | 863 | goto out; |
| 865 | } | 864 | } |
| 866 | 865 | ||
| 867 | switch (sda_area->response.code) { | 866 | switch (sda_area.response.code) { |
| 868 | case 0x0101: | 867 | case 0x0101: |
| 869 | ret = -EOPNOTSUPP; | 868 | ret = -EOPNOTSUPP; |
| 870 | break; | 869 | break; |
| 871 | default: | 870 | default: |
| 872 | ret = chsc_error_from_response(sda_area->response.code); | 871 | ret = chsc_error_from_response(sda_area.response.code); |
| 873 | } | 872 | } |
| 874 | if (ret != 0) | 873 | if (ret != 0) |
| 875 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", | 874 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", |
| 876 | operation_code, sda_area->response.code); | 875 | operation_code, sda_area.response.code); |
| 877 | out: | 876 | out: |
| 878 | free_page((unsigned long)sda_area); | 877 | spin_unlock(&sda_lock); |
| 879 | return ret; | 878 | return ret; |
| 880 | } | 879 | } |
| 881 | 880 | ||
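The chsc hunk above swaps a per-call get_zeroed_page() for a static, page-aligned request block guarded by a spinlock, so chsc_enable_facility() no longer depends on the page allocator and can run outside __init (which the css.c change further down relies on via channel_subsystem_reinit()). A rough sketch of that shape with invented names; the point is the static aligned buffer plus serialization, not the CHSC command layout:

    #include <linux/spinlock.h>
    #include <linux/string.h>

    static DEFINE_SPINLOCK(cmd_lock);

    static int issue_command(int opcode)
    {
            static struct {
                    unsigned short length;
                    unsigned short code;
                    unsigned int operation_code;
                    /* ... response words ... */
            } __attribute__ ((packed, aligned(4096))) cmd_area;  /* static, not on the stack */
            int ret;

            spin_lock(&cmd_lock);               /* one caller at a time owns cmd_area */
            memset(&cmd_area, 0, sizeof(cmd_area));
            cmd_area.length = 0x0400;
            cmd_area.operation_code = opcode;

            ret = 0;    /* the real code issues the channel instruction on &cmd_area here */

            spin_unlock(&cmd_lock);
            return ret;
    }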
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 404f630c27ca..3b6f4adc5094 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c | |||
| @@ -124,7 +124,7 @@ static int chsc_subchannel_prepare(struct subchannel *sch) | |||
| 124 | * since we don't have a way to clear the subchannel and | 124 | * since we don't have a way to clear the subchannel and |
| 125 | * cannot disable it with a request running. | 125 | * cannot disable it with a request running. |
| 126 | */ | 126 | */ |
| 127 | cc = stsch(sch->schid, &schib); | 127 | cc = stsch_err(sch->schid, &schib); |
| 128 | if (!cc && scsw_stctl(&schib.scsw)) | 128 | if (!cc && scsw_stctl(&schib.scsw)) |
| 129 | return -EAGAIN; | 129 | return -EAGAIN; |
| 130 | return 0; | 130 | return 0; |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index f736cdcf08ad..5feea1a371e1 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
| @@ -361,7 +361,7 @@ int cio_commit_config(struct subchannel *sch) | |||
| 361 | struct schib schib; | 361 | struct schib schib; |
| 362 | int ccode, retry, ret = 0; | 362 | int ccode, retry, ret = 0; |
| 363 | 363 | ||
| 364 | if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) | 364 | if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) |
| 365 | return -ENODEV; | 365 | return -ENODEV; |
| 366 | 366 | ||
| 367 | for (retry = 0; retry < 5; retry++) { | 367 | for (retry = 0; retry < 5; retry++) { |
| @@ -372,7 +372,7 @@ int cio_commit_config(struct subchannel *sch) | |||
| 372 | return ccode; | 372 | return ccode; |
| 373 | switch (ccode) { | 373 | switch (ccode) { |
| 374 | case 0: /* successful */ | 374 | case 0: /* successful */ |
| 375 | if (stsch(sch->schid, &schib) || | 375 | if (stsch_err(sch->schid, &schib) || |
| 376 | !css_sch_is_valid(&schib)) | 376 | !css_sch_is_valid(&schib)) |
| 377 | return -ENODEV; | 377 | return -ENODEV; |
| 378 | if (cio_check_config(sch, &schib)) { | 378 | if (cio_check_config(sch, &schib)) { |
| @@ -404,7 +404,7 @@ int cio_update_schib(struct subchannel *sch) | |||
| 404 | { | 404 | { |
| 405 | struct schib schib; | 405 | struct schib schib; |
| 406 | 406 | ||
| 407 | if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) | 407 | if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) |
| 408 | return -ENODEV; | 408 | return -ENODEV; |
| 409 | 409 | ||
| 410 | memcpy(&sch->schib, &schib, sizeof(schib)); | 410 | memcpy(&sch->schib, &schib, sizeof(schib)); |
| @@ -771,7 +771,7 @@ cio_get_console_sch_no(void) | |||
| 771 | if (console_irq != -1) { | 771 | if (console_irq != -1) { |
| 772 | /* VM provided us with the irq number of the console. */ | 772 | /* VM provided us with the irq number of the console. */ |
| 773 | schid.sch_no = console_irq; | 773 | schid.sch_no = console_irq; |
| 774 | if (stsch(schid, &console_subchannel.schib) != 0 || | 774 | if (stsch_err(schid, &console_subchannel.schib) != 0 || |
| 775 | (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || | 775 | (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || |
| 776 | !console_subchannel.schib.pmcw.dnv) | 776 | !console_subchannel.schib.pmcw.dnv) |
| 777 | return -1; | 777 | return -1; |
| @@ -863,10 +863,10 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) | |||
| 863 | cc = 0; | 863 | cc = 0; |
| 864 | for (retry=0;retry<3;retry++) { | 864 | for (retry=0;retry<3;retry++) { |
| 865 | schib->pmcw.ena = 0; | 865 | schib->pmcw.ena = 0; |
| 866 | cc = msch(schid, schib); | 866 | cc = msch_err(schid, schib); |
| 867 | if (cc) | 867 | if (cc) |
| 868 | return (cc==3?-ENODEV:-EBUSY); | 868 | return (cc==3?-ENODEV:-EBUSY); |
| 869 | if (stsch(schid, schib) || !css_sch_is_valid(schib)) | 869 | if (stsch_err(schid, schib) || !css_sch_is_valid(schib)) |
| 870 | return -ENODEV; | 870 | return -ENODEV; |
| 871 | if (!schib->pmcw.ena) | 871 | if (!schib->pmcw.ena) |
| 872 | return 0; | 872 | return 0; |
| @@ -913,7 +913,7 @@ static int stsch_reset(struct subchannel_id schid, struct schib *addr) | |||
| 913 | 913 | ||
| 914 | pgm_check_occured = 0; | 914 | pgm_check_occured = 0; |
| 915 | s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; | 915 | s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; |
| 916 | rc = stsch(schid, addr); | 916 | rc = stsch_err(schid, addr); |
| 917 | s390_base_pgm_handler_fn = NULL; | 917 | s390_base_pgm_handler_fn = NULL; |
| 918 | 918 | ||
| 919 | /* The program check handler could have changed pgm_check_occured. */ | 919 | /* The program check handler could have changed pgm_check_occured. */ |
| @@ -950,7 +950,7 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) | |||
| 950 | /* No default clear strategy */ | 950 | /* No default clear strategy */ |
| 951 | break; | 951 | break; |
| 952 | } | 952 | } |
| 953 | stsch(schid, &schib); | 953 | stsch_err(schid, &schib); |
| 954 | __disable_subchannel_easy(schid, &schib); | 954 | __disable_subchannel_easy(schid, &schib); |
| 955 | } | 955 | } |
| 956 | out: | 956 | out: |
| @@ -1086,7 +1086,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) | |||
| 1086 | schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; | 1086 | schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; |
| 1087 | if (!schid.one) | 1087 | if (!schid.one) |
| 1088 | return -ENODEV; | 1088 | return -ENODEV; |
| 1089 | if (stsch(schid, &schib)) | 1089 | if (stsch_err(schid, &schib)) |
| 1090 | return -ENODEV; | 1090 | return -ENODEV; |
| 1091 | if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) | 1091 | if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) |
| 1092 | return -ENODEV; | 1092 | return -ENODEV; |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 2769da54f2b9..511649115bd7 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
| @@ -870,15 +870,10 @@ static int __init css_bus_init(void) | |||
| 870 | 870 | ||
| 871 | /* Try to enable MSS. */ | 871 | /* Try to enable MSS. */ |
| 872 | ret = chsc_enable_facility(CHSC_SDA_OC_MSS); | 872 | ret = chsc_enable_facility(CHSC_SDA_OC_MSS); |
| 873 | switch (ret) { | 873 | if (ret) |
| 874 | case 0: /* Success. */ | ||
| 875 | max_ssid = __MAX_SSID; | ||
| 876 | break; | ||
| 877 | case -ENOMEM: | ||
| 878 | goto out; | ||
| 879 | default: | ||
| 880 | max_ssid = 0; | 874 | max_ssid = 0; |
| 881 | } | 875 | else /* Success. */ |
| 876 | max_ssid = __MAX_SSID; | ||
| 882 | 877 | ||
| 883 | ret = slow_subchannel_init(); | 878 | ret = slow_subchannel_init(); |
| 884 | if (ret) | 879 | if (ret) |
| @@ -1048,6 +1043,11 @@ static int __init channel_subsystem_init_sync(void) | |||
| 1048 | } | 1043 | } |
| 1049 | subsys_initcall_sync(channel_subsystem_init_sync); | 1044 | subsys_initcall_sync(channel_subsystem_init_sync); |
| 1050 | 1045 | ||
| 1046 | void channel_subsystem_reinit(void) | ||
| 1047 | { | ||
| 1048 | chsc_enable_facility(CHSC_SDA_OC_MSS); | ||
| 1049 | } | ||
| 1050 | |||
| 1051 | #ifdef CONFIG_PROC_FS | 1051 | #ifdef CONFIG_PROC_FS |
| 1052 | static ssize_t cio_settle_write(struct file *file, const char __user *buf, | 1052 | static ssize_t cio_settle_write(struct file *file, const char __user *buf, |
| 1053 | size_t count, loff_t *ppos) | 1053 | size_t count, loff_t *ppos) |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index c56ab94612f9..c9b852647f01 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
| @@ -45,7 +45,7 @@ static void ccw_timeout_log(struct ccw_device *cdev) | |||
| 45 | sch = to_subchannel(cdev->dev.parent); | 45 | sch = to_subchannel(cdev->dev.parent); |
| 46 | private = to_io_private(sch); | 46 | private = to_io_private(sch); |
| 47 | orb = &private->orb; | 47 | orb = &private->orb; |
| 48 | cc = stsch(sch->schid, &schib); | 48 | cc = stsch_err(sch->schid, &schib); |
| 49 | 49 | ||
| 50 | printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " | 50 | printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " |
| 51 | "device information:\n", get_clock()); | 51 | "device information:\n", get_clock()); |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 18564891ea61..b3b1d2f79398 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
| @@ -2105,7 +2105,8 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) | |||
| 2105 | blktrc.inb_usage = req->qdio_req.qdio_inb_usage; | 2105 | blktrc.inb_usage = req->qdio_req.qdio_inb_usage; |
| 2106 | blktrc.outb_usage = req->qdio_req.qdio_outb_usage; | 2106 | blktrc.outb_usage = req->qdio_req.qdio_outb_usage; |
| 2107 | 2107 | ||
| 2108 | if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) { | 2108 | if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && |
| 2109 | !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { | ||
| 2109 | blktrc.flags |= ZFCP_BLK_LAT_VALID; | 2110 | blktrc.flags |= ZFCP_BLK_LAT_VALID; |
| 2110 | blktrc.channel_lat = lat_in->channel_lat * ticks; | 2111 | blktrc.channel_lat = lat_in->channel_lat * ticks; |
| 2111 | blktrc.fabric_lat = lat_in->fabric_lat * ticks; | 2112 | blktrc.fabric_lat = lat_in->fabric_lat * ticks; |
| @@ -2157,9 +2158,8 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) | |||
| 2157 | fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; | 2158 | fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; |
| 2158 | zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); | 2159 | zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); |
| 2159 | 2160 | ||
| 2160 | zfcp_fsf_req_trace(req, scpnt); | ||
| 2161 | |||
| 2162 | skip_fsfstatus: | 2161 | skip_fsfstatus: |
| 2162 | zfcp_fsf_req_trace(req, scpnt); | ||
| 2163 | zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); | 2163 | zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); |
| 2164 | 2164 | ||
| 2165 | scpnt->host_scribble = NULL; | 2165 | scpnt->host_scribble = NULL; |
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 72617b650a7e..e641922f20bc 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c | |||
| @@ -169,6 +169,7 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, | |||
| 169 | SE_DEBUG(DBG_LVL_1, | 169 | SE_DEBUG(DBG_LVL_1, |
| 170 | "Failed to allocate memory for" | 170 | "Failed to allocate memory for" |
| 171 | "mgmt_invalidate_icds \n"); | 171 | "mgmt_invalidate_icds \n"); |
| 172 | spin_unlock(&ctrl->mbox_lock); | ||
| 172 | return -1; | 173 | return -1; |
| 173 | } | 174 | } |
| 174 | nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); | 175 | nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); |
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index 6cf9dc37d78b..6b624e767d3b 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h | |||
| @@ -362,6 +362,7 @@ struct bnx2i_hba { | |||
| 362 | u32 num_ccell; | 362 | u32 num_ccell; |
| 363 | 363 | ||
| 364 | int ofld_conns_active; | 364 | int ofld_conns_active; |
| 365 | wait_queue_head_t eh_wait; | ||
| 365 | 366 | ||
| 366 | int max_active_conns; | 367 | int max_active_conns; |
| 367 | struct iscsi_cid_queue cid_que; | 368 | struct iscsi_cid_queue cid_que; |
| @@ -381,6 +382,7 @@ struct bnx2i_hba { | |||
| 381 | spinlock_t lock; /* protects hba structure access */ | 382 | spinlock_t lock; /* protects hba structure access */ |
| 382 | struct mutex net_dev_lock;/* sync net device access */ | 383 | struct mutex net_dev_lock;/* sync net device access */ |
| 383 | 384 | ||
| 385 | int hba_shutdown_tmo; | ||
| 384 | /* | 386 | /* |
| 385 | * PCI related info. | 387 | * PCI related info. |
| 386 | */ | 388 | */ |
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 6d8172e781cf..5d9296c599f6 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c | |||
| @@ -177,11 +177,22 @@ void bnx2i_stop(void *handle) | |||
| 177 | struct bnx2i_hba *hba = handle; | 177 | struct bnx2i_hba *hba = handle; |
| 178 | 178 | ||
| 179 | /* check if cleanup happened in GOING_DOWN context */ | 179 | /* check if cleanup happened in GOING_DOWN context */ |
| 180 | clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); | ||
| 181 | if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, | 180 | if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, |
| 182 | &hba->adapter_state)) | 181 | &hba->adapter_state)) |
| 183 | iscsi_host_for_each_session(hba->shost, | 182 | iscsi_host_for_each_session(hba->shost, |
| 184 | bnx2i_drop_session); | 183 | bnx2i_drop_session); |
| 184 | |||
| 185 | /* Wait for all endpoints to be torn down, Chip will be reset once | ||
| 186 | * control returns to network driver. So it is required to cleanup and | ||
| 187 | * release all connection resources before returning from this routine. | ||
| 188 | */ | ||
| 189 | wait_event_interruptible_timeout(hba->eh_wait, | ||
| 190 | (hba->ofld_conns_active == 0), | ||
| 191 | hba->hba_shutdown_tmo); | ||
| 192 | /* This flag should be cleared last so that ep_disconnect() gracefully | ||
| 193 | * cleans up connection context | ||
| 194 | */ | ||
| 195 | clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); | ||
| 185 | } | 196 | } |
| 186 | 197 | ||
| 187 | /** | 198 | /** |
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index f2e9b18fe76c..fa68ab34b998 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c | |||
| @@ -820,6 +820,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic) | |||
| 820 | 820 | ||
| 821 | spin_lock_init(&hba->lock); | 821 | spin_lock_init(&hba->lock); |
| 822 | mutex_init(&hba->net_dev_lock); | 822 | mutex_init(&hba->net_dev_lock); |
| 823 | init_waitqueue_head(&hba->eh_wait); | ||
| 824 | if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) | ||
| 825 | hba->hba_shutdown_tmo = 240 * HZ; | ||
| 826 | else /* 5706/5708/5709 */ | ||
| 827 | hba->hba_shutdown_tmo = 30 * HZ; | ||
| 823 | 828 | ||
| 824 | if (iscsi_host_add(shost, &hba->pcidev->dev)) | 829 | if (iscsi_host_add(shost, &hba->pcidev->dev)) |
| 825 | goto free_dump_mem; | 830 | goto free_dump_mem; |
| @@ -1658,8 +1663,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, | |||
| 1658 | */ | 1663 | */ |
| 1659 | hba = bnx2i_check_route(dst_addr); | 1664 | hba = bnx2i_check_route(dst_addr); |
| 1660 | 1665 | ||
| 1661 | if (!hba) { | 1666 | if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) { |
| 1662 | rc = -ENOMEM; | 1667 | rc = -EINVAL; |
| 1663 | goto check_busy; | 1668 | goto check_busy; |
| 1664 | } | 1669 | } |
| 1665 | 1670 | ||
| @@ -1804,7 +1809,7 @@ static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) | |||
| 1804 | (bnx2i_ep->state == | 1809 | (bnx2i_ep->state == |
| 1805 | EP_STATE_CONNECT_COMPL)), | 1810 | EP_STATE_CONNECT_COMPL)), |
| 1806 | msecs_to_jiffies(timeout_ms)); | 1811 | msecs_to_jiffies(timeout_ms)); |
| 1807 | if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED)) | 1812 | if (bnx2i_ep->state == EP_STATE_OFLD_FAILED) |
| 1808 | rc = -1; | 1813 | rc = -1; |
| 1809 | 1814 | ||
| 1810 | if (rc > 0) | 1815 | if (rc > 0) |
| @@ -1957,6 +1962,8 @@ return_bnx2i_ep: | |||
| 1957 | 1962 | ||
| 1958 | if (!hba->ofld_conns_active) | 1963 | if (!hba->ofld_conns_active) |
| 1959 | bnx2i_unreg_dev_all(); | 1964 | bnx2i_unreg_dev_all(); |
| 1965 | |||
| 1966 | wake_up_interruptible(&hba->eh_wait); | ||
| 1960 | } | 1967 | } |
| 1961 | 1968 | ||
| 1962 | 1969 | ||
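Taken together, the bnx2i hunks above add a shutdown handshake: bnx2i_alloc_hba() initializes a wait queue and picks a timeout, the endpoint teardown path wakes it whenever a connection goes away, and bnx2i_stop() sleeps until ofld_conns_active drops to zero or the timeout expires. A generic sketch of that waker/sleeper pairing, with hypothetical names:

    #include <linux/wait.h>
    #include <linux/jiffies.h>

    struct hba_stub {
            wait_queue_head_t eh_wait;
            int active_conns;
            unsigned long shutdown_tmo;
    };

    static void hba_stub_init(struct hba_stub *hba)
    {
            init_waitqueue_head(&hba->eh_wait);
            hba->shutdown_tmo = 30 * HZ;         /* chosen per hardware generation in the patch */
    }

    /* Teardown path: account for the released connection, then wake any waiter. */
    static void conn_released(struct hba_stub *hba)
    {
            hba->active_conns--;                 /* the real driver protects this count */
            wake_up_interruptible(&hba->eh_wait);
    }

    /* Shutdown path: block (bounded) until every connection is gone. */
    static void hba_stop(struct hba_stub *hba)
    {
            wait_event_interruptible_timeout(hba->eh_wait,
                                             hba->active_conns == 0,
                                             hba->shutdown_tmo);
    }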
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 496764349c41..0435d044c9da 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c | |||
| @@ -188,7 +188,8 @@ MODULE_DEVICE_TABLE(pci,dptids); | |||
| 188 | static int adpt_detect(struct scsi_host_template* sht) | 188 | static int adpt_detect(struct scsi_host_template* sht) |
| 189 | { | 189 | { |
| 190 | struct pci_dev *pDev = NULL; | 190 | struct pci_dev *pDev = NULL; |
| 191 | adpt_hba* pHba; | 191 | adpt_hba *pHba; |
| 192 | adpt_hba *next; | ||
| 192 | 193 | ||
| 193 | PINFO("Detecting Adaptec I2O RAID controllers...\n"); | 194 | PINFO("Detecting Adaptec I2O RAID controllers...\n"); |
| 194 | 195 | ||
| @@ -206,7 +207,8 @@ static int adpt_detect(struct scsi_host_template* sht) | |||
| 206 | } | 207 | } |
| 207 | 208 | ||
| 208 | /* In INIT state, Activate IOPs */ | 209 | /* In INIT state, Activate IOPs */ |
| 209 | for (pHba = hba_chain; pHba; pHba = pHba->next) { | 210 | for (pHba = hba_chain; pHba; pHba = next) { |
| 211 | next = pHba->next; | ||
| 210 | // Activate does get status , init outbound, and get hrt | 212 | // Activate does get status , init outbound, and get hrt |
| 211 | if (adpt_i2o_activate_hba(pHba) < 0) { | 213 | if (adpt_i2o_activate_hba(pHba) < 0) { |
| 212 | adpt_i2o_delete_hba(pHba); | 214 | adpt_i2o_delete_hba(pHba); |
| @@ -243,7 +245,8 @@ rebuild_sys_tab: | |||
| 243 | PDEBUG("HBA's in OPERATIONAL state\n"); | 245 | PDEBUG("HBA's in OPERATIONAL state\n"); |
| 244 | 246 | ||
| 245 | printk("dpti: If you have a lot of devices this could take a few minutes.\n"); | 247 | printk("dpti: If you have a lot of devices this could take a few minutes.\n"); |
| 246 | for (pHba = hba_chain; pHba; pHba = pHba->next) { | 248 | for (pHba = hba_chain; pHba; pHba = next) { |
| 249 | next = pHba->next; | ||
| 247 | printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name); | 250 | printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name); |
| 248 | if (adpt_i2o_lct_get(pHba) < 0){ | 251 | if (adpt_i2o_lct_get(pHba) < 0){ |
| 249 | adpt_i2o_delete_hba(pHba); | 252 | adpt_i2o_delete_hba(pHba); |
| @@ -263,7 +266,8 @@ rebuild_sys_tab: | |||
| 263 | adpt_sysfs_class = NULL; | 266 | adpt_sysfs_class = NULL; |
| 264 | } | 267 | } |
| 265 | 268 | ||
| 266 | for (pHba = hba_chain; pHba; pHba = pHba->next) { | 269 | for (pHba = hba_chain; pHba; pHba = next) { |
| 270 | next = pHba->next; | ||
| 267 | if (adpt_scsi_host_alloc(pHba, sht) < 0){ | 271 | if (adpt_scsi_host_alloc(pHba, sht) < 0){ |
| 268 | adpt_i2o_delete_hba(pHba); | 272 | adpt_i2o_delete_hba(pHba); |
| 269 | continue; | 273 | continue; |
| @@ -1229,11 +1233,10 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba) | |||
| 1229 | } | 1233 | } |
| 1230 | } | 1234 | } |
| 1231 | pci_dev_put(pHba->pDev); | 1235 | pci_dev_put(pHba->pDev); |
| 1232 | kfree(pHba); | ||
| 1233 | |||
| 1234 | if (adpt_sysfs_class) | 1236 | if (adpt_sysfs_class) |
| 1235 | device_destroy(adpt_sysfs_class, | 1237 | device_destroy(adpt_sysfs_class, |
| 1236 | MKDEV(DPTI_I2O_MAJOR, pHba->unit)); | 1238 | MKDEV(DPTI_I2O_MAJOR, pHba->unit)); |
| 1239 | kfree(pHba); | ||
| 1237 | 1240 | ||
| 1238 | if(hba_count <= 0){ | 1241 | if(hba_count <= 0){ |
| 1239 | unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER); | 1242 | unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER); |
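The adpt_detect() changes above cache pHba->next before each loop body because adpt_i2o_delete_hba() may free the node being visited; walking with a saved next pointer keeps the iteration valid. On a struct list_head the same idea is spelled list_for_each_entry_safe(); a small sketch with invented types:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct node { struct list_head link; int bad; };

    static void prune(struct list_head *head)
    {
            struct node *n, *next;

            /* the "safe" variant fetches next before n can be freed */
            list_for_each_entry_safe(n, next, head, link) {
                    if (n->bad) {
                            list_del(&n->link);
                            kfree(n);            /* freeing n does not break the walk */
                    }
            }
    }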
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index ff5ec5ac1fb5..88bad0e81bdd 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
| @@ -323,16 +323,6 @@ static void set_srp_direction(struct scsi_cmnd *cmd, | |||
| 323 | srp_cmd->buf_fmt = fmt; | 323 | srp_cmd->buf_fmt = fmt; |
| 324 | } | 324 | } |
| 325 | 325 | ||
| 326 | static void unmap_sg_list(int num_entries, | ||
| 327 | struct device *dev, | ||
| 328 | struct srp_direct_buf *md) | ||
| 329 | { | ||
| 330 | int i; | ||
| 331 | |||
| 332 | for (i = 0; i < num_entries; ++i) | ||
| 333 | dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL); | ||
| 334 | } | ||
| 335 | |||
| 336 | /** | 326 | /** |
| 337 | * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format | 327 | * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format |
| 338 | * @cmd: srp_cmd whose additional_data member will be unmapped | 328 | * @cmd: srp_cmd whose additional_data member will be unmapped |
| @@ -350,24 +340,9 @@ static void unmap_cmd_data(struct srp_cmd *cmd, | |||
| 350 | 340 | ||
| 351 | if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC) | 341 | if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC) |
| 352 | return; | 342 | return; |
| 353 | else if (out_fmt == SRP_DATA_DESC_DIRECT || | ||
| 354 | in_fmt == SRP_DATA_DESC_DIRECT) { | ||
| 355 | struct srp_direct_buf *data = | ||
| 356 | (struct srp_direct_buf *) cmd->add_data; | ||
| 357 | dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL); | ||
| 358 | } else { | ||
| 359 | struct srp_indirect_buf *indirect = | ||
| 360 | (struct srp_indirect_buf *) cmd->add_data; | ||
| 361 | int num_mapped = indirect->table_desc.len / | ||
| 362 | sizeof(struct srp_direct_buf); | ||
| 363 | 343 | ||
| 364 | if (num_mapped <= MAX_INDIRECT_BUFS) { | 344 | if (evt_struct->cmnd) |
| 365 | unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]); | 345 | scsi_dma_unmap(evt_struct->cmnd); |
| 366 | return; | ||
| 367 | } | ||
| 368 | |||
| 369 | unmap_sg_list(num_mapped, dev, evt_struct->ext_list); | ||
| 370 | } | ||
| 371 | } | 346 | } |
| 372 | 347 | ||
| 373 | static int map_sg_list(struct scsi_cmnd *cmd, int nseg, | 348 | static int map_sg_list(struct scsi_cmnd *cmd, int nseg, |
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 0ee725ced511..02143af7c1af 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
| @@ -599,7 +599,7 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | |||
| 599 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); | 599 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); |
| 600 | write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); | 600 | write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); |
| 601 | 601 | ||
| 602 | if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) { | 602 | if (sock->sk->sk_sleep) { |
| 603 | sock->sk->sk_err = EIO; | 603 | sock->sk->sk_err = EIO; |
| 604 | wake_up_interruptible(sock->sk->sk_sleep); | 604 | wake_up_interruptible(sock->sk->sk_sleep); |
| 605 | } | 605 | } |
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index ec3723831e89..d62b3e467926 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
| @@ -433,7 +433,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, | |||
| 433 | dd_data = cmdiocbq->context1; | 433 | dd_data = cmdiocbq->context1; |
| 434 | /* normal completion and timeout crossed paths, already done */ | 434 | /* normal completion and timeout crossed paths, already done */ |
| 435 | if (!dd_data) { | 435 | if (!dd_data) { |
| 436 | spin_unlock_irqrestore(&phba->hbalock, flags); | 436 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
| 437 | return; | 437 | return; |
| 438 | } | 438 | } |
| 439 | 439 | ||
| @@ -1196,7 +1196,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, | |||
| 1196 | dd_data = cmdiocbq->context1; | 1196 | dd_data = cmdiocbq->context1; |
| 1197 | /* normal completion and timeout crossed paths, already done */ | 1197 | /* normal completion and timeout crossed paths, already done */ |
| 1198 | if (!dd_data) { | 1198 | if (!dd_data) { |
| 1199 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1199 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
| 1200 | return; | 1200 | return; |
| 1201 | } | 1201 | } |
| 1202 | 1202 | ||
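The two lpfc_bsg hunks above fix early-return paths that released phba->hbalock even though the function had taken phba->ct_ev_lock: an unlock must always name the same lock (and the same flags variable) as the matching lock call. A tiny illustration with a made-up context structure, assuming the locks are initialized elsewhere:

    #include <linux/spinlock.h>

    struct ctx { spinlock_t ev_lock; spinlock_t hw_lock; int ready; };

    static void handle_event(struct ctx *c)
    {
            unsigned long flags;

            spin_lock_irqsave(&c->ev_lock, flags);
            if (!c->ready) {
                    /* release ev_lock, not hw_lock, with the same flags */
                    spin_unlock_irqrestore(&c->ev_lock, flags);
                    return;
            }
            /* ... event handling ... */
            spin_unlock_irqrestore(&c->ev_lock, flags);
    }

Releasing the wrong lock leaves the taken lock held forever and lets other CPUs slip past the lock that was never actually dropped, which is why the one-line fix matters.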
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 359e9a71a021..1c7ef55966fb 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
| @@ -2393,6 +2393,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) | |||
| 2393 | return 0; | 2393 | return 0; |
| 2394 | 2394 | ||
| 2395 | done: | 2395 | done: |
| 2396 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
| 2396 | if (bsg_job->request->msgcode == FC_BSG_HST_CT) | 2397 | if (bsg_job->request->msgcode == FC_BSG_HST_CT) |
| 2397 | kfree(sp->fcport); | 2398 | kfree(sp->fcport); |
| 2398 | kfree(sp->ctx); | 2399 | kfree(sp->ctx); |
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 09d6d4b76f39..caeb7d10ae04 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c | |||
| @@ -467,7 +467,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha, | |||
| 467 | if (conn_err_detail) | 467 | if (conn_err_detail) |
| 468 | *conn_err_detail = mbox_sts[5]; | 468 | *conn_err_detail = mbox_sts[5]; |
| 469 | if (tcp_source_port_num) | 469 | if (tcp_source_port_num) |
| 470 | *tcp_source_port_num = (uint16_t) mbox_sts[6] >> 16; | 470 | *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16); |
| 471 | if (connection_id) | 471 | if (connection_id) |
| 472 | *connection_id = (uint16_t) mbox_sts[6] & 0x00FF; | 472 | *connection_id = (uint16_t) mbox_sts[6] & 0x00FF; |
| 473 | status = QLA_SUCCESS; | 473 | status = QLA_SUCCESS; |
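The qla4xxx one-liner above adds parentheses because a cast binds more tightly than a shift: (uint16_t) mbox_sts[6] >> 16 truncates to 16 bits first and then shifts, which always produces 0, whereas the intent was to take the upper half-word. A standalone demonstration in plain C:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t reg = 0x12345678;

            uint16_t wrong = (uint16_t) reg >> 16;     /* cast first: 0x5678 >> 16 == 0 */
            uint16_t right = (uint16_t) (reg >> 16);   /* shift first: keeps 0x1234 */

            printf("wrong=0x%04x right=0x%04x\n", wrong, right);
            return 0;
    }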
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c index d0b7d2ff9ac5..333580bf37c5 100644 --- a/drivers/scsi/wd7000.c +++ b/drivers/scsi/wd7000.c | |||
| @@ -1587,7 +1587,7 @@ static int wd7000_host_reset(struct scsi_cmnd *SCpnt) | |||
| 1587 | { | 1587 | { |
| 1588 | Adapter *host = (Adapter *) SCpnt->device->host->hostdata; | 1588 | Adapter *host = (Adapter *) SCpnt->device->host->hostdata; |
| 1589 | 1589 | ||
| 1590 | spin_unlock_irq(SCpnt->device->host->host_lock); | 1590 | spin_lock_irq(SCpnt->device->host->host_lock); |
| 1591 | 1591 | ||
| 1592 | if (wd7000_adapter_reset(host) < 0) { | 1592 | if (wd7000_adapter_reset(host) < 0) { |
| 1593 | spin_unlock_irq(SCpnt->device->host->host_lock); | 1593 | spin_unlock_irq(SCpnt->device->host->host_lock); |
diff --git a/drivers/serial/mcf.c b/drivers/serial/mcf.c index 7bb5fee639e3..b5aaef965f24 100644 --- a/drivers/serial/mcf.c +++ b/drivers/serial/mcf.c | |||
| @@ -263,6 +263,7 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios, | |||
| 263 | } | 263 | } |
| 264 | 264 | ||
| 265 | spin_lock_irqsave(&port->lock, flags); | 265 | spin_lock_irqsave(&port->lock, flags); |
| 266 | uart_update_timeout(port, termios->c_cflag, baud); | ||
| 266 | writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR); | 267 | writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR); |
| 267 | writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR); | 268 | writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR); |
| 268 | writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR); | 269 | writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR); |
| @@ -379,6 +380,7 @@ static irqreturn_t mcf_interrupt(int irq, void *data) | |||
| 379 | static void mcf_config_port(struct uart_port *port, int flags) | 380 | static void mcf_config_port(struct uart_port *port, int flags) |
| 380 | { | 381 | { |
| 381 | port->type = PORT_MCF; | 382 | port->type = PORT_MCF; |
| 383 | port->fifosize = MCFUART_TXFIFOSIZE; | ||
| 382 | 384 | ||
| 383 | /* Clear mask, so no surprise interrupts. */ | 385 | /* Clear mask, so no surprise interrupts. */ |
| 384 | writeb(0, port->membase + MCFUART_UIMR); | 386 | writeb(0, port->membase + MCFUART_UIMR); |
| @@ -424,7 +426,7 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser) | |||
| 424 | /* | 426 | /* |
| 425 | * Define the basic serial functions we support. | 427 | * Define the basic serial functions we support. |
| 426 | */ | 428 | */ |
| 427 | static struct uart_ops mcf_uart_ops = { | 429 | static const struct uart_ops mcf_uart_ops = { |
| 428 | .tx_empty = mcf_tx_empty, | 430 | .tx_empty = mcf_tx_empty, |
| 429 | .get_mctrl = mcf_get_mctrl, | 431 | .get_mctrl = mcf_get_mctrl, |
| 430 | .set_mctrl = mcf_set_mctrl, | 432 | .set_mctrl = mcf_set_mctrl, |
| @@ -443,7 +445,7 @@ static struct uart_ops mcf_uart_ops = { | |||
| 443 | .verify_port = mcf_verify_port, | 445 | .verify_port = mcf_verify_port, |
| 444 | }; | 446 | }; |
| 445 | 447 | ||
| 446 | static struct mcf_uart mcf_ports[3]; | 448 | static struct mcf_uart mcf_ports[4]; |
| 447 | 449 | ||
| 448 | #define MCF_MAXPORTS ARRAY_SIZE(mcf_ports) | 450 | #define MCF_MAXPORTS ARRAY_SIZE(mcf_ports) |
| 449 | 451 | ||
diff --git a/drivers/staging/dt3155/dt3155_drv.c b/drivers/staging/dt3155/dt3155_drv.c index a67c622869d2..e2c44ec6fc45 100644 --- a/drivers/staging/dt3155/dt3155_drv.c +++ b/drivers/staging/dt3155/dt3155_drv.c | |||
| @@ -57,19 +57,8 @@ MA 02111-1307 USA | |||
| 57 | 57 | ||
| 58 | extern void printques(int); | 58 | extern void printques(int); |
| 59 | 59 | ||
| 60 | #ifdef MODULE | ||
| 61 | #include <linux/module.h> | 60 | #include <linux/module.h> |
| 62 | #include <linux/interrupt.h> | 61 | #include <linux/interrupt.h> |
| 63 | |||
| 64 | |||
| 65 | MODULE_LICENSE("GPL"); | ||
| 66 | |||
| 67 | #endif | ||
| 68 | |||
| 69 | #ifndef CONFIG_PCI | ||
| 70 | #error "DT3155 : Kernel PCI support not enabled (DT3155 drive requires PCI)" | ||
| 71 | #endif | ||
| 72 | |||
| 73 | #include <linux/pci.h> | 62 | #include <linux/pci.h> |
| 74 | #include <linux/types.h> | 63 | #include <linux/types.h> |
| 75 | #include <linux/poll.h> | 64 | #include <linux/poll.h> |
| @@ -84,6 +73,9 @@ MODULE_LICENSE("GPL"); | |||
| 84 | #include "dt3155_io.h" | 73 | #include "dt3155_io.h" |
| 85 | #include "allocator.h" | 74 | #include "allocator.h" |
| 86 | 75 | ||
| 76 | |||
| 77 | MODULE_LICENSE("GPL"); | ||
| 78 | |||
| 87 | /* Error variable. Zero means no error. */ | 79 | /* Error variable. Zero means no error. */ |
| 88 | int dt3155_errno = 0; | 80 | int dt3155_errno = 0; |
| 89 | 81 | ||
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 6a3b5cae3a6e..2f3dc4cdf79b 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
| @@ -301,7 +301,7 @@ static int usb_probe_interface(struct device *dev) | |||
| 301 | 301 | ||
| 302 | intf->condition = USB_INTERFACE_BINDING; | 302 | intf->condition = USB_INTERFACE_BINDING; |
| 303 | 303 | ||
| 304 | /* Bound interfaces are initially active. They are | 304 | /* Probed interfaces are initially active. They are |
| 305 | * runtime-PM-enabled only if the driver has autosuspend support. | 305 | * runtime-PM-enabled only if the driver has autosuspend support. |
| 306 | * They are sensitive to their children's power states. | 306 | * They are sensitive to their children's power states. |
| 307 | */ | 307 | */ |
| @@ -437,11 +437,11 @@ int usb_driver_claim_interface(struct usb_driver *driver, | |||
| 437 | 437 | ||
| 438 | iface->condition = USB_INTERFACE_BOUND; | 438 | iface->condition = USB_INTERFACE_BOUND; |
| 439 | 439 | ||
| 440 | /* Bound interfaces are initially active. They are | 440 | /* Claimed interfaces are initially inactive (suspended). They are |
| 441 | * runtime-PM-enabled only if the driver has autosuspend support. | 441 | * runtime-PM-enabled only if the driver has autosuspend support. |
| 442 | * They are sensitive to their children's power states. | 442 | * They are sensitive to their children's power states. |
| 443 | */ | 443 | */ |
| 444 | pm_runtime_set_active(dev); | 444 | pm_runtime_set_suspended(dev); |
| 445 | pm_suspend_ignore_children(dev, false); | 445 | pm_suspend_ignore_children(dev, false); |
| 446 | if (driver->supports_autosuspend) | 446 | if (driver->supports_autosuspend) |
| 447 | pm_runtime_enable(dev); | 447 | pm_runtime_enable(dev); |
| @@ -1170,7 +1170,7 @@ done: | |||
| 1170 | static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) | 1170 | static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) |
| 1171 | { | 1171 | { |
| 1172 | int status = 0; | 1172 | int status = 0; |
| 1173 | int i = 0; | 1173 | int i = 0, n = 0; |
| 1174 | struct usb_interface *intf; | 1174 | struct usb_interface *intf; |
| 1175 | 1175 | ||
| 1176 | if (udev->state == USB_STATE_NOTATTACHED || | 1176 | if (udev->state == USB_STATE_NOTATTACHED || |
| @@ -1179,7 +1179,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) | |||
| 1179 | 1179 | ||
| 1180 | /* Suspend all the interfaces and then udev itself */ | 1180 | /* Suspend all the interfaces and then udev itself */ |
| 1181 | if (udev->actconfig) { | 1181 | if (udev->actconfig) { |
| 1182 | for (; i < udev->actconfig->desc.bNumInterfaces; i++) { | 1182 | n = udev->actconfig->desc.bNumInterfaces; |
| 1183 | for (i = n - 1; i >= 0; --i) { | ||
| 1183 | intf = udev->actconfig->interface[i]; | 1184 | intf = udev->actconfig->interface[i]; |
| 1184 | status = usb_suspend_interface(udev, intf, msg); | 1185 | status = usb_suspend_interface(udev, intf, msg); |
| 1185 | if (status != 0) | 1186 | if (status != 0) |
| @@ -1192,7 +1193,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) | |||
| 1192 | /* If the suspend failed, resume interfaces that did get suspended */ | 1193 | /* If the suspend failed, resume interfaces that did get suspended */ |
| 1193 | if (status != 0) { | 1194 | if (status != 0) { |
| 1194 | msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME); | 1195 | msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME); |
| 1195 | while (--i >= 0) { | 1196 | while (++i < n) { |
| 1196 | intf = udev->actconfig->interface[i]; | 1197 | intf = udev->actconfig->interface[i]; |
| 1197 | usb_resume_interface(udev, intf, msg, 0); | 1198 | usb_resume_interface(udev, intf, msg, 0); |
| 1198 | } | 1199 | } |
| @@ -1263,13 +1264,47 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg) | |||
| 1263 | return status; | 1264 | return status; |
| 1264 | } | 1265 | } |
| 1265 | 1266 | ||
| 1267 | static void choose_wakeup(struct usb_device *udev, pm_message_t msg) | ||
| 1268 | { | ||
| 1269 | int w, i; | ||
| 1270 | struct usb_interface *intf; | ||
| 1271 | |||
| 1272 | /* Remote wakeup is needed only when we actually go to sleep. | ||
| 1273 | * For things like FREEZE and QUIESCE, if the device is already | ||
| 1274 | * autosuspended then its current wakeup setting is okay. | ||
| 1275 | */ | ||
| 1276 | if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) { | ||
| 1277 | if (udev->state != USB_STATE_SUSPENDED) | ||
| 1278 | udev->do_remote_wakeup = 0; | ||
| 1279 | return; | ||
| 1280 | } | ||
| 1281 | |||
| 1282 | /* If remote wakeup is permitted, see whether any interface drivers | ||
| 1283 | * actually want it. | ||
| 1284 | */ | ||
| 1285 | w = 0; | ||
| 1286 | if (device_may_wakeup(&udev->dev) && udev->actconfig) { | ||
| 1287 | for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { | ||
| 1288 | intf = udev->actconfig->interface[i]; | ||
| 1289 | w |= intf->needs_remote_wakeup; | ||
| 1290 | } | ||
| 1291 | } | ||
| 1292 | |||
| 1293 | /* If the device is autosuspended with the wrong wakeup setting, | ||
| 1294 | * autoresume now so the setting can be changed. | ||
| 1295 | */ | ||
| 1296 | if (udev->state == USB_STATE_SUSPENDED && w != udev->do_remote_wakeup) | ||
| 1297 | pm_runtime_resume(&udev->dev); | ||
| 1298 | udev->do_remote_wakeup = w; | ||
| 1299 | } | ||
| 1300 | |||
| 1266 | /* The device lock is held by the PM core */ | 1301 | /* The device lock is held by the PM core */ |
| 1267 | int usb_suspend(struct device *dev, pm_message_t msg) | 1302 | int usb_suspend(struct device *dev, pm_message_t msg) |
| 1268 | { | 1303 | { |
| 1269 | struct usb_device *udev = to_usb_device(dev); | 1304 | struct usb_device *udev = to_usb_device(dev); |
| 1270 | 1305 | ||
| 1271 | do_unbind_rebind(udev, DO_UNBIND); | 1306 | do_unbind_rebind(udev, DO_UNBIND); |
| 1272 | udev->do_remote_wakeup = device_may_wakeup(&udev->dev); | 1307 | choose_wakeup(udev, msg); |
| 1273 | return usb_suspend_both(udev, msg); | 1308 | return usb_suspend_both(udev, msg); |
| 1274 | } | 1309 | } |
| 1275 | 1310 | ||
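Besides choose_wakeup(), the usb core hunk above reverses the interface suspend loop (last interface first) and, when one suspend fails, walks forward again so only the interfaces that actually suspended get resumed. A generic sketch of that partial-rollback pattern with hypothetical suspend_one()/resume_one() helpers:

    struct child { int id; };

    static int suspend_one(struct child *c) { (void)c; return 0; }  /* hypothetical */
    static void resume_one(struct child *c) { (void)c; }            /* hypothetical */

    /* Suspend n children in reverse order; undo only the completed ones on failure. */
    static int suspend_all(struct child *kids, int n)
    {
            int i, status = 0;

            for (i = n - 1; i >= 0; --i) {
                    status = suspend_one(&kids[i]);
                    if (status)
                            break;               /* kids[i] never suspended */
            }

            if (status) {
                    while (++i < n)              /* resume exactly kids[i+1] .. kids[n-1] */
                            resume_one(&kids[i]);
            }
            return status;
    }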
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 207e7a85aeb0..13ead00aecd5 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
| @@ -543,6 +543,7 @@ static int ehci_init(struct usb_hcd *hcd) | |||
| 543 | */ | 543 | */ |
| 544 | ehci->periodic_size = DEFAULT_I_TDPS; | 544 | ehci->periodic_size = DEFAULT_I_TDPS; |
| 545 | INIT_LIST_HEAD(&ehci->cached_itd_list); | 545 | INIT_LIST_HEAD(&ehci->cached_itd_list); |
| 546 | INIT_LIST_HEAD(&ehci->cached_sitd_list); | ||
| 546 | if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) | 547 | if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) |
| 547 | return retval; | 548 | return retval; |
| 548 | 549 | ||
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 19372673bf09..c7178bcde67a 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
| @@ -801,7 +801,7 @@ static int ehci_hub_control ( | |||
| 801 | * this bit; seems too long to spin routinely... | 801 | * this bit; seems too long to spin routinely... |
| 802 | */ | 802 | */ |
| 803 | retval = handshake(ehci, status_reg, | 803 | retval = handshake(ehci, status_reg, |
| 804 | PORT_RESET, 0, 750); | 804 | PORT_RESET, 0, 1000); |
| 805 | if (retval != 0) { | 805 | if (retval != 0) { |
| 806 | ehci_err (ehci, "port %d reset error %d\n", | 806 | ehci_err (ehci, "port %d reset error %d\n", |
| 807 | wIndex + 1, retval); | 807 | wIndex + 1, retval); |
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index aeda96e0af67..1f3f01eacaf0 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c | |||
| @@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh) | |||
| 136 | 136 | ||
| 137 | static void ehci_mem_cleanup (struct ehci_hcd *ehci) | 137 | static void ehci_mem_cleanup (struct ehci_hcd *ehci) |
| 138 | { | 138 | { |
| 139 | free_cached_itd_list(ehci); | 139 | free_cached_lists(ehci); |
| 140 | if (ehci->async) | 140 | if (ehci->async) |
| 141 | qh_put (ehci->async); | 141 | qh_put (ehci->async); |
| 142 | ehci->async = NULL; | 142 | ehci->async = NULL; |
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index a67a0030dd57..40a858335035 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c | |||
| @@ -629,11 +629,13 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) | |||
| 629 | } | 629 | } |
| 630 | snprintf(supply, sizeof(supply), "hsusb%d", i); | 630 | snprintf(supply, sizeof(supply), "hsusb%d", i); |
| 631 | omap->regulator[i] = regulator_get(omap->dev, supply); | 631 | omap->regulator[i] = regulator_get(omap->dev, supply); |
| 632 | if (IS_ERR(omap->regulator[i])) | 632 | if (IS_ERR(omap->regulator[i])) { |
| 633 | omap->regulator[i] = NULL; | ||
| 633 | dev_dbg(&pdev->dev, | 634 | dev_dbg(&pdev->dev, |
| 634 | "failed to get ehci port%d regulator\n", i); | 635 | "failed to get ehci port%d regulator\n", i); |
| 635 | else | 636 | } else { |
| 636 | regulator_enable(omap->regulator[i]); | 637 | regulator_enable(omap->regulator[i]); |
| 638 | } | ||
| 637 | } | 639 | } |
| 638 | 640 | ||
| 639 | ret = omap_start_ehc(omap, hcd); | 641 | ret = omap_start_ehc(omap, hcd); |
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index a0aaaaff2560..805ec633a652 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
| @@ -510,7 +510,7 @@ static int disable_periodic (struct ehci_hcd *ehci) | |||
| 510 | ehci_writel(ehci, cmd, &ehci->regs->command); | 510 | ehci_writel(ehci, cmd, &ehci->regs->command); |
| 511 | /* posted write ... */ | 511 | /* posted write ... */ |
| 512 | 512 | ||
| 513 | free_cached_itd_list(ehci); | 513 | free_cached_lists(ehci); |
| 514 | 514 | ||
| 515 | ehci->next_uframe = -1; | 515 | ehci->next_uframe = -1; |
| 516 | return 0; | 516 | return 0; |
| @@ -2139,13 +2139,27 @@ sitd_complete ( | |||
| 2139 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); | 2139 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); |
| 2140 | } | 2140 | } |
| 2141 | iso_stream_put (ehci, stream); | 2141 | iso_stream_put (ehci, stream); |
| 2142 | /* OK to recycle this SITD now that its completion callback ran. */ | 2142 | |
| 2143 | done: | 2143 | done: |
| 2144 | sitd->urb = NULL; | 2144 | sitd->urb = NULL; |
| 2145 | sitd->stream = NULL; | 2145 | if (ehci->clock_frame != sitd->frame) { |
| 2146 | list_move(&sitd->sitd_list, &stream->free_list); | 2146 | /* OK to recycle this SITD now. */ |
| 2147 | iso_stream_put(ehci, stream); | 2147 | sitd->stream = NULL; |
| 2148 | 2148 | list_move(&sitd->sitd_list, &stream->free_list); | |
| 2149 | iso_stream_put(ehci, stream); | ||
| 2150 | } else { | ||
| 2151 | /* HW might remember this SITD, so we can't recycle it yet. | ||
| 2152 | * Move it to a safe place until a new frame starts. | ||
| 2153 | */ | ||
| 2154 | list_move(&sitd->sitd_list, &ehci->cached_sitd_list); | ||
| 2155 | if (stream->refcount == 2) { | ||
| 2156 | /* If iso_stream_put() were called here, stream | ||
| 2157 | * would be freed. Instead, just prevent reuse. | ||
| 2158 | */ | ||
| 2159 | stream->ep->hcpriv = NULL; | ||
| 2160 | stream->ep = NULL; | ||
| 2161 | } | ||
| 2162 | } | ||
| 2149 | return retval; | 2163 | return retval; |
| 2150 | } | 2164 | } |
| 2151 | 2165 | ||
| @@ -2211,9 +2225,10 @@ done: | |||
| 2211 | 2225 | ||
| 2212 | /*-------------------------------------------------------------------------*/ | 2226 | /*-------------------------------------------------------------------------*/ |
| 2213 | 2227 | ||
| 2214 | static void free_cached_itd_list(struct ehci_hcd *ehci) | 2228 | static void free_cached_lists(struct ehci_hcd *ehci) |
| 2215 | { | 2229 | { |
| 2216 | struct ehci_itd *itd, *n; | 2230 | struct ehci_itd *itd, *n; |
| 2231 | struct ehci_sitd *sitd, *sn; | ||
| 2217 | 2232 | ||
| 2218 | list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { | 2233 | list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { |
| 2219 | struct ehci_iso_stream *stream = itd->stream; | 2234 | struct ehci_iso_stream *stream = itd->stream; |
| @@ -2221,6 +2236,13 @@ static void free_cached_itd_list(struct ehci_hcd *ehci) | |||
| 2221 | list_move(&itd->itd_list, &stream->free_list); | 2236 | list_move(&itd->itd_list, &stream->free_list); |
| 2222 | iso_stream_put(ehci, stream); | 2237 | iso_stream_put(ehci, stream); |
| 2223 | } | 2238 | } |
| 2239 | |||
| 2240 | list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) { | ||
| 2241 | struct ehci_iso_stream *stream = sitd->stream; | ||
| 2242 | sitd->stream = NULL; | ||
| 2243 | list_move(&sitd->sitd_list, &stream->free_list); | ||
| 2244 | iso_stream_put(ehci, stream); | ||
| 2245 | } | ||
| 2224 | } | 2246 | } |
| 2225 | 2247 | ||
| 2226 | /*-------------------------------------------------------------------------*/ | 2248 | /*-------------------------------------------------------------------------*/ |
| @@ -2247,7 +2269,7 @@ scan_periodic (struct ehci_hcd *ehci) | |||
| 2247 | clock_frame = -1; | 2269 | clock_frame = -1; |
| 2248 | } | 2270 | } |
| 2249 | if (ehci->clock_frame != clock_frame) { | 2271 | if (ehci->clock_frame != clock_frame) { |
| 2250 | free_cached_itd_list(ehci); | 2272 | free_cached_lists(ehci); |
| 2251 | ehci->clock_frame = clock_frame; | 2273 | ehci->clock_frame = clock_frame; |
| 2252 | } | 2274 | } |
| 2253 | clock %= mod; | 2275 | clock %= mod; |
| @@ -2414,7 +2436,7 @@ restart: | |||
| 2414 | clock = now; | 2436 | clock = now; |
| 2415 | clock_frame = clock >> 3; | 2437 | clock_frame = clock >> 3; |
| 2416 | if (ehci->clock_frame != clock_frame) { | 2438 | if (ehci->clock_frame != clock_frame) { |
| 2417 | free_cached_itd_list(ehci); | 2439 | free_cached_lists(ehci); |
| 2418 | ehci->clock_frame = clock_frame; | 2440 | ehci->clock_frame = clock_frame; |
| 2419 | } | 2441 | } |
| 2420 | } else { | 2442 | } else { |
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index b1dce96dd621..556c0b48f3ab 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h | |||
| @@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controller */ | |||
| 87 | int next_uframe; /* scan periodic, start here */ | 87 | int next_uframe; /* scan periodic, start here */ |
| 88 | unsigned periodic_sched; /* periodic activity count */ | 88 | unsigned periodic_sched; /* periodic activity count */ |
| 89 | 89 | ||
| 90 | /* list of itds completed while clock_frame was still active */ | 90 | /* list of itds & sitds completed while clock_frame was still active */ |
| 91 | struct list_head cached_itd_list; | 91 | struct list_head cached_itd_list; |
| 92 | struct list_head cached_sitd_list; | ||
| 92 | unsigned clock_frame; | 93 | unsigned clock_frame; |
| 93 | 94 | ||
| 94 | /* per root hub port */ | 95 | /* per root hub port */ |
| @@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action) | |||
| 195 | clear_bit (action, &ehci->actions); | 196 | clear_bit (action, &ehci->actions); |
| 196 | } | 197 | } |
| 197 | 198 | ||
| 198 | static void free_cached_itd_list(struct ehci_hcd *ehci); | 199 | static void free_cached_lists(struct ehci_hcd *ehci); |
| 199 | 200 | ||
| 200 | /*-------------------------------------------------------------------------*/ | 201 | /*-------------------------------------------------------------------------*/ |
| 201 | 202 | ||
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c index 4aa08d36d077..d22fb4d577b7 100644 --- a/drivers/usb/host/ohci-da8xx.c +++ b/drivers/usb/host/ohci-da8xx.c | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | #error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX." | 23 | #error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX." |
| 24 | #endif | 24 | #endif |
| 25 | 25 | ||
| 26 | #define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG) | 26 | #define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG) |
| 27 | 27 | ||
| 28 | static struct clk *usb11_clk; | 28 | static struct clk *usb11_clk; |
| 29 | static struct clk *usb20_clk; | 29 | static struct clk *usb20_clk; |
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c index a9555cb901a1..de8ef945b536 100644 --- a/drivers/usb/misc/usbsevseg.c +++ b/drivers/usb/misc/usbsevseg.c | |||
| @@ -49,6 +49,7 @@ struct usb_sevsegdev { | |||
| 49 | u16 textlength; | 49 | u16 textlength; |
| 50 | 50 | ||
| 51 | u8 shadow_power; /* for PM */ | 51 | u8 shadow_power; /* for PM */ |
| 52 | u8 has_interface_pm; | ||
| 52 | }; | 53 | }; |
| 53 | 54 | ||
| 54 | /* sysfs_streq can't replace this completely | 55 | /* sysfs_streq can't replace this completely |
| @@ -68,12 +69,16 @@ static void update_display_powered(struct usb_sevsegdev *mydev) | |||
| 68 | { | 69 | { |
| 69 | int rc; | 70 | int rc; |
| 70 | 71 | ||
| 71 | if (!mydev->shadow_power && mydev->powered) { | 72 | if (mydev->powered && !mydev->has_interface_pm) { |
| 72 | rc = usb_autopm_get_interface(mydev->intf); | 73 | rc = usb_autopm_get_interface(mydev->intf); |
| 73 | if (rc < 0) | 74 | if (rc < 0) |
| 74 | return; | 75 | return; |
| 76 | mydev->has_interface_pm = 1; | ||
| 75 | } | 77 | } |
| 76 | 78 | ||
| 79 | if (mydev->shadow_power != 1) | ||
| 80 | return; | ||
| 81 | |||
| 77 | rc = usb_control_msg(mydev->udev, | 82 | rc = usb_control_msg(mydev->udev, |
| 78 | usb_sndctrlpipe(mydev->udev, 0), | 83 | usb_sndctrlpipe(mydev->udev, 0), |
| 79 | 0x12, | 84 | 0x12, |
| @@ -86,8 +91,10 @@ static void update_display_powered(struct usb_sevsegdev *mydev) | |||
| 86 | if (rc < 0) | 91 | if (rc < 0) |
| 87 | dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc); | 92 | dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc); |
| 88 | 93 | ||
| 89 | if (mydev->shadow_power && !mydev->powered) | 94 | if (!mydev->powered && mydev->has_interface_pm) { |
| 90 | usb_autopm_put_interface(mydev->intf); | 95 | usb_autopm_put_interface(mydev->intf); |
| 96 | mydev->has_interface_pm = 0; | ||
| 97 | } | ||
| 91 | } | 98 | } |
| 92 | 99 | ||
| 93 | static void update_display_mode(struct usb_sevsegdev *mydev) | 100 | static void update_display_mode(struct usb_sevsegdev *mydev) |
| @@ -351,6 +358,10 @@ static int sevseg_probe(struct usb_interface *interface, | |||
| 351 | mydev->intf = interface; | 358 | mydev->intf = interface; |
| 352 | usb_set_intfdata(interface, mydev); | 359 | usb_set_intfdata(interface, mydev); |
| 353 | 360 | ||
| 361 | /* PM */ | ||
| 362 | mydev->shadow_power = 1; /* currently active */ | ||
| 363 | mydev->has_interface_pm = 0; /* have not issued autopm_get */ | ||
| 364 | |||
| 354 | /*set defaults */ | 365 | /*set defaults */ |
| 355 | mydev->textmode = 0x02; /* ascii mode */ | 366 | mydev->textmode = 0x02; /* ascii mode */ |
| 356 | mydev->mode_msb = 0x06; /* 6 characters */ | 367 | mydev->mode_msb = 0x06; /* 6 characters */ |
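The usbsevseg hunk above introduces has_interface_pm so the usb_autopm_get_interface()/usb_autopm_put_interface() calls stay strictly paired even when the power attribute is toggled while the device is autosuspended (shadow_power tracks suspend state separately). A small sketch of that reference-balancing pattern, using a hypothetical device struct:

    #include <linux/usb.h>

    struct sevseg_stub {
            struct usb_interface *intf;
            u8 powered;              /* what user space asked for */
            u8 has_pm_ref;           /* did we take the autopm reference? */
    };

    static void set_powered(struct sevseg_stub *dev, int on)
    {
            dev->powered = on;

            /* take the reference exactly once when turning on ... */
            if (dev->powered && !dev->has_pm_ref) {
                    if (usb_autopm_get_interface(dev->intf) < 0)
                            return;
                    dev->has_pm_ref = 1;
            }

            /* ... device I/O would go here, while the interface is resumed ... */

            /* ... and drop it exactly once when turning off */
            if (!dev->powered && dev->has_pm_ref) {
                    usb_autopm_put_interface(dev->intf);
                    dev->has_pm_ref = 0;
            }
    }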
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 73d5f346d3e0..c97a0bb5b6db 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
| @@ -97,6 +97,7 @@ static const struct usb_device_id id_table[] = { | |||
| 97 | { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, | 97 | { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, |
| 98 | { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, | 98 | { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, |
| 99 | { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, | 99 | { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, |
| 100 | { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, | ||
| 100 | { } /* Terminating entry */ | 101 | { } /* Terminating entry */ |
| 101 | }; | 102 | }; |
| 102 | 103 | ||
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index d640dc951568..a352d5f3a59c 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
| @@ -134,3 +134,7 @@ | |||
| 134 | /* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */ | 134 | /* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */ |
| 135 | #define SANWA_VENDOR_ID 0x11ad | 135 | #define SANWA_VENDOR_ID 0x11ad |
| 136 | #define SANWA_PRODUCT_ID 0x0001 | 136 | #define SANWA_PRODUCT_ID 0x0001 |
| 137 | |||
| 138 | /* ADLINK ND-6530 RS232,RS485 and RS422 adapter */ | ||
| 139 | #define ADLINK_VENDOR_ID 0x0b63 | ||
| 140 | #define ADLINK_ND6530_PRODUCT_ID 0x6530 | ||
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c index 0b9362061713..7e3bea23600b 100644 --- a/drivers/usb/serial/qcaux.c +++ b/drivers/usb/serial/qcaux.c | |||
| @@ -42,6 +42,14 @@ | |||
| 42 | #define CMOTECH_PRODUCT_CDU550 0x5553 | 42 | #define CMOTECH_PRODUCT_CDU550 0x5553 |
| 43 | #define CMOTECH_PRODUCT_CDX650 0x6512 | 43 | #define CMOTECH_PRODUCT_CDX650 0x6512 |
| 44 | 44 | ||
| 45 | /* LG devices */ | ||
| 46 | #define LG_VENDOR_ID 0x1004 | ||
| 47 | #define LG_PRODUCT_VX4400_6000 0x6000 /* VX4400/VX6000/Rumor */ | ||
| 48 | |||
| 49 | /* Sanyo devices */ | ||
| 50 | #define SANYO_VENDOR_ID 0x0474 | ||
| 51 | #define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */ | ||
| 52 | |||
| 45 | static struct usb_device_id id_table[] = { | 53 | static struct usb_device_id id_table[] = { |
| 46 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) }, | 54 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) }, |
| 47 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) }, | 55 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) }, |
| @@ -51,6 +59,8 @@ static struct usb_device_id id_table[] = { | |||
| 51 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) }, | 59 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) }, |
| 52 | { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) }, | 60 | { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) }, |
| 53 | { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) }, | 61 | { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) }, |
| 62 | { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) }, | ||
| 63 | { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) }, | ||
| 54 | { }, | 64 | { }, |
| 55 | }; | 65 | }; |
| 56 | MODULE_DEVICE_TABLE(usb, id_table); | 66 | MODULE_DEVICE_TABLE(usb, id_table); |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 9202f94505e6..ef0bdb08d788 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
| @@ -230,6 +230,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = { | |||
| 230 | static const struct usb_device_id id_table[] = { | 230 | static const struct usb_device_id id_table[] = { |
| 231 | { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ | 231 | { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ |
| 232 | { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ | 232 | { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ |
| 233 | { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */ | ||
| 233 | { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ | 234 | { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ |
| 234 | 235 | ||
| 235 | { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ | 236 | { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ |
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 0afe5c71c17e..880e990abb07 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
| @@ -172,7 +172,7 @@ static unsigned int product_5052_count; | |||
| 172 | /* the array dimension is the number of default entries plus */ | 172 | /* the array dimension is the number of default entries plus */ |
| 173 | /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ | 173 | /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ |
| 174 | /* null entry */ | 174 | /* null entry */ |
| 175 | static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = { | 175 | static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = { |
| 176 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, | 176 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, |
| 177 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, | 177 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, |
| 178 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, | 178 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, |
| @@ -180,6 +180,9 @@ static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = { | |||
| 180 | { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, | 180 | { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, |
| 181 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, | 181 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, |
| 182 | { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, | 182 | { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, |
| 183 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) }, | ||
| 184 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) }, | ||
| 185 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) }, | ||
| 183 | { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, | 186 | { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, |
| 184 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, | 187 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, |
| 185 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, | 188 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, |
| @@ -192,7 +195,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = { | |||
| 192 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, | 195 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, |
| 193 | }; | 196 | }; |
| 194 | 197 | ||
| 195 | static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = { | 198 | static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = { |
| 196 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, | 199 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, |
| 197 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, | 200 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, |
| 198 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, | 201 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, |
| @@ -200,6 +203,9 @@ static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] | |||
| 200 | { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, | 203 | { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, |
| 201 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, | 204 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, |
| 202 | { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, | 205 | { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, |
| 206 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) }, | ||
| 207 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) }, | ||
| 208 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) }, | ||
| 203 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, | 209 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, |
| 204 | { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, | 210 | { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, |
| 205 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, | 211 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, |
| @@ -287,6 +293,8 @@ MODULE_FIRMWARE("ti_5052.fw"); | |||
| 287 | MODULE_FIRMWARE("mts_cdma.fw"); | 293 | MODULE_FIRMWARE("mts_cdma.fw"); |
| 288 | MODULE_FIRMWARE("mts_gsm.fw"); | 294 | MODULE_FIRMWARE("mts_gsm.fw"); |
| 289 | MODULE_FIRMWARE("mts_edge.fw"); | 295 | MODULE_FIRMWARE("mts_edge.fw"); |
| 296 | MODULE_FIRMWARE("mts_mt9234mu.fw"); | ||
| 297 | MODULE_FIRMWARE("mts_mt9234zba.fw"); | ||
| 290 | 298 | ||
| 291 | module_param(debug, bool, S_IRUGO | S_IWUSR); | 299 | module_param(debug, bool, S_IRUGO | S_IWUSR); |
| 292 | MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes"); | 300 | MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes"); |
| @@ -1687,6 +1695,7 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
| 1687 | const struct firmware *fw_p; | 1695 | const struct firmware *fw_p; |
| 1688 | char buf[32]; | 1696 | char buf[32]; |
| 1689 | 1697 | ||
| 1698 | dbg("%s\n", __func__); | ||
| 1690 | /* try ID specific firmware first, then try generic firmware */ | 1699 | /* try ID specific firmware first, then try generic firmware */ |
| 1691 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, | 1700 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, |
| 1692 | dev->descriptor.idProduct); | 1701 | dev->descriptor.idProduct); |
| @@ -1703,7 +1712,15 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
| 1703 | case MTS_EDGE_PRODUCT_ID: | 1712 | case MTS_EDGE_PRODUCT_ID: |
| 1704 | strcpy(buf, "mts_edge.fw"); | 1713 | strcpy(buf, "mts_edge.fw"); |
| 1705 | break; | 1714 | break; |
| 1706 | } | 1715 | case MTS_MT9234MU_PRODUCT_ID: |
| 1716 | strcpy(buf, "mts_mt9234mu.fw"); | ||
| 1717 | break; | ||
| 1718 | case MTS_MT9234ZBA_PRODUCT_ID: | ||
| 1719 | strcpy(buf, "mts_mt9234zba.fw"); | ||
| 1720 | break; | ||
| 1721 | case MTS_MT9234ZBAOLD_PRODUCT_ID: | ||
| 1722 | strcpy(buf, "mts_mt9234zba.fw"); | ||
| 1723 | break; } | ||
| 1707 | } | 1724 | } |
| 1708 | if (buf[0] == '\0') { | 1725 | if (buf[0] == '\0') { |
| 1709 | if (tdev->td_is_3410) | 1726 | if (tdev->td_is_3410) |
| @@ -1718,7 +1735,7 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
| 1718 | return -ENOENT; | 1735 | return -ENOENT; |
| 1719 | } | 1736 | } |
| 1720 | if (fw_p->size > TI_FIRMWARE_BUF_SIZE) { | 1737 | if (fw_p->size > TI_FIRMWARE_BUF_SIZE) { |
| 1721 | dev_err(&dev->dev, "%s - firmware too large\n", __func__); | 1738 | dev_err(&dev->dev, "%s - firmware too large %d \n", __func__, fw_p->size); |
| 1722 | return -ENOENT; | 1739 | return -ENOENT; |
| 1723 | } | 1740 | } |
| 1724 | 1741 | ||
| @@ -1730,6 +1747,7 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
| 1730 | status = ti_do_download(dev, pipe, buffer, fw_p->size); | 1747 | status = ti_do_download(dev, pipe, buffer, fw_p->size); |
| 1731 | kfree(buffer); | 1748 | kfree(buffer); |
| 1732 | } else { | 1749 | } else { |
| 1750 | dbg("%s ENOMEM\n", __func__); | ||
| 1733 | status = -ENOMEM; | 1751 | status = -ENOMEM; |
| 1734 | } | 1752 | } |
| 1735 | release_firmware(fw_p); | 1753 | release_firmware(fw_p); |
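The ti_usb_3410_5052 hunks above grow three hand-counted array dimensions and extend a switch that maps MultiTech product IDs to firmware names. As an illustration only (not what the driver does), a lookup table sized with the usual sizeof-array idiom keeps the ID-to-firmware mapping and its length in one place; the IDs and file names below are copied from the hunks, everything else is a sketch.

#include <stdio.h>
#include <stddef.h>

/* Product IDs copied from the ti_usb_3410_5052.h hunk further down. */
#define MTS_CDMA_PRODUCT_ID         0xF110
#define MTS_GSM_PRODUCT_ID          0xF111
#define MTS_EDGE_PRODUCT_ID         0xF112
#define MTS_MT9234MU_PRODUCT_ID     0xF114
#define MTS_MT9234ZBA_PRODUCT_ID    0xF115
#define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319

static const struct fw_map {
    unsigned short id;
    const char *fw;
} fw_table[] = {
    { MTS_CDMA_PRODUCT_ID,         "mts_cdma.fw" },
    { MTS_GSM_PRODUCT_ID,          "mts_gsm.fw" },
    { MTS_EDGE_PRODUCT_ID,         "mts_edge.fw" },
    { MTS_MT9234MU_PRODUCT_ID,     "mts_mt9234mu.fw" },
    { MTS_MT9234ZBA_PRODUCT_ID,    "mts_mt9234zba.fw" },
    { MTS_MT9234ZBAOLD_PRODUCT_ID, "mts_mt9234zba.fw" },
};

/* The count follows the entries automatically, unlike a hard-coded "13". */
static const char *fw_for_id(unsigned short id)
{
    size_t i;
    for (i = 0; i < sizeof(fw_table) / sizeof(fw_table[0]); i++)
        if (fw_table[i].id == id)
            return fw_table[i].fw;
    return NULL;
}

int main(void)
{
    const char *fw = fw_for_id(MTS_MT9234ZBAOLD_PRODUCT_ID);
    printf("%s\n", fw ? fw : "no firmware match");   /* mts_mt9234zba.fw */
    return 0;
}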
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h index f323c6025858..2aac1953993b 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.h +++ b/drivers/usb/serial/ti_usb_3410_5052.h | |||
| @@ -45,6 +45,9 @@ | |||
| 45 | #define MTS_CDMA_PRODUCT_ID 0xF110 | 45 | #define MTS_CDMA_PRODUCT_ID 0xF110 |
| 46 | #define MTS_GSM_PRODUCT_ID 0xF111 | 46 | #define MTS_GSM_PRODUCT_ID 0xF111 |
| 47 | #define MTS_EDGE_PRODUCT_ID 0xF112 | 47 | #define MTS_EDGE_PRODUCT_ID 0xF112 |
| 48 | #define MTS_MT9234MU_PRODUCT_ID 0xF114 | ||
| 49 | #define MTS_MT9234ZBA_PRODUCT_ID 0xF115 | ||
| 50 | #define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319 | ||
| 48 | 51 | ||
| 49 | /* Commands */ | 52 | /* Commands */ |
| 50 | #define TI_GET_VERSION 0x01 | 53 | #define TI_GET_VERSION 0x01 |
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c index 46e79d349498..7ec24e46b34b 100644 --- a/drivers/usb/wusbcore/devconnect.c +++ b/drivers/usb/wusbcore/devconnect.c | |||
| @@ -438,7 +438,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc) | |||
| 438 | old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr); | 438 | old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr); |
| 439 | keep_alives = 0; | 439 | keep_alives = 0; |
| 440 | for (cnt = 0; | 440 | for (cnt = 0; |
| 441 | keep_alives <= WUIE_ELT_MAX && cnt < wusbhc->ports_max; | 441 | keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max; |
| 442 | cnt++) { | 442 | cnt++) { |
| 443 | unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout); | 443 | unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout); |
| 444 | 444 | ||
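The one-character wusbcore change above replaces "keep_alives <= WUIE_ELT_MAX" with "<": keep_alives indexes an array with WUIE_ELT_MAX slots, so the old bound permitted one write past the end. A compact user-space illustration of the bound follows; the array size is a stand-in value, not the real WUIE_ELT_MAX.

#include <stdio.h>

#define ELT_MAX 4   /* stand-in for WUIE_ELT_MAX */

int main(void)
{
    unsigned char slots[ELT_MAX];
    int used = 0;
    int i;

    /* Correct guard: stop once all ELT_MAX slots are used. */
    for (i = 0; used < ELT_MAX && i < 10; i++)
        slots[used++] = (unsigned char)i;

    /*
     * With "used <= ELT_MAX" the body would run one extra time and write
     * slots[ELT_MAX], one element past the end of the array.
     */
    printf("filled %d of %d slots\n", used, ELT_MAX);
    return 0;
}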
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c index 581d2dbf675a..ecf405562f5c 100644 --- a/drivers/video/efifb.c +++ b/drivers/video/efifb.c | |||
| @@ -49,6 +49,7 @@ enum { | |||
| 49 | M_MBP_2, /* MacBook Pro 2nd gen */ | 49 | M_MBP_2, /* MacBook Pro 2nd gen */ |
| 50 | M_MBP_SR, /* MacBook Pro (Santa Rosa) */ | 50 | M_MBP_SR, /* MacBook Pro (Santa Rosa) */ |
| 51 | M_MBP_4, /* MacBook Pro, 4th gen */ | 51 | M_MBP_4, /* MacBook Pro, 4th gen */ |
| 52 | M_MBP_5_1, /* MacBook Pro, 5,1th gen */ | ||
| 52 | M_UNKNOWN /* placeholder */ | 53 | M_UNKNOWN /* placeholder */ |
| 53 | }; | 54 | }; |
| 54 | 55 | ||
| @@ -70,6 +71,7 @@ static struct efifb_dmi_info { | |||
| 70 | [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ | 71 | [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ |
| 71 | [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, | 72 | [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, |
| 72 | [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, | 73 | [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, |
| 74 | [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 }, | ||
| 73 | [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } | 75 | [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } |
| 74 | }; | 76 | }; |
| 75 | 77 | ||
| @@ -106,6 +108,7 @@ static struct dmi_system_id __initdata dmi_system_table[] = { | |||
| 106 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), | 108 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), |
| 107 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), | 109 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), |
| 108 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), | 110 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), |
| 111 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1), | ||
| 109 | {}, | 112 | {}, |
| 110 | }; | 113 | }; |
| 111 | 114 | ||
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 3aed38886f94..bfec7c29486d 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
| @@ -103,7 +103,8 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num) | |||
| 103 | num = min(num, ARRAY_SIZE(vb->pfns)); | 103 | num = min(num, ARRAY_SIZE(vb->pfns)); |
| 104 | 104 | ||
| 105 | for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) { | 105 | for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) { |
| 106 | struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY); | 106 | struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY | |
| 107 | __GFP_NOMEMALLOC | __GFP_NOWARN); | ||
| 107 | if (!page) { | 108 | if (!page) { |
| 108 | if (printk_ratelimit()) | 109 | if (printk_ratelimit()) |
| 109 | dev_printk(KERN_INFO, &vb->vdev->dev, | 110 | dev_printk(KERN_INFO, &vb->vdev->dev, |
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index ef36fca2eed4..3a7e9ff8a746 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
| 17 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
| 18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
| 19 | #include <linux/sched.h> | ||
| 19 | 20 | ||
| 20 | #include <asm/irq.h> | 21 | #include <asm/irq.h> |
| 21 | #include <mach/hardware.h> | 22 | #include <mach/hardware.h> |
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index 1ed3d554e372..17726a05a0a6 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c | |||
| @@ -115,9 +115,8 @@ static struct w1_therm_family_converter w1_therm_families[] = { | |||
| 115 | 115 | ||
| 116 | static inline int w1_DS18B20_convert_temp(u8 rom[9]) | 116 | static inline int w1_DS18B20_convert_temp(u8 rom[9]) |
| 117 | { | 117 | { |
| 118 | int t = ((s16)rom[1] << 8) | rom[0]; | 118 | s16 t = le16_to_cpup((__le16 *)rom); |
| 119 | t = t*1000/16; | 119 | return t*1000/16; |
| 120 | return t; | ||
| 121 | } | 120 | } |
| 122 | 121 | ||
| 123 | static inline int w1_DS18S20_convert_temp(u8 rom[9]) | 122 | static inline int w1_DS18S20_convert_temp(u8 rom[9]) |
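The w1_therm hunk above reads the DS18B20 scratchpad temperature with le16_to_cpup() into an s16, so the 16-bit little-endian raw value is sign-extended before being scaled to millidegrees. A user-space sketch of the same arithmetic, using explicit stdint types instead of the kernel helper:

#include <stdio.h>
#include <stdint.h>

/*
 * Scratchpad bytes 0 and 1 hold the raw temperature, little-endian,
 * 1/16 degree Celsius per LSB.
 */
static int ds18b20_convert_temp(const uint8_t rom[9])
{
    int16_t t = (int16_t)(rom[0] | (rom[1] << 8)); /* le16 -> signed host value */
    return t * 1000 / 16;                          /* millidegrees Celsius */
}

int main(void)
{
    uint8_t plus_25[9]  = { 0x91, 0x01 };  /* 0x0191 -> 25.0625 C */
    uint8_t minus_10[9] = { 0x5e, 0xff };  /* 0xff5e -> -10.125 C */

    printf("%d millidegrees\n", ds18b20_convert_temp(plus_25));   /* 25062 */
    printf("%d millidegrees\n", ds18b20_convert_temp(minus_10));  /* -10125 */
    return 0;
}

The negative case is the one the signed 16-bit read matters for; treating the raw value as unsigned would report roughly +4085 degrees instead of -10.125.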
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index 5e813a816ce4..b3feddc4f7d6 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c | |||
| @@ -138,9 +138,9 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) | |||
| 138 | { | 138 | { |
| 139 | struct afs_super_info *super; | 139 | struct afs_super_info *super; |
| 140 | struct vfsmount *mnt; | 140 | struct vfsmount *mnt; |
| 141 | struct page *page = NULL; | 141 | struct page *page; |
| 142 | size_t size; | 142 | size_t size; |
| 143 | char *buf, *devname = NULL, *options = NULL; | 143 | char *buf, *devname, *options; |
| 144 | int ret; | 144 | int ret; |
| 145 | 145 | ||
| 146 | _enter("{%s}", mntpt->d_name.name); | 146 | _enter("{%s}", mntpt->d_name.name); |
| @@ -150,22 +150,22 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) | |||
| 150 | ret = -EINVAL; | 150 | ret = -EINVAL; |
| 151 | size = mntpt->d_inode->i_size; | 151 | size = mntpt->d_inode->i_size; |
| 152 | if (size > PAGE_SIZE - 1) | 152 | if (size > PAGE_SIZE - 1) |
| 153 | goto error; | 153 | goto error_no_devname; |
| 154 | 154 | ||
| 155 | ret = -ENOMEM; | 155 | ret = -ENOMEM; |
| 156 | devname = (char *) get_zeroed_page(GFP_KERNEL); | 156 | devname = (char *) get_zeroed_page(GFP_KERNEL); |
| 157 | if (!devname) | 157 | if (!devname) |
| 158 | goto error; | 158 | goto error_no_devname; |
| 159 | 159 | ||
| 160 | options = (char *) get_zeroed_page(GFP_KERNEL); | 160 | options = (char *) get_zeroed_page(GFP_KERNEL); |
| 161 | if (!options) | 161 | if (!options) |
| 162 | goto error; | 162 | goto error_no_options; |
| 163 | 163 | ||
| 164 | /* read the contents of the AFS special symlink */ | 164 | /* read the contents of the AFS special symlink */ |
| 165 | page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL); | 165 | page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL); |
| 166 | if (IS_ERR(page)) { | 166 | if (IS_ERR(page)) { |
| 167 | ret = PTR_ERR(page); | 167 | ret = PTR_ERR(page); |
| 168 | goto error; | 168 | goto error_no_page; |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | ret = -EIO; | 171 | ret = -EIO; |
| @@ -196,12 +196,12 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) | |||
| 196 | return mnt; | 196 | return mnt; |
| 197 | 197 | ||
| 198 | error: | 198 | error: |
| 199 | if (page) | 199 | page_cache_release(page); |
| 200 | page_cache_release(page); | 200 | error_no_page: |
| 201 | if (devname) | 201 | free_page((unsigned long) options); |
| 202 | free_page((unsigned long) devname); | 202 | error_no_options: |
| 203 | if (options) | 203 | free_page((unsigned long) devname); |
| 204 | free_page((unsigned long) options); | 204 | error_no_devname: |
| 205 | _leave(" = %d", ret); | 205 | _leave(" = %d", ret); |
| 206 | return ERR_PTR(ret); | 206 | return ERR_PTR(ret); |
| 207 | } | 207 | } |
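The afs_mntpt_do_automount hunks above replace NULL-initialised pointers plus "if (x) free(x)" cleanup with distinct error labels, so each label releases exactly the resources acquired before the failure point. A generic user-space sketch of that staged-unwind pattern follows; the names are illustrative, not AFS code.

#include <stdlib.h>

/* Staged error unwinding: each label frees only what was already acquired. */
static int do_setup(void)
{
    char *devname, *options, *page;
    int ret = -1;

    devname = malloc(4096);
    if (!devname)
        goto error_no_devname;

    options = malloc(4096);
    if (!options)
        goto error_no_options;

    page = malloc(4096);
    if (!page)
        goto error_no_page;

    /* ... work with page, options and devname ... */

    free(page);
    free(options);
    free(devname);
    return 0;

error_no_page:
    free(options);
error_no_options:
    free(devname);
error_no_devname:
    return ret;
}

int main(void) { return do_setup() ? 1 : 0; }

Because the labels fall through in reverse acquisition order, no pointer ever needs a NULL check before being released.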
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index e0e769bdca59..49566c1687d8 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c | |||
| @@ -355,7 +355,7 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp) | |||
| 355 | 355 | ||
| 356 | if (!flat_reloc_valid(r, start_brk - start_data + text_len)) { | 356 | if (!flat_reloc_valid(r, start_brk - start_data + text_len)) { |
| 357 | printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)", | 357 | printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)", |
| 358 | (int) r,(int)(start_brk-start_code),(int)text_len); | 358 | (int) r,(int)(start_brk-start_data+text_len),(int)text_len); |
| 359 | goto failed; | 359 | goto failed; |
| 360 | } | 360 | } |
| 361 | 361 | ||
diff --git a/fs/block_dev.c b/fs/block_dev.c index 2a6d0193f139..6dcee88c2e5d 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -406,16 +406,23 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin) | |||
| 406 | 406 | ||
| 407 | int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync) | 407 | int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync) |
| 408 | { | 408 | { |
| 409 | struct block_device *bdev = I_BDEV(filp->f_mapping->host); | 409 | struct inode *bd_inode = filp->f_mapping->host; |
| 410 | struct block_device *bdev = I_BDEV(bd_inode); | ||
| 410 | int error; | 411 | int error; |
| 411 | 412 | ||
| 412 | error = sync_blockdev(bdev); | 413 | /* |
| 413 | if (error) | 414 | * There is no need to serialise calls to blkdev_issue_flush with |
| 414 | return error; | 415 | * i_mutex and doing so causes performance issues with concurrent |
| 415 | 416 | * O_SYNC writers to a block device. | |
| 417 | */ | ||
| 418 | mutex_unlock(&bd_inode->i_mutex); | ||
| 419 | |||
| 416 | error = blkdev_issue_flush(bdev, NULL); | 420 | error = blkdev_issue_flush(bdev, NULL); |
| 417 | if (error == -EOPNOTSUPP) | 421 | if (error == -EOPNOTSUPP) |
| 418 | error = 0; | 422 | error = 0; |
| 423 | |||
| 424 | mutex_lock(&bd_inode->i_mutex); | ||
| 425 | |||
| 419 | return error; | 426 | return error; |
| 420 | } | 427 | } |
| 421 | EXPORT_SYMBOL(blkdev_fsync); | 428 | EXPORT_SYMBOL(blkdev_fsync); |
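The blkdev_fsync change above drops i_mutex around blkdev_issue_flush() because the flush needs no serialisation under that lock and holding it throttled concurrent O_SYNC writers. As a hedged illustration of the general "unlock around a slow, independent operation while the caller still expects the lock on return" shape, here is a pthread sketch; issue_flush() is a simulated stub, not block-layer code.

#include <pthread.h>
#include <unistd.h>
#include <stdio.h>

static pthread_mutex_t i_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the flush: slow, but needs no caller-held lock. */
static int issue_flush(void)
{
    usleep(1000);
    return 0;
}

/* Caller holds i_mutex, as ->fsync() did in this kernel version. */
static int blk_fsync_locked(void)
{
    int error;

    /*
     * Drop the lock for the duration of the flush so other writers are
     * not serialised behind it, then retake it before returning because
     * the caller expects to still hold it.
     */
    pthread_mutex_unlock(&i_mutex);
    error = issue_flush();
    pthread_mutex_lock(&i_mutex);

    return error;
}

int main(void)
{
    pthread_mutex_lock(&i_mutex);
    printf("fsync -> %d\n", blk_fsync_locked());
    pthread_mutex_unlock(&i_mutex);
    return 0;
}

Build with "cc sketch.c -lpthread" if trying it out; the point is only the unlock/relock bracket around the independent operation.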
diff --git a/fs/ioctl.c b/fs/ioctl.c index 6c751106c2e5..7faefb4da939 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c | |||
| @@ -228,14 +228,23 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg) | |||
| 228 | 228 | ||
| 229 | #ifdef CONFIG_BLOCK | 229 | #ifdef CONFIG_BLOCK |
| 230 | 230 | ||
| 231 | #define blk_to_logical(inode, blk) (blk << (inode)->i_blkbits) | 231 | static inline sector_t logical_to_blk(struct inode *inode, loff_t offset) |
| 232 | #define logical_to_blk(inode, offset) (offset >> (inode)->i_blkbits); | 232 | { |
| 233 | return (offset >> inode->i_blkbits); | ||
| 234 | } | ||
| 235 | |||
| 236 | static inline loff_t blk_to_logical(struct inode *inode, sector_t blk) | ||
| 237 | { | ||
| 238 | return (blk << inode->i_blkbits); | ||
| 239 | } | ||
| 233 | 240 | ||
| 234 | /** | 241 | /** |
| 235 | * __generic_block_fiemap - FIEMAP for block based inodes (no locking) | 242 | * __generic_block_fiemap - FIEMAP for block based inodes (no locking) |
| 236 | * @inode - the inode to map | 243 | * @inode: the inode to map |
| 237 | * @arg - the pointer to userspace where we copy everything to | 244 | * @fieinfo: the fiemap info struct that will be passed back to userspace |
| 238 | * @get_block - the fs's get_block function | 245 | * @start: where to start mapping in the inode |
| 246 | * @len: how much space to map | ||
| 247 | * @get_block: the fs's get_block function | ||
| 239 | * | 248 | * |
| 240 | * This does FIEMAP for block based inodes. Basically it will just loop | 249 | * This does FIEMAP for block based inodes. Basically it will just loop |
| 241 | * through get_block until we hit the number of extents we want to map, or we | 250 | * through get_block until we hit the number of extents we want to map, or we |
| @@ -250,58 +259,63 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg) | |||
| 250 | */ | 259 | */ |
| 251 | 260 | ||
| 252 | int __generic_block_fiemap(struct inode *inode, | 261 | int __generic_block_fiemap(struct inode *inode, |
| 253 | struct fiemap_extent_info *fieinfo, u64 start, | 262 | struct fiemap_extent_info *fieinfo, loff_t start, |
| 254 | u64 len, get_block_t *get_block) | 263 | loff_t len, get_block_t *get_block) |
| 255 | { | 264 | { |
| 256 | struct buffer_head tmp; | 265 | struct buffer_head map_bh; |
| 257 | unsigned long long start_blk; | 266 | sector_t start_blk, last_blk; |
| 258 | long long length = 0, map_len = 0; | 267 | loff_t isize = i_size_read(inode); |
| 259 | u64 logical = 0, phys = 0, size = 0; | 268 | u64 logical = 0, phys = 0, size = 0; |
| 260 | u32 flags = FIEMAP_EXTENT_MERGED; | 269 | u32 flags = FIEMAP_EXTENT_MERGED; |
| 261 | int ret = 0, past_eof = 0, whole_file = 0; | 270 | bool past_eof = false, whole_file = false; |
| 271 | int ret = 0; | ||
| 262 | 272 | ||
| 263 | if ((ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC))) | 273 | ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC); |
| 274 | if (ret) | ||
| 264 | return ret; | 275 | return ret; |
| 265 | 276 | ||
| 266 | start_blk = logical_to_blk(inode, start); | 277 | /* |
| 267 | 278 | * Either the i_mutex or other appropriate locking needs to be held | |
| 268 | length = (long long)min_t(u64, len, i_size_read(inode)); | 279 | * since we expect isize to not change at all through the duration of |
| 269 | if (length < len) | 280 | * this call. |
| 270 | whole_file = 1; | 281 | */ |
| 282 | if (len >= isize) { | ||
| 283 | whole_file = true; | ||
| 284 | len = isize; | ||
| 285 | } | ||
| 271 | 286 | ||
| 272 | map_len = length; | 287 | start_blk = logical_to_blk(inode, start); |
| 288 | last_blk = logical_to_blk(inode, start + len - 1); | ||
| 273 | 289 | ||
| 274 | do { | 290 | do { |
| 275 | /* | 291 | /* |
| 276 | * we set b_size to the total size we want so it will map as | 292 | * we set b_size to the total size we want so it will map as |
| 277 | * many contiguous blocks as possible at once | 293 | * many contiguous blocks as possible at once |
| 278 | */ | 294 | */ |
| 279 | memset(&tmp, 0, sizeof(struct buffer_head)); | 295 | memset(&map_bh, 0, sizeof(struct buffer_head)); |
| 280 | tmp.b_size = map_len; | 296 | map_bh.b_size = len; |
| 281 | 297 | ||
| 282 | ret = get_block(inode, start_blk, &tmp, 0); | 298 | ret = get_block(inode, start_blk, &map_bh, 0); |
| 283 | if (ret) | 299 | if (ret) |
| 284 | break; | 300 | break; |
| 285 | 301 | ||
| 286 | /* HOLE */ | 302 | /* HOLE */ |
| 287 | if (!buffer_mapped(&tmp)) { | 303 | if (!buffer_mapped(&map_bh)) { |
| 288 | length -= blk_to_logical(inode, 1); | ||
| 289 | start_blk++; | 304 | start_blk++; |
| 290 | 305 | ||
| 291 | /* | 306 | /* |
| 292 | * we want to handle the case where there is an | 307 | * We want to handle the case where there is an |
| 293 | * allocated block at the front of the file, and then | 308 | * allocated block at the front of the file, and then |
| 294 | * nothing but holes up to the end of the file properly, | 309 | * nothing but holes up to the end of the file properly, |
| 295 | * to make sure that extent at the front gets properly | 310 | * to make sure that extent at the front gets properly |
| 296 | * marked with FIEMAP_EXTENT_LAST | 311 | * marked with FIEMAP_EXTENT_LAST |
| 297 | */ | 312 | */ |
| 298 | if (!past_eof && | 313 | if (!past_eof && |
| 299 | blk_to_logical(inode, start_blk) >= | 314 | blk_to_logical(inode, start_blk) >= isize) |
| 300 | blk_to_logical(inode, 0)+i_size_read(inode)) | ||
| 301 | past_eof = 1; | 315 | past_eof = 1; |
| 302 | 316 | ||
| 303 | /* | 317 | /* |
| 304 | * first hole after going past the EOF, this is our | 318 | * First hole after going past the EOF, this is our |
| 305 | * last extent | 319 | * last extent |
| 306 | */ | 320 | */ |
| 307 | if (past_eof && size) { | 321 | if (past_eof && size) { |
| @@ -309,15 +323,18 @@ int __generic_block_fiemap(struct inode *inode, | |||
| 309 | ret = fiemap_fill_next_extent(fieinfo, logical, | 323 | ret = fiemap_fill_next_extent(fieinfo, logical, |
| 310 | phys, size, | 324 | phys, size, |
| 311 | flags); | 325 | flags); |
| 312 | break; | 326 | } else if (size) { |
| 327 | ret = fiemap_fill_next_extent(fieinfo, logical, | ||
| 328 | phys, size, flags); | ||
| 329 | size = 0; | ||
| 313 | } | 330 | } |
| 314 | 331 | ||
| 315 | /* if we have holes up to/past EOF then we're done */ | 332 | /* if we have holes up to/past EOF then we're done */ |
| 316 | if (length <= 0 || past_eof) | 333 | if (start_blk > last_blk || past_eof || ret) |
| 317 | break; | 334 | break; |
| 318 | } else { | 335 | } else { |
| 319 | /* | 336 | /* |
| 320 | * we have gone over the length of what we wanted to | 337 | * We have gone over the length of what we wanted to |
| 321 | * map, and it wasn't the entire file, so add the extent | 338 | * map, and it wasn't the entire file, so add the extent |
| 322 | * we got last time and exit. | 339 | * we got last time and exit. |
| 323 | * | 340 | * |
| @@ -331,7 +348,7 @@ int __generic_block_fiemap(struct inode *inode, | |||
| 331 | * are good to go, just add the extent to the fieinfo | 348 | * are good to go, just add the extent to the fieinfo |
| 332 | * and break | 349 | * and break |
| 333 | */ | 350 | */ |
| 334 | if (length <= 0 && !whole_file) { | 351 | if (start_blk > last_blk && !whole_file) { |
| 335 | ret = fiemap_fill_next_extent(fieinfo, logical, | 352 | ret = fiemap_fill_next_extent(fieinfo, logical, |
| 336 | phys, size, | 353 | phys, size, |
| 337 | flags); | 354 | flags); |
| @@ -351,11 +368,10 @@ int __generic_block_fiemap(struct inode *inode, | |||
| 351 | } | 368 | } |
| 352 | 369 | ||
| 353 | logical = blk_to_logical(inode, start_blk); | 370 | logical = blk_to_logical(inode, start_blk); |
| 354 | phys = blk_to_logical(inode, tmp.b_blocknr); | 371 | phys = blk_to_logical(inode, map_bh.b_blocknr); |
| 355 | size = tmp.b_size; | 372 | size = map_bh.b_size; |
| 356 | flags = FIEMAP_EXTENT_MERGED; | 373 | flags = FIEMAP_EXTENT_MERGED; |
| 357 | 374 | ||
| 358 | length -= tmp.b_size; | ||
| 359 | start_blk += logical_to_blk(inode, size); | 375 | start_blk += logical_to_blk(inode, size); |
| 360 | 376 | ||
| 361 | /* | 377 | /* |
| @@ -363,15 +379,13 @@ int __generic_block_fiemap(struct inode *inode, | |||
| 363 | * soon as we find a hole that the last extent we found | 379 | * soon as we find a hole that the last extent we found |
| 364 | * is marked with FIEMAP_EXTENT_LAST | 380 | * is marked with FIEMAP_EXTENT_LAST |
| 365 | */ | 381 | */ |
| 366 | if (!past_eof && | 382 | if (!past_eof && logical + size >= isize) |
| 367 | logical+size >= | 383 | past_eof = true; |
| 368 | blk_to_logical(inode, 0)+i_size_read(inode)) | ||
| 369 | past_eof = 1; | ||
| 370 | } | 384 | } |
| 371 | cond_resched(); | 385 | cond_resched(); |
| 372 | } while (1); | 386 | } while (1); |
| 373 | 387 | ||
| 374 | /* if ret is 1 then we just hit the end of the extent array */ | 388 | /* If ret is 1 then we just hit the end of the extent array */ |
| 375 | if (ret == 1) | 389 | if (ret == 1) |
| 376 | ret = 0; | 390 | ret = 0; |
| 377 | 391 | ||
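The fs/ioctl.c rewrite above turns the old macros into inline helpers and drives the loop with start_blk/last_blk instead of a signed byte count. The shift-based byte/block conversions are the core of it; below is a small user-space sketch of those helpers under the assumption of 4096-byte blocks (i_blkbits = 12).

#include <stdio.h>
#include <stdint.h>

/* Mirrors the inline helpers added in the hunk: byte offset <-> block number. */
static uint64_t logical_to_blk(unsigned blkbits, uint64_t offset)
{
    return offset >> blkbits;
}

static uint64_t blk_to_logical(unsigned blkbits, uint64_t blk)
{
    return blk << blkbits;
}

int main(void)
{
    unsigned blkbits = 12;              /* assume 4096-byte blocks */
    uint64_t start = 5000, len = 20000;

    uint64_t start_blk = logical_to_blk(blkbits, start);
    uint64_t last_blk  = logical_to_blk(blkbits, start + len - 1);

    /* The mapping loop in the patch walks [start_blk, last_blk] inclusively. */
    printf("blocks %llu..%llu, start_blk begins at byte %llu\n",
           (unsigned long long)start_blk,
           (unsigned long long)last_blk,
           (unsigned long long)blk_to_logical(blkbits, start_blk));
    return 0;
}

Computing last_blk from start + len - 1 is what lets the loop condition "start_blk > last_blk" replace the old length countdown.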
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 9dd126276c9f..ed9ba6fe04f5 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c | |||
| @@ -61,7 +61,7 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino) | |||
| 61 | inode->i_op = &page_symlink_inode_operations; | 61 | inode->i_op = &page_symlink_inode_operations; |
| 62 | inode->i_mapping->a_ops = &jfs_aops; | 62 | inode->i_mapping->a_ops = &jfs_aops; |
| 63 | } else { | 63 | } else { |
| 64 | inode->i_op = &jfs_symlink_inode_operations; | 64 | inode->i_op = &jfs_fast_symlink_inode_operations; |
| 65 | /* | 65 | /* |
| 66 | * The inline data should be null-terminated, but | 66 | * The inline data should be null-terminated, but |
| 67 | * don't let on-disk corruption crash the kernel | 67 | * don't let on-disk corruption crash the kernel |
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 6c4dfcbf3f55..9e2f6a721668 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c | |||
| @@ -196,7 +196,7 @@ int dbMount(struct inode *ipbmap) | |||
| 196 | bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag); | 196 | bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag); |
| 197 | bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref); | 197 | bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref); |
| 198 | bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel); | 198 | bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel); |
| 199 | bmp->db_agheigth = le32_to_cpu(dbmp_le->dn_agheigth); | 199 | bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight); |
| 200 | bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); | 200 | bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); |
| 201 | bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); | 201 | bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); |
| 202 | bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); | 202 | bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); |
| @@ -288,7 +288,7 @@ int dbSync(struct inode *ipbmap) | |||
| 288 | dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag); | 288 | dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag); |
| 289 | dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref); | 289 | dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref); |
| 290 | dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel); | 290 | dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel); |
| 291 | dbmp_le->dn_agheigth = cpu_to_le32(bmp->db_agheigth); | 291 | dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight); |
| 292 | dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth); | 292 | dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth); |
| 293 | dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart); | 293 | dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart); |
| 294 | dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size); | 294 | dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size); |
| @@ -1441,7 +1441,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results) | |||
| 1441 | * tree index of this allocation group within the control page. | 1441 | * tree index of this allocation group within the control page. |
| 1442 | */ | 1442 | */ |
| 1443 | agperlev = | 1443 | agperlev = |
| 1444 | (1 << (L2LPERCTL - (bmp->db_agheigth << 1))) / bmp->db_agwidth; | 1444 | (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth; |
| 1445 | ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1)); | 1445 | ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1)); |
| 1446 | 1446 | ||
| 1447 | /* dmap control page trees fan-out by 4 and a single allocation | 1447 | /* dmap control page trees fan-out by 4 and a single allocation |
| @@ -1460,7 +1460,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results) | |||
| 1460 | * the subtree to find the leftmost leaf that describes this | 1460 | * the subtree to find the leftmost leaf that describes this |
| 1461 | * free space. | 1461 | * free space. |
| 1462 | */ | 1462 | */ |
| 1463 | for (k = bmp->db_agheigth; k > 0; k--) { | 1463 | for (k = bmp->db_agheight; k > 0; k--) { |
| 1464 | for (n = 0, m = (ti << 2) + 1; n < 4; n++) { | 1464 | for (n = 0, m = (ti << 2) + 1; n < 4; n++) { |
| 1465 | if (l2nb <= dcp->stree[m + n]) { | 1465 | if (l2nb <= dcp->stree[m + n]) { |
| 1466 | ti = m + n; | 1466 | ti = m + n; |
| @@ -3607,7 +3607,7 @@ void dbFinalizeBmap(struct inode *ipbmap) | |||
| 3607 | } | 3607 | } |
| 3608 | 3608 | ||
| 3609 | /* | 3609 | /* |
| 3610 | * compute db_aglevel, db_agheigth, db_width, db_agstart: | 3610 | * compute db_aglevel, db_agheight, db_width, db_agstart: |
| 3611 | * an ag is covered in aglevel dmapctl summary tree, | 3611 | * an ag is covered in aglevel dmapctl summary tree, |
| 3612 | * at agheight level height (from leaf) with agwidth number of nodes | 3612 | * at agheight level height (from leaf) with agwidth number of nodes |
| 3613 | * each, which starts at agstart index node of the smmary tree node | 3613 | * each, which starts at agstart index node of the smmary tree node |
| @@ -3616,9 +3616,9 @@ void dbFinalizeBmap(struct inode *ipbmap) | |||
| 3616 | bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize); | 3616 | bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize); |
| 3617 | l2nl = | 3617 | l2nl = |
| 3618 | bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL); | 3618 | bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL); |
| 3619 | bmp->db_agheigth = l2nl >> 1; | 3619 | bmp->db_agheight = l2nl >> 1; |
| 3620 | bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheigth << 1)); | 3620 | bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1)); |
| 3621 | for (i = 5 - bmp->db_agheigth, bmp->db_agstart = 0, n = 1; i > 0; | 3621 | for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0; |
| 3622 | i--) { | 3622 | i--) { |
| 3623 | bmp->db_agstart += n; | 3623 | bmp->db_agstart += n; |
| 3624 | n <<= 2; | 3624 | n <<= 2; |
diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h index 1a6eb41569bc..6dcb906c55d8 100644 --- a/fs/jfs/jfs_dmap.h +++ b/fs/jfs/jfs_dmap.h | |||
| @@ -210,7 +210,7 @@ struct dbmap_disk { | |||
| 210 | __le32 dn_maxag; /* 4: max active alloc group number */ | 210 | __le32 dn_maxag; /* 4: max active alloc group number */ |
| 211 | __le32 dn_agpref; /* 4: preferred alloc group (hint) */ | 211 | __le32 dn_agpref; /* 4: preferred alloc group (hint) */ |
| 212 | __le32 dn_aglevel; /* 4: dmapctl level holding the AG */ | 212 | __le32 dn_aglevel; /* 4: dmapctl level holding the AG */ |
| 213 | __le32 dn_agheigth; /* 4: height in dmapctl of the AG */ | 213 | __le32 dn_agheight; /* 4: height in dmapctl of the AG */ |
| 214 | __le32 dn_agwidth; /* 4: width in dmapctl of the AG */ | 214 | __le32 dn_agwidth; /* 4: width in dmapctl of the AG */ |
| 215 | __le32 dn_agstart; /* 4: start tree index at AG height */ | 215 | __le32 dn_agstart; /* 4: start tree index at AG height */ |
| 216 | __le32 dn_agl2size; /* 4: l2 num of blks per alloc group */ | 216 | __le32 dn_agl2size; /* 4: l2 num of blks per alloc group */ |
| @@ -229,7 +229,7 @@ struct dbmap { | |||
| 229 | int dn_maxag; /* max active alloc group number */ | 229 | int dn_maxag; /* max active alloc group number */ |
| 230 | int dn_agpref; /* preferred alloc group (hint) */ | 230 | int dn_agpref; /* preferred alloc group (hint) */ |
| 231 | int dn_aglevel; /* dmapctl level holding the AG */ | 231 | int dn_aglevel; /* dmapctl level holding the AG */ |
| 232 | int dn_agheigth; /* height in dmapctl of the AG */ | 232 | int dn_agheight; /* height in dmapctl of the AG */ |
| 233 | int dn_agwidth; /* width in dmapctl of the AG */ | 233 | int dn_agwidth; /* width in dmapctl of the AG */ |
| 234 | int dn_agstart; /* start tree index at AG height */ | 234 | int dn_agstart; /* start tree index at AG height */ |
| 235 | int dn_agl2size; /* l2 num of blks per alloc group */ | 235 | int dn_agl2size; /* l2 num of blks per alloc group */ |
| @@ -255,7 +255,7 @@ struct bmap { | |||
| 255 | #define db_agsize db_bmap.dn_agsize | 255 | #define db_agsize db_bmap.dn_agsize |
| 256 | #define db_agl2size db_bmap.dn_agl2size | 256 | #define db_agl2size db_bmap.dn_agl2size |
| 257 | #define db_agwidth db_bmap.dn_agwidth | 257 | #define db_agwidth db_bmap.dn_agwidth |
| 258 | #define db_agheigth db_bmap.dn_agheigth | 258 | #define db_agheight db_bmap.dn_agheight |
| 259 | #define db_agstart db_bmap.dn_agstart | 259 | #define db_agstart db_bmap.dn_agstart |
| 260 | #define db_numag db_bmap.dn_numag | 260 | #define db_numag db_bmap.dn_numag |
| 261 | #define db_maxlevel db_bmap.dn_maxlevel | 261 | #define db_maxlevel db_bmap.dn_maxlevel |
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index 79e2c79661df..9e6bda30a6e8 100644 --- a/fs/jfs/jfs_inode.h +++ b/fs/jfs/jfs_inode.h | |||
| @@ -48,5 +48,6 @@ extern const struct file_operations jfs_dir_operations; | |||
| 48 | extern const struct inode_operations jfs_file_inode_operations; | 48 | extern const struct inode_operations jfs_file_inode_operations; |
| 49 | extern const struct file_operations jfs_file_operations; | 49 | extern const struct file_operations jfs_file_operations; |
| 50 | extern const struct inode_operations jfs_symlink_inode_operations; | 50 | extern const struct inode_operations jfs_symlink_inode_operations; |
| 51 | extern const struct inode_operations jfs_fast_symlink_inode_operations; | ||
| 51 | extern const struct dentry_operations jfs_ci_dentry_operations; | 52 | extern const struct dentry_operations jfs_ci_dentry_operations; |
| 52 | #endif /* _H_JFS_INODE */ | 53 | #endif /* _H_JFS_INODE */ |
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 4a3e9f39c21d..a9cf8e8675be 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c | |||
| @@ -956,7 +956,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, | |||
| 956 | */ | 956 | */ |
| 957 | 957 | ||
| 958 | if (ssize <= IDATASIZE) { | 958 | if (ssize <= IDATASIZE) { |
| 959 | ip->i_op = &jfs_symlink_inode_operations; | 959 | ip->i_op = &jfs_fast_symlink_inode_operations; |
| 960 | 960 | ||
| 961 | i_fastsymlink = JFS_IP(ip)->i_inline; | 961 | i_fastsymlink = JFS_IP(ip)->i_inline; |
| 962 | memcpy(i_fastsymlink, name, ssize); | 962 | memcpy(i_fastsymlink, name, ssize); |
| @@ -978,7 +978,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, | |||
| 978 | else { | 978 | else { |
| 979 | jfs_info("jfs_symlink: allocate extent ip:0x%p", ip); | 979 | jfs_info("jfs_symlink: allocate extent ip:0x%p", ip); |
| 980 | 980 | ||
| 981 | ip->i_op = &page_symlink_inode_operations; | 981 | ip->i_op = &jfs_symlink_inode_operations; |
| 982 | ip->i_mapping->a_ops = &jfs_aops; | 982 | ip->i_mapping->a_ops = &jfs_aops; |
| 983 | 983 | ||
| 984 | /* | 984 | /* |
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c index 7f24a0bb08ca..1aba0039f1c9 100644 --- a/fs/jfs/resize.c +++ b/fs/jfs/resize.c | |||
| @@ -81,6 +81,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize) | |||
| 81 | struct inode *iplist[1]; | 81 | struct inode *iplist[1]; |
| 82 | struct jfs_superblock *j_sb, *j_sb2; | 82 | struct jfs_superblock *j_sb, *j_sb2; |
| 83 | uint old_agsize; | 83 | uint old_agsize; |
| 84 | int agsizechanged = 0; | ||
| 84 | struct buffer_head *bh, *bh2; | 85 | struct buffer_head *bh, *bh2; |
| 85 | 86 | ||
| 86 | /* If the volume hasn't grown, get out now */ | 87 | /* If the volume hasn't grown, get out now */ |
| @@ -333,6 +334,9 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize) | |||
| 333 | */ | 334 | */ |
| 334 | if ((rc = dbExtendFS(ipbmap, XAddress, nblocks))) | 335 | if ((rc = dbExtendFS(ipbmap, XAddress, nblocks))) |
| 335 | goto error_out; | 336 | goto error_out; |
| 337 | |||
| 338 | agsizechanged |= (bmp->db_agsize != old_agsize); | ||
| 339 | |||
| 336 | /* | 340 | /* |
| 337 | * the map now has extended to cover additional nblocks: | 341 | * the map now has extended to cover additional nblocks: |
| 338 | * dn_mapsize = oldMapsize + nblocks; | 342 | * dn_mapsize = oldMapsize + nblocks; |
| @@ -432,7 +436,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize) | |||
| 432 | * will correctly identify the new ag); | 436 | * will correctly identify the new ag); |
| 433 | */ | 437 | */ |
| 434 | /* if new AG size the same as old AG size, done! */ | 438 | /* if new AG size the same as old AG size, done! */ |
| 435 | if (bmp->db_agsize != old_agsize) { | 439 | if (agsizechanged) { |
| 436 | if ((rc = diExtendFS(ipimap, ipbmap))) | 440 | if ((rc = diExtendFS(ipimap, ipbmap))) |
| 437 | goto error_out; | 441 | goto error_out; |
| 438 | 442 | ||
diff --git a/fs/jfs/symlink.c b/fs/jfs/symlink.c index 4af1a05aad0a..205b946d8e0d 100644 --- a/fs/jfs/symlink.c +++ b/fs/jfs/symlink.c | |||
| @@ -29,9 +29,21 @@ static void *jfs_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
| 29 | return NULL; | 29 | return NULL; |
| 30 | } | 30 | } |
| 31 | 31 | ||
| 32 | const struct inode_operations jfs_symlink_inode_operations = { | 32 | const struct inode_operations jfs_fast_symlink_inode_operations = { |
| 33 | .readlink = generic_readlink, | 33 | .readlink = generic_readlink, |
| 34 | .follow_link = jfs_follow_link, | 34 | .follow_link = jfs_follow_link, |
| 35 | .setattr = jfs_setattr, | ||
| 36 | .setxattr = jfs_setxattr, | ||
| 37 | .getxattr = jfs_getxattr, | ||
| 38 | .listxattr = jfs_listxattr, | ||
| 39 | .removexattr = jfs_removexattr, | ||
| 40 | }; | ||
| 41 | |||
| 42 | const struct inode_operations jfs_symlink_inode_operations = { | ||
| 43 | .readlink = generic_readlink, | ||
| 44 | .follow_link = page_follow_link_light, | ||
| 45 | .put_link = page_put_link, | ||
| 46 | .setattr = jfs_setattr, | ||
| 35 | .setxattr = jfs_setxattr, | 47 | .setxattr = jfs_setxattr, |
| 36 | .getxattr = jfs_getxattr, | 48 | .getxattr = jfs_getxattr, |
| 37 | .listxattr = jfs_listxattr, | 49 | .listxattr = jfs_listxattr, |
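The jfs hunks above split symlink handling into jfs_fast_symlink_inode_operations (inline target, jfs_follow_link) and jfs_symlink_inode_operations (page-based, page_follow_link_light/page_put_link), with jfs_symlink() and jfs_iget() choosing between them by whether the target fits in the inode's inline area. A tiny sketch of that dispatch follows; the ops structures are stand-ins and the IDATASIZE value is illustrative, not the real JFS constant.

#include <stdio.h>
#include <string.h>

/* Stand-ins for the two inode_operations tables picked in jfs_symlink(). */
struct inode_ops { const char *name; };
static const struct inode_ops fast_symlink_ops = { "fast (inline target)" };
static const struct inode_ops page_symlink_ops = { "page-based target" };

#define IDATASIZE 256   /* illustrative inline-data capacity */

static const struct inode_ops *pick_symlink_ops(const char *target)
{
    size_t ssize = strlen(target) + 1;   /* include the terminating NUL */
    return (ssize <= IDATASIZE) ? &fast_symlink_ops : &page_symlink_ops;
}

int main(void)
{
    printf("%s\n", pick_symlink_ops("/tmp/short")->name);
    return 0;
}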
diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c index 84e36f52fe95..76c242fbe1b0 100644 --- a/fs/logfs/gc.c +++ b/fs/logfs/gc.c | |||
| @@ -459,6 +459,14 @@ static void __logfs_gc_pass(struct super_block *sb, int target) | |||
| 459 | struct logfs_block *block; | 459 | struct logfs_block *block; |
| 460 | int round, progress, last_progress = 0; | 460 | int round, progress, last_progress = 0; |
| 461 | 461 | ||
| 462 | /* | ||
| 463 | * Doing too many changes to the segfile at once would result | ||
| 464 | * in a large number of aliases. Write the journal before | ||
| 465 | * things get out of hand. | ||
| 466 | */ | ||
| 467 | if (super->s_shadow_tree.no_shadowed_segments >= MAX_OBJ_ALIASES) | ||
| 468 | logfs_write_anchor(sb); | ||
| 469 | |||
| 462 | if (no_free_segments(sb) >= target && | 470 | if (no_free_segments(sb) >= target && |
| 463 | super->s_no_object_aliases < MAX_OBJ_ALIASES) | 471 | super->s_no_object_aliases < MAX_OBJ_ALIASES) |
| 464 | return; | 472 | return; |
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c index 33bd260b8309..fb0a613f885b 100644 --- a/fs/logfs/journal.c +++ b/fs/logfs/journal.c | |||
| @@ -389,7 +389,10 @@ static void journal_get_erase_count(struct logfs_area *area) | |||
| 389 | static int journal_erase_segment(struct logfs_area *area) | 389 | static int journal_erase_segment(struct logfs_area *area) |
| 390 | { | 390 | { |
| 391 | struct super_block *sb = area->a_sb; | 391 | struct super_block *sb = area->a_sb; |
| 392 | struct logfs_segment_header sh; | 392 | union { |
| 393 | struct logfs_segment_header sh; | ||
| 394 | unsigned char c[ALIGN(sizeof(struct logfs_segment_header), 16)]; | ||
| 395 | } u; | ||
| 393 | u64 ofs; | 396 | u64 ofs; |
| 394 | int err; | 397 | int err; |
| 395 | 398 | ||
| @@ -397,20 +400,21 @@ static int journal_erase_segment(struct logfs_area *area) | |||
| 397 | if (err) | 400 | if (err) |
| 398 | return err; | 401 | return err; |
| 399 | 402 | ||
| 400 | sh.pad = 0; | 403 | memset(&u, 0, sizeof(u)); |
| 401 | sh.type = SEG_JOURNAL; | 404 | u.sh.pad = 0; |
| 402 | sh.level = 0; | 405 | u.sh.type = SEG_JOURNAL; |
| 403 | sh.segno = cpu_to_be32(area->a_segno); | 406 | u.sh.level = 0; |
| 404 | sh.ec = cpu_to_be32(area->a_erase_count); | 407 | u.sh.segno = cpu_to_be32(area->a_segno); |
| 405 | sh.gec = cpu_to_be64(logfs_super(sb)->s_gec); | 408 | u.sh.ec = cpu_to_be32(area->a_erase_count); |
| 406 | sh.crc = logfs_crc32(&sh, sizeof(sh), 4); | 409 | u.sh.gec = cpu_to_be64(logfs_super(sb)->s_gec); |
| 410 | u.sh.crc = logfs_crc32(&u.sh, sizeof(u.sh), 4); | ||
| 407 | 411 | ||
| 408 | /* This causes a bug in segment.c. Not yet. */ | 412 | /* This causes a bug in segment.c. Not yet. */ |
| 409 | //logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0); | 413 | //logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0); |
| 410 | 414 | ||
| 411 | ofs = dev_ofs(sb, area->a_segno, 0); | 415 | ofs = dev_ofs(sb, area->a_segno, 0); |
| 412 | area->a_used_bytes = ALIGN(sizeof(sh), 16); | 416 | area->a_used_bytes = sizeof(u); |
| 413 | logfs_buf_write(area, ofs, &sh, sizeof(sh)); | 417 | logfs_buf_write(area, ofs, &u, sizeof(u)); |
| 414 | return 0; | 418 | return 0; |
| 415 | } | 419 | } |
| 416 | 420 | ||
| @@ -494,6 +498,8 @@ static void account_shadows(struct super_block *sb) | |||
| 494 | 498 | ||
| 495 | btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow); | 499 | btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow); |
| 496 | btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow); | 500 | btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow); |
| 501 | btree_grim_visitor32(&tree->segment_map, 0, NULL); | ||
| 502 | tree->no_shadowed_segments = 0; | ||
| 497 | 503 | ||
| 498 | if (li->li_block) { | 504 | if (li->li_block) { |
| 499 | /* | 505 | /* |
| @@ -607,9 +613,9 @@ static size_t __logfs_write_je(struct super_block *sb, void *buf, u16 type, | |||
| 607 | if (len == 0) | 613 | if (len == 0) |
| 608 | return logfs_write_header(super, header, 0, type); | 614 | return logfs_write_header(super, header, 0, type); |
| 609 | 615 | ||
| 616 | BUG_ON(len > sb->s_blocksize); | ||
| 610 | compr_len = logfs_compress(buf, data, len, sb->s_blocksize); | 617 | compr_len = logfs_compress(buf, data, len, sb->s_blocksize); |
| 611 | if (compr_len < 0 || type == JE_ANCHOR) { | 618 | if (compr_len < 0 || type == JE_ANCHOR) { |
| 612 | BUG_ON(len > sb->s_blocksize); | ||
| 613 | memcpy(data, buf, len); | 619 | memcpy(data, buf, len); |
| 614 | compr_len = len; | 620 | compr_len = len; |
| 615 | compr = COMPR_NONE; | 621 | compr = COMPR_NONE; |
| @@ -661,6 +667,7 @@ static int logfs_write_je_buf(struct super_block *sb, void *buf, u16 type, | |||
| 661 | if (ofs < 0) | 667 | if (ofs < 0) |
| 662 | return ofs; | 668 | return ofs; |
| 663 | logfs_buf_write(area, ofs, super->s_compressed_je, len); | 669 | logfs_buf_write(area, ofs, super->s_compressed_je, len); |
| 670 | BUG_ON(super->s_no_je >= MAX_JOURNAL_ENTRIES); | ||
| 664 | super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs); | 671 | super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs); |
| 665 | return 0; | 672 | return 0; |
| 666 | } | 673 | } |
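In the logfs journal hunk above, journal_erase_segment() now writes a union that pads the segment header out to a 16-byte-aligned size, so the write covers the padding bytes instead of leaving them undefined on the medium. A user-space sketch of the padded-union trick follows; the field layout is approximated from the hunk (the on-disk struct is big-endian and its exact packing is not shown here).

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Approximation of the segment header fields set in the hunk. */
struct seg_header {
    uint32_t crc;
    uint16_t pad;
    uint8_t  type;
    uint8_t  level;
    uint32_t segno;
    uint32_t ec;
    uint64_t gec;
};

int main(void)
{
    /* Union trick from the hunk: one object whose size is the aligned size. */
    union {
        struct seg_header sh;
        unsigned char c[ALIGN_UP(sizeof(struct seg_header), 16)];
    } u;

    memset(&u, 0, sizeof(u));        /* zeroes the trailing padding too */
    u.sh.type = 1;
    u.sh.segno = 42;

    printf("header %zu bytes, written size %zu bytes\n",
           sizeof(struct seg_header), sizeof(u));  /* sizeof(u) is a multiple of 16 */
    return 0;
}

Writing sizeof(u) bytes in one go is what lets the caller set a_used_bytes to the same value without a separate ALIGN() at the write site.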
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h index b84b0eec6024..0a3df1a0c936 100644 --- a/fs/logfs/logfs.h +++ b/fs/logfs/logfs.h | |||
| @@ -257,10 +257,14 @@ struct logfs_shadow { | |||
| 257 | * struct shadow_tree | 257 | * struct shadow_tree |
| 258 | * @new: shadows where old_ofs==0, indexed by new_ofs | 258 | * @new: shadows where old_ofs==0, indexed by new_ofs |
| 259 | * @old: shadows where old_ofs!=0, indexed by old_ofs | 259 | * @old: shadows where old_ofs!=0, indexed by old_ofs |
| 260 | * @segment_map: bitfield of segments containing shadows | ||
| 261 | * @no_shadowed_segment: number of segments containing shadows | ||
| 260 | */ | 262 | */ |
| 261 | struct shadow_tree { | 263 | struct shadow_tree { |
| 262 | struct btree_head64 new; | 264 | struct btree_head64 new; |
| 263 | struct btree_head64 old; | 265 | struct btree_head64 old; |
| 266 | struct btree_head32 segment_map; | ||
| 267 | int no_shadowed_segments; | ||
| 264 | }; | 268 | }; |
| 265 | 269 | ||
| 266 | struct object_alias_item { | 270 | struct object_alias_item { |
| @@ -305,13 +309,14 @@ typedef int write_alias_t(struct super_block *sb, u64 ino, u64 bix, | |||
| 305 | level_t level, int child_no, __be64 val); | 309 | level_t level, int child_no, __be64 val); |
| 306 | struct logfs_block_ops { | 310 | struct logfs_block_ops { |
| 307 | void (*write_block)(struct logfs_block *block); | 311 | void (*write_block)(struct logfs_block *block); |
| 308 | gc_level_t (*block_level)(struct logfs_block *block); | ||
| 309 | void (*free_block)(struct super_block *sb, struct logfs_block*block); | 312 | void (*free_block)(struct super_block *sb, struct logfs_block*block); |
| 310 | int (*write_alias)(struct super_block *sb, | 313 | int (*write_alias)(struct super_block *sb, |
| 311 | struct logfs_block *block, | 314 | struct logfs_block *block, |
| 312 | write_alias_t *write_one_alias); | 315 | write_alias_t *write_one_alias); |
| 313 | }; | 316 | }; |
| 314 | 317 | ||
| 318 | #define MAX_JOURNAL_ENTRIES 256 | ||
| 319 | |||
| 315 | struct logfs_super { | 320 | struct logfs_super { |
| 316 | struct mtd_info *s_mtd; /* underlying device */ | 321 | struct mtd_info *s_mtd; /* underlying device */ |
| 317 | struct block_device *s_bdev; /* underlying device */ | 322 | struct block_device *s_bdev; /* underlying device */ |
| @@ -378,7 +383,7 @@ struct logfs_super { | |||
| 378 | u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */ | 383 | u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */ |
| 379 | u64 s_last_version; | 384 | u64 s_last_version; |
| 380 | struct logfs_area *s_journal_area; /* open journal segment */ | 385 | struct logfs_area *s_journal_area; /* open journal segment */ |
| 381 | __be64 s_je_array[64]; | 386 | __be64 s_je_array[MAX_JOURNAL_ENTRIES]; |
| 382 | int s_no_je; | 387 | int s_no_je; |
| 383 | 388 | ||
| 384 | int s_sum_index; /* for the 12 summaries */ | 389 | int s_sum_index; /* for the 12 summaries */ |
| @@ -722,4 +727,10 @@ static inline struct logfs_area *get_area(struct super_block *sb, | |||
| 722 | return logfs_super(sb)->s_area[(__force u8)gc_level]; | 727 | return logfs_super(sb)->s_area[(__force u8)gc_level]; |
| 723 | } | 728 | } |
| 724 | 729 | ||
| 730 | static inline void logfs_mempool_destroy(mempool_t *pool) | ||
| 731 | { | ||
| 732 | if (pool) | ||
| 733 | mempool_destroy(pool); | ||
| 734 | } | ||
| 735 | |||
| 725 | #endif | 736 | #endif |
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index bff40253dfb2..3159db6958e5 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c | |||
| @@ -430,25 +430,6 @@ static void inode_write_block(struct logfs_block *block) | |||
| 430 | } | 430 | } |
| 431 | } | 431 | } |
| 432 | 432 | ||
| 433 | static gc_level_t inode_block_level(struct logfs_block *block) | ||
| 434 | { | ||
| 435 | BUG_ON(block->inode->i_ino == LOGFS_INO_MASTER); | ||
| 436 | return GC_LEVEL(LOGFS_MAX_LEVELS); | ||
| 437 | } | ||
| 438 | |||
| 439 | static gc_level_t indirect_block_level(struct logfs_block *block) | ||
| 440 | { | ||
| 441 | struct page *page; | ||
| 442 | struct inode *inode; | ||
| 443 | u64 bix; | ||
| 444 | level_t level; | ||
| 445 | |||
| 446 | page = block->page; | ||
| 447 | inode = page->mapping->host; | ||
| 448 | logfs_unpack_index(page->index, &bix, &level); | ||
| 449 | return expand_level(inode->i_ino, level); | ||
| 450 | } | ||
| 451 | |||
| 452 | /* | 433 | /* |
| 453 | * This silences a false, yet annoying gcc warning. I hate it when my editor | 434 | * This silences a false, yet annoying gcc warning. I hate it when my editor |
| 454 | * jumps into bitops.h each time I recompile this file. | 435 | * jumps into bitops.h each time I recompile this file. |
| @@ -587,14 +568,12 @@ static void indirect_free_block(struct super_block *sb, | |||
| 587 | 568 | ||
| 588 | static struct logfs_block_ops inode_block_ops = { | 569 | static struct logfs_block_ops inode_block_ops = { |
| 589 | .write_block = inode_write_block, | 570 | .write_block = inode_write_block, |
| 590 | .block_level = inode_block_level, | ||
| 591 | .free_block = inode_free_block, | 571 | .free_block = inode_free_block, |
| 592 | .write_alias = inode_write_alias, | 572 | .write_alias = inode_write_alias, |
| 593 | }; | 573 | }; |
| 594 | 574 | ||
| 595 | struct logfs_block_ops indirect_block_ops = { | 575 | struct logfs_block_ops indirect_block_ops = { |
| 596 | .write_block = indirect_write_block, | 576 | .write_block = indirect_write_block, |
| 597 | .block_level = indirect_block_level, | ||
| 598 | .free_block = indirect_free_block, | 577 | .free_block = indirect_free_block, |
| 599 | .write_alias = indirect_write_alias, | 578 | .write_alias = indirect_write_alias, |
| 600 | }; | 579 | }; |
| @@ -1241,6 +1220,18 @@ static void free_shadow(struct inode *inode, struct logfs_shadow *shadow) | |||
| 1241 | mempool_free(shadow, super->s_shadow_pool); | 1220 | mempool_free(shadow, super->s_shadow_pool); |
| 1242 | } | 1221 | } |
| 1243 | 1222 | ||
| 1223 | static void mark_segment(struct shadow_tree *tree, u32 segno) | ||
| 1224 | { | ||
| 1225 | int err; | ||
| 1226 | |||
| 1227 | if (!btree_lookup32(&tree->segment_map, segno)) { | ||
| 1228 | err = btree_insert32(&tree->segment_map, segno, (void *)1, | ||
| 1229 | GFP_NOFS); | ||
| 1230 | BUG_ON(err); | ||
| 1231 | tree->no_shadowed_segments++; | ||
| 1232 | } | ||
| 1233 | } | ||
| 1234 | |||
| 1244 | /** | 1235 | /** |
| 1245 | * fill_shadow_tree - Propagate shadow tree changes due to a write | 1236 | * fill_shadow_tree - Propagate shadow tree changes due to a write |
| 1246 | * @inode: Inode owning the page | 1237 | * @inode: Inode owning the page |
| @@ -1288,6 +1279,8 @@ static void fill_shadow_tree(struct inode *inode, struct page *page, | |||
| 1288 | 1279 | ||
| 1289 | super->s_dirty_used_bytes += shadow->new_len; | 1280 | super->s_dirty_used_bytes += shadow->new_len; |
| 1290 | super->s_dirty_free_bytes += shadow->old_len; | 1281 | super->s_dirty_free_bytes += shadow->old_len; |
| 1282 | mark_segment(tree, shadow->old_ofs >> super->s_segshift); | ||
| 1283 | mark_segment(tree, shadow->new_ofs >> super->s_segshift); | ||
| 1291 | } | 1284 | } |
| 1292 | } | 1285 | } |
| 1293 | 1286 | ||
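The mark_segment() helper added in the hunk above only bumps no_shadowed_segments the first time a given segment number is seen. A stand-alone sketch of that "insert into a set, count distinct keys" idea, with a linear-scan array standing in for the kernel's 32-bit btree (all names here are illustrative, not the logfs API):

```c
#include <stdint.h>

#define MAX_TRACKED 128

struct shadow_tree_sketch {
	uint32_t segnos[MAX_TRACKED];
	unsigned int no_shadowed_segments;
};

/* record segno once; duplicates leave the count untouched */
static void mark_segment_sketch(struct shadow_tree_sketch *tree, uint32_t segno)
{
	unsigned int i;

	for (i = 0; i < tree->no_shadowed_segments; i++)
		if (tree->segnos[i] == segno)
			return;
	if (tree->no_shadowed_segments < MAX_TRACKED)
		tree->segnos[tree->no_shadowed_segments++] = segno;
}
```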
| @@ -1845,19 +1838,37 @@ static int __logfs_truncate(struct inode *inode, u64 size) | |||
| 1845 | return logfs_truncate_direct(inode, size); | 1838 | return logfs_truncate_direct(inode, size); |
| 1846 | } | 1839 | } |
| 1847 | 1840 | ||
| 1848 | int logfs_truncate(struct inode *inode, u64 size) | 1841 | /* |
| 1842 | * Truncate, by changing the segment file, can consume a fair amount | ||
| 1843 | * of resources. So back off from time to time and do some GC. | ||
| 1844 | * 8 or 2048 blocks should be well within safety limits even if | ||
| 1845 | * every single block resided in a different segment. | ||
| 1846 | */ | ||
| 1847 | #define TRUNCATE_STEP (8 * 1024 * 1024) | ||
| 1848 | int logfs_truncate(struct inode *inode, u64 target) | ||
| 1849 | { | 1849 | { |
| 1850 | struct super_block *sb = inode->i_sb; | 1850 | struct super_block *sb = inode->i_sb; |
| 1851 | int err; | 1851 | u64 size = i_size_read(inode); |
| 1852 | int err = 0; | ||
| 1852 | 1853 | ||
| 1853 | logfs_get_wblocks(sb, NULL, 1); | 1854 | size = ALIGN(size, TRUNCATE_STEP); |
| 1854 | err = __logfs_truncate(inode, size); | 1855 | while (size > target) { |
| 1855 | if (!err) | 1856 | if (size > TRUNCATE_STEP) |
| 1856 | err = __logfs_write_inode(inode, 0); | 1857 | size -= TRUNCATE_STEP; |
| 1857 | logfs_put_wblocks(sb, NULL, 1); | 1858 | else |
| 1859 | size = 0; | ||
| 1860 | if (size < target) | ||
| 1861 | size = target; | ||
| 1862 | |||
| 1863 | logfs_get_wblocks(sb, NULL, 1); | ||
| 1864 | err = __logfs_truncate(inode, target); | ||
| 1865 | if (!err) | ||
| 1866 | err = __logfs_write_inode(inode, 0); | ||
| 1867 | logfs_put_wblocks(sb, NULL, 1); | ||
| 1868 | } | ||
| 1858 | 1869 | ||
| 1859 | if (!err) | 1870 | if (!err) |
| 1860 | err = vmtruncate(inode, size); | 1871 | err = vmtruncate(inode, target); |
| 1861 | 1872 | ||
| 1862 | /* I don't trust error recovery yet. */ | 1873 | /* I don't trust error recovery yet. */ |
| 1863 | WARN_ON(err); | 1874 | WARN_ON(err); |
| @@ -2251,8 +2262,6 @@ void logfs_cleanup_rw(struct super_block *sb) | |||
| 2251 | struct logfs_super *super = logfs_super(sb); | 2262 | struct logfs_super *super = logfs_super(sb); |
| 2252 | 2263 | ||
| 2253 | destroy_meta_inode(super->s_segfile_inode); | 2264 | destroy_meta_inode(super->s_segfile_inode); |
| 2254 | if (super->s_block_pool) | 2265 | logfs_mempool_destroy(super->s_block_pool); |
| 2255 | mempool_destroy(super->s_block_pool); | 2266 | logfs_mempool_destroy(super->s_shadow_pool); |
| 2256 | if (super->s_shadow_pool) | ||
| 2257 | mempool_destroy(super->s_shadow_pool); | ||
| 2258 | } | 2267 | } |
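The reworked logfs_truncate() above shrinks the file in bounded steps so garbage collection gets a chance to run between passes. A minimal userspace sketch of just the stepping arithmetic, assuming a hypothetical truncate_to() callback in place of the __logfs_truncate()/__logfs_write_inode() sequence (the sketch hands the callback each intermediate size):

```c
#include <stdint.h>

#define TRUNCATE_STEP (8ULL * 1024 * 1024)		/* back off every 8 MiB */
#define ALIGN_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

static int truncate_in_steps(uint64_t size, uint64_t target,
			     int (*truncate_to)(uint64_t))
{
	int err = 0;

	size = ALIGN_UP(size, TRUNCATE_STEP);
	while (size > target) {
		if (size > TRUNCATE_STEP)
			size -= TRUNCATE_STEP;
		else
			size = 0;
		if (size < target)
			size = target;

		/* each pass releases at most TRUNCATE_STEP bytes, giving the
		 * caller room to do GC in between */
		err = truncate_to(size);
		if (err)
			break;
	}
	return err;
}
```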
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index 801a3a141625..f77ce2b470ba 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c | |||
| @@ -183,14 +183,8 @@ static int btree_write_alias(struct super_block *sb, struct logfs_block *block, | |||
| 183 | return 0; | 183 | return 0; |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | static gc_level_t btree_block_level(struct logfs_block *block) | ||
| 187 | { | ||
| 188 | return expand_level(block->ino, block->level); | ||
| 189 | } | ||
| 190 | |||
| 191 | static struct logfs_block_ops btree_block_ops = { | 186 | static struct logfs_block_ops btree_block_ops = { |
| 192 | .write_block = btree_write_block, | 187 | .write_block = btree_write_block, |
| 193 | .block_level = btree_block_level, | ||
| 194 | .free_block = __free_block, | 188 | .free_block = __free_block, |
| 195 | .write_alias = btree_write_alias, | 189 | .write_alias = btree_write_alias, |
| 196 | }; | 190 | }; |
| @@ -919,7 +913,7 @@ err: | |||
| 919 | for (i--; i >= 0; i--) | 913 | for (i--; i >= 0; i--) |
| 920 | free_area(super->s_area[i]); | 914 | free_area(super->s_area[i]); |
| 921 | free_area(super->s_journal_area); | 915 | free_area(super->s_journal_area); |
| 922 | mempool_destroy(super->s_alias_pool); | 916 | logfs_mempool_destroy(super->s_alias_pool); |
| 923 | return -ENOMEM; | 917 | return -ENOMEM; |
| 924 | } | 918 | } |
| 925 | 919 | ||
diff --git a/fs/logfs/super.c b/fs/logfs/super.c index b60bfac3263c..5866ee6e1327 100644 --- a/fs/logfs/super.c +++ b/fs/logfs/super.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include "logfs.h" | 12 | #include "logfs.h" |
| 13 | #include <linux/bio.h> | 13 | #include <linux/bio.h> |
| 14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
| 15 | #include <linux/blkdev.h> | ||
| 15 | #include <linux/mtd/mtd.h> | 16 | #include <linux/mtd/mtd.h> |
| 16 | #include <linux/statfs.h> | 17 | #include <linux/statfs.h> |
| 17 | #include <linux/buffer_head.h> | 18 | #include <linux/buffer_head.h> |
| @@ -137,6 +138,10 @@ static int logfs_sb_set(struct super_block *sb, void *_super) | |||
| 137 | sb->s_fs_info = super; | 138 | sb->s_fs_info = super; |
| 138 | sb->s_mtd = super->s_mtd; | 139 | sb->s_mtd = super->s_mtd; |
| 139 | sb->s_bdev = super->s_bdev; | 140 | sb->s_bdev = super->s_bdev; |
| 141 | if (sb->s_bdev) | ||
| 142 | sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; | ||
| 143 | if (sb->s_mtd) | ||
| 144 | sb->s_bdi = sb->s_mtd->backing_dev_info; | ||
| 140 | return 0; | 145 | return 0; |
| 141 | } | 146 | } |
| 142 | 147 | ||
| @@ -452,6 +457,8 @@ static int logfs_read_sb(struct super_block *sb, int read_only) | |||
| 452 | 457 | ||
| 453 | btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool); | 458 | btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool); |
| 454 | btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool); | 459 | btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool); |
| 460 | btree_init_mempool32(&super->s_shadow_tree.segment_map, | ||
| 461 | super->s_btree_pool); | ||
| 455 | 462 | ||
| 456 | ret = logfs_init_mapping(sb); | 463 | ret = logfs_init_mapping(sb); |
| 457 | if (ret) | 464 | if (ret) |
| @@ -516,8 +523,8 @@ static void logfs_kill_sb(struct super_block *sb) | |||
| 516 | if (super->s_erase_page) | 523 | if (super->s_erase_page) |
| 517 | __free_page(super->s_erase_page); | 524 | __free_page(super->s_erase_page); |
| 518 | super->s_devops->put_device(sb); | 525 | super->s_devops->put_device(sb); |
| 519 | mempool_destroy(super->s_btree_pool); | 526 | logfs_mempool_destroy(super->s_btree_pool); |
| 520 | mempool_destroy(super->s_alias_pool); | 527 | logfs_mempool_destroy(super->s_alias_pool); |
| 521 | kfree(super); | 528 | kfree(super); |
| 522 | log_super("LogFS: Finished unmounting\n"); | 529 | log_super("LogFS: Finished unmounting\n"); |
| 523 | } | 530 | } |
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c index f8a6075abf50..07930449a958 100644 --- a/fs/reiserfs/dir.c +++ b/fs/reiserfs/dir.c | |||
| @@ -46,8 +46,6 @@ static inline bool is_privroot_deh(struct dentry *dir, | |||
| 46 | struct reiserfs_de_head *deh) | 46 | struct reiserfs_de_head *deh) |
| 47 | { | 47 | { |
| 48 | struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root; | 48 | struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root; |
| 49 | if (reiserfs_expose_privroot(dir->d_sb)) | ||
| 50 | return 0; | ||
| 51 | return (dir == dir->d_parent && privroot->d_inode && | 49 | return (dir == dir->d_parent && privroot->d_inode && |
| 52 | deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid); | 50 | deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid); |
| 53 | } | 51 | } |
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 4f9586bb7631..e7cc00e636dc 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c | |||
| @@ -554,7 +554,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th, | |||
| 554 | if (!err && new_size < i_size_read(dentry->d_inode)) { | 554 | if (!err && new_size < i_size_read(dentry->d_inode)) { |
| 555 | struct iattr newattrs = { | 555 | struct iattr newattrs = { |
| 556 | .ia_ctime = current_fs_time(inode->i_sb), | 556 | .ia_ctime = current_fs_time(inode->i_sb), |
| 557 | .ia_size = buffer_size, | 557 | .ia_size = new_size, |
| 558 | .ia_valid = ATTR_SIZE | ATTR_CTIME, | 558 | .ia_valid = ATTR_SIZE | ATTR_CTIME, |
| 559 | }; | 559 | }; |
| 560 | 560 | ||
| @@ -973,21 +973,13 @@ int reiserfs_permission(struct inode *inode, int mask) | |||
| 973 | return generic_permission(inode, mask, NULL); | 973 | return generic_permission(inode, mask, NULL); |
| 974 | } | 974 | } |
| 975 | 975 | ||
| 976 | /* This will catch lookups from the fs root to .reiserfs_priv */ | 976 | static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) |
| 977 | static int | ||
| 978 | xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name) | ||
| 979 | { | 977 | { |
| 980 | struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root; | 978 | return -EPERM; |
| 981 | if (container_of(q1, struct dentry, d_name) == priv_root) | ||
| 982 | return -ENOENT; | ||
| 983 | if (q1->len == name->len && | ||
| 984 | !memcmp(q1->name, name->name, name->len)) | ||
| 985 | return 0; | ||
| 986 | return 1; | ||
| 987 | } | 979 | } |
| 988 | 980 | ||
| 989 | static const struct dentry_operations xattr_lookup_poison_ops = { | 981 | static const struct dentry_operations xattr_lookup_poison_ops = { |
| 990 | .d_compare = xattr_lookup_poison, | 982 | .d_revalidate = xattr_hide_revalidate, |
| 991 | }; | 983 | }; |
| 992 | 984 | ||
| 993 | int reiserfs_lookup_privroot(struct super_block *s) | 985 | int reiserfs_lookup_privroot(struct super_block *s) |
| @@ -1001,8 +993,7 @@ int reiserfs_lookup_privroot(struct super_block *s) | |||
| 1001 | strlen(PRIVROOT_NAME)); | 993 | strlen(PRIVROOT_NAME)); |
| 1002 | if (!IS_ERR(dentry)) { | 994 | if (!IS_ERR(dentry)) { |
| 1003 | REISERFS_SB(s)->priv_root = dentry; | 995 | REISERFS_SB(s)->priv_root = dentry; |
| 1004 | if (!reiserfs_expose_privroot(s)) | 996 | dentry->d_op = &xattr_lookup_poison_ops; |
| 1005 | s->s_root->d_op = &xattr_lookup_poison_ops; | ||
| 1006 | if (dentry->d_inode) | 997 | if (dentry->d_inode) |
| 1007 | dentry->d_inode->i_flags |= S_PRIVATE; | 998 | dentry->d_inode->i_flags |= S_PRIVATE; |
| 1008 | } else | 999 | } else |
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 81f3b14d5d76..68f883b30a53 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 20 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | 20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | 22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
| 23 | * DEALINGS IN THE SOFTWARE. | 23 | * DEALINGS IN THE SOFTWARE. |
diff --git a/include/linux/firewire-constants.h b/include/linux/firewire-constants.h index 9c63f06e67f2..9b4bb5fbba4b 100644 --- a/include/linux/firewire-constants.h +++ b/include/linux/firewire-constants.h | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 20 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | 20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | 22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
| 23 | * DEALINGS IN THE SOFTWARE. | 23 | * DEALINGS IN THE SOFTWARE. |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 39d57bc6cc71..44f35aea2f1f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -2315,8 +2315,9 @@ extern int vfs_fstatat(int , char __user *, struct kstat *, int); | |||
| 2315 | extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, | 2315 | extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, |
| 2316 | unsigned long arg); | 2316 | unsigned long arg); |
| 2317 | extern int __generic_block_fiemap(struct inode *inode, | 2317 | extern int __generic_block_fiemap(struct inode *inode, |
| 2318 | struct fiemap_extent_info *fieinfo, u64 start, | 2318 | struct fiemap_extent_info *fieinfo, |
| 2319 | u64 len, get_block_t *get_block); | 2319 | loff_t start, loff_t len, |
| 2320 | get_block_t *get_block); | ||
| 2320 | extern int generic_block_fiemap(struct inode *inode, | 2321 | extern int generic_block_fiemap(struct inode *inode, |
| 2321 | struct fiemap_extent_info *fieinfo, u64 start, | 2322 | struct fiemap_extent_info *fieinfo, u64 start, |
| 2322 | u64 len, get_block_t *get_block); | 2323 | u64 len, get_block_t *get_block); |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a3fd0f91d943..169d07758ee5 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -54,7 +54,7 @@ extern struct kmem_cache *kvm_vcpu_cache; | |||
| 54 | */ | 54 | */ |
| 55 | struct kvm_io_bus { | 55 | struct kvm_io_bus { |
| 56 | int dev_count; | 56 | int dev_count; |
| 57 | #define NR_IOBUS_DEVS 6 | 57 | #define NR_IOBUS_DEVS 200 |
| 58 | struct kvm_io_device *devs[NR_IOBUS_DEVS]; | 58 | struct kvm_io_device *devs[NR_IOBUS_DEVS]; |
| 59 | }; | 59 | }; |
| 60 | 60 | ||
| @@ -119,6 +119,11 @@ struct kvm_memory_slot { | |||
| 119 | int user_alloc; | 119 | int user_alloc; |
| 120 | }; | 120 | }; |
| 121 | 121 | ||
| 122 | static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) | ||
| 123 | { | ||
| 124 | return ALIGN(memslot->npages, BITS_PER_LONG) / 8; | ||
| 125 | } | ||
| 126 | |||
| 122 | struct kvm_kernel_irq_routing_entry { | 127 | struct kvm_kernel_irq_routing_entry { |
| 123 | u32 gsi; | 128 | u32 gsi; |
| 124 | u32 type; | 129 | u32 type; |
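The new kvm_dirty_bitmap_bytes() helper is just "one dirty bit per page, rounded up to whole longs, converted to bytes". A compile-and-run userspace equivalent, with BITS_PER_LONG spelled out since it is a kernel-only macro:

```c
#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long dirty_bitmap_bytes(unsigned long npages)
{
	/* one dirty bit per page, rounded up to a whole number of longs */
	return (npages + BITS_PER_LONG - 1) / BITS_PER_LONG * sizeof(unsigned long);
}

int main(void)
{
	assert(dirty_bitmap_bytes(1) == sizeof(unsigned long));
	assert(dirty_bitmap_bytes(BITS_PER_LONG + 1) == 2 * sizeof(unsigned long));
	printf("%lu\n", dirty_bitmap_bytes(1UL << 20));	/* 1M pages -> 128 KiB on 64-bit */
	return 0;
}
```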
diff --git a/include/linux/poison.h b/include/linux/poison.h index 2110a81c5e2a..34066ffd893d 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h | |||
| @@ -48,6 +48,15 @@ | |||
| 48 | #define POISON_FREE 0x6b /* for use-after-free poisoning */ | 48 | #define POISON_FREE 0x6b /* for use-after-free poisoning */ |
| 49 | #define POISON_END 0xa5 /* end-byte of poisoning */ | 49 | #define POISON_END 0xa5 /* end-byte of poisoning */ |
| 50 | 50 | ||
| 51 | /********** mm/hugetlb.c **********/ | ||
| 52 | /* | ||
| 53 | * Private mappings of hugetlb pages use this poisoned value for | ||
| 54 | * page->mapping. The core VM should not be doing anything with this mapping | ||
| 55 | * but futex requires the existence of some page->mapping value even though it | ||
| 56 | * is unused if PAGE_MAPPING_ANON is set. | ||
| 57 | */ | ||
| 58 | #define HUGETLB_POISON ((void *)(0x00300300 + POISON_POINTER_DELTA + PAGE_MAPPING_ANON)) | ||
| 59 | |||
| 51 | /********** arch/$ARCH/mm/init.c **********/ | 60 | /********** arch/$ARCH/mm/init.c **********/ |
| 52 | #define POISON_FREE_INITMEM 0xcc | 61 | #define POISON_FREE_INITMEM 0xcc |
| 53 | 62 | ||
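The comment above relies on consumers of page->mapping classifying the pointer by its PAGE_MAPPING_ANON bit before ever dereferencing it. A tiny userspace model of that classification, taking POISON_POINTER_DELTA as 0 (its value when CONFIG_ILLEGAL_POINTER_VALUE is unset):

```c
#include <assert.h>

#define PAGE_MAPPING_ANON	1UL
#define POISON_POINTER_DELTA	0UL	/* assumed: no CONFIG_ILLEGAL_POINTER_VALUE */
#define HUGETLB_POISON \
	((void *)(0x00300300UL + POISON_POINTER_DELTA + PAGE_MAPPING_ANON))

/* anything with the low bit set is treated as "anonymous" and never
 * dereferenced as a struct address_space pointer */
static int mapping_is_anon(const void *mapping)
{
	return ((unsigned long)mapping & PAGE_MAPPING_ANON) != 0;
}

int main(void)
{
	assert(mapping_is_anon(HUGETLB_POISON));
	return 0;
}
```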
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 28c9fd020d39..ebd747265294 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h | |||
| @@ -183,9 +183,13 @@ static inline struct regulator *__must_check regulator_get(struct device *dev, | |||
| 183 | { | 183 | { |
| 184 | /* Nothing except the stubbed out regulator API should be | 184 | /* Nothing except the stubbed out regulator API should be |
| 185 | * looking at the value except to check if it is an error | 185 | * looking at the value except to check if it is an error |
| 186 | * value so the actual return value doesn't matter. | 186 | * value. Drivers are free to handle NULL specifically by |
| 187 | * skipping all regulator API calls, but they don't have to. | ||
| 188 | * Drivers which don't, should make sure they properly handle | ||
| 189 | * corner cases of the API, such as regulator_get_voltage() | ||
| 190 | * returning 0. | ||
| 187 | */ | 191 | */ |
| 188 | return (struct regulator *)id; | 192 | return NULL; |
| 189 | } | 193 | } |
| 190 | static inline void regulator_put(struct regulator *regulator) | 194 | static inline void regulator_put(struct regulator *regulator) |
| 191 | { | 195 | { |
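With the stub change above, a consumer built without CONFIG_REGULATOR now sees NULL instead of a junk pointer. A hedged kernel-style example of a driver taking advantage of that; my_dev and the "vcc" supply name are made up, and real drivers may just treat the stub like any other regulator:

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

struct my_dev {
	struct regulator *vcc;	/* NULL when the regulator API is stubbed out */
};

static int my_dev_power_on(struct device *dev, struct my_dev *md)
{
	md->vcc = regulator_get(dev, "vcc");
	if (IS_ERR(md->vcc))
		return PTR_ERR(md->vcc);	/* genuine error from the regulator core */
	if (!md->vcc)
		return 0;			/* stubbed API: skip regulator calls */
	return regulator_enable(md->vcc);
}
```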
diff --git a/init/initramfs.c b/init/initramfs.c index 37d3859b1b32..4b9c20205092 100644 --- a/init/initramfs.c +++ b/init/initramfs.c | |||
| @@ -457,7 +457,8 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len) | |||
| 457 | compress_name); | 457 | compress_name); |
| 458 | message = msg_buf; | 458 | message = msg_buf; |
| 459 | } | 459 | } |
| 460 | } | 460 | } else |
| 461 | error("junk in compressed archive"); | ||
| 461 | if (state != Reset) | 462 | if (state != Reset) |
| 462 | error("junk in compressed archive"); | 463 | error("junk in compressed archive"); |
| 463 | this_header = saved_offset + my_inptr; | 464 | this_header = saved_offset + my_inptr; |
diff --git a/kernel/cred.c b/kernel/cred.c index e1dbe9eef800..62af1816c235 100644 --- a/kernel/cred.c +++ b/kernel/cred.c | |||
| @@ -398,6 +398,8 @@ struct cred *prepare_usermodehelper_creds(void) | |||
| 398 | 398 | ||
| 399 | error: | 399 | error: |
| 400 | put_cred(new); | 400 | put_cred(new); |
| 401 | return NULL; | ||
| 402 | |||
| 401 | free_tgcred: | 403 | free_tgcred: |
| 402 | #ifdef CONFIG_KEYS | 404 | #ifdef CONFIG_KEYS |
| 403 | kfree(tgcred); | 405 | kfree(tgcred); |
| @@ -791,8 +793,6 @@ bool creds_are_invalid(const struct cred *cred) | |||
| 791 | { | 793 | { |
| 792 | if (cred->magic != CRED_MAGIC) | 794 | if (cred->magic != CRED_MAGIC) |
| 793 | return true; | 795 | return true; |
| 794 | if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers)) | ||
| 795 | return true; | ||
| 796 | #ifdef CONFIG_SECURITY_SELINUX | 796 | #ifdef CONFIG_SECURITY_SELINUX |
| 797 | if (selinux_is_enabled()) { | 797 | if (selinux_is_enabled()) { |
| 798 | if ((unsigned long) cred->security < PAGE_SIZE) | 798 | if ((unsigned long) cred->security < PAGE_SIZE) |
diff --git a/kernel/sys.c b/kernel/sys.c index 6d1a7e0f9d5b..7cb426a58965 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -1118,7 +1118,7 @@ DECLARE_RWSEM(uts_sem); | |||
| 1118 | 1118 | ||
| 1119 | #ifdef COMPAT_UTS_MACHINE | 1119 | #ifdef COMPAT_UTS_MACHINE |
| 1120 | #define override_architecture(name) \ | 1120 | #define override_architecture(name) \ |
| 1121 | (current->personality == PER_LINUX32 && \ | 1121 | (personality(current->personality) == PER_LINUX32 && \ |
| 1122 | copy_to_user(name->machine, COMPAT_UTS_MACHINE, \ | 1122 | copy_to_user(name->machine, COMPAT_UTS_MACHINE, \ |
| 1123 | sizeof(COMPAT_UTS_MACHINE))) | 1123 | sizeof(COMPAT_UTS_MACHINE))) |
| 1124 | #else | 1124 | #else |
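The kernel/sys.c change matters because the personality value carries flag bits (ADDR_NO_RANDOMIZE and friends) above the base personality, so a raw equality test misses PER_LINUX32 as soon as any flag is set. A small demonstration of the same masking, with constants copied from <linux/personality.h> for illustration:

```c
#include <assert.h>

#define PER_MASK		0x00ffU
#define PER_LINUX32		0x0008U
#define ADDR_NO_RANDOMIZE	0x0040000U
#define personality(pers)	((pers) & PER_MASK)

int main(void)
{
	unsigned int pers = PER_LINUX32 | ADDR_NO_RANDOMIZE;

	assert(pers != PER_LINUX32);			/* raw compare misses */
	assert(personality(pers) == PER_LINUX32);	/* masked compare matches */
	return 0;
}
```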
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c index db521f45626e..bcb3a4bd68ff 100644 --- a/lib/decompress_unlzo.c +++ b/lib/decompress_unlzo.c | |||
| @@ -97,7 +97,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, | |||
| 97 | u32 src_len, dst_len; | 97 | u32 src_len, dst_len; |
| 98 | size_t tmp; | 98 | size_t tmp; |
| 99 | u8 *in_buf, *in_buf_save, *out_buf; | 99 | u8 *in_buf, *in_buf_save, *out_buf; |
| 100 | int obytes_processed = 0; | 100 | int ret = -1; |
| 101 | 101 | ||
| 102 | set_error_fn(error_fn); | 102 | set_error_fn(error_fn); |
| 103 | 103 | ||
| @@ -174,15 +174,22 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, | |||
| 174 | 174 | ||
| 175 | /* decompress */ | 175 | /* decompress */ |
| 176 | tmp = dst_len; | 176 | tmp = dst_len; |
| 177 | r = lzo1x_decompress_safe((u8 *) in_buf, src_len, | 177 | |
| 178 | /* When the input data is not compressed at all, | ||
| 179 | * lzo1x_decompress_safe will fail, so call memcpy() | ||
| 180 | * instead */ | ||
| 181 | if (unlikely(dst_len == src_len)) | ||
| 182 | memcpy(out_buf, in_buf, src_len); | ||
| 183 | else { | ||
| 184 | r = lzo1x_decompress_safe((u8 *) in_buf, src_len, | ||
| 178 | out_buf, &tmp); | 185 | out_buf, &tmp); |
| 179 | 186 | ||
| 180 | if (r != LZO_E_OK || dst_len != tmp) { | 187 | if (r != LZO_E_OK || dst_len != tmp) { |
| 181 | error("Compressed data violation"); | 188 | error("Compressed data violation"); |
| 182 | goto exit_2; | 189 | goto exit_2; |
| 190 | } | ||
| 183 | } | 191 | } |
| 184 | 192 | ||
| 185 | obytes_processed += dst_len; | ||
| 186 | if (flush) | 193 | if (flush) |
| 187 | flush(out_buf, dst_len); | 194 | flush(out_buf, dst_len); |
| 188 | if (output) | 195 | if (output) |
| @@ -196,6 +203,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, | |||
| 196 | in_buf += src_len; | 203 | in_buf += src_len; |
| 197 | } | 204 | } |
| 198 | 205 | ||
| 206 | ret = 0; | ||
| 199 | exit_2: | 207 | exit_2: |
| 200 | if (!input) | 208 | if (!input) |
| 201 | free(in_buf); | 209 | free(in_buf); |
| @@ -203,7 +211,7 @@ exit_1: | |||
| 203 | if (!output) | 211 | if (!output) |
| 204 | free(out_buf); | 212 | free(out_buf); |
| 205 | exit: | 213 | exit: |
| 206 | return obytes_processed; | 214 | return ret; |
| 207 | } | 215 | } |
| 208 | 216 | ||
| 209 | #define decompress unlzo | 217 | #define decompress unlzo |
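The decompress_unlzo.c hunk copies blocks that were stored uncompressed (src_len == dst_len), which the LZO decompressor would otherwise reject, and converts the function to the usual 0/-1 return convention. A stripped-down sketch of that dispatch, with a generic callback standing in for lzo1x_decompress_safe():

```c
#include <stddef.h>
#include <string.h>

typedef int (*decompress_fn)(const unsigned char *in, size_t in_len,
			     unsigned char *out, size_t *out_len);

static int copy_or_decompress(const unsigned char *in, size_t src_len,
			      unsigned char *out, size_t dst_len,
			      decompress_fn decompress)
{
	size_t tmp = dst_len;

	/* a block stored verbatim has src_len == dst_len: just copy it */
	if (dst_len == src_len) {
		memcpy(out, in, src_len);
		return 0;
	}
	if (decompress(in, src_len, out, &tmp) != 0 || tmp != dst_len)
		return -1;	/* compressed data violation */
	return 0;
}
```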
diff --git a/lib/flex_array.c b/lib/flex_array.c index 66eef2e4483e..41b1804fa728 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c | |||
| @@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total, | |||
| 99 | ret->element_size = element_size; | 99 | ret->element_size = element_size; |
| 100 | ret->total_nr_elements = total; | 100 | ret->total_nr_elements = total; |
| 101 | if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) | 101 | if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) |
| 102 | memset(ret->parts[0], FLEX_ARRAY_FREE, | 102 | memset(&ret->parts[0], FLEX_ARRAY_FREE, |
| 103 | FLEX_ARRAY_BASE_BYTES_LEFT); | 103 | FLEX_ARRAY_BASE_BYTES_LEFT); |
| 104 | return ret; | 104 | return ret; |
| 105 | } | 105 | } |
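The flex_array fix above is a one-character change with a large effect: parts[] is an array embedded in the struct, so the base space to poison starts at &parts[0], whereas parts[0] is an uninitialized pointer value. A simplified model (not the real flex_array layout):

```c
#include <string.h>

#define TOY_FREE_POISON 0x6c	/* stands in for FLEX_ARRAY_FREE */

struct toy_flex_array {
	int element_size;
	unsigned int total_nr_elements;
	void *parts[8];		/* base space reused for small arrays */
};

static void poison_base(struct toy_flex_array *fa)
{
	/* correct: overwrite the embedded slots themselves */
	memset(&fa->parts[0], TOY_FREE_POISON, sizeof(fa->parts));

	/*
	 * The old bug was memset(fa->parts[0], ...), which dereferences an
	 * uninitialized pointer and scribbles over arbitrary memory.
	 */
}
```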
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 7376b7c55ffe..46d34b0b74a8 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -118,6 +118,7 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base) | |||
| 118 | 118 | ||
| 119 | return simple_strtoull(cp, endp, base); | 119 | return simple_strtoull(cp, endp, base); |
| 120 | } | 120 | } |
| 121 | EXPORT_SYMBOL(simple_strtoll); | ||
| 121 | 122 | ||
| 122 | /** | 123 | /** |
| 123 | * strict_strtoul - convert a string to an unsigned long strictly | 124 | * strict_strtoul - convert a string to an unsigned long strictly |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 6034dc9e9796..ffbdfc86aedf 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
| @@ -546,6 +546,7 @@ static void free_huge_page(struct page *page) | |||
| 546 | 546 | ||
| 547 | mapping = (struct address_space *) page_private(page); | 547 | mapping = (struct address_space *) page_private(page); |
| 548 | set_page_private(page, 0); | 548 | set_page_private(page, 0); |
| 549 | page->mapping = NULL; | ||
| 549 | BUG_ON(page_count(page)); | 550 | BUG_ON(page_count(page)); |
| 550 | INIT_LIST_HEAD(&page->lru); | 551 | INIT_LIST_HEAD(&page->lru); |
| 551 | 552 | ||
| @@ -2447,8 +2448,10 @@ retry: | |||
| 2447 | spin_lock(&inode->i_lock); | 2448 | spin_lock(&inode->i_lock); |
| 2448 | inode->i_blocks += blocks_per_huge_page(h); | 2449 | inode->i_blocks += blocks_per_huge_page(h); |
| 2449 | spin_unlock(&inode->i_lock); | 2450 | spin_unlock(&inode->i_lock); |
| 2450 | } else | 2451 | } else { |
| 2451 | lock_page(page); | 2452 | lock_page(page); |
| 2453 | page->mapping = HUGETLB_POISON; | ||
| 2454 | } | ||
| 2452 | } | 2455 | } |
| 2453 | 2456 | ||
| 2454 | /* | 2457 | /* |
diff --git a/mm/ksm.c b/mm/ksm.c --- a/mm/ksm.c +++ b/mm/ksm.c | |||
| @@ -365,7 +365,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) | |||
| 365 | do { | 365 | do { |
| 366 | cond_resched(); | 366 | cond_resched(); |
| 367 | page = follow_page(vma, addr, FOLL_GET); | 367 | page = follow_page(vma, addr, FOLL_GET); |
| 368 | if (!page) | 368 | if (IS_ERR_OR_NULL(page)) |
| 369 | break; | 369 | break; |
| 370 | if (PageKsm(page)) | 370 | if (PageKsm(page)) |
| 371 | ret = handle_mm_fault(vma->vm_mm, vma, addr, | 371 | ret = handle_mm_fault(vma->vm_mm, vma, addr, |
| @@ -447,7 +447,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item) | |||
| 447 | goto out; | 447 | goto out; |
| 448 | 448 | ||
| 449 | page = follow_page(vma, addr, FOLL_GET); | 449 | page = follow_page(vma, addr, FOLL_GET); |
| 450 | if (!page) | 450 | if (IS_ERR_OR_NULL(page)) |
| 451 | goto out; | 451 | goto out; |
| 452 | if (PageAnon(page)) { | 452 | if (PageAnon(page)) { |
| 453 | flush_anon_page(vma, page, addr); | 453 | flush_anon_page(vma, page, addr); |
| @@ -1086,7 +1086,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, | |||
| 1086 | cond_resched(); | 1086 | cond_resched(); |
| 1087 | tree_rmap_item = rb_entry(*new, struct rmap_item, node); | 1087 | tree_rmap_item = rb_entry(*new, struct rmap_item, node); |
| 1088 | tree_page = get_mergeable_page(tree_rmap_item); | 1088 | tree_page = get_mergeable_page(tree_rmap_item); |
| 1089 | if (!tree_page) | 1089 | if (IS_ERR_OR_NULL(tree_page)) |
| 1090 | return NULL; | 1090 | return NULL; |
| 1091 | 1091 | ||
| 1092 | /* | 1092 | /* |
| @@ -1294,7 +1294,7 @@ next_mm: | |||
| 1294 | if (ksm_test_exit(mm)) | 1294 | if (ksm_test_exit(mm)) |
| 1295 | break; | 1295 | break; |
| 1296 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); | 1296 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); |
| 1297 | if (*page && PageAnon(*page)) { | 1297 | if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) { |
| 1298 | flush_anon_page(vma, *page, ksm_scan.address); | 1298 | flush_anon_page(vma, *page, ksm_scan.address); |
| 1299 | flush_dcache_page(*page); | 1299 | flush_dcache_page(*page); |
| 1300 | rmap_item = get_next_rmap_item(slot, | 1300 | rmap_item = get_next_rmap_item(slot, |
| @@ -1308,7 +1308,7 @@ next_mm: | |||
| 1308 | up_read(&mm->mmap_sem); | 1308 | up_read(&mm->mmap_sem); |
| 1309 | return rmap_item; | 1309 | return rmap_item; |
| 1310 | } | 1310 | } |
| 1311 | if (*page) | 1311 | if (!IS_ERR_OR_NULL(*page)) |
| 1312 | put_page(*page); | 1312 | put_page(*page); |
| 1313 | ksm_scan.address += PAGE_SIZE; | 1313 | ksm_scan.address += PAGE_SIZE; |
| 1314 | cond_resched(); | 1314 | cond_resched(); |
| @@ -1367,7 +1367,7 @@ next_mm: | |||
| 1367 | static void ksm_do_scan(unsigned int scan_npages) | 1367 | static void ksm_do_scan(unsigned int scan_npages) |
| 1368 | { | 1368 | { |
| 1369 | struct rmap_item *rmap_item; | 1369 | struct rmap_item *rmap_item; |
| 1370 | struct page *page; | 1370 | struct page *uninitialized_var(page); |
| 1371 | 1371 | ||
| 1372 | while (scan_npages--) { | 1372 | while (scan_npages--) { |
| 1373 | cond_resched(); | 1373 | cond_resched(); |
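The mm/ksm.c changes above switch plain NULL tests to IS_ERR_OR_NULL() because follow_page() can hand back a valid page, NULL, or an ERR_PTR-encoded error, and all three must be rejected. A userspace model of that three-way check, with MAX_ERRNO as in the kernel's err.h:

```c
#include <stdio.h>

#define MAX_ERRNO	4095UL
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static int is_err_or_null(const void *ptr)
{
	return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}

int main(void)
{
	int dummy;
	void *ok = &dummy;
	void *none = NULL;
	void *err = (void *)(unsigned long)-12;	/* ERR_PTR(-ENOMEM) style */

	printf("%d %d %d\n", is_err_or_null(ok),
	       is_err_or_null(none), is_err_or_null(err));	/* prints: 0 1 1 */
	return 0;
}
```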
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f4ede99c8b9b..6c755de385f7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -2429,11 +2429,11 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) | |||
| 2429 | } | 2429 | } |
| 2430 | unlock_page_cgroup(pc); | 2430 | unlock_page_cgroup(pc); |
| 2431 | 2431 | ||
| 2432 | *ptr = mem; | ||
| 2432 | if (mem) { | 2433 | if (mem) { |
| 2433 | ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false); | 2434 | ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false); |
| 2434 | css_put(&mem->css); | 2435 | css_put(&mem->css); |
| 2435 | } | 2436 | } |
| 2436 | *ptr = mem; | ||
| 2437 | return ret; | 2437 | return ret; |
| 2438 | } | 2438 | } |
| 2439 | 2439 | ||
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
| @@ -133,8 +133,8 @@ int anon_vma_prepare(struct vm_area_struct *vma) | |||
| 133 | goto out_enomem_free_avc; | 133 | goto out_enomem_free_avc; |
| 134 | allocated = anon_vma; | 134 | allocated = anon_vma; |
| 135 | } | 135 | } |
| 136 | spin_lock(&anon_vma->lock); | ||
| 137 | 136 | ||
| 137 | spin_lock(&anon_vma->lock); | ||
| 138 | /* page_table_lock to protect against threads */ | 138 | /* page_table_lock to protect against threads */ |
| 139 | spin_lock(&mm->page_table_lock); | 139 | spin_lock(&mm->page_table_lock); |
| 140 | if (likely(!vma->anon_vma)) { | 140 | if (likely(!vma->anon_vma)) { |
| @@ -144,14 +144,15 @@ int anon_vma_prepare(struct vm_area_struct *vma) | |||
| 144 | list_add(&avc->same_vma, &vma->anon_vma_chain); | 144 | list_add(&avc->same_vma, &vma->anon_vma_chain); |
| 145 | list_add(&avc->same_anon_vma, &anon_vma->head); | 145 | list_add(&avc->same_anon_vma, &anon_vma->head); |
| 146 | allocated = NULL; | 146 | allocated = NULL; |
| 147 | avc = NULL; | ||
| 147 | } | 148 | } |
| 148 | spin_unlock(&mm->page_table_lock); | 149 | spin_unlock(&mm->page_table_lock); |
| 149 | |||
| 150 | spin_unlock(&anon_vma->lock); | 150 | spin_unlock(&anon_vma->lock); |
| 151 | if (unlikely(allocated)) { | 151 | |
| 152 | if (unlikely(allocated)) | ||
| 152 | anon_vma_free(allocated); | 153 | anon_vma_free(allocated); |
| 154 | if (unlikely(avc)) | ||
| 153 | anon_vma_chain_free(avc); | 155 | anon_vma_chain_free(avc); |
| 154 | } | ||
| 155 | } | 156 | } |
| 156 | return 0; | 157 | return 0; |
| 157 | 158 | ||
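The anon_vma_prepare() rework above keeps the familiar shape "allocate outside the lock, publish under it, free whatever was not consumed after unlocking", and now applies it to the avc allocation as well as the anon_vma. A generic sketch of that shape, with a pthread mutex standing in for the kernel spinlocks:

```c
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *published;

static int publish_once(void)
{
	void *mine = malloc(64);	/* speculative allocation, no lock held */

	if (!mine)
		return -1;

	pthread_mutex_lock(&lock);
	if (!published) {
		published = mine;	/* consumed: must not be freed below */
		mine = NULL;
	}
	pthread_mutex_unlock(&lock);

	free(mine);			/* frees only the unused allocation */
	return 0;
}
```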
diff --git a/security/inode.c b/security/inode.c index c3a793881d04..1c812e874504 100644 --- a/security/inode.c +++ b/security/inode.c | |||
| @@ -161,13 +161,13 @@ static int create_by_name(const char *name, mode_t mode, | |||
| 161 | 161 | ||
| 162 | mutex_lock(&parent->d_inode->i_mutex); | 162 | mutex_lock(&parent->d_inode->i_mutex); |
| 163 | *dentry = lookup_one_len(name, parent, strlen(name)); | 163 | *dentry = lookup_one_len(name, parent, strlen(name)); |
| 164 | if (!IS_ERR(dentry)) { | 164 | if (!IS_ERR(*dentry)) { |
| 165 | if ((mode & S_IFMT) == S_IFDIR) | 165 | if ((mode & S_IFMT) == S_IFDIR) |
| 166 | error = mkdir(parent->d_inode, *dentry, mode); | 166 | error = mkdir(parent->d_inode, *dentry, mode); |
| 167 | else | 167 | else |
| 168 | error = create(parent->d_inode, *dentry, mode); | 168 | error = create(parent->d_inode, *dentry, mode); |
| 169 | } else | 169 | } else |
| 170 | error = PTR_ERR(dentry); | 170 | error = PTR_ERR(*dentry); |
| 171 | mutex_unlock(&parent->d_inode->i_mutex); | 171 | mutex_unlock(&parent->d_inode->i_mutex); |
| 172 | 172 | ||
| 173 | return error; | 173 | return error; |
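The create_by_name() fix checks the pointer that was actually assigned: after "*dentry = lookup_one_len(...)" any error lives in *dentry, while "dentry" itself is just the caller's variable address and can never look like an error. A minimal illustration of the same out-parameter pattern:

```c
#include <stdlib.h>

static int create_thing(void **out)
{
	*out = malloc(16);	/* the result reaches the caller via *out */

	if (!*out)		/* test *out; "out" itself is never NULL here */
		return -1;
	return 0;
}

/* The old bug was the moral equivalent of "if (!out)": it tests the address
 * of the caller's variable, so the error branch can never fire. */
```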
diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 03fe63ed55bd..ea97c3120d66 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c | |||
| @@ -68,7 +68,8 @@ static int call_sbin_request_key(struct key_construction *cons, | |||
| 68 | { | 68 | { |
| 69 | const struct cred *cred = current_cred(); | 69 | const struct cred *cred = current_cred(); |
| 70 | key_serial_t prkey, sskey; | 70 | key_serial_t prkey, sskey; |
| 71 | struct key *key = cons->key, *authkey = cons->authkey, *keyring; | 71 | struct key *key = cons->key, *authkey = cons->authkey, *keyring, |
| 72 | *session; | ||
| 72 | char *argv[9], *envp[3], uid_str[12], gid_str[12]; | 73 | char *argv[9], *envp[3], uid_str[12], gid_str[12]; |
| 73 | char key_str[12], keyring_str[3][12]; | 74 | char key_str[12], keyring_str[3][12]; |
| 74 | char desc[20]; | 75 | char desc[20]; |
| @@ -112,10 +113,12 @@ static int call_sbin_request_key(struct key_construction *cons, | |||
| 112 | if (cred->tgcred->process_keyring) | 113 | if (cred->tgcred->process_keyring) |
| 113 | prkey = cred->tgcred->process_keyring->serial; | 114 | prkey = cred->tgcred->process_keyring->serial; |
| 114 | 115 | ||
| 115 | if (cred->tgcred->session_keyring) | 116 | rcu_read_lock(); |
| 116 | sskey = rcu_dereference(cred->tgcred->session_keyring)->serial; | 117 | session = rcu_dereference(cred->tgcred->session_keyring); |
| 117 | else | 118 | if (!session) |
| 118 | sskey = cred->user->session_keyring->serial; | 119 | session = cred->user->session_keyring; |
| 120 | sskey = session->serial; | ||
| 121 | rcu_read_unlock(); | ||
| 119 | 122 | ||
| 120 | sprintf(keyring_str[2], "%d", sskey); | 123 | sprintf(keyring_str[2], "%d", sskey); |
| 121 | 124 | ||
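The request_key.c change wraps the session-keyring access in a proper RCU read-side section and copies the serial out before dropping it. The pattern in isolation, as a hedged kernel-style sketch (struct thing and get_serial() are made up):

```c
#include <linux/rcupdate.h>

struct thing {
	int serial;
};

static int get_serial(struct thing **slot, struct thing *fallback)
{
	struct thing *t;
	int serial;

	rcu_read_lock();
	t = rcu_dereference(*slot);	/* only valid inside the read section */
	if (!t)
		t = fallback;		/* fallback is not RCU-managed */
	serial = t->serial;		/* copy out before unlocking */
	rcu_read_unlock();

	return serial;
}
```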
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index f669442b7c82..cec68152dcb1 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -2273,6 +2273,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { | |||
| 2273 | SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), | 2273 | SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), |
| 2274 | SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), | 2274 | SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), |
| 2275 | SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB), | 2275 | SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB), |
| 2276 | SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB), | ||
| 2276 | SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), | 2277 | SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), |
| 2277 | {} | 2278 | {} |
| 2278 | }; | 2279 | }; |
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index af34606c30c3..e9fdfc4b1c57 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c | |||
| @@ -519,14 +519,6 @@ static int ad198x_suspend(struct hda_codec *codec, pm_message_t state) | |||
| 519 | ad198x_power_eapd(codec); | 519 | ad198x_power_eapd(codec); |
| 520 | return 0; | 520 | return 0; |
| 521 | } | 521 | } |
| 522 | |||
| 523 | static int ad198x_resume(struct hda_codec *codec) | ||
| 524 | { | ||
| 525 | ad198x_init(codec); | ||
| 526 | snd_hda_codec_resume_amp(codec); | ||
| 527 | snd_hda_codec_resume_cache(codec); | ||
| 528 | return 0; | ||
| 529 | } | ||
| 530 | #endif | 522 | #endif |
| 531 | 523 | ||
| 532 | static struct hda_codec_ops ad198x_patch_ops = { | 524 | static struct hda_codec_ops ad198x_patch_ops = { |
| @@ -539,7 +531,6 @@ static struct hda_codec_ops ad198x_patch_ops = { | |||
| 539 | #endif | 531 | #endif |
| 540 | #ifdef SND_HDA_NEEDS_RESUME | 532 | #ifdef SND_HDA_NEEDS_RESUME |
| 541 | .suspend = ad198x_suspend, | 533 | .suspend = ad198x_suspend, |
| 542 | .resume = ad198x_resume, | ||
| 543 | #endif | 534 | #endif |
| 544 | .reboot_notify = ad198x_shutup, | 535 | .reboot_notify = ad198x_shutup, |
| 545 | }; | 536 | }; |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index aad1627f56f1..7404dba16f83 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -4143,7 +4143,7 @@ static struct snd_pci_quirk alc880_cfg_tbl[] = { | |||
| 4143 | SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG), | 4143 | SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG), |
| 4144 | SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734), | 4144 | SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734), |
| 4145 | SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FUJITSU), | 4145 | SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FUJITSU), |
| 4146 | SND_PCI_QUIRK(0x1734, 0x10ac, "FSC", ALC880_UNIWILL), | 4146 | SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_F1734), |
| 4147 | SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU), | 4147 | SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU), |
| 4148 | SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW), | 4148 | SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW), |
| 4149 | SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG), | 4149 | SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG), |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index c4be3fab94e5..7fb7d017a347 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
| @@ -1607,6 +1607,10 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = { | |||
| 1607 | "Dell Studio 1555", STAC_DELL_M6_DMIC), | 1607 | "Dell Studio 1555", STAC_DELL_M6_DMIC), |
| 1608 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd, | 1608 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd, |
| 1609 | "Dell Studio 1557", STAC_DELL_M6_DMIC), | 1609 | "Dell Studio 1557", STAC_DELL_M6_DMIC), |
| 1610 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe, | ||
| 1611 | "Dell Studio XPS 1645", STAC_DELL_M6_BOTH), | ||
| 1612 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413, | ||
| 1613 | "Dell Studio 1558", STAC_DELL_M6_BOTH), | ||
| 1610 | {} /* terminator */ | 1614 | {} /* terminator */ |
| 1611 | }; | 1615 | }; |
| 1612 | 1616 | ||
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c index b64e78139d63..b56e33676780 100644 --- a/sound/pci/maestro3.c +++ b/sound/pci/maestro3.c | |||
| @@ -849,6 +849,7 @@ struct snd_m3 { | |||
| 849 | struct snd_kcontrol *master_switch; | 849 | struct snd_kcontrol *master_switch; |
| 850 | struct snd_kcontrol *master_volume; | 850 | struct snd_kcontrol *master_volume; |
| 851 | struct tasklet_struct hwvol_tq; | 851 | struct tasklet_struct hwvol_tq; |
| 852 | unsigned int in_suspend; | ||
| 852 | 853 | ||
| 853 | #ifdef CONFIG_PM | 854 | #ifdef CONFIG_PM |
| 854 | u16 *suspend_mem; | 855 | u16 *suspend_mem; |
| @@ -884,6 +885,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_m3_ids) = { | |||
| 884 | MODULE_DEVICE_TABLE(pci, snd_m3_ids); | 885 | MODULE_DEVICE_TABLE(pci, snd_m3_ids); |
| 885 | 886 | ||
| 886 | static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = { | 887 | static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = { |
| 888 | SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c), | ||
| 887 | SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d), | 889 | SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d), |
| 888 | SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d), | 890 | SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d), |
| 889 | SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03), | 891 | SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03), |
| @@ -1613,6 +1615,11 @@ static void snd_m3_update_hw_volume(unsigned long private_data) | |||
| 1613 | outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER); | 1615 | outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER); |
| 1614 | outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER); | 1616 | outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER); |
| 1615 | 1617 | ||
| 1618 | /* Ignore spurious HV interrupts during suspend / resume, this avoids | ||
| 1619 | mistaking them for a mute button press. */ | ||
| 1620 | if (chip->in_suspend) | ||
| 1621 | return; | ||
| 1622 | |||
| 1616 | if (!chip->master_switch || !chip->master_volume) | 1623 | if (!chip->master_switch || !chip->master_volume) |
| 1617 | return; | 1624 | return; |
| 1618 | 1625 | ||
| @@ -2424,6 +2431,7 @@ static int m3_suspend(struct pci_dev *pci, pm_message_t state) | |||
| 2424 | if (chip->suspend_mem == NULL) | 2431 | if (chip->suspend_mem == NULL) |
| 2425 | return 0; | 2432 | return 0; |
| 2426 | 2433 | ||
| 2434 | chip->in_suspend = 1; | ||
| 2427 | snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); | 2435 | snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); |
| 2428 | snd_pcm_suspend_all(chip->pcm); | 2436 | snd_pcm_suspend_all(chip->pcm); |
| 2429 | snd_ac97_suspend(chip->ac97); | 2437 | snd_ac97_suspend(chip->ac97); |
| @@ -2497,6 +2505,7 @@ static int m3_resume(struct pci_dev *pci) | |||
| 2497 | snd_m3_hv_init(chip); | 2505 | snd_m3_hv_init(chip); |
| 2498 | 2506 | ||
| 2499 | snd_power_change_state(card, SNDRV_CTL_POWER_D0); | 2507 | snd_power_change_state(card, SNDRV_CTL_POWER_D0); |
| 2508 | chip->in_suspend = 0; | ||
| 2500 | return 0; | 2509 | return 0; |
| 2501 | } | 2510 | } |
| 2502 | #endif /* CONFIG_PM */ | 2511 | #endif /* CONFIG_PM */ |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5a0cd194dce0..c82ae2492634 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -341,7 +341,11 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn, | |||
| 341 | struct mm_struct *mm) | 341 | struct mm_struct *mm) |
| 342 | { | 342 | { |
| 343 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | 343 | struct kvm *kvm = mmu_notifier_to_kvm(mn); |
| 344 | int idx; | ||
| 345 | |||
| 346 | idx = srcu_read_lock(&kvm->srcu); | ||
| 344 | kvm_arch_flush_shadow(kvm); | 347 | kvm_arch_flush_shadow(kvm); |
| 348 | srcu_read_unlock(&kvm->srcu, idx); | ||
| 345 | } | 349 | } |
| 346 | 350 | ||
| 347 | static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { | 351 | static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { |
| @@ -648,7 +652,7 @@ skip_lpage: | |||
| 648 | 652 | ||
| 649 | /* Allocate page dirty bitmap if needed */ | 653 | /* Allocate page dirty bitmap if needed */ |
| 650 | if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { | 654 | if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { |
| 651 | unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8; | 655 | unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new); |
| 652 | 656 | ||
| 653 | new.dirty_bitmap = vmalloc(dirty_bytes); | 657 | new.dirty_bitmap = vmalloc(dirty_bytes); |
| 654 | if (!new.dirty_bitmap) | 658 | if (!new.dirty_bitmap) |
| @@ -768,7 +772,7 @@ int kvm_get_dirty_log(struct kvm *kvm, | |||
| 768 | { | 772 | { |
| 769 | struct kvm_memory_slot *memslot; | 773 | struct kvm_memory_slot *memslot; |
| 770 | int r, i; | 774 | int r, i; |
| 771 | int n; | 775 | unsigned long n; |
| 772 | unsigned long any = 0; | 776 | unsigned long any = 0; |
| 773 | 777 | ||
| 774 | r = -EINVAL; | 778 | r = -EINVAL; |
| @@ -780,7 +784,7 @@ int kvm_get_dirty_log(struct kvm *kvm, | |||
| 780 | if (!memslot->dirty_bitmap) | 784 | if (!memslot->dirty_bitmap) |
| 781 | goto out; | 785 | goto out; |
| 782 | 786 | ||
| 783 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | 787 | n = kvm_dirty_bitmap_bytes(memslot); |
| 784 | 788 | ||
| 785 | for (i = 0; !any && i < n/sizeof(long); ++i) | 789 | for (i = 0; !any && i < n/sizeof(long); ++i) |
| 786 | any = memslot->dirty_bitmap[i]; | 790 | any = memslot->dirty_bitmap[i]; |
| @@ -1186,10 +1190,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn) | |||
| 1186 | memslot = gfn_to_memslot_unaliased(kvm, gfn); | 1190 | memslot = gfn_to_memslot_unaliased(kvm, gfn); |
| 1187 | if (memslot && memslot->dirty_bitmap) { | 1191 | if (memslot && memslot->dirty_bitmap) { |
| 1188 | unsigned long rel_gfn = gfn - memslot->base_gfn; | 1192 | unsigned long rel_gfn = gfn - memslot->base_gfn; |
| 1193 | unsigned long *p = memslot->dirty_bitmap + | ||
| 1194 | rel_gfn / BITS_PER_LONG; | ||
| 1195 | int offset = rel_gfn % BITS_PER_LONG; | ||
| 1189 | 1196 | ||
| 1190 | /* avoid RMW */ | 1197 | /* avoid RMW */ |
| 1191 | if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap)) | 1198 | if (!generic_test_le_bit(offset, p)) |
| 1192 | generic___set_le_bit(rel_gfn, memslot->dirty_bitmap); | 1199 | generic___set_le_bit(offset, p); |
| 1193 | } | 1200 | } |
| 1194 | } | 1201 | } |
| 1195 | 1202 | ||
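The mark_page_dirty() change above precomputes which long of the bitmap holds the bit and the offset within that long, then tests and sets against that single word. A userspace sketch of the same index arithmetic, with plain bit operations standing in for the generic little-endian helpers:

```c
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static void set_dirty_bit(unsigned long *dirty_bitmap, unsigned long rel_gfn)
{
	unsigned long *p = dirty_bitmap + rel_gfn / BITS_PER_LONG;	/* word */
	unsigned long bit = 1UL << (rel_gfn % BITS_PER_LONG);		/* offset */

	if (!(*p & bit))	/* avoid the read-modify-write when already set */
		*p |= bit;
}
```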
