205 files changed, 2715 insertions, 881 deletions
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index ab2fe0eda1d7..8b9164990956 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
| @@ -410,7 +410,7 @@ argument is passed to the kernel in the command line. | |||
| 410 | That only is supported in some configurations, though (for example, if | 410 | That only is supported in some configurations, though (for example, if |
| 411 | the `HWP feature is enabled in the processor <Active Mode With HWP_>`_, | 411 | the `HWP feature is enabled in the processor <Active Mode With HWP_>`_, |
| 412 | the operation mode of the driver cannot be changed), and if it is not | 412 | the operation mode of the driver cannot be changed), and if it is not |
| 413 | supported in the current configuration, writes to this attribute with | 413 | supported in the current configuration, writes to this attribute will |
| 414 | fail with an appropriate error. | 414 | fail with an appropriate error. |
| 415 | 415 | ||
| 416 | Interpretation of Policy Attributes | 416 | Interpretation of Policy Attributes |
diff --git a/Documentation/trace/histogram.txt b/Documentation/trace/histogram.txt
index e73bcf9cb5f3..7ffea6aa22e3 100644
--- a/Documentation/trace/histogram.txt
+++ b/Documentation/trace/histogram.txt
| @@ -1729,35 +1729,35 @@ If a variable isn't a key variable or prefixed with 'vals=', the | |||
| 1729 | associated event field will be saved in a variable but won't be summed | 1729 | associated event field will be saved in a variable but won't be summed |
| 1730 | as a value: | 1730 | as a value: |
| 1731 | 1731 | ||
| 1732 | # echo 'hist:keys=next_pid:ts1=common_timestamp ... >> event/trigger | 1732 | # echo 'hist:keys=next_pid:ts1=common_timestamp ...' >> event/trigger |
| 1733 | 1733 | ||
| 1734 | Multiple variables can be assigned at the same time. The below would | 1734 | Multiple variables can be assigned at the same time. The below would |
| 1735 | result in both ts0 and b being created as variables, with both | 1735 | result in both ts0 and b being created as variables, with both |
| 1736 | common_timestamp and field1 additionally being summed as values: | 1736 | common_timestamp and field1 additionally being summed as values: |
| 1737 | 1737 | ||
| 1738 | # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ... >> \ | 1738 | # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ...' >> \ |
| 1739 | event/trigger | 1739 | event/trigger |
| 1740 | 1740 | ||
| 1741 | Note that variable assignments can appear either preceding or | 1741 | Note that variable assignments can appear either preceding or |
| 1742 | following their use. The command below behaves identically to the | 1742 | following their use. The command below behaves identically to the |
| 1743 | command above: | 1743 | command above: |
| 1744 | 1744 | ||
| 1745 | # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ... >> \ | 1745 | # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ...' >> \ |
| 1746 | event/trigger | 1746 | event/trigger |
| 1747 | 1747 | ||
| 1748 | Any number of variables not bound to a 'vals=' prefix can also be | 1748 | Any number of variables not bound to a 'vals=' prefix can also be |
| 1749 | assigned by simply separating them with colons. Below is the same | 1749 | assigned by simply separating them with colons. Below is the same |
| 1750 | thing but without the values being summed in the histogram: | 1750 | thing but without the values being summed in the histogram: |
| 1751 | 1751 | ||
| 1752 | # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ... >> event/trigger | 1752 | # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ...' >> event/trigger |
| 1753 | 1753 | ||
| 1754 | Variables set as above can be referenced and used in expressions on | 1754 | Variables set as above can be referenced and used in expressions on |
| 1755 | another event. | 1755 | another event. |
| 1756 | 1756 | ||
| 1757 | For example, here's how a latency can be calculated: | 1757 | For example, here's how a latency can be calculated: |
| 1758 | 1758 | ||
| 1759 | # echo 'hist:keys=pid,prio:ts0=common_timestamp ... >> event1/trigger | 1759 | # echo 'hist:keys=pid,prio:ts0=common_timestamp ...' >> event1/trigger |
| 1760 | # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ... >> event2/trigger | 1760 | # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ...' >> event2/trigger |
| 1761 | 1761 | ||
| 1762 | In the first line above, the event's timetamp is saved into the | 1762 | In the first line above, the event's timetamp is saved into the |
| 1763 | variable ts0. In the next line, ts0 is subtracted from the second | 1763 | variable ts0. In the next line, ts0 is subtracted from the second |
| @@ -1766,7 +1766,7 @@ yet another variable, 'wakeup_lat'. The hist trigger below in turn | |||
| 1766 | makes use of the wakeup_lat variable to compute a combined latency | 1766 | makes use of the wakeup_lat variable to compute a combined latency |
| 1767 | using the same key and variable from yet another event: | 1767 | using the same key and variable from yet another event: |
| 1768 | 1768 | ||
| 1769 | # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ... >> event3/trigger | 1769 | # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ...' >> event3/trigger |
| 1770 | 1770 | ||
| 1771 | 2.2.2 Synthetic Events | 1771 | 2.2.2 Synthetic Events |
| 1772 | ---------------------- | 1772 | ---------------------- |
| @@ -1807,10 +1807,11 @@ the command that defined it with a '!': | |||
| 1807 | At this point, there isn't yet an actual 'wakeup_latency' event | 1807 | At this point, there isn't yet an actual 'wakeup_latency' event |
| 1808 | instantiated in the event subsytem - for this to happen, a 'hist | 1808 | instantiated in the event subsytem - for this to happen, a 'hist |
| 1809 | trigger action' needs to be instantiated and bound to actual fields | 1809 | trigger action' needs to be instantiated and bound to actual fields |
| 1810 | and variables defined on other events (see Section 6.3.3 below). | 1810 | and variables defined on other events (see Section 2.2.3 below on |
| 1811 | how that is done using hist trigger 'onmatch' action). Once that is | ||
| 1812 | done, the 'wakeup_latency' synthetic event instance is created. | ||
| 1811 | 1813 | ||
| 1812 | Once that is done, an event instance is created, and a histogram can | 1814 | A histogram can now be defined for the new synthetic event: |
| 1813 | be defined using it: | ||
| 1814 | 1815 | ||
| 1815 | # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \ | 1816 | # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \ |
| 1816 | /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger | 1817 | /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger |
| @@ -1960,7 +1961,7 @@ hist trigger specification. | |||
| 1960 | back to that pid, the timestamp difference is calculated. If the | 1961 | back to that pid, the timestamp difference is calculated. If the |
| 1961 | resulting latency, stored in wakeup_lat, exceeds the current | 1962 | resulting latency, stored in wakeup_lat, exceeds the current |
| 1962 | maximum latency, the values specified in the save() fields are | 1963 | maximum latency, the values specified in the save() fields are |
| 1963 | recoreded: | 1964 | recorded: |
| 1964 | 1965 | ||
| 1965 | # echo 'hist:keys=pid:ts0=common_timestamp.usecs \ | 1966 | # echo 'hist:keys=pid:ts0=common_timestamp.usecs \ |
| 1966 | if comm=="cyclictest"' >> \ | 1967 | if comm=="cyclictest"' >> \ |
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 495b7742ab58..d10944e619d3 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
| @@ -4610,7 +4610,7 @@ This capability indicates that kvm will implement the interfaces to handle | |||
| 4610 | reset, migration and nested KVM for branch prediction blocking. The stfle | 4610 | reset, migration and nested KVM for branch prediction blocking. The stfle |
| 4611 | facility 82 should not be provided to the guest without this capability. | 4611 | facility 82 should not be provided to the guest without this capability. |
| 4612 | 4612 | ||
| 4613 | 8.14 KVM_CAP_HYPERV_TLBFLUSH | 4613 | 8.18 KVM_CAP_HYPERV_TLBFLUSH |
| 4614 | 4614 | ||
| 4615 | Architectures: x86 | 4615 | Architectures: x86 |
| 4616 | 4616 | ||
diff --git a/MAINTAINERS b/MAINTAINERS
index edf3cf5ea691..6cfd16790add 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
| @@ -15572,6 +15572,7 @@ M: x86@kernel.org | |||
| 15572 | L: linux-kernel@vger.kernel.org | 15572 | L: linux-kernel@vger.kernel.org |
| 15573 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core | 15573 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core |
| 15574 | S: Maintained | 15574 | S: Maintained |
| 15575 | F: Documentation/devicetree/bindings/x86/ | ||
| 15575 | F: Documentation/x86/ | 15576 | F: Documentation/x86/ |
| 15576 | F: arch/x86/ | 15577 | F: arch/x86/ |
| 15577 | 15578 | ||
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 0c4805a572c8..04a4a138ed13 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
| @@ -555,11 +555,6 @@ config SMP | |||
| 555 | 555 | ||
| 556 | If you don't know what to do here, say N. | 556 | If you don't know what to do here, say N. |
| 557 | 557 | ||
| 558 | config HAVE_DEC_LOCK | ||
| 559 | bool | ||
| 560 | depends on SMP | ||
| 561 | default y | ||
| 562 | |||
| 563 | config NR_CPUS | 558 | config NR_CPUS |
| 564 | int "Maximum number of CPUs (2-32)" | 559 | int "Maximum number of CPUs (2-32)" |
| 565 | range 2 32 | 560 | range 2 32 |
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 04f9729de57c..854d5e79979e 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
| @@ -35,8 +35,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \ | |||
| 35 | callback_srm.o srm_puts.o srm_printk.o \ | 35 | callback_srm.o srm_puts.o srm_printk.o \ |
| 36 | fls.o | 36 | fls.o |
| 37 | 37 | ||
| 38 | lib-$(CONFIG_SMP) += dec_and_lock.o | ||
| 39 | |||
| 40 | # The division routines are built from single source, with different defines. | 38 | # The division routines are built from single source, with different defines. |
| 41 | AFLAGS___divqu.o = -DDIV | 39 | AFLAGS___divqu.o = -DDIV |
| 42 | AFLAGS___remqu.o = -DREM | 40 | AFLAGS___remqu.o = -DREM |
diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c
deleted file mode 100644
index a117707f57fe..000000000000
--- a/arch/alpha/lib/dec_and_lock.c
+++ /dev/null
| @@ -1,44 +0,0 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * arch/alpha/lib/dec_and_lock.c | ||
| 4 | * | ||
| 5 | * ll/sc version of atomic_dec_and_lock() | ||
| 6 | * | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/spinlock.h> | ||
| 10 | #include <linux/atomic.h> | ||
| 11 | #include <linux/export.h> | ||
| 12 | |||
| 13 | asm (".text \n\ | ||
| 14 | .global _atomic_dec_and_lock \n\ | ||
| 15 | .ent _atomic_dec_and_lock \n\ | ||
| 16 | .align 4 \n\ | ||
| 17 | _atomic_dec_and_lock: \n\ | ||
| 18 | .prologue 0 \n\ | ||
| 19 | 1: ldl_l $1, 0($16) \n\ | ||
| 20 | subl $1, 1, $1 \n\ | ||
| 21 | beq $1, 2f \n\ | ||
| 22 | stl_c $1, 0($16) \n\ | ||
| 23 | beq $1, 4f \n\ | ||
| 24 | mb \n\ | ||
| 25 | clr $0 \n\ | ||
| 26 | ret \n\ | ||
| 27 | 2: br $29, 3f \n\ | ||
| 28 | 3: ldgp $29, 0($29) \n\ | ||
| 29 | br $atomic_dec_and_lock_1..ng \n\ | ||
| 30 | .subsection 2 \n\ | ||
| 31 | 4: br 1b \n\ | ||
| 32 | .previous \n\ | ||
| 33 | .end _atomic_dec_and_lock"); | ||
| 34 | |||
| 35 | static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock) | ||
| 36 | { | ||
| 37 | /* Slow path */ | ||
| 38 | spin_lock(lock); | ||
| 39 | if (atomic_dec_and_test(atomic)) | ||
| 40 | return 1; | ||
| 41 | spin_unlock(lock); | ||
| 42 | return 0; | ||
| 43 | } | ||
| 44 | EXPORT_SYMBOL(_atomic_dec_and_lock); | ||
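With the ll/sc assembly version removed, alpha now relies on the kernel's generic atomic_dec_and_lock(). For reference, the generic helper in lib/dec_and_lock.c is roughly the following (quoted from memory rather than from this series, so treat it as a sketch): it tries a lock-free decrement first and only takes the spinlock when the count would hit zero.

	int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
	{
		/* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
		if (atomic_add_unless(atomic, -1, 1))
			return 0;

		/* Otherwise fall back to the locked slow path */
		spin_lock(lock);
		if (atomic_dec_and_test(atomic))
			return 1;
		spin_unlock(lock);
		return 0;
	}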
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 8073625371f5..07060e5b5864 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
| @@ -59,6 +59,9 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; | |||
| 59 | 59 | ||
| 60 | static __read_mostly unsigned int xen_events_irq; | 60 | static __read_mostly unsigned int xen_events_irq; |
| 61 | 61 | ||
| 62 | uint32_t xen_start_flags; | ||
| 63 | EXPORT_SYMBOL(xen_start_flags); | ||
| 64 | |||
| 62 | int xen_remap_domain_gfn_array(struct vm_area_struct *vma, | 65 | int xen_remap_domain_gfn_array(struct vm_area_struct *vma, |
| 63 | unsigned long addr, | 66 | unsigned long addr, |
| 64 | xen_pfn_t *gfn, int nr, | 67 | xen_pfn_t *gfn, int nr, |
| @@ -293,9 +296,7 @@ void __init xen_early_init(void) | |||
| 293 | xen_setup_features(); | 296 | xen_setup_features(); |
| 294 | 297 | ||
| 295 | if (xen_feature(XENFEAT_dom0)) | 298 | if (xen_feature(XENFEAT_dom0)) |
| 296 | xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED; | 299 | xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED; |
| 297 | else | ||
| 298 | xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED); | ||
| 299 | 300 | ||
| 300 | if (!console_set_on_cmdline && !xen_initial_domain()) | 301 | if (!console_set_on_cmdline && !xen_initial_domain()) |
| 301 | add_preferred_console("hvc", 0, NULL); | 302 | add_preferred_console("hvc", 0, NULL); |
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 253188fb8cb0..e3e50950a863 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
| @@ -223,8 +223,8 @@ static int ctr_encrypt(struct skcipher_request *req) | |||
| 223 | kernel_neon_begin(); | 223 | kernel_neon_begin(); |
| 224 | aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, | 224 | aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, |
| 225 | (u8 *)ctx->key_enc, rounds, blocks, walk.iv); | 225 | (u8 *)ctx->key_enc, rounds, blocks, walk.iv); |
| 226 | err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); | ||
| 227 | kernel_neon_end(); | 226 | kernel_neon_end(); |
| 227 | err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); | ||
| 228 | } | 228 | } |
| 229 | if (walk.nbytes) { | 229 | if (walk.nbytes) { |
| 230 | u8 __aligned(8) tail[AES_BLOCK_SIZE]; | 230 | u8 __aligned(8) tail[AES_BLOCK_SIZE]; |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fda9a8ca48be..fe8777b12f86 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
| @@ -306,6 +306,7 @@ struct kvm_vcpu_arch { | |||
| 306 | #define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */ | 306 | #define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */ |
| 307 | #define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */ | 307 | #define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */ |
| 308 | #define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */ | 308 | #define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */ |
| 309 | #define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */ | ||
| 309 | 310 | ||
| 310 | #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) | 311 | #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) |
| 311 | 312 | ||
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6171178075dc..a8f84812c6e8 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
| @@ -728,6 +728,17 @@ asm( | |||
| 728 | asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \ | 728 | asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \ |
| 729 | } while (0) | 729 | } while (0) |
| 730 | 730 | ||
| 731 | /* | ||
| 732 | * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the | ||
| 733 | * set mask are set. Other bits are left as-is. | ||
| 734 | */ | ||
| 735 | #define sysreg_clear_set(sysreg, clear, set) do { \ | ||
| 736 | u64 __scs_val = read_sysreg(sysreg); \ | ||
| 737 | u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \ | ||
| 738 | if (__scs_new != __scs_val) \ | ||
| 739 | write_sysreg(__scs_new, sysreg); \ | ||
| 740 | } while (0) | ||
| 741 | |||
| 731 | static inline void config_sctlr_el1(u32 clear, u32 set) | 742 | static inline void config_sctlr_el1(u32 clear, u32 set) |
| 732 | { | 743 | { |
| 733 | u32 val; | 744 | u32 val; |
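The new sysreg_clear_set() helper does a read-modify-write of the named system register and skips the write when the value would not change. A minimal usage sketch, mirroring the CPACR_EL1 updates made by the arch/arm64/kvm/fpsimd.c hunk later in this diff (the wrapper function itself is hypothetical):

	/* Hypothetical wrapper: enable or disable SVE for EL0 in one call. */
	static void cpacr_update_zen_el0(bool enable)
	{
		if (enable)
			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);	/* set the bit */
		else
			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);	/* clear the bit */
	}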
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d2856b129097..f24892a40d2c 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
| @@ -937,7 +937,7 @@ static int __init parse_kpti(char *str) | |||
| 937 | __kpti_forced = enabled ? 1 : -1; | 937 | __kpti_forced = enabled ? 1 : -1; |
| 938 | return 0; | 938 | return 0; |
| 939 | } | 939 | } |
| 940 | __setup("kpti=", parse_kpti); | 940 | early_param("kpti", parse_kpti); |
| 941 | #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ | 941 | #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ |
| 942 | 942 | ||
| 943 | #ifdef CONFIG_ARM64_HW_AFDBM | 943 | #ifdef CONFIG_ARM64_HW_AFDBM |
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index f3e2e3aec0b0..2faa9863d2e5 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
| @@ -179,7 +179,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
| 179 | * This is the secondary CPU boot entry. We're using this CPUs | 179 | * This is the secondary CPU boot entry. We're using this CPUs |
| 180 | * idle thread stack, but a set of temporary page tables. | 180 | * idle thread stack, but a set of temporary page tables. |
| 181 | */ | 181 | */ |
| 182 | asmlinkage void secondary_start_kernel(void) | 182 | asmlinkage notrace void secondary_start_kernel(void) |
| 183 | { | 183 | { |
| 184 | u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; | 184 | u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; |
| 185 | struct mm_struct *mm = &init_mm; | 185 | struct mm_struct *mm = &init_mm; |
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index dc6ecfa5a2d2..aac7808ce216 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
| @@ -5,13 +5,14 @@ | |||
| 5 | * Copyright 2018 Arm Limited | 5 | * Copyright 2018 Arm Limited |
| 6 | * Author: Dave Martin <Dave.Martin@arm.com> | 6 | * Author: Dave Martin <Dave.Martin@arm.com> |
| 7 | */ | 7 | */ |
| 8 | #include <linux/bottom_half.h> | 8 | #include <linux/irqflags.h> |
| 9 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
| 10 | #include <linux/thread_info.h> | 10 | #include <linux/thread_info.h> |
| 11 | #include <linux/kvm_host.h> | 11 | #include <linux/kvm_host.h> |
| 12 | #include <asm/kvm_asm.h> | 12 | #include <asm/kvm_asm.h> |
| 13 | #include <asm/kvm_host.h> | 13 | #include <asm/kvm_host.h> |
| 14 | #include <asm/kvm_mmu.h> | 14 | #include <asm/kvm_mmu.h> |
| 15 | #include <asm/sysreg.h> | ||
| 15 | 16 | ||
| 16 | /* | 17 | /* |
| 17 | * Called on entry to KVM_RUN unless this vcpu previously ran at least | 18 | * Called on entry to KVM_RUN unless this vcpu previously ran at least |
| @@ -61,10 +62,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) | |||
| 61 | { | 62 | { |
| 62 | BUG_ON(!current->mm); | 63 | BUG_ON(!current->mm); |
| 63 | 64 | ||
| 64 | vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE); | 65 | vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | |
| 66 | KVM_ARM64_HOST_SVE_IN_USE | | ||
| 67 | KVM_ARM64_HOST_SVE_ENABLED); | ||
| 65 | vcpu->arch.flags |= KVM_ARM64_FP_HOST; | 68 | vcpu->arch.flags |= KVM_ARM64_FP_HOST; |
| 69 | |||
| 66 | if (test_thread_flag(TIF_SVE)) | 70 | if (test_thread_flag(TIF_SVE)) |
| 67 | vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE; | 71 | vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE; |
| 72 | |||
| 73 | if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN) | ||
| 74 | vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED; | ||
| 68 | } | 75 | } |
| 69 | 76 | ||
| 70 | /* | 77 | /* |
| @@ -92,19 +99,30 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) | |||
| 92 | */ | 99 | */ |
| 93 | void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) | 100 | void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) |
| 94 | { | 101 | { |
| 95 | local_bh_disable(); | 102 | unsigned long flags; |
| 96 | 103 | ||
| 97 | update_thread_flag(TIF_SVE, | 104 | local_irq_save(flags); |
| 98 | vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE); | ||
| 99 | 105 | ||
| 100 | if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { | 106 | if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { |
| 101 | /* Clean guest FP state to memory and invalidate cpu view */ | 107 | /* Clean guest FP state to memory and invalidate cpu view */ |
| 102 | fpsimd_save(); | 108 | fpsimd_save(); |
| 103 | fpsimd_flush_cpu_state(); | 109 | fpsimd_flush_cpu_state(); |
| 104 | } else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { | 110 | } else if (system_supports_sve()) { |
| 105 | /* Ensure user trap controls are correctly restored */ | 111 | /* |
| 106 | fpsimd_bind_task_to_cpu(); | 112 | * The FPSIMD/SVE state in the CPU has not been touched, and we |
| 113 | * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been | ||
| 114 | * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE | ||
| 115 | * for EL0. To avoid spurious traps, restore the trap state | ||
| 116 | * seen by kvm_arch_vcpu_load_fp(): | ||
| 117 | */ | ||
| 118 | if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED) | ||
| 119 | sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN); | ||
| 120 | else | ||
| 121 | sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0); | ||
| 107 | } | 122 | } |
| 108 | 123 | ||
| 109 | local_bh_enable(); | 124 | update_thread_flag(TIF_SVE, |
| 125 | vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE); | ||
| 126 | |||
| 127 | local_irq_restore(flags); | ||
| 110 | } | 128 | } |
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 49e217ac7e1e..61e93f0b5482 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
| @@ -583,13 +583,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, | |||
| 583 | size >> PAGE_SHIFT); | 583 | size >> PAGE_SHIFT); |
| 584 | return NULL; | 584 | return NULL; |
| 585 | } | 585 | } |
| 586 | if (!coherent) | ||
| 587 | __dma_flush_area(page_to_virt(page), iosize); | ||
| 588 | |||
| 589 | addr = dma_common_contiguous_remap(page, size, VM_USERMAP, | 586 | addr = dma_common_contiguous_remap(page, size, VM_USERMAP, |
| 590 | prot, | 587 | prot, |
| 591 | __builtin_return_address(0)); | 588 | __builtin_return_address(0)); |
| 592 | if (!addr) { | 589 | if (addr) { |
| 590 | memset(addr, 0, size); | ||
| 591 | if (!coherent) | ||
| 592 | __dma_flush_area(page_to_virt(page), iosize); | ||
| 593 | } else { | ||
| 593 | iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs); | 594 | iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs); |
| 594 | dma_release_from_contiguous(dev, page, | 595 | dma_release_from_contiguous(dev, page, |
| 595 | size >> PAGE_SHIFT); | 596 | size >> PAGE_SHIFT); |
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 5f9a73a4452c..03646e6a2ef4 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
| @@ -217,8 +217,9 @@ ENDPROC(idmap_cpu_replace_ttbr1) | |||
| 217 | 217 | ||
| 218 | .macro __idmap_kpti_put_pgtable_ent_ng, type | 218 | .macro __idmap_kpti_put_pgtable_ent_ng, type |
| 219 | orr \type, \type, #PTE_NG // Same bit for blocks and pages | 219 | orr \type, \type, #PTE_NG // Same bit for blocks and pages |
| 220 | str \type, [cur_\()\type\()p] // Update the entry and ensure it | 220 | str \type, [cur_\()\type\()p] // Update the entry and ensure |
| 221 | dc civac, cur_\()\type\()p // is visible to all CPUs. | 221 | dmb sy // that it is visible to all |
| 222 | dc civac, cur_\()\type\()p // CPUs. | ||
| 222 | .endm | 223 | .endm |
| 223 | 224 | ||
| 224 | /* | 225 | /* |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3f9deec70b92..08c10c518f83 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
| @@ -65,6 +65,7 @@ config MIPS | |||
| 65 | select HAVE_OPROFILE | 65 | select HAVE_OPROFILE |
| 66 | select HAVE_PERF_EVENTS | 66 | select HAVE_PERF_EVENTS |
| 67 | select HAVE_REGS_AND_STACK_ACCESS_API | 67 | select HAVE_REGS_AND_STACK_ACCESS_API |
| 68 | select HAVE_RSEQ | ||
| 68 | select HAVE_STACKPROTECTOR | 69 | select HAVE_STACKPROTECTOR |
| 69 | select HAVE_SYSCALL_TRACEPOINTS | 70 | select HAVE_SYSCALL_TRACEPOINTS |
| 70 | select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP | 71 | select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP |
diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
index 6b2c6f3baefa..75fb96ca61db 100644
--- a/arch/mips/ath79/mach-pb44.c
+++ b/arch/mips/ath79/mach-pb44.c
| @@ -34,7 +34,7 @@ | |||
| 34 | #define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL) | 34 | #define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL) |
| 35 | 35 | ||
| 36 | static struct gpiod_lookup_table pb44_i2c_gpiod_table = { | 36 | static struct gpiod_lookup_table pb44_i2c_gpiod_table = { |
| 37 | .dev_id = "i2c-gpio", | 37 | .dev_id = "i2c-gpio.0", |
| 38 | .table = { | 38 | .table = { |
| 39 | GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA, | 39 | GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA, |
| 40 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 40 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 6054d49e608e..8c9cbf13d32a 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
| @@ -212,6 +212,12 @@ static int __init bcm47xx_cpu_fixes(void) | |||
| 212 | */ | 212 | */ |
| 213 | if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) | 213 | if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) |
| 214 | cpu_wait = NULL; | 214 | cpu_wait = NULL; |
| 215 | |||
| 216 | /* | ||
| 217 | * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail" | ||
| 218 | * Enable ExternalSync for sync instruction to take effect | ||
| 219 | */ | ||
| 220 | set_c0_config7(MIPS_CONF7_ES); | ||
| 215 | break; | 221 | break; |
| 216 | #endif | 222 | #endif |
| 217 | } | 223 | } |
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index a7d0b836f2f7..cea8ad864b3f 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
| @@ -414,6 +414,8 @@ static inline type pfx##in##bwlq##p(unsigned long port) \ | |||
| 414 | __val = *__addr; \ | 414 | __val = *__addr; \ |
| 415 | slow; \ | 415 | slow; \ |
| 416 | \ | 416 | \ |
| 417 | /* prevent prefetching of coherent DMA data prematurely */ \ | ||
| 418 | rmb(); \ | ||
| 417 | return pfx##ioswab##bwlq(__addr, __val); \ | 419 | return pfx##ioswab##bwlq(__addr, __val); \ |
| 418 | } | 420 | } |
| 419 | 421 | ||
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index ae461d91cd1f..0bc270806ec5 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
| @@ -681,6 +681,8 @@ | |||
| 681 | #define MIPS_CONF7_WII (_ULCAST_(1) << 31) | 681 | #define MIPS_CONF7_WII (_ULCAST_(1) << 31) |
| 682 | 682 | ||
| 683 | #define MIPS_CONF7_RPS (_ULCAST_(1) << 2) | 683 | #define MIPS_CONF7_RPS (_ULCAST_(1) << 2) |
| 684 | /* ExternalSync */ | ||
| 685 | #define MIPS_CONF7_ES (_ULCAST_(1) << 8) | ||
| 684 | 686 | ||
| 685 | #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) | 687 | #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) |
| 686 | #define MIPS_CONF7_AR (_ULCAST_(1) << 16) | 688 | #define MIPS_CONF7_AR (_ULCAST_(1) << 16) |
| @@ -2765,6 +2767,7 @@ __BUILD_SET_C0(status) | |||
| 2765 | __BUILD_SET_C0(cause) | 2767 | __BUILD_SET_C0(cause) |
| 2766 | __BUILD_SET_C0(config) | 2768 | __BUILD_SET_C0(config) |
| 2767 | __BUILD_SET_C0(config5) | 2769 | __BUILD_SET_C0(config5) |
| 2770 | __BUILD_SET_C0(config7) | ||
| 2768 | __BUILD_SET_C0(intcontrol) | 2771 | __BUILD_SET_C0(intcontrol) |
| 2769 | __BUILD_SET_C0(intctl) | 2772 | __BUILD_SET_C0(intctl) |
| 2770 | __BUILD_SET_C0(srsmap) | 2773 | __BUILD_SET_C0(srsmap) |
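__BUILD_SET_C0(config7) generates the usual set_c0_config7()/clear_c0_config7()/change_c0_config7() accessors on top of the CP0 Config7 read/write helpers; the set_c0_config7(MIPS_CONF7_ES) call in the bcm47xx hunk above relies on this. Simplified from the macro (not a literal expansion), the generated set helper amounts to:

	static inline unsigned int set_c0_config7(unsigned int set)
	{
		unsigned int res = read_c0_config7();	/* current Config7 value */

		write_c0_config7(res | set);		/* OR in the requested bits */
		return res;				/* return the previous value */
	}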
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index bb05e9916a5f..f25dd1d83fb7 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
| @@ -388,17 +388,19 @@ | |||
| 388 | #define __NR_pkey_alloc (__NR_Linux + 364) | 388 | #define __NR_pkey_alloc (__NR_Linux + 364) |
| 389 | #define __NR_pkey_free (__NR_Linux + 365) | 389 | #define __NR_pkey_free (__NR_Linux + 365) |
| 390 | #define __NR_statx (__NR_Linux + 366) | 390 | #define __NR_statx (__NR_Linux + 366) |
| 391 | #define __NR_rseq (__NR_Linux + 367) | ||
| 392 | #define __NR_io_pgetevents (__NR_Linux + 368) | ||
| 391 | 393 | ||
| 392 | 394 | ||
| 393 | /* | 395 | /* |
| 394 | * Offset of the last Linux o32 flavoured syscall | 396 | * Offset of the last Linux o32 flavoured syscall |
| 395 | */ | 397 | */ |
| 396 | #define __NR_Linux_syscalls 366 | 398 | #define __NR_Linux_syscalls 368 |
| 397 | 399 | ||
| 398 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | 400 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ |
| 399 | 401 | ||
| 400 | #define __NR_O32_Linux 4000 | 402 | #define __NR_O32_Linux 4000 |
| 401 | #define __NR_O32_Linux_syscalls 366 | 403 | #define __NR_O32_Linux_syscalls 368 |
| 402 | 404 | ||
| 403 | #if _MIPS_SIM == _MIPS_SIM_ABI64 | 405 | #if _MIPS_SIM == _MIPS_SIM_ABI64 |
| 404 | 406 | ||
| @@ -733,16 +735,18 @@ | |||
| 733 | #define __NR_pkey_alloc (__NR_Linux + 324) | 735 | #define __NR_pkey_alloc (__NR_Linux + 324) |
| 734 | #define __NR_pkey_free (__NR_Linux + 325) | 736 | #define __NR_pkey_free (__NR_Linux + 325) |
| 735 | #define __NR_statx (__NR_Linux + 326) | 737 | #define __NR_statx (__NR_Linux + 326) |
| 738 | #define __NR_rseq (__NR_Linux + 327) | ||
| 739 | #define __NR_io_pgetevents (__NR_Linux + 328) | ||
| 736 | 740 | ||
| 737 | /* | 741 | /* |
| 738 | * Offset of the last Linux 64-bit flavoured syscall | 742 | * Offset of the last Linux 64-bit flavoured syscall |
| 739 | */ | 743 | */ |
| 740 | #define __NR_Linux_syscalls 326 | 744 | #define __NR_Linux_syscalls 328 |
| 741 | 745 | ||
| 742 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ | 746 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ |
| 743 | 747 | ||
| 744 | #define __NR_64_Linux 5000 | 748 | #define __NR_64_Linux 5000 |
| 745 | #define __NR_64_Linux_syscalls 326 | 749 | #define __NR_64_Linux_syscalls 328 |
| 746 | 750 | ||
| 747 | #if _MIPS_SIM == _MIPS_SIM_NABI32 | 751 | #if _MIPS_SIM == _MIPS_SIM_NABI32 |
| 748 | 752 | ||
| @@ -1081,15 +1085,17 @@ | |||
| 1081 | #define __NR_pkey_alloc (__NR_Linux + 328) | 1085 | #define __NR_pkey_alloc (__NR_Linux + 328) |
| 1082 | #define __NR_pkey_free (__NR_Linux + 329) | 1086 | #define __NR_pkey_free (__NR_Linux + 329) |
| 1083 | #define __NR_statx (__NR_Linux + 330) | 1087 | #define __NR_statx (__NR_Linux + 330) |
| 1088 | #define __NR_rseq (__NR_Linux + 331) | ||
| 1089 | #define __NR_io_pgetevents (__NR_Linux + 332) | ||
| 1084 | 1090 | ||
| 1085 | /* | 1091 | /* |
| 1086 | * Offset of the last N32 flavoured syscall | 1092 | * Offset of the last N32 flavoured syscall |
| 1087 | */ | 1093 | */ |
| 1088 | #define __NR_Linux_syscalls 330 | 1094 | #define __NR_Linux_syscalls 332 |
| 1089 | 1095 | ||
| 1090 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ | 1096 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ |
| 1091 | 1097 | ||
| 1092 | #define __NR_N32_Linux 6000 | 1098 | #define __NR_N32_Linux 6000 |
| 1093 | #define __NR_N32_Linux_syscalls 330 | 1099 | #define __NR_N32_Linux_syscalls 332 |
| 1094 | 1100 | ||
| 1095 | #endif /* _UAPI_ASM_UNISTD_H */ | 1101 | #endif /* _UAPI_ASM_UNISTD_H */ |
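Working the new entries through the per-ABI bases defined above gives the absolute syscall numbers being added:

	o32:  __NR_rseq = 4000 + 367 = 4367,  __NR_io_pgetevents = 4000 + 368 = 4368
	n64:  __NR_rseq = 5000 + 327 = 5327,  __NR_io_pgetevents = 5000 + 328 = 5328
	n32:  __NR_rseq = 6000 + 331 = 6331,  __NR_io_pgetevents = 6000 + 332 = 6332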
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 38a302919e6b..d7de8adcfcc8 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
| @@ -79,6 +79,10 @@ FEXPORT(ret_from_fork) | |||
| 79 | jal schedule_tail # a0 = struct task_struct *prev | 79 | jal schedule_tail # a0 = struct task_struct *prev |
| 80 | 80 | ||
| 81 | FEXPORT(syscall_exit) | 81 | FEXPORT(syscall_exit) |
| 82 | #ifdef CONFIG_DEBUG_RSEQ | ||
| 83 | move a0, sp | ||
| 84 | jal rseq_syscall | ||
| 85 | #endif | ||
| 82 | local_irq_disable # make sure need_resched and | 86 | local_irq_disable # make sure need_resched and |
| 83 | # signals dont change between | 87 | # signals dont change between |
| 84 | # sampling and return | 88 | # sampling and return |
| @@ -141,6 +145,10 @@ work_notifysig: # deal with pending signals and | |||
| 141 | j resume_userspace_check | 145 | j resume_userspace_check |
| 142 | 146 | ||
| 143 | FEXPORT(syscall_exit_partial) | 147 | FEXPORT(syscall_exit_partial) |
| 148 | #ifdef CONFIG_DEBUG_RSEQ | ||
| 149 | move a0, sp | ||
| 150 | jal rseq_syscall | ||
| 151 | #endif | ||
| 144 | local_irq_disable # make sure need_resched doesn't | 152 | local_irq_disable # make sure need_resched doesn't |
| 145 | # change between and return | 153 | # change between and return |
| 146 | LONG_L a2, TI_FLAGS($28) # current->work | 154 | LONG_L a2, TI_FLAGS($28) # current->work |
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index f2ee7e1e3342..cff52b283e03 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
| @@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra) | |||
| 119 | EXPORT_SYMBOL(_mcount) | 119 | EXPORT_SYMBOL(_mcount) |
| 120 | PTR_LA t1, ftrace_stub | 120 | PTR_LA t1, ftrace_stub |
| 121 | PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ | 121 | PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ |
| 122 | bne t1, t2, static_trace | 122 | beq t1, t2, fgraph_trace |
| 123 | nop | 123 | nop |
| 124 | 124 | ||
| 125 | MCOUNT_SAVE_REGS | ||
| 126 | |||
| 127 | move a0, ra /* arg1: self return address */ | ||
| 128 | jalr t2 /* (1) call *ftrace_trace_function */ | ||
| 129 | move a1, AT /* arg2: parent's return address */ | ||
| 130 | |||
| 131 | MCOUNT_RESTORE_REGS | ||
| 132 | |||
| 133 | fgraph_trace: | ||
| 125 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 134 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 135 | PTR_LA t1, ftrace_stub | ||
| 126 | PTR_L t3, ftrace_graph_return | 136 | PTR_L t3, ftrace_graph_return |
| 127 | bne t1, t3, ftrace_graph_caller | 137 | bne t1, t3, ftrace_graph_caller |
| 128 | nop | 138 | nop |
| @@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount) | |||
| 131 | bne t1, t3, ftrace_graph_caller | 141 | bne t1, t3, ftrace_graph_caller |
| 132 | nop | 142 | nop |
| 133 | #endif | 143 | #endif |
| 134 | b ftrace_stub | ||
| 135 | #ifdef CONFIG_32BIT | ||
| 136 | addiu sp, sp, 8 | ||
| 137 | #else | ||
| 138 | nop | ||
| 139 | #endif | ||
| 140 | 144 | ||
| 141 | static_trace: | ||
| 142 | MCOUNT_SAVE_REGS | ||
| 143 | |||
| 144 | move a0, ra /* arg1: self return address */ | ||
| 145 | jalr t2 /* (1) call *ftrace_trace_function */ | ||
| 146 | move a1, AT /* arg2: parent's return address */ | ||
| 147 | |||
| 148 | MCOUNT_RESTORE_REGS | ||
| 149 | #ifdef CONFIG_32BIT | 145 | #ifdef CONFIG_32BIT |
| 150 | addiu sp, sp, 8 | 146 | addiu sp, sp, 8 |
| 151 | #endif | 147 | #endif |
| 148 | |||
| 152 | .globl ftrace_stub | 149 | .globl ftrace_stub |
| 153 | ftrace_stub: | 150 | ftrace_stub: |
| 154 | RETURN_BACK | 151 | RETURN_BACK |
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a9a7d78803cd..91d3c8c46097 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
| @@ -590,3 +590,5 @@ EXPORT(sys_call_table) | |||
| 590 | PTR sys_pkey_alloc | 590 | PTR sys_pkey_alloc |
| 591 | PTR sys_pkey_free /* 4365 */ | 591 | PTR sys_pkey_free /* 4365 */ |
| 592 | PTR sys_statx | 592 | PTR sys_statx |
| 593 | PTR sys_rseq | ||
| 594 | PTR sys_io_pgetevents | ||
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 65d5aeeb9bdb..358d9599983d 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
| @@ -439,4 +439,6 @@ EXPORT(sys_call_table) | |||
| 439 | PTR sys_pkey_alloc | 439 | PTR sys_pkey_alloc |
| 440 | PTR sys_pkey_free /* 5325 */ | 440 | PTR sys_pkey_free /* 5325 */ |
| 441 | PTR sys_statx | 441 | PTR sys_statx |
| 442 | PTR sys_rseq | ||
| 443 | PTR sys_io_pgetevents | ||
| 442 | .size sys_call_table,.-sys_call_table | 444 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index cbf190ef9e8a..c65eaacc1abf 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
| @@ -434,4 +434,6 @@ EXPORT(sysn32_call_table) | |||
| 434 | PTR sys_pkey_alloc | 434 | PTR sys_pkey_alloc |
| 435 | PTR sys_pkey_free | 435 | PTR sys_pkey_free |
| 436 | PTR sys_statx /* 6330 */ | 436 | PTR sys_statx /* 6330 */ |
| 437 | PTR sys_rseq | ||
| 438 | PTR compat_sys_io_pgetevents | ||
| 437 | .size sysn32_call_table,.-sysn32_call_table | 439 | .size sysn32_call_table,.-sysn32_call_table |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 9ebe3e2403b1..73913f072e39 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
| @@ -583,4 +583,6 @@ EXPORT(sys32_call_table) | |||
| 583 | PTR sys_pkey_alloc | 583 | PTR sys_pkey_alloc |
| 584 | PTR sys_pkey_free /* 4365 */ | 584 | PTR sys_pkey_free /* 4365 */ |
| 585 | PTR sys_statx | 585 | PTR sys_statx |
| 586 | PTR sys_rseq | ||
| 587 | PTR compat_sys_io_pgetevents | ||
| 586 | .size sys32_call_table,.-sys32_call_table | 588 | .size sys32_call_table,.-sys32_call_table |
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 9e224469c788..00f2535d2226 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
| @@ -801,6 +801,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) | |||
| 801 | regs->regs[0] = 0; /* Don't deal with this again. */ | 801 | regs->regs[0] = 0; /* Don't deal with this again. */ |
| 802 | } | 802 | } |
| 803 | 803 | ||
| 804 | rseq_signal_deliver(regs); | ||
| 805 | |||
| 804 | if (sig_uses_siginfo(&ksig->ka, abi)) | 806 | if (sig_uses_siginfo(&ksig->ka, abi)) |
| 805 | ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn, | 807 | ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn, |
| 806 | ksig, regs, oldset); | 808 | ksig, regs, oldset); |
| @@ -868,6 +870,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, | |||
| 868 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 870 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
| 869 | clear_thread_flag(TIF_NOTIFY_RESUME); | 871 | clear_thread_flag(TIF_NOTIFY_RESUME); |
| 870 | tracehook_notify_resume(regs); | 872 | tracehook_notify_resume(regs); |
| 873 | rseq_handle_notify_resume(regs); | ||
| 871 | } | 874 | } |
| 872 | 875 | ||
| 873 | user_enter(); | 876 | user_enter(); |
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index bd06a3ccda31..2ea575cb3401 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
| @@ -244,6 +244,7 @@ cpu-as-$(CONFIG_4xx) += -Wa,-m405 | |||
| 244 | cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) | 244 | cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) |
| 245 | cpu-as-$(CONFIG_E200) += -Wa,-me200 | 245 | cpu-as-$(CONFIG_E200) += -Wa,-me200 |
| 246 | cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 | 246 | cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 |
| 247 | cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc) | ||
| 247 | 248 | ||
| 248 | KBUILD_AFLAGS += $(cpu-as-y) | 249 | KBUILD_AFLAGS += $(cpu-as-y) |
| 249 | KBUILD_CFLAGS += $(cpu-as-y) | 250 | KBUILD_CFLAGS += $(cpu-as-y) |
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 6a6673907e45..e4633803fe43 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
| @@ -108,6 +108,7 @@ static inline void pgtable_free(void *table, unsigned index_size) | |||
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | #define check_pgt_cache() do { } while (0) | 110 | #define check_pgt_cache() do { } while (0) |
| 111 | #define get_hugepd_cache_index(x) (x) | ||
| 111 | 112 | ||
| 112 | #ifdef CONFIG_SMP | 113 | #ifdef CONFIG_SMP |
| 113 | static inline void pgtable_free_tlb(struct mmu_gather *tlb, | 114 | static inline void pgtable_free_tlb(struct mmu_gather *tlb, |
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index af5f2baac80f..a069dfcac9a9 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
| @@ -49,6 +49,27 @@ static inline int hugepd_ok(hugepd_t hpd) | |||
| 49 | } | 49 | } |
| 50 | #define is_hugepd(hpd) (hugepd_ok(hpd)) | 50 | #define is_hugepd(hpd) (hugepd_ok(hpd)) |
| 51 | 51 | ||
| 52 | /* | ||
| 53 | * 16M and 16G huge page directory tables are allocated from slab cache | ||
| 54 | * | ||
| 55 | */ | ||
| 56 | #define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24) | ||
| 57 | #define H_16G_CACHE_INDEX \ | ||
| 58 | (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34) | ||
| 59 | |||
| 60 | static inline int get_hugepd_cache_index(int index) | ||
| 61 | { | ||
| 62 | switch (index) { | ||
| 63 | case H_16M_CACHE_INDEX: | ||
| 64 | return HTLB_16M_INDEX; | ||
| 65 | case H_16G_CACHE_INDEX: | ||
| 66 | return HTLB_16G_INDEX; | ||
| 67 | default: | ||
| 68 | BUG(); | ||
| 69 | } | ||
| 70 | /* should not reach */ | ||
| 71 | } | ||
| 72 | |||
| 52 | #else /* !CONFIG_HUGETLB_PAGE */ | 73 | #else /* !CONFIG_HUGETLB_PAGE */ |
| 53 | static inline int pmd_huge(pmd_t pmd) { return 0; } | 74 | static inline int pmd_huge(pmd_t pmd) { return 0; } |
| 54 | static inline int pud_huge(pud_t pud) { return 0; } | 75 | static inline int pud_huge(pud_t pud) { return 0; } |
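To make the two cache-index constants concrete, assume the usual book3s64 4K hash geometry (PAGE_SHIFT = 12, H_PTE_INDEX_SIZE = 9, H_PMD_INDEX_SIZE = 7, H_PUD_INDEX_SIZE = 9; these values are not part of this hunk, so treat the numbers as illustrative):

	H_16M_CACHE_INDEX = 12 + 9 + 7 - 24     = 4   (16-entry hugepd directory for 16M pages)
	H_16G_CACHE_INDEX = 12 + 9 + 7 + 9 - 34 = 3   (8-entry hugepd directory for 16G pages)

get_hugepd_cache_index() maps these sizes to HTLB_16M_INDEX and HTLB_16G_INDEX so that pgtable_free() in the pgtable-book3s64.c hunk below can return them to the matching kmem cache.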
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index fb4b3ba52339..d7ee249d6890 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
| @@ -45,8 +45,17 @@ static inline int hugepd_ok(hugepd_t hpd) | |||
| 45 | { | 45 | { |
| 46 | return 0; | 46 | return 0; |
| 47 | } | 47 | } |
| 48 | |||
| 48 | #define is_hugepd(pdep) 0 | 49 | #define is_hugepd(pdep) 0 |
| 49 | 50 | ||
| 51 | /* | ||
| 52 | * This should never get called | ||
| 53 | */ | ||
| 54 | static inline int get_hugepd_cache_index(int index) | ||
| 55 | { | ||
| 56 | BUG(); | ||
| 57 | } | ||
| 58 | |||
| 50 | #else /* !CONFIG_HUGETLB_PAGE */ | 59 | #else /* !CONFIG_HUGETLB_PAGE */ |
| 51 | static inline int pmd_huge(pmd_t pmd) { return 0; } | 60 | static inline int pmd_huge(pmd_t pmd) { return 0; } |
| 52 | static inline int pud_huge(pud_t pud) { return 0; } | 61 | static inline int pud_huge(pud_t pud) { return 0; } |
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 63cee159022b..42aafba7a308 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
| @@ -287,6 +287,11 @@ enum pgtable_index { | |||
| 287 | PMD_INDEX, | 287 | PMD_INDEX, |
| 288 | PUD_INDEX, | 288 | PUD_INDEX, |
| 289 | PGD_INDEX, | 289 | PGD_INDEX, |
| 290 | /* | ||
| 291 | * Below are used with 4k page size and hugetlb | ||
| 292 | */ | ||
| 293 | HTLB_16M_INDEX, | ||
| 294 | HTLB_16G_INDEX, | ||
| 290 | }; | 295 | }; |
| 291 | 296 | ||
| 292 | extern unsigned long __vmalloc_start; | 297 | extern unsigned long __vmalloc_start; |
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
index 0f571e0ebca1..bd9ba8defd72 100644
--- a/arch/powerpc/include/asm/nmi.h
+++ b/arch/powerpc/include/asm/nmi.h
| @@ -8,7 +8,7 @@ extern void arch_touch_nmi_watchdog(void); | |||
| 8 | static inline void arch_touch_nmi_watchdog(void) {} | 8 | static inline void arch_touch_nmi_watchdog(void) {} |
| 9 | #endif | 9 | #endif |
| 10 | 10 | ||
| 11 | #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_STACKTRACE) | 11 | #if defined(CONFIG_NMI_IPI) && defined(CONFIG_STACKTRACE) |
| 12 | extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, | 12 | extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, |
| 13 | bool exclude_self); | 13 | bool exclude_self); |
| 14 | #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace | 14 | #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace |
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 1707781d2f20..9de40eb614da 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
| @@ -109,6 +109,7 @@ static inline void pgtable_free(void *table, unsigned index_size) | |||
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | #define check_pgt_cache() do { } while (0) | 111 | #define check_pgt_cache() do { } while (0) |
| 112 | #define get_hugepd_cache_index(x) (x) | ||
| 112 | 113 | ||
| 113 | #ifdef CONFIG_SMP | 114 | #ifdef CONFIG_SMP |
| 114 | static inline void pgtable_free_tlb(struct mmu_gather *tlb, | 115 | static inline void pgtable_free_tlb(struct mmu_gather *tlb, |
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index 0e693f322cb2..e2d62d033708 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
| @@ -141,6 +141,7 @@ static inline void pgtable_free(void *table, int shift) | |||
| 141 | } | 141 | } |
| 142 | } | 142 | } |
| 143 | 143 | ||
| 144 | #define get_hugepd_cache_index(x) (x) | ||
| 144 | #ifdef CONFIG_SMP | 145 | #ifdef CONFIG_SMP |
| 145 | static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) | 146 | static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) |
| 146 | { | 147 | { |
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 4be1c0de9406..96dd3d871986 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
| @@ -711,7 +711,8 @@ static __init void cpufeatures_cpu_quirks(void) | |||
| 711 | cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST; | 711 | cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST; |
| 712 | cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG; | 712 | cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG; |
| 713 | cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; | 713 | cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; |
| 714 | } else /* DD2.1 and up have DD2_1 */ | 714 | } else if ((version & 0xffff0000) == 0x004e0000) |
| 715 | /* DD2.1 and up have DD2_1 */ | ||
| 715 | cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; | 716 | cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; |
| 716 | 717 | ||
| 717 | if ((version & 0xffff0000) == 0x004e0000) { | 718 | if ((version & 0xffff0000) == 0x004e0000) { |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 62b1a40d8957..40b44bb53a4e 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
| @@ -701,11 +701,18 @@ static int ppc_panic_event(struct notifier_block *this, | |||
| 701 | unsigned long event, void *ptr) | 701 | unsigned long event, void *ptr) |
| 702 | { | 702 | { |
| 703 | /* | 703 | /* |
| 704 | * panic does a local_irq_disable, but we really | ||
| 705 | * want interrupts to be hard disabled. | ||
| 706 | */ | ||
| 707 | hard_irq_disable(); | ||
| 708 | |||
| 709 | /* | ||
| 704 | * If firmware-assisted dump has been registered then trigger | 710 | * If firmware-assisted dump has been registered then trigger |
| 705 | * firmware-assisted dump and let firmware handle everything else. | 711 | * firmware-assisted dump and let firmware handle everything else. |
| 706 | */ | 712 | */ |
| 707 | crash_fadump(NULL, ptr); | 713 | crash_fadump(NULL, ptr); |
| 708 | ppc_md.panic(ptr); /* May not return */ | 714 | if (ppc_md.panic) |
| 715 | ppc_md.panic(ptr); /* May not return */ | ||
| 709 | return NOTIFY_DONE; | 716 | return NOTIFY_DONE; |
| 710 | } | 717 | } |
| 711 | 718 | ||
| @@ -716,7 +723,8 @@ static struct notifier_block ppc_panic_block = { | |||
| 716 | 723 | ||
| 717 | void __init setup_panic(void) | 724 | void __init setup_panic(void) |
| 718 | { | 725 | { |
| 719 | if (!ppc_md.panic) | 726 | /* PPC64 always does a hard irq disable in its panic handler */ |
| 727 | if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic) | ||
| 720 | return; | 728 | return; |
| 721 | atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block); | 729 | atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block); |
| 722 | } | 730 | } |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 7a7ce8ad455e..225bc5f91049 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
| @@ -387,6 +387,14 @@ void early_setup_secondary(void) | |||
| 387 | 387 | ||
| 388 | #endif /* CONFIG_SMP */ | 388 | #endif /* CONFIG_SMP */ |
| 389 | 389 | ||
| 390 | void panic_smp_self_stop(void) | ||
| 391 | { | ||
| 392 | hard_irq_disable(); | ||
| 393 | spin_begin(); | ||
| 394 | while (1) | ||
| 395 | spin_cpu_relax(); | ||
| 396 | } | ||
| 397 | |||
| 390 | #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE) | 398 | #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE) |
| 391 | static bool use_spinloop(void) | 399 | static bool use_spinloop(void) |
| 392 | { | 400 | { |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 5eadfffabe35..4794d6b4f4d2 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
| @@ -600,9 +600,6 @@ static void nmi_stop_this_cpu(struct pt_regs *regs) | |||
| 600 | nmi_ipi_busy_count--; | 600 | nmi_ipi_busy_count--; |
| 601 | nmi_ipi_unlock(); | 601 | nmi_ipi_unlock(); |
| 602 | 602 | ||
| 603 | /* Remove this CPU */ | ||
| 604 | set_cpu_online(smp_processor_id(), false); | ||
| 605 | |||
| 606 | spin_begin(); | 603 | spin_begin(); |
| 607 | while (1) | 604 | while (1) |
| 608 | spin_cpu_relax(); | 605 | spin_cpu_relax(); |
| @@ -617,9 +614,6 @@ void smp_send_stop(void) | |||
| 617 | 614 | ||
| 618 | static void stop_this_cpu(void *dummy) | 615 | static void stop_this_cpu(void *dummy) |
| 619 | { | 616 | { |
| 620 | /* Remove this CPU */ | ||
| 621 | set_cpu_online(smp_processor_id(), false); | ||
| 622 | |||
| 623 | hard_irq_disable(); | 617 | hard_irq_disable(); |
| 624 | spin_begin(); | 618 | spin_begin(); |
| 625 | while (1) | 619 | while (1) |
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 07e97f289c52..e2c50b55138f 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
| @@ -196,7 +196,7 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk, | |||
| 196 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable); | 196 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable); |
| 197 | #endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */ | 197 | #endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */ |
| 198 | 198 | ||
| 199 | #ifdef CONFIG_PPC_BOOK3S_64 | 199 | #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) |
| 200 | static void handle_backtrace_ipi(struct pt_regs *regs) | 200 | static void handle_backtrace_ipi(struct pt_regs *regs) |
| 201 | { | 201 | { |
| 202 | nmi_cpu_backtrace(regs); | 202 | nmi_cpu_backtrace(regs); |
| @@ -242,4 +242,4 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) | |||
| 242 | { | 242 | { |
| 243 | nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi); | 243 | nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi); |
| 244 | } | 244 | } |
| 245 | #endif /* CONFIG_PPC64 */ | 245 | #endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */ |
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7c5f479c5c00..8a9a49c13865 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
| @@ -337,7 +337,8 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif | |||
| 337 | if (shift >= pdshift) | 337 | if (shift >= pdshift) |
| 338 | hugepd_free(tlb, hugepte); | 338 | hugepd_free(tlb, hugepte); |
| 339 | else | 339 | else |
| 340 | pgtable_free_tlb(tlb, hugepte, pdshift - shift); | 340 | pgtable_free_tlb(tlb, hugepte, |
| 341 | get_hugepd_cache_index(pdshift - shift)); | ||
| 341 | } | 342 | } |
| 342 | 343 | ||
| 343 | static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, | 344 | static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, |
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index c1f4ca45c93a..4afbfbb64bfd 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
| @@ -409,6 +409,18 @@ static inline void pgtable_free(void *table, int index) | |||
| 409 | case PUD_INDEX: | 409 | case PUD_INDEX: |
| 410 | kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table); | 410 | kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table); |
| 411 | break; | 411 | break; |
| 412 | #if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE) | ||
| 413 | /* 16M hugepd directory at pud level */ | ||
| 414 | case HTLB_16M_INDEX: | ||
| 415 | BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0); | ||
| 416 | kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table); | ||
| 417 | break; | ||
| 418 | /* 16G hugepd directory at the pgd level */ | ||
| 419 | case HTLB_16G_INDEX: | ||
| 420 | BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0); | ||
| 421 | kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table); | ||
| 422 | break; | ||
| 423 | #endif | ||
| 412 | /* We don't free pgd table via RCU callback */ | 424 | /* We don't free pgd table via RCU callback */ |
| 413 | default: | 425 | default: |
| 414 | BUG(); | 426 | BUG(); |
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 67a6e86d3e7e..1135b43a597c 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
| @@ -689,22 +689,17 @@ EXPORT_SYMBOL(radix__flush_tlb_kernel_range); | |||
| 689 | static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; | 689 | static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; |
| 690 | static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2; | 690 | static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2; |
| 691 | 691 | ||
| 692 | void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | 692 | static inline void __radix__flush_tlb_range(struct mm_struct *mm, |
| 693 | unsigned long end) | 693 | unsigned long start, unsigned long end, |
| 694 | bool flush_all_sizes) | ||
| 694 | 695 | ||
| 695 | { | 696 | { |
| 696 | struct mm_struct *mm = vma->vm_mm; | ||
| 697 | unsigned long pid; | 697 | unsigned long pid; |
| 698 | unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift; | 698 | unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift; |
| 699 | unsigned long page_size = 1UL << page_shift; | 699 | unsigned long page_size = 1UL << page_shift; |
| 700 | unsigned long nr_pages = (end - start) >> page_shift; | 700 | unsigned long nr_pages = (end - start) >> page_shift; |
| 701 | bool local, full; | 701 | bool local, full; |
| 702 | 702 | ||
| 703 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 704 | if (is_vm_hugetlb_page(vma)) | ||
| 705 | return radix__flush_hugetlb_tlb_range(vma, start, end); | ||
| 706 | #endif | ||
| 707 | |||
| 708 | pid = mm->context.id; | 703 | pid = mm->context.id; |
| 709 | if (unlikely(pid == MMU_NO_CONTEXT)) | 704 | if (unlikely(pid == MMU_NO_CONTEXT)) |
| 710 | return; | 705 | return; |
| @@ -738,37 +733,64 @@ is_local: | |||
| 738 | _tlbie_pid(pid, RIC_FLUSH_TLB); | 733 | _tlbie_pid(pid, RIC_FLUSH_TLB); |
| 739 | } | 734 | } |
| 740 | } else { | 735 | } else { |
| 741 | bool hflush = false; | 736 | bool hflush = flush_all_sizes; |
| 737 | bool gflush = flush_all_sizes; | ||
| 742 | unsigned long hstart, hend; | 738 | unsigned long hstart, hend; |
| 739 | unsigned long gstart, gend; | ||
| 743 | 740 | ||
| 744 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 741 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) |
| 745 | hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT; | ||
| 746 | hend = end >> HPAGE_PMD_SHIFT; | ||
| 747 | if (hstart < hend) { | ||
| 748 | hstart <<= HPAGE_PMD_SHIFT; | ||
| 749 | hend <<= HPAGE_PMD_SHIFT; | ||
| 750 | hflush = true; | 742 | hflush = true; |
| 743 | |||
| 744 | if (hflush) { | ||
| 745 | hstart = (start + PMD_SIZE - 1) & PMD_MASK; | ||
| 746 | hend = end & PMD_MASK; | ||
| 747 | if (hstart == hend) | ||
| 748 | hflush = false; | ||
| 749 | } | ||
| 750 | |||
| 751 | if (gflush) { | ||
| 752 | gstart = (start + PUD_SIZE - 1) & PUD_MASK; | ||
| 753 | gend = end & PUD_MASK; | ||
| 754 | if (gstart == gend) | ||
| 755 | gflush = false; | ||
| 751 | } | 756 | } |
| 752 | #endif | ||
| 753 | 757 | ||
| 754 | asm volatile("ptesync": : :"memory"); | 758 | asm volatile("ptesync": : :"memory"); |
| 755 | if (local) { | 759 | if (local) { |
| 756 | __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize); | 760 | __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize); |
| 757 | if (hflush) | 761 | if (hflush) |
| 758 | __tlbiel_va_range(hstart, hend, pid, | 762 | __tlbiel_va_range(hstart, hend, pid, |
| 759 | HPAGE_PMD_SIZE, MMU_PAGE_2M); | 763 | PMD_SIZE, MMU_PAGE_2M); |
| 764 | if (gflush) | ||
| 765 | __tlbiel_va_range(gstart, gend, pid, | ||
| 766 | PUD_SIZE, MMU_PAGE_1G); | ||
| 760 | asm volatile("ptesync": : :"memory"); | 767 | asm volatile("ptesync": : :"memory"); |
| 761 | } else { | 768 | } else { |
| 762 | __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize); | 769 | __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize); |
| 763 | if (hflush) | 770 | if (hflush) |
| 764 | __tlbie_va_range(hstart, hend, pid, | 771 | __tlbie_va_range(hstart, hend, pid, |
| 765 | HPAGE_PMD_SIZE, MMU_PAGE_2M); | 772 | PMD_SIZE, MMU_PAGE_2M); |
| 773 | if (gflush) | ||
| 774 | __tlbie_va_range(gstart, gend, pid, | ||
| 775 | PUD_SIZE, MMU_PAGE_1G); | ||
| 766 | fixup_tlbie(); | 776 | fixup_tlbie(); |
| 767 | asm volatile("eieio; tlbsync; ptesync": : :"memory"); | 777 | asm volatile("eieio; tlbsync; ptesync": : :"memory"); |
| 768 | } | 778 | } |
| 769 | } | 779 | } |
| 770 | preempt_enable(); | 780 | preempt_enable(); |
| 771 | } | 781 | } |
| 782 | |||
| 783 | void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
| 784 | unsigned long end) | ||
| 785 | |||
| 786 | { | ||
| 787 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 788 | if (is_vm_hugetlb_page(vma)) | ||
| 789 | return radix__flush_hugetlb_tlb_range(vma, start, end); | ||
| 790 | #endif | ||
| 791 | |||
| 792 | __radix__flush_tlb_range(vma->vm_mm, start, end, false); | ||
| 793 | } | ||
| 772 | EXPORT_SYMBOL(radix__flush_tlb_range); | 794 | EXPORT_SYMBOL(radix__flush_tlb_range); |
| 773 | 795 | ||
| 774 | static int radix_get_mmu_psize(int page_size) | 796 | static int radix_get_mmu_psize(int page_size) |
| @@ -837,6 +859,8 @@ void radix__tlb_flush(struct mmu_gather *tlb) | |||
| 837 | int psize = 0; | 859 | int psize = 0; |
| 838 | struct mm_struct *mm = tlb->mm; | 860 | struct mm_struct *mm = tlb->mm; |
| 839 | int page_size = tlb->page_size; | 861 | int page_size = tlb->page_size; |
| 862 | unsigned long start = tlb->start; | ||
| 863 | unsigned long end = tlb->end; | ||
| 840 | 864 | ||
| 841 | /* | 865 | /* |
| 842 | * if page size is not something we understand, do a full mm flush | 866 | * if page size is not something we understand, do a full mm flush |
| @@ -847,15 +871,45 @@ void radix__tlb_flush(struct mmu_gather *tlb) | |||
| 847 | */ | 871 | */ |
| 848 | if (tlb->fullmm) { | 872 | if (tlb->fullmm) { |
| 849 | __flush_all_mm(mm, true); | 873 | __flush_all_mm(mm, true); |
| 874 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) | ||
| 875 | } else if (mm_tlb_flush_nested(mm)) { | ||
| 876 | /* | ||
| 877 | * If there is a concurrent invalidation that is clearing ptes, | ||
| 878 | * then it's possible this invalidation will miss one of those | ||
| 879 | * cleared ptes and miss flushing the TLB. If this invalidate | ||
| 880 | * returns before the other one flushes TLBs, that can result | ||
| 881 | * in it returning while there are still valid TLBs inside the | ||
| 882 | * range to be invalidated. | ||
| 883 | * | ||
| 884 | * See mm/memory.c:tlb_finish_mmu() for more details. | ||
| 885 | * | ||
| 886 | * The solution to this is ensure the entire range is always | ||
| 887 | * flushed here. The problem for powerpc is that the flushes | ||
| 888 | * are page size specific, so this "forced flush" would not | ||
| 889 | * do the right thing if there are a mix of page sizes in | ||
| 890 | * the range to be invalidated. So use __flush_tlb_range | ||
| 891 | * which invalidates all possible page sizes in the range. | ||
| 892 | * | ||
| 893 | * PWC flush is probably not required because the core code | ||
| 894 | * shouldn't free page tables in this path, but accounting | ||
| 895 | * for the possibility makes us a bit more robust. | ||
| 896 | * | ||
| 897 | * need_flush_all is an uncommon case because page table | ||
| 898 | * teardown should be done with exclusive locks held (but | ||
| 899 | * after locks are dropped another invalidate could come | ||
| 900 | * in), so it could be optimized further if necessary. | ||
| 901 | */ | ||
| 902 | if (!tlb->need_flush_all) | ||
| 903 | __radix__flush_tlb_range(mm, start, end, true); | ||
| 904 | else | ||
| 905 | radix__flush_all_mm(mm); | ||
| 906 | #endif | ||
| 850 | } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) { | 907 | } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) { |
| 851 | if (!tlb->need_flush_all) | 908 | if (!tlb->need_flush_all) |
| 852 | radix__flush_tlb_mm(mm); | 909 | radix__flush_tlb_mm(mm); |
| 853 | else | 910 | else |
| 854 | radix__flush_all_mm(mm); | 911 | radix__flush_all_mm(mm); |
| 855 | } else { | 912 | } else { |
| 856 | unsigned long start = tlb->start; | ||
| 857 | unsigned long end = tlb->end; | ||
| 858 | |||
| 859 | if (!tlb->need_flush_all) | 913 | if (!tlb->need_flush_all) |
| 860 | radix__flush_tlb_range_psize(mm, start, end, psize); | 914 | radix__flush_tlb_range_psize(mm, start, end, psize); |
| 861 | else | 915 | else |
| @@ -1043,6 +1097,8 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm) | |||
| 1043 | for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) { | 1097 | for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) { |
| 1044 | if (sib == cpu) | 1098 | if (sib == cpu) |
| 1045 | continue; | 1099 | continue; |
| 1100 | if (!cpu_possible(sib)) | ||
| 1101 | continue; | ||
| 1046 | if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu) | 1102 | if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu) |
| 1047 | flush = true; | 1103 | flush = true; |
| 1048 | } | 1104 | } |
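Aside from the patch itself, the boundary arithmetic in __radix__flush_tlb_range() is easy to model in isolation: the 2M and 1G sub-ranges are obtained by rounding the flush window inward to PMD/PUD boundaries, and the extra flush is skipped when the rounded window collapses. The sketch below is illustrative only; the PMD_SIZE/PUD_SIZE values, the ALIGN helpers, and the >= comparison (used so the demo also copes with windows that never cross a boundary) are assumptions, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define PMD_SIZE (2UL << 20)	/* assumed 2M mapping size */
#define PUD_SIZE (1UL << 30)	/* assumed 1G mapping size */
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/*
 * Compute the sub-range of [start, end) covered by whole mappings of
 * the given size, mirroring the hstart/hend and gstart/gend
 * computation in the patch.  Returns false when no full mapping fits,
 * in which case the extra flush would be skipped.
 */
static bool sub_range(unsigned long start, unsigned long end,
		      unsigned long size,
		      unsigned long *sub_start, unsigned long *sub_end)
{
	unsigned long s = ALIGN_UP(start, size);
	unsigned long e = ALIGN_DOWN(end, size);

	if (s >= e)		/* window never spans a full mapping */
		return false;
	*sub_start = s;
	*sub_end = e;
	return true;
}

int main(void)
{
	unsigned long s, e;

	if (sub_range(0x200000, 0x1000000, PMD_SIZE, &s, &e))
		printf("2M flush over 0x%lx-0x%lx\n", s, e);
	if (!sub_range(0x200000, 0x1000000, PUD_SIZE, &s, &e))
		printf("no full 1G mapping in the window, 1G flush skipped\n");
	return 0;
}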
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index f0a6ea22429d..a08e82856563 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
| @@ -258,11 +258,6 @@ archscripts: scripts_basic | |||
| 258 | archheaders: | 258 | archheaders: |
| 259 | $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all | 259 | $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all |
| 260 | 260 | ||
| 261 | archprepare: | ||
| 262 | ifeq ($(CONFIG_KEXEC_FILE),y) | ||
| 263 | $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c | ||
| 264 | endif | ||
| 265 | |||
| 266 | ### | 261 | ### |
| 267 | # Kernel objects | 262 | # Kernel objects |
| 268 | 263 | ||
| @@ -327,7 +322,6 @@ archclean: | |||
| 327 | $(Q)rm -rf $(objtree)/arch/x86_64 | 322 | $(Q)rm -rf $(objtree)/arch/x86_64 |
| 328 | $(Q)$(MAKE) $(clean)=$(boot) | 323 | $(Q)$(MAKE) $(clean)=$(boot) |
| 329 | $(Q)$(MAKE) $(clean)=arch/x86/tools | 324 | $(Q)$(MAKE) $(clean)=arch/x86/tools |
| 330 | $(Q)$(MAKE) $(clean)=arch/x86/purgatory | ||
| 331 | 325 | ||
| 332 | define archhelp | 326 | define archhelp |
| 333 | echo '* bzImage - Compressed kernel image (arch/x86/boot/bzImage)' | 327 | echo '* bzImage - Compressed kernel image (arch/x86/boot/bzImage)' |
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 042b5e892ed1..14de0432d288 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h | |||
| @@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, | |||
| 38 | { | 38 | { |
| 39 | unsigned long mask; | 39 | unsigned long mask; |
| 40 | 40 | ||
| 41 | asm ("cmp %1,%2; sbb %0,%0;" | 41 | asm volatile ("cmp %1,%2; sbb %0,%0;" |
| 42 | :"=r" (mask) | 42 | :"=r" (mask) |
| 43 | :"g"(size),"r" (index) | 43 | :"g"(size),"r" (index) |
| 44 | :"cc"); | 44 | :"cc"); |
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 425e6b8b9547..6aa8499e1f62 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h | |||
| @@ -114,6 +114,7 @@ | |||
| 114 | #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f | 114 | #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f |
| 115 | #define VMX_MISC_SAVE_EFER_LMA 0x00000020 | 115 | #define VMX_MISC_SAVE_EFER_LMA 0x00000020 |
| 116 | #define VMX_MISC_ACTIVITY_HLT 0x00000040 | 116 | #define VMX_MISC_ACTIVITY_HLT 0x00000040 |
| 117 | #define VMX_MISC_ZERO_LEN_INS 0x40000000 | ||
| 117 | 118 | ||
| 118 | /* VMFUNC functions */ | 119 | /* VMFUNC functions */ |
| 119 | #define VMX_VMFUNC_EPTP_SWITCHING 0x00000001 | 120 | #define VMX_VMFUNC_EPTP_SWITCHING 0x00000001 |
| @@ -351,11 +352,13 @@ enum vmcs_field { | |||
| 351 | #define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK | 352 | #define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK |
| 352 | 353 | ||
| 353 | #define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ | 354 | #define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ |
| 355 | #define INTR_TYPE_RESERVED (1 << 8) /* reserved */ | ||
| 354 | #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ | 356 | #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ |
| 355 | #define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */ | 357 | #define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */ |
| 356 | #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ | 358 | #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ |
| 357 | #define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */ | 359 | #define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */ |
| 358 | #define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */ | 360 | #define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */ |
| 361 | #define INTR_TYPE_OTHER_EVENT (7 << 8) /* other event */ | ||
| 359 | 362 | ||
| 360 | /* GUEST_INTERRUPTIBILITY_INFO flags. */ | 363 | /* GUEST_INTERRUPTIBILITY_INFO flags. */ |
| 361 | #define GUEST_INTR_STATE_STI 0x00000001 | 364 | #define GUEST_INTR_STATE_STI 0x00000001 |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index efaf2d4f9c3c..d492752f79e1 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
| 27 | #include <linux/crash_dump.h> | 27 | #include <linux/crash_dump.h> |
| 28 | #include <linux/reboot.h> | 28 | #include <linux/reboot.h> |
| 29 | #include <linux/memory.h> | ||
| 29 | 30 | ||
| 30 | #include <asm/uv/uv_mmrs.h> | 31 | #include <asm/uv/uv_mmrs.h> |
| 31 | #include <asm/uv/uv_hub.h> | 32 | #include <asm/uv/uv_hub.h> |
| @@ -392,6 +393,51 @@ extern int uv_hub_info_version(void) | |||
| 392 | } | 393 | } |
| 393 | EXPORT_SYMBOL(uv_hub_info_version); | 394 | EXPORT_SYMBOL(uv_hub_info_version); |
| 394 | 395 | ||
| 396 | /* Default UV memory block size is 2GB */ | ||
| 397 | static unsigned long mem_block_size = (2UL << 30); | ||
| 398 | |||
| 399 | /* Kernel parameter to specify UV mem block size */ | ||
| 400 | static int parse_mem_block_size(char *ptr) | ||
| 401 | { | ||
| 402 | unsigned long size = memparse(ptr, NULL); | ||
| 403 | |||
| 404 | /* Size will be rounded down by set_block_size() below */ | ||
| 405 | mem_block_size = size; | ||
| 406 | return 0; | ||
| 407 | } | ||
| 408 | early_param("uv_memblksize", parse_mem_block_size); | ||
| 409 | |||
| 410 | static __init int adj_blksize(u32 lgre) | ||
| 411 | { | ||
| 412 | unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT; | ||
| 413 | unsigned long size; | ||
| 414 | |||
| 415 | for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1) | ||
| 416 | if (IS_ALIGNED(base, size)) | ||
| 417 | break; | ||
| 418 | |||
| 419 | if (size >= mem_block_size) | ||
| 420 | return 0; | ||
| 421 | |||
| 422 | mem_block_size = size; | ||
| 423 | return 1; | ||
| 424 | } | ||
| 425 | |||
| 426 | static __init void set_block_size(void) | ||
| 427 | { | ||
| 428 | unsigned int order = ffs(mem_block_size); | ||
| 429 | |||
| 430 | if (order) { | ||
| 431 | /* adjust for ffs return of 1..64 */ | ||
| 432 | set_memory_block_size_order(order - 1); | ||
| 433 | pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size); | ||
| 434 | } else { | ||
| 435 | /* bad or zero value, default to 1UL << 31 (2GB) */ | ||
| 436 | pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size); | ||
| 437 | set_memory_block_size_order(31); | ||
| 438 | } | ||
| 439 | } | ||
| 440 | |||
| 395 | /* Build GAM range lookup table: */ | 441 | /* Build GAM range lookup table: */ |
| 396 | static __init void build_uv_gr_table(void) | 442 | static __init void build_uv_gr_table(void) |
| 397 | { | 443 | { |
| @@ -1180,23 +1226,30 @@ static void __init decode_gam_rng_tbl(unsigned long ptr) | |||
| 1180 | << UV_GAM_RANGE_SHFT); | 1226 | << UV_GAM_RANGE_SHFT); |
| 1181 | int order = 0; | 1227 | int order = 0; |
| 1182 | char suffix[] = " KMGTPE"; | 1228 | char suffix[] = " KMGTPE"; |
| 1229 | int flag = ' '; | ||
| 1183 | 1230 | ||
| 1184 | while (size > 9999 && order < sizeof(suffix)) { | 1231 | while (size > 9999 && order < sizeof(suffix)) { |
| 1185 | size /= 1024; | 1232 | size /= 1024; |
| 1186 | order++; | 1233 | order++; |
| 1187 | } | 1234 | } |
| 1188 | 1235 | ||
| 1236 | /* adjust max block size to current range start */ | ||
| 1237 | if (gre->type == 1 || gre->type == 2) | ||
| 1238 | if (adj_blksize(lgre)) | ||
| 1239 | flag = '*'; | ||
| 1240 | |||
| 1189 | if (!index) { | 1241 | if (!index) { |
| 1190 | pr_info("UV: GAM Range Table...\n"); | 1242 | pr_info("UV: GAM Range Table...\n"); |
| 1191 | pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN"); | 1243 | pr_info("UV: # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN"); |
| 1192 | } | 1244 | } |
| 1193 | pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n", | 1245 | pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d %04x %02x %02x\n", |
| 1194 | index++, | 1246 | index++, |
| 1195 | (unsigned long)lgre << UV_GAM_RANGE_SHFT, | 1247 | (unsigned long)lgre << UV_GAM_RANGE_SHFT, |
| 1196 | (unsigned long)gre->limit << UV_GAM_RANGE_SHFT, | 1248 | (unsigned long)gre->limit << UV_GAM_RANGE_SHFT, |
| 1197 | size, suffix[order], | 1249 | flag, size, suffix[order], |
| 1198 | gre->type, gre->nasid, gre->sockid, gre->pnode); | 1250 | gre->type, gre->nasid, gre->sockid, gre->pnode); |
| 1199 | 1251 | ||
| 1252 | /* update to next range start */ | ||
| 1200 | lgre = gre->limit; | 1253 | lgre = gre->limit; |
| 1201 | if (sock_min > gre->sockid) | 1254 | if (sock_min > gre->sockid) |
| 1202 | sock_min = gre->sockid; | 1255 | sock_min = gre->sockid; |
| @@ -1427,6 +1480,7 @@ static void __init uv_system_init_hub(void) | |||
| 1427 | 1480 | ||
| 1428 | build_socket_tables(); | 1481 | build_socket_tables(); |
| 1429 | build_uv_gr_table(); | 1482 | build_uv_gr_table(); |
| 1483 | set_block_size(); | ||
| 1430 | uv_init_hub_info(&hub_info); | 1484 | uv_init_hub_info(&hub_info); |
| 1431 | uv_possible_blades = num_possible_nodes(); | 1485 | uv_possible_blades = num_possible_nodes(); |
| 1432 | if (!_node_to_pnode) | 1486 | if (!_node_to_pnode) |
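To make the adj_blksize()/set_block_size() flow concrete, here is a small standalone sketch of the same adjustment: halve the candidate block size until the range base is aligned to it, then convert the byte size to an order. The MIN_MEMORY_BLOCK_SIZE value and the example base address are assumptions for the demo, not values taken from the UV code.

#include <stdio.h>

#define MIN_MEMORY_BLOCK_SIZE (128UL << 20)	/* assumed 128M minimum */

static unsigned long mem_block_size = 2UL << 30;	/* default 2G */

/*
 * Halve the block size until the range base address is aligned to it,
 * mirroring adj_blksize(); returns 1 when the size had to shrink.
 */
static int adj_blksize(unsigned long base)
{
	unsigned long size;

	for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
		if ((base & (size - 1)) == 0)
			break;

	if (size >= mem_block_size)
		return 0;

	mem_block_size = size;
	return 1;
}

int main(void)
{
	unsigned long base = 3UL << 30;		/* range starting at 3G */

	if (adj_blksize(base))
		/* for a power of two, __builtin_ctzl() (gcc/clang) equals ffs() - 1 */
		printf("block size reduced to 0x%lx (order %d)\n",
		       mem_block_size, __builtin_ctzl(mem_block_size));
	return 0;
}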
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index cd0fda1fff6d..404df26b7de8 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <asm/pgtable.h> | 27 | #include <asm/pgtable.h> |
| 28 | #include <asm/set_memory.h> | 28 | #include <asm/set_memory.h> |
| 29 | #include <asm/intel-family.h> | 29 | #include <asm/intel-family.h> |
| 30 | #include <asm/hypervisor.h> | ||
| 30 | 31 | ||
| 31 | static void __init spectre_v2_select_mitigation(void); | 32 | static void __init spectre_v2_select_mitigation(void); |
| 32 | static void __init ssb_select_mitigation(void); | 33 | static void __init ssb_select_mitigation(void); |
| @@ -664,6 +665,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr | |||
| 664 | if (boot_cpu_has(X86_FEATURE_PTI)) | 665 | if (boot_cpu_has(X86_FEATURE_PTI)) |
| 665 | return sprintf(buf, "Mitigation: PTI\n"); | 666 | return sprintf(buf, "Mitigation: PTI\n"); |
| 666 | 667 | ||
| 668 | if (hypervisor_is_type(X86_HYPER_XEN_PV)) | ||
| 669 | return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); | ||
| 670 | |||
| 667 | break; | 671 | break; |
| 668 | 672 | ||
| 669 | case X86_BUG_SPECTRE_V1: | 673 | case X86_BUG_SPECTRE_V1: |
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index 38354c66df81..0c5fcbd998cf 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c | |||
| @@ -671,7 +671,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) | |||
| 671 | num_sharing_cache = ((eax >> 14) & 0xfff) + 1; | 671 | num_sharing_cache = ((eax >> 14) & 0xfff) + 1; |
| 672 | 672 | ||
| 673 | if (num_sharing_cache) { | 673 | if (num_sharing_cache) { |
| 674 | int bits = get_count_order(num_sharing_cache) - 1; | 674 | int bits = get_count_order(num_sharing_cache); |
| 675 | 675 | ||
| 676 | per_cpu(cpu_llc_id, cpu) = c->apicid >> bits; | 676 | per_cpu(cpu_llc_id, cpu) = c->apicid >> bits; |
| 677 | } | 677 | } |
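A hedged sketch of what the cacheinfo fix computes: the shift count should be the rounded-up log2 of the number of threads sharing the last-level cache, so that every APIC ID within one 2^bits block maps to the same llc_id; with the old "- 1" the shift was one bit short. ceil_log2() below stands in for get_count_order() under the assumption that it rounds up to the next power of two.

#include <stdio.h>

/* ceil(log2(n)) for n >= 1; stands in for get_count_order(). */
static int ceil_log2(unsigned int n)
{
	int bits = 0;

	while ((1u << bits) < n)
		bits++;
	return bits;
}

int main(void)
{
	unsigned int num_sharing_cache = 6;	/* threads sharing one LLC */
	unsigned int apicid = 13;
	int bits = ceil_log2(num_sharing_cache);	/* 3, not 2 */

	/* every APIC ID inside one 2^bits block gets the same llc_id */
	printf("llc_id = %u (shift by %d bits)\n", apicid >> bits, bits);
	return 0;
}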
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 0df7151cfef4..eb4cb3efd20e 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
| @@ -1,3 +1,6 @@ | |||
| 1 | /* cpu_feature_enabled() cannot be used this early */ | ||
| 2 | #define USE_EARLY_PGTABLE_L5 | ||
| 3 | |||
| 1 | #include <linux/bootmem.h> | 4 | #include <linux/bootmem.h> |
| 2 | #include <linux/linkage.h> | 5 | #include <linux/linkage.h> |
| 3 | #include <linux/bitops.h> | 6 | #include <linux/bitops.h> |
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c index 5bbd06f38ff6..f34d89c01edc 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c | |||
| @@ -160,6 +160,11 @@ static struct severity { | |||
| 160 | SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), | 160 | SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), |
| 161 | USER | 161 | USER |
| 162 | ), | 162 | ), |
| 163 | MCESEV( | ||
| 164 | PANIC, "Data load in unrecoverable area of kernel", | ||
| 165 | SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), | ||
| 166 | KERNEL | ||
| 167 | ), | ||
| 163 | #endif | 168 | #endif |
| 164 | MCESEV( | 169 | MCESEV( |
| 165 | PANIC, "Action required: unknown MCACOD", | 170 | PANIC, "Action required: unknown MCACOD", |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index e4cf6ff1c2e1..c102ad51025e 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
| @@ -772,23 +772,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll); | |||
| 772 | static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, | 772 | static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, |
| 773 | struct pt_regs *regs) | 773 | struct pt_regs *regs) |
| 774 | { | 774 | { |
| 775 | int i, ret = 0; | ||
| 776 | char *tmp; | 775 | char *tmp; |
| 776 | int i; | ||
| 777 | 777 | ||
| 778 | for (i = 0; i < mca_cfg.banks; i++) { | 778 | for (i = 0; i < mca_cfg.banks; i++) { |
| 779 | m->status = mce_rdmsrl(msr_ops.status(i)); | 779 | m->status = mce_rdmsrl(msr_ops.status(i)); |
| 780 | if (m->status & MCI_STATUS_VAL) { | 780 | if (!(m->status & MCI_STATUS_VAL)) |
| 781 | __set_bit(i, validp); | 781 | continue; |
| 782 | if (quirk_no_way_out) | 782 | |
| 783 | quirk_no_way_out(i, m, regs); | 783 | __set_bit(i, validp); |
| 784 | } | 784 | if (quirk_no_way_out) |
| 785 | quirk_no_way_out(i, m, regs); | ||
| 785 | 786 | ||
| 786 | if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { | 787 | if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { |
| 788 | mce_read_aux(m, i); | ||
| 787 | *msg = tmp; | 789 | *msg = tmp; |
| 788 | ret = 1; | 790 | return 1; |
| 789 | } | 791 | } |
| 790 | } | 792 | } |
| 791 | return ret; | 793 | return 0; |
| 792 | } | 794 | } |
| 793 | 795 | ||
| 794 | /* | 796 | /* |
| @@ -1205,13 +1207,18 @@ void do_machine_check(struct pt_regs *regs, long error_code) | |||
| 1205 | lmce = m.mcgstatus & MCG_STATUS_LMCES; | 1207 | lmce = m.mcgstatus & MCG_STATUS_LMCES; |
| 1206 | 1208 | ||
| 1207 | /* | 1209 | /* |
| 1210 | * Local machine check may already know that we have to panic. | ||
| 1211 | * Broadcast machine check begins rendezvous in mce_start(). | ||
| 1208 | * Go through all banks in exclusion of the other CPUs. This way we | 1212 | * Go through all banks in exclusion of the other CPUs. This way we |
| 1209 | * don't report duplicated events on shared banks because the first one | 1213 | * don't report duplicated events on shared banks because the first one |
| 1210 | * to see it will clear it. If this is a Local MCE, then no need to | 1214 | * to see it will clear it. |
| 1211 | * perform rendezvous. | ||
| 1212 | */ | 1215 | */ |
| 1213 | if (!lmce) | 1216 | if (lmce) { |
| 1217 | if (no_way_out) | ||
| 1218 | mce_panic("Fatal local machine check", &m, msg); | ||
| 1219 | } else { | ||
| 1214 | order = mce_start(&no_way_out); | 1220 | order = mce_start(&no_way_out); |
| 1221 | } | ||
| 1215 | 1222 | ||
| 1216 | for (i = 0; i < cfg->banks; i++) { | 1223 | for (i = 0; i < cfg->banks; i++) { |
| 1217 | __clear_bit(i, toclear); | 1224 | __clear_bit(i, toclear); |
| @@ -1287,12 +1294,17 @@ void do_machine_check(struct pt_regs *regs, long error_code) | |||
| 1287 | no_way_out = worst >= MCE_PANIC_SEVERITY; | 1294 | no_way_out = worst >= MCE_PANIC_SEVERITY; |
| 1288 | } else { | 1295 | } else { |
| 1289 | /* | 1296 | /* |
| 1290 | * Local MCE skipped calling mce_reign() | 1297 | * If there was a fatal machine check we should have |
| 1291 | * If we found a fatal error, we need to panic here. | 1298 | * already called mce_panic earlier in this function. |
| 1299 | * Since we re-read the banks, we might have found | ||
| 1300 | * something new. Check again to see if we found a | ||
| 1301 | * fatal error. We call "mce_severity()" again to | ||
| 1302 | * make sure we have the right "msg". | ||
| 1292 | */ | 1303 | */ |
| 1293 | if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) | 1304 | if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { |
| 1294 | mce_panic("Machine check from unknown source", | 1305 | mce_severity(&m, cfg->tolerant, &msg, true); |
| 1295 | NULL, NULL); | 1306 | mce_panic("Local fatal machine check!", &m, msg); |
| 1307 | } | ||
| 1296 | } | 1308 | } |
| 1297 | 1309 | ||
| 1298 | /* | 1310 | /* |
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 1c2cfa0644aa..97ccf4c3b45b 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c | |||
| @@ -190,8 +190,11 @@ static void save_microcode_patch(void *data, unsigned int size) | |||
| 190 | p = memdup_patch(data, size); | 190 | p = memdup_patch(data, size); |
| 191 | if (!p) | 191 | if (!p) |
| 192 | pr_err("Error allocating buffer %p\n", data); | 192 | pr_err("Error allocating buffer %p\n", data); |
| 193 | else | 193 | else { |
| 194 | list_replace(&iter->plist, &p->plist); | 194 | list_replace(&iter->plist, &p->plist); |
| 195 | kfree(iter->data); | ||
| 196 | kfree(iter); | ||
| 197 | } | ||
| 195 | } | 198 | } |
| 196 | } | 199 | } |
| 197 | 200 | ||
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index a21d6ace648e..8047379e575a 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
| @@ -44,7 +44,7 @@ static unsigned int __initdata next_early_pgt; | |||
| 44 | pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); | 44 | pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); |
| 45 | 45 | ||
| 46 | #ifdef CONFIG_X86_5LEVEL | 46 | #ifdef CONFIG_X86_5LEVEL |
| 47 | unsigned int __pgtable_l5_enabled __initdata; | 47 | unsigned int __pgtable_l5_enabled __ro_after_init; |
| 48 | unsigned int pgdir_shift __ro_after_init = 39; | 48 | unsigned int pgdir_shift __ro_after_init = 39; |
| 49 | EXPORT_SYMBOL(pgdir_shift); | 49 | EXPORT_SYMBOL(pgdir_shift); |
| 50 | unsigned int ptrs_per_p4d __ro_after_init = 1; | 50 | unsigned int ptrs_per_p4d __ro_after_init = 1; |
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 697a4ce04308..736348ead421 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c | |||
| @@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev) | |||
| 645 | /* Skylake */ | 645 | /* Skylake */ |
| 646 | static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev) | 646 | static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev) |
| 647 | { | 647 | { |
| 648 | u32 capid0; | 648 | u32 capid0, capid5; |
| 649 | 649 | ||
| 650 | pci_read_config_dword(pdev, 0x84, &capid0); | 650 | pci_read_config_dword(pdev, 0x84, &capid0); |
| 651 | pci_read_config_dword(pdev, 0x98, &capid5); | ||
| 651 | 652 | ||
| 652 | if ((capid0 & 0xc0) == 0xc0) | 653 | /* |
| 654 | * CAPID0{7:6} indicate whether this is an advanced RAS SKU | ||
| 655 | * CAPID5{8:5} indicate that various NVDIMM usage modes are | ||
| 656 | * enabled, so memory machine check recovery is also enabled. | ||
| 657 | */ | ||
| 658 | if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0)) | ||
| 653 | static_branch_inc(&mcsafe_key); | 659 | static_branch_inc(&mcsafe_key); |
| 660 | |||
| 654 | } | 661 | } |
| 655 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap); | 662 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap); |
| 656 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap); | 663 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap); |
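The reworked Purley quirk reduces to two bit tests: CAPID0 bits 7:6 both set, or any of CAPID5 bits 8:5 set. A tiny sketch of those tests with made-up register values (nothing here is read from real PCI config space):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * CAPID0 bits 7:6 == 11b -> advanced RAS SKU;
 * CAPID5 bits 8:5 != 0   -> an NVDIMM usage mode is enabled.
 */
static bool mcsafe_capable(uint32_t capid0, uint32_t capid5)
{
	return (capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0);
}

int main(void)
{
	/* illustrative values only */
	printf("%d\n", mcsafe_capable(0x000000c0, 0x00000000));	/* 1: RAS SKU */
	printf("%d\n", mcsafe_capable(0x00000040, 0x00000020));	/* 1: NVDIMM bit 5 */
	printf("%d\n", mcsafe_capable(0x00000040, 0x00000000));	/* 0 */
	return 0;
}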
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index a535dd64de63..e6db475164ed 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
| @@ -835,16 +835,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) | |||
| 835 | char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : | 835 | char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : |
| 836 | "simd exception"; | 836 | "simd exception"; |
| 837 | 837 | ||
| 838 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) | ||
| 839 | return; | ||
| 840 | cond_local_irq_enable(regs); | 838 | cond_local_irq_enable(regs); |
| 841 | 839 | ||
| 842 | if (!user_mode(regs)) { | 840 | if (!user_mode(regs)) { |
| 843 | if (!fixup_exception(regs, trapnr)) { | 841 | if (fixup_exception(regs, trapnr)) |
| 844 | task->thread.error_code = error_code; | 842 | return; |
| 845 | task->thread.trap_nr = trapnr; | 843 | |
| 844 | task->thread.error_code = error_code; | ||
| 845 | task->thread.trap_nr = trapnr; | ||
| 846 | |||
| 847 | if (notify_die(DIE_TRAP, str, regs, error_code, | ||
| 848 | trapnr, SIGFPE) != NOTIFY_STOP) | ||
| 846 | die(str, regs, error_code); | 849 | die(str, regs, error_code); |
| 847 | } | ||
| 848 | return; | 850 | return; |
| 849 | } | 851 | } |
| 850 | 852 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 559a12b6184d..1689f433f3a0 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -1705,6 +1705,17 @@ static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu) | |||
| 1705 | MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS; | 1705 | MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS; |
| 1706 | } | 1706 | } |
| 1707 | 1707 | ||
| 1708 | static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu) | ||
| 1709 | { | ||
| 1710 | return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS; | ||
| 1711 | } | ||
| 1712 | |||
| 1713 | static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu) | ||
| 1714 | { | ||
| 1715 | return to_vmx(vcpu)->nested.msrs.procbased_ctls_high & | ||
| 1716 | CPU_BASED_MONITOR_TRAP_FLAG; | ||
| 1717 | } | ||
| 1718 | |||
| 1708 | static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) | 1719 | static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) |
| 1709 | { | 1720 | { |
| 1710 | return vmcs12->cpu_based_vm_exec_control & bit; | 1721 | return vmcs12->cpu_based_vm_exec_control & bit; |
| @@ -11620,6 +11631,62 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
| 11620 | !nested_cr3_valid(vcpu, vmcs12->host_cr3)) | 11631 | !nested_cr3_valid(vcpu, vmcs12->host_cr3)) |
| 11621 | return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; | 11632 | return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; |
| 11622 | 11633 | ||
| 11634 | /* | ||
| 11635 | * From the Intel SDM, volume 3: | ||
| 11636 | * Fields relevant to VM-entry event injection must be set properly. | ||
| 11637 | * These fields are the VM-entry interruption-information field, the | ||
| 11638 | * VM-entry exception error code, and the VM-entry instruction length. | ||
| 11639 | */ | ||
| 11640 | if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { | ||
| 11641 | u32 intr_info = vmcs12->vm_entry_intr_info_field; | ||
| 11642 | u8 vector = intr_info & INTR_INFO_VECTOR_MASK; | ||
| 11643 | u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; | ||
| 11644 | bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; | ||
| 11645 | bool should_have_error_code; | ||
| 11646 | bool urg = nested_cpu_has2(vmcs12, | ||
| 11647 | SECONDARY_EXEC_UNRESTRICTED_GUEST); | ||
| 11648 | bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; | ||
| 11649 | |||
| 11650 | /* VM-entry interruption-info field: interruption type */ | ||
| 11651 | if (intr_type == INTR_TYPE_RESERVED || | ||
| 11652 | (intr_type == INTR_TYPE_OTHER_EVENT && | ||
| 11653 | !nested_cpu_supports_monitor_trap_flag(vcpu))) | ||
| 11654 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | ||
| 11655 | |||
| 11656 | /* VM-entry interruption-info field: vector */ | ||
| 11657 | if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || | ||
| 11658 | (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || | ||
| 11659 | (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) | ||
| 11660 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | ||
| 11661 | |||
| 11662 | /* VM-entry interruption-info field: deliver error code */ | ||
| 11663 | should_have_error_code = | ||
| 11664 | intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && | ||
| 11665 | x86_exception_has_error_code(vector); | ||
| 11666 | if (has_error_code != should_have_error_code) | ||
| 11667 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | ||
| 11668 | |||
| 11669 | /* VM-entry exception error code */ | ||
| 11670 | if (has_error_code && | ||
| 11671 | vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)) | ||
| 11672 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | ||
| 11673 | |||
| 11674 | /* VM-entry interruption-info field: reserved bits */ | ||
| 11675 | if (intr_info & INTR_INFO_RESVD_BITS_MASK) | ||
| 11676 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | ||
| 11677 | |||
| 11678 | /* VM-entry instruction length */ | ||
| 11679 | switch (intr_type) { | ||
| 11680 | case INTR_TYPE_SOFT_EXCEPTION: | ||
| 11681 | case INTR_TYPE_SOFT_INTR: | ||
| 11682 | case INTR_TYPE_PRIV_SW_EXCEPTION: | ||
| 11683 | if ((vmcs12->vm_entry_instruction_len > 15) || | ||
| 11684 | (vmcs12->vm_entry_instruction_len == 0 && | ||
| 11685 | !nested_cpu_has_zero_length_injection(vcpu))) | ||
| 11686 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | ||
| 11687 | } | ||
| 11688 | } | ||
| 11689 | |||
| 11623 | return 0; | 11690 | return 0; |
| 11624 | } | 11691 | } |
| 11625 | 11692 | ||
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 331993c49dae..257f27620bc2 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h | |||
| @@ -110,6 +110,15 @@ static inline bool is_la57_mode(struct kvm_vcpu *vcpu) | |||
| 110 | #endif | 110 | #endif |
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | static inline bool x86_exception_has_error_code(unsigned int vector) | ||
| 114 | { | ||
| 115 | static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) | | ||
| 116 | BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) | | ||
| 117 | BIT(PF_VECTOR) | BIT(AC_VECTOR); | ||
| 118 | |||
| 119 | return (1U << vector) & exception_has_error_code; | ||
| 120 | } | ||
| 121 | |||
| 113 | static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) | 122 | static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) |
| 114 | { | 123 | { |
| 115 | return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; | 124 | return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; |
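x86_exception_has_error_code() is a bitmask membership test over the vectors that push an error code. The standalone sketch below spells out the standard x86 vector numbers (8, 10-14, 17) as an assumption rather than pulling them from kernel headers, and adds a bounds check purely so the shift stays well-defined for arbitrary input in the demo:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* x86 vectors that deliver an error code: #DF(8), #TS(10), #NP(11),
 * #SS(12), #GP(13), #PF(14), #AC(17). */
static const unsigned int error_code_vectors =
	BIT(8) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(17);

static bool exception_has_error_code(unsigned int vector)
{
	/* the vector < 32 guard is only for the standalone demo */
	return vector < 32 && (BIT(vector) & error_code_vectors);
}

int main(void)
{
	printf("#GP(13): %d\n", exception_has_error_code(13));	/* 1 */
	printf("#UD(6):  %d\n", exception_has_error_code(6));	/* 0 */
	return 0;
}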
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 045f492d5f68..a688617c727e 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
| @@ -1350,16 +1350,28 @@ int kern_addr_valid(unsigned long addr) | |||
| 1350 | /* Amount of ram needed to start using large blocks */ | 1350 | /* Amount of ram needed to start using large blocks */ |
| 1351 | #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30) | 1351 | #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30) |
| 1352 | 1352 | ||
| 1353 | /* Adjustable memory block size */ | ||
| 1354 | static unsigned long set_memory_block_size; | ||
| 1355 | int __init set_memory_block_size_order(unsigned int order) | ||
| 1356 | { | ||
| 1357 | unsigned long size = 1UL << order; | ||
| 1358 | |||
| 1359 | if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE) | ||
| 1360 | return -EINVAL; | ||
| 1361 | |||
| 1362 | set_memory_block_size = size; | ||
| 1363 | return 0; | ||
| 1364 | } | ||
| 1365 | |||
| 1353 | static unsigned long probe_memory_block_size(void) | 1366 | static unsigned long probe_memory_block_size(void) |
| 1354 | { | 1367 | { |
| 1355 | unsigned long boot_mem_end = max_pfn << PAGE_SHIFT; | 1368 | unsigned long boot_mem_end = max_pfn << PAGE_SHIFT; |
| 1356 | unsigned long bz; | 1369 | unsigned long bz; |
| 1357 | 1370 | ||
| 1358 | /* If this is UV system, always set 2G block size */ | 1371 | /* If memory block size has been set, then use it */ |
| 1359 | if (is_uv_system()) { | 1372 | bz = set_memory_block_size; |
| 1360 | bz = MAX_BLOCK_SIZE; | 1373 | if (bz) |
| 1361 | goto done; | 1374 | goto done; |
| 1362 | } | ||
| 1363 | 1375 | ||
| 1364 | /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */ | 1376 | /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */ |
| 1365 | if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) { | 1377 | if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) { |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index c9081c6671f0..3b5318505c69 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
| @@ -65,6 +65,13 @@ __read_mostly int xen_have_vector_callback; | |||
| 65 | EXPORT_SYMBOL_GPL(xen_have_vector_callback); | 65 | EXPORT_SYMBOL_GPL(xen_have_vector_callback); |
| 66 | 66 | ||
| 67 | /* | 67 | /* |
| 68 | * NB: needs to live in .data because it's used by xen_prepare_pvh which runs | ||
| 69 | * before clearing the bss. | ||
| 70 | */ | ||
| 71 | uint32_t xen_start_flags __attribute__((section(".data"))) = 0; | ||
| 72 | EXPORT_SYMBOL(xen_start_flags); | ||
| 73 | |||
| 74 | /* | ||
| 68 | * Point at some empty memory to start with. We map the real shared_info | 75 | * Point at some empty memory to start with. We map the real shared_info |
| 69 | * page as soon as fixmap is up and running. | 76 | * page as soon as fixmap is up and running. |
| 70 | */ | 77 | */ |
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 357969a3697c..8d4e2e1ae60b 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c | |||
| @@ -1203,6 +1203,7 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
| 1203 | return; | 1203 | return; |
| 1204 | 1204 | ||
| 1205 | xen_domain_type = XEN_PV_DOMAIN; | 1205 | xen_domain_type = XEN_PV_DOMAIN; |
| 1206 | xen_start_flags = xen_start_info->flags; | ||
| 1206 | 1207 | ||
| 1207 | xen_setup_features(); | 1208 | xen_setup_features(); |
| 1208 | 1209 | ||
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c index aa1c6a6831a9..c85d1a88f476 100644 --- a/arch/x86/xen/enlighten_pvh.c +++ b/arch/x86/xen/enlighten_pvh.c | |||
| @@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void) | |||
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | xen_pvh = 1; | 99 | xen_pvh = 1; |
| 100 | xen_start_flags = pvh_start_info.flags; | ||
| 100 | 101 | ||
| 101 | msr = cpuid_ebx(xen_cpuid_base() + 2); | 102 | msr = cpuid_ebx(xen_cpuid_base() + 2); |
| 102 | pfn = __pa(hypercall_page); | 103 | pfn = __pa(hypercall_page); |
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index 2e20ae2fa2d6..e3b18ad49889 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <xen/interface/vcpu.h> | 32 | #include <xen/interface/vcpu.h> |
| 33 | #include <xen/interface/xenpmu.h> | 33 | #include <xen/interface/xenpmu.h> |
| 34 | 34 | ||
| 35 | #include <asm/spec-ctrl.h> | ||
| 35 | #include <asm/xen/interface.h> | 36 | #include <asm/xen/interface.h> |
| 36 | #include <asm/xen/hypercall.h> | 37 | #include <asm/xen/hypercall.h> |
| 37 | 38 | ||
| @@ -70,6 +71,8 @@ static void cpu_bringup(void) | |||
| 70 | cpu_data(cpu).x86_max_cores = 1; | 71 | cpu_data(cpu).x86_max_cores = 1; |
| 71 | set_cpu_sibling_map(cpu); | 72 | set_cpu_sibling_map(cpu); |
| 72 | 73 | ||
| 74 | speculative_store_bypass_ht_init(); | ||
| 75 | |||
| 73 | xen_setup_cpu_clockevents(); | 76 | xen_setup_cpu_clockevents(); |
| 74 | 77 | ||
| 75 | notify_cpu_starting(cpu); | 78 | notify_cpu_starting(cpu); |
| @@ -250,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus) | |||
| 250 | } | 253 | } |
| 251 | set_cpu_sibling_map(0); | 254 | set_cpu_sibling_map(0); |
| 252 | 255 | ||
| 256 | speculative_store_bypass_ht_init(); | ||
| 257 | |||
| 253 | xen_pmu_init(0); | 258 | xen_pmu_init(0); |
| 254 | 259 | ||
| 255 | if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0)) | 260 | if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0)) |
diff --git a/block/bio.c b/block/bio.c index 9710e275f230..67eff5eddc49 100644 --- a/block/bio.c +++ b/block/bio.c | |||
| @@ -1807,9 +1807,6 @@ again: | |||
| 1807 | if (!bio_integrity_endio(bio)) | 1807 | if (!bio_integrity_endio(bio)) |
| 1808 | return; | 1808 | return; |
| 1809 | 1809 | ||
| 1810 | if (WARN_ONCE(bio->bi_next, "driver left bi_next not NULL")) | ||
| 1811 | bio->bi_next = NULL; | ||
| 1812 | |||
| 1813 | /* | 1810 | /* |
| 1814 | * Need to have a real endio function for chained bios, otherwise | 1811 | * Need to have a real endio function for chained bios, otherwise |
| 1815 | * various corner cases will break (like stacking block devices that | 1812 | * various corner cases will break (like stacking block devices that |
diff --git a/block/blk-core.c b/block/blk-core.c index cf0ee764b908..afd2596ea3d3 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
| @@ -273,10 +273,6 @@ static void req_bio_endio(struct request *rq, struct bio *bio, | |||
| 273 | bio_advance(bio, nbytes); | 273 | bio_advance(bio, nbytes); |
| 274 | 274 | ||
| 275 | /* don't actually finish bio if it's part of flush sequence */ | 275 | /* don't actually finish bio if it's part of flush sequence */ |
| 276 | /* | ||
| 277 | * XXX this code looks suspicious - it's not consistent with advancing | ||
| 278 | * req->bio in caller | ||
| 279 | */ | ||
| 280 | if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) | 276 | if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) |
| 281 | bio_endio(bio); | 277 | bio_endio(bio); |
| 282 | } | 278 | } |
| @@ -3081,10 +3077,8 @@ bool blk_update_request(struct request *req, blk_status_t error, | |||
| 3081 | struct bio *bio = req->bio; | 3077 | struct bio *bio = req->bio; |
| 3082 | unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); | 3078 | unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); |
| 3083 | 3079 | ||
| 3084 | if (bio_bytes == bio->bi_iter.bi_size) { | 3080 | if (bio_bytes == bio->bi_iter.bi_size) |
| 3085 | req->bio = bio->bi_next; | 3081 | req->bio = bio->bi_next; |
| 3086 | bio->bi_next = NULL; | ||
| 3087 | } | ||
| 3088 | 3082 | ||
| 3089 | /* Completion has already been traced */ | 3083 | /* Completion has already been traced */ |
| 3090 | bio_clear_flag(bio, BIO_TRACE_COMPLETION); | 3084 | bio_clear_flag(bio, BIO_TRACE_COMPLETION); |
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index ffa622366922..1c4532e92938 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c | |||
| @@ -356,7 +356,7 @@ static const char *const blk_mq_rq_state_name_array[] = { | |||
| 356 | 356 | ||
| 357 | static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state) | 357 | static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state) |
| 358 | { | 358 | { |
| 359 | if (WARN_ON_ONCE((unsigned int)rq_state > | 359 | if (WARN_ON_ONCE((unsigned int)rq_state >= |
| 360 | ARRAY_SIZE(blk_mq_rq_state_name_array))) | 360 | ARRAY_SIZE(blk_mq_rq_state_name_array))) |
| 361 | return "(?)"; | 361 | return "(?)"; |
| 362 | return blk_mq_rq_state_name_array[rq_state]; | 362 | return blk_mq_rq_state_name_array[rq_state]; |
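This one-character fix and the sed-opal change further down are the same off-by-one: an N-element array only has valid indices 0..N-1, so the guard must reject index >= N, not just index > N. A minimal sketch of the guarded lookup pattern (the table contents are placeholders):

#include <stdio.h>

static const char *const state_name[] = { "idle", "in_flight", "complete" };
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Valid indices are 0 .. ARRAY_SIZE-1, so reject state >= ARRAY_SIZE;
 * a "state > ARRAY_SIZE" check would let state == 3 read past the end. */
static const char *name(unsigned int state)
{
	if (state >= ARRAY_SIZE(state_name))
		return "(?)";
	return state_name[state];
}

int main(void)
{
	printf("%s %s\n", name(1), name(3));	/* "in_flight (?)" */
	return 0;
}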
diff --git a/block/blk-mq.c b/block/blk-mq.c index 70c65bb6c013..b429d515b568 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -781,7 +781,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved) | |||
| 781 | WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER); | 781 | WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER); |
| 782 | } | 782 | } |
| 783 | 783 | ||
| 784 | req->rq_flags &= ~RQF_TIMED_OUT; | ||
| 785 | blk_add_timer(req); | 784 | blk_add_timer(req); |
| 786 | } | 785 | } |
| 787 | 786 | ||
diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 01e2b353a2b9..15c1f5e12eb8 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c | |||
| @@ -144,6 +144,7 @@ do_local: | |||
| 144 | 144 | ||
| 145 | local_irq_restore(flags); | 145 | local_irq_restore(flags); |
| 146 | } | 146 | } |
| 147 | EXPORT_SYMBOL(__blk_complete_request); | ||
| 147 | 148 | ||
| 148 | /** | 149 | /** |
| 149 | * blk_complete_request - end I/O on a request | 150 | * blk_complete_request - end I/O on a request |
diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 4b8a48d48ba1..f2cfd56e1606 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c | |||
| @@ -210,6 +210,7 @@ void blk_add_timer(struct request *req) | |||
| 210 | if (!req->timeout) | 210 | if (!req->timeout) |
| 211 | req->timeout = q->rq_timeout; | 211 | req->timeout = q->rq_timeout; |
| 212 | 212 | ||
| 213 | req->rq_flags &= ~RQF_TIMED_OUT; | ||
| 213 | blk_rq_set_deadline(req, jiffies + req->timeout); | 214 | blk_rq_set_deadline(req, jiffies + req->timeout); |
| 214 | 215 | ||
| 215 | /* | 216 | /* |
diff --git a/block/sed-opal.c b/block/sed-opal.c index 945f4b8610e0..e0de4dd448b3 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c | |||
| @@ -877,7 +877,7 @@ static size_t response_get_string(const struct parsed_resp *resp, int n, | |||
| 877 | return 0; | 877 | return 0; |
| 878 | } | 878 | } |
| 879 | 879 | ||
| 880 | if (n > resp->num) { | 880 | if (n >= resp->num) { |
| 881 | pr_debug("Response has %d tokens. Can't access %d\n", | 881 | pr_debug("Response has %d tokens. Can't access %d\n", |
| 882 | resp->num, n); | 882 | resp->num, n); |
| 883 | return 0; | 883 | return 0; |
| @@ -916,7 +916,7 @@ static u64 response_get_u64(const struct parsed_resp *resp, int n) | |||
| 916 | return 0; | 916 | return 0; |
| 917 | } | 917 | } |
| 918 | 918 | ||
| 919 | if (n > resp->num) { | 919 | if (n >= resp->num) { |
| 920 | pr_debug("Response has %d tokens. Can't access %d\n", | 920 | pr_debug("Response has %d tokens. Can't access %d\n", |
| 921 | resp->num, n); | 921 | resp->num, n); |
| 922 | return 0; | 922 | return 0; |
diff --git a/crypto/morus640.c b/crypto/morus640.c index 9fbcde307daf..5eede3749e64 100644 --- a/crypto/morus640.c +++ b/crypto/morus640.c | |||
| @@ -274,8 +274,9 @@ static void crypto_morus640_decrypt_chunk(struct morus640_state *state, u8 *dst, | |||
| 274 | union morus640_block_in tail; | 274 | union morus640_block_in tail; |
| 275 | 275 | ||
| 276 | memcpy(tail.bytes, src, size); | 276 | memcpy(tail.bytes, src, size); |
| 277 | memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size); | ||
| 277 | 278 | ||
| 278 | crypto_morus640_load_a(&m, src); | 279 | crypto_morus640_load_a(&m, tail.bytes); |
| 279 | crypto_morus640_core(state, &m); | 280 | crypto_morus640_core(state, &m); |
| 280 | crypto_morus640_store_a(tail.bytes, &m); | 281 | crypto_morus640_store_a(tail.bytes, &m); |
| 281 | memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size); | 282 | memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size); |
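The morus640 fix follows the usual pad-then-process pattern for a trailing partial block: copy the bytes that exist into a zeroed scratch block and hand that to the block routine, so no read ever goes past the caller's buffer. In the sketch below, process_block() and the 16-byte block size are placeholders standing in for the real MORUS primitives:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16	/* stands in for MORUS640_BLOCK_SIZE */

/* Placeholder for the real block transform. */
static void process_block(const unsigned char block[BLOCK_SIZE])
{
	for (int i = 0; i < BLOCK_SIZE; i++)
		printf("%02x", block[i]);
	printf("\n");
}

/* Handle a trailing partial block (size <= BLOCK_SIZE): copy what
 * exists, zero the rest, and only ever read `size` bytes from the
 * caller's buffer. */
static void process_tail(const unsigned char *src, size_t size)
{
	unsigned char tail[BLOCK_SIZE];

	memcpy(tail, src, size);
	memset(tail + size, 0, BLOCK_SIZE - size);
	process_block(tail);
}

int main(void)
{
	unsigned char msg[5] = { 1, 2, 3, 4, 5 };

	process_tail(msg, sizeof(msg));
	return 0;
}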
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 264ec12c0b9c..7f6735d9003f 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c | |||
| @@ -152,7 +152,7 @@ static SHA3_INLINE void keccakf_round(u64 st[25]) | |||
| 152 | st[24] ^= bc[ 4]; | 152 | st[24] ^= bc[ 4]; |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | static void __optimize("O3") keccakf(u64 st[25]) | 155 | static void keccakf(u64 st[25]) |
| 156 | { | 156 | { |
| 157 | int round; | 157 | int round; |
| 158 | 158 | ||
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 38a286975c31..f8fecfec5df9 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/pm_domain.h> | 22 | #include <linux/pm_domain.h> |
| 23 | #include <linux/pm_runtime.h> | 23 | #include <linux/pm_runtime.h> |
| 24 | #include <linux/pwm.h> | 24 | #include <linux/pwm.h> |
| 25 | #include <linux/suspend.h> | ||
| 25 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
| 26 | 27 | ||
| 27 | #include "internal.h" | 28 | #include "internal.h" |
| @@ -946,9 +947,10 @@ static void lpss_iosf_exit_d3_state(void) | |||
| 946 | mutex_unlock(&lpss_iosf_mutex); | 947 | mutex_unlock(&lpss_iosf_mutex); |
| 947 | } | 948 | } |
| 948 | 949 | ||
| 949 | static int acpi_lpss_suspend(struct device *dev, bool wakeup) | 950 | static int acpi_lpss_suspend(struct device *dev, bool runtime) |
| 950 | { | 951 | { |
| 951 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); | 952 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); |
| 953 | bool wakeup = runtime || device_may_wakeup(dev); | ||
| 952 | int ret; | 954 | int ret; |
| 953 | 955 | ||
| 954 | if (pdata->dev_desc->flags & LPSS_SAVE_CTX) | 956 | if (pdata->dev_desc->flags & LPSS_SAVE_CTX) |
| @@ -961,13 +963,14 @@ static int acpi_lpss_suspend(struct device *dev, bool wakeup) | |||
| 961 | * wrong status for devices being about to be powered off. See | 963 | * wrong status for devices being about to be powered off. See |
| 962 | * lpss_iosf_enter_d3_state() for further information. | 964 | * lpss_iosf_enter_d3_state() for further information. |
| 963 | */ | 965 | */ |
| 964 | if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) | 966 | if ((runtime || !pm_suspend_via_firmware()) && |
| 967 | lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) | ||
| 965 | lpss_iosf_enter_d3_state(); | 968 | lpss_iosf_enter_d3_state(); |
| 966 | 969 | ||
| 967 | return ret; | 970 | return ret; |
| 968 | } | 971 | } |
| 969 | 972 | ||
| 970 | static int acpi_lpss_resume(struct device *dev) | 973 | static int acpi_lpss_resume(struct device *dev, bool runtime) |
| 971 | { | 974 | { |
| 972 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); | 975 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); |
| 973 | int ret; | 976 | int ret; |
| @@ -976,7 +979,8 @@ static int acpi_lpss_resume(struct device *dev) | |||
| 976 | * This call is kept first to be in symmetry with | 979 | * This call is kept first to be in symmetry with |
| 977 | * acpi_lpss_runtime_suspend() one. | 980 | * acpi_lpss_runtime_suspend() one. |
| 978 | */ | 981 | */ |
| 979 | if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) | 982 | if ((runtime || !pm_resume_via_firmware()) && |
| 983 | lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) | ||
| 980 | lpss_iosf_exit_d3_state(); | 984 | lpss_iosf_exit_d3_state(); |
| 981 | 985 | ||
| 982 | ret = acpi_dev_resume(dev); | 986 | ret = acpi_dev_resume(dev); |
| @@ -1000,12 +1004,12 @@ static int acpi_lpss_suspend_late(struct device *dev) | |||
| 1000 | return 0; | 1004 | return 0; |
| 1001 | 1005 | ||
| 1002 | ret = pm_generic_suspend_late(dev); | 1006 | ret = pm_generic_suspend_late(dev); |
| 1003 | return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev)); | 1007 | return ret ? ret : acpi_lpss_suspend(dev, false); |
| 1004 | } | 1008 | } |
| 1005 | 1009 | ||
| 1006 | static int acpi_lpss_resume_early(struct device *dev) | 1010 | static int acpi_lpss_resume_early(struct device *dev) |
| 1007 | { | 1011 | { |
| 1008 | int ret = acpi_lpss_resume(dev); | 1012 | int ret = acpi_lpss_resume(dev, false); |
| 1009 | 1013 | ||
| 1010 | return ret ? ret : pm_generic_resume_early(dev); | 1014 | return ret ? ret : pm_generic_resume_early(dev); |
| 1011 | } | 1015 | } |
| @@ -1020,7 +1024,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev) | |||
| 1020 | 1024 | ||
| 1021 | static int acpi_lpss_runtime_resume(struct device *dev) | 1025 | static int acpi_lpss_runtime_resume(struct device *dev) |
| 1022 | { | 1026 | { |
| 1023 | int ret = acpi_lpss_resume(dev); | 1027 | int ret = acpi_lpss_resume(dev, true); |
| 1024 | 1028 | ||
| 1025 | return ret ? ret : pm_generic_runtime_resume(dev); | 1029 | return ret ? ret : pm_generic_runtime_resume(dev); |
| 1026 | } | 1030 | } |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index bb94cf0731fe..442a9e24f439 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
| @@ -2037,6 +2037,17 @@ static inline void acpi_ec_query_exit(void) | |||
| 2037 | } | 2037 | } |
| 2038 | } | 2038 | } |
| 2039 | 2039 | ||
| 2040 | static const struct dmi_system_id acpi_ec_no_wakeup[] = { | ||
| 2041 | { | ||
| 2042 | .ident = "Thinkpad X1 Carbon 6th", | ||
| 2043 | .matches = { | ||
| 2044 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
| 2045 | DMI_MATCH(DMI_PRODUCT_NAME, "20KGS3JF01"), | ||
| 2046 | }, | ||
| 2047 | }, | ||
| 2048 | { }, | ||
| 2049 | }; | ||
| 2050 | |||
| 2040 | int __init acpi_ec_init(void) | 2051 | int __init acpi_ec_init(void) |
| 2041 | { | 2052 | { |
| 2042 | int result; | 2053 | int result; |
| @@ -2047,6 +2058,15 @@ int __init acpi_ec_init(void) | |||
| 2047 | if (result) | 2058 | if (result) |
| 2048 | return result; | 2059 | return result; |
| 2049 | 2060 | ||
| 2061 | /* | ||
| 2062 | * Disable EC wakeup on following systems to prevent periodic | ||
| 2063 | * wakeup from EC GPE. | ||
| 2064 | */ | ||
| 2065 | if (dmi_check_system(acpi_ec_no_wakeup)) { | ||
| 2066 | ec_no_wakeup = true; | ||
| 2067 | pr_debug("Disabling EC wakeup on suspend-to-idle\n"); | ||
| 2068 | } | ||
| 2069 | |||
| 2050 | /* Drivers must be started after acpi_ec_query_init() */ | 2070 | /* Drivers must be started after acpi_ec_query_init() */ |
| 2051 | dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); | 2071 | dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); |
| 2052 | /* | 2072 | /* |
diff --git a/drivers/base/core.c b/drivers/base/core.c index 36622b52e419..df3e1a44707a 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
| @@ -236,6 +236,13 @@ struct device_link *device_link_add(struct device *consumer, | |||
| 236 | link->rpm_active = true; | 236 | link->rpm_active = true; |
| 237 | } | 237 | } |
| 238 | pm_runtime_new_link(consumer); | 238 | pm_runtime_new_link(consumer); |
| 239 | /* | ||
| 240 | * If the link is being added by the consumer driver at probe | ||
| 241 | * time, balance the decrementation of the supplier's runtime PM | ||
| 242 | * usage counter after consumer probe in driver_probe_device(). | ||
| 243 | */ | ||
| 244 | if (consumer->links.status == DL_DEV_PROBING) | ||
| 245 | pm_runtime_get_noresume(supplier); | ||
| 239 | } | 246 | } |
| 240 | get_device(supplier); | 247 | get_device(supplier); |
| 241 | link->supplier = supplier; | 248 | link->supplier = supplier; |
| @@ -255,12 +262,12 @@ struct device_link *device_link_add(struct device *consumer, | |||
| 255 | switch (consumer->links.status) { | 262 | switch (consumer->links.status) { |
| 256 | case DL_DEV_PROBING: | 263 | case DL_DEV_PROBING: |
| 257 | /* | 264 | /* |
| 258 | * Balance the decrementation of the supplier's | 265 | * Some callers expect the link creation during |
| 259 | * runtime PM usage counter after consumer probe | 266 | * consumer driver probe to resume the supplier |
| 260 | * in driver_probe_device(). | 267 | * even without DL_FLAG_RPM_ACTIVE. |
| 261 | */ | 268 | */ |
| 262 | if (flags & DL_FLAG_PM_RUNTIME) | 269 | if (flags & DL_FLAG_PM_RUNTIME) |
| 263 | pm_runtime_get_sync(supplier); | 270 | pm_runtime_resume(supplier); |
| 264 | 271 | ||
| 265 | link->status = DL_STATE_CONSUMER_PROBE; | 272 | link->status = DL_STATE_CONSUMER_PROBE; |
| 266 | break; | 273 | break; |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 3b7083b8ecbb..74a05561b620 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
| @@ -76,6 +76,7 @@ struct link_dead_args { | |||
| 76 | #define NBD_HAS_CONFIG_REF 4 | 76 | #define NBD_HAS_CONFIG_REF 4 |
| 77 | #define NBD_BOUND 5 | 77 | #define NBD_BOUND 5 |
| 78 | #define NBD_DESTROY_ON_DISCONNECT 6 | 78 | #define NBD_DESTROY_ON_DISCONNECT 6 |
| 79 | #define NBD_DISCONNECT_ON_CLOSE 7 | ||
| 79 | 80 | ||
| 80 | struct nbd_config { | 81 | struct nbd_config { |
| 81 | u32 flags; | 82 | u32 flags; |
| @@ -138,6 +139,7 @@ static void nbd_config_put(struct nbd_device *nbd); | |||
| 138 | static void nbd_connect_reply(struct genl_info *info, int index); | 139 | static void nbd_connect_reply(struct genl_info *info, int index); |
| 139 | static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info); | 140 | static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info); |
| 140 | static void nbd_dead_link_work(struct work_struct *work); | 141 | static void nbd_dead_link_work(struct work_struct *work); |
| 142 | static void nbd_disconnect_and_put(struct nbd_device *nbd); | ||
| 141 | 143 | ||
| 142 | static inline struct device *nbd_to_dev(struct nbd_device *nbd) | 144 | static inline struct device *nbd_to_dev(struct nbd_device *nbd) |
| 143 | { | 145 | { |
| @@ -1305,6 +1307,12 @@ out: | |||
| 1305 | static void nbd_release(struct gendisk *disk, fmode_t mode) | 1307 | static void nbd_release(struct gendisk *disk, fmode_t mode) |
| 1306 | { | 1308 | { |
| 1307 | struct nbd_device *nbd = disk->private_data; | 1309 | struct nbd_device *nbd = disk->private_data; |
| 1310 | struct block_device *bdev = bdget_disk(disk, 0); | ||
| 1311 | |||
| 1312 | if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && | ||
| 1313 | bdev->bd_openers == 0) | ||
| 1314 | nbd_disconnect_and_put(nbd); | ||
| 1315 | |||
| 1308 | nbd_config_put(nbd); | 1316 | nbd_config_put(nbd); |
| 1309 | nbd_put(nbd); | 1317 | nbd_put(nbd); |
| 1310 | } | 1318 | } |
| @@ -1705,6 +1713,10 @@ again: | |||
| 1705 | &config->runtime_flags); | 1713 | &config->runtime_flags); |
| 1706 | put_dev = true; | 1714 | put_dev = true; |
| 1707 | } | 1715 | } |
| 1716 | if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { | ||
| 1717 | set_bit(NBD_DISCONNECT_ON_CLOSE, | ||
| 1718 | &config->runtime_flags); | ||
| 1719 | } | ||
| 1708 | } | 1720 | } |
| 1709 | 1721 | ||
| 1710 | if (info->attrs[NBD_ATTR_SOCKETS]) { | 1722 | if (info->attrs[NBD_ATTR_SOCKETS]) { |
| @@ -1749,6 +1761,17 @@ out: | |||
| 1749 | return ret; | 1761 | return ret; |
| 1750 | } | 1762 | } |
| 1751 | 1763 | ||
| 1764 | static void nbd_disconnect_and_put(struct nbd_device *nbd) | ||
| 1765 | { | ||
| 1766 | mutex_lock(&nbd->config_lock); | ||
| 1767 | nbd_disconnect(nbd); | ||
| 1768 | nbd_clear_sock(nbd); | ||
| 1769 | mutex_unlock(&nbd->config_lock); | ||
| 1770 | if (test_and_clear_bit(NBD_HAS_CONFIG_REF, | ||
| 1771 | &nbd->config->runtime_flags)) | ||
| 1772 | nbd_config_put(nbd); | ||
| 1773 | } | ||
| 1774 | |||
| 1752 | static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info) | 1775 | static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info) |
| 1753 | { | 1776 | { |
| 1754 | struct nbd_device *nbd; | 1777 | struct nbd_device *nbd; |
| @@ -1781,13 +1804,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info) | |||
| 1781 | nbd_put(nbd); | 1804 | nbd_put(nbd); |
| 1782 | return 0; | 1805 | return 0; |
| 1783 | } | 1806 | } |
| 1784 | mutex_lock(&nbd->config_lock); | 1807 | nbd_disconnect_and_put(nbd); |
| 1785 | nbd_disconnect(nbd); | ||
| 1786 | nbd_clear_sock(nbd); | ||
| 1787 | mutex_unlock(&nbd->config_lock); | ||
| 1788 | if (test_and_clear_bit(NBD_HAS_CONFIG_REF, | ||
| 1789 | &nbd->config->runtime_flags)) | ||
| 1790 | nbd_config_put(nbd); | ||
| 1791 | nbd_config_put(nbd); | 1808 | nbd_config_put(nbd); |
| 1792 | nbd_put(nbd); | 1809 | nbd_put(nbd); |
| 1793 | return 0; | 1810 | return 0; |
| @@ -1798,7 +1815,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) | |||
| 1798 | struct nbd_device *nbd = NULL; | 1815 | struct nbd_device *nbd = NULL; |
| 1799 | struct nbd_config *config; | 1816 | struct nbd_config *config; |
| 1800 | int index; | 1817 | int index; |
| 1801 | int ret = -EINVAL; | 1818 | int ret = 0; |
| 1802 | bool put_dev = false; | 1819 | bool put_dev = false; |
| 1803 | 1820 | ||
| 1804 | if (!netlink_capable(skb, CAP_SYS_ADMIN)) | 1821 | if (!netlink_capable(skb, CAP_SYS_ADMIN)) |
| @@ -1838,6 +1855,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) | |||
| 1838 | !nbd->task_recv) { | 1855 | !nbd->task_recv) { |
| 1839 | dev_err(nbd_to_dev(nbd), | 1856 | dev_err(nbd_to_dev(nbd), |
| 1840 | "not configured, cannot reconfigure\n"); | 1857 | "not configured, cannot reconfigure\n"); |
| 1858 | ret = -EINVAL; | ||
| 1841 | goto out; | 1859 | goto out; |
| 1842 | } | 1860 | } |
| 1843 | 1861 | ||
| @@ -1862,6 +1880,14 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) | |||
| 1862 | &config->runtime_flags)) | 1880 | &config->runtime_flags)) |
| 1863 | refcount_inc(&nbd->refs); | 1881 | refcount_inc(&nbd->refs); |
| 1864 | } | 1882 | } |
| 1883 | |||
| 1884 | if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { | ||
| 1885 | set_bit(NBD_DISCONNECT_ON_CLOSE, | ||
| 1886 | &config->runtime_flags); | ||
| 1887 | } else { | ||
| 1888 | clear_bit(NBD_DISCONNECT_ON_CLOSE, | ||
| 1889 | &config->runtime_flags); | ||
| 1890 | } | ||
| 1865 | } | 1891 | } |
| 1866 | 1892 | ||
| 1867 | if (info->attrs[NBD_ATTR_SOCKETS]) { | 1893 | if (info->attrs[NBD_ATTR_SOCKETS]) { |
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 7948049f6c43..042c778e5a4e 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
| @@ -1365,7 +1365,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio) | |||
| 1365 | static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq) | 1365 | static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq) |
| 1366 | { | 1366 | { |
| 1367 | pr_info("null: rq %p timed out\n", rq); | 1367 | pr_info("null: rq %p timed out\n", rq); |
| 1368 | blk_mq_complete_request(rq); | 1368 | __blk_complete_request(rq); |
| 1369 | return BLK_EH_DONE; | 1369 | return BLK_EH_DONE; |
| 1370 | } | 1370 | } |
| 1371 | 1371 | ||
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 91bb98c42a1c..aaf9e5afaad4 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c | |||
| @@ -516,11 +516,18 @@ EXPORT_SYMBOL_GPL(hwrng_register); | |||
| 516 | 516 | ||
| 517 | void hwrng_unregister(struct hwrng *rng) | 517 | void hwrng_unregister(struct hwrng *rng) |
| 518 | { | 518 | { |
| 519 | int err; | ||
| 520 | |||
| 519 | mutex_lock(&rng_mutex); | 521 | mutex_lock(&rng_mutex); |
| 520 | 522 | ||
| 521 | list_del(&rng->list); | 523 | list_del(&rng->list); |
| 522 | if (current_rng == rng) | 524 | if (current_rng == rng) { |
| 523 | enable_best_rng(); | 525 | err = enable_best_rng(); |
| 526 | if (err) { | ||
| 527 | drop_current_rng(); | ||
| 528 | cur_rng_set_by_user = 0; | ||
| 529 | } | ||
| 530 | } | ||
| 524 | 531 | ||
| 525 | if (list_empty(&rng_list)) { | 532 | if (list_empty(&rng_list)) { |
| 526 | mutex_unlock(&rng_mutex); | 533 | mutex_unlock(&rng_mutex); |
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c index e5cdc3af684c..2717f88c7904 100644 --- a/drivers/clocksource/timer-stm32.c +++ b/drivers/clocksource/timer-stm32.c | |||
| @@ -304,8 +304,10 @@ static int __init stm32_timer_init(struct device_node *node) | |||
| 304 | 304 | ||
| 305 | to->private_data = kzalloc(sizeof(struct stm32_timer_private), | 305 | to->private_data = kzalloc(sizeof(struct stm32_timer_private), |
| 306 | GFP_KERNEL); | 306 | GFP_KERNEL); |
| 307 | if (!to->private_data) | 307 | if (!to->private_data) { |
| 308 | ret = -ENOMEM; | ||
| 308 | goto deinit; | 309 | goto deinit; |
| 310 | } | ||
| 309 | 311 | ||
| 310 | rstc = of_reset_control_get(node, NULL); | 312 | rstc = of_reset_control_get(node, NULL); |
| 311 | if (!IS_ERR(rstc)) { | 313 | if (!IS_ERR(rstc)) { |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 1de5ec8d5ea3..ece120da3353 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -294,6 +294,7 @@ struct pstate_funcs { | |||
| 294 | static struct pstate_funcs pstate_funcs __read_mostly; | 294 | static struct pstate_funcs pstate_funcs __read_mostly; |
| 295 | 295 | ||
| 296 | static int hwp_active __read_mostly; | 296 | static int hwp_active __read_mostly; |
| 297 | static int hwp_mode_bdw __read_mostly; | ||
| 297 | static bool per_cpu_limits __read_mostly; | 298 | static bool per_cpu_limits __read_mostly; |
| 298 | static bool hwp_boost __read_mostly; | 299 | static bool hwp_boost __read_mostly; |
| 299 | 300 | ||
| @@ -1413,7 +1414,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) | |||
| 1413 | cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); | 1414 | cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); |
| 1414 | cpu->pstate.scaling = pstate_funcs.get_scaling(); | 1415 | cpu->pstate.scaling = pstate_funcs.get_scaling(); |
| 1415 | cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; | 1416 | cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; |
| 1416 | cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; | 1417 | |
| 1418 | if (hwp_active && !hwp_mode_bdw) { | ||
| 1419 | unsigned int phy_max, current_max; | ||
| 1420 | |||
| 1421 | intel_pstate_get_hwp_max(cpu->cpu, &phy_max, ¤t_max); | ||
| 1422 | cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling; | ||
| 1423 | } else { | ||
| 1424 | cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; | ||
| 1425 | } | ||
| 1417 | 1426 | ||
| 1418 | if (pstate_funcs.get_aperf_mperf_shift) | 1427 | if (pstate_funcs.get_aperf_mperf_shift) |
| 1419 | cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); | 1428 | cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); |
| @@ -2467,28 +2476,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; } | |||
| 2467 | static inline void intel_pstate_request_control_from_smm(void) {} | 2476 | static inline void intel_pstate_request_control_from_smm(void) {} |
| 2468 | #endif /* CONFIG_ACPI */ | 2477 | #endif /* CONFIG_ACPI */ |
| 2469 | 2478 | ||
| 2479 | #define INTEL_PSTATE_HWP_BROADWELL 0x01 | ||
| 2480 | |||
| 2481 | #define ICPU_HWP(model, hwp_mode) \ | ||
| 2482 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode } | ||
| 2483 | |||
| 2470 | static const struct x86_cpu_id hwp_support_ids[] __initconst = { | 2484 | static const struct x86_cpu_id hwp_support_ids[] __initconst = { |
| 2471 | { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP }, | 2485 | ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), |
| 2486 | ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL), | ||
| 2487 | ICPU_HWP(X86_MODEL_ANY, 0), | ||
| 2472 | {} | 2488 | {} |
| 2473 | }; | 2489 | }; |
| 2474 | 2490 | ||
| 2475 | static int __init intel_pstate_init(void) | 2491 | static int __init intel_pstate_init(void) |
| 2476 | { | 2492 | { |
| 2493 | const struct x86_cpu_id *id; | ||
| 2477 | int rc; | 2494 | int rc; |
| 2478 | 2495 | ||
| 2479 | if (no_load) | 2496 | if (no_load) |
| 2480 | return -ENODEV; | 2497 | return -ENODEV; |
| 2481 | 2498 | ||
| 2482 | if (x86_match_cpu(hwp_support_ids)) { | 2499 | id = x86_match_cpu(hwp_support_ids); |
| 2500 | if (id) { | ||
| 2483 | copy_cpu_funcs(&core_funcs); | 2501 | copy_cpu_funcs(&core_funcs); |
| 2484 | if (!no_hwp) { | 2502 | if (!no_hwp) { |
| 2485 | hwp_active++; | 2503 | hwp_active++; |
| 2504 | hwp_mode_bdw = id->driver_data; | ||
| 2486 | intel_pstate.attr = hwp_cpufreq_attrs; | 2505 | intel_pstate.attr = hwp_cpufreq_attrs; |
| 2487 | goto hwp_cpu_matched; | 2506 | goto hwp_cpu_matched; |
| 2488 | } | 2507 | } |
| 2489 | } else { | 2508 | } else { |
| 2490 | const struct x86_cpu_id *id; | ||
| 2491 | |||
| 2492 | id = x86_match_cpu(intel_pstate_cpu_ids); | 2509 | id = x86_match_cpu(intel_pstate_cpu_ids); |
| 2493 | if (!id) | 2510 | if (!id) |
| 2494 | return -ENODEV; | 2511 | return -ENODEV; |
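
The intel_pstate hunks above rely on x86_match_cpu() returning the first matching row, so the explicit Broadwell Xeon entries must sit ahead of the X86_MODEL_ANY catch-all; the matched entry's driver_data then seeds hwp_mode_bdw, which later decides whether turbo_freq comes from the HWP capabilities maximum. A condensed sketch of that lookup (table abbreviated, names mirror the hunk, not the full driver):

#include <linux/errno.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

#define INTEL_PSTATE_HWP_BROADWELL 0x01

static const struct x86_cpu_id hwp_ids[] = {
	/* Specific models first: x86_match_cpu() stops at the first hit. */
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_BROADWELL_X, X86_FEATURE_HWP,
	  INTEL_PSTATE_HWP_BROADWELL },
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP, 0 },
	{}
};

static int pick_hwp_mode(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(hwp_ids);

	return id ? id->driver_data : -ENODEV; /* 0x01 only on the Broadwell Xeons */
}
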
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c index d049fe4b80c4..01bddacf5c3b 100644 --- a/drivers/cpufreq/qcom-cpufreq-kryo.c +++ b/drivers/cpufreq/qcom-cpufreq-kryo.c | |||
| @@ -42,6 +42,8 @@ enum _msm8996_version { | |||
| 42 | NUM_OF_MSM8996_VERSIONS, | 42 | NUM_OF_MSM8996_VERSIONS, |
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| 45 | struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev; | ||
| 46 | |||
| 45 | static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void) | 47 | static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void) |
| 46 | { | 48 | { |
| 47 | size_t len; | 49 | size_t len; |
| @@ -74,7 +76,6 @@ static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void) | |||
| 74 | static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) | 76 | static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) |
| 75 | { | 77 | { |
| 76 | struct opp_table *opp_tables[NR_CPUS] = {0}; | 78 | struct opp_table *opp_tables[NR_CPUS] = {0}; |
| 77 | struct platform_device *cpufreq_dt_pdev; | ||
| 78 | enum _msm8996_version msm8996_version; | 79 | enum _msm8996_version msm8996_version; |
| 79 | struct nvmem_cell *speedbin_nvmem; | 80 | struct nvmem_cell *speedbin_nvmem; |
| 80 | struct device_node *np; | 81 | struct device_node *np; |
| @@ -115,6 +116,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) | |||
| 115 | 116 | ||
| 116 | speedbin = nvmem_cell_read(speedbin_nvmem, &len); | 117 | speedbin = nvmem_cell_read(speedbin_nvmem, &len); |
| 117 | nvmem_cell_put(speedbin_nvmem); | 118 | nvmem_cell_put(speedbin_nvmem); |
| 119 | if (IS_ERR(speedbin)) | ||
| 120 | return PTR_ERR(speedbin); | ||
| 118 | 121 | ||
| 119 | switch (msm8996_version) { | 122 | switch (msm8996_version) { |
| 120 | case MSM8996_V3: | 123 | case MSM8996_V3: |
| @@ -127,6 +130,7 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) | |||
| 127 | BUG(); | 130 | BUG(); |
| 128 | break; | 131 | break; |
| 129 | } | 132 | } |
| 133 | kfree(speedbin); | ||
| 130 | 134 | ||
| 131 | for_each_possible_cpu(cpu) { | 135 | for_each_possible_cpu(cpu) { |
| 132 | cpu_dev = get_cpu_device(cpu); | 136 | cpu_dev = get_cpu_device(cpu); |
| @@ -162,8 +166,15 @@ free_opp: | |||
| 162 | return ret; | 166 | return ret; |
| 163 | } | 167 | } |
| 164 | 168 | ||
| 169 | static int qcom_cpufreq_kryo_remove(struct platform_device *pdev) | ||
| 170 | { | ||
| 171 | platform_device_unregister(cpufreq_dt_pdev); | ||
| 172 | return 0; | ||
| 173 | } | ||
| 174 | |||
| 165 | static struct platform_driver qcom_cpufreq_kryo_driver = { | 175 | static struct platform_driver qcom_cpufreq_kryo_driver = { |
| 166 | .probe = qcom_cpufreq_kryo_probe, | 176 | .probe = qcom_cpufreq_kryo_probe, |
| 177 | .remove = qcom_cpufreq_kryo_remove, | ||
| 167 | .driver = { | 178 | .driver = { |
| 168 | .name = "qcom-cpufreq-kryo", | 179 | .name = "qcom-cpufreq-kryo", |
| 169 | }, | 180 | }, |
| @@ -198,8 +209,9 @@ static int __init qcom_cpufreq_kryo_init(void) | |||
| 198 | if (unlikely(ret < 0)) | 209 | if (unlikely(ret < 0)) |
| 199 | return ret; | 210 | return ret; |
| 200 | 211 | ||
| 201 | ret = PTR_ERR_OR_ZERO(platform_device_register_simple( | 212 | kryo_cpufreq_pdev = platform_device_register_simple( |
| 202 | "qcom-cpufreq-kryo", -1, NULL, 0)); | 213 | "qcom-cpufreq-kryo", -1, NULL, 0); |
| 214 | ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev); | ||
| 203 | if (0 == ret) | 215 | if (0 == ret) |
| 204 | return 0; | 216 | return 0; |
| 205 | 217 | ||
| @@ -208,5 +220,12 @@ static int __init qcom_cpufreq_kryo_init(void) | |||
| 208 | } | 220 | } |
| 209 | module_init(qcom_cpufreq_kryo_init); | 221 | module_init(qcom_cpufreq_kryo_init); |
| 210 | 222 | ||
| 223 | static void __init qcom_cpufreq_kryo_exit(void) | ||
| 224 | { | ||
| 225 | platform_device_unregister(kryo_cpufreq_pdev); | ||
| 226 | platform_driver_unregister(&qcom_cpufreq_kryo_driver); | ||
| 227 | } | ||
| 228 | module_exit(qcom_cpufreq_kryo_exit); | ||
| 229 | |||
| 211 | MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver"); | 230 | MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver"); |
| 212 | MODULE_LICENSE("GPL v2"); | 231 | MODULE_LICENSE("GPL v2"); |
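
The qcom-cpufreq-kryo hunks above make every registration reversible: probe() stores the cpufreq-dt child so remove() can unregister it, and the driver's own platform device, created at module_init() time, is now torn down in the new module_exit(). A stripped-down sketch of that pairing with hypothetical names (not the kryo driver itself):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_device *child_pdev, *self_pdev;

static int glue_probe(struct platform_device *pdev)
{
	child_pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
	return PTR_ERR_OR_ZERO(child_pdev);
}

static int glue_remove(struct platform_device *pdev)
{
	platform_device_unregister(child_pdev);	/* undo what probe did */
	return 0;
}

static struct platform_driver glue_driver = {
	.probe = glue_probe,
	.remove = glue_remove,
	.driver = { .name = "glue-example" },
};

static int __init glue_init(void)
{
	int ret = platform_driver_register(&glue_driver);

	if (ret)
		return ret;

	self_pdev = platform_device_register_simple("glue-example", -1, NULL, 0);
	ret = PTR_ERR_OR_ZERO(self_pdev);
	if (ret)
		platform_driver_unregister(&glue_driver);
	return ret;
}
module_init(glue_init);

static void __exit glue_exit(void)
{
	platform_device_unregister(self_pdev);	/* undo what init did */
	platform_driver_unregister(&glue_driver);
}
module_exit(glue_exit);

MODULE_LICENSE("GPL v2");
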
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 00c7aab8e7d0..afebbd87c4aa 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c | |||
| @@ -1548,15 +1548,14 @@ skip_copy: | |||
| 1548 | tp->urg_data = 0; | 1548 | tp->urg_data = 0; |
| 1549 | 1549 | ||
| 1550 | if ((avail + offset) >= skb->len) { | 1550 | if ((avail + offset) >= skb->len) { |
| 1551 | if (likely(skb)) | ||
| 1552 | chtls_free_skb(sk, skb); | ||
| 1553 | buffers_freed++; | ||
| 1554 | if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) { | 1551 | if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) { |
| 1555 | tp->copied_seq += skb->len; | 1552 | tp->copied_seq += skb->len; |
| 1556 | hws->rcvpld = skb->hdr_len; | 1553 | hws->rcvpld = skb->hdr_len; |
| 1557 | } else { | 1554 | } else { |
| 1558 | tp->copied_seq += hws->rcvpld; | 1555 | tp->copied_seq += hws->rcvpld; |
| 1559 | } | 1556 | } |
| 1557 | chtls_free_skb(sk, skb); | ||
| 1558 | buffers_freed++; | ||
| 1560 | hws->copied_seq = 0; | 1559 | hws->copied_seq = 0; |
| 1561 | if (copied >= target && | 1560 | if (copied >= target && |
| 1562 | !skb_peek(&sk->sk_receive_queue)) | 1561 | !skb_peek(&sk->sk_receive_queue)) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3317d1536f4f..6e5284e6c028 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -2158,10 +2158,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) | |||
| 2158 | switch (asic_type) { | 2158 | switch (asic_type) { |
| 2159 | #if defined(CONFIG_DRM_AMD_DC) | 2159 | #if defined(CONFIG_DRM_AMD_DC) |
| 2160 | case CHIP_BONAIRE: | 2160 | case CHIP_BONAIRE: |
| 2161 | case CHIP_HAWAII: | ||
| 2162 | case CHIP_KAVERI: | 2161 | case CHIP_KAVERI: |
| 2163 | case CHIP_KABINI: | 2162 | case CHIP_KABINI: |
| 2164 | case CHIP_MULLINS: | 2163 | case CHIP_MULLINS: |
| 2164 | /* | ||
| 2165 | * We have systems in the wild with these ASICs that require | ||
| 2166 | * LVDS and VGA support which is not supported with DC. | ||
| 2167 | * | ||
| 2168 | * Fallback to the non-DC driver here by default so as not to | ||
| 2169 | * cause regressions. | ||
| 2170 | */ | ||
| 2171 | return amdgpu_dc > 0; | ||
| 2172 | case CHIP_HAWAII: | ||
| 2165 | case CHIP_CARRIZO: | 2173 | case CHIP_CARRIZO: |
| 2166 | case CHIP_STONEY: | 2174 | case CHIP_STONEY: |
| 2167 | case CHIP_POLARIS10: | 2175 | case CHIP_POLARIS10: |
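
The fallback comment in the amdgpu hunk above only works because amdgpu_dc is a tri-state module parameter; assuming the usual semantics (-1 auto by default, 0 off, 1 on), the test "amdgpu_dc > 0" is true only when the user explicitly booted with amdgpu.dc=1, so these LVDS/VGA-era ASICs stay on the non-DC path unless forced. In sketch form:

#include <linux/types.h>

/* Assumed parameter semantics: -1 = auto (default), 0 = off, 1 = on. */
static int amdgpu_dc = -1;

static bool use_dc_on_legacy_asic(void)
{
	return amdgpu_dc > 0;	/* only an explicit amdgpu.dc=1 opts in */
}
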
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 5e4e1bd90383..3526efa8960e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
| @@ -762,8 +762,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, | |||
| 762 | domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); | 762 | domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); |
| 763 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) { | 763 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) { |
| 764 | adev->vram_pin_size += amdgpu_bo_size(bo); | 764 | adev->vram_pin_size += amdgpu_bo_size(bo); |
| 765 | if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) | 765 | adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo); |
| 766 | adev->invisible_pin_size += amdgpu_bo_size(bo); | ||
| 767 | } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { | 766 | } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { |
| 768 | adev->gart_pin_size += amdgpu_bo_size(bo); | 767 | adev->gart_pin_size += amdgpu_bo_size(bo); |
| 769 | } | 768 | } |
| @@ -790,25 +789,22 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) | |||
| 790 | bo->pin_count--; | 789 | bo->pin_count--; |
| 791 | if (bo->pin_count) | 790 | if (bo->pin_count) |
| 792 | return 0; | 791 | return 0; |
| 793 | for (i = 0; i < bo->placement.num_placement; i++) { | ||
| 794 | bo->placements[i].lpfn = 0; | ||
| 795 | bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; | ||
| 796 | } | ||
| 797 | r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); | ||
| 798 | if (unlikely(r)) { | ||
| 799 | dev_err(adev->dev, "%p validate failed for unpin\n", bo); | ||
| 800 | goto error; | ||
| 801 | } | ||
| 802 | 792 | ||
| 803 | if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { | 793 | if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { |
| 804 | adev->vram_pin_size -= amdgpu_bo_size(bo); | 794 | adev->vram_pin_size -= amdgpu_bo_size(bo); |
| 805 | if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) | 795 | adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo); |
| 806 | adev->invisible_pin_size -= amdgpu_bo_size(bo); | ||
| 807 | } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { | 796 | } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { |
| 808 | adev->gart_pin_size -= amdgpu_bo_size(bo); | 797 | adev->gart_pin_size -= amdgpu_bo_size(bo); |
| 809 | } | 798 | } |
| 810 | 799 | ||
| 811 | error: | 800 | for (i = 0; i < bo->placement.num_placement; i++) { |
| 801 | bo->placements[i].lpfn = 0; | ||
| 802 | bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; | ||
| 803 | } | ||
| 804 | r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); | ||
| 805 | if (unlikely(r)) | ||
| 806 | dev_err(adev->dev, "%p validate failed for unpin\n", bo); | ||
| 807 | |||
| 812 | return r; | 808 | return r; |
| 813 | } | 809 | } |
| 814 | 810 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index e969c879d87e..e5da4654b630 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | |||
| @@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem); | |||
| 73 | uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); | 73 | uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); |
| 74 | int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); | 74 | int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); |
| 75 | 75 | ||
| 76 | u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo); | ||
| 76 | uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); | 77 | uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); |
| 77 | uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); | 78 | uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); |
| 78 | 79 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index bcf68f80bbf0..3ff08e326838 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
| @@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) | |||
| 130 | unsigned version_major, version_minor, family_id; | 130 | unsigned version_major, version_minor, family_id; |
| 131 | int i, j, r; | 131 | int i, j, r; |
| 132 | 132 | ||
| 133 | INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler); | 133 | INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); |
| 134 | 134 | ||
| 135 | switch (adev->asic_type) { | 135 | switch (adev->asic_type) { |
| 136 | #ifdef CONFIG_DRM_AMDGPU_CIK | 136 | #ifdef CONFIG_DRM_AMDGPU_CIK |
| @@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) | |||
| 314 | void *ptr; | 314 | void *ptr; |
| 315 | int i, j; | 315 | int i, j; |
| 316 | 316 | ||
| 317 | cancel_delayed_work_sync(&adev->uvd.idle_work); | ||
| 318 | |||
| 317 | for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { | 319 | for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { |
| 318 | if (adev->uvd.inst[j].vcpu_bo == NULL) | 320 | if (adev->uvd.inst[j].vcpu_bo == NULL) |
| 319 | continue; | 321 | continue; |
| 320 | 322 | ||
| 321 | cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work); | ||
| 322 | |||
| 323 | /* only valid for physical mode */ | 323 | /* only valid for physical mode */ |
| 324 | if (adev->asic_type < CHIP_POLARIS10) { | 324 | if (adev->asic_type < CHIP_POLARIS10) { |
| 325 | for (i = 0; i < adev->uvd.max_handles; ++i) | 325 | for (i = 0; i < adev->uvd.max_handles; ++i) |
| @@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
| 1145 | static void amdgpu_uvd_idle_work_handler(struct work_struct *work) | 1145 | static void amdgpu_uvd_idle_work_handler(struct work_struct *work) |
| 1146 | { | 1146 | { |
| 1147 | struct amdgpu_device *adev = | 1147 | struct amdgpu_device *adev = |
| 1148 | container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); | 1148 | container_of(work, struct amdgpu_device, uvd.idle_work.work); |
| 1149 | unsigned fences = 0, i, j; | 1149 | unsigned fences = 0, i, j; |
| 1150 | 1150 | ||
| 1151 | for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { | 1151 | for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { |
| @@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) | |||
| 1167 | AMD_CG_STATE_GATE); | 1167 | AMD_CG_STATE_GATE); |
| 1168 | } | 1168 | } |
| 1169 | } else { | 1169 | } else { |
| 1170 | schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); | 1170 | schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); |
| 1171 | } | 1171 | } |
| 1172 | } | 1172 | } |
| 1173 | 1173 | ||
| @@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) | |||
| 1179 | if (amdgpu_sriov_vf(adev)) | 1179 | if (amdgpu_sriov_vf(adev)) |
| 1180 | return; | 1180 | return; |
| 1181 | 1181 | ||
| 1182 | set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work); | 1182 | set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); |
| 1183 | if (set_clocks) { | 1183 | if (set_clocks) { |
| 1184 | if (adev->pm.dpm_enabled) { | 1184 | if (adev->pm.dpm_enabled) { |
| 1185 | amdgpu_dpm_enable_uvd(adev, true); | 1185 | amdgpu_dpm_enable_uvd(adev, true); |
| @@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) | |||
| 1196 | void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) | 1196 | void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) |
| 1197 | { | 1197 | { |
| 1198 | if (!amdgpu_sriov_vf(ring->adev)) | 1198 | if (!amdgpu_sriov_vf(ring->adev)) |
| 1199 | schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); | 1199 | schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); |
| 1200 | } | 1200 | } |
| 1201 | 1201 | ||
| 1202 | /** | 1202 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index b1579fba134c..8b23a1b00c76 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | |||
| @@ -44,7 +44,6 @@ struct amdgpu_uvd_inst { | |||
| 44 | void *saved_bo; | 44 | void *saved_bo; |
| 45 | atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; | 45 | atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; |
| 46 | struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; | 46 | struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; |
| 47 | struct delayed_work idle_work; | ||
| 48 | struct amdgpu_ring ring; | 47 | struct amdgpu_ring ring; |
| 49 | struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; | 48 | struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; |
| 50 | struct amdgpu_irq_src irq; | 49 | struct amdgpu_irq_src irq; |
| @@ -62,6 +61,7 @@ struct amdgpu_uvd { | |||
| 62 | bool address_64_bit; | 61 | bool address_64_bit; |
| 63 | bool use_ctx_buf; | 62 | bool use_ctx_buf; |
| 64 | struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; | 63 | struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; |
| 64 | struct delayed_work idle_work; | ||
| 65 | }; | 65 | }; |
| 66 | 66 | ||
| 67 | int amdgpu_uvd_sw_init(struct amdgpu_device *adev); | 67 | int amdgpu_uvd_sw_init(struct amdgpu_device *adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 9aca653bec07..b6333f92ba45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | |||
| @@ -97,6 +97,38 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, | |||
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | /** | 99 | /** |
| 100 | * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size | ||
| 101 | * | ||
| 102 | * @bo: &amdgpu_bo buffer object (must be in VRAM) | ||
| 103 | * | ||
| 104 | * Returns: | ||
| 105 | * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM. | ||
| 106 | */ | ||
| 107 | u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo) | ||
| 108 | { | ||
| 109 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | ||
| 110 | struct ttm_mem_reg *mem = &bo->tbo.mem; | ||
| 111 | struct drm_mm_node *nodes = mem->mm_node; | ||
| 112 | unsigned pages = mem->num_pages; | ||
| 113 | u64 usage = 0; | ||
| 114 | |||
| 115 | if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size) | ||
| 116 | return 0; | ||
| 117 | |||
| 118 | if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT) | ||
| 119 | return amdgpu_bo_size(bo); | ||
| 120 | |||
| 121 | while (nodes && pages) { | ||
| 122 | usage += nodes->size << PAGE_SHIFT; | ||
| 123 | usage -= amdgpu_vram_mgr_vis_size(adev, nodes); | ||
| 124 | pages -= nodes->size; | ||
| 125 | ++nodes; | ||
| 126 | } | ||
| 127 | |||
| 128 | return usage; | ||
| 129 | } | ||
| 130 | |||
| 131 | /** | ||
| 100 | * amdgpu_vram_mgr_new - allocate new ranges | 132 | * amdgpu_vram_mgr_new - allocate new ranges |
| 101 | * | 133 | * |
| 102 | * @man: TTM memory type manager | 134 | * @man: TTM memory type manager |
| @@ -135,7 +167,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, | |||
| 135 | num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node); | 167 | num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node); |
| 136 | } | 168 | } |
| 137 | 169 | ||
| 138 | nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL); | 170 | nodes = kvmalloc_array(num_nodes, sizeof(*nodes), |
| 171 | GFP_KERNEL | __GFP_ZERO); | ||
| 139 | if (!nodes) | 172 | if (!nodes) |
| 140 | return -ENOMEM; | 173 | return -ENOMEM; |
| 141 | 174 | ||
| @@ -190,7 +223,7 @@ error: | |||
| 190 | drm_mm_remove_node(&nodes[i]); | 223 | drm_mm_remove_node(&nodes[i]); |
| 191 | spin_unlock(&mgr->lock); | 224 | spin_unlock(&mgr->lock); |
| 192 | 225 | ||
| 193 | kfree(nodes); | 226 | kvfree(nodes); |
| 194 | return r == -ENOSPC ? 0 : r; | 227 | return r == -ENOSPC ? 0 : r; |
| 195 | } | 228 | } |
| 196 | 229 | ||
| @@ -229,7 +262,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, | |||
| 229 | atomic64_sub(usage, &mgr->usage); | 262 | atomic64_sub(usage, &mgr->usage); |
| 230 | atomic64_sub(vis_usage, &mgr->vis_usage); | 263 | atomic64_sub(vis_usage, &mgr->vis_usage); |
| 231 | 264 | ||
| 232 | kfree(mem->mm_node); | 265 | kvfree(mem->mm_node); |
| 233 | mem->mm_node = NULL; | 266 | mem->mm_node = NULL; |
| 234 | } | 267 | } |
| 235 | 268 | ||
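
The new amdgpu_vram_mgr_bo_invisible_size() walks a buffer's drm_mm nodes and, for each node, subtracts the CPU-visible share reported by amdgpu_vram_mgr_vis_size() from the node size (the early returns in the hunk handle the all-visible and fully-above-the-aperture cases without walking the nodes). A standalone arithmetic sketch with made-up numbers, plain userspace C, byte offsets instead of pages, and simplified types:

#include <stdint.h>
#include <stdio.h>

struct node { uint64_t start, size; };	/* byte offsets, not pages */

static uint64_t invisible_bytes(const struct node *nodes, int n,
				uint64_t visible_size)
{
	uint64_t usage = 0;

	for (int i = 0; i < n; i++) {
		uint64_t end = nodes[i].start + nodes[i].size;
		uint64_t visible = 0;

		if (nodes[i].start < visible_size)
			visible = (end < visible_size ? end : visible_size)
				  - nodes[i].start;
		usage += nodes[i].size - visible;	/* invisible share of this node */
	}
	return usage;
}

int main(void)
{
	/* 256 MiB visible aperture; one 64 MiB node starting at 240 MiB. */
	struct node bo[] = { { 240ull << 20, 64ull << 20 } };

	printf("%llu MiB invisible\n",
	       (unsigned long long)(invisible_bytes(bo, 1, 256ull << 20) >> 20));
	return 0;	/* prints "48 MiB invisible": only 16 MiB fit below 256 MiB */
}
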
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index dbe4b1f66784..22364875a943 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | |||
| @@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr) | |||
| 1090 | static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) | 1090 | static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) |
| 1091 | { | 1091 | { |
| 1092 | struct amdgpu_device *adev = hwmgr->adev; | 1092 | struct amdgpu_device *adev = hwmgr->adev; |
| 1093 | int result; | 1093 | int result = 0; |
| 1094 | uint32_t num_se = 0; | 1094 | uint32_t num_se = 0; |
| 1095 | uint32_t count, data; | 1095 | uint32_t count, data; |
| 1096 | 1096 | ||
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 73c875db45f4..47e0992f3908 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | |||
| @@ -839,7 +839,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane) | |||
| 839 | return ret; | 839 | return ret; |
| 840 | } | 840 | } |
| 841 | 841 | ||
| 842 | if (desc->layout.xstride && desc->layout.pstride) { | 842 | if (desc->layout.xstride[0] && desc->layout.pstride[0]) { |
| 843 | int ret; | 843 | int ret; |
| 844 | 844 | ||
| 845 | ret = drm_plane_create_rotation_property(&plane->base, | 845 | ret = drm_plane_create_rotation_property(&plane->base, |
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 7ab36042a822..250effa0e6b8 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c | |||
| @@ -36,8 +36,11 @@ | |||
| 36 | 36 | ||
| 37 | #define SII8620_BURST_BUF_LEN 288 | 37 | #define SII8620_BURST_BUF_LEN 288 |
| 38 | #define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3) | 38 | #define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3) |
| 39 | #define MHL1_MAX_LCLK 225000 | 39 | |
| 40 | #define MHL3_MAX_LCLK 600000 | 40 | #define MHL1_MAX_PCLK 75000 |
| 41 | #define MHL1_MAX_PCLK_PP_MODE 150000 | ||
| 42 | #define MHL3_MAX_PCLK 200000 | ||
| 43 | #define MHL3_MAX_PCLK_PP_MODE 300000 | ||
| 41 | 44 | ||
| 42 | enum sii8620_mode { | 45 | enum sii8620_mode { |
| 43 | CM_DISCONNECTED, | 46 | CM_DISCONNECTED, |
| @@ -80,6 +83,9 @@ struct sii8620 { | |||
| 80 | u8 devcap[MHL_DCAP_SIZE]; | 83 | u8 devcap[MHL_DCAP_SIZE]; |
| 81 | u8 xdevcap[MHL_XDC_SIZE]; | 84 | u8 xdevcap[MHL_XDC_SIZE]; |
| 82 | u8 avif[HDMI_INFOFRAME_SIZE(AVI)]; | 85 | u8 avif[HDMI_INFOFRAME_SIZE(AVI)]; |
| 86 | bool feature_complete; | ||
| 87 | bool devcap_read; | ||
| 88 | bool sink_detected; | ||
| 83 | struct edid *edid; | 89 | struct edid *edid; |
| 84 | unsigned int gen2_write_burst:1; | 90 | unsigned int gen2_write_burst:1; |
| 85 | enum sii8620_mt_state mt_state; | 91 | enum sii8620_mt_state mt_state; |
| @@ -476,7 +482,7 @@ static void sii8620_update_array(u8 *dst, u8 *src, int count) | |||
| 476 | } | 482 | } |
| 477 | } | 483 | } |
| 478 | 484 | ||
| 479 | static void sii8620_sink_detected(struct sii8620 *ctx, int ret) | 485 | static void sii8620_identify_sink(struct sii8620 *ctx) |
| 480 | { | 486 | { |
| 481 | static const char * const sink_str[] = { | 487 | static const char * const sink_str[] = { |
| 482 | [SINK_NONE] = "NONE", | 488 | [SINK_NONE] = "NONE", |
| @@ -487,7 +493,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret) | |||
| 487 | char sink_name[20]; | 493 | char sink_name[20]; |
| 488 | struct device *dev = ctx->dev; | 494 | struct device *dev = ctx->dev; |
| 489 | 495 | ||
| 490 | if (ret < 0) | 496 | if (!ctx->sink_detected || !ctx->devcap_read) |
| 491 | return; | 497 | return; |
| 492 | 498 | ||
| 493 | sii8620_fetch_edid(ctx); | 499 | sii8620_fetch_edid(ctx); |
| @@ -496,6 +502,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret) | |||
| 496 | sii8620_mhl_disconnected(ctx); | 502 | sii8620_mhl_disconnected(ctx); |
| 497 | return; | 503 | return; |
| 498 | } | 504 | } |
| 505 | sii8620_set_upstream_edid(ctx); | ||
| 499 | 506 | ||
| 500 | if (drm_detect_hdmi_monitor(ctx->edid)) | 507 | if (drm_detect_hdmi_monitor(ctx->edid)) |
| 501 | ctx->sink_type = SINK_HDMI; | 508 | ctx->sink_type = SINK_HDMI; |
| @@ -508,53 +515,6 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret) | |||
| 508 | sink_str[ctx->sink_type], sink_name); | 515 | sink_str[ctx->sink_type], sink_name); |
| 509 | } | 516 | } |
| 510 | 517 | ||
| 511 | static void sii8620_hsic_init(struct sii8620 *ctx) | ||
| 512 | { | ||
| 513 | if (!sii8620_is_mhl3(ctx)) | ||
| 514 | return; | ||
| 515 | |||
| 516 | sii8620_write(ctx, REG_FCGC, | ||
| 517 | BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE); | ||
| 518 | sii8620_setbits(ctx, REG_HRXCTRL3, | ||
| 519 | BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0); | ||
| 520 | sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4); | ||
| 521 | sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0); | ||
| 522 | sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0); | ||
| 523 | sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST); | ||
| 524 | sii8620_write_seq_static(ctx, | ||
| 525 | REG_TDMLLCTL, 0, | ||
| 526 | REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST | | ||
| 527 | BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST, | ||
| 528 | REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST, | ||
| 529 | REG_HRXINTL, 0xff, | ||
| 530 | REG_HRXINTH, 0xff, | ||
| 531 | REG_TTXINTL, 0xff, | ||
| 532 | REG_TTXINTH, 0xff, | ||
| 533 | REG_TRXINTL, 0xff, | ||
| 534 | REG_TRXINTH, 0xff, | ||
| 535 | REG_HTXINTL, 0xff, | ||
| 536 | REG_HTXINTH, 0xff, | ||
| 537 | REG_FCINTR0, 0xff, | ||
| 538 | REG_FCINTR1, 0xff, | ||
| 539 | REG_FCINTR2, 0xff, | ||
| 540 | REG_FCINTR3, 0xff, | ||
| 541 | REG_FCINTR4, 0xff, | ||
| 542 | REG_FCINTR5, 0xff, | ||
| 543 | REG_FCINTR6, 0xff, | ||
| 544 | REG_FCINTR7, 0xff | ||
| 545 | ); | ||
| 546 | } | ||
| 547 | |||
| 548 | static void sii8620_edid_read(struct sii8620 *ctx, int ret) | ||
| 549 | { | ||
| 550 | if (ret < 0) | ||
| 551 | return; | ||
| 552 | |||
| 553 | sii8620_set_upstream_edid(ctx); | ||
| 554 | sii8620_hsic_init(ctx); | ||
| 555 | sii8620_enable_hpd(ctx); | ||
| 556 | } | ||
| 557 | |||
| 558 | static void sii8620_mr_devcap(struct sii8620 *ctx) | 518 | static void sii8620_mr_devcap(struct sii8620 *ctx) |
| 559 | { | 519 | { |
| 560 | u8 dcap[MHL_DCAP_SIZE]; | 520 | u8 dcap[MHL_DCAP_SIZE]; |
| @@ -570,6 +530,8 @@ static void sii8620_mr_devcap(struct sii8620 *ctx) | |||
| 570 | dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L], | 530 | dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L], |
| 571 | dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]); | 531 | dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]); |
| 572 | sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE); | 532 | sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE); |
| 533 | ctx->devcap_read = true; | ||
| 534 | sii8620_identify_sink(ctx); | ||
| 573 | } | 535 | } |
| 574 | 536 | ||
| 575 | static void sii8620_mr_xdevcap(struct sii8620 *ctx) | 537 | static void sii8620_mr_xdevcap(struct sii8620 *ctx) |
| @@ -807,6 +769,7 @@ static void sii8620_burst_rx_all(struct sii8620 *ctx) | |||
| 807 | static void sii8620_fetch_edid(struct sii8620 *ctx) | 769 | static void sii8620_fetch_edid(struct sii8620 *ctx) |
| 808 | { | 770 | { |
| 809 | u8 lm_ddc, ddc_cmd, int3, cbus; | 771 | u8 lm_ddc, ddc_cmd, int3, cbus; |
| 772 | unsigned long timeout; | ||
| 810 | int fetched, i; | 773 | int fetched, i; |
| 811 | int edid_len = EDID_LENGTH; | 774 | int edid_len = EDID_LENGTH; |
| 812 | u8 *edid; | 775 | u8 *edid; |
| @@ -856,23 +819,31 @@ static void sii8620_fetch_edid(struct sii8620 *ctx) | |||
| 856 | REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK | 819 | REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK |
| 857 | ); | 820 | ); |
| 858 | 821 | ||
| 859 | do { | 822 | int3 = 0; |
| 860 | int3 = sii8620_readb(ctx, REG_INTR3); | 823 | timeout = jiffies + msecs_to_jiffies(200); |
| 824 | for (;;) { | ||
| 861 | cbus = sii8620_readb(ctx, REG_CBUS_STATUS); | 825 | cbus = sii8620_readb(ctx, REG_CBUS_STATUS); |
| 862 | 826 | if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) { | |
| 863 | if (int3 & BIT_DDC_CMD_DONE) | 827 | kfree(edid); |
| 864 | break; | 828 | edid = NULL; |
| 865 | 829 | goto end; | |
| 866 | if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) { | 830 | } |
| 831 | if (int3 & BIT_DDC_CMD_DONE) { | ||
| 832 | if (sii8620_readb(ctx, REG_DDC_DOUT_CNT) | ||
| 833 | >= FETCH_SIZE) | ||
| 834 | break; | ||
| 835 | } else { | ||
| 836 | int3 = sii8620_readb(ctx, REG_INTR3); | ||
| 837 | } | ||
| 838 | if (time_is_before_jiffies(timeout)) { | ||
| 839 | ctx->error = -ETIMEDOUT; | ||
| 840 | dev_err(ctx->dev, "timeout during EDID read\n"); | ||
| 867 | kfree(edid); | 841 | kfree(edid); |
| 868 | edid = NULL; | 842 | edid = NULL; |
| 869 | goto end; | 843 | goto end; |
| 870 | } | 844 | } |
| 871 | } while (1); | ||
| 872 | |||
| 873 | sii8620_readb(ctx, REG_DDC_STATUS); | ||
| 874 | while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE) | ||
| 875 | usleep_range(10, 20); | 845 | usleep_range(10, 20); |
| 846 | } | ||
| 876 | 847 | ||
| 877 | sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE); | 848 | sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE); |
| 878 | if (fetched + FETCH_SIZE == EDID_LENGTH) { | 849 | if (fetched + FETCH_SIZE == EDID_LENGTH) { |
| @@ -971,8 +942,17 @@ static int sii8620_hw_on(struct sii8620 *ctx) | |||
| 971 | ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); | 942 | ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); |
| 972 | if (ret) | 943 | if (ret) |
| 973 | return ret; | 944 | return ret; |
| 945 | |||
| 974 | usleep_range(10000, 20000); | 946 | usleep_range(10000, 20000); |
| 975 | return clk_prepare_enable(ctx->clk_xtal); | 947 | ret = clk_prepare_enable(ctx->clk_xtal); |
| 948 | if (ret) | ||
| 949 | return ret; | ||
| 950 | |||
| 951 | msleep(100); | ||
| 952 | gpiod_set_value(ctx->gpio_reset, 0); | ||
| 953 | msleep(100); | ||
| 954 | |||
| 955 | return 0; | ||
| 976 | } | 956 | } |
| 977 | 957 | ||
| 978 | static int sii8620_hw_off(struct sii8620 *ctx) | 958 | static int sii8620_hw_off(struct sii8620 *ctx) |
| @@ -982,17 +962,6 @@ static int sii8620_hw_off(struct sii8620 *ctx) | |||
| 982 | return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); | 962 | return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); |
| 983 | } | 963 | } |
| 984 | 964 | ||
| 985 | static void sii8620_hw_reset(struct sii8620 *ctx) | ||
| 986 | { | ||
| 987 | usleep_range(10000, 20000); | ||
| 988 | gpiod_set_value(ctx->gpio_reset, 0); | ||
| 989 | usleep_range(5000, 20000); | ||
| 990 | gpiod_set_value(ctx->gpio_reset, 1); | ||
| 991 | usleep_range(10000, 20000); | ||
| 992 | gpiod_set_value(ctx->gpio_reset, 0); | ||
| 993 | msleep(300); | ||
| 994 | } | ||
| 995 | |||
| 996 | static void sii8620_cbus_reset(struct sii8620 *ctx) | 965 | static void sii8620_cbus_reset(struct sii8620 *ctx) |
| 997 | { | 966 | { |
| 998 | sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST | 967 | sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST |
| @@ -1048,20 +1017,11 @@ static void sii8620_stop_video(struct sii8620 *ctx) | |||
| 1048 | 1017 | ||
| 1049 | static void sii8620_set_format(struct sii8620 *ctx) | 1018 | static void sii8620_set_format(struct sii8620 *ctx) |
| 1050 | { | 1019 | { |
| 1051 | u8 out_fmt; | ||
| 1052 | |||
| 1053 | if (sii8620_is_mhl3(ctx)) { | 1020 | if (sii8620_is_mhl3(ctx)) { |
| 1054 | sii8620_setbits(ctx, REG_M3_P0CTRL, | 1021 | sii8620_setbits(ctx, REG_M3_P0CTRL, |
| 1055 | BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED, | 1022 | BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED, |
| 1056 | ctx->use_packed_pixel ? ~0 : 0); | 1023 | ctx->use_packed_pixel ? ~0 : 0); |
| 1057 | } else { | 1024 | } else { |
| 1058 | if (ctx->use_packed_pixel) | ||
| 1059 | sii8620_write_seq_static(ctx, | ||
| 1060 | REG_VID_MODE, BIT_VID_MODE_M1080P, | ||
| 1061 | REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1, | ||
| 1062 | REG_MHLTX_CTL6, 0x60 | ||
| 1063 | ); | ||
| 1064 | else | ||
| 1065 | sii8620_write_seq_static(ctx, | 1025 | sii8620_write_seq_static(ctx, |
| 1066 | REG_VID_MODE, 0, | 1026 | REG_VID_MODE, 0, |
| 1067 | REG_MHL_TOP_CTL, 1, | 1027 | REG_MHL_TOP_CTL, 1, |
| @@ -1069,15 +1029,9 @@ static void sii8620_set_format(struct sii8620 *ctx) | |||
| 1069 | ); | 1029 | ); |
| 1070 | } | 1030 | } |
| 1071 | 1031 | ||
| 1072 | if (ctx->use_packed_pixel) | ||
| 1073 | out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) | | ||
| 1074 | BIT_TPI_OUTPUT_CSCMODE709; | ||
| 1075 | else | ||
| 1076 | out_fmt = VAL_TPI_FORMAT(RGB, FULL); | ||
| 1077 | |||
| 1078 | sii8620_write_seq(ctx, | 1032 | sii8620_write_seq(ctx, |
| 1079 | REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL), | 1033 | REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL), |
| 1080 | REG_TPI_OUTPUT, out_fmt, | 1034 | REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL), |
| 1081 | ); | 1035 | ); |
| 1082 | } | 1036 | } |
| 1083 | 1037 | ||
| @@ -1216,7 +1170,7 @@ static void sii8620_start_video(struct sii8620 *ctx) | |||
| 1216 | int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3); | 1170 | int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3); |
| 1217 | int i; | 1171 | int i; |
| 1218 | 1172 | ||
| 1219 | for (i = 0; i < ARRAY_SIZE(clk_spec); ++i) | 1173 | for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i) |
| 1220 | if (clk < clk_spec[i].max_clk) | 1174 | if (clk < clk_spec[i].max_clk) |
| 1221 | break; | 1175 | break; |
| 1222 | 1176 | ||
| @@ -1534,6 +1488,16 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode) | |||
| 1534 | ); | 1488 | ); |
| 1535 | } | 1489 | } |
| 1536 | 1490 | ||
| 1491 | static void sii8620_hpd_unplugged(struct sii8620 *ctx) | ||
| 1492 | { | ||
| 1493 | sii8620_disable_hpd(ctx); | ||
| 1494 | ctx->sink_type = SINK_NONE; | ||
| 1495 | ctx->sink_detected = false; | ||
| 1496 | ctx->feature_complete = false; | ||
| 1497 | kfree(ctx->edid); | ||
| 1498 | ctx->edid = NULL; | ||
| 1499 | } | ||
| 1500 | |||
| 1537 | static void sii8620_disconnect(struct sii8620 *ctx) | 1501 | static void sii8620_disconnect(struct sii8620 *ctx) |
| 1538 | { | 1502 | { |
| 1539 | sii8620_disable_gen2_write_burst(ctx); | 1503 | sii8620_disable_gen2_write_burst(ctx); |
| @@ -1561,7 +1525,7 @@ static void sii8620_disconnect(struct sii8620 *ctx) | |||
| 1561 | REG_MHL_DP_CTL6, 0x2A, | 1525 | REG_MHL_DP_CTL6, 0x2A, |
| 1562 | REG_MHL_DP_CTL7, 0x03 | 1526 | REG_MHL_DP_CTL7, 0x03 |
| 1563 | ); | 1527 | ); |
| 1564 | sii8620_disable_hpd(ctx); | 1528 | sii8620_hpd_unplugged(ctx); |
| 1565 | sii8620_write_seq_static(ctx, | 1529 | sii8620_write_seq_static(ctx, |
| 1566 | REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, | 1530 | REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, |
| 1567 | REG_MHL_COC_CTL1, 0x07, | 1531 | REG_MHL_COC_CTL1, 0x07, |
| @@ -1609,10 +1573,8 @@ static void sii8620_disconnect(struct sii8620 *ctx) | |||
| 1609 | memset(ctx->xstat, 0, sizeof(ctx->xstat)); | 1573 | memset(ctx->xstat, 0, sizeof(ctx->xstat)); |
| 1610 | memset(ctx->devcap, 0, sizeof(ctx->devcap)); | 1574 | memset(ctx->devcap, 0, sizeof(ctx->devcap)); |
| 1611 | memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap)); | 1575 | memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap)); |
| 1576 | ctx->devcap_read = false; | ||
| 1612 | ctx->cbus_status = 0; | 1577 | ctx->cbus_status = 0; |
| 1613 | ctx->sink_type = SINK_NONE; | ||
| 1614 | kfree(ctx->edid); | ||
| 1615 | ctx->edid = NULL; | ||
| 1616 | sii8620_mt_cleanup(ctx); | 1578 | sii8620_mt_cleanup(ctx); |
| 1617 | } | 1579 | } |
| 1618 | 1580 | ||
| @@ -1703,9 +1665,6 @@ static void sii8620_status_changed_path(struct sii8620 *ctx) | |||
| 1703 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), | 1665 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), |
| 1704 | MHL_DST_LM_CLK_MODE_NORMAL | 1666 | MHL_DST_LM_CLK_MODE_NORMAL |
| 1705 | | MHL_DST_LM_PATH_ENABLED); | 1667 | | MHL_DST_LM_PATH_ENABLED); |
| 1706 | if (!sii8620_is_mhl3(ctx)) | ||
| 1707 | sii8620_mt_read_devcap(ctx, false); | ||
| 1708 | sii8620_mt_set_cont(ctx, sii8620_sink_detected); | ||
| 1709 | } else { | 1668 | } else { |
| 1710 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), | 1669 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), |
| 1711 | MHL_DST_LM_CLK_MODE_NORMAL); | 1670 | MHL_DST_LM_CLK_MODE_NORMAL); |
| @@ -1722,9 +1681,14 @@ static void sii8620_msc_mr_write_stat(struct sii8620 *ctx) | |||
| 1722 | sii8620_update_array(ctx->stat, st, MHL_DST_SIZE); | 1681 | sii8620_update_array(ctx->stat, st, MHL_DST_SIZE); |
| 1723 | sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE); | 1682 | sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE); |
| 1724 | 1683 | ||
| 1725 | if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) | 1684 | if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] & |
| 1685 | MHL_DST_CONN_DCAP_RDY) { | ||
| 1726 | sii8620_status_dcap_ready(ctx); | 1686 | sii8620_status_dcap_ready(ctx); |
| 1727 | 1687 | ||
| 1688 | if (!sii8620_is_mhl3(ctx)) | ||
| 1689 | sii8620_mt_read_devcap(ctx, false); | ||
| 1690 | } | ||
| 1691 | |||
| 1728 | if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) | 1692 | if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) |
| 1729 | sii8620_status_changed_path(ctx); | 1693 | sii8620_status_changed_path(ctx); |
| 1730 | } | 1694 | } |
| @@ -1808,8 +1772,11 @@ static void sii8620_msc_mr_set_int(struct sii8620 *ctx) | |||
| 1808 | } | 1772 | } |
| 1809 | if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ) | 1773 | if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ) |
| 1810 | sii8620_send_features(ctx); | 1774 | sii8620_send_features(ctx); |
| 1811 | if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) | 1775 | if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) { |
| 1812 | sii8620_edid_read(ctx, 0); | 1776 | ctx->feature_complete = true; |
| 1777 | if (ctx->edid) | ||
| 1778 | sii8620_enable_hpd(ctx); | ||
| 1779 | } | ||
| 1813 | } | 1780 | } |
| 1814 | 1781 | ||
| 1815 | static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx) | 1782 | static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx) |
| @@ -1884,6 +1851,15 @@ static void sii8620_irq_msc(struct sii8620 *ctx) | |||
| 1884 | if (stat & BIT_CBUS_MSC_MR_WRITE_STAT) | 1851 | if (stat & BIT_CBUS_MSC_MR_WRITE_STAT) |
| 1885 | sii8620_msc_mr_write_stat(ctx); | 1852 | sii8620_msc_mr_write_stat(ctx); |
| 1886 | 1853 | ||
| 1854 | if (stat & BIT_CBUS_HPD_CHG) { | ||
| 1855 | if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) { | ||
| 1856 | ctx->sink_detected = true; | ||
| 1857 | sii8620_identify_sink(ctx); | ||
| 1858 | } else { | ||
| 1859 | sii8620_hpd_unplugged(ctx); | ||
| 1860 | } | ||
| 1861 | } | ||
| 1862 | |||
| 1887 | if (stat & BIT_CBUS_MSC_MR_SET_INT) | 1863 | if (stat & BIT_CBUS_MSC_MR_SET_INT) |
| 1888 | sii8620_msc_mr_set_int(ctx); | 1864 | sii8620_msc_mr_set_int(ctx); |
| 1889 | 1865 | ||
| @@ -1931,14 +1907,6 @@ static void sii8620_irq_edid(struct sii8620 *ctx) | |||
| 1931 | ctx->mt_state = MT_STATE_DONE; | 1907 | ctx->mt_state = MT_STATE_DONE; |
| 1932 | } | 1908 | } |
| 1933 | 1909 | ||
| 1934 | static void sii8620_scdt_high(struct sii8620 *ctx) | ||
| 1935 | { | ||
| 1936 | sii8620_write_seq_static(ctx, | ||
| 1937 | REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI, | ||
| 1938 | REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI, | ||
| 1939 | ); | ||
| 1940 | } | ||
| 1941 | |||
| 1942 | static void sii8620_irq_scdt(struct sii8620 *ctx) | 1910 | static void sii8620_irq_scdt(struct sii8620 *ctx) |
| 1943 | { | 1911 | { |
| 1944 | u8 stat = sii8620_readb(ctx, REG_INTR5); | 1912 | u8 stat = sii8620_readb(ctx, REG_INTR5); |
| @@ -1946,53 +1914,13 @@ static void sii8620_irq_scdt(struct sii8620 *ctx) | |||
| 1946 | if (stat & BIT_INTR_SCDT_CHANGE) { | 1914 | if (stat & BIT_INTR_SCDT_CHANGE) { |
| 1947 | u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3); | 1915 | u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3); |
| 1948 | 1916 | ||
| 1949 | if (cstat & BIT_TMDS_CSTAT_P3_SCDT) { | 1917 | if (cstat & BIT_TMDS_CSTAT_P3_SCDT) |
| 1950 | if (ctx->sink_type == SINK_HDMI) | 1918 | sii8620_start_video(ctx); |
| 1951 | /* enable infoframe interrupt */ | ||
| 1952 | sii8620_scdt_high(ctx); | ||
| 1953 | else | ||
| 1954 | sii8620_start_video(ctx); | ||
| 1955 | } | ||
| 1956 | } | 1919 | } |
| 1957 | 1920 | ||
| 1958 | sii8620_write(ctx, REG_INTR5, stat); | 1921 | sii8620_write(ctx, REG_INTR5, stat); |
| 1959 | } | 1922 | } |
| 1960 | 1923 | ||
| 1961 | static void sii8620_new_vsi(struct sii8620 *ctx) | ||
| 1962 | { | ||
| 1963 | u8 vsif[11]; | ||
| 1964 | |||
| 1965 | sii8620_write(ctx, REG_RX_HDMI_CTRL2, | ||
| 1966 | VAL_RX_HDMI_CTRL2_DEFVAL | | ||
| 1967 | BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI); | ||
| 1968 | sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif, | ||
| 1969 | ARRAY_SIZE(vsif)); | ||
| 1970 | } | ||
| 1971 | |||
| 1972 | static void sii8620_new_avi(struct sii8620 *ctx) | ||
| 1973 | { | ||
| 1974 | sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL); | ||
| 1975 | sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif, | ||
| 1976 | ARRAY_SIZE(ctx->avif)); | ||
| 1977 | } | ||
| 1978 | |||
| 1979 | static void sii8620_irq_infr(struct sii8620 *ctx) | ||
| 1980 | { | ||
| 1981 | u8 stat = sii8620_readb(ctx, REG_INTR8) | ||
| 1982 | & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI); | ||
| 1983 | |||
| 1984 | sii8620_write(ctx, REG_INTR8, stat); | ||
| 1985 | |||
| 1986 | if (stat & BIT_CEA_NEW_VSI) | ||
| 1987 | sii8620_new_vsi(ctx); | ||
| 1988 | |||
| 1989 | if (stat & BIT_CEA_NEW_AVI) | ||
| 1990 | sii8620_new_avi(ctx); | ||
| 1991 | |||
| 1992 | if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI)) | ||
| 1993 | sii8620_start_video(ctx); | ||
| 1994 | } | ||
| 1995 | |||
| 1996 | static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret) | 1924 | static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret) |
| 1997 | { | 1925 | { |
| 1998 | if (ret < 0) | 1926 | if (ret < 0) |
| @@ -2043,11 +1971,11 @@ static void sii8620_irq_ddc(struct sii8620 *ctx) | |||
| 2043 | 1971 | ||
| 2044 | if (stat & BIT_DDC_CMD_DONE) { | 1972 | if (stat & BIT_DDC_CMD_DONE) { |
| 2045 | sii8620_write(ctx, REG_INTR3_MASK, 0); | 1973 | sii8620_write(ctx, REG_INTR3_MASK, 0); |
| 2046 | if (sii8620_is_mhl3(ctx)) | 1974 | if (sii8620_is_mhl3(ctx) && !ctx->feature_complete) |
| 2047 | sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), | 1975 | sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), |
| 2048 | MHL_INT_RC_FEAT_REQ); | 1976 | MHL_INT_RC_FEAT_REQ); |
| 2049 | else | 1977 | else |
| 2050 | sii8620_edid_read(ctx, 0); | 1978 | sii8620_enable_hpd(ctx); |
| 2051 | } | 1979 | } |
| 2052 | sii8620_write(ctx, REG_INTR3, stat); | 1980 | sii8620_write(ctx, REG_INTR3, stat); |
| 2053 | } | 1981 | } |
| @@ -2074,7 +2002,6 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data) | |||
| 2074 | { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid }, | 2002 | { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid }, |
| 2075 | { BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc }, | 2003 | { BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc }, |
| 2076 | { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt }, | 2004 | { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt }, |
| 2077 | { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr }, | ||
| 2078 | }; | 2005 | }; |
| 2079 | struct sii8620 *ctx = data; | 2006 | struct sii8620 *ctx = data; |
| 2080 | u8 stats[LEN_FAST_INTR_STAT]; | 2007 | u8 stats[LEN_FAST_INTR_STAT]; |
| @@ -2112,7 +2039,6 @@ static void sii8620_cable_in(struct sii8620 *ctx) | |||
| 2112 | dev_err(dev, "Error powering on, %d.\n", ret); | 2039 | dev_err(dev, "Error powering on, %d.\n", ret); |
| 2113 | return; | 2040 | return; |
| 2114 | } | 2041 | } |
| 2115 | sii8620_hw_reset(ctx); | ||
| 2116 | 2042 | ||
| 2117 | sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver)); | 2043 | sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver)); |
| 2118 | ret = sii8620_clear_error(ctx); | 2044 | ret = sii8620_clear_error(ctx); |
| @@ -2268,17 +2194,43 @@ static void sii8620_detach(struct drm_bridge *bridge) | |||
| 2268 | rc_unregister_device(ctx->rc_dev); | 2194 | rc_unregister_device(ctx->rc_dev); |
| 2269 | } | 2195 | } |
| 2270 | 2196 | ||
| 2197 | static int sii8620_is_packing_required(struct sii8620 *ctx, | ||
| 2198 | const struct drm_display_mode *mode) | ||
| 2199 | { | ||
| 2200 | int max_pclk, max_pclk_pp_mode; | ||
| 2201 | |||
| 2202 | if (sii8620_is_mhl3(ctx)) { | ||
| 2203 | max_pclk = MHL3_MAX_PCLK; | ||
| 2204 | max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE; | ||
| 2205 | } else { | ||
| 2206 | max_pclk = MHL1_MAX_PCLK; | ||
| 2207 | max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE; | ||
| 2208 | } | ||
| 2209 | |||
| 2210 | if (mode->clock < max_pclk) | ||
| 2211 | return 0; | ||
| 2212 | else if (mode->clock < max_pclk_pp_mode) | ||
| 2213 | return 1; | ||
| 2214 | else | ||
| 2215 | return -1; | ||
| 2216 | } | ||
| 2217 | |||
| 2271 | static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge, | 2218 | static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge, |
| 2272 | const struct drm_display_mode *mode) | 2219 | const struct drm_display_mode *mode) |
| 2273 | { | 2220 | { |
| 2274 | struct sii8620 *ctx = bridge_to_sii8620(bridge); | 2221 | struct sii8620 *ctx = bridge_to_sii8620(bridge); |
| 2222 | int pack_required = sii8620_is_packing_required(ctx, mode); | ||
| 2275 | bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] & | 2223 | bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] & |
| 2276 | MHL_DCAP_VID_LINK_PPIXEL; | 2224 | MHL_DCAP_VID_LINK_PPIXEL; |
| 2277 | unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : | ||
| 2278 | MHL1_MAX_LCLK; | ||
| 2279 | max_pclk /= can_pack ? 2 : 3; | ||
| 2280 | 2225 | ||
| 2281 | return (mode->clock > max_pclk) ? MODE_CLOCK_HIGH : MODE_OK; | 2226 | switch (pack_required) { |
| 2227 | case 0: | ||
| 2228 | return MODE_OK; | ||
| 2229 | case 1: | ||
| 2230 | return (can_pack) ? MODE_OK : MODE_CLOCK_HIGH; | ||
| 2231 | default: | ||
| 2232 | return MODE_CLOCK_HIGH; | ||
| 2233 | } | ||
| 2282 | } | 2234 | } |
| 2283 | 2235 | ||
| 2284 | static bool sii8620_mode_fixup(struct drm_bridge *bridge, | 2236 | static bool sii8620_mode_fixup(struct drm_bridge *bridge, |
| @@ -2286,43 +2238,16 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge, | |||
| 2286 | struct drm_display_mode *adjusted_mode) | 2238 | struct drm_display_mode *adjusted_mode) |
| 2287 | { | 2239 | { |
| 2288 | struct sii8620 *ctx = bridge_to_sii8620(bridge); | 2240 | struct sii8620 *ctx = bridge_to_sii8620(bridge); |
| 2289 | int max_lclk; | ||
| 2290 | bool ret = true; | ||
| 2291 | 2241 | ||
| 2292 | mutex_lock(&ctx->lock); | 2242 | mutex_lock(&ctx->lock); |
| 2293 | 2243 | ||
| 2294 | max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK; | 2244 | ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode); |
| 2295 | if (max_lclk > 3 * adjusted_mode->clock) { | 2245 | ctx->video_code = drm_match_cea_mode(adjusted_mode); |
| 2296 | ctx->use_packed_pixel = 0; | 2246 | ctx->pixel_clock = adjusted_mode->clock; |
| 2297 | goto end; | 2247 | |
| 2298 | } | ||
| 2299 | if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) && | ||
| 2300 | max_lclk > 2 * adjusted_mode->clock) { | ||
| 2301 | ctx->use_packed_pixel = 1; | ||
| 2302 | goto end; | ||
| 2303 | } | ||
| 2304 | ret = false; | ||
| 2305 | end: | ||
| 2306 | if (ret) { | ||
| 2307 | u8 vic = drm_match_cea_mode(adjusted_mode); | ||
| 2308 | |||
| 2309 | if (!vic) { | ||
| 2310 | union hdmi_infoframe frm; | ||
| 2311 | u8 mhl_vic[] = { 0, 95, 94, 93, 98 }; | ||
| 2312 | |||
| 2313 | /* FIXME: We need the connector here */ | ||
| 2314 | drm_hdmi_vendor_infoframe_from_display_mode( | ||
| 2315 | &frm.vendor.hdmi, NULL, adjusted_mode); | ||
| 2316 | vic = frm.vendor.hdmi.vic; | ||
| 2317 | if (vic >= ARRAY_SIZE(mhl_vic)) | ||
| 2318 | vic = 0; | ||
| 2319 | vic = mhl_vic[vic]; | ||
| 2320 | } | ||
| 2321 | ctx->video_code = vic; | ||
| 2322 | ctx->pixel_clock = adjusted_mode->clock; | ||
| 2323 | } | ||
| 2324 | mutex_unlock(&ctx->lock); | 2248 | mutex_unlock(&ctx->lock); |
| 2325 | return ret; | 2249 | |
| 2250 | return true; | ||
| 2326 | } | 2251 | } |
| 2327 | 2252 | ||
| 2328 | static const struct drm_bridge_funcs sii8620_bridge_funcs = { | 2253 | static const struct drm_bridge_funcs sii8620_bridge_funcs = { |
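The new sii8620_is_packing_required() helper folds the pixel-clock checks into a single three-way answer that both mode_valid and mode_fixup can share. A minimal user-space sketch of that decision, with placeholder clock limits standing in for the driver's MHL1_/MHL3_MAX_PCLK* values:

```c
/*
 * Minimal model of the packing decision added in sii8620_is_packing_required():
 * 0 = no packing needed, 1 = packed-pixel mode required, -1 = clock too high
 * either way. The limits below are illustrative placeholders, not the driver's
 * real MHL1_/MHL3_MAX_PCLK* values.
 */
#include <stdio.h>
#include <stdbool.h>

#define MHL1_MAX_PCLK		75000	/* kHz, placeholder */
#define MHL1_MAX_PCLK_PP	150000	/* kHz, placeholder */
#define MHL3_MAX_PCLK		200000	/* kHz, placeholder */
#define MHL3_MAX_PCLK_PP	300000	/* kHz, placeholder */

static int is_packing_required(bool mhl3, int clock)
{
	int max_pclk = mhl3 ? MHL3_MAX_PCLK : MHL1_MAX_PCLK;
	int max_pclk_pp = mhl3 ? MHL3_MAX_PCLK_PP : MHL1_MAX_PCLK_PP;

	if (clock < max_pclk)
		return 0;	/* fits unpacked */
	if (clock < max_pclk_pp)
		return 1;	/* only fits with packed pixel */
	return -1;		/* does not fit at all */
}

int main(void)
{
	bool sink_can_pack = true;	/* MHL_DCAP_VID_LINK_PPIXEL analogue */
	int clocks[] = { 74250, 148500, 297000 };

	for (unsigned int i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++) {
		int req = is_packing_required(false, clocks[i]);
		bool ok = (req == 0) || (req == 1 && sink_can_pack);

		printf("%d kHz -> %s\n", clocks[i], ok ? "MODE_OK" : "MODE_CLOCK_HIGH");
	}
	return 0;
}
```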
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index b553a6f2ff0e..7af748ed1c58 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
| @@ -369,13 +369,6 @@ EXPORT_SYMBOL(drm_dev_exit); | |||
| 369 | */ | 369 | */ |
| 370 | void drm_dev_unplug(struct drm_device *dev) | 370 | void drm_dev_unplug(struct drm_device *dev) |
| 371 | { | 371 | { |
| 372 | drm_dev_unregister(dev); | ||
| 373 | |||
| 374 | mutex_lock(&drm_global_mutex); | ||
| 375 | if (dev->open_count == 0) | ||
| 376 | drm_dev_put(dev); | ||
| 377 | mutex_unlock(&drm_global_mutex); | ||
| 378 | |||
| 379 | /* | 372 | /* |
| 380 | * After synchronizing any critical read section is guaranteed to see | 373 | * After synchronizing any critical read section is guaranteed to see |
| 381 | * the new value of ->unplugged, and any critical section which might | 374 | * the new value of ->unplugged, and any critical section which might |
| @@ -384,6 +377,13 @@ void drm_dev_unplug(struct drm_device *dev) | |||
| 384 | */ | 377 | */ |
| 385 | dev->unplugged = true; | 378 | dev->unplugged = true; |
| 386 | synchronize_srcu(&drm_unplug_srcu); | 379 | synchronize_srcu(&drm_unplug_srcu); |
| 380 | |||
| 381 | drm_dev_unregister(dev); | ||
| 382 | |||
| 383 | mutex_lock(&drm_global_mutex); | ||
| 384 | if (dev->open_count == 0) | ||
| 385 | drm_dev_put(dev); | ||
| 386 | mutex_unlock(&drm_global_mutex); | ||
| 387 | } | 387 | } |
| 388 | EXPORT_SYMBOL(drm_dev_unplug); | 388 | EXPORT_SYMBOL(drm_dev_unplug); |
| 389 | 389 | ||
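The reordering above publishes dev->unplugged and waits out readers before the device is unregistered and possibly freed. A rough user-space analogue of that ordering, using a pthread rwlock where the kernel uses SRCU (the names and the analogy are illustrative, not the DRM API):

```c
/*
 * Rough analogue of the new drm_dev_unplug() ordering: publish "unplugged"
 * and wait for in-flight critical sections *before* tearing the device down.
 * A pthread rwlock stands in for SRCU; this is a sketch of the ordering only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_rwlock_t unplug_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_bool unplugged;

static int device_enter(void)			/* ~ drm_dev_enter() */
{
	pthread_rwlock_rdlock(&unplug_lock);
	if (atomic_load(&unplugged)) {
		pthread_rwlock_unlock(&unplug_lock);
		return -1;			/* device is gone */
	}
	return 0;
}

static void device_exit(void)			/* ~ drm_dev_exit() */
{
	pthread_rwlock_unlock(&unplug_lock);
}

static void device_unplug(void)			/* ~ drm_dev_unplug() */
{
	atomic_store(&unplugged, true);		/* dev->unplugged = true */
	pthread_rwlock_wrlock(&unplug_lock);	/* ~ synchronize_srcu() */
	pthread_rwlock_unlock(&unplug_lock);
	printf("no reader can still see the device: safe to unregister\n");
}

int main(void)
{
	if (!device_enter()) {
		printf("doing I/O against a live device\n");
		device_exit();
	}
	device_unplug();
	if (device_enter())
		printf("access after unplug is refused\n");
	else
		device_exit();
	return 0;
}
```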
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 34c125e2d90c..7014a96546f4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -340,14 +340,21 @@ struct drm_i915_file_private { | |||
| 340 | 340 | ||
| 341 | unsigned int bsd_engine; | 341 | unsigned int bsd_engine; |
| 342 | 342 | ||
| 343 | /* Client can have a maximum of 3 contexts banned before | 343 | /* |
| 344 | * it is denied of creating new contexts. As one context | 344 | * Every context ban increments the per-client ban score. Also |
| 345 | * ban needs 4 consecutive hangs, and more if there is | 345 | * hangs in short succession increment the ban score. If the ban threshold |
| 346 | * progress in between, this is a last resort stop gap measure | 346 | * is reached, the client is considered banned and submitting more work |
| 347 | * to limit the badly behaving clients access to gpu. | 347 | * will fail. This is a stopgap measure to limit a badly behaving |
| 348 | * client's access to the gpu. Note that unbannable contexts never increment | ||
| 349 | * the client ban score. | ||
| 348 | */ | 350 | */ |
| 349 | #define I915_MAX_CLIENT_CONTEXT_BANS 3 | 351 | #define I915_CLIENT_SCORE_HANG_FAST 1 |
| 350 | atomic_t context_bans; | 352 | #define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ) |
| 353 | #define I915_CLIENT_SCORE_CONTEXT_BAN 3 | ||
| 354 | #define I915_CLIENT_SCORE_BANNED 9 | ||
| 355 | /** ban_score: Accumulated score of all ctx bans and fast hangs. */ | ||
| 356 | atomic_t ban_score; | ||
| 357 | unsigned long hang_timestamp; | ||
| 351 | }; | 358 | }; |
| 352 | 359 | ||
| 353 | /* Interface history: | 360 | /* Interface history: |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3704f4c0c2c9..d44ad7bc1e94 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -2933,32 +2933,54 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, | |||
| 2933 | return 0; | 2933 | return 0; |
| 2934 | } | 2934 | } |
| 2935 | 2935 | ||
| 2936 | static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv, | ||
| 2937 | const struct i915_gem_context *ctx) | ||
| 2938 | { | ||
| 2939 | unsigned int score; | ||
| 2940 | unsigned long prev_hang; | ||
| 2941 | |||
| 2942 | if (i915_gem_context_is_banned(ctx)) | ||
| 2943 | score = I915_CLIENT_SCORE_CONTEXT_BAN; | ||
| 2944 | else | ||
| 2945 | score = 0; | ||
| 2946 | |||
| 2947 | prev_hang = xchg(&file_priv->hang_timestamp, jiffies); | ||
| 2948 | if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES)) | ||
| 2949 | score += I915_CLIENT_SCORE_HANG_FAST; | ||
| 2950 | |||
| 2951 | if (score) { | ||
| 2952 | atomic_add(score, &file_priv->ban_score); | ||
| 2953 | |||
| 2954 | DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n", | ||
| 2955 | ctx->name, score, | ||
| 2956 | atomic_read(&file_priv->ban_score)); | ||
| 2957 | } | ||
| 2958 | } | ||
| 2959 | |||
| 2936 | static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) | 2960 | static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) |
| 2937 | { | 2961 | { |
| 2938 | bool banned; | 2962 | unsigned int score; |
| 2963 | bool banned, bannable; | ||
| 2939 | 2964 | ||
| 2940 | atomic_inc(&ctx->guilty_count); | 2965 | atomic_inc(&ctx->guilty_count); |
| 2941 | 2966 | ||
| 2942 | banned = false; | 2967 | bannable = i915_gem_context_is_bannable(ctx); |
| 2943 | if (i915_gem_context_is_bannable(ctx)) { | 2968 | score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score); |
| 2944 | unsigned int score; | 2969 | banned = score >= CONTEXT_SCORE_BAN_THRESHOLD; |
| 2945 | 2970 | ||
| 2946 | score = atomic_add_return(CONTEXT_SCORE_GUILTY, | 2971 | DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n", |
| 2947 | &ctx->ban_score); | 2972 | ctx->name, atomic_read(&ctx->guilty_count), |
| 2948 | banned = score >= CONTEXT_SCORE_BAN_THRESHOLD; | 2973 | score, yesno(banned && bannable)); |
| 2949 | 2974 | ||
| 2950 | DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n", | 2975 | /* Cool contexts don't accumulate client ban score */ |
| 2951 | ctx->name, score, yesno(banned)); | 2976 | if (!bannable) |
| 2952 | } | ||
| 2953 | if (!banned) | ||
| 2954 | return; | 2977 | return; |
| 2955 | 2978 | ||
| 2956 | i915_gem_context_set_banned(ctx); | 2979 | if (banned) |
| 2957 | if (!IS_ERR_OR_NULL(ctx->file_priv)) { | 2980 | i915_gem_context_set_banned(ctx); |
| 2958 | atomic_inc(&ctx->file_priv->context_bans); | 2981 | |
| 2959 | DRM_DEBUG_DRIVER("client %s has had %d context banned\n", | 2982 | if (!IS_ERR_OR_NULL(ctx->file_priv)) |
| 2960 | ctx->name, atomic_read(&ctx->file_priv->context_bans)); | 2983 | i915_gem_client_mark_guilty(ctx->file_priv, ctx); |
| 2961 | } | ||
| 2962 | } | 2984 | } |
| 2963 | 2985 | ||
| 2964 | static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) | 2986 | static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) |
| @@ -5736,6 +5758,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) | |||
| 5736 | INIT_LIST_HEAD(&file_priv->mm.request_list); | 5758 | INIT_LIST_HEAD(&file_priv->mm.request_list); |
| 5737 | 5759 | ||
| 5738 | file_priv->bsd_engine = -1; | 5760 | file_priv->bsd_engine = -1; |
| 5761 | file_priv->hang_timestamp = jiffies; | ||
| 5739 | 5762 | ||
| 5740 | ret = i915_gem_context_open(i915, file); | 5763 | ret = i915_gem_context_open(i915, file); |
| 5741 | if (ret) | 5764 | if (ret) |
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 33f8a4b3c981..060335d3d9e0 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
| @@ -652,7 +652,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv) | |||
| 652 | 652 | ||
| 653 | static bool client_is_banned(struct drm_i915_file_private *file_priv) | 653 | static bool client_is_banned(struct drm_i915_file_private *file_priv) |
| 654 | { | 654 | { |
| 655 | return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS; | 655 | return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; |
| 656 | } | 656 | } |
| 657 | 657 | ||
| 658 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, | 658 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, |
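The i915 hunks above replace the flat context-ban counter with a per-client score: a context ban adds 3, a hang within 60 seconds of the previous one adds 1, and a client whose score reaches 9 is refused new contexts. A stand-alone model of that accounting, using the threshold values from the diff:

```c
/*
 * Stand-alone model of the per-client ban accounting: a banned context adds
 * SCORE_CONTEXT_BAN, a hang within FAST_HANG_SECONDS of the previous one adds
 * SCORE_HANG_FAST, and the client counts as banned at SCORE_BANNED. Time is
 * simulated; the kernel uses jiffies and atomics instead.
 */
#include <stdbool.h>
#include <stdio.h>

enum {
	SCORE_HANG_FAST		= 1,
	SCORE_CONTEXT_BAN	= 3,
	SCORE_BANNED		= 9,
	FAST_HANG_SECONDS	= 60,
};

struct client {
	unsigned int ban_score;
	long hang_timestamp;	/* seconds, simulated */
};

static void client_mark_guilty(struct client *c, bool ctx_banned, long now)
{
	unsigned int score = ctx_banned ? SCORE_CONTEXT_BAN : 0;
	long prev = c->hang_timestamp;

	c->hang_timestamp = now;
	if (now - prev < FAST_HANG_SECONDS)
		score += SCORE_HANG_FAST;

	c->ban_score += score;
}

static bool client_is_banned(const struct client *c)
{
	return c->ban_score >= SCORE_BANNED;
}

int main(void)
{
	struct client c = { .ban_score = 0, .hang_timestamp = 0 };
	long now = 0;

	/* three quick context bans in a row push the client over the line */
	for (int i = 0; i < 3; i++) {
		now += 10;
		client_mark_guilty(&c, true, now);
		printf("score %u, banned: %s\n", c.ban_score,
		       client_is_banned(&c) ? "yes" : "no");
	}
	return 0;
}
```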
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index f627a8c47c58..22df17c8ca9b 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -489,7 +489,9 @@ eb_validate_vma(struct i915_execbuffer *eb, | |||
| 489 | } | 489 | } |
| 490 | 490 | ||
| 491 | static int | 491 | static int |
| 492 | eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma) | 492 | eb_add_vma(struct i915_execbuffer *eb, |
| 493 | unsigned int i, unsigned batch_idx, | ||
| 494 | struct i915_vma *vma) | ||
| 493 | { | 495 | { |
| 494 | struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; | 496 | struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; |
| 495 | int err; | 497 | int err; |
| @@ -522,6 +524,24 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma) | |||
| 522 | eb->flags[i] = entry->flags; | 524 | eb->flags[i] = entry->flags; |
| 523 | vma->exec_flags = &eb->flags[i]; | 525 | vma->exec_flags = &eb->flags[i]; |
| 524 | 526 | ||
| 527 | /* | ||
| 528 | * SNA is doing fancy tricks with compressing batch buffers, which leads | ||
| 529 | * to negative relocation deltas. Usually that works out ok since the | ||
| 530 | * relocate address is still positive, except when the batch is placed | ||
| 531 | * very low in the GTT. Ensure this doesn't happen. | ||
| 532 | * | ||
| 533 | * Note that actual hangs have only been observed on gen7, but for | ||
| 534 | * paranoia do it everywhere. | ||
| 535 | */ | ||
| 536 | if (i == batch_idx) { | ||
| 537 | if (!(eb->flags[i] & EXEC_OBJECT_PINNED)) | ||
| 538 | eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; | ||
| 539 | if (eb->reloc_cache.has_fence) | ||
| 540 | eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; | ||
| 541 | |||
| 542 | eb->batch = vma; | ||
| 543 | } | ||
| 544 | |||
| 525 | err = 0; | 545 | err = 0; |
| 526 | if (eb_pin_vma(eb, entry, vma)) { | 546 | if (eb_pin_vma(eb, entry, vma)) { |
| 527 | if (entry->offset != vma->node.start) { | 547 | if (entry->offset != vma->node.start) { |
| @@ -716,7 +736,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) | |||
| 716 | { | 736 | { |
| 717 | struct radix_tree_root *handles_vma = &eb->ctx->handles_vma; | 737 | struct radix_tree_root *handles_vma = &eb->ctx->handles_vma; |
| 718 | struct drm_i915_gem_object *obj; | 738 | struct drm_i915_gem_object *obj; |
| 719 | unsigned int i; | 739 | unsigned int i, batch; |
| 720 | int err; | 740 | int err; |
| 721 | 741 | ||
| 722 | if (unlikely(i915_gem_context_is_closed(eb->ctx))) | 742 | if (unlikely(i915_gem_context_is_closed(eb->ctx))) |
| @@ -728,6 +748,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) | |||
| 728 | INIT_LIST_HEAD(&eb->relocs); | 748 | INIT_LIST_HEAD(&eb->relocs); |
| 729 | INIT_LIST_HEAD(&eb->unbound); | 749 | INIT_LIST_HEAD(&eb->unbound); |
| 730 | 750 | ||
| 751 | batch = eb_batch_index(eb); | ||
| 752 | |||
| 731 | for (i = 0; i < eb->buffer_count; i++) { | 753 | for (i = 0; i < eb->buffer_count; i++) { |
| 732 | u32 handle = eb->exec[i].handle; | 754 | u32 handle = eb->exec[i].handle; |
| 733 | struct i915_lut_handle *lut; | 755 | struct i915_lut_handle *lut; |
| @@ -770,33 +792,16 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) | |||
| 770 | lut->handle = handle; | 792 | lut->handle = handle; |
| 771 | 793 | ||
| 772 | add_vma: | 794 | add_vma: |
| 773 | err = eb_add_vma(eb, i, vma); | 795 | err = eb_add_vma(eb, i, batch, vma); |
| 774 | if (unlikely(err)) | 796 | if (unlikely(err)) |
| 775 | goto err_vma; | 797 | goto err_vma; |
| 776 | 798 | ||
| 777 | GEM_BUG_ON(vma != eb->vma[i]); | 799 | GEM_BUG_ON(vma != eb->vma[i]); |
| 778 | GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); | 800 | GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); |
| 801 | GEM_BUG_ON(drm_mm_node_allocated(&vma->node) && | ||
| 802 | eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i])); | ||
| 779 | } | 803 | } |
| 780 | 804 | ||
| 781 | /* take note of the batch buffer before we might reorder the lists */ | ||
| 782 | i = eb_batch_index(eb); | ||
| 783 | eb->batch = eb->vma[i]; | ||
| 784 | GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]); | ||
| 785 | |||
| 786 | /* | ||
| 787 | * SNA is doing fancy tricks with compressing batch buffers, which leads | ||
| 788 | * to negative relocation deltas. Usually that works out ok since the | ||
| 789 | * relocate address is still positive, except when the batch is placed | ||
| 790 | * very low in the GTT. Ensure this doesn't happen. | ||
| 791 | * | ||
| 792 | * Note that actual hangs have only been observed on gen7, but for | ||
| 793 | * paranoia do it everywhere. | ||
| 794 | */ | ||
| 795 | if (!(eb->flags[i] & EXEC_OBJECT_PINNED)) | ||
| 796 | eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; | ||
| 797 | if (eb->reloc_cache.has_fence) | ||
| 798 | eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; | ||
| 799 | |||
| 800 | eb->args->flags |= __EXEC_VALIDATED; | 805 | eb->args->flags |= __EXEC_VALIDATED; |
| 801 | return eb_reserve(eb); | 806 | return eb_reserve(eb); |
| 802 | 807 | ||
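The execbuffer rework applies the batch-specific flags at the moment the batch slot is added instead of patching them up after the lookup loop. A condensed sketch of that structure, with illustrative flag bits rather than the driver's EXEC_OBJECT_* values:

```c
/*
 * Sketch of applying batch-specific execbuf flags at add time, as the
 * reworked eb_add_vma() does. Flag bits and the has_fence condition are
 * illustrative placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXEC_OBJECT_PINNED	(1u << 0)
#define EXEC_NEEDS_BIAS		(1u << 1)
#define EXEC_NEEDS_FENCE	(1u << 2)

struct eb {
	unsigned int flags[4];
	unsigned int batch_idx;
	bool has_fence;
};

static void eb_add_vma(struct eb *eb, unsigned int i, unsigned int batch_idx,
		       unsigned int entry_flags)
{
	eb->flags[i] = entry_flags;

	if (i == batch_idx) {
		/* keep the batch away from the bottom of the GTT ... */
		if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
			eb->flags[i] |= EXEC_NEEDS_BIAS;
		/* ... and fenced if the relocation path needs it */
		if (eb->has_fence)
			eb->flags[i] |= EXEC_NEEDS_FENCE;
	}
}

int main(void)
{
	struct eb eb = { .batch_idx = 3, .has_fence = true };

	for (unsigned int i = 0; i < 4; i++)
		eb_add_vma(&eb, i, eb.batch_idx, 0);

	printf("batch flags: %#x, other flags: %#x\n",
	       eb.flags[eb.batch_idx], eb.flags[0]);
	return 0;
}
```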
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index f9bc3aaa90d0..4a02747ac658 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -1893,9 +1893,17 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, | |||
| 1893 | 1893 | ||
| 1894 | /* | 1894 | /* |
| 1895 | * Clear the PIPE*STAT regs before the IIR | 1895 | * Clear the PIPE*STAT regs before the IIR |
| 1896 | * | ||
| 1897 | * Toggle the enable bits to make sure we get an | ||
| 1898 | * edge in the ISR pipe event bit if we don't clear | ||
| 1899 | * all the enabled status bits. Otherwise the edge | ||
| 1900 | * triggered IIR on i965/g4x wouldn't notice that | ||
| 1901 | * an interrupt is still pending. | ||
| 1896 | */ | 1902 | */ |
| 1897 | if (pipe_stats[pipe]) | 1903 | if (pipe_stats[pipe]) { |
| 1898 | I915_WRITE(reg, enable_mask | pipe_stats[pipe]); | 1904 | I915_WRITE(reg, pipe_stats[pipe]); |
| 1905 | I915_WRITE(reg, enable_mask); | ||
| 1906 | } | ||
| 1899 | } | 1907 | } |
| 1900 | spin_unlock(&dev_priv->irq_lock); | 1908 | spin_unlock(&dev_priv->irq_lock); |
| 1901 | } | 1909 | } |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f11bb213ec07..7720569f2024 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -2425,12 +2425,17 @@ enum i915_power_well_id { | |||
| 2425 | #define _3D_CHICKEN _MMIO(0x2084) | 2425 | #define _3D_CHICKEN _MMIO(0x2084) |
| 2426 | #define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) | 2426 | #define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) |
| 2427 | #define _3D_CHICKEN2 _MMIO(0x208c) | 2427 | #define _3D_CHICKEN2 _MMIO(0x208c) |
| 2428 | |||
| 2429 | #define FF_SLICE_CHICKEN _MMIO(0x2088) | ||
| 2430 | #define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1) | ||
| 2431 | |||
| 2428 | /* Disables pipelining of read flushes past the SF-WIZ interface. | 2432 | /* Disables pipelining of read flushes past the SF-WIZ interface. |
| 2429 | * Required on all Ironlake steppings according to the B-Spec, but the | 2433 | * Required on all Ironlake steppings according to the B-Spec, but the |
| 2430 | * particular danger of not doing so is not specified. | 2434 | * particular danger of not doing so is not specified. |
| 2431 | */ | 2435 | */ |
| 2432 | # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) | 2436 | # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) |
| 2433 | #define _3D_CHICKEN3 _MMIO(0x2090) | 2437 | #define _3D_CHICKEN3 _MMIO(0x2090) |
| 2438 | #define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12) | ||
| 2434 | #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) | 2439 | #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) |
| 2435 | #define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5) | 2440 | #define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5) |
| 2436 | #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) | 2441 | #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index de0e22322c76..072b326d5ee0 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
| @@ -304,6 +304,9 @@ intel_crt_mode_valid(struct drm_connector *connector, | |||
| 304 | int max_dotclk = dev_priv->max_dotclk_freq; | 304 | int max_dotclk = dev_priv->max_dotclk_freq; |
| 305 | int max_clock; | 305 | int max_clock; |
| 306 | 306 | ||
| 307 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 308 | return MODE_NO_DBLESCAN; | ||
| 309 | |||
| 307 | if (mode->clock < 25000) | 310 | if (mode->clock < 25000) |
| 308 | return MODE_CLOCK_LOW; | 311 | return MODE_CLOCK_LOW; |
| 309 | 312 | ||
| @@ -337,6 +340,12 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, | |||
| 337 | struct intel_crtc_state *pipe_config, | 340 | struct intel_crtc_state *pipe_config, |
| 338 | struct drm_connector_state *conn_state) | 341 | struct drm_connector_state *conn_state) |
| 339 | { | 342 | { |
| 343 | struct drm_display_mode *adjusted_mode = | ||
| 344 | &pipe_config->base.adjusted_mode; | ||
| 345 | |||
| 346 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 347 | return false; | ||
| 348 | |||
| 340 | return true; | 349 | return true; |
| 341 | } | 350 | } |
| 342 | 351 | ||
| @@ -344,6 +353,12 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder, | |||
| 344 | struct intel_crtc_state *pipe_config, | 353 | struct intel_crtc_state *pipe_config, |
| 345 | struct drm_connector_state *conn_state) | 354 | struct drm_connector_state *conn_state) |
| 346 | { | 355 | { |
| 356 | struct drm_display_mode *adjusted_mode = | ||
| 357 | &pipe_config->base.adjusted_mode; | ||
| 358 | |||
| 359 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 360 | return false; | ||
| 361 | |||
| 347 | pipe_config->has_pch_encoder = true; | 362 | pipe_config->has_pch_encoder = true; |
| 348 | 363 | ||
| 349 | return true; | 364 | return true; |
| @@ -354,6 +369,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder, | |||
| 354 | struct drm_connector_state *conn_state) | 369 | struct drm_connector_state *conn_state) |
| 355 | { | 370 | { |
| 356 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 371 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 372 | struct drm_display_mode *adjusted_mode = | ||
| 373 | &pipe_config->base.adjusted_mode; | ||
| 374 | |||
| 375 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 376 | return false; | ||
| 357 | 377 | ||
| 358 | pipe_config->has_pch_encoder = true; | 378 | pipe_config->has_pch_encoder = true; |
| 359 | 379 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dee3a8e659f1..2cc6faa1daa8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -14469,12 +14469,22 @@ static enum drm_mode_status | |||
| 14469 | intel_mode_valid(struct drm_device *dev, | 14469 | intel_mode_valid(struct drm_device *dev, |
| 14470 | const struct drm_display_mode *mode) | 14470 | const struct drm_display_mode *mode) |
| 14471 | { | 14471 | { |
| 14472 | /* | ||
| 14473 | * Can't reject DBLSCAN here because Xorg ddxen can add piles | ||
| 14474 | * of DBLSCAN modes to the output's mode list when they detect | ||
| 14475 | * the scaling mode property on the connector. And they don't | ||
| 14476 | * ask the kernel to validate those modes in any way until | ||
| 14477 | * modeset time at which point the client gets a protocol error. | ||
| 14478 | * So in order to not upset those clients we silently ignore the | ||
| 14479 | * DBLSCAN flag on such connectors. For other connectors we will | ||
| 14480 | * reject modes with the DBLSCAN flag in encoder->compute_config(). | ||
| 14481 | * And we always reject DBLSCAN modes in connector->mode_valid() | ||
| 14482 | * as we never want such modes on the connector's mode list. | ||
| 14483 | */ | ||
| 14484 | |||
| 14472 | if (mode->vscan > 1) | 14485 | if (mode->vscan > 1) |
| 14473 | return MODE_NO_VSCAN; | 14486 | return MODE_NO_VSCAN; |
| 14474 | 14487 | ||
| 14475 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 14476 | return MODE_NO_DBLESCAN; | ||
| 14477 | |||
| 14478 | if (mode->flags & DRM_MODE_FLAG_HSKEW) | 14488 | if (mode->flags & DRM_MODE_FLAG_HSKEW) |
| 14479 | return MODE_H_ILLEGAL; | 14489 | return MODE_H_ILLEGAL; |
| 14480 | 14490 | ||
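Taken together, the i915 connector hunks in this series follow one pattern: reject DBLSCAN in the connector's mode_valid() so such modes never appear on the mode list, and again in the encoder's compute_config() so a forced mode fails cleanly, while the driver-wide intel_mode_valid() stays silent for the Xorg case explained in the comment above. A condensed model of that split, with types reduced to the essentials:

```c
/*
 * Condensed model of the DBLSCAN handling pattern: connector->mode_valid()
 * filters DBLSCAN modes out of the mode list, encoder->compute_config()
 * refuses them if a client forces one anyway. The flag bit is illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODE_FLAG_DBLSCAN	(1u << 5)	/* illustrative bit */

enum mode_status { MODE_OK, MODE_NO_DBLESCAN };

struct display_mode {
	int clock;		/* kHz */
	unsigned int flags;
};

static enum mode_status connector_mode_valid(const struct display_mode *mode)
{
	if (mode->flags & MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;
	return MODE_OK;
}

static bool encoder_compute_config(const struct display_mode *adjusted_mode)
{
	if (adjusted_mode->flags & MODE_FLAG_DBLSCAN)
		return false;	/* forced DBLSCAN mode -> modeset fails */
	return true;
}

int main(void)
{
	struct display_mode dblscan = { .clock = 25175, .flags = MODE_FLAG_DBLSCAN };

	printf("mode_valid: %s\n",
	       connector_mode_valid(&dblscan) == MODE_OK ? "listed" : "filtered");
	printf("compute_config: %s\n",
	       encoder_compute_config(&dblscan) ? "accepted" : "rejected");
	return 0;
}
```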
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 8320f0e8e3be..16faea30114a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -420,6 +420,9 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
| 420 | int max_rate, mode_rate, max_lanes, max_link_clock; | 420 | int max_rate, mode_rate, max_lanes, max_link_clock; |
| 421 | int max_dotclk; | 421 | int max_dotclk; |
| 422 | 422 | ||
| 423 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 424 | return MODE_NO_DBLESCAN; | ||
| 425 | |||
| 423 | max_dotclk = intel_dp_downstream_max_dotclock(intel_dp); | 426 | max_dotclk = intel_dp_downstream_max_dotclock(intel_dp); |
| 424 | 427 | ||
| 425 | if (intel_dp_is_edp(intel_dp) && fixed_mode) { | 428 | if (intel_dp_is_edp(intel_dp) && fixed_mode) { |
| @@ -1862,7 +1865,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
| 1862 | conn_state->scaling_mode); | 1865 | conn_state->scaling_mode); |
| 1863 | } | 1866 | } |
| 1864 | 1867 | ||
| 1865 | if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && | 1868 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) |
| 1869 | return false; | ||
| 1870 | |||
| 1871 | if (HAS_GMCH_DISPLAY(dev_priv) && | ||
| 1866 | adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) | 1872 | adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) |
| 1867 | return false; | 1873 | return false; |
| 1868 | 1874 | ||
| @@ -2784,16 +2790,6 @@ static void g4x_disable_dp(struct intel_encoder *encoder, | |||
| 2784 | const struct drm_connector_state *old_conn_state) | 2790 | const struct drm_connector_state *old_conn_state) |
| 2785 | { | 2791 | { |
| 2786 | intel_disable_dp(encoder, old_crtc_state, old_conn_state); | 2792 | intel_disable_dp(encoder, old_crtc_state, old_conn_state); |
| 2787 | |||
| 2788 | /* disable the port before the pipe on g4x */ | ||
| 2789 | intel_dp_link_down(encoder, old_crtc_state); | ||
| 2790 | } | ||
| 2791 | |||
| 2792 | static void ilk_disable_dp(struct intel_encoder *encoder, | ||
| 2793 | const struct intel_crtc_state *old_crtc_state, | ||
| 2794 | const struct drm_connector_state *old_conn_state) | ||
| 2795 | { | ||
| 2796 | intel_disable_dp(encoder, old_crtc_state, old_conn_state); | ||
| 2797 | } | 2793 | } |
| 2798 | 2794 | ||
| 2799 | static void vlv_disable_dp(struct intel_encoder *encoder, | 2795 | static void vlv_disable_dp(struct intel_encoder *encoder, |
| @@ -2807,13 +2803,19 @@ static void vlv_disable_dp(struct intel_encoder *encoder, | |||
| 2807 | intel_disable_dp(encoder, old_crtc_state, old_conn_state); | 2803 | intel_disable_dp(encoder, old_crtc_state, old_conn_state); |
| 2808 | } | 2804 | } |
| 2809 | 2805 | ||
| 2810 | static void ilk_post_disable_dp(struct intel_encoder *encoder, | 2806 | static void g4x_post_disable_dp(struct intel_encoder *encoder, |
| 2811 | const struct intel_crtc_state *old_crtc_state, | 2807 | const struct intel_crtc_state *old_crtc_state, |
| 2812 | const struct drm_connector_state *old_conn_state) | 2808 | const struct drm_connector_state *old_conn_state) |
| 2813 | { | 2809 | { |
| 2814 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 2810 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
| 2815 | enum port port = encoder->port; | 2811 | enum port port = encoder->port; |
| 2816 | 2812 | ||
| 2813 | /* | ||
| 2814 | * Bspec does not list a specific disable sequence for g4x DP. | ||
| 2815 | * Follow the ilk+ sequence (disable pipe before the port) for | ||
| 2816 | * g4x DP as it does not suffer from underruns like the normal | ||
| 2817 | * g4x modeset sequence (disable pipe after the port). | ||
| 2818 | */ | ||
| 2817 | intel_dp_link_down(encoder, old_crtc_state); | 2819 | intel_dp_link_down(encoder, old_crtc_state); |
| 2818 | 2820 | ||
| 2819 | /* Only ilk+ has port A */ | 2821 | /* Only ilk+ has port A */ |
| @@ -6337,7 +6339,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
| 6337 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); | 6339 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); |
| 6338 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | 6340 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); |
| 6339 | 6341 | ||
| 6340 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) | 6342 | if (!HAS_GMCH_DISPLAY(dev_priv)) |
| 6341 | connector->interlace_allowed = true; | 6343 | connector->interlace_allowed = true; |
| 6342 | connector->doublescan_allowed = 0; | 6344 | connector->doublescan_allowed = 0; |
| 6343 | 6345 | ||
| @@ -6436,15 +6438,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, | |||
| 6436 | intel_encoder->enable = vlv_enable_dp; | 6438 | intel_encoder->enable = vlv_enable_dp; |
| 6437 | intel_encoder->disable = vlv_disable_dp; | 6439 | intel_encoder->disable = vlv_disable_dp; |
| 6438 | intel_encoder->post_disable = vlv_post_disable_dp; | 6440 | intel_encoder->post_disable = vlv_post_disable_dp; |
| 6439 | } else if (INTEL_GEN(dev_priv) >= 5) { | ||
| 6440 | intel_encoder->pre_enable = g4x_pre_enable_dp; | ||
| 6441 | intel_encoder->enable = g4x_enable_dp; | ||
| 6442 | intel_encoder->disable = ilk_disable_dp; | ||
| 6443 | intel_encoder->post_disable = ilk_post_disable_dp; | ||
| 6444 | } else { | 6441 | } else { |
| 6445 | intel_encoder->pre_enable = g4x_pre_enable_dp; | 6442 | intel_encoder->pre_enable = g4x_pre_enable_dp; |
| 6446 | intel_encoder->enable = g4x_enable_dp; | 6443 | intel_encoder->enable = g4x_enable_dp; |
| 6447 | intel_encoder->disable = g4x_disable_dp; | 6444 | intel_encoder->disable = g4x_disable_dp; |
| 6445 | intel_encoder->post_disable = g4x_post_disable_dp; | ||
| 6448 | } | 6446 | } |
| 6449 | 6447 | ||
| 6450 | intel_dig_port->dp.output_reg = output_reg; | 6448 | intel_dig_port->dp.output_reg = output_reg; |
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 9e6956c08688..5890500a3a8b 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
| @@ -48,6 +48,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, | |||
| 48 | bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, | 48 | bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, |
| 49 | DP_DPCD_QUIRK_LIMITED_M_N); | 49 | DP_DPCD_QUIRK_LIMITED_M_N); |
| 50 | 50 | ||
| 51 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 52 | return false; | ||
| 53 | |||
| 51 | pipe_config->has_pch_encoder = false; | 54 | pipe_config->has_pch_encoder = false; |
| 52 | bpp = 24; | 55 | bpp = 24; |
| 53 | if (intel_dp->compliance.test_data.bpc) { | 56 | if (intel_dp->compliance.test_data.bpc) { |
| @@ -366,6 +369,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, | |||
| 366 | if (!intel_dp) | 369 | if (!intel_dp) |
| 367 | return MODE_ERROR; | 370 | return MODE_ERROR; |
| 368 | 371 | ||
| 372 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 373 | return MODE_NO_DBLESCAN; | ||
| 374 | |||
| 369 | max_link_clock = intel_dp_max_link_rate(intel_dp); | 375 | max_link_clock = intel_dp_max_link_rate(intel_dp); |
| 370 | max_lanes = intel_dp_max_lane_count(intel_dp); | 376 | max_lanes = intel_dp_max_lane_count(intel_dp); |
| 371 | 377 | ||
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index cf39ca90d887..f349b3920199 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
| @@ -326,6 +326,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, | |||
| 326 | conn_state->scaling_mode); | 326 | conn_state->scaling_mode); |
| 327 | } | 327 | } |
| 328 | 328 | ||
| 329 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 330 | return false; | ||
| 331 | |||
| 329 | /* DSI uses short packets for sync events, so clear mode flags for DSI */ | 332 | /* DSI uses short packets for sync events, so clear mode flags for DSI */ |
| 330 | adjusted_mode->flags = 0; | 333 | adjusted_mode->flags = 0; |
| 331 | 334 | ||
| @@ -1266,6 +1269,9 @@ intel_dsi_mode_valid(struct drm_connector *connector, | |||
| 1266 | 1269 | ||
| 1267 | DRM_DEBUG_KMS("\n"); | 1270 | DRM_DEBUG_KMS("\n"); |
| 1268 | 1271 | ||
| 1272 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 1273 | return MODE_NO_DBLESCAN; | ||
| 1274 | |||
| 1269 | if (fixed_mode) { | 1275 | if (fixed_mode) { |
| 1270 | if (mode->hdisplay > fixed_mode->hdisplay) | 1276 | if (mode->hdisplay > fixed_mode->hdisplay) |
| 1271 | return MODE_PANEL; | 1277 | return MODE_PANEL; |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index a70d767313aa..61d908e0df0e 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
| @@ -219,6 +219,9 @@ intel_dvo_mode_valid(struct drm_connector *connector, | |||
| 219 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; | 219 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; |
| 220 | int target_clock = mode->clock; | 220 | int target_clock = mode->clock; |
| 221 | 221 | ||
| 222 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 223 | return MODE_NO_DBLESCAN; | ||
| 224 | |||
| 222 | /* XXX: Validate clock range */ | 225 | /* XXX: Validate clock range */ |
| 223 | 226 | ||
| 224 | if (fixed_mode) { | 227 | if (fixed_mode) { |
| @@ -254,6 +257,9 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder, | |||
| 254 | if (fixed_mode) | 257 | if (fixed_mode) |
| 255 | intel_fixed_panel_mode(fixed_mode, adjusted_mode); | 258 | intel_fixed_panel_mode(fixed_mode, adjusted_mode); |
| 256 | 259 | ||
| 260 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 261 | return false; | ||
| 262 | |||
| 257 | return true; | 263 | return true; |
| 258 | } | 264 | } |
| 259 | 265 | ||
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ee929f31f7db..d8cb53ef4351 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -1557,6 +1557,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector, | |||
| 1557 | bool force_dvi = | 1557 | bool force_dvi = |
| 1558 | READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI; | 1558 | READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI; |
| 1559 | 1559 | ||
| 1560 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 1561 | return MODE_NO_DBLESCAN; | ||
| 1562 | |||
| 1560 | clock = mode->clock; | 1563 | clock = mode->clock; |
| 1561 | 1564 | ||
| 1562 | if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) | 1565 | if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) |
| @@ -1677,6 +1680,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
| 1677 | int desired_bpp; | 1680 | int desired_bpp; |
| 1678 | bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI; | 1681 | bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI; |
| 1679 | 1682 | ||
| 1683 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 1684 | return false; | ||
| 1685 | |||
| 1680 | pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink; | 1686 | pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink; |
| 1681 | 1687 | ||
| 1682 | if (pipe_config->has_hdmi_sink) | 1688 | if (pipe_config->has_hdmi_sink) |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 15434cad5430..7c4c8fb1dae4 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
| @@ -1545,11 +1545,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) | |||
| 1545 | /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ | 1545 | /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ |
| 1546 | batch = gen8_emit_flush_coherentl3_wa(engine, batch); | 1546 | batch = gen8_emit_flush_coherentl3_wa(engine, batch); |
| 1547 | 1547 | ||
| 1548 | *batch++ = MI_LOAD_REGISTER_IMM(3); | ||
| 1549 | |||
| 1548 | /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ | 1550 | /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ |
| 1549 | *batch++ = MI_LOAD_REGISTER_IMM(1); | ||
| 1550 | *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2); | 1551 | *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2); |
| 1551 | *batch++ = _MASKED_BIT_DISABLE( | 1552 | *batch++ = _MASKED_BIT_DISABLE( |
| 1552 | GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE); | 1553 | GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE); |
| 1554 | |||
| 1555 | /* BSpec: 11391 */ | ||
| 1556 | *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN); | ||
| 1557 | *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX); | ||
| 1558 | |||
| 1559 | /* BSpec: 11299 */ | ||
| 1560 | *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3); | ||
| 1561 | *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX); | ||
| 1562 | |||
| 1553 | *batch++ = MI_NOOP; | 1563 | *batch++ = MI_NOOP; |
| 1554 | 1564 | ||
| 1555 | /* WaClearSlmSpaceAtContextSwitch:kbl */ | 1565 | /* WaClearSlmSpaceAtContextSwitch:kbl */ |
| @@ -2641,10 +2651,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, | |||
| 2641 | context_size += LRC_HEADER_PAGES * PAGE_SIZE; | 2651 | context_size += LRC_HEADER_PAGES * PAGE_SIZE; |
| 2642 | 2652 | ||
| 2643 | ctx_obj = i915_gem_object_create(ctx->i915, context_size); | 2653 | ctx_obj = i915_gem_object_create(ctx->i915, context_size); |
| 2644 | if (IS_ERR(ctx_obj)) { | 2654 | if (IS_ERR(ctx_obj)) |
| 2645 | ret = PTR_ERR(ctx_obj); | 2655 | return PTR_ERR(ctx_obj); |
| 2646 | goto error_deref_obj; | ||
| 2647 | } | ||
| 2648 | 2656 | ||
| 2649 | vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL); | 2657 | vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL); |
| 2650 | if (IS_ERR(vma)) { | 2658 | if (IS_ERR(vma)) { |
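The intel_lrc.c hunk groups three workaround register writes under a single MI_LOAD_REGISTER_IMM(3), each write being an (offset, value) pair. A sketch of emitting such a batch into a dword buffer; the LRI opcode encoding and the COMMON_SLICE_CHICKEN2 offset below are placeholders patterned on the i915 definitions, while the FF_SLICE_CHICKEN and _3D_CHICKEN3 offsets and bits come from the i915_reg.h hunk above:

```c
/*
 * Sketch of the reworked gen9 indirect-context batch: one
 * MI_LOAD_REGISTER_IMM(3) followed by three (register offset, value) pairs
 * instead of one LRI per register. Opcode encoding, the COMMON_SLICE_CHICKEN2
 * offset and its bit are illustrative placeholders.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MI_LOAD_REGISTER_IMM(n)	((0x22u << 23) | (2u * (n) - 1))	/* placeholder encoding */
#define MI_NOOP			0u

#define COMMON_SLICE_CHICKEN2	0x7014u		/* placeholder offset */
#define FF_SLICE_CHICKEN	0x2088u
#define _3D_CHICKEN3		0x2090u

#define MASKED_BIT_ENABLE(b)	(((b) << 16) | (b))
#define MASKED_BIT_DISABLE(b)	((b) << 16)

static uint32_t *emit_wa_lri(uint32_t *batch)
{
	*batch++ = MI_LOAD_REGISTER_IMM(3);

	*batch++ = COMMON_SLICE_CHICKEN2;
	*batch++ = MASKED_BIT_DISABLE(1u << 0);		/* placeholder bit */

	*batch++ = FF_SLICE_CHICKEN;
	*batch++ = MASKED_BIT_ENABLE(1u << 1);		/* CL provoking vertex fix */

	*batch++ = _3D_CHICKEN3;
	*batch++ = MASKED_BIT_ENABLE(1u << 12);		/* SF provoking vertex fix */

	*batch++ = MI_NOOP;
	return batch;
}

int main(void)
{
	uint32_t batch[16];
	uint32_t *end = emit_wa_lri(batch);

	for (uint32_t *p = batch; p < end; p++)
		printf("0x%08" PRIx32 "\n", *p);
	return 0;
}
```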
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index d278f24ba6ae..48f618dc9abb 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
| @@ -380,6 +380,8 @@ intel_lvds_mode_valid(struct drm_connector *connector, | |||
| 380 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; | 380 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; |
| 381 | int max_pixclk = to_i915(connector->dev)->max_dotclk_freq; | 381 | int max_pixclk = to_i915(connector->dev)->max_dotclk_freq; |
| 382 | 382 | ||
| 383 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 384 | return MODE_NO_DBLESCAN; | ||
| 383 | if (mode->hdisplay > fixed_mode->hdisplay) | 385 | if (mode->hdisplay > fixed_mode->hdisplay) |
| 384 | return MODE_PANEL; | 386 | return MODE_PANEL; |
| 385 | if (mode->vdisplay > fixed_mode->vdisplay) | 387 | if (mode->vdisplay > fixed_mode->vdisplay) |
| @@ -429,6 +431,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, | |||
| 429 | intel_fixed_panel_mode(intel_connector->panel.fixed_mode, | 431 | intel_fixed_panel_mode(intel_connector->panel.fixed_mode, |
| 430 | adjusted_mode); | 432 | adjusted_mode); |
| 431 | 433 | ||
| 434 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 435 | return false; | ||
| 436 | |||
| 432 | if (HAS_PCH_SPLIT(dev_priv)) { | 437 | if (HAS_PCH_SPLIT(dev_priv)) { |
| 433 | pipe_config->has_pch_encoder = true; | 438 | pipe_config->has_pch_encoder = true; |
| 434 | 439 | ||
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 25005023c243..26975df4e593 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
| @@ -1160,6 +1160,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, | |||
| 1160 | adjusted_mode); | 1160 | adjusted_mode); |
| 1161 | } | 1161 | } |
| 1162 | 1162 | ||
| 1163 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 1164 | return false; | ||
| 1165 | |||
| 1163 | /* | 1166 | /* |
| 1164 | * Make the CRTC code factor in the SDVO pixel multiplier. The | 1167 | * Make the CRTC code factor in the SDVO pixel multiplier. The |
| 1165 | * SDVO device will factor out the multiplier during mode_set. | 1168 | * SDVO device will factor out the multiplier during mode_set. |
| @@ -1621,6 +1624,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector, | |||
| 1621 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); | 1624 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); |
| 1622 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; | 1625 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; |
| 1623 | 1626 | ||
| 1627 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 1628 | return MODE_NO_DBLESCAN; | ||
| 1629 | |||
| 1624 | if (intel_sdvo->pixel_clock_min > mode->clock) | 1630 | if (intel_sdvo->pixel_clock_min > mode->clock) |
| 1625 | return MODE_CLOCK_LOW; | 1631 | return MODE_CLOCK_LOW; |
| 1626 | 1632 | ||
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 885fc3809f7f..b55b5c157e38 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
| @@ -850,6 +850,9 @@ intel_tv_mode_valid(struct drm_connector *connector, | |||
| 850 | const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); | 850 | const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); |
| 851 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; | 851 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; |
| 852 | 852 | ||
| 853 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
| 854 | return MODE_NO_DBLESCAN; | ||
| 855 | |||
| 853 | if (mode->clock > max_dotclk) | 856 | if (mode->clock > max_dotclk) |
| 854 | return MODE_CLOCK_HIGH; | 857 | return MODE_CLOCK_HIGH; |
| 855 | 858 | ||
| @@ -877,16 +880,21 @@ intel_tv_compute_config(struct intel_encoder *encoder, | |||
| 877 | struct drm_connector_state *conn_state) | 880 | struct drm_connector_state *conn_state) |
| 878 | { | 881 | { |
| 879 | const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state); | 882 | const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state); |
| 883 | struct drm_display_mode *adjusted_mode = | ||
| 884 | &pipe_config->base.adjusted_mode; | ||
| 880 | 885 | ||
| 881 | if (!tv_mode) | 886 | if (!tv_mode) |
| 882 | return false; | 887 | return false; |
| 883 | 888 | ||
| 884 | pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock; | 889 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) |
| 890 | return false; | ||
| 891 | |||
| 892 | adjusted_mode->crtc_clock = tv_mode->clock; | ||
| 885 | DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); | 893 | DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); |
| 886 | pipe_config->pipe_bpp = 8*3; | 894 | pipe_config->pipe_bpp = 8*3; |
| 887 | 895 | ||
| 888 | /* TV has its own notion of sync and other mode flags, so clear them. */ | 896 |
| 889 | pipe_config->base.adjusted_mode.flags = 0; | 897 | adjusted_mode->flags = 0; |
| 890 | 898 | ||
| 891 | /* | 899 | /* |
| 892 | * FIXME: We don't check whether the input mode is actually what we want | 900 | * FIXME: We don't check whether the input mode is actually what we want |
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c index 291c08117ab6..397143b639c6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c +++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c | |||
| @@ -132,7 +132,7 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm, | |||
| 132 | 132 | ||
| 133 | nvif_object_map(&wndw->wimm.base.user, NULL, 0); | 133 | nvif_object_map(&wndw->wimm.base.user, NULL, 0); |
| 134 | wndw->immd = func; | 134 | wndw->immd = func; |
| 135 | wndw->ctxdma.parent = &disp->core->chan.base.user; | 135 | wndw->ctxdma.parent = NULL; |
| 136 | return 0; | 136 | return 0; |
| 137 | } | 137 | } |
| 138 | 138 | ||
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 224963b533a6..c5a9bc1af5af 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c | |||
| @@ -444,14 +444,17 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) | |||
| 444 | if (ret) | 444 | if (ret) |
| 445 | return ret; | 445 | return ret; |
| 446 | 446 | ||
| 447 | ctxdma = nv50_wndw_ctxdma_new(wndw, fb); | 447 | if (wndw->ctxdma.parent) { |
| 448 | if (IS_ERR(ctxdma)) { | 448 | ctxdma = nv50_wndw_ctxdma_new(wndw, fb); |
| 449 | nouveau_bo_unpin(fb->nvbo); | 449 | if (IS_ERR(ctxdma)) { |
| 450 | return PTR_ERR(ctxdma); | 450 | nouveau_bo_unpin(fb->nvbo); |
| 451 | return PTR_ERR(ctxdma); | ||
| 452 | } | ||
| 453 | |||
| 454 | asyw->image.handle[0] = ctxdma->object.handle; | ||
| 451 | } | 455 | } |
| 452 | 456 | ||
| 453 | asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv); | 457 | asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv); |
| 454 | asyw->image.handle[0] = ctxdma->object.handle; | ||
| 455 | asyw->image.offset[0] = fb->nvbo->bo.offset; | 458 | asyw->image.offset[0] = fb->nvbo->bo.offset; |
| 456 | 459 | ||
| 457 | if (wndw->func->prepare) { | 460 | if (wndw->func->prepare) { |
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index b8cda9449241..768207fbbae3 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
| @@ -623,7 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, | |||
| 623 | struct qxl_cursor_cmd *cmd; | 623 | struct qxl_cursor_cmd *cmd; |
| 624 | struct qxl_cursor *cursor; | 624 | struct qxl_cursor *cursor; |
| 625 | struct drm_gem_object *obj; | 625 | struct drm_gem_object *obj; |
| 626 | struct qxl_bo *cursor_bo = NULL, *user_bo = NULL; | 626 | struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL; |
| 627 | int ret; | 627 | int ret; |
| 628 | void *user_ptr; | 628 | void *user_ptr; |
| 629 | int size = 64*64*4; | 629 | int size = 64*64*4; |
| @@ -677,7 +677,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, | |||
| 677 | cursor_bo, 0); | 677 | cursor_bo, 0); |
| 678 | cmd->type = QXL_CURSOR_SET; | 678 | cmd->type = QXL_CURSOR_SET; |
| 679 | 679 | ||
| 680 | qxl_bo_unref(&qcrtc->cursor_bo); | 680 | old_cursor_bo = qcrtc->cursor_bo; |
| 681 | qcrtc->cursor_bo = cursor_bo; | 681 | qcrtc->cursor_bo = cursor_bo; |
| 682 | cursor_bo = NULL; | 682 | cursor_bo = NULL; |
| 683 | } else { | 683 | } else { |
| @@ -697,6 +697,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, | |||
| 697 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | 697 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
| 698 | qxl_release_fence_buffer_objects(release); | 698 | qxl_release_fence_buffer_objects(release); |
| 699 | 699 | ||
| 700 | if (old_cursor_bo) | ||
| 701 | qxl_bo_unref(&old_cursor_bo); | ||
| 702 | |||
| 700 | qxl_bo_unref(&cursor_bo); | 703 | qxl_bo_unref(&cursor_bo); |
| 701 | 704 | ||
| 702 | return; | 705 | return; |
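The qxl change keeps the previous cursor BO referenced until the command that installs the new one has been pushed and fenced, and only then drops it. A generic sketch of that defer-the-release ordering, with malloc/free standing in for qxl_bo references:

```c
/*
 * Generic sketch of the ordering the qxl hunk enforces: swap in the new
 * cursor buffer, submit the command that references it, and only then
 * release the old buffer. malloc/free stand in for qxl_bo refcounting.
 */
#include <stdio.h>
#include <stdlib.h>

struct cursor_bo {
	char data[64 * 64 * 4];
};

struct crtc {
	struct cursor_bo *cursor_bo;
};

static void push_cursor_command(const struct cursor_bo *bo)
{
	/* stand-in for filling and pushing a QXL_CURSOR_SET command */
	printf("cursor command submitted for %p\n", (const void *)bo);
}

static void update_cursor(struct crtc *crtc, struct cursor_bo *new_bo)
{
	struct cursor_bo *old_cursor_bo = crtc->cursor_bo;

	crtc->cursor_bo = new_bo;
	push_cursor_command(new_bo);

	/* old buffer released only after the new command is out */
	free(old_cursor_bo);
}

int main(void)
{
	struct crtc crtc = { .cursor_bo = NULL };

	for (int i = 0; i < 2; i++) {
		struct cursor_bo *bo = calloc(1, sizeof(*bo));

		if (!bo)
			return 1;
		update_cursor(&crtc, bo);
	}
	free(crtc.cursor_bo);
	return 0;
}
```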
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 08747fc3ee71..8232b39e16ca 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c | |||
| @@ -17,7 +17,6 @@ | |||
| 17 | #include <drm/drm_encoder.h> | 17 | #include <drm/drm_encoder.h> |
| 18 | #include <drm/drm_modes.h> | 18 | #include <drm/drm_modes.h> |
| 19 | #include <drm/drm_of.h> | 19 | #include <drm/drm_of.h> |
| 20 | #include <drm/drm_panel.h> | ||
| 21 | 20 | ||
| 22 | #include <uapi/drm/drm_mode.h> | 21 | #include <uapi/drm/drm_mode.h> |
| 23 | 22 | ||
| @@ -418,9 +417,6 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon, | |||
| 418 | static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, | 417 | static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, |
| 419 | const struct drm_display_mode *mode) | 418 | const struct drm_display_mode *mode) |
| 420 | { | 419 | { |
| 421 | struct drm_panel *panel = tcon->panel; | ||
| 422 | struct drm_connector *connector = panel->connector; | ||
| 423 | struct drm_display_info display_info = connector->display_info; | ||
| 424 | unsigned int bp, hsync, vsync; | 420 | unsigned int bp, hsync, vsync; |
| 425 | u8 clk_delay; | 421 | u8 clk_delay; |
| 426 | u32 val = 0; | 422 | u32 val = 0; |
| @@ -478,27 +474,6 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, | |||
| 478 | if (mode->flags & DRM_MODE_FLAG_PVSYNC) | 474 | if (mode->flags & DRM_MODE_FLAG_PVSYNC) |
| 479 | val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE; | 475 | val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE; |
| 480 | 476 | ||
| 481 | /* | ||
| 482 | * On A20 and similar SoCs, the only way to achieve Positive Edge | ||
| 483 | * (Rising Edge), is setting dclk clock phase to 2/3(240°). | ||
| 484 | * By default TCON works in Negative Edge(Falling Edge), | ||
| 485 | * this is why phase is set to 0 in that case. | ||
| 486 | * Unfortunately there's no way to logically invert dclk through | ||
| 487 | * IO_POL register. | ||
| 488 | * The only acceptable way to work, triple checked with scope, | ||
| 489 | * is using clock phase set to 0° for Negative Edge and set to 240° | ||
| 490 | * for Positive Edge. | ||
| 491 | * On A33 and similar SoCs there would be a 90° phase option, | ||
| 492 | * but it divides also dclk by 2. | ||
| 493 | * Following code is a way to avoid quirks all around TCON | ||
| 494 | * and DOTCLOCK drivers. | ||
| 495 | */ | ||
| 496 | if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) | ||
| 497 | clk_set_phase(tcon->dclk, 240); | ||
| 498 | |||
| 499 | if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) | ||
| 500 | clk_set_phase(tcon->dclk, 0); | ||
| 501 | |||
| 502 | regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG, | 477 | regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG, |
| 503 | SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE, | 478 | SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE, |
| 504 | val); | 479 | val); |
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index bf3bb7e1adab..9d3ef879dc51 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c | |||
| @@ -1074,6 +1074,13 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = { | |||
| 1074 | DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"), | 1074 | DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"), |
| 1075 | }, | 1075 | }, |
| 1076 | }, | 1076 | }, |
| 1077 | { | ||
| 1078 | .ident = "Dell XPS13 9333", | ||
| 1079 | .matches = { | ||
| 1080 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
| 1081 | DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"), | ||
| 1082 | }, | ||
| 1083 | }, | ||
| 1077 | { } | 1084 | { } |
| 1078 | }; | 1085 | }; |
| 1079 | 1086 | ||
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 155d4d1d1585..f9d1349c3286 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
| @@ -4175,7 +4175,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
| 4175 | * The temperature is already monitored if the respective bit in <mask> | 4175 | * The temperature is already monitored if the respective bit in <mask> |
| 4176 | * is set. | 4176 | * is set. |
| 4177 | */ | 4177 | */ |
| 4178 | for (i = 0; i < 32; i++) { | 4178 | for (i = 0; i < 31; i++) { |
| 4179 | if (!(data->temp_mask & BIT(i + 1))) | 4179 | if (!(data->temp_mask & BIT(i + 1))) |
| 4180 | continue; | 4180 | continue; |
| 4181 | if (!reg_temp_alternate[i]) | 4181 | if (!reg_temp_alternate[i]) |
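The nct6775 one-liner trims the loop so that BIT(i + 1) never asks for bit 32: the 32-bit temperature mask only covers sources 1..31, and on a 32-bit build BIT(32) would over-shift an unsigned long. A quick check that the dropped iteration could never have matched anyway:

```c
/*
 * Demonstration of the loop-limit fix: the loop tests temp_mask & BIT(i + 1),
 * so with a 32-bit mask the largest usable index is i == 30 (bit 31). The old
 * bound of 32 asked for bit 32, which a 32-bit mask can never contain.
 * BIT_ULL is used here so the comparison itself stays well defined.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	uint32_t temp_mask = UINT32_MAX;	/* every monitorable source set */
	int hits_old = 0, hits_new = 0;

	for (int i = 0; i < 32; i++)		/* old bound */
		if (temp_mask & BIT_ULL(i + 1))
			hits_old++;

	for (int i = 0; i < 31; i++)		/* new bound */
		if (temp_mask & BIT_ULL(i + 1))
			hits_new++;

	/* bit 32 is never part of a 32-bit mask, so both counts are 31 */
	printf("old bound: %d hits, new bound: %d hits\n", hits_old, hits_new);
	return 0;
}
```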
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index 0f52d44b3f69..f5fe0100f9ff 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c | |||
| @@ -199,7 +199,7 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
| 199 | 199 | ||
| 200 | fail: | 200 | fail: |
| 201 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | 201 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); |
| 202 | gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs)); | 202 | gicv2m_unalloc_msi(v2m, hwirq, nr_irqs); |
| 203 | return err; | 203 | return err; |
| 204 | } | 204 | } |
| 205 | 205 | ||
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 5377d7e2afba..d7842d312d3e 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -182,6 +182,22 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev, | |||
| 182 | return its->collections + its_dev->event_map.col_map[event]; | 182 | return its->collections + its_dev->event_map.col_map[event]; |
| 183 | } | 183 | } |
| 184 | 184 | ||
| 185 | static struct its_collection *valid_col(struct its_collection *col) | ||
| 186 | { | ||
| 187 | if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0))) | ||
| 188 | return NULL; | ||
| 189 | |||
| 190 | return col; | ||
| 191 | } | ||
| 192 | |||
| 193 | static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) | ||
| 194 | { | ||
| 195 | if (valid_col(its->collections + vpe->col_idx)) | ||
| 196 | return vpe; | ||
| 197 | |||
| 198 | return NULL; | ||
| 199 | } | ||
| 200 | |||
| 185 | /* | 201 | /* |
| 186 | * ITS command descriptors - parameters to be encoded in a command | 202 | * ITS command descriptors - parameters to be encoded in a command |
| 187 | * block. | 203 | * block. |
| @@ -439,7 +455,7 @@ static struct its_collection *its_build_mapti_cmd(struct its_node *its, | |||
| 439 | 455 | ||
| 440 | its_fixup_cmd(cmd); | 456 | its_fixup_cmd(cmd); |
| 441 | 457 | ||
| 442 | return col; | 458 | return valid_col(col); |
| 443 | } | 459 | } |
| 444 | 460 | ||
| 445 | static struct its_collection *its_build_movi_cmd(struct its_node *its, | 461 | static struct its_collection *its_build_movi_cmd(struct its_node *its, |
| @@ -458,7 +474,7 @@ static struct its_collection *its_build_movi_cmd(struct its_node *its, | |||
| 458 | 474 | ||
| 459 | its_fixup_cmd(cmd); | 475 | its_fixup_cmd(cmd); |
| 460 | 476 | ||
| 461 | return col; | 477 | return valid_col(col); |
| 462 | } | 478 | } |
| 463 | 479 | ||
| 464 | static struct its_collection *its_build_discard_cmd(struct its_node *its, | 480 | static struct its_collection *its_build_discard_cmd(struct its_node *its, |
| @@ -476,7 +492,7 @@ static struct its_collection *its_build_discard_cmd(struct its_node *its, | |||
| 476 | 492 | ||
| 477 | its_fixup_cmd(cmd); | 493 | its_fixup_cmd(cmd); |
| 478 | 494 | ||
| 479 | return col; | 495 | return valid_col(col); |
| 480 | } | 496 | } |
| 481 | 497 | ||
| 482 | static struct its_collection *its_build_inv_cmd(struct its_node *its, | 498 | static struct its_collection *its_build_inv_cmd(struct its_node *its, |
| @@ -494,7 +510,7 @@ static struct its_collection *its_build_inv_cmd(struct its_node *its, | |||
| 494 | 510 | ||
| 495 | its_fixup_cmd(cmd); | 511 | its_fixup_cmd(cmd); |
| 496 | 512 | ||
| 497 | return col; | 513 | return valid_col(col); |
| 498 | } | 514 | } |
| 499 | 515 | ||
| 500 | static struct its_collection *its_build_int_cmd(struct its_node *its, | 516 | static struct its_collection *its_build_int_cmd(struct its_node *its, |
| @@ -512,7 +528,7 @@ static struct its_collection *its_build_int_cmd(struct its_node *its, | |||
| 512 | 528 | ||
| 513 | its_fixup_cmd(cmd); | 529 | its_fixup_cmd(cmd); |
| 514 | 530 | ||
| 515 | return col; | 531 | return valid_col(col); |
| 516 | } | 532 | } |
| 517 | 533 | ||
| 518 | static struct its_collection *its_build_clear_cmd(struct its_node *its, | 534 | static struct its_collection *its_build_clear_cmd(struct its_node *its, |
| @@ -530,7 +546,7 @@ static struct its_collection *its_build_clear_cmd(struct its_node *its, | |||
| 530 | 546 | ||
| 531 | its_fixup_cmd(cmd); | 547 | its_fixup_cmd(cmd); |
| 532 | 548 | ||
| 533 | return col; | 549 | return valid_col(col); |
| 534 | } | 550 | } |
| 535 | 551 | ||
| 536 | static struct its_collection *its_build_invall_cmd(struct its_node *its, | 552 | static struct its_collection *its_build_invall_cmd(struct its_node *its, |
| @@ -554,7 +570,7 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, | |||
| 554 | 570 | ||
| 555 | its_fixup_cmd(cmd); | 571 | its_fixup_cmd(cmd); |
| 556 | 572 | ||
| 557 | return desc->its_vinvall_cmd.vpe; | 573 | return valid_vpe(its, desc->its_vinvall_cmd.vpe); |
| 558 | } | 574 | } |
| 559 | 575 | ||
| 560 | static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, | 576 | static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, |
| @@ -576,7 +592,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, | |||
| 576 | 592 | ||
| 577 | its_fixup_cmd(cmd); | 593 | its_fixup_cmd(cmd); |
| 578 | 594 | ||
| 579 | return desc->its_vmapp_cmd.vpe; | 595 | return valid_vpe(its, desc->its_vmapp_cmd.vpe); |
| 580 | } | 596 | } |
| 581 | 597 | ||
| 582 | static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, | 598 | static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, |
| @@ -599,7 +615,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, | |||
| 599 | 615 | ||
| 600 | its_fixup_cmd(cmd); | 616 | its_fixup_cmd(cmd); |
| 601 | 617 | ||
| 602 | return desc->its_vmapti_cmd.vpe; | 618 | return valid_vpe(its, desc->its_vmapti_cmd.vpe); |
| 603 | } | 619 | } |
| 604 | 620 | ||
| 605 | static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, | 621 | static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, |
| @@ -622,7 +638,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, | |||
| 622 | 638 | ||
| 623 | its_fixup_cmd(cmd); | 639 | its_fixup_cmd(cmd); |
| 624 | 640 | ||
| 625 | return desc->its_vmovi_cmd.vpe; | 641 | return valid_vpe(its, desc->its_vmovi_cmd.vpe); |
| 626 | } | 642 | } |
| 627 | 643 | ||
| 628 | static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, | 644 | static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, |
| @@ -640,7 +656,7 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, | |||
| 640 | 656 | ||
| 641 | its_fixup_cmd(cmd); | 657 | its_fixup_cmd(cmd); |
| 642 | 658 | ||
| 643 | return desc->its_vmovp_cmd.vpe; | 659 | return valid_vpe(its, desc->its_vmovp_cmd.vpe); |
| 644 | } | 660 | } |
| 645 | 661 | ||
| 646 | static u64 its_cmd_ptr_to_offset(struct its_node *its, | 662 | static u64 its_cmd_ptr_to_offset(struct its_node *its, |
| @@ -1824,11 +1840,16 @@ static int its_alloc_tables(struct its_node *its) | |||
| 1824 | 1840 | ||
| 1825 | static int its_alloc_collections(struct its_node *its) | 1841 | static int its_alloc_collections(struct its_node *its) |
| 1826 | { | 1842 | { |
| 1843 | int i; | ||
| 1844 | |||
| 1827 | its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), | 1845 | its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), |
| 1828 | GFP_KERNEL); | 1846 | GFP_KERNEL); |
| 1829 | if (!its->collections) | 1847 | if (!its->collections) |
| 1830 | return -ENOMEM; | 1848 | return -ENOMEM; |
| 1831 | 1849 | ||
| 1850 | for (i = 0; i < nr_cpu_ids; i++) | ||
| 1851 | its->collections[i].target_address = ~0ULL; | ||
| 1852 | |||
| 1832 | return 0; | 1853 | return 0; |
| 1833 | } | 1854 | } |
| 1834 | 1855 | ||
| @@ -2310,7 +2331,14 @@ static int its_irq_domain_activate(struct irq_domain *domain, | |||
| 2310 | cpu_mask = cpumask_of_node(its_dev->its->numa_node); | 2331 | cpu_mask = cpumask_of_node(its_dev->its->numa_node); |
| 2311 | 2332 | ||
| 2312 | /* Bind the LPI to the first possible CPU */ | 2333 | /* Bind the LPI to the first possible CPU */ |
| 2313 | cpu = cpumask_first(cpu_mask); | 2334 | cpu = cpumask_first_and(cpu_mask, cpu_online_mask); |
| 2335 | if (cpu >= nr_cpu_ids) { | ||
| 2336 | if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) | ||
| 2337 | return -EINVAL; | ||
| 2338 | |||
| 2339 | cpu = cpumask_first(cpu_online_mask); | ||
| 2340 | } | ||
| 2341 | |||
| 2314 | its_dev->event_map.col_map[event] = cpu; | 2342 | its_dev->event_map.col_map[event] = cpu; |
| 2315 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); | 2343 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); |
| 2316 | 2344 | ||
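The activate hunk above now intersects the NUMA-local mask with cpu_online_mask before picking a target CPU, and only falls back to an arbitrary online CPU when the erratum workaround does not forbid it. A hedged, self-contained sketch of that selection logic, outside the ITS driver and with an invented flag name:

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    /*
     * Prefer an online CPU on the device's NUMA node; fall back to any
     * online CPU unless strictly node-local delivery is required.
     * Names here are illustrative.
     */
    static int demo_pick_target_cpu(int numa_node, bool node_local_only)
    {
            const struct cpumask *node_mask = cpumask_of_node(numa_node);
            unsigned int cpu;

            cpu = cpumask_first_and(node_mask, cpu_online_mask);
            if (cpu >= nr_cpu_ids) {
                    if (node_local_only)
                            return -EINVAL;

                    cpu = cpumask_first(cpu_online_mask);
            }

            return cpu;
    }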
| @@ -3399,6 +3427,16 @@ static int redist_disable_lpis(void) | |||
| 3399 | u64 timeout = USEC_PER_SEC; | 3427 | u64 timeout = USEC_PER_SEC; |
| 3400 | u64 val; | 3428 | u64 val; |
| 3401 | 3429 | ||
| 3430 | /* | ||
| 3431 | * If coming via a CPU hotplug event, we don't need to disable | ||
| 3432 | * LPIs before trying to re-enable them. They are already | ||
| 3433 | * configured and all is well in the world. Detect this case | ||
| 3434 | * by checking the allocation of the pending table for the | ||
| 3435 | * current CPU. | ||
| 3436 | */ | ||
| 3437 | if (gic_data_rdist()->pend_page) | ||
| 3438 | return 0; | ||
| 3439 | |||
| 3402 | if (!gic_rdists_supports_plpis()) { | 3440 | if (!gic_rdists_supports_plpis()) { |
| 3403 | pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); | 3441 | pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); |
| 3404 | return -ENXIO; | 3442 | return -ENXIO; |
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c index 1ec3bfe56693..c671b3212010 100644 --- a/drivers/irqchip/irq-ls-scfg-msi.c +++ b/drivers/irqchip/irq-ls-scfg-msi.c | |||
| @@ -93,8 +93,12 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) | |||
| 93 | msg->address_lo = lower_32_bits(msi_data->msiir_addr); | 93 | msg->address_lo = lower_32_bits(msi_data->msiir_addr); |
| 94 | msg->data = data->hwirq; | 94 | msg->data = data->hwirq; |
| 95 | 95 | ||
| 96 | if (msi_affinity_flag) | 96 | if (msi_affinity_flag) { |
| 97 | msg->data |= cpumask_first(data->common->affinity); | 97 | const struct cpumask *mask; |
| 98 | |||
| 99 | mask = irq_data_get_effective_affinity_mask(data); | ||
| 100 | msg->data |= cpumask_first(mask); | ||
| 101 | } | ||
| 98 | 102 | ||
| 99 | iommu_dma_map_msi_msg(data->irq, msg); | 103 | iommu_dma_map_msi_msg(data->irq, msg); |
| 100 | } | 104 | } |
| @@ -121,7 +125,7 @@ static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, | |||
| 121 | return -EINVAL; | 125 | return -EINVAL; |
| 122 | } | 126 | } |
| 123 | 127 | ||
| 124 | cpumask_copy(irq_data->common->affinity, mask); | 128 | irq_data_update_effective_affinity(irq_data, cpumask_of(cpu)); |
| 125 | 129 | ||
| 126 | return IRQ_SET_MASK_OK; | 130 | return IRQ_SET_MASK_OK; |
| 127 | } | 131 | } |
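The irq-ls-scfg-msi change above composes the MSI message from the effective affinity mask rather than the requested one, and set_affinity now records its decision with irq_data_update_effective_affinity() instead of copying the whole requested mask. A small sketch of the pattern, assuming a driver that encodes the target CPU index into the low bits of the MSI data word (an assumption for illustration only):

    #include <linux/irq.h>
    #include <linux/msi.h>
    #include <linux/cpumask.h>

    static void demo_compose_msg(struct irq_data *d, struct msi_msg *msg)
    {
            const struct cpumask *mask = irq_data_get_effective_affinity_mask(d);

            msg->data = (u32)d->hwirq | cpumask_first(mask);
    }

    static int demo_set_affinity(struct irq_data *d, const struct cpumask *mask,
                                 bool force)
    {
            unsigned int cpu = cpumask_any_and(mask, cpu_online_mask);

            if (cpu >= nr_cpu_ids)
                    return -EINVAL;

            /* Publish what was actually programmed, not what was requested. */
            irq_data_update_effective_affinity(d, cpumask_of(cpu));

            return IRQ_SET_MASK_OK;
    }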
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig index 10c08982185a..9c03f35d9df1 100644 --- a/drivers/lightnvm/Kconfig +++ b/drivers/lightnvm/Kconfig | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | menuconfig NVM | 5 | menuconfig NVM |
| 6 | bool "Open-Channel SSD target support" | 6 | bool "Open-Channel SSD target support" |
| 7 | depends on BLOCK && HAS_DMA && PCI | 7 | depends on BLOCK && PCI |
| 8 | select BLK_DEV_NVME | 8 | select BLK_DEV_NVME |
| 9 | help | 9 | help |
| 10 | Say Y here to get to enable Open-channel SSDs. | 10 | Say Y here to get to enable Open-channel SSDs. |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 21710a7460c8..46df030b2c3f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
| @@ -1808,6 +1808,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, | |||
| 1808 | u32 max_segments = | 1808 | u32 max_segments = |
| 1809 | (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1; | 1809 | (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1; |
| 1810 | 1810 | ||
| 1811 | max_segments = min_not_zero(max_segments, ctrl->max_segments); | ||
| 1811 | blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); | 1812 | blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); |
| 1812 | blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); | 1813 | blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); |
| 1813 | } | 1814 | } |
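The nvme core change above clamps max_segments with min_not_zero(): a transport-provided ctrl->max_segments of 0 means "no extra limit" and leaves the computed value alone, while any non-zero value acts as a cap. A tiny sketch of the semantics, independent of the block layer (values in the comments are made up):

    #include <linux/kernel.h>

    /* driver_cap == 0 means "no driver-specific limit"; otherwise it caps. */
    static u32 demo_segment_limit(u32 computed, u32 driver_cap)
    {
            return min_not_zero(computed, driver_cap);
    }

    /* demo_segment_limit(128, 0)   -> 128  (no cap)    */
    /* demo_segment_limit(128, 127) -> 127  (capped)    */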
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index b528a2f5826c..41d45a1b5c62 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
| @@ -2790,6 +2790,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) | |||
| 2790 | /* re-enable the admin_q so anything new can fast fail */ | 2790 | /* re-enable the admin_q so anything new can fast fail */ |
| 2791 | blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); | 2791 | blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); |
| 2792 | 2792 | ||
| 2793 | /* resume the io queues so that things will fast fail */ | ||
| 2794 | nvme_start_queues(&ctrl->ctrl); | ||
| 2795 | |||
| 2793 | nvme_fc_ctlr_inactive_on_rport(ctrl); | 2796 | nvme_fc_ctlr_inactive_on_rport(ctrl); |
| 2794 | } | 2797 | } |
| 2795 | 2798 | ||
| @@ -2804,9 +2807,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) | |||
| 2804 | * waiting for io to terminate | 2807 | * waiting for io to terminate |
| 2805 | */ | 2808 | */ |
| 2806 | nvme_fc_delete_association(ctrl); | 2809 | nvme_fc_delete_association(ctrl); |
| 2807 | |||
| 2808 | /* resume the io queues so that things will fast fail */ | ||
| 2809 | nvme_start_queues(nctrl); | ||
| 2810 | } | 2810 | } |
| 2811 | 2811 | ||
| 2812 | static void | 2812 | static void |
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 231807cbc849..0c4a33df3b2f 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h | |||
| @@ -170,6 +170,7 @@ struct nvme_ctrl { | |||
| 170 | u64 cap; | 170 | u64 cap; |
| 171 | u32 page_size; | 171 | u32 page_size; |
| 172 | u32 max_hw_sectors; | 172 | u32 max_hw_sectors; |
| 173 | u32 max_segments; | ||
| 173 | u16 oncs; | 174 | u16 oncs; |
| 174 | u16 oacs; | 175 | u16 oacs; |
| 175 | u16 nssa; | 176 | u16 nssa; |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index fc33804662e7..ba943f211687 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -38,6 +38,13 @@ | |||
| 38 | 38 | ||
| 39 | #define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc)) | 39 | #define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc)) |
| 40 | 40 | ||
| 41 | /* | ||
| 42 | * These can be higher, but we need to ensure that any command doesn't | ||
| 43 | * require an sg allocation that needs more than a page of data. | ||
| 44 | */ | ||
| 45 | #define NVME_MAX_KB_SZ 4096 | ||
| 46 | #define NVME_MAX_SEGS 127 | ||
| 47 | |||
| 41 | static int use_threaded_interrupts; | 48 | static int use_threaded_interrupts; |
| 42 | module_param(use_threaded_interrupts, int, 0); | 49 | module_param(use_threaded_interrupts, int, 0); |
| 43 | 50 | ||
| @@ -100,6 +107,8 @@ struct nvme_dev { | |||
| 100 | struct nvme_ctrl ctrl; | 107 | struct nvme_ctrl ctrl; |
| 101 | struct completion ioq_wait; | 108 | struct completion ioq_wait; |
| 102 | 109 | ||
| 110 | mempool_t *iod_mempool; | ||
| 111 | |||
| 103 | /* shadow doorbell buffer support: */ | 112 | /* shadow doorbell buffer support: */ |
| 104 | u32 *dbbuf_dbs; | 113 | u32 *dbbuf_dbs; |
| 105 | dma_addr_t dbbuf_dbs_dma_addr; | 114 | dma_addr_t dbbuf_dbs_dma_addr; |
| @@ -477,10 +486,7 @@ static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev) | |||
| 477 | iod->use_sgl = nvme_pci_use_sgls(dev, rq); | 486 | iod->use_sgl = nvme_pci_use_sgls(dev, rq); |
| 478 | 487 | ||
| 479 | if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { | 488 | if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { |
| 480 | size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg, | 489 | iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); |
| 481 | iod->use_sgl); | ||
| 482 | |||
| 483 | iod->sg = kmalloc(alloc_size, GFP_ATOMIC); | ||
| 484 | if (!iod->sg) | 490 | if (!iod->sg) |
| 485 | return BLK_STS_RESOURCE; | 491 | return BLK_STS_RESOURCE; |
| 486 | } else { | 492 | } else { |
| @@ -526,7 +532,7 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req) | |||
| 526 | } | 532 | } |
| 527 | 533 | ||
| 528 | if (iod->sg != iod->inline_sg) | 534 | if (iod->sg != iod->inline_sg) |
| 529 | kfree(iod->sg); | 535 | mempool_free(iod->sg, dev->iod_mempool); |
| 530 | } | 536 | } |
| 531 | 537 | ||
| 532 | #ifdef CONFIG_BLK_DEV_INTEGRITY | 538 | #ifdef CONFIG_BLK_DEV_INTEGRITY |
| @@ -2280,6 +2286,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) | |||
| 2280 | blk_put_queue(dev->ctrl.admin_q); | 2286 | blk_put_queue(dev->ctrl.admin_q); |
| 2281 | kfree(dev->queues); | 2287 | kfree(dev->queues); |
| 2282 | free_opal_dev(dev->ctrl.opal_dev); | 2288 | free_opal_dev(dev->ctrl.opal_dev); |
| 2289 | mempool_destroy(dev->iod_mempool); | ||
| 2283 | kfree(dev); | 2290 | kfree(dev); |
| 2284 | } | 2291 | } |
| 2285 | 2292 | ||
| @@ -2289,6 +2296,7 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status) | |||
| 2289 | 2296 | ||
| 2290 | nvme_get_ctrl(&dev->ctrl); | 2297 | nvme_get_ctrl(&dev->ctrl); |
| 2291 | nvme_dev_disable(dev, false); | 2298 | nvme_dev_disable(dev, false); |
| 2299 | nvme_kill_queues(&dev->ctrl); | ||
| 2292 | if (!queue_work(nvme_wq, &dev->remove_work)) | 2300 | if (!queue_work(nvme_wq, &dev->remove_work)) |
| 2293 | nvme_put_ctrl(&dev->ctrl); | 2301 | nvme_put_ctrl(&dev->ctrl); |
| 2294 | } | 2302 | } |
| @@ -2333,6 +2341,13 @@ static void nvme_reset_work(struct work_struct *work) | |||
| 2333 | if (result) | 2341 | if (result) |
| 2334 | goto out; | 2342 | goto out; |
| 2335 | 2343 | ||
| 2344 | /* | ||
| 2345 | * Limit the max command size to prevent iod->sg allocations going | ||
| 2346 | * over a single page. | ||
| 2347 | */ | ||
| 2348 | dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1; | ||
| 2349 | dev->ctrl.max_segments = NVME_MAX_SEGS; | ||
| 2350 | |||
| 2336 | result = nvme_init_identify(&dev->ctrl); | 2351 | result = nvme_init_identify(&dev->ctrl); |
| 2337 | if (result) | 2352 | if (result) |
| 2338 | goto out; | 2353 | goto out; |
| @@ -2405,7 +2420,6 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work) | |||
| 2405 | struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); | 2420 | struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); |
| 2406 | struct pci_dev *pdev = to_pci_dev(dev->dev); | 2421 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
| 2407 | 2422 | ||
| 2408 | nvme_kill_queues(&dev->ctrl); | ||
| 2409 | if (pci_get_drvdata(pdev)) | 2423 | if (pci_get_drvdata(pdev)) |
| 2410 | device_release_driver(&pdev->dev); | 2424 | device_release_driver(&pdev->dev); |
| 2411 | nvme_put_ctrl(&dev->ctrl); | 2425 | nvme_put_ctrl(&dev->ctrl); |
| @@ -2509,6 +2523,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2509 | int node, result = -ENOMEM; | 2523 | int node, result = -ENOMEM; |
| 2510 | struct nvme_dev *dev; | 2524 | struct nvme_dev *dev; |
| 2511 | unsigned long quirks = id->driver_data; | 2525 | unsigned long quirks = id->driver_data; |
| 2526 | size_t alloc_size; | ||
| 2512 | 2527 | ||
| 2513 | node = dev_to_node(&pdev->dev); | 2528 | node = dev_to_node(&pdev->dev); |
| 2514 | if (node == NUMA_NO_NODE) | 2529 | if (node == NUMA_NO_NODE) |
| @@ -2546,6 +2561,23 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2546 | if (result) | 2561 | if (result) |
| 2547 | goto release_pools; | 2562 | goto release_pools; |
| 2548 | 2563 | ||
| 2564 | /* | ||
| 2565 | * Double check that our mempool alloc size will cover the biggest | ||
| 2566 | * command we support. | ||
| 2567 | */ | ||
| 2568 | alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ, | ||
| 2569 | NVME_MAX_SEGS, true); | ||
| 2570 | WARN_ON_ONCE(alloc_size > PAGE_SIZE); | ||
| 2571 | |||
| 2572 | dev->iod_mempool = mempool_create_node(1, mempool_kmalloc, | ||
| 2573 | mempool_kfree, | ||
| 2574 | (void *) alloc_size, | ||
| 2575 | GFP_KERNEL, node); | ||
| 2576 | if (!dev->iod_mempool) { | ||
| 2577 | result = -ENOMEM; | ||
| 2578 | goto release_pools; | ||
| 2579 | } | ||
| 2580 | |||
| 2549 | dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); | 2581 | dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); |
| 2550 | 2582 | ||
| 2551 | nvme_get_ctrl(&dev->ctrl); | 2583 | nvme_get_ctrl(&dev->ctrl); |
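Taken together, the pci.c hunks above replace the per-request kmalloc of the scatterlist with a pre-sized mempool: max_hw_sectors and max_segments are clamped so the worst-case iod allocation fits in one page, the pool is created at probe time, and the I/O path then allocates from it with GFP_ATOMIC so forward progress no longer depends on the page allocator under memory pressure. A stripped-down sketch of that lifecycle follows; the size calculation is a placeholder, since nvme_pci_iod_alloc_size() is driver-internal.

    #include <linux/mempool.h>
    #include <linux/mm.h>

    struct demo_dev {
            mempool_t *iod_mempool;
    };

    /* Placeholder for the driver's worst-case per-command metadata size. */
    #define DEMO_WORST_CASE_IOD_SIZE    PAGE_SIZE

    static int demo_probe_pool(struct demo_dev *dev, int node)
    {
            size_t alloc_size = DEMO_WORST_CASE_IOD_SIZE;

            WARN_ON_ONCE(alloc_size > PAGE_SIZE);

            /* One reserved element guarantees progress when kmalloc fails. */
            dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
                                                   mempool_kfree,
                                                   (void *)alloc_size,
                                                   GFP_KERNEL, node);
            return dev->iod_mempool ? 0 : -ENOMEM;
    }

    static void *demo_alloc_iod(struct demo_dev *dev)
    {
            /* I/O path: atomic context, may dip into the reserved element. */
            return mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
    }

    static void demo_free_iod(struct demo_dev *dev, void *iod)
    {
            mempool_free(iod, dev->iod_mempool);
    }

    static void demo_remove_pool(struct demo_dev *dev)
    {
            mempool_destroy(dev->iod_mempool);
    }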
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index c9424da0d23e..9544625c0b7d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
| @@ -560,12 +560,6 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) | |||
| 560 | if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) | 560 | if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) |
| 561 | return; | 561 | return; |
| 562 | 562 | ||
| 563 | if (nvme_rdma_queue_idx(queue) == 0) { | ||
| 564 | nvme_rdma_free_qe(queue->device->dev, | ||
| 565 | &queue->ctrl->async_event_sqe, | ||
| 566 | sizeof(struct nvme_command), DMA_TO_DEVICE); | ||
| 567 | } | ||
| 568 | |||
| 569 | nvme_rdma_destroy_queue_ib(queue); | 563 | nvme_rdma_destroy_queue_ib(queue); |
| 570 | rdma_destroy_id(queue->cm_id); | 564 | rdma_destroy_id(queue->cm_id); |
| 571 | } | 565 | } |
| @@ -698,7 +692,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, | |||
| 698 | set = &ctrl->tag_set; | 692 | set = &ctrl->tag_set; |
| 699 | memset(set, 0, sizeof(*set)); | 693 | memset(set, 0, sizeof(*set)); |
| 700 | set->ops = &nvme_rdma_mq_ops; | 694 | set->ops = &nvme_rdma_mq_ops; |
| 701 | set->queue_depth = nctrl->opts->queue_size; | 695 | set->queue_depth = nctrl->sqsize + 1; |
| 702 | set->reserved_tags = 1; /* fabric connect */ | 696 | set->reserved_tags = 1; /* fabric connect */ |
| 703 | set->numa_node = NUMA_NO_NODE; | 697 | set->numa_node = NUMA_NO_NODE; |
| 704 | set->flags = BLK_MQ_F_SHOULD_MERGE; | 698 | set->flags = BLK_MQ_F_SHOULD_MERGE; |
| @@ -734,11 +728,12 @@ out: | |||
| 734 | static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, | 728 | static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, |
| 735 | bool remove) | 729 | bool remove) |
| 736 | { | 730 | { |
| 737 | nvme_rdma_stop_queue(&ctrl->queues[0]); | ||
| 738 | if (remove) { | 731 | if (remove) { |
| 739 | blk_cleanup_queue(ctrl->ctrl.admin_q); | 732 | blk_cleanup_queue(ctrl->ctrl.admin_q); |
| 740 | nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); | 733 | nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); |
| 741 | } | 734 | } |
| 735 | nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, | ||
| 736 | sizeof(struct nvme_command), DMA_TO_DEVICE); | ||
| 742 | nvme_rdma_free_queue(&ctrl->queues[0]); | 737 | nvme_rdma_free_queue(&ctrl->queues[0]); |
| 743 | } | 738 | } |
| 744 | 739 | ||
| @@ -755,11 +750,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, | |||
| 755 | 750 | ||
| 756 | ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev); | 751 | ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev); |
| 757 | 752 | ||
| 753 | error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe, | ||
| 754 | sizeof(struct nvme_command), DMA_TO_DEVICE); | ||
| 755 | if (error) | ||
| 756 | goto out_free_queue; | ||
| 757 | |||
| 758 | if (new) { | 758 | if (new) { |
| 759 | ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); | 759 | ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); |
| 760 | if (IS_ERR(ctrl->ctrl.admin_tagset)) { | 760 | if (IS_ERR(ctrl->ctrl.admin_tagset)) { |
| 761 | error = PTR_ERR(ctrl->ctrl.admin_tagset); | 761 | error = PTR_ERR(ctrl->ctrl.admin_tagset); |
| 762 | goto out_free_queue; | 762 | goto out_free_async_qe; |
| 763 | } | 763 | } |
| 764 | 764 | ||
| 765 | ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); | 765 | ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); |
| @@ -795,12 +795,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, | |||
| 795 | if (error) | 795 | if (error) |
| 796 | goto out_stop_queue; | 796 | goto out_stop_queue; |
| 797 | 797 | ||
| 798 | error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev, | ||
| 799 | &ctrl->async_event_sqe, sizeof(struct nvme_command), | ||
| 800 | DMA_TO_DEVICE); | ||
| 801 | if (error) | ||
| 802 | goto out_stop_queue; | ||
| 803 | |||
| 804 | return 0; | 798 | return 0; |
| 805 | 799 | ||
| 806 | out_stop_queue: | 800 | out_stop_queue: |
| @@ -811,6 +805,9 @@ out_cleanup_queue: | |||
| 811 | out_free_tagset: | 805 | out_free_tagset: |
| 812 | if (new) | 806 | if (new) |
| 813 | nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); | 807 | nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); |
| 808 | out_free_async_qe: | ||
| 809 | nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, | ||
| 810 | sizeof(struct nvme_command), DMA_TO_DEVICE); | ||
| 814 | out_free_queue: | 811 | out_free_queue: |
| 815 | nvme_rdma_free_queue(&ctrl->queues[0]); | 812 | nvme_rdma_free_queue(&ctrl->queues[0]); |
| 816 | return error; | 813 | return error; |
| @@ -819,7 +816,6 @@ out_free_queue: | |||
| 819 | static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl, | 816 | static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl, |
| 820 | bool remove) | 817 | bool remove) |
| 821 | { | 818 | { |
| 822 | nvme_rdma_stop_io_queues(ctrl); | ||
| 823 | if (remove) { | 819 | if (remove) { |
| 824 | blk_cleanup_queue(ctrl->ctrl.connect_q); | 820 | blk_cleanup_queue(ctrl->ctrl.connect_q); |
| 825 | nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset); | 821 | nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset); |
| @@ -888,9 +884,9 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) | |||
| 888 | list_del(&ctrl->list); | 884 | list_del(&ctrl->list); |
| 889 | mutex_unlock(&nvme_rdma_ctrl_mutex); | 885 | mutex_unlock(&nvme_rdma_ctrl_mutex); |
| 890 | 886 | ||
| 891 | kfree(ctrl->queues); | ||
| 892 | nvmf_free_options(nctrl->opts); | 887 | nvmf_free_options(nctrl->opts); |
| 893 | free_ctrl: | 888 | free_ctrl: |
| 889 | kfree(ctrl->queues); | ||
| 894 | kfree(ctrl); | 890 | kfree(ctrl); |
| 895 | } | 891 | } |
| 896 | 892 | ||
| @@ -949,6 +945,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) | |||
| 949 | return; | 945 | return; |
| 950 | 946 | ||
| 951 | destroy_admin: | 947 | destroy_admin: |
| 948 | nvme_rdma_stop_queue(&ctrl->queues[0]); | ||
| 952 | nvme_rdma_destroy_admin_queue(ctrl, false); | 949 | nvme_rdma_destroy_admin_queue(ctrl, false); |
| 953 | requeue: | 950 | requeue: |
| 954 | dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", | 951 | dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", |
| @@ -965,12 +962,14 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) | |||
| 965 | 962 | ||
| 966 | if (ctrl->ctrl.queue_count > 1) { | 963 | if (ctrl->ctrl.queue_count > 1) { |
| 967 | nvme_stop_queues(&ctrl->ctrl); | 964 | nvme_stop_queues(&ctrl->ctrl); |
| 965 | nvme_rdma_stop_io_queues(ctrl); | ||
| 968 | blk_mq_tagset_busy_iter(&ctrl->tag_set, | 966 | blk_mq_tagset_busy_iter(&ctrl->tag_set, |
| 969 | nvme_cancel_request, &ctrl->ctrl); | 967 | nvme_cancel_request, &ctrl->ctrl); |
| 970 | nvme_rdma_destroy_io_queues(ctrl, false); | 968 | nvme_rdma_destroy_io_queues(ctrl, false); |
| 971 | } | 969 | } |
| 972 | 970 | ||
| 973 | blk_mq_quiesce_queue(ctrl->ctrl.admin_q); | 971 | blk_mq_quiesce_queue(ctrl->ctrl.admin_q); |
| 972 | nvme_rdma_stop_queue(&ctrl->queues[0]); | ||
| 974 | blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, | 973 | blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, |
| 975 | nvme_cancel_request, &ctrl->ctrl); | 974 | nvme_cancel_request, &ctrl->ctrl); |
| 976 | nvme_rdma_destroy_admin_queue(ctrl, false); | 975 | nvme_rdma_destroy_admin_queue(ctrl, false); |
| @@ -1736,6 +1735,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) | |||
| 1736 | { | 1735 | { |
| 1737 | if (ctrl->ctrl.queue_count > 1) { | 1736 | if (ctrl->ctrl.queue_count > 1) { |
| 1738 | nvme_stop_queues(&ctrl->ctrl); | 1737 | nvme_stop_queues(&ctrl->ctrl); |
| 1738 | nvme_rdma_stop_io_queues(ctrl); | ||
| 1739 | blk_mq_tagset_busy_iter(&ctrl->tag_set, | 1739 | blk_mq_tagset_busy_iter(&ctrl->tag_set, |
| 1740 | nvme_cancel_request, &ctrl->ctrl); | 1740 | nvme_cancel_request, &ctrl->ctrl); |
| 1741 | nvme_rdma_destroy_io_queues(ctrl, shutdown); | 1741 | nvme_rdma_destroy_io_queues(ctrl, shutdown); |
| @@ -1747,6 +1747,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) | |||
| 1747 | nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); | 1747 | nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); |
| 1748 | 1748 | ||
| 1749 | blk_mq_quiesce_queue(ctrl->ctrl.admin_q); | 1749 | blk_mq_quiesce_queue(ctrl->ctrl.admin_q); |
| 1750 | nvme_rdma_stop_queue(&ctrl->queues[0]); | ||
| 1750 | blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, | 1751 | blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, |
| 1751 | nvme_cancel_request, &ctrl->ctrl); | 1752 | nvme_cancel_request, &ctrl->ctrl); |
| 1752 | blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); | 1753 | blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); |
| @@ -1932,11 +1933,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, | |||
| 1932 | goto out_free_ctrl; | 1933 | goto out_free_ctrl; |
| 1933 | } | 1934 | } |
| 1934 | 1935 | ||
| 1935 | ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, | ||
| 1936 | 0 /* no quirks, we're perfect! */); | ||
| 1937 | if (ret) | ||
| 1938 | goto out_free_ctrl; | ||
| 1939 | |||
| 1940 | INIT_DELAYED_WORK(&ctrl->reconnect_work, | 1936 | INIT_DELAYED_WORK(&ctrl->reconnect_work, |
| 1941 | nvme_rdma_reconnect_ctrl_work); | 1937 | nvme_rdma_reconnect_ctrl_work); |
| 1942 | INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work); | 1938 | INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work); |
| @@ -1950,14 +1946,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, | |||
| 1950 | ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), | 1946 | ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), |
| 1951 | GFP_KERNEL); | 1947 | GFP_KERNEL); |
| 1952 | if (!ctrl->queues) | 1948 | if (!ctrl->queues) |
| 1953 | goto out_uninit_ctrl; | 1949 | goto out_free_ctrl; |
| 1950 | |||
| 1951 | ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, | ||
| 1952 | 0 /* no quirks, we're perfect! */); | ||
| 1953 | if (ret) | ||
| 1954 | goto out_kfree_queues; | ||
| 1954 | 1955 | ||
| 1955 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); | 1956 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); |
| 1956 | WARN_ON_ONCE(!changed); | 1957 | WARN_ON_ONCE(!changed); |
| 1957 | 1958 | ||
| 1958 | ret = nvme_rdma_configure_admin_queue(ctrl, true); | 1959 | ret = nvme_rdma_configure_admin_queue(ctrl, true); |
| 1959 | if (ret) | 1960 | if (ret) |
| 1960 | goto out_kfree_queues; | 1961 | goto out_uninit_ctrl; |
| 1961 | 1962 | ||
| 1962 | /* sanity check icdoff */ | 1963 | /* sanity check icdoff */ |
| 1963 | if (ctrl->ctrl.icdoff) { | 1964 | if (ctrl->ctrl.icdoff) { |
| @@ -1974,20 +1975,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, | |||
| 1974 | goto out_remove_admin_queue; | 1975 | goto out_remove_admin_queue; |
| 1975 | } | 1976 | } |
| 1976 | 1977 | ||
| 1977 | if (opts->queue_size > ctrl->ctrl.maxcmd) { | 1978 | /* only warn if argument is too large here, will clamp later */ |
| 1978 | /* warn if maxcmd is lower than queue_size */ | ||
| 1979 | dev_warn(ctrl->ctrl.device, | ||
| 1980 | "queue_size %zu > ctrl maxcmd %u, clamping down\n", | ||
| 1981 | opts->queue_size, ctrl->ctrl.maxcmd); | ||
| 1982 | opts->queue_size = ctrl->ctrl.maxcmd; | ||
| 1983 | } | ||
| 1984 | |||
| 1985 | if (opts->queue_size > ctrl->ctrl.sqsize + 1) { | 1979 | if (opts->queue_size > ctrl->ctrl.sqsize + 1) { |
| 1986 | /* warn if sqsize is lower than queue_size */ | ||
| 1987 | dev_warn(ctrl->ctrl.device, | 1980 | dev_warn(ctrl->ctrl.device, |
| 1988 | "queue_size %zu > ctrl sqsize %u, clamping down\n", | 1981 | "queue_size %zu > ctrl sqsize %u, clamping down\n", |
| 1989 | opts->queue_size, ctrl->ctrl.sqsize + 1); | 1982 | opts->queue_size, ctrl->ctrl.sqsize + 1); |
| 1990 | opts->queue_size = ctrl->ctrl.sqsize + 1; | 1983 | } |
| 1984 | |||
| 1985 | /* warn if maxcmd is lower than sqsize+1 */ | ||
| 1986 | if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { | ||
| 1987 | dev_warn(ctrl->ctrl.device, | ||
| 1988 | "sqsize %u > ctrl maxcmd %u, clamping down\n", | ||
| 1989 | ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd); | ||
| 1990 | ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; | ||
| 1991 | } | 1991 | } |
| 1992 | 1992 | ||
| 1993 | if (opts->nr_io_queues) { | 1993 | if (opts->nr_io_queues) { |
| @@ -2013,15 +2013,16 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, | |||
| 2013 | return &ctrl->ctrl; | 2013 | return &ctrl->ctrl; |
| 2014 | 2014 | ||
| 2015 | out_remove_admin_queue: | 2015 | out_remove_admin_queue: |
| 2016 | nvme_rdma_stop_queue(&ctrl->queues[0]); | ||
| 2016 | nvme_rdma_destroy_admin_queue(ctrl, true); | 2017 | nvme_rdma_destroy_admin_queue(ctrl, true); |
| 2017 | out_kfree_queues: | ||
| 2018 | kfree(ctrl->queues); | ||
| 2019 | out_uninit_ctrl: | 2018 | out_uninit_ctrl: |
| 2020 | nvme_uninit_ctrl(&ctrl->ctrl); | 2019 | nvme_uninit_ctrl(&ctrl->ctrl); |
| 2021 | nvme_put_ctrl(&ctrl->ctrl); | 2020 | nvme_put_ctrl(&ctrl->ctrl); |
| 2022 | if (ret > 0) | 2021 | if (ret > 0) |
| 2023 | ret = -EIO; | 2022 | ret = -EIO; |
| 2024 | return ERR_PTR(ret); | 2023 | return ERR_PTR(ret); |
| 2024 | out_kfree_queues: | ||
| 2025 | kfree(ctrl->queues); | ||
| 2025 | out_free_ctrl: | 2026 | out_free_ctrl: |
| 2026 | kfree(ctrl); | 2027 | kfree(ctrl); |
| 2027 | return ERR_PTR(ret); | 2028 | return ERR_PTR(ret); |
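Much of the rdma.c diff above is about ordering: the async-event SQE is now allocated together with the admin queue and freed with it, the stop_queue calls moved out to the callers, and the error labels in create_ctrl were reshuffled so resources are released strictly in reverse order of acquisition. The generic shape of that unwind pattern, as a hedged sketch with kzalloc standing in for the real allocations:

    #include <linux/slab.h>

    struct demo_ctrl {
            void *queues;
            void *async_sqe;
            void *tagset;
    };

    static int demo_setup(struct demo_ctrl *ctrl)
    {
            int ret;

            ctrl->queues = kzalloc(64, GFP_KERNEL);
            if (!ctrl->queues)
                    return -ENOMEM;

            ctrl->async_sqe = kzalloc(64, GFP_KERNEL);
            if (!ctrl->async_sqe) {
                    ret = -ENOMEM;
                    goto out_free_queues;
            }

            ctrl->tagset = kzalloc(64, GFP_KERNEL);
            if (!ctrl->tagset) {
                    ret = -ENOMEM;
                    goto out_free_async_sqe;
            }

            return 0;

    out_free_async_sqe:
            kfree(ctrl->async_sqe);
    out_free_queues:
            kfree(ctrl->queues);
            return ret;
    }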
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index a03da764ecae..74d4b785d2da 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
| @@ -686,6 +686,14 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) | |||
| 686 | } | 686 | } |
| 687 | 687 | ||
| 688 | ctrl->csts = NVME_CSTS_RDY; | 688 | ctrl->csts = NVME_CSTS_RDY; |
| 689 | |||
| 690 | /* | ||
| 691 | * Controllers that are not yet enabled should not really enforce the | ||
| 692 | * keep alive timeout, but we still want to track a timeout and cleanup | ||
| 693 | * in case a host died before it enabled the controller. Hence, simply | ||
| 694 | * reset the keep alive timer when the controller is enabled. | ||
| 695 | */ | ||
| 696 | mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); | ||
| 689 | } | 697 | } |
| 690 | 698 | ||
| 691 | static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) | 699 | static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) |
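The nvmet change above (re)arms the keep-alive timer the moment the host enables the controller, so a controller that was created but never enabled is still cleaned up if the host dies, while an enabled controller restarts its timeout from the enable. mod_delayed_work() is the right tool because it pushes out an already-queued work item instead of queueing a duplicate. A minimal sketch of that idiom with illustrative names:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct demo_ctrl {
            struct delayed_work ka_work;
            unsigned int kato;              /* keep-alive timeout, seconds */
    };

    static void demo_ka_timeout(struct work_struct *work)
    {
            struct demo_ctrl *ctrl = container_of(to_delayed_work(work),
                                                  struct demo_ctrl, ka_work);

            pr_warn("keep-alive expired for ctrl %p\n", ctrl);
            /* controller teardown would go here */
    }

    static void demo_ctrl_init(struct demo_ctrl *ctrl)
    {
            INIT_DELAYED_WORK(&ctrl->ka_work, demo_ka_timeout);
            schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
    }

    static void demo_ctrl_enable(struct demo_ctrl *ctrl)
    {
            /* Restart the countdown; does not queue a second instance. */
            mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
    }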
diff --git a/drivers/opp/core.c b/drivers/opp/core.c index ab2f3fead6b1..31ff03dbeb83 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c | |||
| @@ -598,7 +598,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table, | |||
| 598 | } | 598 | } |
| 599 | 599 | ||
| 600 | /* Scaling up? Scale voltage before frequency */ | 600 | /* Scaling up? Scale voltage before frequency */ |
| 601 | if (freq > old_freq) { | 601 | if (freq >= old_freq) { |
| 602 | ret = _set_opp_voltage(dev, reg, new_supply); | 602 | ret = _set_opp_voltage(dev, reg, new_supply); |
| 603 | if (ret) | 603 | if (ret) |
| 604 | goto restore_voltage; | 604 | goto restore_voltage; |
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c index 76243caa08c6..b5c880b50bb3 100644 --- a/drivers/pinctrl/actions/pinctrl-owl.c +++ b/drivers/pinctrl/actions/pinctrl-owl.c | |||
| @@ -333,7 +333,7 @@ static int owl_pin_config_set(struct pinctrl_dev *pctrldev, | |||
| 333 | unsigned long flags; | 333 | unsigned long flags; |
| 334 | unsigned int param; | 334 | unsigned int param; |
| 335 | u32 reg, bit, width, arg; | 335 | u32 reg, bit, width, arg; |
| 336 | int ret, i; | 336 | int ret = 0, i; |
| 337 | 337 | ||
| 338 | info = &pctrl->soc->padinfo[pin]; | 338 | info = &pctrl->soc->padinfo[pin]; |
| 339 | 339 | ||
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index b601039d6c69..c4aa411f5935 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c | |||
| @@ -101,10 +101,11 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np) | |||
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | static int dt_to_map_one_config(struct pinctrl *p, | 103 | static int dt_to_map_one_config(struct pinctrl *p, |
| 104 | struct pinctrl_dev *pctldev, | 104 | struct pinctrl_dev *hog_pctldev, |
| 105 | const char *statename, | 105 | const char *statename, |
| 106 | struct device_node *np_config) | 106 | struct device_node *np_config) |
| 107 | { | 107 | { |
| 108 | struct pinctrl_dev *pctldev = NULL; | ||
| 108 | struct device_node *np_pctldev; | 109 | struct device_node *np_pctldev; |
| 109 | const struct pinctrl_ops *ops; | 110 | const struct pinctrl_ops *ops; |
| 110 | int ret; | 111 | int ret; |
| @@ -123,8 +124,10 @@ static int dt_to_map_one_config(struct pinctrl *p, | |||
| 123 | return -EPROBE_DEFER; | 124 | return -EPROBE_DEFER; |
| 124 | } | 125 | } |
| 125 | /* If we're creating a hog we can use the passed pctldev */ | 126 | /* If we're creating a hog we can use the passed pctldev */ |
| 126 | if (pctldev && (np_pctldev == p->dev->of_node)) | 127 | if (hog_pctldev && (np_pctldev == p->dev->of_node)) { |
| 128 | pctldev = hog_pctldev; | ||
| 127 | break; | 129 | break; |
| 130 | } | ||
| 128 | pctldev = get_pinctrl_dev_from_of_node(np_pctldev); | 131 | pctldev = get_pinctrl_dev_from_of_node(np_pctldev); |
| 129 | if (pctldev) | 132 | if (pctldev) |
| 130 | break; | 133 | break; |
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c index ad6da1184c9f..e3f1ab2290fc 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c | |||
| @@ -1459,6 +1459,9 @@ static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) | |||
| 1459 | struct mtk_pinctrl *hw = gpiochip_get_data(chip); | 1459 | struct mtk_pinctrl *hw = gpiochip_get_data(chip); |
| 1460 | unsigned long eint_n; | 1460 | unsigned long eint_n; |
| 1461 | 1461 | ||
| 1462 | if (!hw->eint) | ||
| 1463 | return -ENOTSUPP; | ||
| 1464 | |||
| 1462 | eint_n = offset; | 1465 | eint_n = offset; |
| 1463 | 1466 | ||
| 1464 | return mtk_eint_find_irq(hw->eint, eint_n); | 1467 | return mtk_eint_find_irq(hw->eint, eint_n); |
| @@ -1471,7 +1474,8 @@ static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset, | |||
| 1471 | unsigned long eint_n; | 1474 | unsigned long eint_n; |
| 1472 | u32 debounce; | 1475 | u32 debounce; |
| 1473 | 1476 | ||
| 1474 | if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) | 1477 | if (!hw->eint || |
| 1478 | pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) | ||
| 1475 | return -ENOTSUPP; | 1479 | return -ENOTSUPP; |
| 1476 | 1480 | ||
| 1477 | debounce = pinconf_to_config_argument(config); | 1481 | debounce = pinconf_to_config_argument(config); |
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index b3799695d8db..16ff56f93501 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c | |||
| @@ -1000,11 +1000,6 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev) | |||
| 1000 | return -ENOMEM; | 1000 | return -ENOMEM; |
| 1001 | 1001 | ||
| 1002 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1002 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1003 | if (!res) { | ||
| 1004 | dev_err(&pdev->dev, "Unable to get eint resource\n"); | ||
| 1005 | return -ENODEV; | ||
| 1006 | } | ||
| 1007 | |||
| 1008 | pctl->eint->base = devm_ioremap_resource(&pdev->dev, res); | 1003 | pctl->eint->base = devm_ioremap_resource(&pdev->dev, res); |
| 1009 | if (IS_ERR(pctl->eint->base)) | 1004 | if (IS_ERR(pctl->eint->base)) |
| 1010 | return PTR_ERR(pctl->eint->base); | 1005 | return PTR_ERR(pctl->eint->base); |
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index b3153c095199..e5647dac0818 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c | |||
| @@ -1590,8 +1590,11 @@ static int pcs_save_context(struct pcs_device *pcs) | |||
| 1590 | 1590 | ||
| 1591 | mux_bytes = pcs->width / BITS_PER_BYTE; | 1591 | mux_bytes = pcs->width / BITS_PER_BYTE; |
| 1592 | 1592 | ||
| 1593 | if (!pcs->saved_vals) | 1593 | if (!pcs->saved_vals) { |
| 1594 | pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size, GFP_ATOMIC); | 1594 | pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size, GFP_ATOMIC); |
| 1595 | if (!pcs->saved_vals) | ||
| 1596 | return -ENOMEM; | ||
| 1597 | } | ||
| 1595 | 1598 | ||
| 1596 | switch (pcs->width) { | 1599 | switch (pcs->width) { |
| 1597 | case 64: | 1600 | case 64: |
| @@ -1651,8 +1654,13 @@ static int pinctrl_single_suspend(struct platform_device *pdev, | |||
| 1651 | if (!pcs) | 1654 | if (!pcs) |
| 1652 | return -EINVAL; | 1655 | return -EINVAL; |
| 1653 | 1656 | ||
| 1654 | if (pcs->flags & PCS_CONTEXT_LOSS_OFF) | 1657 | if (pcs->flags & PCS_CONTEXT_LOSS_OFF) { |
| 1655 | pcs_save_context(pcs); | 1658 | int ret; |
| 1659 | |||
| 1660 | ret = pcs_save_context(pcs); | ||
| 1661 | if (ret < 0) | ||
| 1662 | return ret; | ||
| 1663 | } | ||
| 1656 | 1664 | ||
| 1657 | return pinctrl_force_sleep(pcs->pctl); | 1665 | return pinctrl_force_sleep(pcs->pctl); |
| 1658 | } | 1666 | } |
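The pinctrl-single fix above turns a silently ignored allocation failure into a propagated error: the lazily allocated context-save buffer is checked, and the suspend handler returns the failure instead of suspending with nothing saved. A compact sketch of that lazy-allocate-and-propagate pattern (struct and function names are illustrative):

    #include <linux/device.h>
    #include <linux/slab.h>

    struct demo_pcs {
            struct device *dev;
            void *saved_vals;
            size_t size;
    };

    static int demo_save_context(struct demo_pcs *pcs)
    {
            if (!pcs->saved_vals) {
                    pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size,
                                                   GFP_ATOMIC);
                    if (!pcs->saved_vals)
                            return -ENOMEM;
            }

            /* register snapshot would go here */
            return 0;
    }

    static int demo_suspend(struct demo_pcs *pcs)
    {
            int ret = demo_save_context(pcs);

            if (ret < 0)
                    return ret;     /* don't suspend with an unsaved context */

            return 0;
    }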
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 1da3d71e9f61..13948102ca29 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
| @@ -3592,7 +3592,7 @@ fc_bsg_job_timeout(struct request *req) | |||
| 3592 | 3592 | ||
| 3593 | /* the blk_end_sync_io() doesn't check the error */ | 3593 | /* the blk_end_sync_io() doesn't check the error */ |
| 3594 | if (inflight) | 3594 | if (inflight) |
| 3595 | blk_mq_complete_request(req); | 3595 | __blk_complete_request(req); |
| 3596 | return BLK_EH_DONE; | 3596 | return BLK_EH_DONE; |
| 3597 | } | 3597 | } |
| 3598 | 3598 | ||
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index 36f59a1be7e9..61389bdc7926 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c | |||
| @@ -654,10 +654,17 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc) | |||
| 654 | static int scsifront_sdev_configure(struct scsi_device *sdev) | 654 | static int scsifront_sdev_configure(struct scsi_device *sdev) |
| 655 | { | 655 | { |
| 656 | struct vscsifrnt_info *info = shost_priv(sdev->host); | 656 | struct vscsifrnt_info *info = shost_priv(sdev->host); |
| 657 | int err; | ||
| 657 | 658 | ||
| 658 | if (info && current == info->curr) | 659 | if (info && current == info->curr) { |
| 659 | xenbus_printf(XBT_NIL, info->dev->nodename, | 660 | err = xenbus_printf(XBT_NIL, info->dev->nodename, |
| 660 | info->dev_state_path, "%d", XenbusStateConnected); | 661 | info->dev_state_path, "%d", XenbusStateConnected); |
| 662 | if (err) { | ||
| 663 | xenbus_dev_error(info->dev, err, | ||
| 664 | "%s: writing dev_state_path", __func__); | ||
| 665 | return err; | ||
| 666 | } | ||
| 667 | } | ||
| 661 | 668 | ||
| 662 | return 0; | 669 | return 0; |
| 663 | } | 670 | } |
| @@ -665,10 +672,15 @@ static int scsifront_sdev_configure(struct scsi_device *sdev) | |||
| 665 | static void scsifront_sdev_destroy(struct scsi_device *sdev) | 672 | static void scsifront_sdev_destroy(struct scsi_device *sdev) |
| 666 | { | 673 | { |
| 667 | struct vscsifrnt_info *info = shost_priv(sdev->host); | 674 | struct vscsifrnt_info *info = shost_priv(sdev->host); |
| 675 | int err; | ||
| 668 | 676 | ||
| 669 | if (info && current == info->curr) | 677 | if (info && current == info->curr) { |
| 670 | xenbus_printf(XBT_NIL, info->dev->nodename, | 678 | err = xenbus_printf(XBT_NIL, info->dev->nodename, |
| 671 | info->dev_state_path, "%d", XenbusStateClosed); | 679 | info->dev_state_path, "%d", XenbusStateClosed); |
| 680 | if (err) | ||
| 681 | xenbus_dev_error(info->dev, err, | ||
| 682 | "%s: writing dev_state_path", __func__); | ||
| 683 | } | ||
| 672 | } | 684 | } |
| 673 | 685 | ||
| 674 | static struct scsi_host_template scsifront_sht = { | 686 | static struct scsi_host_template scsifront_sht = { |
| @@ -1003,9 +1015,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) | |||
| 1003 | 1015 | ||
| 1004 | if (scsi_add_device(info->host, chn, tgt, lun)) { | 1016 | if (scsi_add_device(info->host, chn, tgt, lun)) { |
| 1005 | dev_err(&dev->dev, "scsi_add_device\n"); | 1017 | dev_err(&dev->dev, "scsi_add_device\n"); |
| 1006 | xenbus_printf(XBT_NIL, dev->nodename, | 1018 | err = xenbus_printf(XBT_NIL, dev->nodename, |
| 1007 | info->dev_state_path, | 1019 | info->dev_state_path, |
| 1008 | "%d", XenbusStateClosed); | 1020 | "%d", XenbusStateClosed); |
| 1021 | if (err) | ||
| 1022 | xenbus_dev_error(dev, err, | ||
| 1023 | "%s: writing dev_state_path", __func__); | ||
| 1009 | } | 1024 | } |
| 1010 | break; | 1025 | break; |
| 1011 | case VSCSIFRONT_OP_DEL_LUN: | 1026 | case VSCSIFRONT_OP_DEL_LUN: |
| @@ -1019,10 +1034,14 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) | |||
| 1019 | } | 1034 | } |
| 1020 | break; | 1035 | break; |
| 1021 | case VSCSIFRONT_OP_READD_LUN: | 1036 | case VSCSIFRONT_OP_READD_LUN: |
| 1022 | if (device_state == XenbusStateConnected) | 1037 | if (device_state == XenbusStateConnected) { |
| 1023 | xenbus_printf(XBT_NIL, dev->nodename, | 1038 | err = xenbus_printf(XBT_NIL, dev->nodename, |
| 1024 | info->dev_state_path, | 1039 | info->dev_state_path, |
| 1025 | "%d", XenbusStateConnected); | 1040 | "%d", XenbusStateConnected); |
| 1041 | if (err) | ||
| 1042 | xenbus_dev_error(dev, err, | ||
| 1043 | "%s: writing dev_state_path", __func__); | ||
| 1044 | } | ||
| 1026 | break; | 1045 | break; |
| 1027 | default: | 1046 | default: |
| 1028 | break; | 1047 | break; |
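The xen-scsifront hunks above all apply the same rule: xenbus_printf() can fail (the xenstore write may not land), so its return value is checked and reported through xenbus_dev_error() rather than dropped. A hedged helper capturing the pattern the individual call sites repeat:

    #include <xen/xenbus.h>

    /*
     * Illustrative wrapper: write a device state value to xenstore and
     * report (but not otherwise handle) a failed write.
     */
    static int demo_write_dev_state(struct xenbus_device *dev,
                                    const char *state_path, int state)
    {
            int err;

            err = xenbus_printf(XBT_NIL, dev->nodename, state_path,
                                "%d", state);
            if (err)
                    xenbus_dev_error(dev, err, "%s: writing %s",
                                     __func__, state_path);

            return err;
    }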
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 451e833f5931..48b154276179 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
| @@ -41,4 +41,4 @@ obj-$(CONFIG_XEN_PVCALLS_FRONTEND) += pvcalls-front.o | |||
| 41 | xen-evtchn-y := evtchn.o | 41 | xen-evtchn-y := evtchn.o |
| 42 | xen-gntdev-y := gntdev.o | 42 | xen-gntdev-y := gntdev.o |
| 43 | xen-gntalloc-y := gntalloc.o | 43 | xen-gntalloc-y := gntalloc.o |
| 44 | xen-privcmd-y := privcmd.o | 44 | xen-privcmd-y := privcmd.o privcmd-buf.o |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 762378f1811c..08e4af04d6f2 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
| @@ -628,8 +628,6 @@ static void __unbind_from_irq(unsigned int irq) | |||
| 628 | xen_irq_info_cleanup(info); | 628 | xen_irq_info_cleanup(info); |
| 629 | } | 629 | } |
| 630 | 630 | ||
| 631 | BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND); | ||
| 632 | |||
| 633 | xen_free_irq(irq); | 631 | xen_free_irq(irq); |
| 634 | } | 632 | } |
| 635 | 633 | ||
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 2473b0a9e6e4..ba9f3eec2bd0 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
| @@ -799,7 +799,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages) | |||
| 799 | 799 | ||
| 800 | return 0; | 800 | return 0; |
| 801 | } | 801 | } |
| 802 | EXPORT_SYMBOL(gnttab_alloc_pages); | 802 | EXPORT_SYMBOL_GPL(gnttab_alloc_pages); |
| 803 | 803 | ||
| 804 | /** | 804 | /** |
| 805 | * gnttab_free_pages - free pages allocated by gnttab_alloc_pages() | 805 | * gnttab_free_pages - free pages allocated by gnttab_alloc_pages() |
| @@ -820,7 +820,7 @@ void gnttab_free_pages(int nr_pages, struct page **pages) | |||
| 820 | } | 820 | } |
| 821 | free_xenballooned_pages(nr_pages, pages); | 821 | free_xenballooned_pages(nr_pages, pages); |
| 822 | } | 822 | } |
| 823 | EXPORT_SYMBOL(gnttab_free_pages); | 823 | EXPORT_SYMBOL_GPL(gnttab_free_pages); |
| 824 | 824 | ||
| 825 | /* Handling of paged out grant targets (GNTST_eagain) */ | 825 | /* Handling of paged out grant targets (GNTST_eagain) */ |
| 826 | #define MAX_DELAY 256 | 826 | #define MAX_DELAY 256 |
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 8835065029d3..c93d8ef8df34 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
| @@ -289,8 +289,15 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path, | |||
| 289 | return; | 289 | return; |
| 290 | } | 290 | } |
| 291 | 291 | ||
| 292 | if (sysrq_key != '\0') | 292 | if (sysrq_key != '\0') { |
| 293 | xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); | 293 | err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); |
| 294 | if (err) { | ||
| 295 | pr_err("%s: Error %d writing sysrq in control/sysrq\n", | ||
| 296 | __func__, err); | ||
| 297 | xenbus_transaction_end(xbt, 1); | ||
| 298 | return; | ||
| 299 | } | ||
| 300 | } | ||
| 294 | 301 | ||
| 295 | err = xenbus_transaction_end(xbt, 0); | 302 | err = xenbus_transaction_end(xbt, 0); |
| 296 | if (err == -EAGAIN) | 303 | if (err == -EAGAIN) |
| @@ -342,7 +349,12 @@ static int setup_shutdown_watcher(void) | |||
| 342 | continue; | 349 | continue; |
| 343 | snprintf(node, FEATURE_PATH_SIZE, "feature-%s", | 350 | snprintf(node, FEATURE_PATH_SIZE, "feature-%s", |
| 344 | shutdown_handlers[idx].command); | 351 | shutdown_handlers[idx].command); |
| 345 | xenbus_printf(XBT_NIL, "control", node, "%u", 1); | 352 | err = xenbus_printf(XBT_NIL, "control", node, "%u", 1); |
| 353 | if (err) { | ||
| 354 | pr_err("%s: Error %d writing %s\n", __func__, | ||
| 355 | err, node); | ||
| 356 | return err; | ||
| 357 | } | ||
| 346 | } | 358 | } |
| 347 | 359 | ||
| 348 | return 0; | 360 | return 0; |
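The manage.c hunks above add the same return-value checking inside a xenstore transaction; the extra wrinkle is that a failed write must abort the open transaction with xenbus_transaction_end(xbt, 1) before bailing out, otherwise the transaction is left dangling. A short sketch of that shape, with the payload reduced to clearing a single key:

    #include <linux/kernel.h>
    #include <xen/xenbus.h>

    static void demo_clear_key_in_transaction(void)
    {
            struct xenbus_transaction xbt;
            int err;

            err = xenbus_transaction_start(&xbt);
            if (err)
                    return;

            err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
            if (err) {
                    pr_err("%s: error %d clearing control/sysrq\n",
                           __func__, err);
                    xenbus_transaction_end(xbt, 1);  /* abort */
                    return;
            }

            err = xenbus_transaction_end(xbt, 0);    /* commit */
            if (err == -EAGAIN)
                    pr_debug("%s: transaction raced, retry if needed\n",
                             __func__);
    }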
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c new file mode 100644 index 000000000000..df1ed37c3269 --- /dev/null +++ b/drivers/xen/privcmd-buf.c | |||
| @@ -0,0 +1,210 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 OR MIT | ||
| 2 | |||
| 3 | /****************************************************************************** | ||
| 4 | * privcmd-buf.c | ||
| 5 | * | ||
| 6 | * Mmap of hypercall buffers. | ||
| 7 | * | ||
| 8 | * Copyright (c) 2018 Juergen Gross | ||
| 9 | */ | ||
| 10 | |||
| 11 | #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt | ||
| 12 | |||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/list.h> | ||
| 16 | #include <linux/miscdevice.h> | ||
| 17 | #include <linux/mm.h> | ||
| 18 | #include <linux/slab.h> | ||
| 19 | |||
| 20 | #include "privcmd.h" | ||
| 21 | |||
| 22 | MODULE_LICENSE("GPL"); | ||
| 23 | |||
| 24 | static unsigned int limit = 64; | ||
| 25 | module_param(limit, uint, 0644); | ||
| 26 | MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by " | ||
| 27 | "the privcmd-buf device per open file"); | ||
| 28 | |||
| 29 | struct privcmd_buf_private { | ||
| 30 | struct mutex lock; | ||
| 31 | struct list_head list; | ||
| 32 | unsigned int allocated; | ||
| 33 | }; | ||
| 34 | |||
| 35 | struct privcmd_buf_vma_private { | ||
| 36 | struct privcmd_buf_private *file_priv; | ||
| 37 | struct list_head list; | ||
| 38 | unsigned int users; | ||
| 39 | unsigned int n_pages; | ||
| 40 | struct page *pages[]; | ||
| 41 | }; | ||
| 42 | |||
| 43 | static int privcmd_buf_open(struct inode *ino, struct file *file) | ||
| 44 | { | ||
| 45 | struct privcmd_buf_private *file_priv; | ||
| 46 | |||
| 47 | file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); | ||
| 48 | if (!file_priv) | ||
| 49 | return -ENOMEM; | ||
| 50 | |||
| 51 | mutex_init(&file_priv->lock); | ||
| 52 | INIT_LIST_HEAD(&file_priv->list); | ||
| 53 | |||
| 54 | file->private_data = file_priv; | ||
| 55 | |||
| 56 | return 0; | ||
| 57 | } | ||
| 58 | |||
| 59 | static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv) | ||
| 60 | { | ||
| 61 | unsigned int i; | ||
| 62 | |||
| 63 | vma_priv->file_priv->allocated -= vma_priv->n_pages; | ||
| 64 | |||
| 65 | list_del(&vma_priv->list); | ||
| 66 | |||
| 67 | for (i = 0; i < vma_priv->n_pages; i++) | ||
| 68 | if (vma_priv->pages[i]) | ||
| 69 | __free_page(vma_priv->pages[i]); | ||
| 70 | |||
| 71 | kfree(vma_priv); | ||
| 72 | } | ||
| 73 | |||
| 74 | static int privcmd_buf_release(struct inode *ino, struct file *file) | ||
| 75 | { | ||
| 76 | struct privcmd_buf_private *file_priv = file->private_data; | ||
| 77 | struct privcmd_buf_vma_private *vma_priv; | ||
| 78 | |||
| 79 | mutex_lock(&file_priv->lock); | ||
| 80 | |||
| 81 | while (!list_empty(&file_priv->list)) { | ||
| 82 | vma_priv = list_first_entry(&file_priv->list, | ||
| 83 | struct privcmd_buf_vma_private, | ||
| 84 | list); | ||
| 85 | privcmd_buf_vmapriv_free(vma_priv); | ||
| 86 | } | ||
| 87 | |||
| 88 | mutex_unlock(&file_priv->lock); | ||
| 89 | |||
| 90 | kfree(file_priv); | ||
| 91 | |||
| 92 | return 0; | ||
| 93 | } | ||
| 94 | |||
| 95 | static void privcmd_buf_vma_open(struct vm_area_struct *vma) | ||
| 96 | { | ||
| 97 | struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data; | ||
| 98 | |||
| 99 | if (!vma_priv) | ||
| 100 | return; | ||
| 101 | |||
| 102 | mutex_lock(&vma_priv->file_priv->lock); | ||
| 103 | vma_priv->users++; | ||
| 104 | mutex_unlock(&vma_priv->file_priv->lock); | ||
| 105 | } | ||
| 106 | |||
| 107 | static void privcmd_buf_vma_close(struct vm_area_struct *vma) | ||
| 108 | { | ||
| 109 | struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data; | ||
| 110 | struct privcmd_buf_private *file_priv; | ||
| 111 | |||
| 112 | if (!vma_priv) | ||
| 113 | return; | ||
| 114 | |||
| 115 | file_priv = vma_priv->file_priv; | ||
| 116 | |||
| 117 | mutex_lock(&file_priv->lock); | ||
| 118 | |||
| 119 | vma_priv->users--; | ||
| 120 | if (!vma_priv->users) | ||
| 121 | privcmd_buf_vmapriv_free(vma_priv); | ||
| 122 | |||
| 123 | mutex_unlock(&file_priv->lock); | ||
| 124 | } | ||
| 125 | |||
| 126 | static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf) | ||
| 127 | { | ||
| 128 | pr_debug("fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", | ||
| 129 | vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end, | ||
| 130 | vmf->pgoff, (void *)vmf->address); | ||
| 131 | |||
| 132 | return VM_FAULT_SIGBUS; | ||
| 133 | } | ||
| 134 | |||
| 135 | static const struct vm_operations_struct privcmd_buf_vm_ops = { | ||
| 136 | .open = privcmd_buf_vma_open, | ||
| 137 | .close = privcmd_buf_vma_close, | ||
| 138 | .fault = privcmd_buf_vma_fault, | ||
| 139 | }; | ||
| 140 | |||
| 141 | static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma) | ||
| 142 | { | ||
| 143 | struct privcmd_buf_private *file_priv = file->private_data; | ||
| 144 | struct privcmd_buf_vma_private *vma_priv; | ||
| 145 | unsigned long count = vma_pages(vma); | ||
| 146 | unsigned int i; | ||
| 147 | int ret = 0; | ||
| 148 | |||
| 149 | if (!(vma->vm_flags & VM_SHARED) || count > limit || | ||
| 150 | file_priv->allocated + count > limit) | ||
| 151 | return -EINVAL; | ||
| 152 | |||
| 153 | vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *), | ||
| 154 | GFP_KERNEL); | ||
| 155 | if (!vma_priv) | ||
| 156 | return -ENOMEM; | ||
| 157 | |||
| 158 | vma_priv->n_pages = count; | ||
| 159 | count = 0; | ||
| 160 | for (i = 0; i < vma_priv->n_pages; i++) { | ||
| 161 | vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
| 162 | if (!vma_priv->pages[i]) | ||
| 163 | break; | ||
| 164 | count++; | ||
| 165 | } | ||
| 166 | |||
| 167 | mutex_lock(&file_priv->lock); | ||
| 168 | |||
| 169 | file_priv->allocated += count; | ||
| 170 | |||
| 171 | vma_priv->file_priv = file_priv; | ||
| 172 | vma_priv->users = 1; | ||
| 173 | |||
| 174 | vma->vm_flags |= VM_IO | VM_DONTEXPAND; | ||
| 175 | vma->vm_ops = &privcmd_buf_vm_ops; | ||
| 176 | vma->vm_private_data = vma_priv; | ||
| 177 | |||
| 178 | list_add(&vma_priv->list, &file_priv->list); | ||
| 179 | |||
| 180 | if (vma_priv->n_pages != count) | ||
| 181 | ret = -ENOMEM; | ||
| 182 | else | ||
| 183 | for (i = 0; i < vma_priv->n_pages; i++) { | ||
| 184 | ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE, | ||
| 185 | vma_priv->pages[i]); | ||
| 186 | if (ret) | ||
| 187 | break; | ||
| 188 | } | ||
| 189 | |||
| 190 | if (ret) | ||
| 191 | privcmd_buf_vmapriv_free(vma_priv); | ||
| 192 | |||
| 193 | mutex_unlock(&file_priv->lock); | ||
| 194 | |||
| 195 | return ret; | ||
| 196 | } | ||
| 197 | |||
| 198 | const struct file_operations xen_privcmdbuf_fops = { | ||
| 199 | .owner = THIS_MODULE, | ||
| 200 | .open = privcmd_buf_open, | ||
| 201 | .release = privcmd_buf_release, | ||
| 202 | .mmap = privcmd_buf_mmap, | ||
| 203 | }; | ||
| 204 | EXPORT_SYMBOL_GPL(xen_privcmdbuf_fops); | ||
| 205 | |||
| 206 | struct miscdevice xen_privcmdbuf_dev = { | ||
| 207 | .minor = MISC_DYNAMIC_MINOR, | ||
| 208 | .name = "xen/hypercall", | ||
| 209 | .fops = &xen_privcmdbuf_fops, | ||
| 210 | }; | ||
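The new privcmd-buf driver gives userspace kernel-allocated, pinned pages to use as hypercall buffers: each open file may map at most `limit` pages, the VMA rejects faults because every page is inserted up front with vm_insert_page(), and the pages live until the last mapping goes away. Assuming the misc device appears as /dev/xen/hypercall (inferred from the .name field above; the actual node depends on the system's device setup), a minimal userspace sketch would look like this:

    /* Userspace sketch: map one page of hypercall buffer and release it. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            int fd = open("/dev/xen/hypercall", O_RDWR | O_CLOEXEC);
            void *buf;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* MAP_SHARED is required; the driver rejects private mappings. */
            buf = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (buf == MAP_FAILED) {
                    perror("mmap");
                    close(fd);
                    return 1;
            }

            memset(buf, 0, page);   /* buffer is now usable for hypercall data */

            munmap(buf, page);
            close(fd);
            return 0;
    }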
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 8ae0349d9f0a..7e6e682104dc 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c | |||
| @@ -1007,12 +1007,21 @@ static int __init privcmd_init(void) | |||
| 1007 | pr_err("Could not register Xen privcmd device\n"); | 1007 | pr_err("Could not register Xen privcmd device\n"); |
| 1008 | return err; | 1008 | return err; |
| 1009 | } | 1009 | } |
| 1010 | |||
| 1011 | err = misc_register(&xen_privcmdbuf_dev); | ||
| 1012 | if (err != 0) { | ||
| 1013 | pr_err("Could not register Xen hypercall-buf device\n"); | ||
| 1014 | misc_deregister(&privcmd_dev); | ||
| 1015 | return err; | ||
| 1016 | } | ||
| 1017 | |||
| 1010 | return 0; | 1018 | return 0; |
| 1011 | } | 1019 | } |
| 1012 | 1020 | ||
| 1013 | static void __exit privcmd_exit(void) | 1021 | static void __exit privcmd_exit(void) |
| 1014 | { | 1022 | { |
| 1015 | misc_deregister(&privcmd_dev); | 1023 | misc_deregister(&privcmd_dev); |
| 1024 | misc_deregister(&xen_privcmdbuf_dev); | ||
| 1016 | } | 1025 | } |
| 1017 | 1026 | ||
| 1018 | module_init(privcmd_init); | 1027 | module_init(privcmd_init); |
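The privcmd_init() change above follows the usual rule when one init function registers more than one device: if the second misc_register() fails, the first device is deregistered before returning, so the module never ends up half-registered. A sketch of the shape with placeholder devices:

    #include <linux/init.h>
    #include <linux/miscdevice.h>
    #include <linux/module.h>

    static struct miscdevice demo_dev_a = {
            .minor = MISC_DYNAMIC_MINOR,
            .name  = "demo-a",
    };

    static struct miscdevice demo_dev_b = {
            .minor = MISC_DYNAMIC_MINOR,
            .name  = "demo-b",
    };

    static int __init demo_init(void)
    {
            int err;

            err = misc_register(&demo_dev_a);
            if (err)
                    return err;

            err = misc_register(&demo_dev_b);
            if (err) {
                    misc_deregister(&demo_dev_a);  /* undo the first one */
                    return err;
            }

            return 0;
    }

    static void __exit demo_exit(void)
    {
            misc_deregister(&demo_dev_b);
            misc_deregister(&demo_dev_a);
    }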
diff --git a/drivers/xen/privcmd.h b/drivers/xen/privcmd.h index 14facaeed36f..0dd9f8f67ee3 100644 --- a/drivers/xen/privcmd.h +++ b/drivers/xen/privcmd.h | |||
| @@ -1,3 +1,6 @@ | |||
| 1 | #include <linux/fs.h> | 1 | #include <linux/fs.h> |
| 2 | 2 | ||
| 3 | extern const struct file_operations xen_privcmd_fops; | 3 | extern const struct file_operations xen_privcmd_fops; |
| 4 | extern const struct file_operations xen_privcmdbuf_fops; | ||
| 5 | |||
| 6 | extern struct miscdevice xen_privcmdbuf_dev; | ||
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 7bc88fd43cfc..e2f3e8b0fba9 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c | |||
| @@ -1012,6 +1012,7 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state, | |||
| 1012 | { | 1012 | { |
| 1013 | struct v2p_entry *entry; | 1013 | struct v2p_entry *entry; |
| 1014 | unsigned long flags; | 1014 | unsigned long flags; |
| 1015 | int err; | ||
| 1015 | 1016 | ||
| 1016 | if (try) { | 1017 | if (try) { |
| 1017 | spin_lock_irqsave(&info->v2p_lock, flags); | 1018 | spin_lock_irqsave(&info->v2p_lock, flags); |
| @@ -1027,8 +1028,11 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state, | |||
| 1027 | scsiback_del_translation_entry(info, vir); | 1028 | scsiback_del_translation_entry(info, vir); |
| 1028 | } | 1029 | } |
| 1029 | } else if (!try) { | 1030 | } else if (!try) { |
| 1030 | xenbus_printf(XBT_NIL, info->dev->nodename, state, | 1031 | err = xenbus_printf(XBT_NIL, info->dev->nodename, state, |
| 1031 | "%d", XenbusStateClosed); | 1032 | "%d", XenbusStateClosed); |
| 1033 | if (err) | ||
| 1034 | xenbus_dev_error(info->dev, err, | ||
| 1035 | "%s: writing %s", __func__, state); | ||
| 1032 | } | 1036 | } |
| 1033 | } | 1037 | } |
| 1034 | 1038 | ||
| @@ -1067,8 +1071,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op, | |||
| 1067 | snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent); | 1071 | snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent); |
| 1068 | val = xenbus_read(XBT_NIL, dev->nodename, str, NULL); | 1072 | val = xenbus_read(XBT_NIL, dev->nodename, str, NULL); |
| 1069 | if (IS_ERR(val)) { | 1073 | if (IS_ERR(val)) { |
| 1070 | xenbus_printf(XBT_NIL, dev->nodename, state, | 1074 | err = xenbus_printf(XBT_NIL, dev->nodename, state, |
| 1071 | "%d", XenbusStateClosed); | 1075 | "%d", XenbusStateClosed); |
| 1076 | if (err) | ||
| 1077 | xenbus_dev_error(info->dev, err, | ||
| 1078 | "%s: writing %s", __func__, state); | ||
| 1072 | return; | 1079 | return; |
| 1073 | } | 1080 | } |
| 1074 | strlcpy(phy, val, VSCSI_NAMELEN); | 1081 | strlcpy(phy, val, VSCSI_NAMELEN); |
| @@ -1079,8 +1086,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op, | |||
| 1079 | err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u", | 1086 | err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u", |
| 1080 | &vir.hst, &vir.chn, &vir.tgt, &vir.lun); | 1087 | &vir.hst, &vir.chn, &vir.tgt, &vir.lun); |
| 1081 | if (XENBUS_EXIST_ERR(err)) { | 1088 | if (XENBUS_EXIST_ERR(err)) { |
| 1082 | xenbus_printf(XBT_NIL, dev->nodename, state, | 1089 | err = xenbus_printf(XBT_NIL, dev->nodename, state, |
| 1083 | "%d", XenbusStateClosed); | 1090 | "%d", XenbusStateClosed); |
| 1091 | if (err) | ||
| 1092 | xenbus_dev_error(info->dev, err, | ||
| 1093 | "%s: writing %s", __func__, state); | ||
| 1084 | return; | 1094 | return; |
| 1085 | } | 1095 | } |
| 1086 | 1096 | ||
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index cc40802ddfa8..00e759f05161 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h | |||
| @@ -748,7 +748,6 @@ extern void ext2_free_blocks (struct inode *, unsigned long, | |||
| 748 | unsigned long); | 748 | unsigned long); |
| 749 | extern unsigned long ext2_count_free_blocks (struct super_block *); | 749 | extern unsigned long ext2_count_free_blocks (struct super_block *); |
| 750 | extern unsigned long ext2_count_dirs (struct super_block *); | 750 | extern unsigned long ext2_count_dirs (struct super_block *); |
| 751 | extern void ext2_check_blocks_bitmap (struct super_block *); | ||
| 752 | extern struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb, | 751 | extern struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb, |
| 753 | unsigned int block_group, | 752 | unsigned int block_group, |
| 754 | struct buffer_head ** bh); | 753 | struct buffer_head ** bh); |
| @@ -771,7 +770,6 @@ extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page | |||
| 771 | extern struct inode * ext2_new_inode (struct inode *, umode_t, const struct qstr *); | 770 | extern struct inode * ext2_new_inode (struct inode *, umode_t, const struct qstr *); |
| 772 | extern void ext2_free_inode (struct inode *); | 771 | extern void ext2_free_inode (struct inode *); |
| 773 | extern unsigned long ext2_count_free_inodes (struct super_block *); | 772 | extern unsigned long ext2_count_free_inodes (struct super_block *); |
| 774 | extern void ext2_check_inodes_bitmap (struct super_block *); | ||
| 775 | extern unsigned long ext2_count_free (struct buffer_head *, unsigned); | 773 | extern unsigned long ext2_count_free (struct buffer_head *, unsigned); |
| 776 | 774 | ||
| 777 | /* inode.c */ | 775 | /* inode.c */ |
diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 25ab1274090f..8ff53f8da3bc 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c | |||
| @@ -557,6 +557,9 @@ static int parse_options(char *options, struct super_block *sb, | |||
| 557 | set_opt (opts->s_mount_opt, NO_UID32); | 557 | set_opt (opts->s_mount_opt, NO_UID32); |
| 558 | break; | 558 | break; |
| 559 | case Opt_nocheck: | 559 | case Opt_nocheck: |
| 560 | ext2_msg(sb, KERN_WARNING, | ||
| 561 | "Option nocheck/check=none is deprecated and" | ||
| 562 | " will be removed in June 2020."); | ||
| 560 | clear_opt (opts->s_mount_opt, CHECK); | 563 | clear_opt (opts->s_mount_opt, CHECK); |
| 561 | break; | 564 | break; |
| 562 | case Opt_debug: | 565 | case Opt_debug: |
| @@ -1335,9 +1338,6 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data) | |||
| 1335 | new_opts.s_resgid = sbi->s_resgid; | 1338 | new_opts.s_resgid = sbi->s_resgid; |
| 1336 | spin_unlock(&sbi->s_lock); | 1339 | spin_unlock(&sbi->s_lock); |
| 1337 | 1340 | ||
| 1338 | /* | ||
| 1339 | * Allow the "check" option to be passed as a remount option. | ||
| 1340 | */ | ||
| 1341 | if (!parse_options(data, sb, &new_opts)) | 1341 | if (!parse_options(data, sb, &new_opts)) |
| 1342 | return -EINVAL; | 1342 | return -EINVAL; |
| 1343 | 1343 | ||
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index bbd0465535eb..f033f3a69a3b 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c | |||
| @@ -883,8 +883,10 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp, | |||
| 883 | rcu_read_lock(); | 883 | rcu_read_lock(); |
| 884 | list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { | 884 | list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { |
| 885 | res = nfs_delegation_find_inode_server(server, fhandle); | 885 | res = nfs_delegation_find_inode_server(server, fhandle); |
| 886 | if (res != ERR_PTR(-ENOENT)) | 886 | if (res != ERR_PTR(-ENOENT)) { |
| 887 | rcu_read_unlock(); | ||
| 887 | return res; | 888 | return res; |
| 889 | } | ||
| 888 | } | 890 | } |
| 889 | rcu_read_unlock(); | 891 | rcu_read_unlock(); |
| 890 | return ERR_PTR(-ENOENT); | 892 | return ERR_PTR(-ENOENT); |
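
The delegation fix above adds the missing rcu_read_unlock() on the early-return path of a search loop. The minimal userspace C sketch below shows the same lock-balance rule, with a pthread read-write lock standing in for RCU; the table and names are made up for illustration.

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
    static const char *table[] = { "alpha", "beta", "gamma" };

    static const char *find_entry(const char *name)
    {
        size_t i;

        pthread_rwlock_rdlock(&table_lock);
        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
            if (strcmp(table[i], name) == 0) {
                /* Early return: the lock must be dropped here too. */
                pthread_rwlock_unlock(&table_lock);
                return table[i];
            }
        }
        pthread_rwlock_unlock(&table_lock);   /* not-found path */
        return NULL;
    }

    int main(void)
    {
        printf("found: %s\n", find_entry("beta") ? "yes" : "no");
        return 0;
    }
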
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index d4a07acad598..8f003792ccde 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c | |||
| @@ -1243,17 +1243,18 @@ static int ff_layout_read_done_cb(struct rpc_task *task, | |||
| 1243 | hdr->ds_clp, hdr->lseg, | 1243 | hdr->ds_clp, hdr->lseg, |
| 1244 | hdr->pgio_mirror_idx); | 1244 | hdr->pgio_mirror_idx); |
| 1245 | 1245 | ||
| 1246 | clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); | ||
| 1247 | clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); | ||
| 1246 | switch (err) { | 1248 | switch (err) { |
| 1247 | case -NFS4ERR_RESET_TO_PNFS: | 1249 | case -NFS4ERR_RESET_TO_PNFS: |
| 1248 | if (ff_layout_choose_best_ds_for_read(hdr->lseg, | 1250 | if (ff_layout_choose_best_ds_for_read(hdr->lseg, |
| 1249 | hdr->pgio_mirror_idx + 1, | 1251 | hdr->pgio_mirror_idx + 1, |
| 1250 | &hdr->pgio_mirror_idx)) | 1252 | &hdr->pgio_mirror_idx)) |
| 1251 | goto out_eagain; | 1253 | goto out_eagain; |
| 1252 | ff_layout_read_record_layoutstats_done(task, hdr); | 1254 | set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); |
| 1253 | pnfs_read_resend_pnfs(hdr); | ||
| 1254 | return task->tk_status; | 1255 | return task->tk_status; |
| 1255 | case -NFS4ERR_RESET_TO_MDS: | 1256 | case -NFS4ERR_RESET_TO_MDS: |
| 1256 | ff_layout_reset_read(hdr); | 1257 | set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); |
| 1257 | return task->tk_status; | 1258 | return task->tk_status; |
| 1258 | case -EAGAIN: | 1259 | case -EAGAIN: |
| 1259 | goto out_eagain; | 1260 | goto out_eagain; |
| @@ -1403,6 +1404,10 @@ static void ff_layout_read_release(void *data) | |||
| 1403 | struct nfs_pgio_header *hdr = data; | 1404 | struct nfs_pgio_header *hdr = data; |
| 1404 | 1405 | ||
| 1405 | ff_layout_read_record_layoutstats_done(&hdr->task, hdr); | 1406 | ff_layout_read_record_layoutstats_done(&hdr->task, hdr); |
| 1407 | if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) | ||
| 1408 | pnfs_read_resend_pnfs(hdr); | ||
| 1409 | else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags)) | ||
| 1410 | ff_layout_reset_read(hdr); | ||
| 1406 | pnfs_generic_rw_release(data); | 1411 | pnfs_generic_rw_release(data); |
| 1407 | } | 1412 | } |
| 1408 | 1413 | ||
| @@ -1423,12 +1428,14 @@ static int ff_layout_write_done_cb(struct rpc_task *task, | |||
| 1423 | hdr->ds_clp, hdr->lseg, | 1428 | hdr->ds_clp, hdr->lseg, |
| 1424 | hdr->pgio_mirror_idx); | 1429 | hdr->pgio_mirror_idx); |
| 1425 | 1430 | ||
| 1431 | clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); | ||
| 1432 | clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); | ||
| 1426 | switch (err) { | 1433 | switch (err) { |
| 1427 | case -NFS4ERR_RESET_TO_PNFS: | 1434 | case -NFS4ERR_RESET_TO_PNFS: |
| 1428 | ff_layout_reset_write(hdr, true); | 1435 | set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); |
| 1429 | return task->tk_status; | 1436 | return task->tk_status; |
| 1430 | case -NFS4ERR_RESET_TO_MDS: | 1437 | case -NFS4ERR_RESET_TO_MDS: |
| 1431 | ff_layout_reset_write(hdr, false); | 1438 | set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); |
| 1432 | return task->tk_status; | 1439 | return task->tk_status; |
| 1433 | case -EAGAIN: | 1440 | case -EAGAIN: |
| 1434 | return -EAGAIN; | 1441 | return -EAGAIN; |
| @@ -1575,6 +1582,10 @@ static void ff_layout_write_release(void *data) | |||
| 1575 | struct nfs_pgio_header *hdr = data; | 1582 | struct nfs_pgio_header *hdr = data; |
| 1576 | 1583 | ||
| 1577 | ff_layout_write_record_layoutstats_done(&hdr->task, hdr); | 1584 | ff_layout_write_record_layoutstats_done(&hdr->task, hdr); |
| 1585 | if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) | ||
| 1586 | ff_layout_reset_write(hdr, true); | ||
| 1587 | else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags)) | ||
| 1588 | ff_layout_reset_write(hdr, false); | ||
| 1578 | pnfs_generic_rw_release(data); | 1589 | pnfs_generic_rw_release(data); |
| 1579 | } | 1590 | } |
| 1580 | 1591 | ||
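
The flexfiles hunks above move the actual resend out of the RPC completion callbacks: ff_layout_read_done_cb() and ff_layout_write_done_cb() now only record the decision in the new NFS_IOHDR_RESEND_PNFS/NFS_IOHDR_RESEND_MDS flags, and the release callbacks act on it. A hedged userspace C sketch of that record-then-dispatch shape, with invented names and error codes, looks like this:

    #include <stdatomic.h>
    #include <stdio.h>

    #define RESEND_PNFS (1u << 0)
    #define RESEND_MDS  (1u << 1)

    struct request {
        atomic_uint flags;
    };

    /* Completion path: only record what should happen later. */
    static void request_done(struct request *req, int err)
    {
        atomic_fetch_and(&req->flags, ~(RESEND_PNFS | RESEND_MDS));
        if (err == -1)
            atomic_fetch_or(&req->flags, RESEND_PNFS);
        else if (err == -2)
            atomic_fetch_or(&req->flags, RESEND_MDS);
    }

    /* Release path: act on the recorded decision. */
    static void request_release(struct request *req)
    {
        unsigned int flags = atomic_load(&req->flags);

        if (flags & RESEND_PNFS)
            puts("resending through pNFS");
        else if (flags & RESEND_MDS)
            puts("resending through the MDS");
        else
            puts("request complete");
    }

    int main(void)
    {
        struct request req;

        atomic_init(&req.flags, 0);
        request_done(&req, -1);      /* pretend the DS asked to reset to pNFS */
        request_release(&req);
        return 0;
    }
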
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ed45090e4df6..6dd146885da9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -3294,6 +3294,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) | |||
| 3294 | struct nfs4_closedata *calldata = data; | 3294 | struct nfs4_closedata *calldata = data; |
| 3295 | struct nfs4_state *state = calldata->state; | 3295 | struct nfs4_state *state = calldata->state; |
| 3296 | struct inode *inode = calldata->inode; | 3296 | struct inode *inode = calldata->inode; |
| 3297 | struct pnfs_layout_hdr *lo; | ||
| 3297 | bool is_rdonly, is_wronly, is_rdwr; | 3298 | bool is_rdonly, is_wronly, is_rdwr; |
| 3298 | int call_close = 0; | 3299 | int call_close = 0; |
| 3299 | 3300 | ||
| @@ -3337,6 +3338,12 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) | |||
| 3337 | goto out_wait; | 3338 | goto out_wait; |
| 3338 | } | 3339 | } |
| 3339 | 3340 | ||
| 3341 | lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL; | ||
| 3342 | if (lo && !pnfs_layout_is_valid(lo)) { | ||
| 3343 | calldata->arg.lr_args = NULL; | ||
| 3344 | calldata->res.lr_res = NULL; | ||
| 3345 | } | ||
| 3346 | |||
| 3340 | if (calldata->arg.fmode == 0) | 3347 | if (calldata->arg.fmode == 0) |
| 3341 | task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; | 3348 | task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; |
| 3342 | 3349 | ||
| @@ -5972,12 +5979,19 @@ static void nfs4_delegreturn_release(void *calldata) | |||
| 5972 | static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) | 5979 | static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) |
| 5973 | { | 5980 | { |
| 5974 | struct nfs4_delegreturndata *d_data; | 5981 | struct nfs4_delegreturndata *d_data; |
| 5982 | struct pnfs_layout_hdr *lo; | ||
| 5975 | 5983 | ||
| 5976 | d_data = (struct nfs4_delegreturndata *)data; | 5984 | d_data = (struct nfs4_delegreturndata *)data; |
| 5977 | 5985 | ||
| 5978 | if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) | 5986 | if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) |
| 5979 | return; | 5987 | return; |
| 5980 | 5988 | ||
| 5989 | lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL; | ||
| 5990 | if (lo && !pnfs_layout_is_valid(lo)) { | ||
| 5991 | d_data->args.lr_args = NULL; | ||
| 5992 | d_data->res.lr_res = NULL; | ||
| 5993 | } | ||
| 5994 | |||
| 5981 | nfs4_setup_sequence(d_data->res.server->nfs_client, | 5995 | nfs4_setup_sequence(d_data->res.server->nfs_client, |
| 5982 | &d_data->args.seq_args, | 5996 | &d_data->args.seq_args, |
| 5983 | &d_data->res.seq_res, | 5997 | &d_data->res.seq_res, |
| @@ -8650,6 +8664,8 @@ nfs4_layoutget_handle_exception(struct rpc_task *task, | |||
| 8650 | 8664 | ||
| 8651 | dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); | 8665 | dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); |
| 8652 | 8666 | ||
| 8667 | nfs4_sequence_free_slot(&lgp->res.seq_res); | ||
| 8668 | |||
| 8653 | switch (nfs4err) { | 8669 | switch (nfs4err) { |
| 8654 | case 0: | 8670 | case 0: |
| 8655 | goto out; | 8671 | goto out; |
| @@ -8714,7 +8730,6 @@ nfs4_layoutget_handle_exception(struct rpc_task *task, | |||
| 8714 | goto out; | 8730 | goto out; |
| 8715 | } | 8731 | } |
| 8716 | 8732 | ||
| 8717 | nfs4_sequence_free_slot(&lgp->res.seq_res); | ||
| 8718 | err = nfs4_handle_exception(server, nfs4err, exception); | 8733 | err = nfs4_handle_exception(server, nfs4err, exception); |
| 8719 | if (!status) { | 8734 | if (!status) { |
| 8720 | if (exception->retry) | 8735 | if (exception->retry) |
| @@ -8786,20 +8801,22 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout) | |||
| 8786 | if (IS_ERR(task)) | 8801 | if (IS_ERR(task)) |
| 8787 | return ERR_CAST(task); | 8802 | return ERR_CAST(task); |
| 8788 | status = rpc_wait_for_completion_task(task); | 8803 | status = rpc_wait_for_completion_task(task); |
| 8789 | if (status == 0) { | 8804 | if (status != 0) |
| 8805 | goto out; | ||
| 8806 | |||
| 8807 | /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ | ||
| 8808 | if (task->tk_status < 0 || lgp->res.layoutp->len == 0) { | ||
| 8790 | status = nfs4_layoutget_handle_exception(task, lgp, &exception); | 8809 | status = nfs4_layoutget_handle_exception(task, lgp, &exception); |
| 8791 | *timeout = exception.timeout; | 8810 | *timeout = exception.timeout; |
| 8792 | } | 8811 | } else |
| 8793 | 8812 | lseg = pnfs_layout_process(lgp); | |
| 8813 | out: | ||
| 8794 | trace_nfs4_layoutget(lgp->args.ctx, | 8814 | trace_nfs4_layoutget(lgp->args.ctx, |
| 8795 | &lgp->args.range, | 8815 | &lgp->args.range, |
| 8796 | &lgp->res.range, | 8816 | &lgp->res.range, |
| 8797 | &lgp->res.stateid, | 8817 | &lgp->res.stateid, |
| 8798 | status); | 8818 | status); |
| 8799 | 8819 | ||
| 8800 | /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ | ||
| 8801 | if (status == 0 && lgp->res.layoutp->len) | ||
| 8802 | lseg = pnfs_layout_process(lgp); | ||
| 8803 | rpc_put_task(task); | 8820 | rpc_put_task(task); |
| 8804 | dprintk("<-- %s status=%d\n", __func__, status); | 8821 | dprintk("<-- %s status=%d\n", __func__, status); |
| 8805 | if (status) | 8822 | if (status) |
| @@ -8817,6 +8834,8 @@ nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) | |||
| 8817 | &lrp->args.seq_args, | 8834 | &lrp->args.seq_args, |
| 8818 | &lrp->res.seq_res, | 8835 | &lrp->res.seq_res, |
| 8819 | task); | 8836 | task); |
| 8837 | if (!pnfs_layout_is_valid(lrp->args.layout)) | ||
| 8838 | rpc_exit(task, 0); | ||
| 8820 | } | 8839 | } |
| 8821 | 8840 | ||
| 8822 | static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) | 8841 | static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) |
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index a8f5e6b16749..3fe81424337d 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h | |||
| @@ -801,6 +801,11 @@ static inline void nfs4_lgopen_release(struct nfs4_layoutget *lgp) | |||
| 801 | { | 801 | { |
| 802 | } | 802 | } |
| 803 | 803 | ||
| 804 | static inline bool pnfs_layout_is_valid(const struct pnfs_layout_hdr *lo) | ||
| 805 | { | ||
| 806 | return false; | ||
| 807 | } | ||
| 808 | |||
| 804 | #endif /* CONFIG_NFS_V4_1 */ | 809 | #endif /* CONFIG_NFS_V4_1 */ |
| 805 | 810 | ||
| 806 | #if IS_ENABLED(CONFIG_NFS_V4_2) | 811 | #if IS_ENABLED(CONFIG_NFS_V4_2) |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index d88231e3b2be..fc20e06c56ba 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
| @@ -711,21 +711,18 @@ EXPORT_SYMBOL(dquot_quota_sync); | |||
| 711 | static unsigned long | 711 | static unsigned long |
| 712 | dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) | 712 | dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) |
| 713 | { | 713 | { |
| 714 | struct list_head *head; | ||
| 715 | struct dquot *dquot; | 714 | struct dquot *dquot; |
| 716 | unsigned long freed = 0; | 715 | unsigned long freed = 0; |
| 717 | 716 | ||
| 718 | spin_lock(&dq_list_lock); | 717 | spin_lock(&dq_list_lock); |
| 719 | head = free_dquots.prev; | 718 | while (!list_empty(&free_dquots) && sc->nr_to_scan) { |
| 720 | while (head != &free_dquots && sc->nr_to_scan) { | 719 | dquot = list_first_entry(&free_dquots, struct dquot, dq_free); |
| 721 | dquot = list_entry(head, struct dquot, dq_free); | ||
| 722 | remove_dquot_hash(dquot); | 720 | remove_dquot_hash(dquot); |
| 723 | remove_free_dquot(dquot); | 721 | remove_free_dquot(dquot); |
| 724 | remove_inuse(dquot); | 722 | remove_inuse(dquot); |
| 725 | do_destroy_dquot(dquot); | 723 | do_destroy_dquot(dquot); |
| 726 | sc->nr_to_scan--; | 724 | sc->nr_to_scan--; |
| 727 | freed++; | 725 | freed++; |
| 728 | head = free_dquots.prev; | ||
| 729 | } | 726 | } |
| 730 | spin_unlock(&dq_list_lock); | 727 | spin_unlock(&dq_list_lock); |
| 731 | return freed; | 728 | return freed; |
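
The dquot shrinker rewrite above drops the hand-rolled walk over free_dquots.prev in favour of repeatedly taking the first entry while the list is non-empty and the scan budget lasts. The userspace C sketch below mirrors that loop shape with a trivial singly linked free list; the real code of course runs under dq_list_lock and uses the kernel's list_head helpers.

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        int id;
    };

    static struct node *free_list;

    static unsigned long shrink_scan(unsigned long nr_to_scan)
    {
        unsigned long freed = 0;

        /* Same shape as the new loop: take the head while work remains. */
        while (free_list && nr_to_scan) {
            struct node *n = free_list;     /* "list_first_entry()" */

            free_list = n->next;            /* "remove_free_dquot()" */
            free(n);                        /* "do_destroy_dquot()" */
            nr_to_scan--;
            freed++;
        }
        return freed;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++) {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                break;
            n->id = i;
            n->next = free_list;
            free_list = n;
        }
        printf("freed %lu of 5 cached entries\n", shrink_scan(3));
        return 0;
    }
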
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 1b961b1d9699..fcda0fc97b90 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c | |||
| @@ -533,8 +533,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb, | |||
| 533 | udf_write_aext(table, &epos, &eloc, | 533 | udf_write_aext(table, &epos, &eloc, |
| 534 | (etype << 30) | elen, 1); | 534 | (etype << 30) | elen, 1); |
| 535 | } else | 535 | } else |
| 536 | udf_delete_aext(table, epos, eloc, | 536 | udf_delete_aext(table, epos); |
| 537 | (etype << 30) | elen); | ||
| 538 | } else { | 537 | } else { |
| 539 | alloc_count = 0; | 538 | alloc_count = 0; |
| 540 | } | 539 | } |
| @@ -630,7 +629,7 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb, | |||
| 630 | if (goal_elen) | 629 | if (goal_elen) |
| 631 | udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1); | 630 | udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1); |
| 632 | else | 631 | else |
| 633 | udf_delete_aext(table, goal_epos, goal_eloc, goal_elen); | 632 | udf_delete_aext(table, goal_epos); |
| 634 | brelse(goal_epos.bh); | 633 | brelse(goal_epos.bh); |
| 635 | 634 | ||
| 636 | udf_add_free_space(sb, partition, -1); | 635 | udf_add_free_space(sb, partition, -1); |
diff --git a/fs/udf/directory.c b/fs/udf/directory.c index 0a98a2369738..d9523013096f 100644 --- a/fs/udf/directory.c +++ b/fs/udf/directory.c | |||
| @@ -141,10 +141,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos, | |||
| 141 | fibh->ebh->b_data, | 141 | fibh->ebh->b_data, |
| 142 | sizeof(struct fileIdentDesc) + fibh->soffset); | 142 | sizeof(struct fileIdentDesc) + fibh->soffset); |
| 143 | 143 | ||
| 144 | fi_len = (sizeof(struct fileIdentDesc) + | 144 | fi_len = udf_dir_entry_len(cfi); |
| 145 | cfi->lengthFileIdent + | ||
| 146 | le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3; | ||
| 147 | |||
| 148 | *nf_pos += fi_len - (fibh->eoffset - fibh->soffset); | 145 | *nf_pos += fi_len - (fibh->eoffset - fibh->soffset); |
| 149 | fibh->eoffset = fibh->soffset + fi_len; | 146 | fibh->eoffset = fibh->soffset + fi_len; |
| 150 | } else { | 147 | } else { |
| @@ -152,6 +149,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos, | |||
| 152 | sizeof(struct fileIdentDesc)); | 149 | sizeof(struct fileIdentDesc)); |
| 153 | } | 150 | } |
| 154 | } | 151 | } |
| 152 | /* Got last entry outside of dir size - fs is corrupted! */ | ||
| 153 | if (*nf_pos > dir->i_size) | ||
| 154 | return NULL; | ||
| 155 | return fi; | 155 | return fi; |
| 156 | } | 156 | } |
| 157 | 157 | ||
diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 7f39d17352c9..9915a58fbabd 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c | |||
| @@ -1147,8 +1147,7 @@ static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr | |||
| 1147 | 1147 | ||
| 1148 | if (startnum > endnum) { | 1148 | if (startnum > endnum) { |
| 1149 | for (i = 0; i < (startnum - endnum); i++) | 1149 | for (i = 0; i < (startnum - endnum); i++) |
| 1150 | udf_delete_aext(inode, *epos, laarr[i].extLocation, | 1150 | udf_delete_aext(inode, *epos); |
| 1151 | laarr[i].extLength); | ||
| 1152 | } else if (startnum < endnum) { | 1151 | } else if (startnum < endnum) { |
| 1153 | for (i = 0; i < (endnum - startnum); i++) { | 1152 | for (i = 0; i < (endnum - startnum); i++) { |
| 1154 | udf_insert_aext(inode, *epos, laarr[i].extLocation, | 1153 | udf_insert_aext(inode, *epos, laarr[i].extLocation, |
| @@ -2176,14 +2175,15 @@ static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos, | |||
| 2176 | return (nelen >> 30); | 2175 | return (nelen >> 30); |
| 2177 | } | 2176 | } |
| 2178 | 2177 | ||
| 2179 | int8_t udf_delete_aext(struct inode *inode, struct extent_position epos, | 2178 | int8_t udf_delete_aext(struct inode *inode, struct extent_position epos) |
| 2180 | struct kernel_lb_addr eloc, uint32_t elen) | ||
| 2181 | { | 2179 | { |
| 2182 | struct extent_position oepos; | 2180 | struct extent_position oepos; |
| 2183 | int adsize; | 2181 | int adsize; |
| 2184 | int8_t etype; | 2182 | int8_t etype; |
| 2185 | struct allocExtDesc *aed; | 2183 | struct allocExtDesc *aed; |
| 2186 | struct udf_inode_info *iinfo; | 2184 | struct udf_inode_info *iinfo; |
| 2185 | struct kernel_lb_addr eloc; | ||
| 2186 | uint32_t elen; | ||
| 2187 | 2187 | ||
| 2188 | if (epos.bh) { | 2188 | if (epos.bh) { |
| 2189 | get_bh(epos.bh); | 2189 | get_bh(epos.bh); |
diff --git a/fs/udf/namei.c b/fs/udf/namei.c index c586026508db..06f37ddd2997 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c | |||
| @@ -351,8 +351,6 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, | |||
| 351 | loff_t f_pos; | 351 | loff_t f_pos; |
| 352 | loff_t size = udf_ext0_offset(dir) + dir->i_size; | 352 | loff_t size = udf_ext0_offset(dir) + dir->i_size; |
| 353 | int nfidlen; | 353 | int nfidlen; |
| 354 | uint8_t lfi; | ||
| 355 | uint16_t liu; | ||
| 356 | udf_pblk_t block; | 354 | udf_pblk_t block; |
| 357 | struct kernel_lb_addr eloc; | 355 | struct kernel_lb_addr eloc; |
| 358 | uint32_t elen = 0; | 356 | uint32_t elen = 0; |
| @@ -383,7 +381,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, | |||
| 383 | namelen = 0; | 381 | namelen = 0; |
| 384 | } | 382 | } |
| 385 | 383 | ||
| 386 | nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3; | 384 | nfidlen = ALIGN(sizeof(struct fileIdentDesc) + namelen, UDF_NAME_PAD); |
| 387 | 385 | ||
| 388 | f_pos = udf_ext0_offset(dir); | 386 | f_pos = udf_ext0_offset(dir); |
| 389 | 387 | ||
| @@ -424,12 +422,8 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, | |||
| 424 | goto out_err; | 422 | goto out_err; |
| 425 | } | 423 | } |
| 426 | 424 | ||
| 427 | liu = le16_to_cpu(cfi->lengthOfImpUse); | ||
| 428 | lfi = cfi->lengthFileIdent; | ||
| 429 | |||
| 430 | if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { | 425 | if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { |
| 431 | if (((sizeof(struct fileIdentDesc) + | 426 | if (udf_dir_entry_len(cfi) == nfidlen) { |
| 432 | liu + lfi + 3) & ~3) == nfidlen) { | ||
| 433 | cfi->descTag.tagSerialNum = cpu_to_le16(1); | 427 | cfi->descTag.tagSerialNum = cpu_to_le16(1); |
| 434 | cfi->fileVersionNum = cpu_to_le16(1); | 428 | cfi->fileVersionNum = cpu_to_le16(1); |
| 435 | cfi->fileCharacteristics = 0; | 429 | cfi->fileCharacteristics = 0; |
| @@ -1201,9 +1195,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 1201 | 1195 | ||
| 1202 | if (dir_fi) { | 1196 | if (dir_fi) { |
| 1203 | dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location); | 1197 | dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location); |
| 1204 | udf_update_tag((char *)dir_fi, | 1198 | udf_update_tag((char *)dir_fi, udf_dir_entry_len(dir_fi)); |
| 1205 | (sizeof(struct fileIdentDesc) + | ||
| 1206 | le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3); | ||
| 1207 | if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) | 1199 | if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) |
| 1208 | mark_inode_dirty(old_inode); | 1200 | mark_inode_dirty(old_inode); |
| 1209 | else | 1201 | else |
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index bae311b59400..84c47dde4d26 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h | |||
| @@ -132,6 +132,12 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb, | |||
| 132 | extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *, | 132 | extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *, |
| 133 | struct fileIdentDesc *, struct udf_fileident_bh *, | 133 | struct fileIdentDesc *, struct udf_fileident_bh *, |
| 134 | uint8_t *, uint8_t *); | 134 | uint8_t *, uint8_t *); |
| 135 | static inline unsigned int udf_dir_entry_len(struct fileIdentDesc *cfi) | ||
| 136 | { | ||
| 137 | return ALIGN(sizeof(struct fileIdentDesc) + | ||
| 138 | le16_to_cpu(cfi->lengthOfImpUse) + cfi->lengthFileIdent, | ||
| 139 | UDF_NAME_PAD); | ||
| 140 | } | ||
| 135 | 141 | ||
| 136 | /* file.c */ | 142 | /* file.c */ |
| 137 | extern long udf_ioctl(struct file *, unsigned int, unsigned long); | 143 | extern long udf_ioctl(struct file *, unsigned int, unsigned long); |
| @@ -167,8 +173,7 @@ extern int udf_add_aext(struct inode *, struct extent_position *, | |||
| 167 | struct kernel_lb_addr *, uint32_t, int); | 173 | struct kernel_lb_addr *, uint32_t, int); |
| 168 | extern void udf_write_aext(struct inode *, struct extent_position *, | 174 | extern void udf_write_aext(struct inode *, struct extent_position *, |
| 169 | struct kernel_lb_addr *, uint32_t, int); | 175 | struct kernel_lb_addr *, uint32_t, int); |
| 170 | extern int8_t udf_delete_aext(struct inode *, struct extent_position, | 176 | extern int8_t udf_delete_aext(struct inode *, struct extent_position); |
| 171 | struct kernel_lb_addr, uint32_t); | ||
| 172 | extern int8_t udf_next_aext(struct inode *, struct extent_position *, | 177 | extern int8_t udf_next_aext(struct inode *, struct extent_position *, |
| 173 | struct kernel_lb_addr *, uint32_t *, int); | 178 | struct kernel_lb_addr *, uint32_t *, int); |
| 174 | extern int8_t udf_current_aext(struct inode *, struct extent_position *, | 179 | extern int8_t udf_current_aext(struct inode *, struct extent_position *, |
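
The new udf_dir_entry_len() helper above centralises the 4-byte-padded directory entry length that namei.c and directory.c used to open-code as "(... + 3) & ~3". A small standalone C check (with an illustrative fixed descriptor size, not the kernel's packed on-disk struct) shows that the ALIGN form computes the same value:

    #include <stdio.h>

    #define UDF_NAME_PAD 4
    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1u))

    static unsigned int dir_entry_len(unsigned int fixed_part,
                                      unsigned int imp_use_len,
                                      unsigned int ident_len)
    {
        return ALIGN(fixed_part + imp_use_len + ident_len, UDF_NAME_PAD);
    }

    int main(void)
    {
        unsigned int fixed = 38;            /* illustrative fixed descriptor size */
        unsigned int raw = fixed + 0 + 9;   /* no impUse field, 9-byte identifier */

        printf("ALIGN form: %u, open-coded form: %u\n",
               dir_entry_len(fixed, 0, 9), (raw + 3) & ~3u);
        return 0;
    }
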
diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 40a916efd7c0..1194a4c78d55 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h | |||
| @@ -309,7 +309,7 @@ static inline void acpi_processor_ppc_exit(void) | |||
| 309 | { | 309 | { |
| 310 | return; | 310 | return; |
| 311 | } | 311 | } |
| 312 | static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr, | 312 | static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr, |
| 313 | int event_flag) | 313 | int event_flag) |
| 314 | { | 314 | { |
| 315 | static unsigned int printout = 1; | 315 | static unsigned int printout = 1; |
| @@ -320,7 +320,6 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr, | |||
| 320 | "Consider compiling CPUfreq support into your kernel.\n"); | 320 | "Consider compiling CPUfreq support into your kernel.\n"); |
| 321 | printout = 0; | 321 | printout = 0; |
| 322 | } | 322 | } |
| 323 | return 0; | ||
| 324 | } | 323 | } |
| 325 | static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) | 324 | static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) |
| 326 | { | 325 | { |
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h index 0763f065b975..d10f1e7d6ba8 100644 --- a/include/asm-generic/qspinlock_types.h +++ b/include/asm-generic/qspinlock_types.h | |||
| @@ -63,7 +63,7 @@ typedef struct qspinlock { | |||
| 63 | /* | 63 | /* |
| 64 | * Initializer | 64 | * Initializer |
| 65 | */ | 65 | */ |
| 66 | #define __ARCH_SPIN_LOCK_UNLOCKED { .val = ATOMIC_INIT(0) } | 66 | #define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } } |
| 67 | 67 | ||
| 68 | /* | 68 | /* |
| 69 | * Bitfields in the atomic value: | 69 | * Bitfields in the atomic value: |
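
The qspinlock initializer above gains an extra brace level so that the initializer's nesting matches the structure layout, whose first member is an embedded union; fully bracing it avoids missing-braces warnings and build problems with some older compilers. A tiny C11 userspace example of the same idea, with invented types:

    #include <stdio.h>

    struct lockish {
        struct {
            int val;    /* stands in for the embedded union { atomic_t val; ... } */
        };              /* anonymous member, like the kernel's union */
    };

    /* Fully braced: outer braces for struct lockish, inner for the anonymous member. */
    #define LOCKISH_UNLOCKED  { { .val = 0 } }

    int main(void)
    {
        struct lockish lock = LOCKISH_UNLOCKED;

        printf("val = %d\n", lock.val);
        return 0;
    }
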
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 0bd432a4d7bd..24251762c20c 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h | |||
| @@ -22,7 +22,6 @@ struct dentry; | |||
| 22 | */ | 22 | */ |
| 23 | enum wb_state { | 23 | enum wb_state { |
| 24 | WB_registered, /* bdi_register() was done */ | 24 | WB_registered, /* bdi_register() was done */ |
| 25 | WB_shutting_down, /* wb_shutdown() in progress */ | ||
| 26 | WB_writeback_running, /* Writeback is in progress */ | 25 | WB_writeback_running, /* Writeback is in progress */ |
| 27 | WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */ | 26 | WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */ |
| 28 | WB_start_all, /* nr_pages == 0 (all) work pending */ | 27 | WB_start_all, /* nr_pages == 0 (all) work pending */ |
| @@ -189,6 +188,7 @@ struct backing_dev_info { | |||
| 189 | #ifdef CONFIG_CGROUP_WRITEBACK | 188 | #ifdef CONFIG_CGROUP_WRITEBACK |
| 190 | struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ | 189 | struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ |
| 191 | struct rb_root cgwb_congested_tree; /* their congested states */ | 190 | struct rb_root cgwb_congested_tree; /* their congested states */ |
| 191 | struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ | ||
| 192 | #else | 192 | #else |
| 193 | struct bdi_writeback_congested *wb_congested; | 193 | struct bdi_writeback_congested *wb_congested; |
| 194 | #endif | 194 | #endif |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 4bd2f34947f4..201de12a9957 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -503,6 +503,7 @@ struct irq_chip { | |||
| 503 | * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip | 503 | * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip |
| 504 | * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask | 504 | * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask |
| 505 | * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode | 505 | * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode |
| 506 | * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs | ||
| 506 | */ | 507 | */ |
| 507 | enum { | 508 | enum { |
| 508 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), | 509 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), |
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 25b33b664537..dd1e40ddac7d 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
| @@ -145,11 +145,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc) | |||
| 145 | return desc->irq_common_data.handler_data; | 145 | return desc->irq_common_data.handler_data; |
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) | ||
| 149 | { | ||
| 150 | return desc->irq_common_data.msi_desc; | ||
| 151 | } | ||
| 152 | |||
| 153 | /* | 148 | /* |
| 154 | * Architectures call this to let the generic IRQ layer | 149 | * Architectures call this to let the generic IRQ layer |
| 155 | * handle an interrupt. | 150 | * handle an interrupt. |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index d23123238534..941dc0a5a877 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -666,7 +666,7 @@ do { \ | |||
| 666 | * your code. (Extra memory is used for special buffers that are | 666 | * your code. (Extra memory is used for special buffers that are |
| 667 | * allocated when trace_printk() is used.) | 667 | * allocated when trace_printk() is used.) |
| 668 | * | 668 | * |
| 669 | * A little optization trick is done here. If there's only one | 669 | * A little optimization trick is done here. If there's only one |
| 670 | * argument, there's no need to scan the string for printf formats. | 670 | * argument, there's no need to scan the string for printf formats. |
| 671 | * The trace_puts() will suffice. But how can we take advantage of | 671 | * The trace_puts() will suffice. But how can we take advantage of |
| 672 | * using trace_puts() when trace_printk() has only one argument? | 672 | * using trace_puts() when trace_printk() has only one argument? |
diff --git a/include/linux/memory.h b/include/linux/memory.h index 31ca3e28b0eb..a6ddefc60517 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
| @@ -38,6 +38,7 @@ struct memory_block { | |||
| 38 | 38 | ||
| 39 | int arch_get_memory_phys_device(unsigned long start_pfn); | 39 | int arch_get_memory_phys_device(unsigned long start_pfn); |
| 40 | unsigned long memory_block_size_bytes(void); | 40 | unsigned long memory_block_size_bytes(void); |
| 41 | int set_memory_block_size_order(unsigned int order); | ||
| 41 | 42 | ||
| 42 | /* These states are exposed to userspace as text strings in sysfs */ | 43 | /* These states are exposed to userspace as text strings in sysfs */ |
| 43 | #define MEM_ONLINE (1<<0) /* exposed to userspace */ | 44 | #define MEM_ONLINE (1<<0) /* exposed to userspace */ |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 9dee3c23895d..712eed156d09 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
| @@ -1438,6 +1438,8 @@ enum { | |||
| 1438 | NFS_IOHDR_EOF, | 1438 | NFS_IOHDR_EOF, |
| 1439 | NFS_IOHDR_REDO, | 1439 | NFS_IOHDR_REDO, |
| 1440 | NFS_IOHDR_STAT, | 1440 | NFS_IOHDR_STAT, |
| 1441 | NFS_IOHDR_RESEND_PNFS, | ||
| 1442 | NFS_IOHDR_RESEND_MDS, | ||
| 1441 | }; | 1443 | }; |
| 1442 | 1444 | ||
| 1443 | struct nfs_io_completion; | 1445 | struct nfs_io_completion; |
diff --git a/include/linux/refcount.h b/include/linux/refcount.h index 4193c41e383a..a685da2c4522 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h | |||
| @@ -98,5 +98,7 @@ extern __must_check bool refcount_dec_if_one(refcount_t *r); | |||
| 98 | extern __must_check bool refcount_dec_not_one(refcount_t *r); | 98 | extern __must_check bool refcount_dec_not_one(refcount_t *r); |
| 99 | extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); | 99 | extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); |
| 100 | extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); | 100 | extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); |
| 101 | 101 | extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r, | |
| 102 | spinlock_t *lock, | ||
| 103 | unsigned long *flags); | ||
| 102 | #endif /* _LINUX_REFCOUNT_H */ | 104 | #endif /* _LINUX_REFCOUNT_H */ |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 1e8a46435838..fd57888d4942 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
| @@ -427,6 +427,11 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); | |||
| 427 | #define atomic_dec_and_lock(atomic, lock) \ | 427 | #define atomic_dec_and_lock(atomic, lock) \ |
| 428 | __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) | 428 | __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) |
| 429 | 429 | ||
| 430 | extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock, | ||
| 431 | unsigned long *flags); | ||
| 432 | #define atomic_dec_and_lock_irqsave(atomic, lock, flags) \ | ||
| 433 | __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags))) | ||
| 434 | |||
| 430 | int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, | 435 | int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, |
| 431 | size_t max_size, unsigned int cpu_mult, | 436 | size_t max_size, unsigned int cpu_mult, |
| 432 | gfp_t gfp); | 437 | gfp_t gfp); |
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h index 85a3fb65e40a..20d6cc91435d 100644 --- a/include/uapi/linux/nbd.h +++ b/include/uapi/linux/nbd.h | |||
| @@ -53,6 +53,9 @@ enum { | |||
| 53 | /* These are client behavior specific flags. */ | 53 | /* These are client behavior specific flags. */ |
| 54 | #define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on | 54 | #define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on |
| 55 | disconnect. */ | 55 | disconnect. */ |
| 56 | #define NBD_CFLAG_DISCONNECT_ON_CLOSE (1 << 1) /* disconnect the nbd device on | ||
| 57 | * close by last opener. | ||
| 58 | */ | ||
| 56 | 59 | ||
| 57 | /* userspace doesn't need the nbd_device structure */ | 60 | /* userspace doesn't need the nbd_device structure */ |
| 58 | 61 | ||
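
NBD_CFLAG_DISCONNECT_ON_CLOSE above is a new client-behaviour flag in the uapi header. Purely for illustration (the netlink/ioctl plumbing that actually passes the flags to the driver is omitted), composing and testing the bits looks like this:

    #include <stdio.h>

    #define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0)
    #define NBD_CFLAG_DISCONNECT_ON_CLOSE   (1 << 1)

    int main(void)
    {
        unsigned int cflags = NBD_CFLAG_DESTROY_ON_DISCONNECT |
                              NBD_CFLAG_DISCONNECT_ON_CLOSE;

        printf("client flags: %#x, disconnect-on-close: %s\n", cflags,
               (cflags & NBD_CFLAG_DISCONNECT_ON_CLOSE) ? "set" : "clear");
        return 0;
    }
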
diff --git a/include/xen/xen.h b/include/xen/xen.h index 9d4340c907d1..1e1d9bd0bd37 100644 --- a/include/xen/xen.h +++ b/include/xen/xen.h | |||
| @@ -25,12 +25,16 @@ extern bool xen_pvh; | |||
| 25 | #define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN) | 25 | #define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN) |
| 26 | #define xen_pvh_domain() (xen_pvh) | 26 | #define xen_pvh_domain() (xen_pvh) |
| 27 | 27 | ||
| 28 | #include <linux/types.h> | ||
| 29 | |||
| 30 | extern uint32_t xen_start_flags; | ||
| 31 | |||
| 28 | #ifdef CONFIG_XEN_DOM0 | 32 | #ifdef CONFIG_XEN_DOM0 |
| 29 | #include <xen/interface/xen.h> | 33 | #include <xen/interface/xen.h> |
| 30 | #include <asm/xen/hypervisor.h> | 34 | #include <asm/xen/hypervisor.h> |
| 31 | 35 | ||
| 32 | #define xen_initial_domain() (xen_domain() && \ | 36 | #define xen_initial_domain() (xen_domain() && \ |
| 33 | xen_start_info && xen_start_info->flags & SIF_INITDOMAIN) | 37 | (xen_start_flags & SIF_INITDOMAIN)) |
| 34 | #else /* !CONFIG_XEN_DOM0 */ | 38 | #else /* !CONFIG_XEN_DOM0 */ |
| 35 | #define xen_initial_domain() (0) | 39 | #define xen_initial_domain() (0) |
| 36 | #endif /* CONFIG_XEN_DOM0 */ | 40 | #endif /* CONFIG_XEN_DOM0 */ |
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index 4dadeb3d6666..6f636136cccc 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c | |||
| @@ -55,6 +55,7 @@ static const struct irq_bit_descr irqchip_flags[] = { | |||
| 55 | BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE), | 55 | BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE), |
| 56 | BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE), | 56 | BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE), |
| 57 | BIT_MASK_DESCR(IRQCHIP_EOI_THREADED), | 57 | BIT_MASK_DESCR(IRQCHIP_EOI_THREADED), |
| 58 | BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI), | ||
| 58 | }; | 59 | }; |
| 59 | 60 | ||
| 60 | static void | 61 | static void |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index edcac5de7ebc..5fa4d3138bf1 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
| @@ -1265,11 +1265,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class) | |||
| 1265 | this.parent = NULL; | 1265 | this.parent = NULL; |
| 1266 | this.class = class; | 1266 | this.class = class; |
| 1267 | 1267 | ||
| 1268 | local_irq_save(flags); | 1268 | raw_local_irq_save(flags); |
| 1269 | arch_spin_lock(&lockdep_lock); | 1269 | arch_spin_lock(&lockdep_lock); |
| 1270 | ret = __lockdep_count_forward_deps(&this); | 1270 | ret = __lockdep_count_forward_deps(&this); |
| 1271 | arch_spin_unlock(&lockdep_lock); | 1271 | arch_spin_unlock(&lockdep_lock); |
| 1272 | local_irq_restore(flags); | 1272 | raw_local_irq_restore(flags); |
| 1273 | 1273 | ||
| 1274 | return ret; | 1274 | return ret; |
| 1275 | } | 1275 | } |
| @@ -1292,11 +1292,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class) | |||
| 1292 | this.parent = NULL; | 1292 | this.parent = NULL; |
| 1293 | this.class = class; | 1293 | this.class = class; |
| 1294 | 1294 | ||
| 1295 | local_irq_save(flags); | 1295 | raw_local_irq_save(flags); |
| 1296 | arch_spin_lock(&lockdep_lock); | 1296 | arch_spin_lock(&lockdep_lock); |
| 1297 | ret = __lockdep_count_backward_deps(&this); | 1297 | ret = __lockdep_count_backward_deps(&this); |
| 1298 | arch_spin_unlock(&lockdep_lock); | 1298 | arch_spin_unlock(&lockdep_lock); |
| 1299 | local_irq_restore(flags); | 1299 | raw_local_irq_restore(flags); |
| 1300 | 1300 | ||
| 1301 | return ret; | 1301 | return ret; |
| 1302 | } | 1302 | } |
| @@ -4411,7 +4411,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) | |||
| 4411 | if (unlikely(!debug_locks)) | 4411 | if (unlikely(!debug_locks)) |
| 4412 | return; | 4412 | return; |
| 4413 | 4413 | ||
| 4414 | local_irq_save(flags); | 4414 | raw_local_irq_save(flags); |
| 4415 | for (i = 0; i < curr->lockdep_depth; i++) { | 4415 | for (i = 0; i < curr->lockdep_depth; i++) { |
| 4416 | hlock = curr->held_locks + i; | 4416 | hlock = curr->held_locks + i; |
| 4417 | 4417 | ||
| @@ -4422,7 +4422,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) | |||
| 4422 | print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); | 4422 | print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); |
| 4423 | break; | 4423 | break; |
| 4424 | } | 4424 | } |
| 4425 | local_irq_restore(flags); | 4425 | raw_local_irq_restore(flags); |
| 4426 | } | 4426 | } |
| 4427 | EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); | 4427 | EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); |
| 4428 | 4428 | ||
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index bc1e507be9ff..776308d2fa9e 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c | |||
| @@ -181,6 +181,7 @@ void down_read_non_owner(struct rw_semaphore *sem) | |||
| 181 | might_sleep(); | 181 | might_sleep(); |
| 182 | 182 | ||
| 183 | __down_read(sem); | 183 | __down_read(sem); |
| 184 | rwsem_set_reader_owned(sem); | ||
| 184 | } | 185 | } |
| 185 | 186 | ||
| 186 | EXPORT_SYMBOL(down_read_non_owner); | 187 | EXPORT_SYMBOL(down_read_non_owner); |
diff --git a/kernel/softirq.c b/kernel/softirq.c index de2f57fddc04..900dcfee542c 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -139,9 +139,13 @@ static void __local_bh_enable(unsigned int cnt) | |||
| 139 | { | 139 | { |
| 140 | lockdep_assert_irqs_disabled(); | 140 | lockdep_assert_irqs_disabled(); |
| 141 | 141 | ||
| 142 | if (preempt_count() == cnt) | ||
| 143 | trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); | ||
| 144 | |||
| 142 | if (softirq_count() == (cnt & SOFTIRQ_MASK)) | 145 | if (softirq_count() == (cnt & SOFTIRQ_MASK)) |
| 143 | trace_softirqs_on(_RET_IP_); | 146 | trace_softirqs_on(_RET_IP_); |
| 144 | preempt_count_sub(cnt); | 147 | |
| 148 | __preempt_count_sub(cnt); | ||
| 145 | } | 149 | } |
| 146 | 150 | ||
| 147 | /* | 151 | /* |
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 055a4a728c00..3e93c54bd3a1 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c | |||
| @@ -1659,7 +1659,7 @@ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); | |||
| 1659 | int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts) | 1659 | int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts) |
| 1660 | { | 1660 | { |
| 1661 | switch(restart->nanosleep.type) { | 1661 | switch(restart->nanosleep.type) { |
| 1662 | #ifdef CONFIG_COMPAT | 1662 | #ifdef CONFIG_COMPAT_32BIT_TIME |
| 1663 | case TT_COMPAT: | 1663 | case TT_COMPAT: |
| 1664 | if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp)) | 1664 | if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp)) |
| 1665 | return -EFAULT; | 1665 | return -EFAULT; |
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 5a6251ac6f7a..9cdf54b04ca8 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c | |||
| @@ -604,7 +604,6 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, | |||
| 604 | /* | 604 | /* |
| 605 | * Disarm any old timer after extracting its expiry time. | 605 | * Disarm any old timer after extracting its expiry time. |
| 606 | */ | 606 | */ |
| 607 | lockdep_assert_irqs_disabled(); | ||
| 608 | 607 | ||
| 609 | ret = 0; | 608 | ret = 0; |
| 610 | old_incr = timer->it.cpu.incr; | 609 | old_incr = timer->it.cpu.incr; |
| @@ -1049,7 +1048,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer) | |||
| 1049 | /* | 1048 | /* |
| 1050 | * Now re-arm for the new expiry time. | 1049 | * Now re-arm for the new expiry time. |
| 1051 | */ | 1050 | */ |
| 1052 | lockdep_assert_irqs_disabled(); | ||
| 1053 | arm_timer(timer); | 1051 | arm_timer(timer); |
| 1054 | unlock: | 1052 | unlock: |
| 1055 | unlock_task_sighand(p, &flags); | 1053 | unlock_task_sighand(p, &flags); |
diff --git a/kernel/time/time.c b/kernel/time/time.c index 6fa99213fc72..2b41e8e2d31d 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | */ | 28 | */ |
| 29 | 29 | ||
| 30 | #include <linux/export.h> | 30 | #include <linux/export.h> |
| 31 | #include <linux/kernel.h> | ||
| 31 | #include <linux/timex.h> | 32 | #include <linux/timex.h> |
| 32 | #include <linux/capability.h> | 33 | #include <linux/capability.h> |
| 33 | #include <linux/timekeeper_internal.h> | 34 | #include <linux/timekeeper_internal.h> |
| @@ -314,9 +315,10 @@ unsigned int jiffies_to_msecs(const unsigned long j) | |||
| 314 | return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); | 315 | return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); |
| 315 | #else | 316 | #else |
| 316 | # if BITS_PER_LONG == 32 | 317 | # if BITS_PER_LONG == 32 |
| 317 | return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32; | 318 | return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >> |
| 319 | HZ_TO_MSEC_SHR32; | ||
| 318 | # else | 320 | # else |
| 319 | return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN; | 321 | return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN); |
| 320 | # endif | 322 | # endif |
| 321 | #endif | 323 | #endif |
| 322 | } | 324 | } |
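
The jiffies_to_msecs() change above switches the inexact branches from truncating division to rounding up, so a non-zero number of jiffies can no longer be reported as 0 ms. The standalone C comparison below uses an illustrative HZ of 1024 and only the DIV_ROUND_UP branch; the kernel's mul/shift branch applies the same (1 << shift) - 1 bias before shifting.

    #include <stdio.h>

    #define HZ            1024            /* illustrative; not a generated kernel constant */
    #define MSEC_PER_SEC  1000
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    static unsigned int jiffies_to_msecs_trunc(unsigned long j)
    {
        return (unsigned int)(j * MSEC_PER_SEC / HZ);
    }

    static unsigned int jiffies_to_msecs_round_up(unsigned long j)
    {
        return (unsigned int)DIV_ROUND_UP(j * MSEC_PER_SEC, HZ);
    }

    int main(void)
    {
        for (unsigned long j = 0; j <= 3; j++)
            printf("j=%lu  truncated=%u ms  rounded up=%u ms\n",
                   j, jiffies_to_msecs_trunc(j), jiffies_to_msecs_round_up(j));
        return 0;
    }
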
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c9336e98ac59..a0079b4c7a49 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -1360,8 +1360,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
| 1360 | void | 1360 | void |
| 1361 | update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | 1361 | update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) |
| 1362 | { | 1362 | { |
| 1363 | struct ring_buffer *buf; | ||
| 1364 | |||
| 1365 | if (tr->stop_count) | 1363 | if (tr->stop_count) |
| 1366 | return; | 1364 | return; |
| 1367 | 1365 | ||
| @@ -1375,9 +1373,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
| 1375 | 1373 | ||
| 1376 | arch_spin_lock(&tr->max_lock); | 1374 | arch_spin_lock(&tr->max_lock); |
| 1377 | 1375 | ||
| 1378 | buf = tr->trace_buffer.buffer; | 1376 | swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); |
| 1379 | tr->trace_buffer.buffer = tr->max_buffer.buffer; | ||
| 1380 | tr->max_buffer.buffer = buf; | ||
| 1381 | 1377 | ||
| 1382 | __update_max_tr(tr, tsk, cpu); | 1378 | __update_max_tr(tr, tsk, cpu); |
| 1383 | arch_spin_unlock(&tr->max_lock); | 1379 | arch_spin_unlock(&tr->max_lock); |
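
update_max_tr() above replaces a three-line open-coded pointer swap with the kernel's swap() helper. A userspace equivalent of that macro, written with the GCC/clang typeof extension, is shown below for reference:

    #include <stdio.h>

    /* Type-generic swap, in the style of the kernel helper. */
    #define swap(a, b) \
        do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

    int main(void)
    {
        const char *trace_buf = "trace buffer";
        const char *max_buf = "max (snapshot) buffer";

        swap(trace_buf, max_buf);
        printf("trace_buf -> %s\nmax_buf   -> %s\n", trace_buf, max_buf);
        return 0;
    }
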
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index e1c818dbc0d7..0dceb77d1d42 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
| @@ -78,7 +78,8 @@ static const char * ops[] = { OPS }; | |||
| 78 | C(TOO_MANY_PREDS, "Too many terms in predicate expression"), \ | 78 | C(TOO_MANY_PREDS, "Too many terms in predicate expression"), \ |
| 79 | C(INVALID_FILTER, "Meaningless filter expression"), \ | 79 | C(INVALID_FILTER, "Meaningless filter expression"), \ |
| 80 | C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \ | 80 | C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \ |
| 81 | C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), | 81 | C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), \ |
| 82 | C(NO_FILTER, "No filter found"), | ||
| 82 | 83 | ||
| 83 | #undef C | 84 | #undef C |
| 84 | #define C(a, b) FILT_ERR_##a | 85 | #define C(a, b) FILT_ERR_##a |
| @@ -550,6 +551,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds, | |||
| 550 | goto out_free; | 551 | goto out_free; |
| 551 | } | 552 | } |
| 552 | 553 | ||
| 554 | if (!N) { | ||
| 555 | /* No program? */ | ||
| 556 | ret = -EINVAL; | ||
| 557 | parse_error(pe, FILT_ERR_NO_FILTER, ptr - str); | ||
| 558 | goto out_free; | ||
| 559 | } | ||
| 560 | |||
| 553 | prog[N].pred = NULL; /* #13 */ | 561 | prog[N].pred = NULL; /* #13 */ |
| 554 | prog[N].target = 1; /* TRUE */ | 562 | prog[N].target = 1; /* TRUE */ |
| 555 | prog[N+1].pred = NULL; | 563 | prog[N+1].pred = NULL; |
diff --git a/lib/Makefile b/lib/Makefile index 8153fdab287f..90dc5520b784 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
| @@ -23,7 +23,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ | |||
| 23 | sha1.o chacha20.o irq_regs.o argv_split.o \ | 23 | sha1.o chacha20.o irq_regs.o argv_split.o \ |
| 24 | flex_proportions.o ratelimit.o show_mem.o \ | 24 | flex_proportions.o ratelimit.o show_mem.o \ |
| 25 | is_single_threaded.o plist.o decompress.o kobject_uevent.o \ | 25 | is_single_threaded.o plist.o decompress.o kobject_uevent.o \ |
| 26 | earlycpio.o seq_buf.o siphash.o \ | 26 | earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ |
| 27 | nmi_backtrace.o nodemask.o win_minmax.o | 27 | nmi_backtrace.o nodemask.o win_minmax.o |
| 28 | 28 | ||
| 29 | lib-$(CONFIG_PRINTK) += dump_stack.o | 29 | lib-$(CONFIG_PRINTK) += dump_stack.o |
| @@ -95,10 +95,6 @@ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o | |||
| 95 | obj-$(CONFIG_DEBUG_LIST) += list_debug.o | 95 | obj-$(CONFIG_DEBUG_LIST) += list_debug.o |
| 96 | obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o | 96 | obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o |
| 97 | 97 | ||
| 98 | ifneq ($(CONFIG_HAVE_DEC_LOCK),y) | ||
| 99 | lib-y += dec_and_lock.o | ||
| 100 | endif | ||
| 101 | |||
| 102 | obj-$(CONFIG_BITREVERSE) += bitrev.o | 98 | obj-$(CONFIG_BITREVERSE) += bitrev.o |
| 103 | obj-$(CONFIG_RATIONAL) += rational.o | 99 | obj-$(CONFIG_RATIONAL) += rational.o |
| 104 | obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o | 100 | obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o |
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c index 347fa7ac2e8a..9555b68bb774 100644 --- a/lib/dec_and_lock.c +++ b/lib/dec_and_lock.c | |||
| @@ -33,3 +33,19 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) | |||
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | EXPORT_SYMBOL(_atomic_dec_and_lock); | 35 | EXPORT_SYMBOL(_atomic_dec_and_lock); |
| 36 | |||
| 37 | int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock, | ||
| 38 | unsigned long *flags) | ||
| 39 | { | ||
| 40 | /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ | ||
| 41 | if (atomic_add_unless(atomic, -1, 1)) | ||
| 42 | return 0; | ||
| 43 | |||
| 44 | /* Otherwise do it the slow way */ | ||
| 45 | spin_lock_irqsave(lock, *flags); | ||
| 46 | if (atomic_dec_and_test(atomic)) | ||
| 47 | return 1; | ||
| 48 | spin_unlock_irqrestore(lock, *flags); | ||
| 49 | return 0; | ||
| 50 | } | ||
| 51 | EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave); | ||
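
_atomic_dec_and_lock_irqsave() above follows the classic dec-and-lock shape: a lock-free fast path that refuses to perform the final decrement, and a slow path that takes the lock (with interrupts saved and disabled) before dropping the last reference. The userspace C sketch below mimics that structure with a pthread mutex and C11 atomics; disabling interrupts has no userspace equivalent and is only noted in a comment.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int refs;
    static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Returns true with *lock held iff the counter dropped to zero. */
    static bool dec_and_lock(atomic_int *counter, pthread_mutex_t *lock)
    {
        int old = atomic_load(counter);

        /* Fast path: decrement unless that would reach zero (old == 1). */
        while (old > 1) {
            if (atomic_compare_exchange_weak(counter, &old, old - 1))
                return false;
        }

        /* Slow path: the kernel does spin_lock_irqsave(lock, *flags) here. */
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(counter, 1) == 1)
            return true;            /* last reference, return with lock held */
        pthread_mutex_unlock(lock);
        return false;
    }

    int main(void)
    {
        atomic_init(&refs, 3);
        while (!dec_and_lock(&refs, &obj_lock))
            puts("dropped a reference on the fast path");
        puts("final reference dropped with the lock held");
        pthread_mutex_unlock(&obj_lock);
        return 0;
    }
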
diff --git a/lib/refcount.c b/lib/refcount.c index 0eb48353abe3..d3b81cefce91 100644 --- a/lib/refcount.c +++ b/lib/refcount.c | |||
| @@ -350,3 +350,31 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) | |||
| 350 | } | 350 | } |
| 351 | EXPORT_SYMBOL(refcount_dec_and_lock); | 351 | EXPORT_SYMBOL(refcount_dec_and_lock); |
| 352 | 352 | ||
| 353 | /** | ||
| 354 | * refcount_dec_and_lock_irqsave - return holding spinlock with disabled | ||
| 355 | * interrupts if able to decrement refcount to 0 | ||
| 356 | * @r: the refcount | ||
| 357 | * @lock: the spinlock to be locked | ||
| 358 | * @flags: saved IRQ-flags if the lock is acquired | ||
| 359 | * | ||
| 360 | * Same as refcount_dec_and_lock() above except that the spinlock is acquired | ||
| 361 | * with disabled interrupts. | ||
| 362 | * | ||
| 363 | * Return: true and hold spinlock if able to decrement refcount to 0, false | ||
| 364 | * otherwise | ||
| 365 | */ | ||
| 366 | bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock, | ||
| 367 | unsigned long *flags) | ||
| 368 | { | ||
| 369 | if (refcount_dec_not_one(r)) | ||
| 370 | return false; | ||
| 371 | |||
| 372 | spin_lock_irqsave(lock, *flags); | ||
| 373 | if (!refcount_dec_and_test(r)) { | ||
| 374 | spin_unlock_irqrestore(lock, *flags); | ||
| 375 | return false; | ||
| 376 | } | ||
| 377 | |||
| 378 | return true; | ||
| 379 | } | ||
| 380 | EXPORT_SYMBOL(refcount_dec_and_lock_irqsave); | ||
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 347cc834c04a..2e5d3df0853d 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
| @@ -359,15 +359,8 @@ static void wb_shutdown(struct bdi_writeback *wb) | |||
| 359 | spin_lock_bh(&wb->work_lock); | 359 | spin_lock_bh(&wb->work_lock); |
| 360 | if (!test_and_clear_bit(WB_registered, &wb->state)) { | 360 | if (!test_and_clear_bit(WB_registered, &wb->state)) { |
| 361 | spin_unlock_bh(&wb->work_lock); | 361 | spin_unlock_bh(&wb->work_lock); |
| 362 | /* | ||
| 363 | * Wait for wb shutdown to finish if someone else is just | ||
| 364 | * running wb_shutdown(). Otherwise we could proceed to wb / | ||
| 365 | * bdi destruction before wb_shutdown() is finished. | ||
| 366 | */ | ||
| 367 | wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE); | ||
| 368 | return; | 362 | return; |
| 369 | } | 363 | } |
| 370 | set_bit(WB_shutting_down, &wb->state); | ||
| 371 | spin_unlock_bh(&wb->work_lock); | 364 | spin_unlock_bh(&wb->work_lock); |
| 372 | 365 | ||
| 373 | cgwb_remove_from_bdi_list(wb); | 366 | cgwb_remove_from_bdi_list(wb); |
| @@ -379,12 +372,6 @@ static void wb_shutdown(struct bdi_writeback *wb) | |||
| 379 | mod_delayed_work(bdi_wq, &wb->dwork, 0); | 372 | mod_delayed_work(bdi_wq, &wb->dwork, 0); |
| 380 | flush_delayed_work(&wb->dwork); | 373 | flush_delayed_work(&wb->dwork); |
| 381 | WARN_ON(!list_empty(&wb->work_list)); | 374 | WARN_ON(!list_empty(&wb->work_list)); |
| 382 | /* | ||
| 383 | * Make sure bit gets cleared after shutdown is finished. Matches with | ||
| 384 | * the barrier provided by test_and_clear_bit() above. | ||
| 385 | */ | ||
| 386 | smp_wmb(); | ||
| 387 | clear_and_wake_up_bit(WB_shutting_down, &wb->state); | ||
| 388 | } | 375 | } |
| 389 | 376 | ||
| 390 | static void wb_exit(struct bdi_writeback *wb) | 377 | static void wb_exit(struct bdi_writeback *wb) |
| @@ -508,10 +495,12 @@ static void cgwb_release_workfn(struct work_struct *work) | |||
| 508 | struct bdi_writeback *wb = container_of(work, struct bdi_writeback, | 495 | struct bdi_writeback *wb = container_of(work, struct bdi_writeback, |
| 509 | release_work); | 496 | release_work); |
| 510 | 497 | ||
| 498 | mutex_lock(&wb->bdi->cgwb_release_mutex); | ||
| 511 | wb_shutdown(wb); | 499 | wb_shutdown(wb); |
| 512 | 500 | ||
| 513 | css_put(wb->memcg_css); | 501 | css_put(wb->memcg_css); |
| 514 | css_put(wb->blkcg_css); | 502 | css_put(wb->blkcg_css); |
| 503 | mutex_unlock(&wb->bdi->cgwb_release_mutex); | ||
| 515 | 504 | ||
| 516 | fprop_local_destroy_percpu(&wb->memcg_completions); | 505 | fprop_local_destroy_percpu(&wb->memcg_completions); |
| 517 | percpu_ref_exit(&wb->refcnt); | 506 | percpu_ref_exit(&wb->refcnt); |
| @@ -697,6 +686,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) | |||
| 697 | 686 | ||
| 698 | INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); | 687 | INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); |
| 699 | bdi->cgwb_congested_tree = RB_ROOT; | 688 | bdi->cgwb_congested_tree = RB_ROOT; |
| 689 | mutex_init(&bdi->cgwb_release_mutex); | ||
| 700 | 690 | ||
| 701 | ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); | 691 | ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); |
| 702 | if (!ret) { | 692 | if (!ret) { |
| @@ -717,7 +707,10 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi) | |||
| 717 | spin_lock_irq(&cgwb_lock); | 707 | spin_lock_irq(&cgwb_lock); |
| 718 | radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) | 708 | radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) |
| 719 | cgwb_kill(*slot); | 709 | cgwb_kill(*slot); |
| 710 | spin_unlock_irq(&cgwb_lock); | ||
| 720 | 711 | ||
| 712 | mutex_lock(&bdi->cgwb_release_mutex); | ||
| 713 | spin_lock_irq(&cgwb_lock); | ||
| 721 | while (!list_empty(&bdi->wb_list)) { | 714 | while (!list_empty(&bdi->wb_list)) { |
| 722 | wb = list_first_entry(&bdi->wb_list, struct bdi_writeback, | 715 | wb = list_first_entry(&bdi->wb_list, struct bdi_writeback, |
| 723 | bdi_node); | 716 | bdi_node); |
| @@ -726,6 +719,7 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi) | |||
| 726 | spin_lock_irq(&cgwb_lock); | 719 | spin_lock_irq(&cgwb_lock); |
| 727 | } | 720 | } |
| 728 | spin_unlock_irq(&cgwb_lock); | 721 | spin_unlock_irq(&cgwb_lock); |
| 722 | mutex_unlock(&bdi->cgwb_release_mutex); | ||
| 729 | } | 723 | } |
| 730 | 724 | ||
| 731 | /** | 725 | /** |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 3c85af058227..3fabf9f6a0f9 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -987,8 +987,6 @@ bool xprt_prepare_transmit(struct rpc_task *task) | |||
| 987 | task->tk_status = -EAGAIN; | 987 | task->tk_status = -EAGAIN; |
| 988 | goto out_unlock; | 988 | goto out_unlock; |
| 989 | } | 989 | } |
| 990 | if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent) | ||
| 991 | req->rq_xid = xprt_alloc_xid(xprt); | ||
| 992 | ret = true; | 990 | ret = true; |
| 993 | out_unlock: | 991 | out_unlock: |
| 994 | spin_unlock_bh(&xprt->transport_lock); | 992 | spin_unlock_bh(&xprt->transport_lock); |
| @@ -1298,7 +1296,12 @@ void xprt_retry_reserve(struct rpc_task *task) | |||
| 1298 | 1296 | ||
| 1299 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) | 1297 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) |
| 1300 | { | 1298 | { |
| 1301 | return (__force __be32)xprt->xid++; | 1299 | __be32 xid; |
| 1300 | |||
| 1301 | spin_lock(&xprt->reserve_lock); | ||
| 1302 | xid = (__force __be32)xprt->xid++; | ||
| 1303 | spin_unlock(&xprt->reserve_lock); | ||
| 1304 | return xid; | ||
| 1302 | } | 1305 | } |
| 1303 | 1306 | ||
| 1304 | static inline void xprt_init_xid(struct rpc_xprt *xprt) | 1307 | static inline void xprt_init_xid(struct rpc_xprt *xprt) |
| @@ -1316,6 +1319,7 @@ void xprt_request_init(struct rpc_task *task) | |||
| 1316 | req->rq_task = task; | 1319 | req->rq_task = task; |
| 1317 | req->rq_xprt = xprt; | 1320 | req->rq_xprt = xprt; |
| 1318 | req->rq_buffer = NULL; | 1321 | req->rq_buffer = NULL; |
| 1322 | req->rq_xid = xprt_alloc_xid(xprt); | ||
| 1319 | req->rq_connect_cookie = xprt->connect_cookie - 1; | 1323 | req->rq_connect_cookie = xprt->connect_cookie - 1; |
| 1320 | req->rq_bytes_sent = 0; | 1324 | req->rq_bytes_sent = 0; |
| 1321 | req->rq_snd_buf.len = 0; | 1325 | req->rq_snd_buf.len = 0; |
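
The sunrpc change above moves XID assignment into xprt_request_init() and makes xprt_alloc_xid() bump the transport counter under reserve_lock, so concurrent request setup cannot hand out duplicate XIDs. A minimal userspace analogue, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct xprt_like {
        pthread_mutex_t reserve_lock;
        uint32_t xid;
    };

    static struct xprt_like xprt = {
        .reserve_lock = PTHREAD_MUTEX_INITIALIZER,
        .xid = 0x1234,
    };

    /* Counterpart of xprt_alloc_xid(): serialise the increment. */
    static uint32_t alloc_xid(struct xprt_like *x)
    {
        uint32_t xid;

        pthread_mutex_lock(&x->reserve_lock);
        xid = x->xid++;
        pthread_mutex_unlock(&x->reserve_lock);
        return xid;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            printf("request %d gets xid %#x\n", i, (unsigned int)alloc_xid(&xprt));
        return 0;
    }
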
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 34d9e9ce97c2..e7889f486ca1 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
| @@ -239,6 +239,7 @@ cmd_record_mcount = \ | |||
| 239 | "$(CC_FLAGS_FTRACE)" ]; then \ | 239 | "$(CC_FLAGS_FTRACE)" ]; then \ |
| 240 | $(sub_cmd_record_mcount) \ | 240 | $(sub_cmd_record_mcount) \ |
| 241 | fi; | 241 | fi; |
| 242 | endif # -record-mcount | ||
| 242 | endif # CONFIG_FTRACE_MCOUNT_RECORD | 243 | endif # CONFIG_FTRACE_MCOUNT_RECORD |
| 243 | 244 | ||
| 244 | ifdef CONFIG_STACK_VALIDATION | 245 | ifdef CONFIG_STACK_VALIDATION |
| @@ -263,7 +264,6 @@ ifneq ($(RETPOLINE_CFLAGS),) | |||
| 263 | objtool_args += --retpoline | 264 | objtool_args += --retpoline |
| 264 | endif | 265 | endif |
| 265 | endif | 266 | endif |
| 266 | endif | ||
| 267 | 267 | ||
| 268 | 268 | ||
| 269 | ifdef CONFIG_MODVERSIONS | 269 | ifdef CONFIG_MODVERSIONS |
diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 38047c6aa575..f4a25bd1871f 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c | |||
| @@ -164,6 +164,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func, | |||
| 164 | "lbug_with_loc", | 164 | "lbug_with_loc", |
| 165 | "fortify_panic", | 165 | "fortify_panic", |
| 166 | "usercopy_abort", | 166 | "usercopy_abort", |
| 167 | "machine_real_restart", | ||
| 167 | }; | 168 | }; |
| 168 | 169 | ||
| 169 | if (func->bind == STB_WEAK) | 170 | if (func->bind == STB_WEAK) |
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index ca9ef7017624..d39e4ff7d0bf 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 | |||
| @@ -56,7 +56,7 @@ name as necessary to disambiguate it from others is necessary. Note that option | |||
| 56 | .PP | 56 | .PP |
| 57 | \fB--hide column\fP do not show the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--hide sysfs" to hide the sysfs statistics columns as a group. | 57 | \fB--hide column\fP do not show the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--hide sysfs" to hide the sysfs statistics columns as a group. |
| 58 | .PP | 58 | .PP |
| 59 | \fB--enable column\fP show the specified built-in columns, which are otherwise disabled by default. Currently the only built-in counters disabled by default are "usec" and "Time_Of_Day_Seconds". | 59 | \fB--enable column\fP show the specified built-in columns, which are otherwise disabled by default. Currently the only built-in counters disabled by default are "usec", "Time_Of_Day_Seconds", "APIC" and "X2APIC". |
| 60 | The column name "all" can be used to enable all disabled-by-default built-in counters. | 60 | The column name "all" can be used to enable all disabled-by-default built-in counters. |
| 61 | .PP | 61 | .PP |
| 62 | \fB--show column\fP show only the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--show sysfs" to show the sysfs statistics columns as a group. | 62 | \fB--show column\fP show only the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--show sysfs" to show the sysfs statistics columns as a group. |
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index d6cff3070ebd..4d14bbbf9b63 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
| @@ -109,6 +109,7 @@ unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */ | |||
| 109 | unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ | 109 | unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ |
| 110 | unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ | 110 | unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ |
| 111 | unsigned int has_misc_feature_control; | 111 | unsigned int has_misc_feature_control; |
| 112 | unsigned int first_counter_read = 1; | ||
| 112 | 113 | ||
| 113 | #define RAPL_PKG (1 << 0) | 114 | #define RAPL_PKG (1 << 0) |
| 114 | /* 0x610 MSR_PKG_POWER_LIMIT */ | 115 | /* 0x610 MSR_PKG_POWER_LIMIT */ |
| @@ -170,6 +171,8 @@ struct thread_data { | |||
| 170 | unsigned long long irq_count; | 171 | unsigned long long irq_count; |
| 171 | unsigned int smi_count; | 172 | unsigned int smi_count; |
| 172 | unsigned int cpu_id; | 173 | unsigned int cpu_id; |
| 174 | unsigned int apic_id; | ||
| 175 | unsigned int x2apic_id; | ||
| 173 | unsigned int flags; | 176 | unsigned int flags; |
| 174 | #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 | 177 | #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 |
| 175 | #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 | 178 | #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 |
| @@ -381,19 +384,23 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr) | |||
| 381 | } | 384 | } |
| 382 | 385 | ||
| 383 | /* | 386 | /* |
| 384 | * Each string in this array is compared in --show and --hide cmdline. | 387 | * This list matches the column headers, except |
| 385 | * Thus, strings that are proper sub-sets must follow their more specific peers. | 388 | * 1. built-in only, the sysfs counters are not here -- we learn of those at run-time |
| 389 | * 2. Core and CPU are moved to the end, we can't have strings that contain them | ||
| 390 | * matching on them for --show and --hide. | ||
| 386 | */ | 391 | */ |
| 387 | struct msr_counter bic[] = { | 392 | struct msr_counter bic[] = { |
| 388 | { 0x0, "usec" }, | 393 | { 0x0, "usec" }, |
| 389 | { 0x0, "Time_Of_Day_Seconds" }, | 394 | { 0x0, "Time_Of_Day_Seconds" }, |
| 390 | { 0x0, "Package" }, | 395 | { 0x0, "Package" }, |
| 396 | { 0x0, "Node" }, | ||
| 391 | { 0x0, "Avg_MHz" }, | 397 | { 0x0, "Avg_MHz" }, |
| 398 | { 0x0, "Busy%" }, | ||
| 392 | { 0x0, "Bzy_MHz" }, | 399 | { 0x0, "Bzy_MHz" }, |
| 393 | { 0x0, "TSC_MHz" }, | 400 | { 0x0, "TSC_MHz" }, |
| 394 | { 0x0, "IRQ" }, | 401 | { 0x0, "IRQ" }, |
| 395 | { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL}, | 402 | { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL}, |
| 396 | { 0x0, "Busy%" }, | 403 | { 0x0, "sysfs" }, |
| 397 | { 0x0, "CPU%c1" }, | 404 | { 0x0, "CPU%c1" }, |
| 398 | { 0x0, "CPU%c3" }, | 405 | { 0x0, "CPU%c3" }, |
| 399 | { 0x0, "CPU%c6" }, | 406 | { 0x0, "CPU%c6" }, |
| @@ -424,73 +431,73 @@ struct msr_counter bic[] = { | |||
| 424 | { 0x0, "Cor_J" }, | 431 | { 0x0, "Cor_J" }, |
| 425 | { 0x0, "GFX_J" }, | 432 | { 0x0, "GFX_J" }, |
| 426 | { 0x0, "RAM_J" }, | 433 | { 0x0, "RAM_J" }, |
| 427 | { 0x0, "Core" }, | ||
| 428 | { 0x0, "CPU" }, | ||
| 429 | { 0x0, "Mod%c6" }, | 434 | { 0x0, "Mod%c6" }, |
| 430 | { 0x0, "sysfs" }, | ||
| 431 | { 0x0, "Totl%C0" }, | 435 | { 0x0, "Totl%C0" }, |
| 432 | { 0x0, "Any%C0" }, | 436 | { 0x0, "Any%C0" }, |
| 433 | { 0x0, "GFX%C0" }, | 437 | { 0x0, "GFX%C0" }, |
| 434 | { 0x0, "CPUGFX%" }, | 438 | { 0x0, "CPUGFX%" }, |
| 435 | { 0x0, "Node%" }, | 439 | { 0x0, "Core" }, |
| 440 | { 0x0, "CPU" }, | ||
| 441 | { 0x0, "APIC" }, | ||
| 442 | { 0x0, "X2APIC" }, | ||
| 436 | }; | 443 | }; |
| 437 | 444 | ||
| 438 | |||
| 439 | |||
| 440 | #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) | 445 | #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) |
| 441 | #define BIC_USEC (1ULL << 0) | 446 | #define BIC_USEC (1ULL << 0) |
| 442 | #define BIC_TOD (1ULL << 1) | 447 | #define BIC_TOD (1ULL << 1) |
| 443 | #define BIC_Package (1ULL << 2) | 448 | #define BIC_Package (1ULL << 2) |
| 444 | #define BIC_Avg_MHz (1ULL << 3) | 449 | #define BIC_Node (1ULL << 3) |
| 445 | #define BIC_Bzy_MHz (1ULL << 4) | 450 | #define BIC_Avg_MHz (1ULL << 4) |
| 446 | #define BIC_TSC_MHz (1ULL << 5) | 451 | #define BIC_Busy (1ULL << 5) |
| 447 | #define BIC_IRQ (1ULL << 6) | 452 | #define BIC_Bzy_MHz (1ULL << 6) |
| 448 | #define BIC_SMI (1ULL << 7) | 453 | #define BIC_TSC_MHz (1ULL << 7) |
| 449 | #define BIC_Busy (1ULL << 8) | 454 | #define BIC_IRQ (1ULL << 8) |
| 450 | #define BIC_CPU_c1 (1ULL << 9) | 455 | #define BIC_SMI (1ULL << 9) |
| 451 | #define BIC_CPU_c3 (1ULL << 10) | 456 | #define BIC_sysfs (1ULL << 10) |
| 452 | #define BIC_CPU_c6 (1ULL << 11) | 457 | #define BIC_CPU_c1 (1ULL << 11) |
| 453 | #define BIC_CPU_c7 (1ULL << 12) | 458 | #define BIC_CPU_c3 (1ULL << 12) |
| 454 | #define BIC_ThreadC (1ULL << 13) | 459 | #define BIC_CPU_c6 (1ULL << 13) |
| 455 | #define BIC_CoreTmp (1ULL << 14) | 460 | #define BIC_CPU_c7 (1ULL << 14) |
| 456 | #define BIC_CoreCnt (1ULL << 15) | 461 | #define BIC_ThreadC (1ULL << 15) |
| 457 | #define BIC_PkgTmp (1ULL << 16) | 462 | #define BIC_CoreTmp (1ULL << 16) |
| 458 | #define BIC_GFX_rc6 (1ULL << 17) | 463 | #define BIC_CoreCnt (1ULL << 17) |
| 459 | #define BIC_GFXMHz (1ULL << 18) | 464 | #define BIC_PkgTmp (1ULL << 18) |
| 460 | #define BIC_Pkgpc2 (1ULL << 19) | 465 | #define BIC_GFX_rc6 (1ULL << 19) |
| 461 | #define BIC_Pkgpc3 (1ULL << 20) | 466 | #define BIC_GFXMHz (1ULL << 20) |
| 462 | #define BIC_Pkgpc6 (1ULL << 21) | 467 | #define BIC_Pkgpc2 (1ULL << 21) |
| 463 | #define BIC_Pkgpc7 (1ULL << 22) | 468 | #define BIC_Pkgpc3 (1ULL << 22) |
| 464 | #define BIC_Pkgpc8 (1ULL << 23) | 469 | #define BIC_Pkgpc6 (1ULL << 23) |
| 465 | #define BIC_Pkgpc9 (1ULL << 24) | 470 | #define BIC_Pkgpc7 (1ULL << 24) |
| 466 | #define BIC_Pkgpc10 (1ULL << 25) | 471 | #define BIC_Pkgpc8 (1ULL << 25) |
| 467 | #define BIC_CPU_LPI (1ULL << 26) | 472 | #define BIC_Pkgpc9 (1ULL << 26) |
| 468 | #define BIC_SYS_LPI (1ULL << 27) | 473 | #define BIC_Pkgpc10 (1ULL << 27) |
| 469 | #define BIC_PkgWatt (1ULL << 26) | 474 | #define BIC_CPU_LPI (1ULL << 28) |
| 470 | #define BIC_CorWatt (1ULL << 27) | 475 | #define BIC_SYS_LPI (1ULL << 29) |
| 471 | #define BIC_GFXWatt (1ULL << 28) | 476 | #define BIC_PkgWatt (1ULL << 30) |
| 472 | #define BIC_PkgCnt (1ULL << 29) | 477 | #define BIC_CorWatt (1ULL << 31) |
| 473 | #define BIC_RAMWatt (1ULL << 30) | 478 | #define BIC_GFXWatt (1ULL << 32) |
| 474 | #define BIC_PKG__ (1ULL << 31) | 479 | #define BIC_PkgCnt (1ULL << 33) |
| 475 | #define BIC_RAM__ (1ULL << 32) | 480 | #define BIC_RAMWatt (1ULL << 34) |
| 476 | #define BIC_Pkg_J (1ULL << 33) | 481 | #define BIC_PKG__ (1ULL << 35) |
| 477 | #define BIC_Cor_J (1ULL << 34) | 482 | #define BIC_RAM__ (1ULL << 36) |
| 478 | #define BIC_GFX_J (1ULL << 35) | 483 | #define BIC_Pkg_J (1ULL << 37) |
| 479 | #define BIC_RAM_J (1ULL << 36) | 484 | #define BIC_Cor_J (1ULL << 38) |
| 480 | #define BIC_Core (1ULL << 37) | 485 | #define BIC_GFX_J (1ULL << 39) |
| 481 | #define BIC_CPU (1ULL << 38) | 486 | #define BIC_RAM_J (1ULL << 40) |
| 482 | #define BIC_Mod_c6 (1ULL << 39) | 487 | #define BIC_Mod_c6 (1ULL << 41) |
| 483 | #define BIC_sysfs (1ULL << 40) | 488 | #define BIC_Totl_c0 (1ULL << 42) |
| 484 | #define BIC_Totl_c0 (1ULL << 41) | 489 | #define BIC_Any_c0 (1ULL << 43) |
| 485 | #define BIC_Any_c0 (1ULL << 42) | 490 | #define BIC_GFX_c0 (1ULL << 44) |
| 486 | #define BIC_GFX_c0 (1ULL << 43) | 491 | #define BIC_CPUGFX (1ULL << 45) |
| 487 | #define BIC_CPUGFX (1ULL << 44) | 492 | #define BIC_Core (1ULL << 46) |
| 488 | #define BIC_Node (1ULL << 45) | 493 | #define BIC_CPU (1ULL << 47) |
| 489 | 494 | #define BIC_APIC (1ULL << 48) | |
| 490 | #define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD) | 495 | #define BIC_X2APIC (1ULL << 49) |
| 496 | |||
| 497 | #define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC) | ||
| 491 | 498 | ||
| 492 | unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT); | 499 | unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT); |
| 493 | unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs; | 500 | unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC; |
| 494 | 501 | ||
| 495 | #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) | 502 | #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) |
| 496 | #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) | 503 | #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) |
| @@ -517,17 +524,34 @@ void help(void) | |||
| 517 | "when COMMAND completes.\n" | 524 | "when COMMAND completes.\n" |
| 518 | "If no COMMAND is specified, turbostat wakes every 5-seconds\n" | 525 | "If no COMMAND is specified, turbostat wakes every 5-seconds\n" |
| 519 | "to print statistics, until interrupted.\n" | 526 | "to print statistics, until interrupted.\n" |
| 520 | "--add add a counter\n" | 527 | " -a, --add add a counter\n" |
| 521 | " eg. --add msr0x10,u64,cpu,delta,MY_TSC\n" | 528 | " eg. --add msr0x10,u64,cpu,delta,MY_TSC\n" |
| 522 | "--cpu cpu-set limit output to summary plus cpu-set:\n" | 529 | " -c, --cpu cpu-set limit output to summary plus cpu-set:\n" |
| 523 | " {core | package | j,k,l..m,n-p }\n" | 530 | " {core | package | j,k,l..m,n-p }\n" |
| 524 | "--quiet skip decoding system configuration header\n" | 531 | " -d, --debug displays usec, Time_Of_Day_Seconds and more debugging\n" |
| 525 | "--interval sec.subsec Override default 5-second measurement interval\n" | 532 | " -D, --Dump displays the raw counter values\n" |
| 526 | "--help print this help message\n" | 533 | " -e, --enable [all | column]\n" |
| 527 | "--list list column headers only\n" | 534 | " shows all or the specified disabled column\n" |
| 528 | "--num_iterations num number of the measurement iterations\n" | 535 | " -H, --hide [column|column,column,...]\n" |
| 529 | "--out file create or truncate \"file\" for all output\n" | 536 | " hide the specified column(s)\n" |
| 530 | "--version print version information\n" | 537 | " -i, --interval sec.subsec\n" |
| 538 | " Override default 5-second measurement interval\n" | ||
| 539 | " -J, --Joules displays energy in Joules instead of Watts\n" | ||
| 540 | " -l, --list list column headers only\n" | ||
| 541 | " -n, --num_iterations num\n" | ||
| 542 | " number of the measurement iterations\n" | ||
| 543 | " -o, --out file\n" | ||
| 544 | " create or truncate \"file\" for all output\n" | ||
| 545 | " -q, --quiet skip decoding system configuration header\n" | ||
| 546 | " -s, --show [column|column,column,...]\n" | ||
| 547 | " show only the specified column(s)\n" | ||
| 548 | " -S, --Summary\n" | ||
| 549 | " limits output to 1-line system summary per interval\n" | ||
| 550 | " -T, --TCC temperature\n" | ||
| 551 | " sets the Thermal Control Circuit temperature in\n" | ||
| 552 | " degrees Celsius\n" | ||
| 553 | " -h, --help print this help message\n" | ||
| 554 | " -v, --version print version information\n" | ||
| 531 | "\n" | 555 | "\n" |
| 532 | "For more help, run \"man turbostat\"\n"); | 556 | "For more help, run \"man turbostat\"\n"); |
| 533 | } | 557 | } |
| @@ -601,6 +625,10 @@ void print_header(char *delim) | |||
| 601 | outp += sprintf(outp, "%sCore", (printed++ ? delim : "")); | 625 | outp += sprintf(outp, "%sCore", (printed++ ? delim : "")); |
| 602 | if (DO_BIC(BIC_CPU)) | 626 | if (DO_BIC(BIC_CPU)) |
| 603 | outp += sprintf(outp, "%sCPU", (printed++ ? delim : "")); | 627 | outp += sprintf(outp, "%sCPU", (printed++ ? delim : "")); |
| 628 | if (DO_BIC(BIC_APIC)) | ||
| 629 | outp += sprintf(outp, "%sAPIC", (printed++ ? delim : "")); | ||
| 630 | if (DO_BIC(BIC_X2APIC)) | ||
| 631 | outp += sprintf(outp, "%sX2APIC", (printed++ ? delim : "")); | ||
| 604 | if (DO_BIC(BIC_Avg_MHz)) | 632 | if (DO_BIC(BIC_Avg_MHz)) |
| 605 | outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : "")); | 633 | outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : "")); |
| 606 | if (DO_BIC(BIC_Busy)) | 634 | if (DO_BIC(BIC_Busy)) |
| @@ -880,6 +908,10 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 880 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | 908 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); |
| 881 | if (DO_BIC(BIC_CPU)) | 909 | if (DO_BIC(BIC_CPU)) |
| 882 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | 910 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); |
| 911 | if (DO_BIC(BIC_APIC)) | ||
| 912 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | ||
| 913 | if (DO_BIC(BIC_X2APIC)) | ||
| 914 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | ||
| 883 | } else { | 915 | } else { |
| 884 | if (DO_BIC(BIC_Package)) { | 916 | if (DO_BIC(BIC_Package)) { |
| 885 | if (p) | 917 | if (p) |
| @@ -904,6 +936,10 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 904 | } | 936 | } |
| 905 | if (DO_BIC(BIC_CPU)) | 937 | if (DO_BIC(BIC_CPU)) |
| 906 | outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id); | 938 | outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id); |
| 939 | if (DO_BIC(BIC_APIC)) | ||
| 940 | outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->apic_id); | ||
| 941 | if (DO_BIC(BIC_X2APIC)) | ||
| 942 | outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->x2apic_id); | ||
| 907 | } | 943 | } |
| 908 | 944 | ||
| 909 | if (DO_BIC(BIC_Avg_MHz)) | 945 | if (DO_BIC(BIC_Avg_MHz)) |
| @@ -1231,6 +1267,12 @@ delta_thread(struct thread_data *new, struct thread_data *old, | |||
| 1231 | int i; | 1267 | int i; |
| 1232 | struct msr_counter *mp; | 1268 | struct msr_counter *mp; |
| 1233 | 1269 | ||
| 1270 | /* cpuid is run just the first time; copy its results forward */ | ||
| 1271 | if (DO_BIC(BIC_APIC)) | ||
| 1272 | new->apic_id = old->apic_id; | ||
| 1273 | if (DO_BIC(BIC_X2APIC)) | ||
| 1274 | new->x2apic_id = old->x2apic_id; | ||
| 1275 | |||
| 1234 | /* | 1276 | /* |
| 1235 | * the timestamps from start of measurement interval are in "old" | 1277 | * the timestamps from start of measurement interval are in "old" |
| 1236 | * the timestamp from end of measurement interval are in "new" | 1278 | * the timestamp from end of measurement interval are in "new" |
| @@ -1393,6 +1435,12 @@ int sum_counters(struct thread_data *t, struct core_data *c, | |||
| 1393 | int i; | 1435 | int i; |
| 1394 | struct msr_counter *mp; | 1436 | struct msr_counter *mp; |
| 1395 | 1437 | ||
| 1438 | /* copy the unchanging apic_ids */ | ||
| 1439 | if (DO_BIC(BIC_APIC)) | ||
| 1440 | average.threads.apic_id = t->apic_id; | ||
| 1441 | if (DO_BIC(BIC_X2APIC)) | ||
| 1442 | average.threads.x2apic_id = t->x2apic_id; | ||
| 1443 | |||
| 1396 | /* remember first tv_begin */ | 1444 | /* remember first tv_begin */ |
| 1397 | if (average.threads.tv_begin.tv_sec == 0) | 1445 | if (average.threads.tv_begin.tv_sec == 0) |
| 1398 | average.threads.tv_begin = t->tv_begin; | 1446 | average.threads.tv_begin = t->tv_begin; |
| @@ -1619,6 +1667,34 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp) | |||
| 1619 | return 0; | 1667 | return 0; |
| 1620 | } | 1668 | } |
| 1621 | 1669 | ||
| 1670 | void get_apic_id(struct thread_data *t) | ||
| 1671 | { | ||
| 1672 | unsigned int eax, ebx, ecx, edx, max_level; | ||
| 1673 | |||
| 1674 | eax = ebx = ecx = edx = 0; | ||
| 1675 | |||
| 1676 | if (!genuine_intel) | ||
| 1677 | return; | ||
| 1678 | |||
| 1679 | __cpuid(0, max_level, ebx, ecx, edx); | ||
| 1680 | |||
| 1681 | __cpuid(1, eax, ebx, ecx, edx); | ||
| 1682 | t->apic_id = (ebx >> 24) & 0xf; | ||
| 1683 | |||
| 1684 | if (max_level < 0xb) | ||
| 1685 | return; | ||
| 1686 | |||
| 1687 | if (!DO_BIC(BIC_X2APIC)) | ||
| 1688 | return; | ||
| 1689 | |||
| 1690 | ecx = 0; | ||
| 1691 | __cpuid(0xb, eax, ebx, ecx, edx); | ||
| 1692 | t->x2apic_id = edx; | ||
| 1693 | |||
| 1694 | if (debug && (t->apic_id != t->x2apic_id)) | ||
| 1695 | fprintf(stderr, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id); | ||
| 1696 | } | ||
| 1697 | |||
| 1622 | /* | 1698 | /* |
| 1623 | * get_counters(...) | 1699 | * get_counters(...) |
| 1624 | * migrate to cpu | 1700 | * migrate to cpu |
| @@ -1632,7 +1708,6 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) | |||
| 1632 | struct msr_counter *mp; | 1708 | struct msr_counter *mp; |
| 1633 | int i; | 1709 | int i; |
| 1634 | 1710 | ||
| 1635 | |||
| 1636 | gettimeofday(&t->tv_begin, (struct timezone *)NULL); | 1711 | gettimeofday(&t->tv_begin, (struct timezone *)NULL); |
| 1637 | 1712 | ||
| 1638 | if (cpu_migrate(cpu)) { | 1713 | if (cpu_migrate(cpu)) { |
| @@ -1640,6 +1715,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) | |||
| 1640 | return -1; | 1715 | return -1; |
| 1641 | } | 1716 | } |
| 1642 | 1717 | ||
| 1718 | if (first_counter_read) | ||
| 1719 | get_apic_id(t); | ||
| 1643 | retry: | 1720 | retry: |
| 1644 | t->tsc = rdtsc(); /* we are running on local CPU of interest */ | 1721 | t->tsc = rdtsc(); /* we are running on local CPU of interest */ |
| 1645 | 1722 | ||
| @@ -2432,6 +2509,12 @@ void set_node_data(void) | |||
| 2432 | if (pni[pkg].count > topo.nodes_per_pkg) | 2509 | if (pni[pkg].count > topo.nodes_per_pkg) |
| 2433 | topo.nodes_per_pkg = pni[0].count; | 2510 | topo.nodes_per_pkg = pni[0].count; |
| 2434 | 2511 | ||
| 2512 | /* Fake 1 node per pkg for machines that don't | ||
| 2513 | * expose nodes, to avoid -nan results | ||
| 2514 | */ | ||
| 2515 | if (topo.nodes_per_pkg == 0) | ||
| 2516 | topo.nodes_per_pkg = 1; | ||
| 2517 | |||
| 2435 | for (cpu = 0; cpu < topo.num_cpus; cpu++) { | 2518 | for (cpu = 0; cpu < topo.num_cpus; cpu++) { |
| 2436 | pkg = cpus[cpu].physical_package_id; | 2519 | pkg = cpus[cpu].physical_package_id; |
| 2437 | node = cpus[cpu].physical_node_id; | 2520 | node = cpus[cpu].physical_node_id; |
| @@ -2879,6 +2962,7 @@ void do_sleep(void) | |||
| 2879 | } | 2962 | } |
| 2880 | } | 2963 | } |
| 2881 | 2964 | ||
| 2965 | |||
| 2882 | void turbostat_loop() | 2966 | void turbostat_loop() |
| 2883 | { | 2967 | { |
| 2884 | int retval; | 2968 | int retval; |
| @@ -2892,6 +2976,7 @@ restart: | |||
| 2892 | 2976 | ||
| 2893 | snapshot_proc_sysfs_files(); | 2977 | snapshot_proc_sysfs_files(); |
| 2894 | retval = for_all_cpus(get_counters, EVEN_COUNTERS); | 2978 | retval = for_all_cpus(get_counters, EVEN_COUNTERS); |
| 2979 | first_counter_read = 0; | ||
| 2895 | if (retval < -1) { | 2980 | if (retval < -1) { |
| 2896 | exit(retval); | 2981 | exit(retval); |
| 2897 | } else if (retval == -1) { | 2982 | } else if (retval == -1) { |
| @@ -4392,7 +4477,7 @@ void process_cpuid() | |||
| 4392 | if (!quiet) { | 4477 | if (!quiet) { |
| 4393 | fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", | 4478 | fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", |
| 4394 | max_level, family, model, stepping, family, model, stepping); | 4479 | max_level, family, model, stepping, family, model, stepping); |
| 4395 | fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n", | 4480 | fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n", |
| 4396 | ecx & (1 << 0) ? "SSE3" : "-", | 4481 | ecx & (1 << 0) ? "SSE3" : "-", |
| 4397 | ecx & (1 << 3) ? "MONITOR" : "-", | 4482 | ecx & (1 << 3) ? "MONITOR" : "-", |
| 4398 | ecx & (1 << 6) ? "SMX" : "-", | 4483 | ecx & (1 << 6) ? "SMX" : "-", |
| @@ -4401,6 +4486,7 @@ void process_cpuid() | |||
| 4401 | edx & (1 << 4) ? "TSC" : "-", | 4486 | edx & (1 << 4) ? "TSC" : "-", |
| 4402 | edx & (1 << 5) ? "MSR" : "-", | 4487 | edx & (1 << 5) ? "MSR" : "-", |
| 4403 | edx & (1 << 22) ? "ACPI-TM" : "-", | 4488 | edx & (1 << 22) ? "ACPI-TM" : "-", |
| 4489 | edx & (1 << 28) ? "HT" : "-", | ||
| 4404 | edx & (1 << 29) ? "TM" : "-"); | 4490 | edx & (1 << 29) ? "TM" : "-"); |
| 4405 | } | 4491 | } |
| 4406 | 4492 | ||
| @@ -4652,7 +4738,6 @@ void process_cpuid() | |||
| 4652 | return; | 4738 | return; |
| 4653 | } | 4739 | } |
| 4654 | 4740 | ||
| 4655 | |||
| 4656 | /* | 4741 | /* |
| 4657 | * in /dev/cpu/ return success for names that are numbers | 4742 | * in /dev/cpu/ return success for names that are numbers |
| 4658 | * ie. filter out ".", "..", "microcode". | 4743 | * ie. filter out ".", "..", "microcode". |
| @@ -4842,6 +4927,13 @@ void init_counter(struct thread_data *thread_base, struct core_data *core_base, | |||
| 4842 | struct core_data *c; | 4927 | struct core_data *c; |
| 4843 | struct pkg_data *p; | 4928 | struct pkg_data *p; |
| 4844 | 4929 | ||
| 4930 | |||
| 4931 | /* Workaround for systems where physical_node_id==-1 | ||
| 4932 | * and logical_node_id==(-1 - topo.num_cpus) | ||
| 4933 | */ | ||
| 4934 | if (node_id < 0) | ||
| 4935 | node_id = 0; | ||
| 4936 | |||
| 4845 | t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id); | 4937 | t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id); |
| 4846 | c = GET_CORE(core_base, core_id, node_id, pkg_id); | 4938 | c = GET_CORE(core_base, core_id, node_id, pkg_id); |
| 4847 | p = GET_PKG(pkg_base, pkg_id); | 4939 | p = GET_PKG(pkg_base, pkg_id); |
| @@ -4946,6 +5038,7 @@ int fork_it(char **argv) | |||
| 4946 | 5038 | ||
| 4947 | snapshot_proc_sysfs_files(); | 5039 | snapshot_proc_sysfs_files(); |
| 4948 | status = for_all_cpus(get_counters, EVEN_COUNTERS); | 5040 | status = for_all_cpus(get_counters, EVEN_COUNTERS); |
| 5041 | first_counter_read = 0; | ||
| 4949 | if (status) | 5042 | if (status) |
| 4950 | exit(status); | 5043 | exit(status); |
| 4951 | /* clear affinity side-effect of get_counters() */ | 5044 | /* clear affinity side-effect of get_counters() */ |
| @@ -5009,7 +5102,7 @@ int get_and_dump_counters(void) | |||
| 5009 | } | 5102 | } |
| 5010 | 5103 | ||
| 5011 | void print_version() { | 5104 | void print_version() { |
| 5012 | fprintf(outf, "turbostat version 18.06.01" | 5105 | fprintf(outf, "turbostat version 18.06.20" |
| 5013 | " - Len Brown <lenb@kernel.org>\n"); | 5106 | " - Len Brown <lenb@kernel.org>\n"); |
| 5014 | } | 5107 | } |
| 5015 | 5108 | ||
| @@ -5381,7 +5474,7 @@ void cmdline(int argc, char **argv) | |||
| 5381 | break; | 5474 | break; |
| 5382 | case 'e': | 5475 | case 'e': |
| 5383 | /* --enable specified counter */ | 5476 | /* --enable specified counter */ |
| 5384 | bic_enabled |= bic_lookup(optarg, SHOW_LIST); | 5477 | bic_enabled = bic_enabled | bic_lookup(optarg, SHOW_LIST); |
| 5385 | break; | 5478 | break; |
| 5386 | case 'd': | 5479 | case 'd': |
| 5387 | debug++; | 5480 | debug++; |
| @@ -5465,7 +5558,6 @@ void cmdline(int argc, char **argv) | |||
| 5465 | int main(int argc, char **argv) | 5558 | int main(int argc, char **argv) |
| 5466 | { | 5559 | { |
| 5467 | outf = stderr; | 5560 | outf = stderr; |
| 5468 | |||
| 5469 | cmdline(argc, argv); | 5561 | cmdline(argc, argv); |
| 5470 | 5562 | ||
| 5471 | if (!quiet) | 5563 | if (!quiet) |
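The turbostat changes above add optional APIC and X2APIC columns, reading the IDs once per CPU (guarded by first_counter_read) in get_apic_id() via CPUID leaf 1 (initial APIC ID in EBX[31:24]) and leaf 0xB (x2APIC ID in EDX). The standalone sketch below exercises the same leaves through the compiler's <cpuid.h> helpers; it is illustrative only, not turbostat code, and assumes an x86 host.

    /*
     * Standalone sketch of the CPUID lookups used by get_apic_id() above,
     * built on GCC/Clang's <cpuid.h>.  Not turbostat code.
     */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx, max_level;

        __cpuid(0, max_level, ebx, ecx, edx);

        __cpuid(1, eax, ebx, ecx, edx);
        printf("initial APIC ID: %u\n", (ebx >> 24) & 0xff);    /* CPUID.01H:EBX[31:24] */

        if (max_level >= 0xb) {
            /* leaf 0xB takes the sub-leaf in ECX; sub-leaf 0 is enough for the ID */
            __cpuid_count(0xb, 0, eax, ebx, ecx, edx);
            printf("x2APIC ID: %u\n", edx);     /* CPUID.0BH:EDX */
        }
        return 0;
    }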
diff --git a/tools/testing/selftests/pstore/pstore_post_reboot_tests b/tools/testing/selftests/pstore/pstore_post_reboot_tests index 6ccb154cb4aa..22f8df1ad7d4 100755 --- a/tools/testing/selftests/pstore/pstore_post_reboot_tests +++ b/tools/testing/selftests/pstore/pstore_post_reboot_tests | |||
| @@ -7,13 +7,16 @@ | |||
| 7 | # | 7 | # |
| 8 | # Released under the terms of the GPL v2. | 8 | # Released under the terms of the GPL v2. |
| 9 | 9 | ||
| 10 | # Kselftest framework requirement - SKIP code is 4. | ||
| 11 | ksft_skip=4 | ||
| 12 | |||
| 10 | . ./common_tests | 13 | . ./common_tests |
| 11 | 14 | ||
| 12 | if [ -e $REBOOT_FLAG ]; then | 15 | if [ -e $REBOOT_FLAG ]; then |
| 13 | rm $REBOOT_FLAG | 16 | rm $REBOOT_FLAG |
| 14 | else | 17 | else |
| 15 | prlog "pstore_crash_test has not been executed yet. we skip further tests." | 18 | prlog "pstore_crash_test has not been executed yet. we skip further tests." |
| 16 | exit 0 | 19 | exit $ksft_skip |
| 17 | fi | 20 | fi |
| 18 | 21 | ||
| 19 | prlog -n "Mounting pstore filesystem ... " | 22 | prlog -n "Mounting pstore filesystem ... " |
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c index 6a9f602a8718..615252331813 100644 --- a/tools/testing/selftests/rseq/param_test.c +++ b/tools/testing/selftests/rseq/param_test.c | |||
| @@ -137,6 +137,30 @@ unsigned int yield_mod_cnt, nr_abort; | |||
| 137 | "subic. %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG ", 1\n\t" \ | 137 | "subic. %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG ", 1\n\t" \ |
| 138 | "bne 222b\n\t" \ | 138 | "bne 222b\n\t" \ |
| 139 | "333:\n\t" | 139 | "333:\n\t" |
| 140 | |||
| 141 | #elif defined(__mips__) | ||
| 142 | |||
| 143 | #define RSEQ_INJECT_INPUT \ | ||
| 144 | , [loop_cnt_1]"m"(loop_cnt[1]) \ | ||
| 145 | , [loop_cnt_2]"m"(loop_cnt[2]) \ | ||
| 146 | , [loop_cnt_3]"m"(loop_cnt[3]) \ | ||
| 147 | , [loop_cnt_4]"m"(loop_cnt[4]) \ | ||
| 148 | , [loop_cnt_5]"m"(loop_cnt[5]) \ | ||
| 149 | , [loop_cnt_6]"m"(loop_cnt[6]) | ||
| 150 | |||
| 151 | #define INJECT_ASM_REG "$5" | ||
| 152 | |||
| 153 | #define RSEQ_INJECT_CLOBBER \ | ||
| 154 | , INJECT_ASM_REG | ||
| 155 | |||
| 156 | #define RSEQ_INJECT_ASM(n) \ | ||
| 157 | "lw " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \ | ||
| 158 | "beqz " INJECT_ASM_REG ", 333f\n\t" \ | ||
| 159 | "222:\n\t" \ | ||
| 160 | "addiu " INJECT_ASM_REG ", -1\n\t" \ | ||
| 161 | "bnez " INJECT_ASM_REG ", 222b\n\t" \ | ||
| 162 | "333:\n\t" | ||
| 163 | |||
| 140 | #else | 164 | #else |
| 141 | #error unsupported target | 165 | #error unsupported target |
| 142 | #endif | 166 | #endif |
diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h new file mode 100644 index 000000000000..7f48ecf46994 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-mips.h | |||
| @@ -0,0 +1,725 @@ | |||
| 1 | /* SPDX-License-Identifier: LGPL-2.1 OR MIT */ | ||
| 2 | /* | ||
| 3 | * Author: Paul Burton <paul.burton@mips.com> | ||
| 4 | * (C) Copyright 2018 MIPS Tech LLC | ||
| 5 | * | ||
| 6 | * Based on rseq-arm.h: | ||
| 7 | * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #define RSEQ_SIG 0x53053053 | ||
| 11 | |||
| 12 | #define rseq_smp_mb() __asm__ __volatile__ ("sync" ::: "memory") | ||
| 13 | #define rseq_smp_rmb() rseq_smp_mb() | ||
| 14 | #define rseq_smp_wmb() rseq_smp_mb() | ||
| 15 | |||
| 16 | #define rseq_smp_load_acquire(p) \ | ||
| 17 | __extension__ ({ \ | ||
| 18 | __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \ | ||
| 19 | rseq_smp_mb(); \ | ||
| 20 | ____p1; \ | ||
| 21 | }) | ||
| 22 | |||
| 23 | #define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb() | ||
| 24 | |||
| 25 | #define rseq_smp_store_release(p, v) \ | ||
| 26 | do { \ | ||
| 27 | rseq_smp_mb(); \ | ||
| 28 | RSEQ_WRITE_ONCE(*p, v); \ | ||
| 29 | } while (0) | ||
| 30 | |||
| 31 | #ifdef RSEQ_SKIP_FASTPATH | ||
| 32 | #include "rseq-skip.h" | ||
| 33 | #else /* !RSEQ_SKIP_FASTPATH */ | ||
| 34 | |||
| 35 | #if _MIPS_SZLONG == 64 | ||
| 36 | # define LONG ".dword" | ||
| 37 | # define LONG_LA "dla" | ||
| 38 | # define LONG_L "ld" | ||
| 39 | # define LONG_S "sd" | ||
| 40 | # define LONG_ADDI "daddiu" | ||
| 41 | # define U32_U64_PAD(x) x | ||
| 42 | #elif _MIPS_SZLONG == 32 | ||
| 43 | # define LONG ".word" | ||
| 44 | # define LONG_LA "la" | ||
| 45 | # define LONG_L "lw" | ||
| 46 | # define LONG_S "sw" | ||
| 47 | # define LONG_ADDI "addiu" | ||
| 48 | # ifdef __BIG_ENDIAN | ||
| 49 | # define U32_U64_PAD(x) "0x0, " x | ||
| 50 | # else | ||
| 51 | # define U32_U64_PAD(x) x ", 0x0" | ||
| 52 | # endif | ||
| 53 | #else | ||
| 54 | # error unsupported _MIPS_SZLONG | ||
| 55 | #endif | ||
| 56 | |||
| 57 | #define __RSEQ_ASM_DEFINE_TABLE(version, flags, start_ip, \ | ||
| 58 | post_commit_offset, abort_ip) \ | ||
| 59 | ".pushsection __rseq_table, \"aw\"\n\t" \ | ||
| 60 | ".balign 32\n\t" \ | ||
| 61 | ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ | ||
| 62 | LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \ | ||
| 63 | LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \ | ||
| 64 | LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \ | ||
| 65 | ".popsection\n\t" | ||
| 66 | |||
| 67 | #define RSEQ_ASM_DEFINE_TABLE(start_ip, post_commit_ip, abort_ip) \ | ||
| 68 | __RSEQ_ASM_DEFINE_TABLE(0x0, 0x0, start_ip, \ | ||
| 69 | (post_commit_ip - start_ip), abort_ip) | ||
| 70 | |||
| 71 | #define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \ | ||
| 72 | RSEQ_INJECT_ASM(1) \ | ||
| 73 | LONG_LA " $4, " __rseq_str(cs_label) "\n\t" \ | ||
| 74 | LONG_S " $4, %[" __rseq_str(rseq_cs) "]\n\t" \ | ||
| 75 | __rseq_str(label) ":\n\t" | ||
| 76 | |||
| 77 | #define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \ | ||
| 78 | RSEQ_INJECT_ASM(2) \ | ||
| 79 | "lw $4, %[" __rseq_str(current_cpu_id) "]\n\t" \ | ||
| 80 | "bne $4, %[" __rseq_str(cpu_id) "], " __rseq_str(label) "\n\t" | ||
| 81 | |||
| 82 | #define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \ | ||
| 83 | abort_label, version, flags, \ | ||
| 84 | start_ip, post_commit_offset, abort_ip) \ | ||
| 85 | ".balign 32\n\t" \ | ||
| 86 | __rseq_str(table_label) ":\n\t" \ | ||
| 87 | ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ | ||
| 88 | LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \ | ||
| 89 | LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \ | ||
| 90 | LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \ | ||
| 91 | ".word " __rseq_str(RSEQ_SIG) "\n\t" \ | ||
| 92 | __rseq_str(label) ":\n\t" \ | ||
| 93 | teardown \ | ||
| 94 | "b %l[" __rseq_str(abort_label) "]\n\t" | ||
| 95 | |||
| 96 | #define RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, abort_label, \ | ||
| 97 | start_ip, post_commit_ip, abort_ip) \ | ||
| 98 | __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \ | ||
| 99 | abort_label, 0x0, 0x0, start_ip, \ | ||
| 100 | (post_commit_ip - start_ip), abort_ip) | ||
| 101 | |||
| 102 | #define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \ | ||
| 103 | __rseq_str(label) ":\n\t" \ | ||
| 104 | teardown \ | ||
| 105 | "b %l[" __rseq_str(cmpfail_label) "]\n\t" | ||
| 106 | |||
| 107 | #define rseq_workaround_gcc_asm_size_guess() __asm__ __volatile__("") | ||
| 108 | |||
| 109 | static inline __attribute__((always_inline)) | ||
| 110 | int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) | ||
| 111 | { | ||
| 112 | RSEQ_INJECT_C(9) | ||
| 113 | |||
| 114 | rseq_workaround_gcc_asm_size_guess(); | ||
| 115 | __asm__ __volatile__ goto ( | ||
| 116 | RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ | ||
| 117 | /* Start rseq by storing table entry pointer into rseq_cs. */ | ||
| 118 | RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) | ||
| 119 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) | ||
| 120 | RSEQ_INJECT_ASM(3) | ||
| 121 | LONG_L " $4, %[v]\n\t" | ||
| 122 | "bne $4, %[expect], %l[cmpfail]\n\t" | ||
| 123 | RSEQ_INJECT_ASM(4) | ||
| 124 | #ifdef RSEQ_COMPARE_TWICE | ||
| 125 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) | ||
| 126 | LONG_L " $4, %[v]\n\t" | ||
| 127 | "bne $4, %[expect], %l[error2]\n\t" | ||
| 128 | #endif | ||
| 129 | /* final store */ | ||
| 130 | LONG_S " %[newv], %[v]\n\t" | ||
| 131 | "2:\n\t" | ||
| 132 | RSEQ_INJECT_ASM(5) | ||
| 133 | "b 5f\n\t" | ||
| 134 | RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) | ||
| 135 | "5:\n\t" | ||
| 136 | : /* gcc asm goto does not allow outputs */ | ||
| 137 | : [cpu_id] "r" (cpu), | ||
| 138 | [current_cpu_id] "m" (__rseq_abi.cpu_id), | ||
| 139 | [rseq_cs] "m" (__rseq_abi.rseq_cs), | ||
| 140 | [v] "m" (*v), | ||
| 141 | [expect] "r" (expect), | ||
| 142 | [newv] "r" (newv) | ||
| 143 | RSEQ_INJECT_INPUT | ||
| 144 | : "$4", "memory" | ||
| 145 | RSEQ_INJECT_CLOBBER | ||
| 146 | : abort, cmpfail | ||
| 147 | #ifdef RSEQ_COMPARE_TWICE | ||
| 148 | , error1, error2 | ||
| 149 | #endif | ||
| 150 | ); | ||
| 151 | rseq_workaround_gcc_asm_size_guess(); | ||
| 152 | return 0; | ||
| 153 | abort: | ||
| 154 | rseq_workaround_gcc_asm_size_guess(); | ||
| 155 | RSEQ_INJECT_FAILED | ||
| 156 | return -1; | ||
| 157 | cmpfail: | ||
| 158 | rseq_workaround_gcc_asm_size_guess(); | ||
| 159 | return 1; | ||
| 160 | #ifdef RSEQ_COMPARE_TWICE | ||
| 161 | error1: | ||
| 162 | rseq_bug("cpu_id comparison failed"); | ||
| 163 | error2: | ||
| 164 | rseq_bug("expected value comparison failed"); | ||
| 165 | #endif | ||
| 166 | } | ||
| 167 | |||
| 168 | static inline __attribute__((always_inline)) | ||
| 169 | int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, | ||
| 170 | off_t voffp, intptr_t *load, int cpu) | ||
| 171 | { | ||
| 172 | RSEQ_INJECT_C(9) | ||
| 173 | |||
| 174 | rseq_workaround_gcc_asm_size_guess(); | ||
| 175 | __asm__ __volatile__ goto ( | ||
| 176 | RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ | ||
| 177 | /* Start rseq by storing table entry pointer into rseq_cs. */ | ||
| 178 | RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) | ||
| 179 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) | ||
| 180 | RSEQ_INJECT_ASM(3) | ||
| 181 | LONG_L " $4, %[v]\n\t" | ||
| 182 | "beq $4, %[expectnot], %l[cmpfail]\n\t" | ||
| 183 | RSEQ_INJECT_ASM(4) | ||
| 184 | #ifdef RSEQ_COMPARE_TWICE | ||
| 185 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) | ||
| 186 | LONG_L " $4, %[v]\n\t" | ||
| 187 | "beq $4, %[expectnot], %l[error2]\n\t" | ||
| 188 | #endif | ||
| 189 | LONG_S " $4, %[load]\n\t" | ||
| 190 | LONG_ADDI " $4, %[voffp]\n\t" | ||
| 191 | LONG_L " $4, 0($4)\n\t" | ||
| 192 | /* final store */ | ||
| 193 | LONG_S " $4, %[v]\n\t" | ||
| 194 | "2:\n\t" | ||
| 195 | RSEQ_INJECT_ASM(5) | ||
| 196 | "b 5f\n\t" | ||
| 197 | RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) | ||
| 198 | "5:\n\t" | ||
| 199 | : /* gcc asm goto does not allow outputs */ | ||
| 200 | : [cpu_id] "r" (cpu), | ||
| 201 | [current_cpu_id] "m" (__rseq_abi.cpu_id), | ||
| 202 | [rseq_cs] "m" (__rseq_abi.rseq_cs), | ||
| 203 | /* final store input */ | ||
| 204 | [v] "m" (*v), | ||
| 205 | [expectnot] "r" (expectnot), | ||
| 206 | [voffp] "Ir" (voffp), | ||
| 207 | [load] "m" (*load) | ||
| 208 | RSEQ_INJECT_INPUT | ||
| 209 | : "$4", "memory" | ||
| 210 | RSEQ_INJECT_CLOBBER | ||
| 211 | : abort, cmpfail | ||
| 212 | #ifdef RSEQ_COMPARE_TWICE | ||
| 213 | , error1, error2 | ||
| 214 | #endif | ||
| 215 | ); | ||
| 216 | rseq_workaround_gcc_asm_size_guess(); | ||
| 217 | return 0; | ||
| 218 | abort: | ||
| 219 | rseq_workaround_gcc_asm_size_guess(); | ||
| 220 | RSEQ_INJECT_FAILED | ||
| 221 | return -1; | ||
| 222 | cmpfail: | ||
| 223 | rseq_workaround_gcc_asm_size_guess(); | ||
| 224 | return 1; | ||
| 225 | #ifdef RSEQ_COMPARE_TWICE | ||
| 226 | error1: | ||
| 227 | rseq_bug("cpu_id comparison failed"); | ||
| 228 | error2: | ||
| 229 | rseq_bug("expected value comparison failed"); | ||
| 230 | #endif | ||
| 231 | } | ||
| 232 | |||
| 233 | static inline __attribute__((always_inline)) | ||
| 234 | int rseq_addv(intptr_t *v, intptr_t count, int cpu) | ||
| 235 | { | ||
| 236 | RSEQ_INJECT_C(9) | ||
| 237 | |||
| 238 | rseq_workaround_gcc_asm_size_guess(); | ||
| 239 | __asm__ __volatile__ goto ( | ||
| 240 | RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ | ||
| 241 | /* Start rseq by storing table entry pointer into rseq_cs. */ | ||
| 242 | RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) | ||
| 243 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) | ||
| 244 | RSEQ_INJECT_ASM(3) | ||
| 245 | #ifdef RSEQ_COMPARE_TWICE | ||
| 246 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) | ||
| 247 | #endif | ||
| 248 | LONG_L " $4, %[v]\n\t" | ||
| 249 | LONG_ADDI " $4, %[count]\n\t" | ||
| 250 | /* final store */ | ||
| 251 | LONG_S " $4, %[v]\n\t" | ||
| 252 | "2:\n\t" | ||
| 253 | RSEQ_INJECT_ASM(4) | ||
| 254 | "b 5f\n\t" | ||
| 255 | RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) | ||
| 256 | "5:\n\t" | ||
| 257 | : /* gcc asm goto does not allow outputs */ | ||
| 258 | : [cpu_id] "r" (cpu), | ||
| 259 | [current_cpu_id] "m" (__rseq_abi.cpu_id), | ||
| 260 | [rseq_cs] "m" (__rseq_abi.rseq_cs), | ||
| 261 | [v] "m" (*v), | ||
| 262 | [count] "Ir" (count) | ||
| 263 | RSEQ_INJECT_INPUT | ||
| 264 | : "$4", "memory" | ||
| 265 | RSEQ_INJECT_CLOBBER | ||
| 266 | : abort | ||
| 267 | #ifdef RSEQ_COMPARE_TWICE | ||
| 268 | , error1 | ||
| 269 | #endif | ||
| 270 | ); | ||
| 271 | rseq_workaround_gcc_asm_size_guess(); | ||
| 272 | return 0; | ||
| 273 | abort: | ||
| 274 | rseq_workaround_gcc_asm_size_guess(); | ||
| 275 | RSEQ_INJECT_FAILED | ||
| 276 | return -1; | ||
| 277 | #ifdef RSEQ_COMPARE_TWICE | ||
| 278 | error1: | ||
| 279 | rseq_bug("cpu_id comparison failed"); | ||
| 280 | #endif | ||
| 281 | } | ||
| 282 | |||
| 283 | static inline __attribute__((always_inline)) | ||
| 284 | int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, | ||
| 285 | intptr_t *v2, intptr_t newv2, | ||
| 286 | intptr_t newv, int cpu) | ||
| 287 | { | ||
| 288 | RSEQ_INJECT_C(9) | ||
| 289 | |||
| 290 | rseq_workaround_gcc_asm_size_guess(); | ||
| 291 | __asm__ __volatile__ goto ( | ||
| 292 | RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ | ||
| 293 | /* Start rseq by storing table entry pointer into rseq_cs. */ | ||
| 294 | RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) | ||
| 295 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) | ||
| 296 | RSEQ_INJECT_ASM(3) | ||
| 297 | LONG_L " $4, %[v]\n\t" | ||
| 298 | "bne $4, %[expect], %l[cmpfail]\n\t" | ||
| 299 | RSEQ_INJECT_ASM(4) | ||
| 300 | #ifdef RSEQ_COMPARE_TWICE | ||
| 301 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) | ||
| 302 | LONG_L " $4, %[v]\n\t" | ||
| 303 | "bne $4, %[expect], %l[error2]\n\t" | ||
| 304 | #endif | ||
| 305 | /* try store */ | ||
| 306 | LONG_S " %[newv2], %[v2]\n\t" | ||
| 307 | RSEQ_INJECT_ASM(5) | ||
| 308 | /* final store */ | ||
| 309 | LONG_S " %[newv], %[v]\n\t" | ||
| 310 | "2:\n\t" | ||
| 311 | RSEQ_INJECT_ASM(6) | ||
| 312 | "b 5f\n\t" | ||
| 313 | RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) | ||
| 314 | "5:\n\t" | ||
| 315 | : /* gcc asm goto does not allow outputs */ | ||
| 316 | : [cpu_id] "r" (cpu), | ||
| 317 | [current_cpu_id] "m" (__rseq_abi.cpu_id), | ||
| 318 | [rseq_cs] "m" (__rseq_abi.rseq_cs), | ||
| 319 | /* try store input */ | ||
| 320 | [v2] "m" (*v2), | ||
| 321 | [newv2] "r" (newv2), | ||
| 322 | /* final store input */ | ||
| 323 | [v] "m" (*v), | ||
| 324 | [expect] "r" (expect), | ||
| 325 | [newv] "r" (newv) | ||
| 326 | RSEQ_INJECT_INPUT | ||
| 327 | : "$4", "memory" | ||
| 328 | RSEQ_INJECT_CLOBBER | ||
| 329 | : abort, cmpfail | ||
| 330 | #ifdef RSEQ_COMPARE_TWICE | ||
| 331 | , error1, error2 | ||
| 332 | #endif | ||
| 333 | ); | ||
| 334 | rseq_workaround_gcc_asm_size_guess(); | ||
| 335 | return 0; | ||
| 336 | abort: | ||
| 337 | rseq_workaround_gcc_asm_size_guess(); | ||
| 338 | RSEQ_INJECT_FAILED | ||
| 339 | return -1; | ||
| 340 | cmpfail: | ||
| 341 | rseq_workaround_gcc_asm_size_guess(); | ||
| 342 | return 1; | ||
| 343 | #ifdef RSEQ_COMPARE_TWICE | ||
| 344 | error1: | ||
| 345 | rseq_bug("cpu_id comparison failed"); | ||
| 346 | error2: | ||
| 347 | rseq_bug("expected value comparison failed"); | ||
| 348 | #endif | ||
| 349 | } | ||
| 350 | |||
| 351 | static inline __attribute__((always_inline)) | ||
| 352 | int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, | ||
| 353 | intptr_t *v2, intptr_t newv2, | ||
| 354 | intptr_t newv, int cpu) | ||
| 355 | { | ||
| 356 | RSEQ_INJECT_C(9) | ||
| 357 | |||
| 358 | rseq_workaround_gcc_asm_size_guess(); | ||
| 359 | __asm__ __volatile__ goto ( | ||
| 360 | RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ | ||
| 361 | /* Start rseq by storing table entry pointer into rseq_cs. */ | ||
| 362 | RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) | ||
| 363 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) | ||
| 364 | RSEQ_INJECT_ASM(3) | ||
| 365 | LONG_L " $4, %[v]\n\t" | ||
| 366 | "bne $4, %[expect], %l[cmpfail]\n\t" | ||
| 367 | RSEQ_INJECT_ASM(4) | ||
| 368 | #ifdef RSEQ_COMPARE_TWICE | ||
| 369 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) | ||
| 370 | LONG_L " $4, %[v]\n\t" | ||
| 371 | "bne $4, %[expect], %l[error2]\n\t" | ||
| 372 | #endif | ||
| 373 | /* try store */ | ||
| 374 | LONG_S " %[newv2], %[v2]\n\t" | ||
| 375 | RSEQ_INJECT_ASM(5) | ||
| 376 | "sync\n\t" /* full sync provides store-release */ | ||
| 377 | /* final store */ | ||
| 378 | LONG_S " %[newv], %[v]\n\t" | ||
| 379 | "2:\n\t" | ||
| 380 | RSEQ_INJECT_ASM(6) | ||
| 381 | "b 5f\n\t" | ||
| 382 | RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) | ||
| 383 | "5:\n\t" | ||
| 384 | : /* gcc asm goto does not allow outputs */ | ||
| 385 | : [cpu_id] "r" (cpu), | ||
| 386 | [current_cpu_id] "m" (__rseq_abi.cpu_id), | ||
| 387 | [rseq_cs] "m" (__rseq_abi.rseq_cs), | ||
| 388 | /* try store input */ | ||
| 389 | [v2] "m" (*v2), | ||
| 390 | [newv2] "r" (newv2), | ||
| 391 | /* final store input */ | ||
| 392 | [v] "m" (*v), | ||
| 393 | [expect] "r" (expect), | ||
| 394 | [newv] "r" (newv) | ||
| 395 | RSEQ_INJECT_INPUT | ||
| 396 | : "$4", "memory" | ||
| 397 | RSEQ_INJECT_CLOBBER | ||
| 398 | : abort, cmpfail | ||
| 399 | #ifdef RSEQ_COMPARE_TWICE | ||
| 400 | , error1, error2 | ||
| 401 | #endif | ||
| 402 | ); | ||
| 403 | rseq_workaround_gcc_asm_size_guess(); | ||
| 404 | return 0; | ||
| 405 | abort: | ||
| 406 | rseq_workaround_gcc_asm_size_guess(); | ||
| 407 | RSEQ_INJECT_FAILED | ||
| 408 | return -1; | ||
| 409 | cmpfail: | ||
| 410 | rseq_workaround_gcc_asm_size_guess(); | ||
| 411 | return 1; | ||
| 412 | #ifdef RSEQ_COMPARE_TWICE | ||
| 413 | error1: | ||
| 414 | rseq_bug("cpu_id comparison failed"); | ||
| 415 | error2: | ||
| 416 | rseq_bug("expected value comparison failed"); | ||
| 417 | #endif | ||
| 418 | } | ||
| 419 | |||
| 420 | static inline __attribute__((always_inline)) | ||
| 421 | int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, | ||
| 422 | intptr_t *v2, intptr_t expect2, | ||
| 423 | intptr_t newv, int cpu) | ||
| 424 | { | ||
| 425 | RSEQ_INJECT_C(9) | ||
| 426 | |||
| 427 | rseq_workaround_gcc_asm_size_guess(); | ||
| 428 | __asm__ __volatile__ goto ( | ||
| 429 | RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ | ||
| 430 | /* Start rseq by storing table entry pointer into rseq_cs. */ | ||
| 431 | RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) | ||
| 432 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) | ||
| 433 | RSEQ_INJECT_ASM(3) | ||
| 434 | LONG_L " $4, %[v]\n\t" | ||
| 435 | "bne $4, %[expect], %l[cmpfail]\n\t" | ||
| 436 | RSEQ_INJECT_ASM(4) | ||
| 437 | LONG_L " $4, %[v2]\n\t" | ||
| 438 | "bne $4, %[expect2], %l[cmpfail]\n\t" | ||
| 439 | RSEQ_INJECT_ASM(5) | ||
| 440 | #ifdef RSEQ_COMPARE_TWICE | ||
| 441 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) | ||
| 442 | LONG_L " $4, %[v]\n\t" | ||
| 443 | "bne $4, %[expect], %l[error2]\n\t" | ||
| 444 | LONG_L " $4, %[v2]\n\t" | ||
| 445 | "bne $4, %[expect2], %l[error3]\n\t" | ||
| 446 | #endif | ||
| 447 | /* final store */ | ||
| 448 | LONG_S " %[newv], %[v]\n\t" | ||
| 449 | "2:\n\t" | ||
| 450 | RSEQ_INJECT_ASM(6) | ||
| 451 | "b 5f\n\t" | ||
| 452 | RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) | ||
| 453 | "5:\n\t" | ||
| 454 | : /* gcc asm goto does not allow outputs */ | ||
| 455 | : [cpu_id] "r" (cpu), | ||
| 456 | [current_cpu_id] "m" (__rseq_abi.cpu_id), | ||
| 457 | [rseq_cs] "m" (__rseq_abi.rseq_cs), | ||
| 458 | /* cmp2 input */ | ||
| 459 | [v2] "m" (*v2), | ||
| 460 | [expect2] "r" (expect2), | ||
| 461 | /* final store input */ | ||
| 462 | [v] "m" (*v), | ||
| 463 | [expect] "r" (expect), | ||
| 464 | [newv] "r" (newv) | ||
| 465 | RSEQ_INJECT_INPUT | ||
| 466 | : "$4", "memory" | ||
| 467 | RSEQ_INJECT_CLOBBER | ||
| 468 | : abort, cmpfail | ||
| 469 | #ifdef RSEQ_COMPARE_TWICE | ||
| 470 | , error1, error2, error3 | ||
| 471 | #endif | ||
| 472 | ); | ||
| 473 | rseq_workaround_gcc_asm_size_guess(); | ||
| 474 | return 0; | ||
| 475 | abort: | ||
| 476 | rseq_workaround_gcc_asm_size_guess(); | ||
| 477 | RSEQ_INJECT_FAILED | ||
| 478 | return -1; | ||
| 479 | cmpfail: | ||
| 480 | rseq_workaround_gcc_asm_size_guess(); | ||
| 481 | return 1; | ||
| 482 | #ifdef RSEQ_COMPARE_TWICE | ||
| 483 | error1: | ||
| 484 | rseq_bug("cpu_id comparison failed"); | ||
| 485 | error2: | ||
| 486 | rseq_bug("1st expected value comparison failed"); | ||
| 487 | error3: | ||
| 488 | rseq_bug("2nd expected value comparison failed"); | ||
| 489 | #endif | ||
| 490 | } | ||
| 491 | |||
| 492 | static inline __attribute__((always_inline)) | ||
| 493 | int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, | ||
| 494 | void *dst, void *src, size_t len, | ||
| 495 | intptr_t newv, int cpu) | ||
| 496 | { | ||
| 497 | uintptr_t rseq_scratch[3]; | ||
| 498 | |||
| 499 | RSEQ_INJECT_C(9) | ||
| 500 | |||
| 501 | rseq_workaround_gcc_asm_size_guess(); | ||
| 502 | __asm__ __volatile__ goto ( | ||
| 503 | RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ | ||
| 504 | LONG_S " %[src], %[rseq_scratch0]\n\t" | ||
| 505 | LONG_S " %[dst], %[rseq_scratch1]\n\t" | ||
| 506 | LONG_S " %[len], %[rseq_scratch2]\n\t" | ||
| 507 | /* Start rseq by storing table entry pointer into rseq_cs. */ | ||
| 508 | RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) | ||
| 509 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) | ||
| 510 | RSEQ_INJECT_ASM(3) | ||
| 511 | LONG_L " $4, %[v]\n\t" | ||
| 512 | "bne $4, %[expect], 5f\n\t" | ||
| 513 | RSEQ_INJECT_ASM(4) | ||
| 514 | #ifdef RSEQ_COMPARE_TWICE | ||
| 515 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f) | ||
| 516 | LONG_L " $4, %[v]\n\t" | ||
| 517 | "bne $4, %[expect], 7f\n\t" | ||
| 518 | #endif | ||
| 519 | /* try memcpy */ | ||
| 520 | "beqz %[len], 333f\n\t" \ | ||
| 521 | "222:\n\t" \ | ||
| 522 | "lb $4, 0(%[src])\n\t" \ | ||
| 523 | "sb $4, 0(%[dst])\n\t" \ | ||
| 524 | LONG_ADDI " %[src], 1\n\t" \ | ||
| 525 | LONG_ADDI " %[dst], 1\n\t" \ | ||
| 526 | LONG_ADDI " %[len], -1\n\t" \ | ||
| 527 | "bnez %[len], 222b\n\t" \ | ||
| 528 | "333:\n\t" \ | ||
| 529 | RSEQ_INJECT_ASM(5) | ||
| 530 | /* final store */ | ||
| 531 | LONG_S " %[newv], %[v]\n\t" | ||
| 532 | "2:\n\t" | ||
| 533 | RSEQ_INJECT_ASM(6) | ||
| 534 | /* teardown */ | ||
| 535 | LONG_L " %[len], %[rseq_scratch2]\n\t" | ||
| 536 | LONG_L " %[dst], %[rseq_scratch1]\n\t" | ||
| 537 | LONG_L " %[src], %[rseq_scratch0]\n\t" | ||
| 538 | "b 8f\n\t" | ||
| 539 | RSEQ_ASM_DEFINE_ABORT(3, 4, | ||
| 540 | /* teardown */ | ||
| 541 | LONG_L " %[len], %[rseq_scratch2]\n\t" | ||
| 542 | LONG_L " %[dst], %[rseq_scratch1]\n\t" | ||
| 543 | LONG_L " %[src], %[rseq_scratch0]\n\t", | ||
| 544 | abort, 1b, 2b, 4f) | ||
| 545 | RSEQ_ASM_DEFINE_CMPFAIL(5, | ||
| 546 | /* teardown */ | ||
| 547 | LONG_L " %[len], %[rseq_scratch2]\n\t" | ||
| 548 | LONG_L " %[dst], %[rseq_scratch1]\n\t" | ||
| 549 | LONG_L " %[src], %[rseq_scratch0]\n\t", | ||
| 550 | cmpfail) | ||
| 551 | #ifdef RSEQ_COMPARE_TWICE | ||
| 552 | RSEQ_ASM_DEFINE_CMPFAIL(6, | ||
| 553 | /* teardown */ | ||
| 554 | LONG_L " %[len], %[rseq_scratch2]\n\t" | ||
| 555 | LONG_L " %[dst], %[rseq_scratch1]\n\t" | ||
| 556 | LONG_L " %[src], %[rseq_scratch0]\n\t", | ||
| 557 | error1) | ||
| 558 | RSEQ_ASM_DEFINE_CMPFAIL(7, | ||
| 559 | /* teardown */ | ||
| 560 | LONG_L " %[len], %[rseq_scratch2]\n\t" | ||
| 561 | LONG_L " %[dst], %[rseq_scratch1]\n\t" | ||
| 562 | LONG_L " %[src], %[rseq_scratch0]\n\t", | ||
| 563 | error2) | ||
| 564 | #endif | ||
| 565 | "8:\n\t" | ||
| 566 | : /* gcc asm goto does not allow outputs */ | ||
| 567 | : [cpu_id] "r" (cpu), | ||
| 568 | [current_cpu_id] "m" (__rseq_abi.cpu_id), | ||
| 569 | [rseq_cs] "m" (__rseq_abi.rseq_cs), | ||
| 570 | /* final store input */ | ||
| 571 | [v] "m" (*v), | ||
| 572 | [expect] "r" (expect), | ||
| 573 | [newv] "r" (newv), | ||
| 574 | /* try memcpy input */ | ||
| 575 | [dst] "r" (dst), | ||
| 576 | [src] "r" (src), | ||
| 577 | [len] "r" (len), | ||
| 578 | [rseq_scratch0] "m" (rseq_scratch[0]), | ||
| 579 | [rseq_scratch1] "m" (rseq_scratch[1]), | ||
| 580 | [rseq_scratch2] "m" (rseq_scratch[2]) | ||
| 581 | RSEQ_INJECT_INPUT | ||
| 582 | : "$4", "memory" | ||
| 583 | RSEQ_INJECT_CLOBBER | ||
| 584 | : abort, cmpfail | ||
| 585 | #ifdef RSEQ_COMPARE_TWICE | ||
| 586 | , error1, error2 | ||
| 587 | #endif | ||
| 588 | ); | ||
| 589 | rseq_workaround_gcc_asm_size_guess(); | ||
| 590 | return 0; | ||
| 591 | abort: | ||
| 592 | rseq_workaround_gcc_asm_size_guess(); | ||
| 593 | RSEQ_INJECT_FAILED | ||
| 594 | return -1; | ||
| 595 | cmpfail: | ||
| 596 | rseq_workaround_gcc_asm_size_guess(); | ||
| 597 | return 1; | ||
| 598 | #ifdef RSEQ_COMPARE_TWICE | ||
| 599 | error1: | ||
| 600 | rseq_workaround_gcc_asm_size_guess(); | ||
| 601 | rseq_bug("cpu_id comparison failed"); | ||
| 602 | error2: | ||
| 603 | rseq_workaround_gcc_asm_size_guess(); | ||
| 604 | rseq_bug("expected value comparison failed"); | ||
| 605 | #endif | ||
| 606 | } | ||
| 607 | |||
| 608 | static inline __attribute__((always_inline)) | ||
| 609 | int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, | ||
| 610 | void *dst, void *src, size_t len, | ||
| 611 | intptr_t newv, int cpu) | ||
| 612 | { | ||
| 613 | uintptr_t rseq_scratch[3]; | ||
| 614 | |||
| 615 | RSEQ_INJECT_C(9) | ||
| 616 | |||
| 617 | rseq_workaround_gcc_asm_size_guess(); | ||
| 618 | __asm__ __volatile__ goto ( | ||
| 619 | RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ | ||
| 620 | LONG_S " %[src], %[rseq_scratch0]\n\t" | ||
| 621 | LONG_S " %[dst], %[rseq_scratch1]\n\t" | ||
| 622 | LONG_S " %[len], %[rseq_scratch2]\n\t" | ||
| 623 | /* Start rseq by storing table entry pointer into rseq_cs. */ | ||
| 624 | RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) | ||
| 625 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) | ||
| 626 | RSEQ_INJECT_ASM(3) | ||
| 627 | LONG_L " $4, %[v]\n\t" | ||
| 628 | "bne $4, %[expect], 5f\n\t" | ||
| 629 | RSEQ_INJECT_ASM(4) | ||
| 630 | #ifdef RSEQ_COMPARE_TWICE | ||
| 631 | RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f) | ||
| 632 | LONG_L " $4, %[v]\n\t" | ||
| 633 | "bne $4, %[expect], 7f\n\t" | ||
| 634 | #endif | ||
| 635 | /* try memcpy */ | ||
| 636 | "beqz %[len], 333f\n\t" \ | ||
| 637 | "222:\n\t" \ | ||
| 638 | "lb $4, 0(%[src])\n\t" \ | ||
| 639 | "sb $4, 0(%[dst])\n\t" \ | ||
| 640 | LONG_ADDI " %[src], 1\n\t" \ | ||
| 641 | LONG_ADDI " %[dst], 1\n\t" \ | ||
| 642 | LONG_ADDI " %[len], -1\n\t" \ | ||
| 643 | "bnez %[len], 222b\n\t" \ | ||
| 644 | "333:\n\t" \ | ||
| 645 | RSEQ_INJECT_ASM(5) | ||
| 646 | "sync\n\t" /* full sync provides store-release */ | ||
| 647 | /* final store */ | ||
| 648 | LONG_S " %[newv], %[v]\n\t" | ||
| 649 | "2:\n\t" | ||
| 650 | RSEQ_INJECT_ASM(6) | ||
| 651 | /* teardown */ | ||
| 652 | LONG_L " %[len], %[rseq_scratch2]\n\t" | ||
| 653 | LONG_L " %[dst], %[rseq_scratch1]\n\t" | ||
| 654 | LONG_L " %[src], %[rseq_scratch0]\n\t" | ||
| 655 | "b 8f\n\t" | ||
| 656 | RSEQ_ASM_DEFINE_ABORT(3, 4, | ||
| 657 | /* teardown */ | ||
| 658 | LONG_L " %[len], %[rseq_scratch2]\n\t" | ||
| 659 | LONG_L " %[dst], %[rseq_scratch1]\n\t" | ||
| 660 | LONG_L " %[src], %[rseq_scratch0]\n\t", | ||
| 661 | abort, 1b, 2b, 4f) | ||
| 662 | RSEQ_ASM_DEFINE_CMPFAIL(5, | ||
| 663 | /* teardown */ | ||
| 664 | LONG_L " %[len], %[rseq_scratch2]\n\t" | ||
| 665 | LONG_L " %[dst], %[rseq_scratch1]\n\t" | ||
| 666 | LONG_L " %[src], %[rseq_scratch0]\n\t", | ||
| 667 | cmpfail) | ||
| 668 | #ifdef RSEQ_COMPARE_TWICE | ||
| 669 | RSEQ_ASM_DEFINE_CMPFAIL(6, | ||
| 670 | /* teardown */ | ||
| 671 | LONG_L " %[len], %[rseq_scratch2]\n\t" | ||
| 672 | LONG_L " %[dst], %[rseq_scratch1]\n\t" | ||
| 673 | LONG_L " %[src], %[rseq_scratch0]\n\t", | ||
| 674 | error1) | ||
| 675 | RSEQ_ASM_DEFINE_CMPFAIL(7, | ||
| 676 | /* teardown */ | ||
| 677 | LONG_L " %[len], %[rseq_scratch2]\n\t" | ||
| 678 | LONG_L " %[dst], %[rseq_scratch1]\n\t" | ||
| 679 | LONG_L " %[src], %[rseq_scratch0]\n\t", | ||
| 680 | error2) | ||
| 681 | #endif | ||
| 682 | "8:\n\t" | ||
| 683 | : /* gcc asm goto does not allow outputs */ | ||
| 684 | : [cpu_id] "r" (cpu), | ||
| 685 | [current_cpu_id] "m" (__rseq_abi.cpu_id), | ||
| 686 | [rseq_cs] "m" (__rseq_abi.rseq_cs), | ||
| 687 | /* final store input */ | ||
| 688 | [v] "m" (*v), | ||
| 689 | [expect] "r" (expect), | ||
| 690 | [newv] "r" (newv), | ||
| 691 | /* try memcpy input */ | ||
| 692 | [dst] "r" (dst), | ||
| 693 | [src] "r" (src), | ||
| 694 | [len] "r" (len), | ||
| 695 | [rseq_scratch0] "m" (rseq_scratch[0]), | ||
| 696 | [rseq_scratch1] "m" (rseq_scratch[1]), | ||
| 697 | [rseq_scratch2] "m" (rseq_scratch[2]) | ||
| 698 | RSEQ_INJECT_INPUT | ||
| 699 | : "$4", "memory" | ||
| 700 | RSEQ_INJECT_CLOBBER | ||
| 701 | : abort, cmpfail | ||
| 702 | #ifdef RSEQ_COMPARE_TWICE | ||
| 703 | , error1, error2 | ||
| 704 | #endif | ||
| 705 | ); | ||
| 706 | rseq_workaround_gcc_asm_size_guess(); | ||
| 707 | return 0; | ||
| 708 | abort: | ||
| 709 | rseq_workaround_gcc_asm_size_guess(); | ||
| 710 | RSEQ_INJECT_FAILED | ||
| 711 | return -1; | ||
| 712 | cmpfail: | ||
| 713 | rseq_workaround_gcc_asm_size_guess(); | ||
| 714 | return 1; | ||
| 715 | #ifdef RSEQ_COMPARE_TWICE | ||
| 716 | error1: | ||
| 717 | rseq_workaround_gcc_asm_size_guess(); | ||
| 718 | rseq_bug("cpu_id comparison failed"); | ||
| 719 | error2: | ||
| 720 | rseq_workaround_gcc_asm_size_guess(); | ||
| 721 | rseq_bug("expected value comparison failed"); | ||
| 722 | #endif | ||
| 723 | } | ||
| 724 | |||
| 725 | #endif /* !RSEQ_SKIP_FASTPATH */ | ||
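rseq-mips.h above supplies the MIPS inline-asm fast paths behind the selftests' generic helpers (rseq_cmpeqv_storev(), rseq_addv(), and friends). For orientation, the sketch below shows the typical calling pattern for the simplest helper, rseq_addv(), following the per-CPU counter tests in this directory; it assumes the selftests' rseq.h API (rseq_register_current_thread(), rseq_cpu_start(), rseq_addv(), rseq_unregister_current_thread()) and keeps error handling minimal.

    /*
     * Usage sketch for the rseq_addv() fast path: increment a per-CPU
     * counter, retrying whenever the restartable sequence is aborted.
     * Build against the selftests' rseq.h/rseq.c.
     */
    #include <sched.h>
    #include <stdint.h>
    #include <stdio.h>
    #include "rseq.h"

    static intptr_t per_cpu_count[CPU_SETSIZE];

    int main(void)
    {
        int cpu;

        if (rseq_register_current_thread()) {
            perror("rseq_register_current_thread");
            return 1;
        }

        /* retry until the add commits on the CPU whose slot we picked */
        do {
            cpu = rseq_cpu_start();
        } while (rseq_addv(&per_cpu_count[cpu], 1, cpu) != 0);

        printf("cpu%d count: %ld\n", cpu, (long)per_cpu_count[cpu]);

        return rseq_unregister_current_thread() ? 1 : 0;
    }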
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h index 0a808575cbc4..a4684112676c 100644 --- a/tools/testing/selftests/rseq/rseq.h +++ b/tools/testing/selftests/rseq/rseq.h | |||
| @@ -73,6 +73,8 @@ extern __thread volatile struct rseq __rseq_abi; | |||
| 73 | #include <rseq-arm.h> | 73 | #include <rseq-arm.h> |
| 74 | #elif defined(__PPC__) | 74 | #elif defined(__PPC__) |
| 75 | #include <rseq-ppc.h> | 75 | #include <rseq-ppc.h> |
| 76 | #elif defined(__mips__) | ||
| 77 | #include <rseq-mips.h> | ||
| 76 | #else | 78 | #else |
| 77 | #error unsupported target | 79 | #error unsupported target |
| 78 | #endif | 80 | #endif |
diff --git a/tools/testing/selftests/sparc64/Makefile b/tools/testing/selftests/sparc64/Makefile index 2082eeffd779..a19531dba4dc 100644 --- a/tools/testing/selftests/sparc64/Makefile +++ b/tools/testing/selftests/sparc64/Makefile | |||
| @@ -1,7 +1,18 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | ||
| 2 | uname_M := $(shell uname -m 2>/dev/null || echo not) | ||
| 3 | ARCH ?= $(shell echo $(uname_M) | sed -e s/x86_64/x86/) | ||
| 4 | |||
| 5 | ifneq ($(ARCH),sparc64) | ||
| 6 | nothing: | ||
| 7 | .PHONY: all clean run_tests install | ||
| 8 | .SILENT: | ||
| 9 | else | ||
| 10 | |||
| 1 | SUBDIRS := drivers | 11 | SUBDIRS := drivers |
| 2 | 12 | ||
| 3 | TEST_PROGS := run.sh | 13 | TEST_PROGS := run.sh |
| 4 | 14 | ||
| 15 | |||
| 5 | .PHONY: all clean | 16 | .PHONY: all clean |
| 6 | 17 | ||
| 7 | include ../lib.mk | 18 | include ../lib.mk |
| @@ -18,10 +29,6 @@ all: | |||
| 18 | fi \ | 29 | fi \ |
| 19 | done | 30 | done |
| 20 | 31 | ||
| 21 | override define RUN_TESTS | ||
| 22 | @cd $(OUTPUT); ./run.sh | ||
| 23 | endef | ||
| 24 | |||
| 25 | override define INSTALL_RULE | 32 | override define INSTALL_RULE |
| 26 | mkdir -p $(INSTALL_PATH) | 33 | mkdir -p $(INSTALL_PATH) |
| 27 | install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) | 34 | install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) |
| @@ -33,10 +40,6 @@ override define INSTALL_RULE | |||
| 33 | done; | 40 | done; |
| 34 | endef | 41 | endef |
| 35 | 42 | ||
| 36 | override define EMIT_TESTS | ||
| 37 | echo "./run.sh" | ||
| 38 | endef | ||
| 39 | |||
| 40 | override define CLEAN | 43 | override define CLEAN |
| 41 | @for DIR in $(SUBDIRS); do \ | 44 | @for DIR in $(SUBDIRS); do \ |
| 42 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ | 45 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ |
| @@ -44,3 +47,4 @@ override define CLEAN | |||
| 44 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ | 47 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ |
| 45 | done | 48 | done |
| 46 | endef | 49 | endef |
| 50 | endif | ||
diff --git a/tools/testing/selftests/sparc64/drivers/Makefile b/tools/testing/selftests/sparc64/drivers/Makefile index 6264f40bbdbc..deb0df415565 100644 --- a/tools/testing/selftests/sparc64/drivers/Makefile +++ b/tools/testing/selftests/sparc64/drivers/Makefile | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | 1 | # SPDX-License-Identifier: GPL-2.0 | |
| 2 | INCLUDEDIR := -I. | 2 | INCLUDEDIR := -I. |
| 3 | CFLAGS := $(CFLAGS) $(INCLUDEDIR) -Wall -O2 -g | 3 | CFLAGS := $(CFLAGS) $(INCLUDEDIR) -Wall -O2 -g |
| 4 | 4 | ||
diff --git a/tools/testing/selftests/static_keys/test_static_keys.sh b/tools/testing/selftests/static_keys/test_static_keys.sh index 24cff498b31a..fc9f8cde7d42 100755 --- a/tools/testing/selftests/static_keys/test_static_keys.sh +++ b/tools/testing/selftests/static_keys/test_static_keys.sh | |||
| @@ -2,6 +2,19 @@ | |||
| 2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
| 3 | # Runs static keys kernel module tests | 3 | # Runs static keys kernel module tests |
| 4 | 4 | ||
| 5 | # Kselftest framework requirement - SKIP code is 4. | ||
| 6 | ksft_skip=4 | ||
| 7 | |||
| 8 | if ! /sbin/modprobe -q -n test_static_key_base; then | ||
| 9 | echo "static_key: module test_static_key_base is not found [SKIP]" | ||
| 10 | exit $ksft_skip | ||
| 11 | fi | ||
| 12 | |||
| 13 | if ! /sbin/modprobe -q -n test_static_keys; then | ||
| 14 | echo "static_key: module test_static_keys is not found [SKIP]" | ||
| 15 | exit $ksft_skip | ||
| 16 | fi | ||
| 17 | |||
| 5 | if /sbin/modprobe -q test_static_key_base; then | 18 | if /sbin/modprobe -q test_static_key_base; then |
| 6 | if /sbin/modprobe -q test_static_keys; then | 19 | if /sbin/modprobe -q test_static_keys; then |
| 7 | echo "static_key: ok" | 20 | echo "static_key: ok" |
diff --git a/tools/testing/selftests/sync/config b/tools/testing/selftests/sync/config new file mode 100644 index 000000000000..1ab7e8130db2 --- /dev/null +++ b/tools/testing/selftests/sync/config | |||
| @@ -0,0 +1,4 @@ | |||
| 1 | CONFIG_STAGING=y | ||
| 2 | CONFIG_ANDROID=y | ||
| 3 | CONFIG_SYNC=y | ||
| 4 | CONFIG_SW_SYNC=y | ||
diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh index ec232c3cfcaa..584eb8ea780a 100755 --- a/tools/testing/selftests/sysctl/sysctl.sh +++ b/tools/testing/selftests/sysctl/sysctl.sh | |||
| @@ -14,6 +14,9 @@ | |||
| 14 | 14 | ||
| 15 | # This performs a series tests against the proc sysctl interface. | 15 | # This performs a series tests against the proc sysctl interface. |
| 16 | 16 | ||
| 17 | # Kselftest framework requirement - SKIP code is 4. | ||
| 18 | ksft_skip=4 | ||
| 19 | |||
| 17 | TEST_NAME="sysctl" | 20 | TEST_NAME="sysctl" |
| 18 | TEST_DRIVER="test_${TEST_NAME}" | 21 | TEST_DRIVER="test_${TEST_NAME}" |
| 19 | TEST_DIR=$(dirname $0) | 22 | TEST_DIR=$(dirname $0) |
| @@ -41,7 +44,7 @@ test_modprobe() | |||
| 41 | echo "$0: $DIR not present" >&2 | 44 | echo "$0: $DIR not present" >&2 |
| 42 | echo "You must have the following enabled in your kernel:" >&2 | 45 | echo "You must have the following enabled in your kernel:" >&2 |
| 43 | cat $TEST_DIR/config >&2 | 46 | cat $TEST_DIR/config >&2 |
| 44 | exit 1 | 47 | exit $ksft_skip |
| 45 | fi | 48 | fi |
| 46 | } | 49 | } |
| 47 | 50 | ||
| @@ -98,28 +101,30 @@ test_reqs() | |||
| 98 | uid=$(id -u) | 101 | uid=$(id -u) |
| 99 | if [ $uid -ne 0 ]; then | 102 | if [ $uid -ne 0 ]; then |
| 100 | echo $msg must be run as root >&2 | 103 | echo $msg must be run as root >&2 |
| 101 | exit 0 | 104 | exit $ksft_skip |
| 102 | fi | 105 | fi |
| 103 | 106 | ||
| 104 | if ! which perl 2> /dev/null > /dev/null; then | 107 | if ! which perl 2> /dev/null > /dev/null; then |
| 105 | echo "$0: You need perl installed" | 108 | echo "$0: You need perl installed" |
| 106 | exit 1 | 109 | exit $ksft_skip |
| 107 | fi | 110 | fi |
| 108 | if ! which getconf 2> /dev/null > /dev/null; then | 111 | if ! which getconf 2> /dev/null > /dev/null; then |
| 109 | echo "$0: You need getconf installed" | 112 | echo "$0: You need getconf installed" |
| 110 | exit 1 | 113 | exit $ksft_skip |
| 111 | fi | 114 | fi |
| 112 | if ! which diff 2> /dev/null > /dev/null; then | 115 | if ! which diff 2> /dev/null > /dev/null; then |
| 113 | echo "$0: You need diff installed" | 116 | echo "$0: You need diff installed" |
| 114 | exit 1 | 117 | exit $ksft_skip |
| 115 | fi | 118 | fi |
| 116 | } | 119 | } |
| 117 | 120 | ||
| 118 | function load_req_mod() | 121 | function load_req_mod() |
| 119 | { | 122 | { |
| 120 | trap "test_modprobe" EXIT | ||
| 121 | |||
| 122 | if [ ! -d $DIR ]; then | 123 | if [ ! -d $DIR ]; then |
| 124 | if ! modprobe -q -n $TEST_DRIVER; then | ||
| 125 | echo "$0: module $TEST_DRIVER not found [SKIP]" | ||
| 126 | exit $ksft_skip | ||
| 127 | fi | ||
| 123 | modprobe $TEST_DRIVER | 128 | modprobe $TEST_DRIVER |
| 124 | if [ $? -ne 0 ]; then | 129 | if [ $? -ne 0 ]; then |
| 125 | exit | 130 | exit |
| @@ -765,6 +770,7 @@ function parse_args() | |||
| 765 | test_reqs | 770 | test_reqs |
| 766 | allow_user_defaults | 771 | allow_user_defaults |
| 767 | check_production_sysctl_writes_strict | 772 | check_production_sysctl_writes_strict |
| 773 | test_modprobe | ||
| 768 | load_req_mod | 774 | load_req_mod |
| 769 | 775 | ||
| 770 | trap "test_finish" EXIT | 776 | trap "test_finish" EXIT |
diff --git a/tools/testing/selftests/user/test_user_copy.sh b/tools/testing/selftests/user/test_user_copy.sh index d60506fc77f8..f9b31a57439b 100755 --- a/tools/testing/selftests/user/test_user_copy.sh +++ b/tools/testing/selftests/user/test_user_copy.sh | |||
| @@ -2,6 +2,13 @@ | |||
| 2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
| 3 | # Runs copy_to/from_user infrastructure using test_user_copy kernel module | 3 | # Runs copy_to/from_user infrastructure using test_user_copy kernel module |
| 4 | 4 | ||
| 5 | # Kselftest framework requirement - SKIP code is 4. | ||
| 6 | ksft_skip=4 | ||
| 7 | |||
| 8 | if ! /sbin/modprobe -q -n test_user_copy; then | ||
| 9 | echo "user: module test_user_copy is not found [SKIP]" | ||
| 10 | exit $ksft_skip | ||
| 11 | fi | ||
| 5 | if /sbin/modprobe -q test_user_copy; then | 12 | if /sbin/modprobe -q test_user_copy; then |
| 6 | /sbin/modprobe -q -r test_user_copy | 13 | /sbin/modprobe -q -r test_user_copy |
| 7 | echo "user_copy: ok" | 14 | echo "user_copy: ok" |
diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c index 1097f04e4d80..bcec71250873 100644 --- a/tools/testing/selftests/vm/compaction_test.c +++ b/tools/testing/selftests/vm/compaction_test.c | |||
| @@ -16,6 +16,8 @@ | |||
| 16 | #include <unistd.h> | 16 | #include <unistd.h> |
| 17 | #include <string.h> | 17 | #include <string.h> |
| 18 | 18 | ||
| 19 | #include "../kselftest.h" | ||
| 20 | |||
| 19 | #define MAP_SIZE 1048576 | 21 | #define MAP_SIZE 1048576 |
| 20 | 22 | ||
| 21 | struct map_list { | 23 | struct map_list { |
| @@ -169,7 +171,7 @@ int main(int argc, char **argv) | |||
| 169 | printf("Either the sysctl compact_unevictable_allowed is not\n" | 171 | printf("Either the sysctl compact_unevictable_allowed is not\n" |
| 170 | "set to 1 or couldn't read the proc file.\n" | 172 | "set to 1 or couldn't read the proc file.\n" |
| 171 | "Skipping the test\n"); | 173 | "Skipping the test\n"); |
| 172 | return 0; | 174 | return KSFT_SKIP; |
| 173 | } | 175 | } |
| 174 | 176 | ||
| 175 | lim.rlim_cur = RLIM_INFINITY; | 177 | lim.rlim_cur = RLIM_INFINITY; |
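These vm selftest conversions all follow the same kselftest convention: include the shared ../kselftest.h and exit with KSFT_SKIP (4) instead of 0 when a precondition is not met, so the harness reports the test as skipped rather than passed. A minimal standalone sketch of the convention (the root check is only illustrative, not taken from this patch):

    /* Sketch of the kselftest SKIP convention; kselftest.h defines KSFT_SKIP as 4. */
    #include <stdio.h>
    #include <unistd.h>
    #include "../kselftest.h"

    int main(void)
    {
    	if (geteuid() != 0) {
    		/* Unmet requirement: report SKIP (exit code 4), not PASS (0). */
    		printf("test requires root, skipping\n");
    		return KSFT_SKIP;
    	}
    	/* ... real test body would run here ... */
    	printf("ok\n");
    	return 0;
    }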
diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c index 4997b9222cfa..637b6d0ac0d0 100644 --- a/tools/testing/selftests/vm/mlock2-tests.c +++ b/tools/testing/selftests/vm/mlock2-tests.c | |||
| @@ -9,6 +9,8 @@ | |||
| 9 | #include <stdbool.h> | 9 | #include <stdbool.h> |
| 10 | #include "mlock2.h" | 10 | #include "mlock2.h" |
| 11 | 11 | ||
| 12 | #include "../kselftest.h" | ||
| 13 | |||
| 12 | struct vm_boundaries { | 14 | struct vm_boundaries { |
| 13 | unsigned long start; | 15 | unsigned long start; |
| 14 | unsigned long end; | 16 | unsigned long end; |
| @@ -303,7 +305,7 @@ static int test_mlock_lock() | |||
| 303 | if (mlock2_(map, 2 * page_size, 0)) { | 305 | if (mlock2_(map, 2 * page_size, 0)) { |
| 304 | if (errno == ENOSYS) { | 306 | if (errno == ENOSYS) { |
| 305 | printf("Cannot call new mlock family, skipping test\n"); | 307 | printf("Cannot call new mlock family, skipping test\n"); |
| 306 | _exit(0); | 308 | _exit(KSFT_SKIP); |
| 307 | } | 309 | } |
| 308 | perror("mlock2(0)"); | 310 | perror("mlock2(0)"); |
| 309 | goto unmap; | 311 | goto unmap; |
| @@ -412,7 +414,7 @@ static int test_mlock_onfault() | |||
| 412 | if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) { | 414 | if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) { |
| 413 | if (errno == ENOSYS) { | 415 | if (errno == ENOSYS) { |
| 414 | printf("Cannot call new mlock family, skipping test\n"); | 416 | printf("Cannot call new mlock family, skipping test\n"); |
| 415 | _exit(0); | 417 | _exit(KSFT_SKIP); |
| 416 | } | 418 | } |
| 417 | perror("mlock2(MLOCK_ONFAULT)"); | 419 | perror("mlock2(MLOCK_ONFAULT)"); |
| 418 | goto unmap; | 420 | goto unmap; |
| @@ -425,7 +427,7 @@ static int test_mlock_onfault() | |||
| 425 | if (munlock(map, 2 * page_size)) { | 427 | if (munlock(map, 2 * page_size)) { |
| 426 | if (errno == ENOSYS) { | 428 | if (errno == ENOSYS) { |
| 427 | printf("Cannot call new mlock family, skipping test\n"); | 429 | printf("Cannot call new mlock family, skipping test\n"); |
| 428 | _exit(0); | 430 | _exit(KSFT_SKIP); |
| 429 | } | 431 | } |
| 430 | perror("munlock()"); | 432 | perror("munlock()"); |
| 431 | goto unmap; | 433 | goto unmap; |
| @@ -457,7 +459,7 @@ static int test_lock_onfault_of_present() | |||
| 457 | if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) { | 459 | if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) { |
| 458 | if (errno == ENOSYS) { | 460 | if (errno == ENOSYS) { |
| 459 | printf("Cannot call new mlock family, skipping test\n"); | 461 | printf("Cannot call new mlock family, skipping test\n"); |
| 460 | _exit(0); | 462 | _exit(KSFT_SKIP); |
| 461 | } | 463 | } |
| 462 | perror("mlock2(MLOCK_ONFAULT)"); | 464 | perror("mlock2(MLOCK_ONFAULT)"); |
| 463 | goto unmap; | 465 | goto unmap; |
| @@ -583,7 +585,7 @@ static int test_vma_management(bool call_mlock) | |||
| 583 | if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) { | 585 | if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) { |
| 584 | if (errno == ENOSYS) { | 586 | if (errno == ENOSYS) { |
| 585 | printf("Cannot call new mlock family, skipping test\n"); | 587 | printf("Cannot call new mlock family, skipping test\n"); |
| 586 | _exit(0); | 588 | _exit(KSFT_SKIP); |
| 587 | } | 589 | } |
| 588 | perror("mlock(ONFAULT)\n"); | 590 | perror("mlock(ONFAULT)\n"); |
| 589 | goto out; | 591 | goto out; |
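The ENOSYS checks above rely on mlock2_() being a thin wrapper around the raw syscall, so a kernel or libc without mlock2 surfaces as errno == ENOSYS, which the tests now translate into KSFT_SKIP instead of a silent pass. A hedged sketch of such a wrapper; the selftest's own mlock2.h may differ in detail:

    /* Minimal mlock2() wrapper sketch; the real definition lives in mlock2.h. */
    #include <errno.h>
    #include <stddef.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int mlock2_(void *start, size_t len, int flags)
    {
    #ifdef __NR_mlock2
    	return syscall(__NR_mlock2, start, len, flags);
    #else
    	/* Syscall number unknown at build time: report "not implemented". */
    	errno = ENOSYS;
    	return -1;
    #endif
    }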
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests index 22d564673830..88cbe5575f0c 100755 --- a/tools/testing/selftests/vm/run_vmtests +++ b/tools/testing/selftests/vm/run_vmtests | |||
| @@ -2,6 +2,9 @@ | |||
| 2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
| 3 | #please run as root | 3 | #please run as root |
| 4 | 4 | ||
| 5 | # Kselftest framework requirement - SKIP code is 4. | ||
| 6 | ksft_skip=4 | ||
| 7 | |||
| 5 | mnt=./huge | 8 | mnt=./huge |
| 6 | exitcode=0 | 9 | exitcode=0 |
| 7 | 10 | ||
| @@ -36,7 +39,7 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then | |||
| 36 | echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages | 39 | echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages |
| 37 | if [ $? -ne 0 ]; then | 40 | if [ $? -ne 0 ]; then |
| 38 | echo "Please run this test as root" | 41 | echo "Please run this test as root" |
| 39 | exit 1 | 42 | exit $ksft_skip |
| 40 | fi | 43 | fi |
| 41 | while read name size unit; do | 44 | while read name size unit; do |
| 42 | if [ "$name" = "HugePages_Free:" ]; then | 45 | if [ "$name" = "HugePages_Free:" ]; then |
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index de2f9ec8a87f..7b8171e3128a 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c | |||
| @@ -69,6 +69,8 @@ | |||
| 69 | #include <setjmp.h> | 69 | #include <setjmp.h> |
| 70 | #include <stdbool.h> | 70 | #include <stdbool.h> |
| 71 | 71 | ||
| 72 | #include "../kselftest.h" | ||
| 73 | |||
| 72 | #ifdef __NR_userfaultfd | 74 | #ifdef __NR_userfaultfd |
| 73 | 75 | ||
| 74 | static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; | 76 | static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; |
| @@ -1322,7 +1324,7 @@ int main(int argc, char **argv) | |||
| 1322 | int main(void) | 1324 | int main(void) |
| 1323 | { | 1325 | { |
| 1324 | printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n"); | 1326 | printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n"); |
| 1325 | return 0; | 1327 | return KSFT_SKIP; |
| 1326 | } | 1328 | } |
| 1327 | 1329 | ||
| 1328 | #endif /* __NR_userfaultfd */ | 1330 | #endif /* __NR_userfaultfd */ |
diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh index 754de7da426a..232e958ec454 100755 --- a/tools/testing/selftests/zram/zram.sh +++ b/tools/testing/selftests/zram/zram.sh | |||
| @@ -2,6 +2,9 @@ | |||
| 2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
| 3 | TCID="zram.sh" | 3 | TCID="zram.sh" |
| 4 | 4 | ||
| 5 | # Kselftest framework requirement - SKIP code is 4. | ||
| 6 | ksft_skip=4 | ||
| 7 | |||
| 5 | . ./zram_lib.sh | 8 | . ./zram_lib.sh |
| 6 | 9 | ||
| 7 | run_zram () { | 10 | run_zram () { |
| @@ -24,5 +27,5 @@ elif [ -b /dev/zram0 ]; then | |||
| 24 | else | 27 | else |
| 25 | echo "$TCID : No zram.ko module or /dev/zram0 device file not found" | 28 | echo "$TCID : No zram.ko module or /dev/zram0 device file not found" |
| 26 | echo "$TCID : CONFIG_ZRAM is not set" | 29 | echo "$TCID : CONFIG_ZRAM is not set" |
| 27 | exit 1 | 30 | exit $ksft_skip |
| 28 | fi | 31 | fi |
diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh index f6a9c73e7a44..9e73a4fb9b0a 100755 --- a/tools/testing/selftests/zram/zram_lib.sh +++ b/tools/testing/selftests/zram/zram_lib.sh | |||
| @@ -18,6 +18,9 @@ MODULE=0 | |||
| 18 | dev_makeswap=-1 | 18 | dev_makeswap=-1 |
| 19 | dev_mounted=-1 | 19 | dev_mounted=-1 |
| 20 | 20 | ||
| 21 | # Kselftest framework requirement - SKIP code is 4. | ||
| 22 | ksft_skip=4 | ||
| 23 | |||
| 21 | trap INT | 24 | trap INT |
| 22 | 25 | ||
| 23 | check_prereqs() | 26 | check_prereqs() |
| @@ -27,7 +30,7 @@ check_prereqs() | |||
| 27 | 30 | ||
| 28 | if [ $uid -ne 0 ]; then | 31 | if [ $uid -ne 0 ]; then |
| 29 | echo $msg must be run as root >&2 | 32 | echo $msg must be run as root >&2 |
| 30 | exit 0 | 33 | exit $ksft_skip |
| 31 | fi | 34 | fi |
| 32 | } | 35 | } |
| 33 | 36 | ||
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index 72143cfaf6ec..ea434ddc8499 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig | |||
| @@ -47,7 +47,7 @@ config KVM_GENERIC_DIRTYLOG_READ_PROTECT | |||
| 47 | 47 | ||
| 48 | config KVM_COMPAT | 48 | config KVM_COMPAT |
| 49 | def_bool y | 49 | def_bool y |
| 50 | depends on KVM && COMPAT && !S390 | 50 | depends on KVM && COMPAT && !(S390 || ARM64) |
| 51 | 51 | ||
| 52 | config HAVE_KVM_IRQ_BYPASS | 52 | config HAVE_KVM_IRQ_BYPASS |
| 53 | bool | 53 | bool |
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 8d90de213ce9..1d90d79706bd 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c | |||
| @@ -297,6 +297,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) | |||
| 297 | phys_addr_t next; | 297 | phys_addr_t next; |
| 298 | 298 | ||
| 299 | assert_spin_locked(&kvm->mmu_lock); | 299 | assert_spin_locked(&kvm->mmu_lock); |
| 300 | WARN_ON(size & ~PAGE_MASK); | ||
| 301 | |||
| 300 | pgd = kvm->arch.pgd + stage2_pgd_index(addr); | 302 | pgd = kvm->arch.pgd + stage2_pgd_index(addr); |
| 301 | do { | 303 | do { |
| 302 | /* | 304 | /* |
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index ff7dc890941a..cdce653e3c47 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
| @@ -617,11 +617,6 @@ int vgic_v3_probe(const struct gic_kvm_info *info) | |||
| 617 | pr_warn("GICV physical address 0x%llx not page aligned\n", | 617 | pr_warn("GICV physical address 0x%llx not page aligned\n", |
| 618 | (unsigned long long)info->vcpu.start); | 618 | (unsigned long long)info->vcpu.start); |
| 619 | kvm_vgic_global_state.vcpu_base = 0; | 619 | kvm_vgic_global_state.vcpu_base = 0; |
| 620 | } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) { | ||
| 621 | pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n", | ||
| 622 | (unsigned long long)resource_size(&info->vcpu), | ||
| 623 | PAGE_SIZE); | ||
| 624 | kvm_vgic_global_state.vcpu_base = 0; | ||
| 625 | } else { | 620 | } else { |
| 626 | kvm_vgic_global_state.vcpu_base = info->vcpu.start; | 621 | kvm_vgic_global_state.vcpu_base = info->vcpu.start; |
| 627 | kvm_vgic_global_state.can_emulate_gicv2 = true; | 622 | kvm_vgic_global_state.can_emulate_gicv2 = true; |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ada21f47f22b..8b47507faab5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -116,6 +116,11 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, | |||
| 116 | #ifdef CONFIG_KVM_COMPAT | 116 | #ifdef CONFIG_KVM_COMPAT |
| 117 | static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, | 117 | static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, |
| 118 | unsigned long arg); | 118 | unsigned long arg); |
| 119 | #define KVM_COMPAT(c) .compat_ioctl = (c) | ||
| 120 | #else | ||
| 121 | static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl, | ||
| 122 | unsigned long arg) { return -EINVAL; } | ||
| 123 | #define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl | ||
| 119 | #endif | 124 | #endif |
| 120 | static int hardware_enable_all(void); | 125 | static int hardware_enable_all(void); |
| 121 | static void hardware_disable_all(void); | 126 | static void hardware_disable_all(void); |
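The new KVM_COMPAT() helper keeps the CONFIG_KVM_COMPAT conditional in one place: with compat support it expands to a .compat_ioctl initializer for the given handler, and without it every file_operations gets a common stub returning -EINVAL, so compat ioctls are rejected explicitly rather than relying on a missing handler. The hunks below use it to drop the per-table #ifdef blocks. A self-contained userspace illustration of the same pattern follows; the types and names in it are made up for the sketch, not the kernel's:

    /*
     * Standalone illustration of the KVM_COMPAT() idea: one macro hides the
     * config conditional so each ops table stays free of #ifdef blocks.
     * HAVE_COMPAT, struct my_fops and the handlers are illustrative only.
     */
    #include <errno.h>
    #include <stdio.h>

    struct my_fops {
    	long (*unlocked_ioctl)(unsigned int cmd, unsigned long arg);
    	long (*compat_ioctl)(unsigned int cmd, unsigned long arg);
    };

    #ifdef HAVE_COMPAT
    static long my_compat_ioctl(unsigned int cmd, unsigned long arg)
    {
    	return 0;	/* real 32-bit translation would go here */
    }
    #define MY_COMPAT(c)	.compat_ioctl = (c)
    #else
    static long my_no_compat_ioctl(unsigned int cmd, unsigned long arg)
    {
    	return -EINVAL;	/* compat ioctls rejected explicitly */
    }
    /* The argument is discarded, so the handler need not even be declared. */
    #define MY_COMPAT(c)	.compat_ioctl = my_no_compat_ioctl
    #endif

    static long my_ioctl(unsigned int cmd, unsigned long arg)
    {
    	return 0;
    }

    /* Every ops table uses the macro; no per-table #ifdef needed. */
    static const struct my_fops fops = {
    	.unlocked_ioctl	= my_ioctl,
    	MY_COMPAT(my_compat_ioctl),
    };

    int main(void)
    {
    	/* Dispatches to the real handler or the -EINVAL stub, per config. */
    	printf("compat_ioctl -> %ld\n", fops.compat_ioctl(0, 0));
    	return 0;
    }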
| @@ -2396,11 +2401,9 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp) | |||
| 2396 | static struct file_operations kvm_vcpu_fops = { | 2401 | static struct file_operations kvm_vcpu_fops = { |
| 2397 | .release = kvm_vcpu_release, | 2402 | .release = kvm_vcpu_release, |
| 2398 | .unlocked_ioctl = kvm_vcpu_ioctl, | 2403 | .unlocked_ioctl = kvm_vcpu_ioctl, |
| 2399 | #ifdef CONFIG_KVM_COMPAT | ||
| 2400 | .compat_ioctl = kvm_vcpu_compat_ioctl, | ||
| 2401 | #endif | ||
| 2402 | .mmap = kvm_vcpu_mmap, | 2404 | .mmap = kvm_vcpu_mmap, |
| 2403 | .llseek = noop_llseek, | 2405 | .llseek = noop_llseek, |
| 2406 | KVM_COMPAT(kvm_vcpu_compat_ioctl), | ||
| 2404 | }; | 2407 | }; |
| 2405 | 2408 | ||
| 2406 | /* | 2409 | /* |
| @@ -2824,10 +2827,8 @@ static int kvm_device_release(struct inode *inode, struct file *filp) | |||
| 2824 | 2827 | ||
| 2825 | static const struct file_operations kvm_device_fops = { | 2828 | static const struct file_operations kvm_device_fops = { |
| 2826 | .unlocked_ioctl = kvm_device_ioctl, | 2829 | .unlocked_ioctl = kvm_device_ioctl, |
| 2827 | #ifdef CONFIG_KVM_COMPAT | ||
| 2828 | .compat_ioctl = kvm_device_ioctl, | ||
| 2829 | #endif | ||
| 2830 | .release = kvm_device_release, | 2830 | .release = kvm_device_release, |
| 2831 | KVM_COMPAT(kvm_device_ioctl), | ||
| 2831 | }; | 2832 | }; |
| 2832 | 2833 | ||
| 2833 | struct kvm_device *kvm_device_from_filp(struct file *filp) | 2834 | struct kvm_device *kvm_device_from_filp(struct file *filp) |
| @@ -3165,10 +3166,8 @@ static long kvm_vm_compat_ioctl(struct file *filp, | |||
| 3165 | static struct file_operations kvm_vm_fops = { | 3166 | static struct file_operations kvm_vm_fops = { |
| 3166 | .release = kvm_vm_release, | 3167 | .release = kvm_vm_release, |
| 3167 | .unlocked_ioctl = kvm_vm_ioctl, | 3168 | .unlocked_ioctl = kvm_vm_ioctl, |
| 3168 | #ifdef CONFIG_KVM_COMPAT | ||
| 3169 | .compat_ioctl = kvm_vm_compat_ioctl, | ||
| 3170 | #endif | ||
| 3171 | .llseek = noop_llseek, | 3169 | .llseek = noop_llseek, |
| 3170 | KVM_COMPAT(kvm_vm_compat_ioctl), | ||
| 3172 | }; | 3171 | }; |
| 3173 | 3172 | ||
| 3174 | static int kvm_dev_ioctl_create_vm(unsigned long type) | 3173 | static int kvm_dev_ioctl_create_vm(unsigned long type) |
| @@ -3259,8 +3258,8 @@ out: | |||
| 3259 | 3258 | ||
| 3260 | static struct file_operations kvm_chardev_ops = { | 3259 | static struct file_operations kvm_chardev_ops = { |
| 3261 | .unlocked_ioctl = kvm_dev_ioctl, | 3260 | .unlocked_ioctl = kvm_dev_ioctl, |
| 3262 | .compat_ioctl = kvm_dev_ioctl, | ||
| 3263 | .llseek = noop_llseek, | 3261 | .llseek = noop_llseek, |
| 3262 | KVM_COMPAT(kvm_dev_ioctl), | ||
| 3264 | }; | 3263 | }; |
| 3265 | 3264 | ||
| 3266 | static struct miscdevice kvm_dev = { | 3265 | static struct miscdevice kvm_dev = { |
