365 files changed, 3989 insertions, 1688 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 986e44387dad..2ba45caabada 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -653,6 +653,9 @@
 	cpuidle.off=1	[CPU_IDLE]
 			disable the cpuidle sub-system
 
+	cpufreq.off=1	[CPU_FREQ]
+			disable the cpufreq sub-system
+
 	cpu_init_udelay=N
 			[X86] Delay for N microsec between assert and de-assert
 			of APIC INIT to start processors. This delay occurs
@@ -1183,6 +1186,12 @@
 			functions that can be changed at run time by the
 			set_graph_notrace file in the debugfs tracing directory.
 
+	ftrace_graph_max_depth=<uint>
+			[FTRACE] Used with the function graph tracer. This is
+			the max depth it will trace into a function. This value
+			can be changed at run time by the max_graph_depth file
+			in the tracefs tracing directory. default: 0 (no limit)
+
 	gamecon.map[2|3]=
 			[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
 			support via parallel port (up to 5 devices per port)
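The ftrace_graph_max_depth= text added above notes that the same limit can be changed after boot through the max_graph_depth file in the tracefs directory. As a minimal sketch (not part of this patch), the snippet below caps the graph tracer at a depth of 5 from user space; the /sys/kernel/debug/tracing mount point is an assumption, since tracefs may instead be mounted at /sys/kernel/tracing:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* max_graph_depth is the run-time knob named in the documentation above;
	 * the path assumes a debugfs-mounted tracing directory. */
	int fd = open("/sys/kernel/debug/tracing/max_graph_depth", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Writing "0" would restore the documented default of "no limit". */
	if (write(fd, "5\n", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}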
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index a71b8095dbd8..2f66683500b8 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -68,3 +68,4 @@ stable kernels.
 |                |                 |                 |                         |
 | Qualcomm Tech. | Falkor v1       | E1003           | QCOM_FALKOR_ERRATUM_1003 |
 | Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009 |
+| Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065 |
diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst
index 2c41b713841f..44886c91e112 100644
--- a/Documentation/dev-tools/kcov.rst
+++ b/Documentation/dev-tools/kcov.rst
@@ -10,7 +10,7 @@ Note that kcov does not aim to collect as much coverage as possible. It aims
 to collect more or less stable coverage that is function of syscall inputs.
 To achieve this goal it does not collect coverage in soft/hard interrupts
 and instrumentation of some inherently non-deterministic parts of kernel is
-disbled (e.g. scheduler, locking).
+disabled (e.g. scheduler, locking).
 
 Usage
 -----
diff --git a/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt b/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
index c3f6546ebac7..6a23ad9ac53a 100644
--- a/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
@@ -45,7 +45,7 @@ Required Properties:
 Optional Properties:
 - reg-names: In addition to the required properties, the following are optional
   - "efuse-address" - Contains efuse base address used to pick up ABB info.
-  - "ldo-address" - Contains address of ABB LDO overide register address.
+  - "ldo-address" - Contains address of ABB LDO override register.
   "efuse-address" is required for this.
 - ti,ldovbb-vset-mask - Required if ldo-address is set, mask for LDO override
   register to provide override vset value.
diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt
index 0c065f77658f..3957d4edaa74 100644
--- a/Documentation/devicetree/bindings/usb/usb251xb.txt
+++ b/Documentation/devicetree/bindings/usb/usb251xb.txt
@@ -7,18 +7,18 @@ Required properties :
  - compatible : Should be "microchip,usb251xb" or one of the specific types:
 	"microchip,usb2512b", "microchip,usb2512bi", "microchip,usb2513b",
 	"microchip,usb2513bi", "microchip,usb2514b", "microchip,usb2514bi"
- - hub-reset-gpios : Should specify the gpio for hub reset
+ - reset-gpios : Should specify the gpio for hub reset
+ - reg : I2C address on the selected bus (default is <0x2C>)
 
 Optional properties :
- - reg : I2C address on the selected bus (default is <0x2C>)
  - skip-config : Skip Hub configuration, but only send the USB-Attach command
- - vendor-id : USB Vendor ID of the hub (16 bit, default is 0x0424)
- - product-id : USB Product ID of the hub (16 bit, default depends on type)
- - device-id : USB Device ID of the hub (16 bit, default is 0x0bb3)
- - language-id : USB Language ID (16 bit, default is 0x0000)
- - manufacturer : USB Manufacturer string (max 31 characters long)
- - product : USB Product string (max 31 characters long)
- - serial : USB Serial string (max 31 characters long)
+ - vendor-id : Set USB Vendor ID of the hub (16 bit, default is 0x0424)
+ - product-id : Set USB Product ID of the hub (16 bit, default depends on type)
+ - device-id : Set USB Device ID of the hub (16 bit, default is 0x0bb3)
+ - language-id : Set USB Language ID (16 bit, default is 0x0000)
+ - manufacturer : Set USB Manufacturer string (max 31 characters long)
+ - product : Set USB Product string (max 31 characters long)
+ - serial : Set USB Serial string (max 31 characters long)
  - {bus,self}-powered : selects between self- and bus-powered operation (default
 	is self-powered)
  - disable-hi-speed : disable USB Hi-Speed support
@@ -31,8 +31,10 @@ Optional properties :
 	(default is individual)
  - dynamic-power-switching : enable auto-switching from self- to bus-powered
 	operation if the local power source is removed or unavailable
- - oc-delay-{100us,4ms,8ms,16ms} : set over current timer delay (default is 8ms)
- - compound-device : indicated the hub is part of a compound device
+ - oc-delay-us : Delay time (in microseconds) for filtering the over-current
+	sense inputs. Valid values are 100, 4000, 8000 (default) and 16000. If
+	an invalid value is given, the default is used instead.
+ - compound-device : indicate the hub is part of a compound device
  - port-mapping-mode : enable port mapping mode
  - string-support : enable string descriptor support (required for manufacturer,
 	product and serial string configuration)
@@ -40,34 +42,15 @@ Optional properties :
 	device connected.
  - sp-disabled-ports : Specifies the ports which will be self-power disabled
  - bp-disabled-ports : Specifies the ports which will be bus-power disabled
- - max-sp-power : Specifies the maximum current the hub consumes from an
-	upstream port when operating as self-powered hub including the power
-	consumption of a permanently attached peripheral if the hub is
-	configured as a compound device. The value is given in mA in a 0 - 500
-	range (default is 2).
- - max-bp-power : Specifies the maximum current the hub consumes from an
-	upstream port when operating as bus-powered hub including the power
-	consumption of a permanently attached peripheral if the hub is
-	configured as a compound device. The value is given in mA in a 0 - 500
-	range (default is 100).
- - max-sp-current : Specifies the maximum current the hub consumes from an
-	upstream port when operating as self-powered hub EXCLUDING the power
-	consumption of a permanently attached peripheral if the hub is
-	configured as a compound device. The value is given in mA in a 0 - 500
-	range (default is 2).
- - max-bp-current : Specifies the maximum current the hub consumes from an
-	upstream port when operating as bus-powered hub EXCLUDING the power
-	consumption of a permanently attached peripheral if the hub is
-	configured as a compound device. The value is given in mA in a 0 - 500
-	range (default is 100).
- - power-on-time : Specifies the time it takes from the time the host initiates
-	the power-on sequence to a port until the port has adequate power. The
-	value is given in ms in a 0 - 510 range (default is 100ms).
+ - power-on-time-ms : Specifies the time it takes from the time the host
+	initiates the power-on sequence to a port until the port has adequate
+	power. The value is given in ms in a 0 - 510 range (default is 100ms).
 
 Examples:
 	usb2512b@2c {
 		compatible = "microchip,usb2512b";
-		hub-reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+		reg = <0x2c>;
+		reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 	};
 
 	usb2514b@2c {
diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt
index e4991fb1eedc..41ef9d8efe95 100644
--- a/Documentation/trace/kprobetrace.txt
+++ b/Documentation/trace/kprobetrace.txt
@@ -12,7 +12,7 @@ kprobes can probe (this means, all functions body except for __kprobes
 functions). Unlike the Tracepoint based event, this can be added and removed
 dynamically, on the fly.
 
-To enable this feature, build your kernel with CONFIG_KPROBE_EVENT=y.
+To enable this feature, build your kernel with CONFIG_KPROBE_EVENTS=y.
 
 Similar to the events tracer, this doesn't need to be activated via
 current_tracer. Instead of that, add probe points via
diff --git a/Documentation/trace/uprobetracer.txt b/Documentation/trace/uprobetracer.txt
index fa7b680ee8a0..bf526a7c5559 100644
--- a/Documentation/trace/uprobetracer.txt
+++ b/Documentation/trace/uprobetracer.txt
@@ -7,7 +7,7 @@
 Overview
 --------
 Uprobe based trace events are similar to kprobe based trace events.
-To enable this feature, build your kernel with CONFIG_UPROBE_EVENT=y.
+To enable this feature, build your kernel with CONFIG_UPROBE_EVENTS=y.
 
 Similar to the kprobe-event tracer, this doesn't need to be activated via
 current_tracer. Instead of that, add probe points via
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 069450938b79..3c248f772ae6 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -951,6 +951,10 @@ This ioctl allows the user to create or modify a guest physical memory
 slot. When changing an existing slot, it may be moved in the guest
 physical memory space, or its flags may be modified. It may not be
 resized. Slots may not overlap in guest physical address space.
+Bits 0-15 of "slot" specifies the slot id and this value should be
+less than the maximum number of user memory slots supported per VM.
+The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
+if this capability is supported by the architecture.
 
 If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
 specifies the address space which is being modified. They must be
diff --git a/Documentation/vm/userfaultfd.txt b/Documentation/vm/userfaultfd.txt
index 0e5543a920e5..bb2f945f87ab 100644
--- a/Documentation/vm/userfaultfd.txt
+++ b/Documentation/vm/userfaultfd.txt
@@ -172,10 +172,6 @@ the same read(2) protocol as for the page fault notifications. The
 manager has to explicitly enable these events by setting appropriate
 bits in uffdio_api.features passed to UFFDIO_API ioctl:
 
-UFFD_FEATURE_EVENT_EXIT - enable notification about exit() of the
-non-cooperative process. When the monitored process exits, the uffd
-manager will get UFFD_EVENT_EXIT.
-
 UFFD_FEATURE_EVENT_FORK - enable userfaultfd hooks for fork(). When
 this feature is enabled, the userfaultfd context of the parent process
 is duplicated into the newly created process. The manager receives
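The retained text above still describes how features are requested through uffdio_api.features. As a hedged sketch (not part of the patch), enabling the fork event could look roughly like this, with the fault-handling thread and registration of memory ranges omitted:

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* userfaultfd has no libc wrapper on older systems; use syscall(). */
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd < 0) {
		perror("userfaultfd");
		return 1;
	}

	/* Handshake: request the fork notification described above. */
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_EVENT_FORK,
	};
	if (ioctl(uffd, UFFDIO_API, &api) < 0) {
		perror("UFFDIO_API");
		return 1;
	}
	printf("features granted: 0x%llx\n", (unsigned long long)api.features);
	return 0;
}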
diff --git a/MAINTAINERS b/MAINTAINERS
index c265a5fe4848..c776906f67a9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8307,7 +8307,6 @@ M: Richard Leitner <richard.leitner@skidata.com>
 L: linux-usb@vger.kernel.org
 S: Maintained
 F: drivers/usb/misc/usb251xb.c
-F: include/linux/platform_data/usb251xb.h
 F: Documentation/devicetree/bindings/usb/usb251xb.txt
 
 MICROSOFT SURFACE PRO 3 BUTTON DRIVER
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index 317ff773e1ca..b18fcb606908 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -11,6 +11,7 @@
 #define _ASM_ARC_HUGEPAGE_H
 
 #include <linux/types.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 static inline pte_t pmd_pte(pmd_t pmd)
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index e94ca72b974e..ee22d40afef4 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -37,6 +37,7 @@
 
 #include <asm/page.h>
 #include <asm/mmu.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 #include <linux/const.h>
 
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index e22089fb44dc..a3f0b3d50089 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -209,6 +209,7 @@
 #define HSR_EC_IABT_HYP (0x21)
 #define HSR_EC_DABT (0x24)
 #define HSR_EC_DABT_HYP (0x25)
+#define HSR_EC_MAX (0x3f)
 
 #define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
 
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index cc495d799c67..31ee468ce667 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -30,7 +30,6 @@
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
 #define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HAVE_ONE_REG
 #define KVM_HALT_POLL_NS_DEFAULT 500000
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index a8d656d9aec7..1c462381c225 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -20,6 +20,7 @@
 
 #else
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index c9a2103faeb9..96dba7cd8be7 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -221,6 +221,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
 		break;
+	case KVM_CAP_NR_MEMSLOTS:
+		r = KVM_USER_MEM_SLOTS;
+		break;
 	case KVM_CAP_MSI_DEVID:
 		if (!kvm)
 			r = -EINVAL;
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 4e40d1955e35..96af65a30d78 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
+		      hsr);
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
+	[0 ... HSR_EC_MAX]	= kvm_handle_unknown_ec,
 	[HSR_EC_WFI]		= kvm_handle_wfx,
 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
@@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
 
-	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
-	    !arm_exit_handlers[hsr_ec]) {
-		kvm_err("Unknown exception class: hsr: %#08x\n",
-			(unsigned int)kvm_vcpu_get_hsr(vcpu));
-		BUG();
-	}
-
 	return arm_exit_handlers[hsr_ec];
 }
 
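The rewritten handler table relies on GCC's range-designated initializer extension: [0 ... HSR_EC_MAX] fills every entry with the fallback first, and the explicit entries that follow override their own slots, so the runtime bounds/NULL check and the BUG() can go away. A hedged, stand-alone illustration of that pattern (not taken from the kernel sources; the names are made up):

#include <stdio.h>

typedef int (*handler_fn)(int code);

static int handle_unknown(int code) { (void)code; return -1; }
static int handle_zero(int code)    { (void)code; return 0; }
static int handle_one(int code)     { (void)code; return 1; }

#define CODE_MAX 0x3f

/* Later designators override the range default, exactly as in the table above. */
static handler_fn handlers[CODE_MAX + 1] = {
	[0 ... CODE_MAX] = handle_unknown,
	[0]              = handle_zero,
	[1]              = handle_one,
};

int main(void)
{
	/* Every index is guaranteed to hold a valid function pointer. */
	printf("%d %d %d\n", handlers[0](0), handlers[1](1), handlers[5](5));
	return 0;
}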
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index ce18c91b50a1..f0325d96b97a 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -198,6 +198,8 @@ static const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.unmap_page = xen_swiotlb_unmap_page,
 	.dma_supported = xen_swiotlb_dma_supported,
 	.set_dma_mask = xen_swiotlb_set_dma_mask,
+	.mmap = xen_swiotlb_dma_mmap,
+	.get_sgtable = xen_swiotlb_get_sgtable,
 };
 
 int __init xen_mm_init(void)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a39029b5414e..8c7c244247b6 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -508,6 +508,16 @@ config QCOM_FALKOR_ERRATUM_1009
 
 	  If unsure, say Y.
 
+config QCOM_QDF2400_ERRATUM_0065
+	bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
+	default y
+	help
+	  On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
+	  ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
+	  been indicated as 16Bytes (0xf), not 8Bytes (0x7).
+
+	  If unsure, say Y.
+
 endmenu
 
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f21fd3894370..e7705e7bb07b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,8 +30,7 @@
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_USER_MEM_SLOTS 512
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HALT_POLL_NS_DEFAULT 500000
 
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
index 69b2fd41503c..345a072b5856 100644
--- a/arch/arm64/include/asm/pgtable-types.h
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -55,9 +55,13 @@ typedef struct { pteval_t pgprot; } pgprot_t;
 #define __pgprot(x) ((pgprot_t) { (x) } )
 
 #if CONFIG_PGTABLE_LEVELS == 2
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 #elif CONFIG_PGTABLE_LEVELS == 3
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
+#elif CONFIG_PGTABLE_LEVELS == 4
+#include <asm-generic/5level-fixup.h>
 #endif
 
 #endif	/* __ASM_PGTABLE_TYPES_H */
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 1bfe30dfbfe7..fa1b18e364fc 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -135,7 +135,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return ret;
 }
 
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
+		      hsr, esr_get_class_string(hsr));
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
+	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
 	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
 	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
 	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
@@ -162,13 +174,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
 	u8 hsr_ec = ESR_ELx_EC(hsr);
 
-	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
-	    !arm_exit_handlers[hsr_ec]) {
-		kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
-			hsr, esr_get_class_string(hsr));
-		BUG();
-	}
-
 	return arm_exit_handlers[hsr_ec];
 }
 
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index e8e7ba2bc11f..9e1d2b75eecd 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -18,14 +18,62 @@
 #include <asm/kvm_hyp.h>
 #include <asm/tlbflush.h>
 
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+{
+	u64 val;
+
+	/*
+	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
+	 * most TLB operations target EL2/EL0. In order to affect the
+	 * guest TLBs (EL1/EL0), we need to change one of these two
+	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
+	 * let's flip TGE before executing the TLB operation.
+	 */
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	val = read_sysreg(hcr_el2);
+	val &= ~HCR_TGE;
+	write_sysreg(val, hcr_el2);
+	isb();
+}
+
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+{
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	isb();
+}
+
+static hyp_alternate_select(__tlb_switch_to_guest,
+			    __tlb_switch_to_guest_nvhe,
+			    __tlb_switch_to_guest_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+{
+	/*
+	 * We're done with the TLB operation, let's restore the host's
+	 * view of HCR_EL2.
+	 */
+	write_sysreg(0, vttbr_el2);
+	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+}
+
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+{
+	write_sysreg(0, vttbr_el2);
+}
+
+static hyp_alternate_select(__tlb_switch_to_host,
+			    __tlb_switch_to_host_nvhe,
+			    __tlb_switch_to_host_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
 	dsb(ishst);
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -46,7 +94,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	dsb(ish);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -55,14 +103,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -70,14 +117,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 
 	/* Switch to requested VMID */
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	__tlbi(vmalle1);
 	dsb(nsh);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
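hyp_alternate_select() patches in one of two implementations depending on a CPU capability, which is why the call sites above read __tlb_switch_to_guest()(kvm): the outer call yields the chosen implementation, the inner call runs it. A hedged plain-C analogy of that double-call shape, selecting by an ordinary runtime flag instead of CPU alternatives (all names here are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

struct kvm;	/* opaque stand-in for the real structure */

static void switch_to_guest_vhe(struct kvm *kvm)  { (void)kvm; puts("VHE path"); }
static void switch_to_guest_nvhe(struct kvm *kvm) { (void)kvm; puts("non-VHE path"); }

static bool has_vhe;	/* in the kernel this is a CPU capability, not a plain flag */

/* Mirrors the shape of hyp_alternate_select(): return the chosen function. */
static void (*tlb_switch_to_guest(void))(struct kvm *)
{
	return has_vhe ? switch_to_guest_vhe : switch_to_guest_nvhe;
}

int main(void)
{
	has_vhe = true;
	tlb_switch_to_guest()(NULL);	/* same double-call style as in the patch */
	return 0;
}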
diff --git a/arch/avr32/include/asm/pgtable-2level.h b/arch/avr32/include/asm/pgtable-2level.h
index 425dd567b5b9..d5b1c63993ec 100644
--- a/arch/avr32/include/asm/pgtable-2level.h
+++ b/arch/avr32/include/asm/pgtable-2level.h
@@ -8,6 +8,7 @@
 #ifndef __ASM_AVR32_PGTABLE_2LEVEL_H
 #define __ASM_AVR32_PGTABLE_2LEVEL_H
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 /*
diff --git a/arch/avr32/oprofile/backtrace.c b/arch/avr32/oprofile/backtrace.c
index 75d9ad6f99cf..29cf2f191bfd 100644
--- a/arch/avr32/oprofile/backtrace.c
+++ b/arch/avr32/oprofile/backtrace.c
@@ -14,7 +14,7 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/sched.h>
+#include <linux/ptrace.h>
 #include <linux/uaccess.h>
 
 /* The first two words of each frame on the stack look like this if we have
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index ae6903d7fdbe..14970f11bbf2 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2086,7 +2086,7 @@ static void cryptocop_job_queue_close(void)
 	dma_in_cfg.en = regk_dma_no;
 	REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg);
 
-	/* Disble the cryptocop. */
+	/* Disable the cryptocop. */
 	rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
 	rw_cfg.en = 0;
 	REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h
index 2a3210ba4c72..fa3a73004cc5 100644
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -6,6 +6,7 @@
 #define _CRIS_PGTABLE_H
 
 #include <asm/page.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #ifndef __ASSEMBLY__
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index a0513d463a1f..ab6e7e961b54 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -16,6 +16,7 @@
 #ifndef _ASM_PGTABLE_H
 #define _ASM_PGTABLE_H
 
+#include <asm-generic/5level-fixup.h>
 #include <asm/mem-layout.h>
 #include <asm/setup.h>
 #include <asm/processor.h>
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
index 8341db67821d..7d265d28ba5e 100644
--- a/arch/h8300/include/asm/pgtable.h
+++ b/arch/h8300/include/asm/pgtable.h
@@ -1,5 +1,6 @@
 #ifndef _H8300_PGTABLE_H
 #define _H8300_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #include <asm-generic/pgtable.h>
 #define pgtable_cache_init() do { } while (0)
diff --git a/arch/h8300/kernel/ptrace_h.c b/arch/h8300/kernel/ptrace_h.c
index fe3b5673baba..f5ff3b794c85 100644
--- a/arch/h8300/kernel/ptrace_h.c
+++ b/arch/h8300/kernel/ptrace_h.c
@@ -9,7 +9,7 @@
  */
 
 #include <linux/linkage.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <asm/ptrace.h>
 
 #define BREAKINST 0x5730 /* trapa #3 */
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index 49eab8136ec3..24a9177fb897 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -26,6 +26,7 @@
  */
 #include <linux/swap.h>
 #include <asm/page.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 /* A handy thing to have if one has the RAM. Declared in head.S */
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 384794e665fc..6cc22c8d8923 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -587,8 +587,10 @@ extern struct page *zero_page_memmap_ptr;
 
 
 #if CONFIG_PGTABLE_LEVELS == 3
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #endif
+#include <asm-generic/5level-fixup.h>
 #include <asm-generic/pgtable.h>
 
 #endif /* _ASM_IA64_PGTABLE_H */
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index ffa3a3a2ecad..0c151e5af079 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
@@ -6,6 +6,7 @@
 #define _METAG_PGTABLE_H
 
 #include <asm/pgtable-bits.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index fd850879854d..d506bb0893f9 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -95,7 +95,8 @@ typedef struct { unsigned long pgd; } pgd_t;
 # else /* CONFIG_MMU */
 typedef struct { unsigned long ste[64]; } pmd_t;
 typedef struct { pmd_t pue[1]; } pud_t;
-typedef struct { pud_t pge[1]; } pgd_t;
+typedef struct { pud_t p4e[1]; } p4d_t;
+typedef struct { p4d_t pge[1]; } pgd_t;
 # endif /* CONFIG_MMU */
 
 # define pte_val(x) ((x).pte)
diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c
index a5b427909b5c..036d56cc4591 100644
--- a/arch/mips/cavium-octeon/cpu.c
+++ b/arch/mips/cavium-octeon/cpu.c
@@ -10,7 +10,9 @@
 #include <linux/irqflags.h>
 #include <linux/notifier.h>
 #include <linux/prefetch.h>
+#include <linux/ptrace.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 
 #include <asm/cop2.h>
 #include <asm/current.h>
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.c b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
index 4d22365844af..cfb4a146cf17 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-crypto.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
@@ -9,6 +9,7 @@
 #include <asm/cop2.h>
 #include <linux/export.h>
 #include <linux/interrupt.h>
+#include <linux/sched/task_stack.h>
 
 #include "octeon-crypto.h"
 
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 4b94b7fbafa3..3de786545ded 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -12,6 +12,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/sched.h>
 #include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
 #include <linux/init.h>
 #include <linux/export.h>
 
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 321752bcbab6..f94455f964ec 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -12,6 +12,7 @@
 
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
+#include <linux/ptrace.h>
 #include <linux/thread_info.h>
 #include <linux/bitops.h>
 
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index d21f3da7bdb6..6f94bed571c4 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -16,6 +16,7 @@
 #include <asm/cachectl.h>
 #include <asm/fixmap.h>
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 extern int temp_tlb_entry;
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 514cbc0a6a67..130a2a6c1531 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -17,6 +17,7 @@
 #include <asm/cachectl.h>
 #include <asm/fixmap.h>
 
+#define __ARCH_USE_5LEVEL_HACK
 #if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
 #include <asm-generic/pgtable-nopmd.h>
 #else
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 3daa2cae50b0..1b070a76fcdd 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/smp.h>
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index e077ea3e11fb..e398cbc3d776 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/irqchip/mips-gic.h>
 #include <linux/compiler.h>
+#include <linux/sched/task_stack.h>
 #include <linux/smp.h>
 
 #include <linux/atomic.h>
diff --git a/arch/mips/loongson64/loongson-3/cop2-ex.c b/arch/mips/loongson64/loongson-3/cop2-ex.c
index ea13764d0a03..621d6af5f6eb 100644
--- a/arch/mips/loongson64/loongson-3/cop2-ex.c
+++ b/arch/mips/loongson64/loongson-3/cop2-ex.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/notifier.h>
+#include <linux/ptrace.h>
 
 #include <asm/fpu.h>
 #include <asm/cop2.h>
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 10d86d54880a..bddf1ef553a4 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/sched/task_stack.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
 
diff --git a/arch/mips/netlogic/xlp/cop2-ex.c b/arch/mips/netlogic/xlp/cop2-ex.c
index 52bc5de42005..21e439b3db70 100644
--- a/arch/mips/netlogic/xlp/cop2-ex.c
+++ b/arch/mips/netlogic/xlp/cop2-ex.c
@@ -9,11 +9,14 @@
  * Copyright (C) 2009 Wind River Systems,
  * written by Ralf Baechle <ralf@linux-mips.org>
  */
+#include <linux/capability.h>
 #include <linux/init.h>
 #include <linux/irqflags.h>
 #include <linux/notifier.h>
 #include <linux/prefetch.h>
+#include <linux/ptrace.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 
 #include <asm/cop2.h>
 #include <asm/current.h>
diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c
index 1f2a5bc4779e..75460e1e106b 100644
--- a/arch/mips/sgi-ip22/ip28-berr.c
+++ b/arch/mips/sgi-ip22/ip28-berr.c
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
 #include <linux/seq_file.h>
 
 #include <asm/addrspace.h>
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
index d12879eb2b1f..83efe03d5c60 100644
--- a/arch/mips/sgi-ip27/ip27-berr.c
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -12,7 +12,9 @@
 #include <linux/signal.h>	/* for SIGBUS */
 #include <linux/sched.h>	/* schow_regs(), force_sig() */
 #include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
 
+#include <asm/ptrace.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/sn0/hub.h>
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index f5ed45e8f442..4cd47d23d81a 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -8,10 +8,13 @@
  */
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <linux/topology.h>
 #include <linux/nodemask.h>
+
 #include <asm/page.h>
 #include <asm/processor.h>
+#include <asm/ptrace.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/gda.h>
 #include <asm/sn/intr.h>
diff --git a/arch/mips/sgi-ip32/ip32-berr.c b/arch/mips/sgi-ip32/ip32-berr.c
index 57d8c7486fe6..c1f12a9cf305 100644
--- a/arch/mips/sgi-ip32/ip32-berr.c
+++ b/arch/mips/sgi-ip32/ip32-berr.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
 #include <asm/traps.h>
 #include <linux/uaccess.h>
 #include <asm/addrspace.h>
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 8bd415c8729f..b3b442def423 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <linux/notifier.h>
 #include <linux/delay.h>
 #include <linux/rtc/ds1685.h>
diff --git a/arch/mn10300/include/asm/page.h b/arch/mn10300/include/asm/page.h
index 3810a6f740fd..dfe730a5ede0 100644
--- a/arch/mn10300/include/asm/page.h
+++ b/arch/mn10300/include/asm/page.h
@@ -57,6 +57,7 @@ typedef struct page *pgtable_t;
 #define __pgd(x) ((pgd_t) { (x) })
 #define __pgprot(x) ((pgprot_t) { (x) })
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 298393c3cb42..db4f7d179220 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -22,6 +22,7 @@
 #include <asm/tlbflush.h>
 
 #include <asm/pgtable-bits.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #define FIRST_USER_ADDRESS 0UL
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 3567aa7be555..ff97374ca069 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -25,6 +25,7 @@
 #ifndef __ASM_OPENRISC_PGTABLE_H
 #define __ASM_OPENRISC_PGTABLE_H
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #ifndef __ASSEMBLY__
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 494091762bd7..97a8bc8a095c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -80,93 +80,99 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK
 config PPC
 	bool
 	default y
-	select BUILDTIME_EXTABLE_SORT
+	#
+	# Please keep this list sorted alphabetically.
+	#
+	select ARCH_HAS_DEVMEM_IS_ALLOWED
+	select ARCH_HAS_DMA_SET_COHERENT_MASK
+	select ARCH_HAS_ELF_RANDOMIZE
+	select ARCH_HAS_GCOV_PROFILE_ALL
+	select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
+	select ARCH_HAS_SG_CHAIN
+	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+	select ARCH_HAS_UBSAN_SANITIZE_ALL
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
+	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+	select ARCH_USE_BUILTIN_BSWAP
+	select ARCH_USE_CMPXCHG_LOCKREF if PPC64
+	select ARCH_WANT_IPC_PARSE_VERSION
 	select BINFMT_ELF
-	select ARCH_HAS_ELF_RANDOMIZE
-	select OF
-	select OF_EARLY_FLATTREE
-	select OF_RESERVED_MEM
-	select HAVE_FTRACE_MCOUNT_RECORD
+	select BUILDTIME_EXTABLE_SORT
+	select CLONE_BACKWARDS
+	select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
+	select EDAC_ATOMIC_SCRUB
+	select EDAC_SUPPORT
+	select GENERIC_ATOMIC64 if PPC32
+	select GENERIC_CLOCKEVENTS
+	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+	select GENERIC_CMOS_UPDATE
+	select GENERIC_CPU_AUTOPROBE
+	select GENERIC_IRQ_SHOW
+	select GENERIC_IRQ_SHOW_LEVEL
+	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_STRNCPY_FROM_USER
+	select GENERIC_STRNLEN_USER
+	select GENERIC_TIME_VSYSCALL_OLD
+	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_HARDENED_USERCOPY
+	select HAVE_ARCH_JUMP_LABEL
+	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_TRACEHOOK
+	select HAVE_CBPF_JIT if !PPC64
+	select HAVE_CONTEXT_TRACKING if PPC64
+	select HAVE_DEBUG_KMEMLEAK
+	select HAVE_DEBUG_STACKOVERFLOW
+	select HAVE_DMA_API_DEBUG
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
-	select HAVE_FUNCTION_TRACER
+	select HAVE_EBPF_JIT if PPC64
+	select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_TRACER
 	select HAVE_GCC_PLUGINS
-	select SYSCTL_EXCEPTION_TRACE
-	select VIRT_TO_BUS if !PPC64
+	select HAVE_GENERIC_RCU_GUP
+	select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
 	select HAVE_IDE
 	select HAVE_IOREMAP_PROT
-	select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+	select HAVE_IRQ_EXIT_ON_IRQ_STACK
+	select HAVE_KERNEL_GZIP
 	select HAVE_KPROBES
-	select HAVE_OPTPROBES if PPC64
-	select HAVE_ARCH_KGDB
 	select HAVE_KRETPROBES
-	select HAVE_ARCH_TRACEHOOK
+	select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
-	select HAVE_DMA_API_DEBUG
+	select HAVE_MOD_ARCH_SPECIFIC
+	select HAVE_NMI if PERF_EVENTS
 	select HAVE_OPROFILE
-	select HAVE_DEBUG_KMEMLEAK
-	select ARCH_HAS_SG_CHAIN
-	select GENERIC_ATOMIC64 if PPC32
+	select HAVE_OPTPROBES if PPC64
 	select HAVE_PERF_EVENTS
+	select HAVE_PERF_EVENTS_NMI if PPC64
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_RCU_TABLE_FREE if SMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
-	select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
-	select ARCH_WANT_IPC_PARSE_VERSION
-	select SPARSE_IRQ
+	select HAVE_SYSCALL_TRACEPOINTS
+	select HAVE_VIRT_CPU_ACCOUNTING
 	select IRQ_DOMAIN
-	select GENERIC_IRQ_SHOW
-	select GENERIC_IRQ_SHOW_LEVEL
 	select IRQ_FORCED_THREADING
-	select HAVE_RCU_TABLE_FREE if SMP
-	select HAVE_SYSCALL_TRACEPOINTS
-	select HAVE_CBPF_JIT if !PPC64
-	select HAVE_EBPF_JIT if PPC64
-	select HAVE_ARCH_JUMP_LABEL
-	select ARCH_HAVE_NMI_SAFE_CMPXCHG
-	select ARCH_HAS_GCOV_PROFILE_ALL
-	select GENERIC_SMP_IDLE_THREAD
-	select GENERIC_CMOS_UPDATE
-	select GENERIC_TIME_VSYSCALL_OLD
-	select GENERIC_CLOCKEVENTS
-	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
-	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
-	select GENERIC_STRNCPY_FROM_USER
-	select GENERIC_STRNLEN_USER
-	select HAVE_MOD_ARCH_SPECIFIC
 	select MODULES_USE_ELF_RELA
-	select CLONE_BACKWARDS
-	select ARCH_USE_BUILTIN_BSWAP
-	select OLD_SIGSUSPEND
-	select OLD_SIGACTION if PPC32
-	select HAVE_DEBUG_STACKOVERFLOW
-	select HAVE_IRQ_EXIT_ON_IRQ_STACK
-	select ARCH_USE_CMPXCHG_LOCKREF if PPC64
-	select HAVE_ARCH_AUDITSYSCALL
-	select ARCH_SUPPORTS_ATOMIC_RMW
-	select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
 	select NO_BOOTMEM
-	select HAVE_GENERIC_RCU_GUP
-	select HAVE_PERF_EVENTS_NMI if PPC64
-	select HAVE_NMI if PERF_EVENTS
-	select EDAC_SUPPORT
-	select EDAC_ATOMIC_SCRUB
+	select OF
+	select OF_EARLY_FLATTREE
+	select OF_RESERVED_MEM
+	select OLD_SIGACTION if PPC32
+	select OLD_SIGSUSPEND
| 158 | select ARCH_HAS_DMA_SET_COHERENT_MASK | 170 | select SPARSE_IRQ |
| 159 | select ARCH_HAS_DEVMEM_IS_ALLOWED | 171 | select SYSCTL_EXCEPTION_TRACE |
| 160 | select HAVE_ARCH_SECCOMP_FILTER | 172 | select VIRT_TO_BUS if !PPC64 |
| 161 | select ARCH_HAS_UBSAN_SANITIZE_ALL | 173 | # |
| 162 | select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT | 174 | # Please keep this list sorted alphabetically. |
| 163 | select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS | 175 | # |
| 164 | select GENERIC_CPU_AUTOPROBE | ||
| 165 | select HAVE_VIRT_CPU_ACCOUNTING | ||
| 166 | select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE | ||
| 167 | select HAVE_ARCH_HARDENED_USERCOPY | ||
| 168 | select HAVE_KERNEL_GZIP | ||
| 169 | select HAVE_CONTEXT_TRACKING if PPC64 | ||
| 170 | 176 | ||
| 171 | config GENERIC_CSUM | 177 | config GENERIC_CSUM |
| 172 | def_bool n | 178 | def_bool n |
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 31286fa7873c..19b0d1a81959 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile | |||
| @@ -72,8 +72,15 @@ GNUTARGET := powerpc | |||
| 72 | MULTIPLEWORD := -mmultiple | 72 | MULTIPLEWORD := -mmultiple |
| 73 | endif | 73 | endif |
| 74 | 74 | ||
| 75 | cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) | 75 | ifdef CONFIG_PPC64 |
| 76 | cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) | ||
| 77 | cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc) | ||
| 78 | aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) | ||
| 79 | aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2 | ||
| 80 | endif | ||
| 81 | |||
| 76 | cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian | 82 | cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian |
| 83 | cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) | ||
| 77 | ifneq ($(cc-name),clang) | 84 | ifneq ($(cc-name),clang) |
| 78 | cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align | 85 | cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align |
| 79 | endif | 86 | endif |
| @@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) | |||
| 113 | CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc)) | 120 | CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc)) |
| 114 | AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2) | 121 | AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2) |
| 115 | else | 122 | else |
| 123 | CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) | ||
| 116 | CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc) | 124 | CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc) |
| 125 | AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) | ||
| 117 | endif | 126 | endif |
| 118 | CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc)) | 127 | CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc)) |
| 119 | CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) | 128 | CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) |
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 012223638815..26ed228d4dc6 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H | 1 | #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H |
| 2 | #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H | 2 | #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H |
| 3 | 3 | ||
| 4 | #define __ARCH_USE_5LEVEL_HACK | ||
| 4 | #include <asm-generic/pgtable-nopmd.h> | 5 | #include <asm-generic/pgtable-nopmd.h> |
| 5 | 6 | ||
| 6 | #include <asm/book3s/32/hash.h> | 7 | #include <asm/book3s/32/hash.h> |
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 1eeeb72c7015..8f4d41936e5a 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h | |||
| @@ -1,9 +1,12 @@ | |||
| 1 | #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ | 1 | #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ |
| 2 | #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ | 2 | #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ |
| 3 | 3 | ||
| 4 | #include <asm-generic/5level-fixup.h> | ||
| 5 | |||
| 4 | #ifndef __ASSEMBLY__ | 6 | #ifndef __ASSEMBLY__ |
| 5 | #include <linux/mmdebug.h> | 7 | #include <linux/mmdebug.h> |
| 6 | #endif | 8 | #endif |
| 9 | |||
| 7 | /* | 10 | /* |
| 8 | * Common bits between hash and Radix page table | 11 | * Common bits between hash and Radix page table |
| 9 | */ | 12 | */ |
| @@ -347,23 +350,58 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, | |||
| 347 | __r; \ | 350 | __r; \ |
| 348 | }) | 351 | }) |
| 349 | 352 | ||
| 353 | static inline int __pte_write(pte_t pte) | ||
| 354 | { | ||
| 355 | return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE)); | ||
| 356 | } | ||
| 357 | |||
| 358 | #ifdef CONFIG_NUMA_BALANCING | ||
| 359 | #define pte_savedwrite pte_savedwrite | ||
| 360 | static inline bool pte_savedwrite(pte_t pte) | ||
| 361 | { | ||
| 362 | /* | ||
| 363 | * Saved write ptes are prot none ptes that don't have the | ||
| 364 | * privileged bit set. We mark prot none as one which has the | ||
| 365 | * present and privileged bits set and RWX cleared. To mark | ||
| 366 | * a protnone pte which used to have _PAGE_WRITE set, we clear | ||
| 367 | * the privileged bit. | ||
| 368 | */ | ||
| 369 | return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED)); | ||
| 370 | } | ||
| 371 | #else | ||
| 372 | #define pte_savedwrite pte_savedwrite | ||
| 373 | static inline bool pte_savedwrite(pte_t pte) | ||
| 374 | { | ||
| 375 | return false; | ||
| 376 | } | ||
| 377 | #endif | ||
| 378 | |||
| 379 | static inline int pte_write(pte_t pte) | ||
| 380 | { | ||
| 381 | return __pte_write(pte) || pte_savedwrite(pte); | ||
| 382 | } | ||
| 383 | |||
| 350 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 384 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
| 351 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | 385 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, |
| 352 | pte_t *ptep) | 386 | pte_t *ptep) |
| 353 | { | 387 | { |
| 354 | if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0) | 388 | if (__pte_write(*ptep)) |
| 355 | return; | 389 | pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0); |
| 356 | 390 | else if (unlikely(pte_savedwrite(*ptep))) | |
| 357 | pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0); | 391 | pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0); |
| 358 | } | 392 | } |
| 359 | 393 | ||
| 360 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | 394 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, |
| 361 | unsigned long addr, pte_t *ptep) | 395 | unsigned long addr, pte_t *ptep) |
| 362 | { | 396 | { |
| 363 | if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0) | 397 | /* |
| 364 | return; | 398 | * We should not find protnone for hugetlb, but this completes the |
| 365 | 399 | * interface. | |
| 366 | pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1); | 400 | */ |
| 401 | if (__pte_write(*ptep)) | ||
| 402 | pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1); | ||
| 403 | else if (unlikely(pte_savedwrite(*ptep))) | ||
| 404 | pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1); | ||
| 367 | } | 405 | } |
| 368 | 406 | ||
| 369 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 407 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
| @@ -397,11 +435,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, | |||
| 397 | pte_update(mm, addr, ptep, ~0UL, 0, 0); | 435 | pte_update(mm, addr, ptep, ~0UL, 0, 0); |
| 398 | } | 436 | } |
| 399 | 437 | ||
| 400 | static inline int pte_write(pte_t pte) | ||
| 401 | { | ||
| 402 | return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE)); | ||
| 403 | } | ||
| 404 | |||
| 405 | static inline int pte_dirty(pte_t pte) | 438 | static inline int pte_dirty(pte_t pte) |
| 406 | { | 439 | { |
| 407 | return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY)); | 440 | return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY)); |
| @@ -465,19 +498,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte) | |||
| 465 | VM_BUG_ON(!pte_protnone(pte)); | 498 | VM_BUG_ON(!pte_protnone(pte)); |
| 466 | return __pte(pte_val(pte) | _PAGE_PRIVILEGED); | 499 | return __pte(pte_val(pte) | _PAGE_PRIVILEGED); |
| 467 | } | 500 | } |
| 468 | 501 | #else | |
| 469 | #define pte_savedwrite pte_savedwrite | 502 | #define pte_clear_savedwrite pte_clear_savedwrite |
| 470 | static inline bool pte_savedwrite(pte_t pte) | 503 | static inline pte_t pte_clear_savedwrite(pte_t pte) |
| 471 | { | 504 | { |
| 472 | /* | 505 | VM_WARN_ON(1); |
| 473 | * Saved write ptes are prot none ptes that doesn't have | 506 | return __pte(pte_val(pte) & ~_PAGE_WRITE); |
| 474 | * privileged bit sit. We mark prot none as one which has | ||
| 475 | * present and pviliged bit set and RWX cleared. To mark | ||
| 476 | * protnone which used to have _PAGE_WRITE set we clear | ||
| 477 | * the privileged bit. | ||
| 478 | */ | ||
| 479 | VM_BUG_ON(!pte_protnone(pte)); | ||
| 480 | return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED)); | ||
| 481 | } | 507 | } |
| 482 | #endif /* CONFIG_NUMA_BALANCING */ | 508 | #endif /* CONFIG_NUMA_BALANCING */ |
| 483 | 509 | ||
| @@ -506,6 +532,8 @@ static inline unsigned long pte_pfn(pte_t pte) | |||
| 506 | /* Generic modifiers for PTE bits */ | 532 | /* Generic modifiers for PTE bits */ |
| 507 | static inline pte_t pte_wrprotect(pte_t pte) | 533 | static inline pte_t pte_wrprotect(pte_t pte) |
| 508 | { | 534 | { |
| 535 | if (unlikely(pte_savedwrite(pte))) | ||
| 536 | return pte_clear_savedwrite(pte); | ||
| 509 | return __pte(pte_val(pte) & ~_PAGE_WRITE); | 537 | return __pte(pte_val(pte) & ~_PAGE_WRITE); |
| 510 | } | 538 | } |
| 511 | 539 | ||
| @@ -926,6 +954,7 @@ static inline int pmd_protnone(pmd_t pmd) | |||
| 926 | 954 | ||
| 927 | #define __HAVE_ARCH_PMD_WRITE | 955 | #define __HAVE_ARCH_PMD_WRITE |
| 928 | #define pmd_write(pmd) pte_write(pmd_pte(pmd)) | 956 | #define pmd_write(pmd) pte_write(pmd_pte(pmd)) |
| 957 | #define __pmd_write(pmd) __pte_write(pmd_pte(pmd)) | ||
| 929 | #define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd)) | 958 | #define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd)) |
| 930 | 959 | ||
| 931 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 960 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| @@ -982,11 +1011,10 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, | |||
| 982 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, | 1011 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, |
| 983 | pmd_t *pmdp) | 1012 | pmd_t *pmdp) |
| 984 | { | 1013 | { |
| 985 | 1014 | if (__pmd_write((*pmdp))) | |
| 986 | if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0) | 1015 | pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0); |
| 987 | return; | 1016 | else if (unlikely(pmd_savedwrite(*pmdp))) |
| 988 | 1017 | pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED); | |
| 989 | pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0); | ||
| 990 | } | 1018 | } |
| 991 | 1019 | ||
| 992 | static inline int pmd_trans_huge(pmd_t pmd) | 1020 | static inline int pmd_trans_huge(pmd_t pmd) |
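The pgtable.h hunks above centre on the NUMA-balancing "saved write" state: a prot-none PTE that used to be writable is encoded with RWX and the privileged bit all clear, and pte_write()/ptep_set_wrprotect() now have to treat that state as logically writable. The toy program below models only that predicate logic; the bit positions are invented for illustration and are not the kernel's real _PAGE_* values.

    /* Toy model of the "saved write" predicates from the hunks above.
     * Bit values are illustrative only, not the kernel's _PAGE_* layout. */
    #include <stdint.h>
    #include <stdio.h>

    #define P_READ     (1ull << 0)
    #define P_WRITE    (1ull << 1)
    #define P_EXEC     (1ull << 2)
    #define P_RWX      (P_READ | P_WRITE | P_EXEC)
    #define P_PRIV     (1ull << 3)   /* privileged */
    #define P_PRESENT  (1ull << 4)

    static int model___pte_write(uint64_t pte)    { return !!(pte & P_WRITE); }
    /* prot-none that used to be writable: present, RWX clear, privileged clear */
    static int model_pte_savedwrite(uint64_t pte) { return !(pte & (P_RWX | P_PRIV)); }
    static int model_pte_write(uint64_t pte)
    {
        return model___pte_write(pte) || model_pte_savedwrite(pte);
    }

    int main(void)
    {
        uint64_t writable = P_PRESENT | P_READ | P_WRITE;
        uint64_t protnone = P_PRESENT | P_PRIV;   /* was read-only */
        uint64_t savedwr  = P_PRESENT;            /* was writable  */

        printf("writable: write=%d savedwrite=%d\n",
               model_pte_write(writable), model_pte_savedwrite(writable));
        printf("protnone: write=%d savedwrite=%d\n",
               model_pte_write(protnone), model_pte_savedwrite(protnone));
        printf("savedwr : write=%d savedwrite=%d\n",
               model_pte_write(savedwr), model_pte_savedwrite(savedwr));
        return 0;
    }

This is also why ptep_set_wrprotect() gains a second branch in the hunk: write-protecting a saved-write PTE means setting the privileged bit back rather than clearing _PAGE_WRITE.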
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index 4e63787dc3be..842124b199b5 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h | |||
| @@ -112,7 +112,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend) | |||
| 112 | 112 | ||
| 113 | #ifdef __powerpc64__ | 113 | #ifdef __powerpc64__ |
| 114 | res += (__force u64)addend; | 114 | res += (__force u64)addend; |
| 115 | return (__force __wsum)((u32)res + (res >> 32)); | 115 | return (__force __wsum) from64to32(res); |
| 116 | #else | 116 | #else |
| 117 | asm("addc %0,%0,%1;" | 117 | asm("addc %0,%0,%1;" |
| 118 | "addze %0,%0;" | 118 | "addze %0,%0;" |
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h index fd321eb423cb..155731557c9b 100644 --- a/arch/powerpc/include/asm/cpuidle.h +++ b/arch/powerpc/include/asm/cpuidle.h | |||
| @@ -70,8 +70,8 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err) | |||
| 70 | std r0,0(r1); \ | 70 | std r0,0(r1); \ |
| 71 | ptesync; \ | 71 | ptesync; \ |
| 72 | ld r0,0(r1); \ | 72 | ld r0,0(r1); \ |
| 73 | 1: cmpd cr0,r0,r0; \ | 73 | 236: cmpd cr0,r0,r0; \ |
| 74 | bne 1b; \ | 74 | bne 236b; \ |
| 75 | IDLE_INST; \ | 75 | IDLE_INST; \ |
| 76 | 76 | ||
| 77 | #define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \ | 77 | #define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \ |
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 93b9b84568e8..09bde6e34f5d 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h | |||
| @@ -144,8 +144,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, | |||
| 144 | #define ARCH_DLINFO_CACHE_GEOMETRY \ | 144 | #define ARCH_DLINFO_CACHE_GEOMETRY \ |
| 145 | NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size); \ | 145 | NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size); \ |
| 146 | NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i)); \ | 146 | NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i)); \ |
| 147 | NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1i.size); \ | 147 | NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size); \ |
| 148 | NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1i)); \ | 148 | NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d)); \ |
| 149 | NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size); \ | 149 | NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size); \ |
| 150 | NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2)); \ | 150 | NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2)); \ |
| 151 | NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size); \ | 151 | NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size); \ |
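The elf.h hunk above is a copy-paste fix: the AT_L1D_* aux vector entries were being filled from the l1i (instruction cache) fields, so userspace saw the I-cache size and geometry reported twice. A hedged userspace sketch of how those entries are read follows; the AT_* constants normally come from the powerpc uapi headers, and the numeric fallbacks here are an assumption for illustration only.

    /* Read the L1 cache aux vector entries the hunk above fixes.
     * The fallback AT_* values are assumed, not authoritative. */
    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef AT_L1I_CACHESIZE
    #define AT_L1I_CACHESIZE     40   /* assumed value */
    #endif
    #ifndef AT_L1D_CACHESIZE
    #define AT_L1D_CACHESIZE     42   /* assumed value */
    #endif
    #ifndef AT_L1D_CACHEGEOMETRY
    #define AT_L1D_CACHEGEOMETRY 43   /* assumed value */
    #endif

    int main(void)
    {
        printf("L1I size: %lu bytes\n", getauxval(AT_L1I_CACHESIZE));
        printf("L1D size: %lu bytes\n", getauxval(AT_L1D_CACHESIZE));
        printf("L1D geometry: 0x%lx\n", getauxval(AT_L1D_CACHEGEOMETRY));
        return 0;
    }

On an unfixed kernel the two size lines typically print the same (I-cache) value; with the fix the D-cache entries reflect ppc64_caches.l1d.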
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index ba9921bf202e..5134ade2e850 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | #ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H | 1 | #ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H |
| 2 | #define _ASM_POWERPC_NOHASH_32_PGTABLE_H | 2 | #define _ASM_POWERPC_NOHASH_32_PGTABLE_H |
| 3 | 3 | ||
| 4 | #define __ARCH_USE_5LEVEL_HACK | ||
| 4 | #include <asm-generic/pgtable-nopmd.h> | 5 | #include <asm-generic/pgtable-nopmd.h> |
| 5 | 6 | ||
| 6 | #ifndef __ASSEMBLY__ | 7 | #ifndef __ASSEMBLY__ |
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h index d0db98793dd8..9f4de0a1035e 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h | |||
| @@ -1,5 +1,8 @@ | |||
| 1 | #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H | 1 | #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H |
| 2 | #define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H | 2 | #define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H |
| 3 | |||
| 4 | #include <asm-generic/5level-fixup.h> | ||
| 5 | |||
| 3 | /* | 6 | /* |
| 4 | * Entries per page directory level. The PTE level must use a 64b record | 7 | * Entries per page directory level. The PTE level must use a 64b record |
| 5 | * for each page table entry. The PMD and PGD level use a 32b record for | 8 | * for each page table entry. The PMD and PGD level use a 32b record for |
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h index 55b28ef3409a..1facb584dd29 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H | 1 | #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H |
| 2 | #define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H | 2 | #define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H |
| 3 | 3 | ||
| 4 | #define __ARCH_USE_5LEVEL_HACK | ||
| 4 | #include <asm-generic/pgtable-nopud.h> | 5 | #include <asm-generic/pgtable-nopud.h> |
| 5 | 6 | ||
| 6 | 7 | ||
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 0cd8a3852763..e5805ad78e12 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h | |||
| @@ -230,7 +230,7 @@ static inline int hugepd_ok(hugepd_t hpd) | |||
| 230 | return ((hpd_val(hpd) & 0x4) != 0); | 230 | return ((hpd_val(hpd) & 0x4) != 0); |
| 231 | #else | 231 | #else |
| 232 | /* We clear the top bit to indicate hugepd */ | 232 | /* We clear the top bit to indicate hugepd */ |
| 233 | return ((hpd_val(hpd) & PD_HUGE) == 0); | 233 | return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0); |
| 234 | #endif | 234 | #endif |
| 235 | } | 235 | } |
| 236 | 236 | ||
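The nohash hugepd_ok() hunk above adds a non-zero check: because a huge page directory pointer is flagged by a *cleared* top bit, an entirely empty entry previously satisfied the test as well. A standalone illustration of the before/after predicate is below; treating PD_HUGE as the top bit is an assumption made only for this sketch.

    /* Illustration of the hugepd_ok() change: an all-zero entry must not be
     * treated as a hugepd just because its top bit happens to be clear. */
    #include <stdio.h>

    #define PD_HUGE 0x8000000000000000ULL

    static int old_hugepd_ok(unsigned long long hpd) { return (hpd & PD_HUGE) == 0; }
    static int new_hugepd_ok(unsigned long long hpd) { return hpd && (hpd & PD_HUGE) == 0; }

    int main(void)
    {
        unsigned long long empty  = 0;                     /* cleared slot          */
        unsigned long long hugepd = 0x0000c00000001000ULL; /* made-up hugepd value  */

        printf("empty : old=%d new=%d\n", old_hugepd_ok(empty), new_hugepd_ok(empty));
        printf("hugepd: old=%d new=%d\n", old_hugepd_ok(hugepd), new_hugepd_ok(hugepd));
        return 0;
    }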
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index d99bd442aacb..e7d6d86563ee 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h | |||
| @@ -284,6 +284,13 @@ | |||
| 284 | #define PPC_INST_BRANCH_COND 0x40800000 | 284 | #define PPC_INST_BRANCH_COND 0x40800000 |
| 285 | #define PPC_INST_LBZCIX 0x7c0006aa | 285 | #define PPC_INST_LBZCIX 0x7c0006aa |
| 286 | #define PPC_INST_STBCIX 0x7c0007aa | 286 | #define PPC_INST_STBCIX 0x7c0007aa |
| 287 | #define PPC_INST_LWZX 0x7c00002e | ||
| 288 | #define PPC_INST_LFSX 0x7c00042e | ||
| 289 | #define PPC_INST_STFSX 0x7c00052e | ||
| 290 | #define PPC_INST_LFDX 0x7c0004ae | ||
| 291 | #define PPC_INST_STFDX 0x7c0005ae | ||
| 292 | #define PPC_INST_LVX 0x7c0000ce | ||
| 293 | #define PPC_INST_STVX 0x7c0001ce | ||
| 287 | 294 | ||
| 288 | /* macros to insert fields into opcodes */ | 295 | /* macros to insert fields into opcodes */ |
| 289 | #define ___PPC_RA(a) (((a) & 0x1f) << 16) | 296 | #define ___PPC_RA(a) (((a) & 0x1f) << 16) |
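The new PPC_INST_* values above are X-form opcode templates; the emulate_step self-test added later in this series ORs register fields into them with ___PPC_RT/RA/RB-style macros. A minimal sketch of that composition is below, with the field shifts restated from the standard X-form layout rather than pulled from the kernel headers.

    /* Compose an X-form instruction from an opcode template plus register
     * fields, mirroring the ___PPC_RT/RA/RB macros shown in the hunk. */
    #include <stdint.h>
    #include <stdio.h>

    #define PPC_INST_LWZX 0x7c00002e
    #define PPC_RT(r) (((uint32_t)(r) & 0x1f) << 21)
    #define PPC_RA(r) (((uint32_t)(r) & 0x1f) << 16)
    #define PPC_RB(r) (((uint32_t)(r) & 0x1f) << 11)

    int main(void)
    {
        /* lwzx r5, r3, r4 */
        uint32_t insn = PPC_INST_LWZX | PPC_RT(5) | PPC_RA(3) | PPC_RB(4);

        printf("lwzx r5,r3,r4 = 0x%08x\n", (unsigned int)insn); /* expect 0x7ca3202e */
        return 0;
    }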
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 4a90634e8322..35c00d7a0cf8 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h | |||
| @@ -160,12 +160,18 @@ struct of_drconf_cell { | |||
| 160 | #define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */ | 160 | #define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */ |
| 161 | #define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */ | 161 | #define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */ |
| 162 | #define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */ | 162 | #define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */ |
| 163 | #define OV5_MMU_RADIX_300 0x1880 /* ISA v3.00 radix MMU supported */ | 163 | /* MMU Base Architecture */ |
| 164 | #define OV5_MMU_HASH_300 0x1840 /* ISA v3.00 hash MMU supported */ | 164 | #define OV5_MMU_SUPPORT 0x18C0 /* MMU Mode Support Mask */ |
| 165 | #define OV5_MMU_SEGM_RADIX 0x1820 /* radix mode (no segmentation) */ | 165 | #define OV5_MMU_HASH 0x1800 /* Hash MMU Only */ |
| 166 | #define OV5_MMU_PROC_TBL 0x1810 /* hcall selects SLB or proc table */ | 166 | #define OV5_MMU_RADIX 0x1840 /* Radix MMU Only */ |
| 167 | #define OV5_MMU_SLB 0x1800 /* always use SLB */ | 167 | #define OV5_MMU_EITHER 0x1880 /* Hash or Radix Supported */ |
| 168 | #define OV5_MMU_GTSE 0x1808 /* Guest translation shootdown */ | 168 | #define OV5_MMU_DYNAMIC 0x18C0 /* Hash or Radix Can Switch Later */ |
| 169 | #define OV5_NMMU 0x1820 /* Nest MMU Available */ | ||
| 170 | /* Hash Table Extensions */ | ||
| 171 | #define OV5_HASH_SEG_TBL 0x1980 /* In Memory Segment Tables Available */ | ||
| 172 | #define OV5_HASH_GTSE 0x1940 /* Guest Translation Shoot Down Avail */ | ||
| 173 | /* Radix Table Extensions */ | ||
| 174 | #define OV5_RADIX_GTSE 0x1A40 /* Guest Translation Shoot Down Avail */ | ||
| 169 | 175 | ||
| 170 | /* Option Vector 6: IBM PAPR hints */ | 176 | /* Option Vector 6: IBM PAPR hints */ |
| 171 | #define OV6_LINUX 0x02 /* Linux is our OS */ | 177 | #define OV6_LINUX 0x02 /* Linux is our OS */ |
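The reworked OV5_* constants above pack two things into one value: the upper byte is the byte index within option vector 5 and the lower byte is the feature mask inside that byte. That is how the OV5_INDX()/OV5_FEAT() users later in this diff treat them; the macro bodies below are restated from that usage as an assumption, not quoted from the header. A small decoder:

    /* Decode the OV5 index/mask encoding used by the constants above. */
    #include <stdio.h>

    #define OV5_INDX(x) ((x) >> 8)
    #define OV5_FEAT(x) ((x) & 0xff)

    #define OV5_MMU_SUPPORT 0x18C0  /* MMU mode support mask       */
    #define OV5_MMU_RADIX   0x1840  /* radix MMU only              */
    #define OV5_RADIX_GTSE  0x1A40  /* radix guest TLB shootdown   */

    int main(void)
    {
        printf("MMU support : byte %d, mask 0x%02x\n",
               OV5_INDX(OV5_MMU_SUPPORT), OV5_FEAT(OV5_MMU_SUPPORT));
        printf("Radix only  : byte %d, mask 0x%02x\n",
               OV5_INDX(OV5_MMU_RADIX), OV5_FEAT(OV5_MMU_RADIX));
        printf("Radix GTSE  : byte %d, mask 0x%02x\n",
               OV5_INDX(OV5_RADIX_GTSE), OV5_FEAT(OV5_RADIX_GTSE));
        return 0;
    }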
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 5f61cc0349c0..995728736677 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S | |||
| @@ -276,19 +276,21 @@ power_enter_stop: | |||
| 276 | */ | 276 | */ |
| 277 | andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED | 277 | andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED |
| 278 | clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */ | 278 | clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */ |
| 279 | bne 1f | 279 | bne .Lhandle_esl_ec_set |
| 280 | IDLE_STATE_ENTER_SEQ(PPC_STOP) | 280 | IDLE_STATE_ENTER_SEQ(PPC_STOP) |
| 281 | li r3,0 /* Since we didn't lose state, return 0 */ | 281 | li r3,0 /* Since we didn't lose state, return 0 */ |
| 282 | b pnv_wakeup_noloss | 282 | b pnv_wakeup_noloss |
| 283 | |||
| 284 | .Lhandle_esl_ec_set: | ||
| 283 | /* | 285 | /* |
| 284 | * Check if the requested state is a deep idle state. | 286 | * Check if the requested state is a deep idle state. |
| 285 | */ | 287 | */ |
| 286 | 1: LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state) | 288 | LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state) |
| 287 | ld r4,ADDROFF(pnv_first_deep_stop_state)(r5) | 289 | ld r4,ADDROFF(pnv_first_deep_stop_state)(r5) |
| 288 | cmpd r3,r4 | 290 | cmpd r3,r4 |
| 289 | bge 2f | 291 | bge .Lhandle_deep_stop |
| 290 | IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP) | 292 | IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP) |
| 291 | 2: | 293 | .Lhandle_deep_stop: |
| 292 | /* | 294 | /* |
| 293 | * Entering deep idle state. | 295 | * Entering deep idle state. |
| 294 | * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to | 296 | * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index a3944540fe0d..1c1b44ec7642 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
| @@ -168,6 +168,14 @@ static unsigned long __initdata prom_tce_alloc_start; | |||
| 168 | static unsigned long __initdata prom_tce_alloc_end; | 168 | static unsigned long __initdata prom_tce_alloc_end; |
| 169 | #endif | 169 | #endif |
| 170 | 170 | ||
| 171 | static bool __initdata prom_radix_disable; | ||
| 172 | |||
| 173 | struct platform_support { | ||
| 174 | bool hash_mmu; | ||
| 175 | bool radix_mmu; | ||
| 176 | bool radix_gtse; | ||
| 177 | }; | ||
| 178 | |||
| 171 | /* Platforms codes are now obsolete in the kernel. Now only used within this | 179 | /* Platforms codes are now obsolete in the kernel. Now only used within this |
| 172 | * file and ultimately gone too. Feel free to change them if you need, they | 180 | * file and ultimately gone too. Feel free to change them if you need, they |
| 173 | * are not shared with anything outside of this file anymore | 181 | * are not shared with anything outside of this file anymore |
| @@ -626,6 +634,12 @@ static void __init early_cmdline_parse(void) | |||
| 626 | prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); | 634 | prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); |
| 627 | #endif | 635 | #endif |
| 628 | } | 636 | } |
| 637 | |||
| 638 | opt = strstr(prom_cmd_line, "disable_radix"); | ||
| 639 | if (opt) { | ||
| 640 | prom_debug("Radix disabled from cmdline\n"); | ||
| 641 | prom_radix_disable = true; | ||
| 642 | } | ||
| 629 | } | 643 | } |
| 630 | 644 | ||
| 631 | #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) | 645 | #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) |
| @@ -695,6 +709,8 @@ struct option_vector5 { | |||
| 695 | u8 byte22; | 709 | u8 byte22; |
| 696 | u8 intarch; | 710 | u8 intarch; |
| 697 | u8 mmu; | 711 | u8 mmu; |
| 712 | u8 hash_ext; | ||
| 713 | u8 radix_ext; | ||
| 698 | } __packed; | 714 | } __packed; |
| 699 | 715 | ||
| 700 | struct option_vector6 { | 716 | struct option_vector6 { |
| @@ -850,8 +866,9 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { | |||
| 850 | .reserved3 = 0, | 866 | .reserved3 = 0, |
| 851 | .subprocessors = 1, | 867 | .subprocessors = 1, |
| 852 | .intarch = 0, | 868 | .intarch = 0, |
| 853 | .mmu = OV5_FEAT(OV5_MMU_RADIX_300) | OV5_FEAT(OV5_MMU_HASH_300) | | 869 | .mmu = 0, |
| 854 | OV5_FEAT(OV5_MMU_PROC_TBL) | OV5_FEAT(OV5_MMU_GTSE), | 870 | .hash_ext = 0, |
| 871 | .radix_ext = 0, | ||
| 855 | }, | 872 | }, |
| 856 | 873 | ||
| 857 | /* option vector 6: IBM PAPR hints */ | 874 | /* option vector 6: IBM PAPR hints */ |
| @@ -990,6 +1007,92 @@ static int __init prom_count_smt_threads(void) | |||
| 990 | 1007 | ||
| 991 | } | 1008 | } |
| 992 | 1009 | ||
| 1010 | static void __init prom_parse_mmu_model(u8 val, | ||
| 1011 | struct platform_support *support) | ||
| 1012 | { | ||
| 1013 | switch (val) { | ||
| 1014 | case OV5_FEAT(OV5_MMU_DYNAMIC): | ||
| 1015 | case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */ | ||
| 1016 | prom_debug("MMU - either supported\n"); | ||
| 1017 | support->radix_mmu = !prom_radix_disable; | ||
| 1018 | support->hash_mmu = true; | ||
| 1019 | break; | ||
| 1020 | case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */ | ||
| 1021 | prom_debug("MMU - radix only\n"); | ||
| 1022 | if (prom_radix_disable) { | ||
| 1023 | /* | ||
| 1024 | * If we __have__ to do radix, we're better off ignoring | ||
| 1025 | * the command line rather than not booting. | ||
| 1026 | */ | ||
| 1027 | prom_printf("WARNING: Ignoring cmdline option disable_radix\n"); | ||
| 1028 | } | ||
| 1029 | support->radix_mmu = true; | ||
| 1030 | break; | ||
| 1031 | case OV5_FEAT(OV5_MMU_HASH): | ||
| 1032 | prom_debug("MMU - hash only\n"); | ||
| 1033 | support->hash_mmu = true; | ||
| 1034 | break; | ||
| 1035 | default: | ||
| 1036 | prom_debug("Unknown mmu support option: 0x%x\n", val); | ||
| 1037 | break; | ||
| 1038 | } | ||
| 1039 | } | ||
| 1040 | |||
| 1041 | static void __init prom_parse_platform_support(u8 index, u8 val, | ||
| 1042 | struct platform_support *support) | ||
| 1043 | { | ||
| 1044 | switch (index) { | ||
| 1045 | case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */ | ||
| 1046 | prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support); | ||
| 1047 | break; | ||
| 1048 | case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */ | ||
| 1049 | if (val & OV5_FEAT(OV5_RADIX_GTSE)) { | ||
| 1050 | prom_debug("Radix - GTSE supported\n"); | ||
| 1051 | support->radix_gtse = true; | ||
| 1052 | } | ||
| 1053 | break; | ||
| 1054 | } | ||
| 1055 | } | ||
| 1056 | |||
| 1057 | static void __init prom_check_platform_support(void) | ||
| 1058 | { | ||
| 1059 | struct platform_support supported = { | ||
| 1060 | .hash_mmu = false, | ||
| 1061 | .radix_mmu = false, | ||
| 1062 | .radix_gtse = false | ||
| 1063 | }; | ||
| 1064 | int prop_len = prom_getproplen(prom.chosen, | ||
| 1065 | "ibm,arch-vec-5-platform-support"); | ||
| 1066 | if (prop_len > 1) { | ||
| 1067 | int i; | ||
| 1068 | u8 vec[prop_len]; | ||
| 1069 | prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n", | ||
| 1070 | prop_len); | ||
| 1071 | prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", | ||
| 1072 | &vec, sizeof(vec)); | ||
| 1073 | for (i = 0; i < prop_len; i += 2) { | ||
| 1074 | prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 | ||
| 1075 | , vec[i] | ||
| 1076 | , vec[i + 1]); | ||
| 1077 | prom_parse_platform_support(vec[i], vec[i + 1], | ||
| 1078 | &supported); | ||
| 1079 | } | ||
| 1080 | } | ||
| 1081 | |||
| 1082 | if (supported.radix_mmu && supported.radix_gtse) { | ||
| 1083 | /* Radix preferred - but we require GTSE for now */ | ||
| 1084 | prom_debug("Asking for radix with GTSE\n"); | ||
| 1085 | ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX); | ||
| 1086 | ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE); | ||
| 1087 | } else if (supported.hash_mmu) { | ||
| 1088 | /* Default to hash mmu (if we can) */ | ||
| 1089 | prom_debug("Asking for hash\n"); | ||
| 1090 | ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH); | ||
| 1091 | } else { | ||
| 1092 | /* We're probably on a legacy hypervisor */ | ||
| 1093 | prom_debug("Assuming legacy hash support\n"); | ||
| 1094 | } | ||
| 1095 | } | ||
| 993 | 1096 | ||
| 994 | static void __init prom_send_capabilities(void) | 1097 | static void __init prom_send_capabilities(void) |
| 995 | { | 1098 | { |
| @@ -997,6 +1100,9 @@ static void __init prom_send_capabilities(void) | |||
| 997 | prom_arg_t ret; | 1100 | prom_arg_t ret; |
| 998 | u32 cores; | 1101 | u32 cores; |
| 999 | 1102 | ||
| 1103 | /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */ | ||
| 1104 | prom_check_platform_support(); | ||
| 1105 | |||
| 1000 | root = call_prom("open", 1, 1, ADDR("/")); | 1106 | root = call_prom("open", 1, 1, ADDR("/")); |
| 1001 | if (root != 0) { | 1107 | if (root != 0) { |
| 1002 | /* We need to tell the FW about the number of cores we support. | 1108 | /* We need to tell the FW about the number of cores we support. |
| @@ -2993,6 +3099,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, | |||
| 2993 | */ | 3099 | */ |
| 2994 | prom_check_initrd(r3, r4); | 3100 | prom_check_initrd(r3, r4); |
| 2995 | 3101 | ||
| 3102 | /* | ||
| 3103 | * Do early parsing of command line | ||
| 3104 | */ | ||
| 3105 | early_cmdline_parse(); | ||
| 3106 | |||
| 2996 | #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) | 3107 | #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) |
| 2997 | /* | 3108 | /* |
| 2998 | * On pSeries, inform the firmware about our capabilities | 3109 | * On pSeries, inform the firmware about our capabilities |
| @@ -3009,11 +3120,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, | |||
| 3009 | copy_and_flush(0, kbase, 0x100, 0); | 3120 | copy_and_flush(0, kbase, 0x100, 0); |
| 3010 | 3121 | ||
| 3011 | /* | 3122 | /* |
| 3012 | * Do early parsing of command line | ||
| 3013 | */ | ||
| 3014 | early_cmdline_parse(); | ||
| 3015 | |||
| 3016 | /* | ||
| 3017 | * Initialize memory management within prom_init | 3123 | * Initialize memory management within prom_init |
| 3018 | */ | 3124 | */ |
| 3019 | prom_init_mem(); | 3125 | prom_init_mem(); |
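prom_check_platform_support() above walks the ibm,arch-vec-5-platform-support property as a flat list of (index, value) byte pairs and then asks for radix (only when GTSE is also advertised) or falls back to hash before sending the client architecture vector. The sketch below mirrors that decision in userspace with an invented property buffer, reusing the OV5 index/mask convention described earlier; it is a model of the logic, not firmware-accurate data.

    /* Model of the platform-support parsing: (index, value) byte pairs are
     * matched against OV5-encoded constants, then the MMU choice is made. */
    #include <stdbool.h>
    #include <stdio.h>

    #define OV5_INDX(x) ((x) >> 8)
    #define OV5_FEAT(x) ((x) & 0xff)
    #define OV5_MMU_SUPPORT 0x18C0
    #define OV5_MMU_EITHER  0x1880
    #define OV5_RADIX_GTSE  0x1A40

    int main(void)
    {
        /* pretend firmware reported: MMU byte says "either", GTSE supported */
        unsigned char vec[] = {
            OV5_INDX(OV5_MMU_SUPPORT), OV5_FEAT(OV5_MMU_EITHER),
            OV5_INDX(OV5_RADIX_GTSE),  OV5_FEAT(OV5_RADIX_GTSE),
        };
        bool radix = false, gtse = false;

        for (unsigned int i = 0; i + 1 < sizeof(vec); i += 2) {
            if (vec[i] == OV5_INDX(OV5_MMU_SUPPORT) &&
                (vec[i + 1] & OV5_FEAT(OV5_MMU_SUPPORT)) == OV5_FEAT(OV5_MMU_EITHER))
                radix = true;
            if (vec[i] == OV5_INDX(OV5_RADIX_GTSE) &&
                (vec[i + 1] & OV5_FEAT(OV5_RADIX_GTSE)))
                gtse = true;
        }
        printf("radix possible: %d, gtse: %d -> ask for %s\n",
               radix, gtse, (radix && gtse) ? "radix + GTSE" : "hash");
        return 0;
    }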
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index adf2084f214b..9cfaa8b69b5f 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
| @@ -408,7 +408,10 @@ static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize, | |||
| 408 | info->line_size = lsize; | 408 | info->line_size = lsize; |
| 409 | info->block_size = bsize; | 409 | info->block_size = bsize; |
| 410 | info->log_block_size = __ilog2(bsize); | 410 | info->log_block_size = __ilog2(bsize); |
| 411 | info->blocks_per_page = PAGE_SIZE / bsize; | 411 | if (bsize) |
| 412 | info->blocks_per_page = PAGE_SIZE / bsize; | ||
| 413 | else | ||
| 414 | info->blocks_per_page = 0; | ||
| 412 | 415 | ||
| 413 | if (sets == 0) | 416 | if (sets == 0) |
| 414 | info->assoc = 0xffff; | 417 | info->assoc = 0xffff; |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index f3158fb16de3..8c68145ba1bd 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
| @@ -601,7 +601,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 601 | hva, NULL, NULL); | 601 | hva, NULL, NULL); |
| 602 | if (ptep) { | 602 | if (ptep) { |
| 603 | pte = kvmppc_read_update_linux_pte(ptep, 1); | 603 | pte = kvmppc_read_update_linux_pte(ptep, 1); |
| 604 | if (pte_write(pte)) | 604 | if (__pte_write(pte)) |
| 605 | write_ok = 1; | 605 | write_ok = 1; |
| 606 | } | 606 | } |
| 607 | local_irq_restore(flags); | 607 | local_irq_restore(flags); |
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 6fca970373ee..ce6f2121fffe 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c | |||
| @@ -256,7 +256,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
| 256 | } | 256 | } |
| 257 | pte = kvmppc_read_update_linux_pte(ptep, writing); | 257 | pte = kvmppc_read_update_linux_pte(ptep, writing); |
| 258 | if (pte_present(pte) && !pte_protnone(pte)) { | 258 | if (pte_present(pte) && !pte_protnone(pte)) { |
| 259 | if (writing && !pte_write(pte)) | 259 | if (writing && !__pte_write(pte)) |
| 260 | /* make the actual HPTE be read-only */ | 260 | /* make the actual HPTE be read-only */ |
| 261 | ptel = hpte_make_readonly(ptel); | 261 | ptel = hpte_make_readonly(ptel); |
| 262 | is_ci = pte_ci(pte); | 262 | is_ci = pte_ci(pte); |
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 0e649d72fe8d..2b5e09020cfe 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile | |||
| @@ -20,6 +20,7 @@ obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \ | |||
| 20 | 20 | ||
| 21 | obj64-$(CONFIG_SMP) += locks.o | 21 | obj64-$(CONFIG_SMP) += locks.o |
| 22 | obj64-$(CONFIG_ALTIVEC) += vmx-helper.o | 22 | obj64-$(CONFIG_ALTIVEC) += vmx-helper.o |
| 23 | obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o | ||
| 23 | 24 | ||
| 24 | obj-y += checksum_$(BITS).o checksum_wrappers.o | 25 | obj-y += checksum_$(BITS).o checksum_wrappers.o |
| 25 | 26 | ||
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 846dba2c6360..9c542ec70c5b 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c | |||
| @@ -1799,8 +1799,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
| 1799 | goto instr_done; | 1799 | goto instr_done; |
| 1800 | 1800 | ||
| 1801 | case LARX: | 1801 | case LARX: |
| 1802 | if (regs->msr & MSR_LE) | ||
| 1803 | return 0; | ||
| 1804 | if (op.ea & (size - 1)) | 1802 | if (op.ea & (size - 1)) |
| 1805 | break; /* can't handle misaligned */ | 1803 | break; /* can't handle misaligned */ |
| 1806 | if (!address_ok(regs, op.ea, size)) | 1804 | if (!address_ok(regs, op.ea, size)) |
| @@ -1823,8 +1821,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
| 1823 | goto ldst_done; | 1821 | goto ldst_done; |
| 1824 | 1822 | ||
| 1825 | case STCX: | 1823 | case STCX: |
| 1826 | if (regs->msr & MSR_LE) | ||
| 1827 | return 0; | ||
| 1828 | if (op.ea & (size - 1)) | 1824 | if (op.ea & (size - 1)) |
| 1829 | break; /* can't handle misaligned */ | 1825 | break; /* can't handle misaligned */ |
| 1830 | if (!address_ok(regs, op.ea, size)) | 1826 | if (!address_ok(regs, op.ea, size)) |
| @@ -1849,8 +1845,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
| 1849 | goto ldst_done; | 1845 | goto ldst_done; |
| 1850 | 1846 | ||
| 1851 | case LOAD: | 1847 | case LOAD: |
| 1852 | if (regs->msr & MSR_LE) | ||
| 1853 | return 0; | ||
| 1854 | err = read_mem(&regs->gpr[op.reg], op.ea, size, regs); | 1848 | err = read_mem(&regs->gpr[op.reg], op.ea, size, regs); |
| 1855 | if (!err) { | 1849 | if (!err) { |
| 1856 | if (op.type & SIGNEXT) | 1850 | if (op.type & SIGNEXT) |
| @@ -1862,8 +1856,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
| 1862 | 1856 | ||
| 1863 | #ifdef CONFIG_PPC_FPU | 1857 | #ifdef CONFIG_PPC_FPU |
| 1864 | case LOAD_FP: | 1858 | case LOAD_FP: |
| 1865 | if (regs->msr & MSR_LE) | ||
| 1866 | return 0; | ||
| 1867 | if (size == 4) | 1859 | if (size == 4) |
| 1868 | err = do_fp_load(op.reg, do_lfs, op.ea, size, regs); | 1860 | err = do_fp_load(op.reg, do_lfs, op.ea, size, regs); |
| 1869 | else | 1861 | else |
| @@ -1872,15 +1864,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
| 1872 | #endif | 1864 | #endif |
| 1873 | #ifdef CONFIG_ALTIVEC | 1865 | #ifdef CONFIG_ALTIVEC |
| 1874 | case LOAD_VMX: | 1866 | case LOAD_VMX: |
| 1875 | if (regs->msr & MSR_LE) | ||
| 1876 | return 0; | ||
| 1877 | err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs); | 1867 | err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs); |
| 1878 | goto ldst_done; | 1868 | goto ldst_done; |
| 1879 | #endif | 1869 | #endif |
| 1880 | #ifdef CONFIG_VSX | 1870 | #ifdef CONFIG_VSX |
| 1881 | case LOAD_VSX: | 1871 | case LOAD_VSX: |
| 1882 | if (regs->msr & MSR_LE) | ||
| 1883 | return 0; | ||
| 1884 | err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs); | 1872 | err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs); |
| 1885 | goto ldst_done; | 1873 | goto ldst_done; |
| 1886 | #endif | 1874 | #endif |
| @@ -1903,8 +1891,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
| 1903 | goto instr_done; | 1891 | goto instr_done; |
| 1904 | 1892 | ||
| 1905 | case STORE: | 1893 | case STORE: |
| 1906 | if (regs->msr & MSR_LE) | ||
| 1907 | return 0; | ||
| 1908 | if ((op.type & UPDATE) && size == sizeof(long) && | 1894 | if ((op.type & UPDATE) && size == sizeof(long) && |
| 1909 | op.reg == 1 && op.update_reg == 1 && | 1895 | op.reg == 1 && op.update_reg == 1 && |
| 1910 | !(regs->msr & MSR_PR) && | 1896 | !(regs->msr & MSR_PR) && |
| @@ -1917,8 +1903,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
| 1917 | 1903 | ||
| 1918 | #ifdef CONFIG_PPC_FPU | 1904 | #ifdef CONFIG_PPC_FPU |
| 1919 | case STORE_FP: | 1905 | case STORE_FP: |
| 1920 | if (regs->msr & MSR_LE) | ||
| 1921 | return 0; | ||
| 1922 | if (size == 4) | 1906 | if (size == 4) |
| 1923 | err = do_fp_store(op.reg, do_stfs, op.ea, size, regs); | 1907 | err = do_fp_store(op.reg, do_stfs, op.ea, size, regs); |
| 1924 | else | 1908 | else |
| @@ -1927,15 +1911,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
| 1927 | #endif | 1911 | #endif |
| 1928 | #ifdef CONFIG_ALTIVEC | 1912 | #ifdef CONFIG_ALTIVEC |
| 1929 | case STORE_VMX: | 1913 | case STORE_VMX: |
| 1930 | if (regs->msr & MSR_LE) | ||
| 1931 | return 0; | ||
| 1932 | err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs); | 1914 | err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs); |
| 1933 | goto ldst_done; | 1915 | goto ldst_done; |
| 1934 | #endif | 1916 | #endif |
| 1935 | #ifdef CONFIG_VSX | 1917 | #ifdef CONFIG_VSX |
| 1936 | case STORE_VSX: | 1918 | case STORE_VSX: |
| 1937 | if (regs->msr & MSR_LE) | ||
| 1938 | return 0; | ||
| 1939 | err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs); | 1919 | err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs); |
| 1940 | goto ldst_done; | 1920 | goto ldst_done; |
| 1941 | #endif | 1921 | #endif |
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c new file mode 100644 index 000000000000..2534c1447554 --- /dev/null +++ b/arch/powerpc/lib/test_emulate_step.c | |||
| @@ -0,0 +1,434 @@ | |||
| 1 | /* | ||
| 2 | * Simple sanity test for emulate_step load/store instructions. | ||
| 3 | * | ||
| 4 | * Copyright IBM Corp. 2016 | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #define pr_fmt(fmt) "emulate_step_test: " fmt | ||
| 13 | |||
| 14 | #include <linux/ptrace.h> | ||
| 15 | #include <asm/sstep.h> | ||
| 16 | #include <asm/ppc-opcode.h> | ||
| 17 | |||
| 18 | #define IMM_L(i) ((uintptr_t)(i) & 0xffff) | ||
| 19 | |||
| 20 | /* | ||
| 21 | * Defined with TEST_ prefix so it does not conflict with other | ||
| 22 | * definitions. | ||
| 23 | */ | ||
| 24 | #define TEST_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | \ | ||
| 25 | ___PPC_RA(base) | IMM_L(i)) | ||
| 26 | #define TEST_LWZ(r, base, i) (PPC_INST_LWZ | ___PPC_RT(r) | \ | ||
| 27 | ___PPC_RA(base) | IMM_L(i)) | ||
| 28 | #define TEST_LWZX(t, a, b) (PPC_INST_LWZX | ___PPC_RT(t) | \ | ||
| 29 | ___PPC_RA(a) | ___PPC_RB(b)) | ||
| 30 | #define TEST_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | \ | ||
| 31 | ___PPC_RA(base) | ((i) & 0xfffc)) | ||
| 32 | #define TEST_LDARX(t, a, b, eh) (PPC_INST_LDARX | ___PPC_RT(t) | \ | ||
| 33 | ___PPC_RA(a) | ___PPC_RB(b) | \ | ||
| 34 | __PPC_EH(eh)) | ||
| 35 | #define TEST_STDCX(s, a, b) (PPC_INST_STDCX | ___PPC_RS(s) | \ | ||
| 36 | ___PPC_RA(a) | ___PPC_RB(b)) | ||
| 37 | #define TEST_LFSX(t, a, b) (PPC_INST_LFSX | ___PPC_RT(t) | \ | ||
| 38 | ___PPC_RA(a) | ___PPC_RB(b)) | ||
| 39 | #define TEST_STFSX(s, a, b) (PPC_INST_STFSX | ___PPC_RS(s) | \ | ||
| 40 | ___PPC_RA(a) | ___PPC_RB(b)) | ||
| 41 | #define TEST_LFDX(t, a, b) (PPC_INST_LFDX | ___PPC_RT(t) | \ | ||
| 42 | ___PPC_RA(a) | ___PPC_RB(b)) | ||
| 43 | #define TEST_STFDX(s, a, b) (PPC_INST_STFDX | ___PPC_RS(s) | \ | ||
| 44 | ___PPC_RA(a) | ___PPC_RB(b)) | ||
| 45 | #define TEST_LVX(t, a, b) (PPC_INST_LVX | ___PPC_RT(t) | \ | ||
| 46 | ___PPC_RA(a) | ___PPC_RB(b)) | ||
| 47 | #define TEST_STVX(s, a, b) (PPC_INST_STVX | ___PPC_RS(s) | \ | ||
| 48 | ___PPC_RA(a) | ___PPC_RB(b)) | ||
| 49 | #define TEST_LXVD2X(s, a, b) (PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b)) | ||
| 50 | #define TEST_STXVD2X(s, a, b) (PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b)) | ||
| 51 | |||
| 52 | |||
| 53 | static void __init init_pt_regs(struct pt_regs *regs) | ||
| 54 | { | ||
| 55 | static unsigned long msr; | ||
| 56 | static bool msr_cached; | ||
| 57 | |||
| 58 | memset(regs, 0, sizeof(struct pt_regs)); | ||
| 59 | |||
| 60 | if (likely(msr_cached)) { | ||
| 61 | regs->msr = msr; | ||
| 62 | return; | ||
| 63 | } | ||
| 64 | |||
| 65 | asm volatile("mfmsr %0" : "=r"(regs->msr)); | ||
| 66 | |||
| 67 | regs->msr |= MSR_FP; | ||
| 68 | regs->msr |= MSR_VEC; | ||
| 69 | regs->msr |= MSR_VSX; | ||
| 70 | |||
| 71 | msr = regs->msr; | ||
| 72 | msr_cached = true; | ||
| 73 | } | ||
| 74 | |||
| 75 | static void __init show_result(char *ins, char *result) | ||
| 76 | { | ||
| 77 | pr_info("%-14s : %s\n", ins, result); | ||
| 78 | } | ||
| 79 | |||
| 80 | static void __init test_ld(void) | ||
| 81 | { | ||
| 82 | struct pt_regs regs; | ||
| 83 | unsigned long a = 0x23; | ||
| 84 | int stepped = -1; | ||
| 85 | |||
| 86 | init_pt_regs(&regs); | ||
| 87 | regs.gpr[3] = (unsigned long) &a; | ||
| 88 | |||
| 89 | /* ld r5, 0(r3) */ | ||
| 90 | stepped = emulate_step(&regs, TEST_LD(5, 3, 0)); | ||
| 91 | |||
| 92 | if (stepped == 1 && regs.gpr[5] == a) | ||
| 93 | show_result("ld", "PASS"); | ||
| 94 | else | ||
| 95 | show_result("ld", "FAIL"); | ||
| 96 | } | ||
| 97 | |||
| 98 | static void __init test_lwz(void) | ||
| 99 | { | ||
| 100 | struct pt_regs regs; | ||
| 101 | unsigned int a = 0x4545; | ||
| 102 | int stepped = -1; | ||
| 103 | |||
| 104 | init_pt_regs(&regs); | ||
| 105 | regs.gpr[3] = (unsigned long) &a; | ||
| 106 | |||
| 107 | /* lwz r5, 0(r3) */ | ||
| 108 | stepped = emulate_step(&regs, TEST_LWZ(5, 3, 0)); | ||
| 109 | |||
| 110 | if (stepped == 1 && regs.gpr[5] == a) | ||
| 111 | show_result("lwz", "PASS"); | ||
| 112 | else | ||
| 113 | show_result("lwz", "FAIL"); | ||
| 114 | } | ||
| 115 | |||
| 116 | static void __init test_lwzx(void) | ||
| 117 | { | ||
| 118 | struct pt_regs regs; | ||
| 119 | unsigned int a[3] = {0x0, 0x0, 0x1234}; | ||
| 120 | int stepped = -1; | ||
| 121 | |||
| 122 | init_pt_regs(&regs); | ||
| 123 | regs.gpr[3] = (unsigned long) a; | ||
| 124 | regs.gpr[4] = 8; | ||
| 125 | regs.gpr[5] = 0x8765; | ||
| 126 | |||
| 127 | /* lwzx r5, r3, r4 */ | ||
| 128 | stepped = emulate_step(&regs, TEST_LWZX(5, 3, 4)); | ||
| 129 | if (stepped == 1 && regs.gpr[5] == a[2]) | ||
| 130 | show_result("lwzx", "PASS"); | ||
| 131 | else | ||
| 132 | show_result("lwzx", "FAIL"); | ||
| 133 | } | ||
| 134 | |||
| 135 | static void __init test_std(void) | ||
| 136 | { | ||
| 137 | struct pt_regs regs; | ||
| 138 | unsigned long a = 0x1234; | ||
| 139 | int stepped = -1; | ||
| 140 | |||
| 141 | init_pt_regs(&regs); | ||
| 142 | regs.gpr[3] = (unsigned long) &a; | ||
| 143 | regs.gpr[5] = 0x5678; | ||
| 144 | |||
| 145 | /* std r5, 0(r3) */ | ||
| 146 | stepped = emulate_step(&regs, TEST_STD(5, 3, 0)); | ||
| 147 | if (stepped == 1 && regs.gpr[5] == a) | ||
| 148 | show_result("std", "PASS"); | ||
| 149 | else | ||
| 150 | show_result("std", "FAIL"); | ||
| 151 | } | ||
| 152 | |||
| 153 | static void __init test_ldarx_stdcx(void) | ||
| 154 | { | ||
| 155 | struct pt_regs regs; | ||
| 156 | unsigned long a = 0x1234; | ||
| 157 | int stepped = -1; | ||
| 158 | unsigned long cr0_eq = 0x1 << 29; /* eq bit of CR0 */ | ||
| 159 | |||
| 160 | init_pt_regs(&regs); | ||
| 161 | asm volatile("mfcr %0" : "=r"(regs.ccr)); | ||
| 162 | |||
| 163 | |||
| 164 | /*** ldarx ***/ | ||
| 165 | |||
| 166 | regs.gpr[3] = (unsigned long) &a; | ||
| 167 | regs.gpr[4] = 0; | ||
| 168 | regs.gpr[5] = 0x5678; | ||
| 169 | |||
| 170 | /* ldarx r5, r3, r4, 0 */ | ||
| 171 | stepped = emulate_step(&regs, TEST_LDARX(5, 3, 4, 0)); | ||
| 172 | |||
| 173 | /* | ||
| 174 | * Don't touch 'a' here. Touching 'a' can cause a load/store | ||
| 175 | * of 'a', which would make the subsequent stdcx. fail. | ||
| 176 | * Instead, use a hardcoded value for comparison. | ||
| 177 | */ | ||
| 178 | if (stepped <= 0 || regs.gpr[5] != 0x1234) { | ||
| 179 | show_result("ldarx / stdcx.", "FAIL (ldarx)"); | ||
| 180 | return; | ||
| 181 | } | ||
| 182 | |||
| 183 | |||
| 184 | /*** stdcx. ***/ | ||
| 185 | |||
| 186 | regs.gpr[5] = 0x9ABC; | ||
| 187 | |||
| 188 | /* stdcx. r5, r3, r4 */ | ||
| 189 | stepped = emulate_step(&regs, TEST_STDCX(5, 3, 4)); | ||
| 190 | |||
| 191 | /* | ||
| 192 | * Two possible scenarios indicate successful emulation | ||
| 193 | * of stdcx. : | ||
| 194 | * 1. Reservation is active and store is performed. In this | ||
| 195 | * case cr0.eq bit will be set to 1. | ||
| 196 | * 2. Reservation is not active and store is not performed. | ||
| 197 | * In this case cr0.eq bit will be set to 0. | ||
| 198 | */ | ||
| 199 | if (stepped == 1 && ((regs.gpr[5] == a && (regs.ccr & cr0_eq)) | ||
| 200 | || (regs.gpr[5] != a && !(regs.ccr & cr0_eq)))) | ||
| 201 | show_result("ldarx / stdcx.", "PASS"); | ||
| 202 | else | ||
| 203 | show_result("ldarx / stdcx.", "FAIL (stdcx.)"); | ||
| 204 | } | ||
| 205 | |||
| 206 | #ifdef CONFIG_PPC_FPU | ||
| 207 | static void __init test_lfsx_stfsx(void) | ||
| 208 | { | ||
| 209 | struct pt_regs regs; | ||
| 210 | union { | ||
| 211 | float a; | ||
| 212 | int b; | ||
| 213 | } c; | ||
| 214 | int cached_b; | ||
| 215 | int stepped = -1; | ||
| 216 | |||
| 217 | init_pt_regs(&regs); | ||
| 218 | |||
| 219 | |||
| 220 | /*** lfsx ***/ | ||
| 221 | |||
| 222 | c.a = 123.45; | ||
| 223 | cached_b = c.b; | ||
| 224 | |||
| 225 | regs.gpr[3] = (unsigned long) &c.a; | ||
| 226 | regs.gpr[4] = 0; | ||
| 227 | |||
| 228 | /* lfsx frt10, r3, r4 */ | ||
| 229 | stepped = emulate_step(&regs, TEST_LFSX(10, 3, 4)); | ||
| 230 | |||
| 231 | if (stepped == 1) | ||
| 232 | show_result("lfsx", "PASS"); | ||
| 233 | else | ||
| 234 | show_result("lfsx", "FAIL"); | ||
| 235 | |||
| 236 | |||
| 237 | /*** stfsx ***/ | ||
| 238 | |||
| 239 | c.a = 678.91; | ||
| 240 | |||
| 241 | /* stfsx frs10, r3, r4 */ | ||
| 242 | stepped = emulate_step(&regs, TEST_STFSX(10, 3, 4)); | ||
| 243 | |||
| 244 | if (stepped == 1 && c.b == cached_b) | ||
| 245 | show_result("stfsx", "PASS"); | ||
| 246 | else | ||
| 247 | show_result("stfsx", "FAIL"); | ||
| 248 | } | ||
| 249 | |||
| 250 | static void __init test_lfdx_stfdx(void) | ||
| 251 | { | ||
| 252 | struct pt_regs regs; | ||
| 253 | union { | ||
| 254 | double a; | ||
| 255 | long b; | ||
| 256 | } c; | ||
| 257 | long cached_b; | ||
| 258 | int stepped = -1; | ||
| 259 | |||
| 260 | init_pt_regs(&regs); | ||
| 261 | |||
| 262 | |||
| 263 | /*** lfdx ***/ | ||
| 264 | |||
| 265 | c.a = 123456.78; | ||
| 266 | cached_b = c.b; | ||
| 267 | |||
| 268 | regs.gpr[3] = (unsigned long) &c.a; | ||
| 269 | regs.gpr[4] = 0; | ||
| 270 | |||
| 271 | /* lfdx frt10, r3, r4 */ | ||
| 272 | stepped = emulate_step(&regs, TEST_LFDX(10, 3, 4)); | ||
| 273 | |||
| 274 | if (stepped == 1) | ||
| 275 | show_result("lfdx", "PASS"); | ||
| 276 | else | ||
| 277 | show_result("lfdx", "FAIL"); | ||
| 278 | |||
| 279 | |||
| 280 | /*** stfdx ***/ | ||
| 281 | |||
| 282 | c.a = 987654.32; | ||
| 283 | |||
| 284 | /* stfdx frs10, r3, r4 */ | ||
| 285 | stepped = emulate_step(&regs, TEST_STFDX(10, 3, 4)); | ||
| 286 | |||
| 287 | if (stepped == 1 && c.b == cached_b) | ||
| 288 | show_result("stfdx", "PASS"); | ||
| 289 | else | ||
| 290 | show_result("stfdx", "FAIL"); | ||
| 291 | } | ||
| 292 | #else | ||
| 293 | static void __init test_lfsx_stfsx(void) | ||
| 294 | { | ||
| 295 | show_result("lfsx", "SKIP (CONFIG_PPC_FPU is not set)"); | ||
| 296 | show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)"); | ||
| 297 | } | ||
| 298 | |||
| 299 | static void __init test_lfdx_stfdx(void) | ||
| 300 | { | ||
| 301 | show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)"); | ||
| 302 | show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)"); | ||
| 303 | } | ||
| 304 | #endif /* CONFIG_PPC_FPU */ | ||
| 305 | |||
| 306 | #ifdef CONFIG_ALTIVEC | ||
| 307 | static void __init test_lvx_stvx(void) | ||
| 308 | { | ||
| 309 | struct pt_regs regs; | ||
| 310 | union { | ||
| 311 | vector128 a; | ||
| 312 | u32 b[4]; | ||
| 313 | } c; | ||
| 314 | u32 cached_b[4]; | ||
| 315 | int stepped = -1; | ||
| 316 | |||
| 317 | init_pt_regs(&regs); | ||
| 318 | |||
| 319 | |||
| 320 | /*** lvx ***/ | ||
| 321 | |||
| 322 | cached_b[0] = c.b[0] = 923745; | ||
| 323 | cached_b[1] = c.b[1] = 2139478; | ||
| 324 | cached_b[2] = c.b[2] = 9012; | ||
| 325 | cached_b[3] = c.b[3] = 982134; | ||
| 326 | |||
| 327 | regs.gpr[3] = (unsigned long) &c.a; | ||
| 328 | regs.gpr[4] = 0; | ||
| 329 | |||
| 330 | /* lvx vrt10, r3, r4 */ | ||
| 331 | stepped = emulate_step(&regs, TEST_LVX(10, 3, 4)); | ||
| 332 | |||
| 333 | if (stepped == 1) | ||
| 334 | show_result("lvx", "PASS"); | ||
| 335 | else | ||
| 336 | show_result("lvx", "FAIL"); | ||
| 337 | |||
| 338 | |||
| 339 | /*** stvx ***/ | ||
| 340 | |||
| 341 | c.b[0] = 4987513; | ||
| 342 | c.b[1] = 84313948; | ||
| 343 | c.b[2] = 71; | ||
| 344 | c.b[3] = 498532; | ||
| 345 | |||
| 346 | /* stvx vrs10, r3, r4 */ | ||
| 347 | stepped = emulate_step(&regs, TEST_STVX(10, 3, 4)); | ||
| 348 | |||
| 349 | if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] && | ||
| 350 | cached_b[2] == c.b[2] && cached_b[3] == c.b[3]) | ||
| 351 | show_result("stvx", "PASS"); | ||
| 352 | else | ||
| 353 | show_result("stvx", "FAIL"); | ||
| 354 | } | ||
| 355 | #else | ||
| 356 | static void __init test_lvx_stvx(void) | ||
| 357 | { | ||
| 358 | show_result("lvx", "SKIP (CONFIG_ALTIVEC is not set)"); | ||
| 359 | show_result("stvx", "SKIP (CONFIG_ALTIVEC is not set)"); | ||
| 360 | } | ||
| 361 | #endif /* CONFIG_ALTIVEC */ | ||
| 362 | |||
| 363 | #ifdef CONFIG_VSX | ||
| 364 | static void __init test_lxvd2x_stxvd2x(void) | ||
| 365 | { | ||
| 366 | struct pt_regs regs; | ||
| 367 | union { | ||
| 368 | vector128 a; | ||
| 369 | u32 b[4]; | ||
| 370 | } c; | ||
| 371 | u32 cached_b[4]; | ||
| 372 | int stepped = -1; | ||
| 373 | |||
| 374 | init_pt_regs(&regs); | ||
| 375 | |||
| 376 | |||
| 377 | /*** lxvd2x ***/ | ||
| 378 | |||
| 379 | cached_b[0] = c.b[0] = 18233; | ||
| 380 | cached_b[1] = c.b[1] = 34863571; | ||
| 381 | cached_b[2] = c.b[2] = 834; | ||
| 382 | cached_b[3] = c.b[3] = 6138911; | ||
| 383 | |||
| 384 | regs.gpr[3] = (unsigned long) &c.a; | ||
| 385 | regs.gpr[4] = 0; | ||
| 386 | |||
| 387 | /* lxvd2x vsr39, r3, r4 */ | ||
| 388 | stepped = emulate_step(&regs, TEST_LXVD2X(39, 3, 4)); | ||
| 389 | |||
| 390 | if (stepped == 1) | ||
| 391 | show_result("lxvd2x", "PASS"); | ||
| 392 | else | ||
| 393 | show_result("lxvd2x", "FAIL"); | ||
| 394 | |||
| 395 | |||
| 396 | /*** stxvd2x ***/ | ||
| 397 | |||
| 398 | c.b[0] = 21379463; | ||
| 399 | c.b[1] = 87; | ||
| 400 | c.b[2] = 374234; | ||
| 401 | c.b[3] = 4; | ||
| 402 | |||
| 403 | /* stxvd2x vsr39, r3, r4 */ | ||
| 404 | stepped = emulate_step(&regs, TEST_STXVD2X(39, 3, 4)); | ||
| 405 | |||
| 406 | if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] && | ||
| 407 | cached_b[2] == c.b[2] && cached_b[3] == c.b[3]) | ||
| 408 | show_result("stxvd2x", "PASS"); | ||
| 409 | else | ||
| 410 | show_result("stxvd2x", "FAIL"); | ||
| 411 | } | ||
| 412 | #else | ||
| 413 | static void __init test_lxvd2x_stxvd2x(void) | ||
| 414 | { | ||
| 415 | show_result("lxvd2x", "SKIP (CONFIG_VSX is not set)"); | ||
| 416 | show_result("stxvd2x", "SKIP (CONFIG_VSX is not set)"); | ||
| 417 | } | ||
| 418 | #endif /* CONFIG_VSX */ | ||
| 419 | |||
| 420 | static int __init test_emulate_step(void) | ||
| 421 | { | ||
| 422 | test_ld(); | ||
| 423 | test_lwz(); | ||
| 424 | test_lwzx(); | ||
| 425 | test_std(); | ||
| 426 | test_ldarx_stdcx(); | ||
| 427 | test_lfsx_stfsx(); | ||
| 428 | test_lfdx_stfdx(); | ||
| 429 | test_lvx_stvx(); | ||
| 430 | test_lxvd2x_stxvd2x(); | ||
| 431 | |||
| 432 | return 0; | ||
| 433 | } | ||
| 434 | late_initcall(test_emulate_step); | ||
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 6aa3b76aa0d6..9be992083d2a 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c | |||
| @@ -356,18 +356,42 @@ static void early_check_vec5(void) | |||
| 356 | unsigned long root, chosen; | 356 | unsigned long root, chosen; |
| 357 | int size; | 357 | int size; |
| 358 | const u8 *vec5; | 358 | const u8 *vec5; |
| 359 | u8 mmu_supported; | ||
| 359 | 360 | ||
| 360 | root = of_get_flat_dt_root(); | 361 | root = of_get_flat_dt_root(); |
| 361 | chosen = of_get_flat_dt_subnode_by_name(root, "chosen"); | 362 | chosen = of_get_flat_dt_subnode_by_name(root, "chosen"); |
| 362 | if (chosen == -FDT_ERR_NOTFOUND) | 363 | if (chosen == -FDT_ERR_NOTFOUND) { |
| 364 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; | ||
| 363 | return; | 365 | return; |
| 366 | } | ||
| 364 | vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size); | 367 | vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size); |
| 365 | if (!vec5) | 368 | if (!vec5) { |
| 369 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; | ||
| 366 | return; | 370 | return; |
| 367 | if (size <= OV5_INDX(OV5_MMU_RADIX_300) || | 371 | } |
| 368 | !(vec5[OV5_INDX(OV5_MMU_RADIX_300)] & OV5_FEAT(OV5_MMU_RADIX_300))) | 372 | if (size <= OV5_INDX(OV5_MMU_SUPPORT)) { |
| 369 | /* Hypervisor doesn't support radix */ | ||
| 370 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; | 373 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; |
| 374 | return; | ||
| 375 | } | ||
| 376 | |||
| 377 | /* Check for supported configuration */ | ||
| 378 | mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] & | ||
| 379 | OV5_FEAT(OV5_MMU_SUPPORT); | ||
| 380 | if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) { | ||
| 381 | /* Hypervisor only supports radix - check enabled && GTSE */ | ||
| 382 | if (!early_radix_enabled()) { | ||
| 383 | pr_warn("WARNING: Ignoring cmdline option disable_radix\n"); | ||
| 384 | } | ||
| 385 | if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] & | ||
| 386 | OV5_FEAT(OV5_RADIX_GTSE))) { | ||
| 387 | pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n"); | ||
| 388 | } | ||
| 389 | /* Do radix anyway - the hypervisor said we had to */ | ||
| 390 | cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX; | ||
| 391 | } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) { | ||
| 392 | /* Hypervisor only supports hash - disable radix */ | ||
| 393 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; | ||
| 394 | } | ||
| 371 | } | 395 | } |
| 372 | 396 | ||
| 373 | void __init mmu_early_init_devtree(void) | 397 | void __init mmu_early_init_devtree(void) |
| @@ -383,7 +407,7 @@ void __init mmu_early_init_devtree(void) | |||
| 383 | * even though the ibm,architecture-vec-5 property created by | 407 | * even though the ibm,architecture-vec-5 property created by |
| 384 | * skiboot doesn't have the necessary bits set. | 408 | * skiboot doesn't have the necessary bits set. |
| 385 | */ | 409 | */ |
| 386 | if (early_radix_enabled() && !(mfmsr() & MSR_HV)) | 410 | if (!(mfmsr() & MSR_HV)) |
| 387 | early_check_vec5(); | 411 | early_check_vec5(); |
| 388 | 412 | ||
| 389 | if (early_radix_enabled()) | 413 | if (early_radix_enabled()) |
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 2a590a98e652..c28165d8970b 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c | |||
| @@ -186,6 +186,10 @@ static void __init radix_init_pgtable(void) | |||
| 186 | */ | 186 | */ |
| 187 | register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12); | 187 | register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12); |
| 188 | pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd); | 188 | pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd); |
| 189 | asm volatile("ptesync" : : : "memory"); | ||
| 190 | asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : | ||
| 191 | "r" (TLBIEL_INVAL_SET_LPID), "r" (0)); | ||
| 192 | asm volatile("eieio; tlbsync; ptesync" : : : "memory"); | ||
| 189 | } | 193 | } |
| 190 | 194 | ||
| 191 | static void __init radix_init_partition_table(void) | 195 | static void __init radix_init_partition_table(void) |
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 6693f75e93d1..da8a0f7a035c 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S | |||
| @@ -39,8 +39,8 @@ opal_tracepoint_refcount: | |||
| 39 | BEGIN_FTR_SECTION; \ | 39 | BEGIN_FTR_SECTION; \ |
| 40 | b 1f; \ | 40 | b 1f; \ |
| 41 | END_FTR_SECTION(0, 1); \ | 41 | END_FTR_SECTION(0, 1); \ |
| 42 | ld r12,opal_tracepoint_refcount@toc(r2); \ | 42 | ld r11,opal_tracepoint_refcount@toc(r2); \ |
| 43 | cmpdi r12,0; \ | 43 | cmpdi r11,0; \ |
| 44 | bne- LABEL; \ | 44 | bne- LABEL; \ |
| 45 | 1: | 45 | 1: |
| 46 | 46 | ||
diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S index f9760ccf4032..3696ea6c4826 100644 --- a/arch/powerpc/purgatory/trampoline.S +++ b/arch/powerpc/purgatory/trampoline.S | |||
| @@ -116,13 +116,13 @@ dt_offset: | |||
| 116 | 116 | ||
| 117 | .data | 117 | .data |
| 118 | .balign 8 | 118 | .balign 8 |
| 119 | .globl sha256_digest | 119 | .globl purgatory_sha256_digest |
| 120 | sha256_digest: | 120 | purgatory_sha256_digest: |
| 121 | .skip 32 | 121 | .skip 32 |
| 122 | .size sha256_digest, . - sha256_digest | 122 | .size purgatory_sha256_digest, . - purgatory_sha256_digest |
| 123 | 123 | ||
| 124 | .balign 8 | 124 | .balign 8 |
| 125 | .globl sha_regions | 125 | .globl purgatory_sha_regions |
| 126 | sha_regions: | 126 | purgatory_sha_regions: |
| 127 | .skip 8 * 2 * 16 | 127 | .skip 8 * 2 * 16 |
| 128 | .size sha_regions, . - sha_regions | 128 | .size purgatory_sha_regions, . - purgatory_sha_regions |
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index ada29eaed6e2..f523ac883150 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c | |||
| @@ -274,7 +274,9 @@ failed: | |||
| 274 | if (bank->disk->major > 0) | 274 | if (bank->disk->major > 0) |
| 275 | unregister_blkdev(bank->disk->major, | 275 | unregister_blkdev(bank->disk->major, |
| 276 | bank->disk->disk_name); | 276 | bank->disk->disk_name); |
| 277 | del_gendisk(bank->disk); | 277 | if (bank->disk->flags & GENHD_FL_UP) |
| 278 | del_gendisk(bank->disk); | ||
| 279 | put_disk(bank->disk); | ||
| 278 | } | 280 | } |
| 279 | device->dev.platform_data = NULL; | 281 | device->dev.platform_data = NULL; |
| 280 | if (bank->io_addr != 0) | 282 | if (bank->io_addr != 0) |
| @@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device) | |||
| 299 | device_remove_file(&device->dev, &dev_attr_ecc); | 301 | device_remove_file(&device->dev, &dev_attr_ecc); |
| 300 | free_irq(bank->irq_id, device); | 302 | free_irq(bank->irq_id, device); |
| 301 | del_gendisk(bank->disk); | 303 | del_gendisk(bank->disk); |
| 304 | put_disk(bank->disk); | ||
| 302 | iounmap((void __iomem *) bank->io_addr); | 305 | iounmap((void __iomem *) bank->io_addr); |
| 303 | kfree(bank); | 306 | kfree(bank); |
| 304 | 307 | ||
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c index f9670eabfcfa..b53f80f0b4d8 100644 --- a/arch/powerpc/sysdev/xics/icp-opal.c +++ b/arch/powerpc/sysdev/xics/icp-opal.c | |||
| @@ -91,6 +91,16 @@ static unsigned int icp_opal_get_irq(void) | |||
| 91 | 91 | ||
| 92 | static void icp_opal_set_cpu_priority(unsigned char cppr) | 92 | static void icp_opal_set_cpu_priority(unsigned char cppr) |
| 93 | { | 93 | { |
| 94 | /* | ||
| 95 | * Here be dragons. The caller has asked to allow only IPI's and not | ||
| 96 | * external interrupts. But OPAL XIVE doesn't support that. So instead | ||
| 97 | * of allowing no interrupts allow all. That's still not right, but | ||
| 98 | * currently the only caller who does this is xics_migrate_irqs_away() | ||
| 99 | * and it works in that case. | ||
| 100 | */ | ||
| 101 | if (cppr >= DEFAULT_PRIORITY) | ||
| 102 | cppr = LOWEST_PRIORITY; | ||
| 103 | |||
| 94 | xics_set_base_cppr(cppr); | 104 | xics_set_base_cppr(cppr); |
| 95 | opal_int_set_cppr(cppr); | 105 | opal_int_set_cppr(cppr); |
| 96 | iosync(); | 106 | iosync(); |
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index 69d858e51ac7..23efe4e42172 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/of.h> | 20 | #include <linux/of.h> |
| 21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
| 22 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
| 23 | #include <linux/delay.h> | ||
| 23 | 24 | ||
| 24 | #include <asm/prom.h> | 25 | #include <asm/prom.h> |
| 25 | #include <asm/io.h> | 26 | #include <asm/io.h> |
| @@ -198,9 +199,6 @@ void xics_migrate_irqs_away(void) | |||
| 198 | /* Remove ourselves from the global interrupt queue */ | 199 | /* Remove ourselves from the global interrupt queue */ |
| 199 | xics_set_cpu_giq(xics_default_distrib_server, 0); | 200 | xics_set_cpu_giq(xics_default_distrib_server, 0); |
| 200 | 201 | ||
| 201 | /* Allow IPIs again... */ | ||
| 202 | icp_ops->set_priority(DEFAULT_PRIORITY); | ||
| 203 | |||
| 204 | for_each_irq_desc(virq, desc) { | 202 | for_each_irq_desc(virq, desc) { |
| 205 | struct irq_chip *chip; | 203 | struct irq_chip *chip; |
| 206 | long server; | 204 | long server; |
| @@ -255,6 +253,19 @@ void xics_migrate_irqs_away(void) | |||
| 255 | unlock: | 253 | unlock: |
| 256 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 254 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 257 | } | 255 | } |
| 256 | |||
| 257 | /* Allow "sufficient" time to drop any inflight IRQ's */ | ||
| 258 | mdelay(5); | ||
| 259 | |||
| 260 | /* | ||
| 261 | * Allow IPIs again. This is done at the very end, after migrating all | ||
| 262 | * interrupts, the expectation is that we'll only get woken up by an IPI | ||
| 263 | * interrupt beyond this point, but leave externals masked just to be | ||
| 264 | * safe. If we're using icp-opal this may actually allow all | ||
| 265 | * interrupts anyway, but that should be OK. | ||
| 266 | */ | ||
| 267 | icp_ops->set_priority(DEFAULT_PRIORITY); | ||
| 268 | |||
| 258 | } | 269 | } |
| 259 | #endif /* CONFIG_HOTPLUG_CPU */ | 270 | #endif /* CONFIG_HOTPLUG_CPU */ |
| 260 | 271 | ||
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 143b1e00b818..4b176fe83da4 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig | |||
| @@ -609,7 +609,7 @@ CONFIG_SCHED_TRACER=y | |||
| 609 | CONFIG_FTRACE_SYSCALLS=y | 609 | CONFIG_FTRACE_SYSCALLS=y |
| 610 | CONFIG_STACK_TRACER=y | 610 | CONFIG_STACK_TRACER=y |
| 611 | CONFIG_BLK_DEV_IO_TRACE=y | 611 | CONFIG_BLK_DEV_IO_TRACE=y |
| 612 | CONFIG_UPROBE_EVENT=y | 612 | CONFIG_UPROBE_EVENTS=y |
| 613 | CONFIG_FUNCTION_PROFILER=y | 613 | CONFIG_FUNCTION_PROFILER=y |
| 614 | CONFIG_HIST_TRIGGERS=y | 614 | CONFIG_HIST_TRIGGERS=y |
| 615 | CONFIG_TRACE_ENUM_MAP_FILE=y | 615 | CONFIG_TRACE_ENUM_MAP_FILE=y |
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index f05d2d6e1087..0de46cc397f6 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig | |||
| @@ -560,7 +560,7 @@ CONFIG_SCHED_TRACER=y | |||
| 560 | CONFIG_FTRACE_SYSCALLS=y | 560 | CONFIG_FTRACE_SYSCALLS=y |
| 561 | CONFIG_STACK_TRACER=y | 561 | CONFIG_STACK_TRACER=y |
| 562 | CONFIG_BLK_DEV_IO_TRACE=y | 562 | CONFIG_BLK_DEV_IO_TRACE=y |
| 563 | CONFIG_UPROBE_EVENT=y | 563 | CONFIG_UPROBE_EVENTS=y |
| 564 | CONFIG_FUNCTION_PROFILER=y | 564 | CONFIG_FUNCTION_PROFILER=y |
| 565 | CONFIG_HIST_TRIGGERS=y | 565 | CONFIG_HIST_TRIGGERS=y |
| 566 | CONFIG_TRACE_ENUM_MAP_FILE=y | 566 | CONFIG_TRACE_ENUM_MAP_FILE=y |
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 2358bf33c5ef..e167557b434c 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig | |||
| @@ -558,7 +558,7 @@ CONFIG_SCHED_TRACER=y | |||
| 558 | CONFIG_FTRACE_SYSCALLS=y | 558 | CONFIG_FTRACE_SYSCALLS=y |
| 559 | CONFIG_STACK_TRACER=y | 559 | CONFIG_STACK_TRACER=y |
| 560 | CONFIG_BLK_DEV_IO_TRACE=y | 560 | CONFIG_BLK_DEV_IO_TRACE=y |
| 561 | CONFIG_UPROBE_EVENT=y | 561 | CONFIG_UPROBE_EVENTS=y |
| 562 | CONFIG_FUNCTION_PROFILER=y | 562 | CONFIG_FUNCTION_PROFILER=y |
| 563 | CONFIG_HIST_TRIGGERS=y | 563 | CONFIG_HIST_TRIGGERS=y |
| 564 | CONFIG_TRACE_ENUM_MAP_FILE=y | 564 | CONFIG_TRACE_ENUM_MAP_FILE=y |
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index d69ea495c4d7..716b17238599 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c | |||
| @@ -474,8 +474,11 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, | |||
| 474 | ret = blkcipher_walk_done(desc, walk, nbytes - n); | 474 | ret = blkcipher_walk_done(desc, walk, nbytes - n); |
| 475 | } | 475 | } |
| 476 | if (k < n) { | 476 | if (k < n) { |
| 477 | if (__ctr_paes_set_key(ctx) != 0) | 477 | if (__ctr_paes_set_key(ctx) != 0) { |
| 478 | if (locked) | ||
| 479 | spin_unlock(&ctrblk_lock); | ||
| 478 | return blkcipher_walk_done(desc, walk, -EIO); | 480 | return blkcipher_walk_done(desc, walk, -EIO); |
| 481 | } | ||
| 479 | } | 482 | } |
| 480 | } | 483 | } |
| 481 | if (locked) | 484 | if (locked) |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 68bfd09f1b02..97189dbaf34b 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
| @@ -179,7 +179,7 @@ CONFIG_FTRACE_SYSCALLS=y | |||
| 179 | CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y | 179 | CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y |
| 180 | CONFIG_STACK_TRACER=y | 180 | CONFIG_STACK_TRACER=y |
| 181 | CONFIG_BLK_DEV_IO_TRACE=y | 181 | CONFIG_BLK_DEV_IO_TRACE=y |
| 182 | CONFIG_UPROBE_EVENT=y | 182 | CONFIG_UPROBE_EVENTS=y |
| 183 | CONFIG_FUNCTION_PROFILER=y | 183 | CONFIG_FUNCTION_PROFILER=y |
| 184 | CONFIG_TRACE_ENUM_MAP_FILE=y | 184 | CONFIG_TRACE_ENUM_MAP_FILE=y |
| 185 | CONFIG_KPROBES_SANITY_TEST=y | 185 | CONFIG_KPROBES_SANITY_TEST=y |
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index d1c407ddf703..9072bf63a846 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h | |||
| @@ -8,31 +8,27 @@ | |||
| 8 | #define _S390_CPUTIME_H | 8 | #define _S390_CPUTIME_H |
| 9 | 9 | ||
| 10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
| 11 | #include <asm/div64.h> | 11 | #include <asm/timex.h> |
| 12 | 12 | ||
| 13 | #define CPUTIME_PER_USEC 4096ULL | 13 | #define CPUTIME_PER_USEC 4096ULL |
| 14 | #define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC) | 14 | #define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC) |
| 15 | 15 | ||
| 16 | /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ | 16 | /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ |
| 17 | 17 | ||
| 18 | typedef unsigned long long __nocast cputime_t; | ||
| 19 | typedef unsigned long long __nocast cputime64_t; | ||
| 20 | |||
| 21 | #define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new) | 18 | #define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new) |
| 22 | 19 | ||
| 23 | static inline unsigned long __div(unsigned long long n, unsigned long base) | ||
| 24 | { | ||
| 25 | return n / base; | ||
| 26 | } | ||
| 27 | |||
| 28 | /* | 20 | /* |
| 29 | * Convert cputime to microseconds and back. | 21 | * Convert cputime to microseconds. |
| 30 | */ | 22 | */ |
| 31 | static inline unsigned int cputime_to_usecs(const cputime_t cputime) | 23 | static inline u64 cputime_to_usecs(const u64 cputime) |
| 32 | { | 24 | { |
| 33 | return (__force unsigned long long) cputime >> 12; | 25 | return cputime >> 12; |
| 34 | } | 26 | } |
| 35 | 27 | ||
| 28 | /* | ||
| 29 | * Convert cputime to nanoseconds. | ||
| 30 | */ | ||
| 31 | #define cputime_to_nsecs(cputime) tod_to_ns(cputime) | ||
| 36 | 32 | ||
| 37 | u64 arch_cpu_idle_time(int cpu); | 33 | u64 arch_cpu_idle_time(int cpu); |
| 38 | 34 | ||
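With cputime_t gone, the s390 helpers above operate directly on u64 CPU-timer units of 2^-12 microseconds, so CPUTIME_PER_USEC is 4096 and cputime_to_usecs() is an exact division expressed as a right shift. A trivial user-space sanity check of that equivalence (illustrative only, not kernel code):

#include <assert.h>
#include <stdio.h>

#define CPUTIME_PER_USEC 4096ULL	/* 2^12 CPU-timer units per microsecond */

int main(void)
{
	unsigned long long cputime = 123456789ULL;

	/* 4096 == 1 << 12, so the shift is an exact integer division */
	assert(CPUTIME_PER_USEC == 1ULL << 12);
	assert((cputime >> 12) == cputime / CPUTIME_PER_USEC);
	printf("%llu timer units ~ %llu usecs\n", cputime, cputime >> 12);
	return 0;
}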
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 7ed1972b1920..93e37b12e882 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | * the S390 page table tree. | 24 | * the S390 page table tree. |
| 25 | */ | 25 | */ |
| 26 | #ifndef __ASSEMBLY__ | 26 | #ifndef __ASSEMBLY__ |
| 27 | #include <asm-generic/5level-fixup.h> | ||
| 27 | #include <linux/sched.h> | 28 | #include <linux/sched.h> |
| 28 | #include <linux/mm_types.h> | 29 | #include <linux/mm_types.h> |
| 29 | #include <linux/page-flags.h> | 30 | #include <linux/page-flags.h> |
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 354344dcc198..118535123f34 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h | |||
| @@ -206,20 +206,16 @@ static inline unsigned long long get_tod_clock_monotonic(void) | |||
| 206 | * ns = (todval * 125) >> 9; | 206 | * ns = (todval * 125) >> 9; |
| 207 | * | 207 | * |
| 208 | * In order to avoid an overflow with the multiplication we can rewrite this. | 208 | * In order to avoid an overflow with the multiplication we can rewrite this. |
| 209 | * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits) | 209 | * With a split todval == 2^9 * th + tl (th upper 55 bits, tl lower 9 bits) |
| 210 | * we end up with | 210 | * we end up with |
| 211 | * | 211 | * |
| 212 | * ns = ((2^32 * th + tl) * 125 ) >> 9; | 212 | * ns = ((2^9 * th + tl) * 125 ) >> 9; |
| 213 | * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9); | 213 | * -> ns = (th * 125) + ((tl * 125) >> 9); |
| 214 | * | 214 | * |
| 215 | */ | 215 | */ |
| 216 | static inline unsigned long long tod_to_ns(unsigned long long todval) | 216 | static inline unsigned long long tod_to_ns(unsigned long long todval) |
| 217 | { | 217 | { |
| 218 | unsigned long long ns; | 218 | return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9); |
| 219 | |||
| 220 | ns = ((todval >> 32) << 23) * 125; | ||
| 221 | ns += ((todval & 0xffffffff) * 125) >> 9; | ||
| 222 | return ns; | ||
| 223 | } | 219 | } |
| 224 | 220 | ||
| 225 | #endif | 221 | #endif |
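The timex.h hunk above replaces the old 32/32 split of the TOD value with a 2^9 split: writing todval = 2^9 * th + tl, the exact (todval * 125) >> 9 reduces to th * 125 + ((tl * 125) >> 9), because 2^9 * th * 125 is divisible by 2^9. A minimal user-space sketch (not part of the patch; assumes a compiler providing unsigned __int128) that cross-checks the rewritten helper against the exact wide computation:

/* Compare the patched tod_to_ns() split against the exact
 * (todval * 125) >> 9 computed in 128-bit arithmetic.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long long tod_to_ns_new(unsigned long long todval)
{
	return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}

int main(void)
{
	for (unsigned long long i = 0; i < 1000000; i++) {
		unsigned long long tod = ((unsigned long long)rand() << 33) ^
					 ((unsigned long long)rand() << 2) ^ i;
		unsigned long long exact =
			(unsigned long long)(((unsigned __int128)tod * 125) >> 9);

		if (exact != tod_to_ns_new(tod)) {
			printf("mismatch at %llu\n", tod);
			return 1;
		}
	}
	puts("split form matches exact computation");
	return 0;
}

Both the old and the new split are exact; the new one simply drops the extra shift-by-23 term and reads closer to the formula in the comment.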
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 4384bc797a54..152de9b796e1 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h | |||
| @@ -313,7 +313,9 @@ | |||
| 313 | #define __NR_copy_file_range 375 | 313 | #define __NR_copy_file_range 375 |
| 314 | #define __NR_preadv2 376 | 314 | #define __NR_preadv2 376 |
| 315 | #define __NR_pwritev2 377 | 315 | #define __NR_pwritev2 377 |
| 316 | #define NR_syscalls 378 | 316 | /* Number 378 is reserved for guarded storage */ |
| 317 | #define __NR_statx 379 | ||
| 318 | #define NR_syscalls 380 | ||
| 317 | 319 | ||
| 318 | /* | 320 | /* |
| 319 | * There are some system calls that are not present on 64 bit, some | 321 | * There are some system calls that are not present on 64 bit, some |
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index ae2cda5eee5a..e89cc2e71db1 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c | |||
| @@ -178,3 +178,4 @@ COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, | |||
| 178 | COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); | 178 | COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); |
| 179 | COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags); | 179 | COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags); |
| 180 | COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); | 180 | COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); |
| 181 | COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); | ||
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index dff2152350a7..6a7d737d514c 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
| @@ -490,7 +490,7 @@ ENTRY(pgm_check_handler) | |||
| 490 | jnz .Lpgm_svcper # -> single stepped svc | 490 | jnz .Lpgm_svcper # -> single stepped svc |
| 491 | 1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC | 491 | 1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC |
| 492 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 492 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
| 493 | j 3f | 493 | j 4f |
| 494 | 2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER | 494 | 2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER |
| 495 | lg %r15,__LC_KERNEL_STACK | 495 | lg %r15,__LC_KERNEL_STACK |
| 496 | lgr %r14,%r12 | 496 | lgr %r14,%r12 |
| @@ -499,8 +499,8 @@ ENTRY(pgm_check_handler) | |||
| 499 | tm __LC_PGM_ILC+2,0x02 # check for transaction abort | 499 | tm __LC_PGM_ILC+2,0x02 # check for transaction abort |
| 500 | jz 3f | 500 | jz 3f |
| 501 | mvc __THREAD_trap_tdb(256,%r14),0(%r13) | 501 | mvc __THREAD_trap_tdb(256,%r14),0(%r13) |
| 502 | 3: la %r11,STACK_FRAME_OVERHEAD(%r15) | 502 | 3: stg %r10,__THREAD_last_break(%r14) |
| 503 | stg %r10,__THREAD_last_break(%r14) | 503 | 4: la %r11,STACK_FRAME_OVERHEAD(%r15) |
| 504 | stmg %r0,%r7,__PT_R0(%r11) | 504 | stmg %r0,%r7,__PT_R0(%r11) |
| 505 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC | 505 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC |
| 506 | stmg %r8,%r9,__PT_PSW(%r11) | 506 | stmg %r8,%r9,__PT_PSW(%r11) |
| @@ -509,14 +509,14 @@ ENTRY(pgm_check_handler) | |||
| 509 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | 509 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) |
| 510 | stg %r10,__PT_ARGS(%r11) | 510 | stg %r10,__PT_ARGS(%r11) |
| 511 | tm __LC_PGM_ILC+3,0x80 # check for per exception | 511 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
| 512 | jz 4f | 512 | jz 5f |
| 513 | tmhh %r8,0x0001 # kernel per event ? | 513 | tmhh %r8,0x0001 # kernel per event ? |
| 514 | jz .Lpgm_kprobe | 514 | jz .Lpgm_kprobe |
| 515 | oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP | 515 | oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP |
| 516 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS | 516 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS |
| 517 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE | 517 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE |
| 518 | mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID | 518 | mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID |
| 519 | 4: REENABLE_IRQS | 519 | 5: REENABLE_IRQS |
| 520 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 520 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
| 521 | larl %r1,pgm_check_table | 521 | larl %r1,pgm_check_table |
| 522 | llgh %r10,__PT_INT_CODE+2(%r11) | 522 | llgh %r10,__PT_INT_CODE+2(%r11) |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index b67dafb7b7cf..e545ffe5155a 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
| @@ -564,6 +564,8 @@ static struct kset *ipl_kset; | |||
| 564 | 564 | ||
| 565 | static void __ipl_run(void *unused) | 565 | static void __ipl_run(void *unused) |
| 566 | { | 566 | { |
| 567 | if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW) | ||
| 568 | diag308(DIAG308_LOAD_NORMAL_DUMP, NULL); | ||
| 567 | diag308(DIAG308_LOAD_CLEAR, NULL); | 569 | diag308(DIAG308_LOAD_CLEAR, NULL); |
| 568 | if (MACHINE_IS_VM) | 570 | if (MACHINE_IS_VM) |
| 569 | __cpcmd("IPL", NULL, 0, NULL); | 571 | __cpcmd("IPL", NULL, 0, NULL); |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 20cd339e11ae..f29e41c5e2ec 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
| @@ -124,7 +124,10 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp, | |||
| 124 | clear_tsk_thread_flag(p, TIF_SINGLE_STEP); | 124 | clear_tsk_thread_flag(p, TIF_SINGLE_STEP); |
| 125 | /* Initialize per thread user and system timer values */ | 125 | /* Initialize per thread user and system timer values */ |
| 126 | p->thread.user_timer = 0; | 126 | p->thread.user_timer = 0; |
| 127 | p->thread.guest_timer = 0; | ||
| 127 | p->thread.system_timer = 0; | 128 | p->thread.system_timer = 0; |
| 129 | p->thread.hardirq_timer = 0; | ||
| 130 | p->thread.softirq_timer = 0; | ||
| 128 | 131 | ||
| 129 | frame->sf.back_chain = 0; | 132 | frame->sf.back_chain = 0; |
| 130 | /* new return point is ret_from_fork */ | 133 | /* new return point is ret_from_fork */ |
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 9b59e6212d8f..2659b5cfeddb 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S | |||
| @@ -386,3 +386,5 @@ SYSCALL(sys_mlock2,compat_sys_mlock2) | |||
| 386 | SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */ | 386 | SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */ |
| 387 | SYSCALL(sys_preadv2,compat_sys_preadv2) | 387 | SYSCALL(sys_preadv2,compat_sys_preadv2) |
| 388 | SYSCALL(sys_pwritev2,compat_sys_pwritev2) | 388 | SYSCALL(sys_pwritev2,compat_sys_pwritev2) |
| 389 | NI_SYSCALL | ||
| 390 | SYSCALL(sys_statx,compat_sys_statx) | ||
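The three s390 hunks above reserve number 378 for guarded storage and wire up statx as syscall 379, both native and compat. A hedged user-space sketch of exercising the new entry point by raw syscall number (the 379 fallback is the s390 value from this series, struct statx and STATX_BASIC_STATS come from <linux/stat.h> on a kernel of this vintage, and the target path is only an example):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <fcntl.h>
#include <linux/stat.h>

#ifndef __NR_statx
#define __NR_statx 379		/* s390 number added above */
#endif

int main(void)
{
	struct statx stx;

	memset(&stx, 0, sizeof(stx));
	if (syscall(__NR_statx, AT_FDCWD, "/etc/hostname", 0,
		    STATX_BASIC_STATS, &stx) != 0) {
		perror("statx");
		return 1;
	}
	printf("size=%llu mode=%o\n",
	       (unsigned long long)stx.stx_size, stx.stx_mode);
	return 0;
}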
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index c14fc9029912..072d84ba42a3 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
| @@ -111,7 +111,7 @@ static inline u64 scale_vtime(u64 vtime) | |||
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | static void account_system_index_scaled(struct task_struct *p, | 113 | static void account_system_index_scaled(struct task_struct *p, |
| 114 | cputime_t cputime, cputime_t scaled, | 114 | u64 cputime, u64 scaled, |
| 115 | enum cpu_usage_stat index) | 115 | enum cpu_usage_stat index) |
| 116 | { | 116 | { |
| 117 | p->stimescaled += cputime_to_nsecs(scaled); | 117 | p->stimescaled += cputime_to_nsecs(scaled); |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index b48dc5f1900b..463e5ef02304 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
| @@ -608,12 +608,29 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | |||
| 608 | bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr) | 608 | bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr) |
| 609 | { | 609 | { |
| 610 | spinlock_t *ptl; | 610 | spinlock_t *ptl; |
| 611 | pgd_t *pgd; | ||
| 612 | pud_t *pud; | ||
| 613 | pmd_t *pmd; | ||
| 611 | pgste_t pgste; | 614 | pgste_t pgste; |
| 612 | pte_t *ptep; | 615 | pte_t *ptep; |
| 613 | pte_t pte; | 616 | pte_t pte; |
| 614 | bool dirty; | 617 | bool dirty; |
| 615 | 618 | ||
| 616 | ptep = get_locked_pte(mm, addr, &ptl); | 619 | pgd = pgd_offset(mm, addr); |
| 620 | pud = pud_alloc(mm, pgd, addr); | ||
| 621 | if (!pud) | ||
| 622 | return false; | ||
| 623 | pmd = pmd_alloc(mm, pud, addr); | ||
| 624 | if (!pmd) | ||
| 625 | return false; | ||
| 626 | /* We can't run guests backed by huge pages, but userspace can | ||
| 627 | * still set them up and then try to migrate them without any | ||
| 628 | * migration support. | ||
| 629 | */ | ||
| 630 | if (pmd_large(*pmd)) | ||
| 631 | return true; | ||
| 632 | |||
| 633 | ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl); | ||
| 617 | if (unlikely(!ptep)) | 634 | if (unlikely(!ptep)) |
| 618 | return false; | 635 | return false; |
| 619 | 636 | ||
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h index 0553e5cd5985..46ff8fd678a7 100644 --- a/arch/score/include/asm/pgtable.h +++ b/arch/score/include/asm/pgtable.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #define _ASM_SCORE_PGTABLE_H | 2 | #define _ASM_SCORE_PGTABLE_H |
| 3 | 3 | ||
| 4 | #include <linux/const.h> | 4 | #include <linux/const.h> |
| 5 | #define __ARCH_USE_5LEVEL_HACK | ||
| 5 | #include <asm-generic/pgtable-nopmd.h> | 6 | #include <asm-generic/pgtable-nopmd.h> |
| 6 | 7 | ||
| 7 | #include <asm/fixmap.h> | 8 | #include <asm/fixmap.h> |
diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c index e359ec675869..12daf45369b4 100644 --- a/arch/score/kernel/traps.c +++ b/arch/score/kernel/traps.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | */ | 24 | */ |
| 25 | 25 | ||
| 26 | #include <linux/extable.h> | 26 | #include <linux/extable.h> |
| 27 | #include <linux/ptrace.h> | ||
| 27 | #include <linux/sched/mm.h> | 28 | #include <linux/sched/mm.h> |
| 28 | #include <linux/sched/signal.h> | 29 | #include <linux/sched/signal.h> |
| 29 | #include <linux/sched/debug.h> | 30 | #include <linux/sched/debug.h> |
diff --git a/arch/score/mm/extable.c b/arch/score/mm/extable.c index ec871355fc2d..6736a3ad6286 100644 --- a/arch/score/mm/extable.c +++ b/arch/score/mm/extable.c | |||
| @@ -24,6 +24,8 @@ | |||
| 24 | */ | 24 | */ |
| 25 | 25 | ||
| 26 | #include <linux/extable.h> | 26 | #include <linux/extable.h> |
| 27 | #include <linux/ptrace.h> | ||
| 28 | #include <asm/extable.h> | ||
| 27 | 29 | ||
| 28 | int fixup_exception(struct pt_regs *regs) | 30 | int fixup_exception(struct pt_regs *regs) |
| 29 | { | 31 | { |
diff --git a/arch/sh/boards/mach-cayman/setup.c b/arch/sh/boards/mach-cayman/setup.c index 340fd40b381d..9c292c27e0d7 100644 --- a/arch/sh/boards/mach-cayman/setup.c +++ b/arch/sh/boards/mach-cayman/setup.c | |||
| @@ -128,7 +128,6 @@ static int __init smsc_superio_setup(void) | |||
| 128 | SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX); | 128 | SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX); |
| 129 | SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX); | 129 | SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX); |
| 130 | 130 | ||
| 131 | #ifdef CONFIG_IDE | ||
| 132 | /* | 131 | /* |
| 133 | * Only IDE1 exists on the Cayman | 132 | * Only IDE1 exists on the Cayman |
| 134 | */ | 133 | */ |
| @@ -158,7 +157,6 @@ static int __init smsc_superio_setup(void) | |||
| 158 | SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */ | 157 | SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */ |
| 159 | SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */ | 158 | SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */ |
| 160 | SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */ | 159 | SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */ |
| 161 | #endif | ||
| 162 | 160 | ||
| 163 | /* Exit the configuration state */ | 161 | /* Exit the configuration state */ |
| 164 | outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); | 162 | outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); |
diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h index 19bd89db17e7..f75cf4387257 100644 --- a/arch/sh/include/asm/pgtable-2level.h +++ b/arch/sh/include/asm/pgtable-2level.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | #ifndef __ASM_SH_PGTABLE_2LEVEL_H | 1 | #ifndef __ASM_SH_PGTABLE_2LEVEL_H |
| 2 | #define __ASM_SH_PGTABLE_2LEVEL_H | 2 | #define __ASM_SH_PGTABLE_2LEVEL_H |
| 3 | 3 | ||
| 4 | #define __ARCH_USE_5LEVEL_HACK | ||
| 4 | #include <asm-generic/pgtable-nopmd.h> | 5 | #include <asm-generic/pgtable-nopmd.h> |
| 5 | 6 | ||
| 6 | /* | 7 | /* |
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h index 249a985d9648..9b1e776eca31 100644 --- a/arch/sh/include/asm/pgtable-3level.h +++ b/arch/sh/include/asm/pgtable-3level.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | #ifndef __ASM_SH_PGTABLE_3LEVEL_H | 1 | #ifndef __ASM_SH_PGTABLE_3LEVEL_H |
| 2 | #define __ASM_SH_PGTABLE_3LEVEL_H | 2 | #define __ASM_SH_PGTABLE_3LEVEL_H |
| 3 | 3 | ||
| 4 | #define __ARCH_USE_5LEVEL_HACK | ||
| 4 | #include <asm-generic/pgtable-nopud.h> | 5 | #include <asm-generic/pgtable-nopud.h> |
| 5 | 6 | ||
| 6 | /* | 7 | /* |
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 56e49c8f770d..8a598528ec1f 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | * the SpitFire page tables. | 12 | * the SpitFire page tables. |
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #include <asm-generic/5level-fixup.h> | ||
| 15 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
| 16 | #include <linux/const.h> | 17 | #include <linux/const.h> |
| 17 | #include <asm/types.h> | 18 | #include <asm/types.h> |
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h index d26a42279036..5f8c615cb5e9 100644 --- a/arch/tile/include/asm/pgtable_32.h +++ b/arch/tile/include/asm/pgtable_32.h | |||
| @@ -74,6 +74,7 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */; | |||
| 74 | #define MAXMEM (_VMALLOC_START - PAGE_OFFSET) | 74 | #define MAXMEM (_VMALLOC_START - PAGE_OFFSET) |
| 75 | 75 | ||
| 76 | /* We have no pmd or pud since we are strictly a two-level page table */ | 76 | /* We have no pmd or pud since we are strictly a two-level page table */ |
| 77 | #define __ARCH_USE_5LEVEL_HACK | ||
| 77 | #include <asm-generic/pgtable-nopmd.h> | 78 | #include <asm-generic/pgtable-nopmd.h> |
| 78 | 79 | ||
| 79 | static inline int pud_huge_page(pud_t pud) { return 0; } | 80 | static inline int pud_huge_page(pud_t pud) { return 0; } |
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h index e96cec52f6d8..96fe58b45118 100644 --- a/arch/tile/include/asm/pgtable_64.h +++ b/arch/tile/include/asm/pgtable_64.h | |||
| @@ -59,6 +59,7 @@ | |||
| 59 | #ifndef __ASSEMBLY__ | 59 | #ifndef __ASSEMBLY__ |
| 60 | 60 | ||
| 61 | /* We have no pud since we are a three-level page table. */ | 61 | /* We have no pud since we are a three-level page table. */ |
| 62 | #define __ARCH_USE_5LEVEL_HACK | ||
| 62 | #include <asm-generic/pgtable-nopud.h> | 63 | #include <asm-generic/pgtable-nopud.h> |
| 63 | 64 | ||
| 64 | /* | 65 | /* |
diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h index cfbe59752469..179c0ea87a0c 100644 --- a/arch/um/include/asm/pgtable-2level.h +++ b/arch/um/include/asm/pgtable-2level.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #ifndef __UM_PGTABLE_2LEVEL_H | 8 | #ifndef __UM_PGTABLE_2LEVEL_H |
| 9 | #define __UM_PGTABLE_2LEVEL_H | 9 | #define __UM_PGTABLE_2LEVEL_H |
| 10 | 10 | ||
| 11 | #define __ARCH_USE_5LEVEL_HACK | ||
| 11 | #include <asm-generic/pgtable-nopmd.h> | 12 | #include <asm-generic/pgtable-nopmd.h> |
| 12 | 13 | ||
| 13 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ | 14 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ |
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h index bae8523a162f..c4d876dfb9ac 100644 --- a/arch/um/include/asm/pgtable-3level.h +++ b/arch/um/include/asm/pgtable-3level.h | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #ifndef __UM_PGTABLE_3LEVEL_H | 7 | #ifndef __UM_PGTABLE_3LEVEL_H |
| 8 | #define __UM_PGTABLE_3LEVEL_H | 8 | #define __UM_PGTABLE_3LEVEL_H |
| 9 | 9 | ||
| 10 | #define __ARCH_USE_5LEVEL_HACK | ||
| 10 | #include <asm-generic/pgtable-nopud.h> | 11 | #include <asm-generic/pgtable-nopud.h> |
| 11 | 12 | ||
| 12 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ | 13 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ |
diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h index 818d0f5598e3..a4f2bef37e70 100644 --- a/arch/unicore32/include/asm/pgtable.h +++ b/arch/unicore32/include/asm/pgtable.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #ifndef __UNICORE_PGTABLE_H__ | 12 | #ifndef __UNICORE_PGTABLE_H__ |
| 13 | #define __UNICORE_PGTABLE_H__ | 13 | #define __UNICORE_PGTABLE_H__ |
| 14 | 14 | ||
| 15 | #define __ARCH_USE_5LEVEL_HACK | ||
| 15 | #include <asm-generic/pgtable-nopmd.h> | 16 | #include <asm-generic/pgtable-nopmd.h> |
| 16 | #include <asm/cpu-single.h> | 17 | #include <asm/cpu-single.h> |
| 17 | 18 | ||
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 7ef4a099defc..6205d3b81e6d 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig | |||
| @@ -176,6 +176,7 @@ CONFIG_E1000E=y | |||
| 176 | CONFIG_SKY2=y | 176 | CONFIG_SKY2=y |
| 177 | CONFIG_FORCEDETH=y | 177 | CONFIG_FORCEDETH=y |
| 178 | CONFIG_8139TOO=y | 178 | CONFIG_8139TOO=y |
| 179 | CONFIG_R8169=y | ||
| 179 | CONFIG_FDDI=y | 180 | CONFIG_FDDI=y |
| 180 | CONFIG_INPUT_POLLDEV=y | 181 | CONFIG_INPUT_POLLDEV=y |
| 181 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set | 182 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set |
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index afb222b63cae..c84584bb9402 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c | |||
| @@ -604,7 +604,7 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx, | |||
| 604 | return &amd_f15_PMC20; | 604 | return &amd_f15_PMC20; |
| 605 | } | 605 | } |
| 606 | case AMD_EVENT_NB: | 606 | case AMD_EVENT_NB: |
| 607 | /* moved to perf_event_amd_uncore.c */ | 607 | /* moved to uncore.c */ |
| 608 | return &emptyconstraint; | 608 | return &emptyconstraint; |
| 609 | default: | 609 | default: |
| 610 | return &emptyconstraint; | 610 | return &emptyconstraint; |
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index aff4b5b69d40..238ae3248ba5 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * perf_event_intel_cstate.c: support cstate residency counters | 2 | * Support cstate residency counters |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2015, Intel Corp. | 4 | * Copyright (C) 2015, Intel Corp. |
| 5 | * Author: Kan Liang (kan.liang@intel.com) | 5 | * Author: Kan Liang (kan.liang@intel.com) |
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 22054ca49026..9d05c7e67f60 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters | 2 | * Support Intel RAPL energy consumption counters |
| 3 | * Copyright (C) 2013 Google, Inc., Stephane Eranian | 3 | * Copyright (C) 2013 Google, Inc., Stephane Eranian |
| 4 | * | 4 | * |
| 5 | * Intel RAPL interface is specified in the IA-32 Manual Vol3b | 5 | * Intel RAPL interface is specified in the IA-32 Manual Vol3b |
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index ad986c1e29bc..df5989f27b1b 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h | |||
| @@ -360,7 +360,7 @@ extern struct list_head pci2phy_map_head; | |||
| 360 | extern struct pci_extra_dev *uncore_extra_pci_dev; | 360 | extern struct pci_extra_dev *uncore_extra_pci_dev; |
| 361 | extern struct event_constraint uncore_constraint_empty; | 361 | extern struct event_constraint uncore_constraint_empty; |
| 362 | 362 | ||
| 363 | /* perf_event_intel_uncore_snb.c */ | 363 | /* uncore_snb.c */ |
| 364 | int snb_uncore_pci_init(void); | 364 | int snb_uncore_pci_init(void); |
| 365 | int ivb_uncore_pci_init(void); | 365 | int ivb_uncore_pci_init(void); |
| 366 | int hsw_uncore_pci_init(void); | 366 | int hsw_uncore_pci_init(void); |
| @@ -371,7 +371,7 @@ void nhm_uncore_cpu_init(void); | |||
| 371 | void skl_uncore_cpu_init(void); | 371 | void skl_uncore_cpu_init(void); |
| 372 | int snb_pci2phy_map_init(int devid); | 372 | int snb_pci2phy_map_init(int devid); |
| 373 | 373 | ||
| 374 | /* perf_event_intel_uncore_snbep.c */ | 374 | /* uncore_snbep.c */ |
| 375 | int snbep_uncore_pci_init(void); | 375 | int snbep_uncore_pci_init(void); |
| 376 | void snbep_uncore_cpu_init(void); | 376 | void snbep_uncore_cpu_init(void); |
| 377 | int ivbep_uncore_pci_init(void); | 377 | int ivbep_uncore_pci_init(void); |
| @@ -385,5 +385,5 @@ void knl_uncore_cpu_init(void); | |||
| 385 | int skx_uncore_pci_init(void); | 385 | int skx_uncore_pci_init(void); |
| 386 | void skx_uncore_cpu_init(void); | 386 | void skx_uncore_cpu_init(void); |
| 387 | 387 | ||
| 388 | /* perf_event_intel_uncore_nhmex.c */ | 388 | /* uncore_nhmex.c */ |
| 389 | void nhmex_uncore_cpu_init(void); | 389 | void nhmex_uncore_cpu_init(void); |
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index db64baf0e500..8bef70e7f3cc 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c | |||
| @@ -158,13 +158,13 @@ void hyperv_init(void) | |||
| 158 | clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100); | 158 | clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100); |
| 159 | return; | 159 | return; |
| 160 | } | 160 | } |
| 161 | register_msr_cs: | ||
| 161 | #endif | 162 | #endif |
| 162 | /* | 163 | /* |
| 163 | * For 32 bit guests just use the MSR based mechanism for reading | 164 | * For 32 bit guests just use the MSR based mechanism for reading |
| 164 | * the partition counter. | 165 | * the partition counter. |
| 165 | */ | 166 | */ |
| 166 | 167 | ||
| 167 | register_msr_cs: | ||
| 168 | hyperv_cs = &hyperv_cs_msr; | 168 | hyperv_cs = &hyperv_cs_msr; |
| 169 | if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) | 169 | if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) |
| 170 | clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100); | 170 | clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100); |
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 4e7772387c6e..b04bb6dfed7f 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h | |||
| @@ -289,7 +289,8 @@ | |||
| 289 | #define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ | 289 | #define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ |
| 290 | #define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ | 290 | #define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ |
| 291 | #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ | 291 | #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ |
| 292 | #define X86_FEATURE_RDPID (16*32+ 22) /* RDPID instruction */ | 292 | #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ |
| 293 | #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ | ||
| 293 | 294 | ||
| 294 | /* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ | 295 | /* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ |
| 295 | #define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ | 296 | #define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ |
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 8b4de22d6429..62484333673d 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
| @@ -273,6 +273,8 @@ static inline pgdval_t pgd_flags(pgd_t pgd) | |||
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | #if CONFIG_PGTABLE_LEVELS > 3 | 275 | #if CONFIG_PGTABLE_LEVELS > 3 |
| 276 | #include <asm-generic/5level-fixup.h> | ||
| 277 | |||
| 276 | typedef struct { pudval_t pud; } pud_t; | 278 | typedef struct { pudval_t pud; } pud_t; |
| 277 | 279 | ||
| 278 | static inline pud_t native_make_pud(pmdval_t val) | 280 | static inline pud_t native_make_pud(pmdval_t val) |
| @@ -285,6 +287,7 @@ static inline pudval_t native_pud_val(pud_t pud) | |||
| 285 | return pud.pud; | 287 | return pud.pud; |
| 286 | } | 288 | } |
| 287 | #else | 289 | #else |
| 290 | #define __ARCH_USE_5LEVEL_HACK | ||
| 288 | #include <asm-generic/pgtable-nopud.h> | 291 | #include <asm-generic/pgtable-nopud.h> |
| 289 | 292 | ||
| 290 | static inline pudval_t native_pud_val(pud_t pud) | 293 | static inline pudval_t native_pud_val(pud_t pud) |
| @@ -306,6 +309,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) | |||
| 306 | return pmd.pmd; | 309 | return pmd.pmd; |
| 307 | } | 310 | } |
| 308 | #else | 311 | #else |
| 312 | #define __ARCH_USE_5LEVEL_HACK | ||
| 309 | #include <asm-generic/pgtable-nopmd.h> | 313 | #include <asm-generic/pgtable-nopmd.h> |
| 310 | 314 | ||
| 311 | static inline pmdval_t native_pmd_val(pmd_t pmd) | 315 | static inline pmdval_t native_pmd_val(pmd_t pmd) |
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h index 34684adb6899..b3b09b98896d 100644 --- a/arch/x86/include/asm/pkeys.h +++ b/arch/x86/include/asm/pkeys.h | |||
| @@ -46,6 +46,15 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey, | |||
| 46 | static inline | 46 | static inline |
| 47 | bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) | 47 | bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) |
| 48 | { | 48 | { |
| 49 | /* | ||
| 50 | * "Allocated" pkeys are those that have been returned | ||
| 51 | * from pkey_alloc(). pkey 0 is special, and never | ||
| 52 | * returned from pkey_alloc(). | ||
| 53 | */ | ||
| 54 | if (pkey <= 0) | ||
| 55 | return false; | ||
| 56 | if (pkey >= arch_max_pkey()) | ||
| 57 | return false; | ||
| 49 | return mm_pkey_allocation_map(mm) & (1U << pkey); | 58 | return mm_pkey_allocation_map(mm) & (1U << pkey); |
| 50 | } | 59 | } |
| 51 | 60 | ||
| @@ -82,12 +91,6 @@ int mm_pkey_alloc(struct mm_struct *mm) | |||
| 82 | static inline | 91 | static inline |
| 83 | int mm_pkey_free(struct mm_struct *mm, int pkey) | 92 | int mm_pkey_free(struct mm_struct *mm, int pkey) |
| 84 | { | 93 | { |
| 85 | /* | ||
| 86 | * pkey 0 is special, always allocated and can never | ||
| 87 | * be freed. | ||
| 88 | */ | ||
| 89 | if (!pkey) | ||
| 90 | return -EINVAL; | ||
| 91 | if (!mm_pkey_is_allocated(mm, pkey)) | 94 | if (!mm_pkey_is_allocated(mm, pkey)) |
| 92 | return -EINVAL; | 95 | return -EINVAL; |
| 93 | 96 | ||
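The pkeys.h hunk above folds the pkey-0 and out-of-range checks into mm_pkey_is_allocated(), so mm_pkey_free() now rejects key 0 through the common path instead of a special case. An illustrative user-space sketch of the visible behaviour (assumes glibc 2.27+ pkey wrappers and a CPU/kernel with protection keys enabled):

#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	int pkey = pkey_alloc(0, 0);

	if (pkey < 0) {
		perror("pkey_alloc");	/* e.g. no OSPKE or keys exhausted */
		return 1;
	}
	printf("allocated pkey %d (pkey_alloc() never hands out 0)\n", pkey);

	if (pkey_free(0) != 0)	/* expected: EINVAL, key 0 is never "allocated" */
		printf("pkey_free(0) rejected: %s\n", strerror(errno));

	return pkey_free(pkey);
}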
diff --git a/arch/x86/include/asm/purgatory.h b/arch/x86/include/asm/purgatory.h new file mode 100644 index 000000000000..d7da2729903d --- /dev/null +++ b/arch/x86/include/asm/purgatory.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef _ASM_X86_PURGATORY_H | ||
| 2 | #define _ASM_X86_PURGATORY_H | ||
| 3 | |||
| 4 | #ifndef __ASSEMBLY__ | ||
| 5 | #include <linux/purgatory.h> | ||
| 6 | |||
| 7 | extern void purgatory(void); | ||
| 8 | /* | ||
| 9 | * These forward declarations serve two purposes: | ||
| 10 | * | ||
| 11 | * 1) Make sparse happy when checking arch/purgatory | ||
| 12 | * 2) Document that these are required to be global so the symbol | ||
| 13 | * lookup in kexec works | ||
| 14 | */ | ||
| 15 | extern unsigned long purgatory_backup_dest; | ||
| 16 | extern unsigned long purgatory_backup_src; | ||
| 17 | extern unsigned long purgatory_backup_sz; | ||
| 18 | #endif /* __ASSEMBLY__ */ | ||
| 19 | |||
| 20 | #endif /* _ASM_PURGATORY_H */ | ||
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 6fa85944af83..fc5abff9b7fd 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h | |||
| @@ -188,7 +188,7 @@ static inline void __native_flush_tlb_single(unsigned long addr) | |||
| 188 | 188 | ||
| 189 | static inline void __flush_tlb_all(void) | 189 | static inline void __flush_tlb_all(void) |
| 190 | { | 190 | { |
| 191 | if (static_cpu_has(X86_FEATURE_PGE)) | 191 | if (boot_cpu_has(X86_FEATURE_PGE)) |
| 192 | __flush_tlb_global(); | 192 | __flush_tlb_global(); |
| 193 | else | 193 | else |
| 194 | __flush_tlb(); | 194 | __flush_tlb(); |
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 5138dacf8bb8..07244ea16765 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h | |||
| @@ -58,7 +58,7 @@ struct setup_header { | |||
| 58 | __u32 header; | 58 | __u32 header; |
| 59 | __u16 version; | 59 | __u16 version; |
| 60 | __u32 realmode_swtch; | 60 | __u32 realmode_swtch; |
| 61 | __u16 start_sys; | 61 | __u16 start_sys_seg; |
| 62 | __u16 kernel_version; | 62 | __u16 kernel_version; |
| 63 | __u8 type_of_loader; | 63 | __u8 type_of_loader; |
| 64 | __u8 loadflags; | 64 | __u8 loadflags; |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 4261b3282ad9..aee7deddabd0 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
| @@ -1610,24 +1610,15 @@ static inline void try_to_enable_x2apic(int remap_mode) { } | |||
| 1610 | static inline void __x2apic_enable(void) { } | 1610 | static inline void __x2apic_enable(void) { } |
| 1611 | #endif /* !CONFIG_X86_X2APIC */ | 1611 | #endif /* !CONFIG_X86_X2APIC */ |
| 1612 | 1612 | ||
| 1613 | static int __init try_to_enable_IR(void) | ||
| 1614 | { | ||
| 1615 | #ifdef CONFIG_X86_IO_APIC | ||
| 1616 | if (!x2apic_enabled() && skip_ioapic_setup) { | ||
| 1617 | pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n"); | ||
| 1618 | return -1; | ||
| 1619 | } | ||
| 1620 | #endif | ||
| 1621 | return irq_remapping_enable(); | ||
| 1622 | } | ||
| 1623 | |||
| 1624 | void __init enable_IR_x2apic(void) | 1613 | void __init enable_IR_x2apic(void) |
| 1625 | { | 1614 | { |
| 1626 | unsigned long flags; | 1615 | unsigned long flags; |
| 1627 | int ret, ir_stat; | 1616 | int ret, ir_stat; |
| 1628 | 1617 | ||
| 1629 | if (skip_ioapic_setup) | 1618 | if (skip_ioapic_setup) { |
| 1619 | pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n"); | ||
| 1630 | return; | 1620 | return; |
| 1621 | } | ||
| 1631 | 1622 | ||
| 1632 | ir_stat = irq_remapping_prepare(); | 1623 | ir_stat = irq_remapping_prepare(); |
| 1633 | if (ir_stat < 0 && !x2apic_supported()) | 1624 | if (ir_stat < 0 && !x2apic_supported()) |
| @@ -1645,7 +1636,7 @@ void __init enable_IR_x2apic(void) | |||
| 1645 | 1636 | ||
| 1646 | /* If irq_remapping_prepare() succeeded, try to enable it */ | 1637 | /* If irq_remapping_prepare() succeeded, try to enable it */ |
| 1647 | if (ir_stat >= 0) | 1638 | if (ir_stat >= 0) |
| 1648 | ir_stat = try_to_enable_IR(); | 1639 | ir_stat = irq_remapping_enable(); |
| 1649 | /* ir_stat contains the remap mode or an error code */ | 1640 | /* ir_stat contains the remap mode or an error code */ |
| 1650 | try_to_enable_x2apic(ir_stat); | 1641 | try_to_enable_x2apic(ir_stat); |
| 1651 | 1642 | ||
| @@ -2062,10 +2053,10 @@ static int allocate_logical_cpuid(int apicid) | |||
| 2062 | 2053 | ||
| 2063 | /* Allocate a new cpuid. */ | 2054 | /* Allocate a new cpuid. */ |
| 2064 | if (nr_logical_cpuids >= nr_cpu_ids) { | 2055 | if (nr_logical_cpuids >= nr_cpu_ids) { |
| 2065 | WARN_ONCE(1, "Only %d processors supported." | 2056 | WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %i reached. " |
| 2066 | "Processor %d/0x%x and the rest are ignored.\n", | 2057 | "Processor %d/0x%x and the rest are ignored.\n", |
| 2067 | nr_cpu_ids - 1, nr_logical_cpuids, apicid); | 2058 | nr_cpu_ids, nr_logical_cpuids, apicid); |
| 2068 | return -1; | 2059 | return -EINVAL; |
| 2069 | } | 2060 | } |
| 2070 | 2061 | ||
| 2071 | cpuid_to_apicid[nr_logical_cpuids] = apicid; | 2062 | cpuid_to_apicid[nr_logical_cpuids] = apicid; |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 35a5d5dca2fa..c36140d788fe 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
| @@ -556,10 +556,6 @@ static void early_init_amd(struct cpuinfo_x86 *c) | |||
| 556 | if (c->x86_power & (1 << 8)) { | 556 | if (c->x86_power & (1 << 8)) { |
| 557 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 557 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
| 558 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); | 558 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); |
| 559 | if (check_tsc_unstable()) | ||
| 560 | clear_sched_clock_stable(); | ||
| 561 | } else { | ||
| 562 | clear_sched_clock_stable(); | ||
| 563 | } | 559 | } |
| 564 | 560 | ||
| 565 | /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */ | 561 | /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */ |
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index adc0ebd8bed0..43955ee6715b 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c | |||
| @@ -105,8 +105,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c) | |||
| 105 | #ifdef CONFIG_X86_64 | 105 | #ifdef CONFIG_X86_64 |
| 106 | set_cpu_cap(c, X86_FEATURE_SYSENTER32); | 106 | set_cpu_cap(c, X86_FEATURE_SYSENTER32); |
| 107 | #endif | 107 | #endif |
| 108 | |||
| 109 | clear_sched_clock_stable(); | ||
| 110 | } | 108 | } |
| 111 | 109 | ||
| 112 | static void init_centaur(struct cpuinfo_x86 *c) | 110 | static void init_centaur(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index b11b38c3b0bd..58094a1f9e9d 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
| @@ -88,7 +88,6 @@ static void default_init(struct cpuinfo_x86 *c) | |||
| 88 | strcpy(c->x86_model_id, "386"); | 88 | strcpy(c->x86_model_id, "386"); |
| 89 | } | 89 | } |
| 90 | #endif | 90 | #endif |
| 91 | clear_sched_clock_stable(); | ||
| 92 | } | 91 | } |
| 93 | 92 | ||
| 94 | static const struct cpu_dev default_cpu = { | 93 | static const struct cpu_dev default_cpu = { |
| @@ -1077,8 +1076,6 @@ static void identify_cpu(struct cpuinfo_x86 *c) | |||
| 1077 | */ | 1076 | */ |
| 1078 | if (this_cpu->c_init) | 1077 | if (this_cpu->c_init) |
| 1079 | this_cpu->c_init(c); | 1078 | this_cpu->c_init(c); |
| 1080 | else | ||
| 1081 | clear_sched_clock_stable(); | ||
| 1082 | 1079 | ||
| 1083 | /* Disable the PN if appropriate */ | 1080 | /* Disable the PN if appropriate */ |
| 1084 | squash_the_stupid_serial_number(c); | 1081 | squash_the_stupid_serial_number(c); |
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 0a3bc19de017..a70fd61095f8 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
| @@ -185,7 +185,6 @@ static void early_init_cyrix(struct cpuinfo_x86 *c) | |||
| 185 | set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); | 185 | set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); |
| 186 | break; | 186 | break; |
| 187 | } | 187 | } |
| 188 | clear_sched_clock_stable(); | ||
| 189 | } | 188 | } |
| 190 | 189 | ||
| 191 | static void init_cyrix(struct cpuinfo_x86 *c) | 190 | static void init_cyrix(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index fe0a615a051b..063197771b8d 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
| @@ -162,10 +162,6 @@ static void early_init_intel(struct cpuinfo_x86 *c) | |||
| 162 | if (c->x86_power & (1 << 8)) { | 162 | if (c->x86_power & (1 << 8)) { |
| 163 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 163 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
| 164 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); | 164 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); |
| 165 | if (check_tsc_unstable()) | ||
| 166 | clear_sched_clock_stable(); | ||
| 167 | } else { | ||
| 168 | clear_sched_clock_stable(); | ||
| 169 | } | 165 | } |
| 170 | 166 | ||
| 171 | /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ | 167 | /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ |
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index 0bbe0f3a039f..c05509d38b1f 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | |||
| @@ -28,7 +28,6 @@ | |||
| 28 | #include <linux/sched/signal.h> | 28 | #include <linux/sched/signal.h> |
| 29 | #include <linux/sched/task.h> | 29 | #include <linux/sched/task.h> |
| 30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
| 31 | #include <linux/cpu.h> | ||
| 32 | #include <linux/task_work.h> | 31 | #include <linux/task_work.h> |
| 33 | 32 | ||
| 34 | #include <uapi/linux/magic.h> | 33 | #include <uapi/linux/magic.h> |
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index 8457b4978668..d77d07ab310b 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c | |||
| @@ -16,8 +16,6 @@ static void early_init_transmeta(struct cpuinfo_x86 *c) | |||
| 16 | if (xlvl >= 0x80860001) | 16 | if (xlvl >= 0x80860001) |
| 17 | c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001); | 17 | c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001); |
| 18 | } | 18 | } |
| 19 | |||
| 20 | clear_sched_clock_stable(); | ||
| 21 | } | 19 | } |
| 22 | 20 | ||
| 23 | static void init_transmeta(struct cpuinfo_x86 *c) | 21 | static void init_transmeta(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 891f4dad7b2c..22403a28caf5 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
| @@ -30,7 +30,6 @@ | |||
| 30 | #include <asm/hypervisor.h> | 30 | #include <asm/hypervisor.h> |
| 31 | #include <asm/timer.h> | 31 | #include <asm/timer.h> |
| 32 | #include <asm/apic.h> | 32 | #include <asm/apic.h> |
| 33 | #include <asm/timer.h> | ||
| 34 | 33 | ||
| 35 | #undef pr_fmt | 34 | #undef pr_fmt |
| 36 | #define pr_fmt(fmt) "vmware: " fmt | 35 | #define pr_fmt(fmt) "vmware: " fmt |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 8639bb2ae058..8f3d9cf26ff9 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
| @@ -535,7 +535,7 @@ static void run_sync(void) | |||
| 535 | { | 535 | { |
| 536 | int enable_irqs = irqs_disabled(); | 536 | int enable_irqs = irqs_disabled(); |
| 537 | 537 | ||
| 538 | /* We may be called with interrupts disbled (on bootup). */ | 538 | /* We may be called with interrupts disabled (on bootup). */ |
| 539 | if (enable_irqs) | 539 | if (enable_irqs) |
| 540 | local_irq_enable(); | 540 | local_irq_enable(); |
| 541 | on_each_cpu(do_sync_core, NULL, 1); | 541 | on_each_cpu(do_sync_core, NULL, 1); |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index dc6ba5bda9fc..89ff7af2de50 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
| @@ -354,7 +354,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer) | |||
| 354 | 354 | ||
| 355 | irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq)); | 355 | irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq)); |
| 356 | irq_domain_activate_irq(irq_get_irq_data(hdev->irq)); | 356 | irq_domain_activate_irq(irq_get_irq_data(hdev->irq)); |
| 357 | disable_irq(hdev->irq); | 357 | disable_hardirq(hdev->irq); |
| 358 | irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); | 358 | irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); |
| 359 | enable_irq(hdev->irq); | 359 | enable_irq(hdev->irq); |
| 360 | } | 360 | } |
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c index bdb83e431d89..38b64587b31b 100644 --- a/arch/x86/kernel/kdebugfs.c +++ b/arch/x86/kernel/kdebugfs.c | |||
| @@ -167,7 +167,7 @@ static int __init boot_params_kdebugfs_init(void) | |||
| 167 | struct dentry *dbp, *version, *data; | 167 | struct dentry *dbp, *version, *data; |
| 168 | int error = -ENOMEM; | 168 | int error = -ENOMEM; |
| 169 | 169 | ||
| 170 | dbp = debugfs_create_dir("boot_params", NULL); | 170 | dbp = debugfs_create_dir("boot_params", arch_debugfs_dir); |
| 171 | if (!dbp) | 171 | if (!dbp) |
| 172 | return -ENOMEM; | 172 | return -ENOMEM; |
| 173 | 173 | ||
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h index c6ee63f927ab..d688826e5736 100644 --- a/arch/x86/kernel/kprobes/common.h +++ b/arch/x86/kernel/kprobes/common.h | |||
| @@ -67,7 +67,7 @@ | |||
| 67 | #endif | 67 | #endif |
| 68 | 68 | ||
| 69 | /* Ensure if the instruction can be boostable */ | 69 | /* Ensure if the instruction can be boostable */ |
| 70 | extern int can_boost(kprobe_opcode_t *instruction); | 70 | extern int can_boost(kprobe_opcode_t *instruction, void *addr); |
| 71 | /* Recover instruction if given address is probed */ | 71 | /* Recover instruction if given address is probed */ |
| 72 | extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf, | 72 | extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf, |
| 73 | unsigned long addr); | 73 | unsigned long addr); |
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 6384eb754a58..993fa4fe4f68 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
| @@ -167,12 +167,12 @@ NOKPROBE_SYMBOL(skip_prefixes); | |||
| 167 | * Returns non-zero if opcode is boostable. | 167 | * Returns non-zero if opcode is boostable. |
| 168 | * RIP relative instructions are adjusted at copying time in 64 bits mode | 168 | * RIP relative instructions are adjusted at copying time in 64 bits mode |
| 169 | */ | 169 | */ |
| 170 | int can_boost(kprobe_opcode_t *opcodes) | 170 | int can_boost(kprobe_opcode_t *opcodes, void *addr) |
| 171 | { | 171 | { |
| 172 | kprobe_opcode_t opcode; | 172 | kprobe_opcode_t opcode; |
| 173 | kprobe_opcode_t *orig_opcodes = opcodes; | 173 | kprobe_opcode_t *orig_opcodes = opcodes; |
| 174 | 174 | ||
| 175 | if (search_exception_tables((unsigned long)opcodes)) | 175 | if (search_exception_tables((unsigned long)addr)) |
| 176 | return 0; /* Page fault may occur on this address. */ | 176 | return 0; /* Page fault may occur on this address. */ |
| 177 | 177 | ||
| 178 | retry: | 178 | retry: |
| @@ -417,7 +417,7 @@ static int arch_copy_kprobe(struct kprobe *p) | |||
| 417 | * __copy_instruction can modify the displacement of the instruction, | 417 | * __copy_instruction can modify the displacement of the instruction, |
| 418 | * but it doesn't affect boostable check. | 418 | * but it doesn't affect boostable check. |
| 419 | */ | 419 | */ |
| 420 | if (can_boost(p->ainsn.insn)) | 420 | if (can_boost(p->ainsn.insn, p->addr)) |
| 421 | p->ainsn.boostable = 0; | 421 | p->ainsn.boostable = 0; |
| 422 | else | 422 | else |
| 423 | p->ainsn.boostable = -1; | 423 | p->ainsn.boostable = -1; |
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 3d1bee9d6a72..3e7c6e5a08ff 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c | |||
| @@ -178,7 +178,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src) | |||
| 178 | 178 | ||
| 179 | while (len < RELATIVEJUMP_SIZE) { | 179 | while (len < RELATIVEJUMP_SIZE) { |
| 180 | ret = __copy_instruction(dest + len, src + len); | 180 | ret = __copy_instruction(dest + len, src + len); |
| 181 | if (!ret || !can_boost(dest + len)) | 181 | if (!ret || !can_boost(dest + len, src + len)) |
| 182 | return -EINVAL; | 182 | return -EINVAL; |
| 183 | len += ret; | 183 | len += ret; |
| 184 | } | 184 | } |
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 307b1f4543de..857cdbd02867 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c | |||
| @@ -194,19 +194,22 @@ static int arch_update_purgatory(struct kimage *image) | |||
| 194 | 194 | ||
| 195 | /* Setup copying of backup region */ | 195 | /* Setup copying of backup region */ |
| 196 | if (image->type == KEXEC_TYPE_CRASH) { | 196 | if (image->type == KEXEC_TYPE_CRASH) { |
| 197 | ret = kexec_purgatory_get_set_symbol(image, "backup_dest", | 197 | ret = kexec_purgatory_get_set_symbol(image, |
| 198 | "purgatory_backup_dest", | ||
| 198 | &image->arch.backup_load_addr, | 199 | &image->arch.backup_load_addr, |
| 199 | sizeof(image->arch.backup_load_addr), 0); | 200 | sizeof(image->arch.backup_load_addr), 0); |
| 200 | if (ret) | 201 | if (ret) |
| 201 | return ret; | 202 | return ret; |
| 202 | 203 | ||
| 203 | ret = kexec_purgatory_get_set_symbol(image, "backup_src", | 204 | ret = kexec_purgatory_get_set_symbol(image, |
| 205 | "purgatory_backup_src", | ||
| 204 | &image->arch.backup_src_start, | 206 | &image->arch.backup_src_start, |
| 205 | sizeof(image->arch.backup_src_start), 0); | 207 | sizeof(image->arch.backup_src_start), 0); |
| 206 | if (ret) | 208 | if (ret) |
| 207 | return ret; | 209 | return ret; |
| 208 | 210 | ||
| 209 | ret = kexec_purgatory_get_set_symbol(image, "backup_sz", | 211 | ret = kexec_purgatory_get_set_symbol(image, |
| 212 | "purgatory_backup_sz", | ||
| 210 | &image->arch.backup_src_sz, | 213 | &image->arch.backup_src_sz, |
| 211 | sizeof(image->arch.backup_src_sz), 0); | 214 | sizeof(image->arch.backup_src_sz), 0); |
| 212 | if (ret) | 215 | if (ret) |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index e244c19a2451..067f9813fd2c 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
| @@ -223,6 +223,22 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
| 223 | DMI_MATCH(DMI_BOARD_NAME, "P4S800"), | 223 | DMI_MATCH(DMI_BOARD_NAME, "P4S800"), |
| 224 | }, | 224 | }, |
| 225 | }, | 225 | }, |
| 226 | { /* Handle problems with rebooting on ASUS EeeBook X205TA */ | ||
| 227 | .callback = set_acpi_reboot, | ||
| 228 | .ident = "ASUS EeeBook X205TA", | ||
| 229 | .matches = { | ||
| 230 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), | ||
| 231 | DMI_MATCH(DMI_PRODUCT_NAME, "X205TA"), | ||
| 232 | }, | ||
| 233 | }, | ||
| 234 | { /* Handle problems with rebooting on ASUS EeeBook X205TAW */ | ||
| 235 | .callback = set_acpi_reboot, | ||
| 236 | .ident = "ASUS EeeBook X205TAW", | ||
| 237 | .matches = { | ||
| 238 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), | ||
| 239 | DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"), | ||
| 240 | }, | ||
| 241 | }, | ||
| 226 | 242 | ||
| 227 | /* Certec */ | 243 | /* Certec */ |
| 228 | { /* Handle problems with rebooting on Certec BPC600 */ | 244 | { /* Handle problems with rebooting on Certec BPC600 */ |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 46bcda4cb1c2..4f7a9833d8e5 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
| @@ -327,9 +327,16 @@ unsigned long long sched_clock(void) | |||
| 327 | { | 327 | { |
| 328 | return paravirt_sched_clock(); | 328 | return paravirt_sched_clock(); |
| 329 | } | 329 | } |
| 330 | |||
| 331 | static inline bool using_native_sched_clock(void) | ||
| 332 | { | ||
| 333 | return pv_time_ops.sched_clock == native_sched_clock; | ||
| 334 | } | ||
| 330 | #else | 335 | #else |
| 331 | unsigned long long | 336 | unsigned long long |
| 332 | sched_clock(void) __attribute__((alias("native_sched_clock"))); | 337 | sched_clock(void) __attribute__((alias("native_sched_clock"))); |
| 338 | |||
| 339 | static inline bool using_native_sched_clock(void) { return true; } | ||
| 333 | #endif | 340 | #endif |
| 334 | 341 | ||
| 335 | int check_tsc_unstable(void) | 342 | int check_tsc_unstable(void) |
| @@ -1112,8 +1119,10 @@ static void tsc_cs_mark_unstable(struct clocksource *cs) | |||
| 1112 | { | 1119 | { |
| 1113 | if (tsc_unstable) | 1120 | if (tsc_unstable) |
| 1114 | return; | 1121 | return; |
| 1122 | |||
| 1115 | tsc_unstable = 1; | 1123 | tsc_unstable = 1; |
| 1116 | clear_sched_clock_stable(); | 1124 | if (using_native_sched_clock()) |
| 1125 | clear_sched_clock_stable(); | ||
| 1117 | disable_sched_clock_irqtime(); | 1126 | disable_sched_clock_irqtime(); |
| 1118 | pr_info("Marking TSC unstable due to clocksource watchdog\n"); | 1127 | pr_info("Marking TSC unstable due to clocksource watchdog\n"); |
| 1119 | } | 1128 | } |
| @@ -1135,18 +1144,20 @@ static struct clocksource clocksource_tsc = { | |||
| 1135 | 1144 | ||
| 1136 | void mark_tsc_unstable(char *reason) | 1145 | void mark_tsc_unstable(char *reason) |
| 1137 | { | 1146 | { |
| 1138 | if (!tsc_unstable) { | 1147 | if (tsc_unstable) |
| 1139 | tsc_unstable = 1; | 1148 | return; |
| 1149 | |||
| 1150 | tsc_unstable = 1; | ||
| 1151 | if (using_native_sched_clock()) | ||
| 1140 | clear_sched_clock_stable(); | 1152 | clear_sched_clock_stable(); |
| 1141 | disable_sched_clock_irqtime(); | 1153 | disable_sched_clock_irqtime(); |
| 1142 | pr_info("Marking TSC unstable due to %s\n", reason); | 1154 | pr_info("Marking TSC unstable due to %s\n", reason); |
| 1143 | /* Change only the rating, when not registered */ | 1155 | /* Change only the rating, when not registered */ |
| 1144 | if (clocksource_tsc.mult) | 1156 | if (clocksource_tsc.mult) { |
| 1145 | clocksource_mark_unstable(&clocksource_tsc); | 1157 | clocksource_mark_unstable(&clocksource_tsc); |
| 1146 | else { | 1158 | } else { |
| 1147 | clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE; | 1159 | clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE; |
| 1148 | clocksource_tsc.rating = 0; | 1160 | clocksource_tsc.rating = 0; |
| 1149 | } | ||
| 1150 | } | 1161 | } |
| 1151 | } | 1162 | } |
| 1152 | 1163 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 283aa8601833..98e82ee1e699 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -7258,9 +7258,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu) | |||
| 7258 | static int handle_vmclear(struct kvm_vcpu *vcpu) | 7258 | static int handle_vmclear(struct kvm_vcpu *vcpu) |
| 7259 | { | 7259 | { |
| 7260 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 7260 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 7261 | u32 zero = 0; | ||
| 7261 | gpa_t vmptr; | 7262 | gpa_t vmptr; |
| 7262 | struct vmcs12 *vmcs12; | ||
| 7263 | struct page *page; | ||
| 7264 | 7263 | ||
| 7265 | if (!nested_vmx_check_permission(vcpu)) | 7264 | if (!nested_vmx_check_permission(vcpu)) |
| 7266 | return 1; | 7265 | return 1; |
| @@ -7271,22 +7270,9 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) | |||
| 7271 | if (vmptr == vmx->nested.current_vmptr) | 7270 | if (vmptr == vmx->nested.current_vmptr) |
| 7272 | nested_release_vmcs12(vmx); | 7271 | nested_release_vmcs12(vmx); |
| 7273 | 7272 | ||
| 7274 | page = nested_get_page(vcpu, vmptr); | 7273 | kvm_vcpu_write_guest(vcpu, |
| 7275 | if (page == NULL) { | 7274 | vmptr + offsetof(struct vmcs12, launch_state), |
| 7276 | /* | 7275 | &zero, sizeof(zero)); |
| 7277 | * For accurate processor emulation, VMCLEAR beyond available | ||
| 7278 | * physical memory should do nothing at all. However, it is | ||
| 7279 | * possible that a nested vmx bug, not a guest hypervisor bug, | ||
| 7280 | * resulted in this case, so let's shut down before doing any | ||
| 7281 | * more damage: | ||
| 7282 | */ | ||
| 7283 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); | ||
| 7284 | return 1; | ||
| 7285 | } | ||
| 7286 | vmcs12 = kmap(page); | ||
| 7287 | vmcs12->launch_state = 0; | ||
| 7288 | kunmap(page); | ||
| 7289 | nested_release_page(page); | ||
| 7290 | 7276 | ||
| 7291 | nested_free_vmcs02(vmx, vmptr); | 7277 | nested_free_vmcs02(vmx, vmptr); |
| 7292 | 7278 | ||
| @@ -9694,10 +9680,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, | |||
| 9694 | return false; | 9680 | return false; |
| 9695 | 9681 | ||
| 9696 | page = nested_get_page(vcpu, vmcs12->msr_bitmap); | 9682 | page = nested_get_page(vcpu, vmcs12->msr_bitmap); |
| 9697 | if (!page) { | 9683 | if (!page) |
| 9698 | WARN_ON(1); | ||
| 9699 | return false; | 9684 | return false; |
| 9700 | } | ||
| 9701 | msr_bitmap_l1 = (unsigned long *)kmap(page); | 9685 | msr_bitmap_l1 = (unsigned long *)kmap(page); |
| 9702 | 9686 | ||
| 9703 | memset(msr_bitmap_l0, 0xff, PAGE_SIZE); | 9687 | memset(msr_bitmap_l0, 0xff, PAGE_SIZE); |
| @@ -11121,8 +11105,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, | |||
| 11121 | */ | 11105 | */ |
| 11122 | static void vmx_leave_nested(struct kvm_vcpu *vcpu) | 11106 | static void vmx_leave_nested(struct kvm_vcpu *vcpu) |
| 11123 | { | 11107 | { |
| 11124 | if (is_guest_mode(vcpu)) | 11108 | if (is_guest_mode(vcpu)) { |
| 11109 | to_vmx(vcpu)->nested.nested_run_pending = 0; | ||
| 11125 | nested_vmx_vmexit(vcpu, -1, 0, 0); | 11110 | nested_vmx_vmexit(vcpu, -1, 0, 0); |
| 11111 | } | ||
| 11126 | free_nested(to_vmx(vcpu)); | 11112 | free_nested(to_vmx(vcpu)); |
| 11127 | } | 11113 | } |
| 11128 | 11114 | ||
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 99c7805a9693..1f3b6ef105cd 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c | |||
| @@ -106,32 +106,35 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, | |||
| 106 | unsigned long end, int write, struct page **pages, int *nr) | 106 | unsigned long end, int write, struct page **pages, int *nr) |
| 107 | { | 107 | { |
| 108 | struct dev_pagemap *pgmap = NULL; | 108 | struct dev_pagemap *pgmap = NULL; |
| 109 | int nr_start = *nr; | 109 | int nr_start = *nr, ret = 0; |
| 110 | pte_t *ptep; | 110 | pte_t *ptep, *ptem; |
| 111 | 111 | ||
| 112 | ptep = pte_offset_map(&pmd, addr); | 112 | /* |
| 113 | * Keep the original mapped PTE value (ptem) around since we | ||
| 114 | * might increment ptep off the end of the page when finishing | ||
| 115 | * our loop iteration. | ||
| 116 | */ | ||
| 117 | ptem = ptep = pte_offset_map(&pmd, addr); | ||
| 113 | do { | 118 | do { |
| 114 | pte_t pte = gup_get_pte(ptep); | 119 | pte_t pte = gup_get_pte(ptep); |
| 115 | struct page *page; | 120 | struct page *page; |
| 116 | 121 | ||
| 117 | /* Similar to the PMD case, NUMA hinting must take slow path */ | 122 | /* Similar to the PMD case, NUMA hinting must take slow path */ |
| 118 | if (pte_protnone(pte)) { | 123 | if (pte_protnone(pte)) |
| 119 | pte_unmap(ptep); | 124 | break; |
| 120 | return 0; | 125 | |
| 121 | } | 126 | if (!pte_allows_gup(pte_val(pte), write)) |
| 127 | break; | ||
| 122 | 128 | ||
| 123 | if (pte_devmap(pte)) { | 129 | if (pte_devmap(pte)) { |
| 124 | pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); | 130 | pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); |
| 125 | if (unlikely(!pgmap)) { | 131 | if (unlikely(!pgmap)) { |
| 126 | undo_dev_pagemap(nr, nr_start, pages); | 132 | undo_dev_pagemap(nr, nr_start, pages); |
| 127 | pte_unmap(ptep); | 133 | break; |
| 128 | return 0; | ||
| 129 | } | 134 | } |
| 130 | } else if (!pte_allows_gup(pte_val(pte), write) || | 135 | } else if (pte_special(pte)) |
| 131 | pte_special(pte)) { | 136 | break; |
| 132 | pte_unmap(ptep); | 137 | |
| 133 | return 0; | ||
| 134 | } | ||
| 135 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); | 138 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); |
| 136 | page = pte_page(pte); | 139 | page = pte_page(pte); |
| 137 | get_page(page); | 140 | get_page(page); |
| @@ -141,9 +144,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, | |||
| 141 | (*nr)++; | 144 | (*nr)++; |
| 142 | 145 | ||
| 143 | } while (ptep++, addr += PAGE_SIZE, addr != end); | 146 | } while (ptep++, addr += PAGE_SIZE, addr != end); |
| 144 | pte_unmap(ptep - 1); | 147 | if (addr == end) |
| 148 | ret = 1; | ||
| 149 | pte_unmap(ptem); | ||
| 145 | 150 | ||
| 146 | return 1; | 151 | return ret; |
| 147 | } | 152 | } |
| 148 | 153 | ||
| 149 | static inline void get_head_page_multiple(struct page *page, int nr) | 154 | static inline void get_head_page_multiple(struct page *page, int nr) |
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 0cb52ae0a8f0..190e718694b1 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c | |||
| @@ -735,6 +735,15 @@ void pcibios_disable_device (struct pci_dev *dev) | |||
| 735 | pcibios_disable_irq(dev); | 735 | pcibios_disable_irq(dev); |
| 736 | } | 736 | } |
| 737 | 737 | ||
| 738 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC | ||
| 739 | void pcibios_release_device(struct pci_dev *dev) | ||
| 740 | { | ||
| 741 | if (atomic_dec_return(&dev->enable_cnt) >= 0) | ||
| 742 | pcibios_disable_device(dev); | ||
| 743 | |||
| 744 | } | ||
| 745 | #endif | ||
| 746 | |||
| 738 | int pci_ext_cfg_avail(void) | 747 | int pci_ext_cfg_avail(void) |
| 739 | { | 748 | { |
| 740 | if (raw_pci_ext_ops) | 749 | if (raw_pci_ext_ops) |
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index e1fb269c87af..292ab0364a89 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c | |||
| @@ -234,23 +234,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
| 234 | return 1; | 234 | return 1; |
| 235 | 235 | ||
| 236 | for_each_pci_msi_entry(msidesc, dev) { | 236 | for_each_pci_msi_entry(msidesc, dev) { |
| 237 | __pci_read_msi_msg(msidesc, &msg); | 237 | pirq = xen_allocate_pirq_msi(dev, msidesc); |
| 238 | pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | | 238 | if (pirq < 0) { |
| 239 | ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); | 239 | irq = -ENODEV; |
| 240 | if (msg.data != XEN_PIRQ_MSI_DATA || | 240 | goto error; |
| 241 | xen_irq_from_pirq(pirq) < 0) { | ||
| 242 | pirq = xen_allocate_pirq_msi(dev, msidesc); | ||
| 243 | if (pirq < 0) { | ||
| 244 | irq = -ENODEV; | ||
| 245 | goto error; | ||
| 246 | } | ||
| 247 | xen_msi_compose_msg(dev, pirq, &msg); | ||
| 248 | __pci_write_msi_msg(msidesc, &msg); | ||
| 249 | dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); | ||
| 250 | } else { | ||
| 251 | dev_dbg(&dev->dev, | ||
| 252 | "xen: msi already bound to pirq=%d\n", pirq); | ||
| 253 | } | 241 | } |
| 242 | xen_msi_compose_msg(dev, pirq, &msg); | ||
| 243 | __pci_write_msi_msg(msidesc, &msg); | ||
| 244 | dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); | ||
| 254 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, | 245 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, |
| 255 | (type == PCI_CAP_ID_MSI) ? nvec : 1, | 246 | (type == PCI_CAP_ID_MSI) ? nvec : 1, |
| 256 | (type == PCI_CAP_ID_MSIX) ? | 247 | (type == PCI_CAP_ID_MSIX) ? |
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 766d4d3529a1..f25982cdff90 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
| @@ -1847,7 +1847,6 @@ static void pq_init(int node, int pnode) | |||
| 1847 | 1847 | ||
| 1848 | ops.write_payload_first(pnode, first); | 1848 | ops.write_payload_first(pnode, first); |
| 1849 | ops.write_payload_last(pnode, last); | 1849 | ops.write_payload_last(pnode, last); |
| 1850 | ops.write_g_sw_ack(pnode, 0xffffUL); | ||
| 1851 | 1850 | ||
| 1852 | /* in effect, all msg_type's are set to MSG_NOOP */ | 1851 | /* in effect, all msg_type's are set to MSG_NOOP */ |
| 1853 | memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE); | 1852 | memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE); |
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c index 25e068ba3382..470edad96bb9 100644 --- a/arch/x86/purgatory/purgatory.c +++ b/arch/x86/purgatory/purgatory.c | |||
| @@ -10,21 +10,19 @@ | |||
| 10 | * Version 2. See the file COPYING for more details. | 10 | * Version 2. See the file COPYING for more details. |
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include <linux/bug.h> | ||
| 14 | #include <asm/purgatory.h> | ||
| 15 | |||
| 13 | #include "sha256.h" | 16 | #include "sha256.h" |
| 14 | #include "../boot/string.h" | 17 | #include "../boot/string.h" |
| 15 | 18 | ||
| 16 | struct sha_region { | 19 | unsigned long purgatory_backup_dest __section(.kexec-purgatory); |
| 17 | unsigned long start; | 20 | unsigned long purgatory_backup_src __section(.kexec-purgatory); |
| 18 | unsigned long len; | 21 | unsigned long purgatory_backup_sz __section(.kexec-purgatory); |
| 19 | }; | ||
| 20 | |||
| 21 | unsigned long backup_dest = 0; | ||
| 22 | unsigned long backup_src = 0; | ||
| 23 | unsigned long backup_sz = 0; | ||
| 24 | 22 | ||
| 25 | u8 sha256_digest[SHA256_DIGEST_SIZE] = { 0 }; | 23 | u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(.kexec-purgatory); |
| 26 | 24 | ||
| 27 | struct sha_region sha_regions[16] = {}; | 25 | struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(.kexec-purgatory); |
| 28 | 26 | ||
| 29 | /* | 27 | /* |
| 30 | * On x86, second kernel requries first 640K of memory to boot. Copy | 28 | * On x86, second kernel requries first 640K of memory to boot. Copy |
| @@ -33,26 +31,28 @@ struct sha_region sha_regions[16] = {}; | |||
| 33 | */ | 31 | */ |
| 34 | static int copy_backup_region(void) | 32 | static int copy_backup_region(void) |
| 35 | { | 33 | { |
| 36 | if (backup_dest) | 34 | if (purgatory_backup_dest) { |
| 37 | memcpy((void *)backup_dest, (void *)backup_src, backup_sz); | 35 | memcpy((void *)purgatory_backup_dest, |
| 38 | 36 | (void *)purgatory_backup_src, purgatory_backup_sz); | |
| 37 | } | ||
| 39 | return 0; | 38 | return 0; |
| 40 | } | 39 | } |
| 41 | 40 | ||
| 42 | int verify_sha256_digest(void) | 41 | static int verify_sha256_digest(void) |
| 43 | { | 42 | { |
| 44 | struct sha_region *ptr, *end; | 43 | struct kexec_sha_region *ptr, *end; |
| 45 | u8 digest[SHA256_DIGEST_SIZE]; | 44 | u8 digest[SHA256_DIGEST_SIZE]; |
| 46 | struct sha256_state sctx; | 45 | struct sha256_state sctx; |
| 47 | 46 | ||
| 48 | sha256_init(&sctx); | 47 | sha256_init(&sctx); |
| 49 | end = &sha_regions[sizeof(sha_regions)/sizeof(sha_regions[0])]; | 48 | end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions); |
| 50 | for (ptr = sha_regions; ptr < end; ptr++) | 49 | |
| 50 | for (ptr = purgatory_sha_regions; ptr < end; ptr++) | ||
| 51 | sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); | 51 | sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); |
| 52 | 52 | ||
| 53 | sha256_final(&sctx, digest); | 53 | sha256_final(&sctx, digest); |
| 54 | 54 | ||
| 55 | if (memcmp(digest, sha256_digest, sizeof(digest))) | 55 | if (memcmp(digest, purgatory_sha256_digest, sizeof(digest))) |
| 56 | return 1; | 56 | return 1; |
| 57 | 57 | ||
| 58 | return 0; | 58 | return 0; |
diff --git a/arch/x86/purgatory/setup-x86_64.S b/arch/x86/purgatory/setup-x86_64.S index fe3c91ba1bd0..dfae9b9e60b5 100644 --- a/arch/x86/purgatory/setup-x86_64.S +++ b/arch/x86/purgatory/setup-x86_64.S | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | * This source code is licensed under the GNU General Public License, | 9 | * This source code is licensed under the GNU General Public License, |
| 10 | * Version 2. See the file COPYING for more details. | 10 | * Version 2. See the file COPYING for more details. |
| 11 | */ | 11 | */ |
| 12 | #include <asm/purgatory.h> | ||
| 12 | 13 | ||
| 13 | .text | 14 | .text |
| 14 | .globl purgatory_start | 15 | .globl purgatory_start |
diff --git a/arch/x86/purgatory/sha256.h b/arch/x86/purgatory/sha256.h index bd15a4127735..2867d9825a57 100644 --- a/arch/x86/purgatory/sha256.h +++ b/arch/x86/purgatory/sha256.h | |||
| @@ -10,7 +10,6 @@ | |||
| 10 | #ifndef SHA256_H | 10 | #ifndef SHA256_H |
| 11 | #define SHA256_H | 11 | #define SHA256_H |
| 12 | 12 | ||
| 13 | |||
| 14 | #include <linux/types.h> | 13 | #include <linux/types.h> |
| 15 | #include <crypto/sha.h> | 14 | #include <crypto/sha.h> |
| 16 | 15 | ||
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 8aa0e0d9cbb2..30dd5b2e4ad5 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #ifndef _XTENSA_PGTABLE_H | 11 | #ifndef _XTENSA_PGTABLE_H |
| 12 | #define _XTENSA_PGTABLE_H | 12 | #define _XTENSA_PGTABLE_H |
| 13 | 13 | ||
| 14 | #define __ARCH_USE_5LEVEL_HACK | ||
| 14 | #include <asm-generic/pgtable-nopmd.h> | 15 | #include <asm-generic/pgtable-nopmd.h> |
| 15 | #include <asm/page.h> | 16 | #include <asm/page.h> |
| 16 | #include <asm/kmem_layout.h> | 17 | #include <asm/kmem_layout.h> |
diff --git a/block/blk-core.c b/block/blk-core.c index 1086dac8724c..0eeb99ef654f 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
| @@ -578,8 +578,6 @@ void blk_cleanup_queue(struct request_queue *q) | |||
| 578 | q->queue_lock = &q->__queue_lock; | 578 | q->queue_lock = &q->__queue_lock; |
| 579 | spin_unlock_irq(lock); | 579 | spin_unlock_irq(lock); |
| 580 | 580 | ||
| 581 | put_disk_devt(q->disk_devt); | ||
| 582 | |||
| 583 | /* @q is and will stay empty, shutdown and put */ | 581 | /* @q is and will stay empty, shutdown and put */ |
| 584 | blk_put_queue(q); | 582 | blk_put_queue(q); |
| 585 | } | 583 | } |
| @@ -2017,17 +2015,34 @@ blk_qc_t generic_make_request(struct bio *bio) | |||
| 2017 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); | 2015 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
| 2018 | 2016 | ||
| 2019 | if (likely(blk_queue_enter(q, false) == 0)) { | 2017 | if (likely(blk_queue_enter(q, false) == 0)) { |
| 2018 | struct bio_list hold; | ||
| 2019 | struct bio_list lower, same; | ||
| 2020 | |||
| 2021 | /* Create a fresh bio_list for all subordinate requests */ | ||
| 2022 | hold = bio_list_on_stack; | ||
| 2023 | bio_list_init(&bio_list_on_stack); | ||
| 2020 | ret = q->make_request_fn(q, bio); | 2024 | ret = q->make_request_fn(q, bio); |
| 2021 | 2025 | ||
| 2022 | blk_queue_exit(q); | 2026 | blk_queue_exit(q); |
| 2023 | 2027 | ||
| 2024 | bio = bio_list_pop(current->bio_list); | 2028 | /* sort new bios into those for a lower level |
| 2029 | * and those for the same level | ||
| 2030 | */ | ||
| 2031 | bio_list_init(&lower); | ||
| 2032 | bio_list_init(&same); | ||
| 2033 | while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL) | ||
| 2034 | if (q == bdev_get_queue(bio->bi_bdev)) | ||
| 2035 | bio_list_add(&same, bio); | ||
| 2036 | else | ||
| 2037 | bio_list_add(&lower, bio); | ||
| 2038 | /* now assemble so we handle the lowest level first */ | ||
| 2039 | bio_list_merge(&bio_list_on_stack, &lower); | ||
| 2040 | bio_list_merge(&bio_list_on_stack, &same); | ||
| 2041 | bio_list_merge(&bio_list_on_stack, &hold); | ||
| 2025 | } else { | 2042 | } else { |
| 2026 | struct bio *bio_next = bio_list_pop(current->bio_list); | ||
| 2027 | |||
| 2028 | bio_io_error(bio); | 2043 | bio_io_error(bio); |
| 2029 | bio = bio_next; | ||
| 2030 | } | 2044 | } |
| 2045 | bio = bio_list_pop(current->bio_list); | ||
| 2031 | } while (bio); | 2046 | } while (bio); |
| 2032 | current->bio_list = NULL; /* deactivate */ | 2047 | current->bio_list = NULL; /* deactivate */ |
| 2033 | 2048 | ||
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 295e69670c39..d745ab81033a 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c | |||
| @@ -17,6 +17,15 @@ static void blk_mq_sysfs_release(struct kobject *kobj) | |||
| 17 | { | 17 | { |
| 18 | } | 18 | } |
| 19 | 19 | ||
| 20 | static void blk_mq_hw_sysfs_release(struct kobject *kobj) | ||
| 21 | { | ||
| 22 | struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, | ||
| 23 | kobj); | ||
| 24 | free_cpumask_var(hctx->cpumask); | ||
| 25 | kfree(hctx->ctxs); | ||
| 26 | kfree(hctx); | ||
| 27 | } | ||
| 28 | |||
| 20 | struct blk_mq_ctx_sysfs_entry { | 29 | struct blk_mq_ctx_sysfs_entry { |
| 21 | struct attribute attr; | 30 | struct attribute attr; |
| 22 | ssize_t (*show)(struct blk_mq_ctx *, char *); | 31 | ssize_t (*show)(struct blk_mq_ctx *, char *); |
| @@ -200,7 +209,7 @@ static struct kobj_type blk_mq_ctx_ktype = { | |||
| 200 | static struct kobj_type blk_mq_hw_ktype = { | 209 | static struct kobj_type blk_mq_hw_ktype = { |
| 201 | .sysfs_ops = &blk_mq_hw_sysfs_ops, | 210 | .sysfs_ops = &blk_mq_hw_sysfs_ops, |
| 202 | .default_attrs = default_hw_ctx_attrs, | 211 | .default_attrs = default_hw_ctx_attrs, |
| 203 | .release = blk_mq_sysfs_release, | 212 | .release = blk_mq_hw_sysfs_release, |
| 204 | }; | 213 | }; |
| 205 | 214 | ||
| 206 | static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) | 215 | static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) |
| @@ -242,24 +251,15 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) | |||
| 242 | static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q) | 251 | static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q) |
| 243 | { | 252 | { |
| 244 | struct blk_mq_hw_ctx *hctx; | 253 | struct blk_mq_hw_ctx *hctx; |
| 245 | struct blk_mq_ctx *ctx; | 254 | int i; |
| 246 | int i, j; | ||
| 247 | 255 | ||
| 248 | queue_for_each_hw_ctx(q, hctx, i) { | 256 | queue_for_each_hw_ctx(q, hctx, i) |
| 249 | blk_mq_unregister_hctx(hctx); | 257 | blk_mq_unregister_hctx(hctx); |
| 250 | 258 | ||
| 251 | hctx_for_each_ctx(hctx, ctx, j) | ||
| 252 | kobject_put(&ctx->kobj); | ||
| 253 | |||
| 254 | kobject_put(&hctx->kobj); | ||
| 255 | } | ||
| 256 | |||
| 257 | blk_mq_debugfs_unregister_hctxs(q); | 259 | blk_mq_debugfs_unregister_hctxs(q); |
| 258 | 260 | ||
| 259 | kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); | 261 | kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); |
| 260 | kobject_del(&q->mq_kobj); | 262 | kobject_del(&q->mq_kobj); |
| 261 | kobject_put(&q->mq_kobj); | ||
| 262 | |||
| 263 | kobject_put(&dev->kobj); | 263 | kobject_put(&dev->kobj); |
| 264 | 264 | ||
| 265 | q->mq_sysfs_init_done = false; | 265 | q->mq_sysfs_init_done = false; |
| @@ -277,7 +277,19 @@ void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx) | |||
| 277 | kobject_init(&hctx->kobj, &blk_mq_hw_ktype); | 277 | kobject_init(&hctx->kobj, &blk_mq_hw_ktype); |
| 278 | } | 278 | } |
| 279 | 279 | ||
| 280 | static void blk_mq_sysfs_init(struct request_queue *q) | 280 | void blk_mq_sysfs_deinit(struct request_queue *q) |
| 281 | { | ||
| 282 | struct blk_mq_ctx *ctx; | ||
| 283 | int cpu; | ||
| 284 | |||
| 285 | for_each_possible_cpu(cpu) { | ||
| 286 | ctx = per_cpu_ptr(q->queue_ctx, cpu); | ||
| 287 | kobject_put(&ctx->kobj); | ||
| 288 | } | ||
| 289 | kobject_put(&q->mq_kobj); | ||
| 290 | } | ||
| 291 | |||
| 292 | void blk_mq_sysfs_init(struct request_queue *q) | ||
| 281 | { | 293 | { |
| 282 | struct blk_mq_ctx *ctx; | 294 | struct blk_mq_ctx *ctx; |
| 283 | int cpu; | 295 | int cpu; |
| @@ -297,8 +309,6 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q) | |||
| 297 | 309 | ||
| 298 | blk_mq_disable_hotplug(); | 310 | blk_mq_disable_hotplug(); |
| 299 | 311 | ||
| 300 | blk_mq_sysfs_init(q); | ||
| 301 | |||
| 302 | ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); | 312 | ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); |
| 303 | if (ret < 0) | 313 | if (ret < 0) |
| 304 | goto out; | 314 | goto out; |
diff --git a/block/blk-mq.c b/block/blk-mq.c index b2fd175e84d7..159187a28d66 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -1955,16 +1955,6 @@ static void blk_mq_exit_hw_queues(struct request_queue *q, | |||
| 1955 | } | 1955 | } |
| 1956 | } | 1956 | } |
| 1957 | 1957 | ||
| 1958 | static void blk_mq_free_hw_queues(struct request_queue *q, | ||
| 1959 | struct blk_mq_tag_set *set) | ||
| 1960 | { | ||
| 1961 | struct blk_mq_hw_ctx *hctx; | ||
| 1962 | unsigned int i; | ||
| 1963 | |||
| 1964 | queue_for_each_hw_ctx(q, hctx, i) | ||
| 1965 | free_cpumask_var(hctx->cpumask); | ||
| 1966 | } | ||
| 1967 | |||
| 1968 | static int blk_mq_init_hctx(struct request_queue *q, | 1958 | static int blk_mq_init_hctx(struct request_queue *q, |
| 1969 | struct blk_mq_tag_set *set, | 1959 | struct blk_mq_tag_set *set, |
| 1970 | struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) | 1960 | struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) |
| @@ -2045,7 +2035,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, | |||
| 2045 | struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); | 2035 | struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); |
| 2046 | struct blk_mq_hw_ctx *hctx; | 2036 | struct blk_mq_hw_ctx *hctx; |
| 2047 | 2037 | ||
| 2048 | memset(__ctx, 0, sizeof(*__ctx)); | ||
| 2049 | __ctx->cpu = i; | 2038 | __ctx->cpu = i; |
| 2050 | spin_lock_init(&__ctx->lock); | 2039 | spin_lock_init(&__ctx->lock); |
| 2051 | INIT_LIST_HEAD(&__ctx->rq_list); | 2040 | INIT_LIST_HEAD(&__ctx->rq_list); |
| @@ -2257,15 +2246,19 @@ void blk_mq_release(struct request_queue *q) | |||
| 2257 | queue_for_each_hw_ctx(q, hctx, i) { | 2246 | queue_for_each_hw_ctx(q, hctx, i) { |
| 2258 | if (!hctx) | 2247 | if (!hctx) |
| 2259 | continue; | 2248 | continue; |
| 2260 | kfree(hctx->ctxs); | 2249 | kobject_put(&hctx->kobj); |
| 2261 | kfree(hctx); | ||
| 2262 | } | 2250 | } |
| 2263 | 2251 | ||
| 2264 | q->mq_map = NULL; | 2252 | q->mq_map = NULL; |
| 2265 | 2253 | ||
| 2266 | kfree(q->queue_hw_ctx); | 2254 | kfree(q->queue_hw_ctx); |
| 2267 | 2255 | ||
| 2268 | /* ctx kobj stays in queue_ctx */ | 2256 | /* |
| 2257 | * release .mq_kobj and sw queue's kobject now because | ||
| 2258 | * both share lifetime with request queue. | ||
| 2259 | */ | ||
| 2260 | blk_mq_sysfs_deinit(q); | ||
| 2261 | |||
| 2269 | free_percpu(q->queue_ctx); | 2262 | free_percpu(q->queue_ctx); |
| 2270 | } | 2263 | } |
| 2271 | 2264 | ||
| @@ -2330,10 +2323,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, | |||
| 2330 | if (hctx->tags) | 2323 | if (hctx->tags) |
| 2331 | blk_mq_free_map_and_requests(set, j); | 2324 | blk_mq_free_map_and_requests(set, j); |
| 2332 | blk_mq_exit_hctx(q, set, hctx, j); | 2325 | blk_mq_exit_hctx(q, set, hctx, j); |
| 2333 | free_cpumask_var(hctx->cpumask); | ||
| 2334 | kobject_put(&hctx->kobj); | 2326 | kobject_put(&hctx->kobj); |
| 2335 | kfree(hctx->ctxs); | ||
| 2336 | kfree(hctx); | ||
| 2337 | hctxs[j] = NULL; | 2327 | hctxs[j] = NULL; |
| 2338 | 2328 | ||
| 2339 | } | 2329 | } |
| @@ -2352,6 +2342,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, | |||
| 2352 | if (!q->queue_ctx) | 2342 | if (!q->queue_ctx) |
| 2353 | goto err_exit; | 2343 | goto err_exit; |
| 2354 | 2344 | ||
| 2345 | /* init q->mq_kobj and sw queues' kobjects */ | ||
| 2346 | blk_mq_sysfs_init(q); | ||
| 2347 | |||
| 2355 | q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)), | 2348 | q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)), |
| 2356 | GFP_KERNEL, set->numa_node); | 2349 | GFP_KERNEL, set->numa_node); |
| 2357 | if (!q->queue_hw_ctx) | 2350 | if (!q->queue_hw_ctx) |
| @@ -2442,7 +2435,6 @@ void blk_mq_free_queue(struct request_queue *q) | |||
| 2442 | blk_mq_del_queue_tag_set(q); | 2435 | blk_mq_del_queue_tag_set(q); |
| 2443 | 2436 | ||
| 2444 | blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); | 2437 | blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); |
| 2445 | blk_mq_free_hw_queues(q, set); | ||
| 2446 | } | 2438 | } |
| 2447 | 2439 | ||
| 2448 | /* Basically redo blk_mq_init_queue with queue frozen */ | 2440 | /* Basically redo blk_mq_init_queue with queue frozen */ |
diff --git a/block/blk-mq.h b/block/blk-mq.h index 088ced003c13..b79f9a7d8cf6 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h | |||
| @@ -77,6 +77,8 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, | |||
| 77 | /* | 77 | /* |
| 78 | * sysfs helpers | 78 | * sysfs helpers |
| 79 | */ | 79 | */ |
| 80 | extern void blk_mq_sysfs_init(struct request_queue *q); | ||
| 81 | extern void blk_mq_sysfs_deinit(struct request_queue *q); | ||
| 80 | extern int blk_mq_sysfs_register(struct request_queue *q); | 82 | extern int blk_mq_sysfs_register(struct request_queue *q); |
| 81 | extern void blk_mq_sysfs_unregister(struct request_queue *q); | 83 | extern void blk_mq_sysfs_unregister(struct request_queue *q); |
| 82 | extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); | 84 | extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); |
diff --git a/block/genhd.c b/block/genhd.c index b26a5ea115d0..a9c516a8b37d 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
| @@ -572,20 +572,6 @@ exit: | |||
| 572 | disk_part_iter_exit(&piter); | 572 | disk_part_iter_exit(&piter); |
| 573 | } | 573 | } |
| 574 | 574 | ||
| 575 | void put_disk_devt(struct disk_devt *disk_devt) | ||
| 576 | { | ||
| 577 | if (disk_devt && atomic_dec_and_test(&disk_devt->count)) | ||
| 578 | disk_devt->release(disk_devt); | ||
| 579 | } | ||
| 580 | EXPORT_SYMBOL(put_disk_devt); | ||
| 581 | |||
| 582 | void get_disk_devt(struct disk_devt *disk_devt) | ||
| 583 | { | ||
| 584 | if (disk_devt) | ||
| 585 | atomic_inc(&disk_devt->count); | ||
| 586 | } | ||
| 587 | EXPORT_SYMBOL(get_disk_devt); | ||
| 588 | |||
| 589 | /** | 575 | /** |
| 590 | * device_add_disk - add partitioning information to kernel list | 576 | * device_add_disk - add partitioning information to kernel list |
| 591 | * @parent: parent device for the disk | 577 | * @parent: parent device for the disk |
| @@ -626,13 +612,6 @@ void device_add_disk(struct device *parent, struct gendisk *disk) | |||
| 626 | 612 | ||
| 627 | disk_alloc_events(disk); | 613 | disk_alloc_events(disk); |
| 628 | 614 | ||
| 629 | /* | ||
| 630 | * Take a reference on the devt and assign it to queue since it | ||
| 631 | * must not be reallocated while the bdi is registered | ||
| 632 | */ | ||
| 633 | disk->queue->disk_devt = disk->disk_devt; | ||
| 634 | get_disk_devt(disk->disk_devt); | ||
| 635 | |||
| 636 | /* Register BDI before referencing it from bdev */ | 615 | /* Register BDI before referencing it from bdev */ |
| 637 | bdi = disk->queue->backing_dev_info; | 616 | bdi = disk->queue->backing_dev_info; |
| 638 | bdi_register_owner(bdi, disk_to_dev(disk)); | 617 | bdi_register_owner(bdi, disk_to_dev(disk)); |
| @@ -681,12 +660,16 @@ void del_gendisk(struct gendisk *disk) | |||
| 681 | disk->flags &= ~GENHD_FL_UP; | 660 | disk->flags &= ~GENHD_FL_UP; |
| 682 | 661 | ||
| 683 | sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); | 662 | sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); |
| 684 | /* | 663 | if (disk->queue) { |
| 685 | * Unregister bdi before releasing device numbers (as they can get | 664 | /* |
| 686 | * reused and we'd get clashes in sysfs). | 665 | * Unregister bdi before releasing device numbers (as they can |
| 687 | */ | 666 | * get reused and we'd get clashes in sysfs). |
| 688 | bdi_unregister(disk->queue->backing_dev_info); | 667 | */ |
| 689 | blk_unregister_queue(disk); | 668 | bdi_unregister(disk->queue->backing_dev_info); |
| 669 | blk_unregister_queue(disk); | ||
| 670 | } else { | ||
| 671 | WARN_ON(1); | ||
| 672 | } | ||
| 690 | blk_unregister_region(disk_devt(disk), disk->minors); | 673 | blk_unregister_region(disk_devt(disk), disk->minors); |
| 691 | 674 | ||
| 692 | part_stat_set_all(&disk->part0, 0); | 675 | part_stat_set_all(&disk->part0, 0); |
diff --git a/block/sed-opal.c b/block/sed-opal.c index 1e18dca360fc..14035f826b5e 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c | |||
| @@ -1023,7 +1023,6 @@ static int finalize_and_send(struct opal_dev *dev, cont_fn cont) | |||
| 1023 | 1023 | ||
| 1024 | static int gen_key(struct opal_dev *dev, void *data) | 1024 | static int gen_key(struct opal_dev *dev, void *data) |
| 1025 | { | 1025 | { |
| 1026 | const u8 *method; | ||
| 1027 | u8 uid[OPAL_UID_LENGTH]; | 1026 | u8 uid[OPAL_UID_LENGTH]; |
| 1028 | int err = 0; | 1027 | int err = 0; |
| 1029 | 1028 | ||
| @@ -1031,7 +1030,6 @@ static int gen_key(struct opal_dev *dev, void *data) | |||
| 1031 | set_comid(dev, dev->comid); | 1030 | set_comid(dev, dev->comid); |
| 1032 | 1031 | ||
| 1033 | memcpy(uid, dev->prev_data, min(sizeof(uid), dev->prev_d_len)); | 1032 | memcpy(uid, dev->prev_data, min(sizeof(uid), dev->prev_d_len)); |
| 1034 | method = opalmethod[OPAL_GENKEY]; | ||
| 1035 | kfree(dev->prev_data); | 1033 | kfree(dev->prev_data); |
| 1036 | dev->prev_data = NULL; | 1034 | dev->prev_data = NULL; |
| 1037 | 1035 | ||
| @@ -1669,7 +1667,6 @@ static int add_user_to_lr(struct opal_dev *dev, void *data) | |||
| 1669 | static int lock_unlock_locking_range(struct opal_dev *dev, void *data) | 1667 | static int lock_unlock_locking_range(struct opal_dev *dev, void *data) |
| 1670 | { | 1668 | { |
| 1671 | u8 lr_buffer[OPAL_UID_LENGTH]; | 1669 | u8 lr_buffer[OPAL_UID_LENGTH]; |
| 1672 | const u8 *method; | ||
| 1673 | struct opal_lock_unlock *lkul = data; | 1670 | struct opal_lock_unlock *lkul = data; |
| 1674 | u8 read_locked = 1, write_locked = 1; | 1671 | u8 read_locked = 1, write_locked = 1; |
| 1675 | int err = 0; | 1672 | int err = 0; |
| @@ -1677,7 +1674,6 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data) | |||
| 1677 | clear_opal_cmd(dev); | 1674 | clear_opal_cmd(dev); |
| 1678 | set_comid(dev, dev->comid); | 1675 | set_comid(dev, dev->comid); |
| 1679 | 1676 | ||
| 1680 | method = opalmethod[OPAL_SET]; | ||
| 1681 | if (build_locking_range(lr_buffer, sizeof(lr_buffer), | 1677 | if (build_locking_range(lr_buffer, sizeof(lr_buffer), |
| 1682 | lkul->session.opal_key.lr) < 0) | 1678 | lkul->session.opal_key.lr) < 0) |
| 1683 | return -ERANGE; | 1679 | return -ERANGE; |
| @@ -1733,14 +1729,12 @@ static int lock_unlock_locking_range_sum(struct opal_dev *dev, void *data) | |||
| 1733 | { | 1729 | { |
| 1734 | u8 lr_buffer[OPAL_UID_LENGTH]; | 1730 | u8 lr_buffer[OPAL_UID_LENGTH]; |
| 1735 | u8 read_locked = 1, write_locked = 1; | 1731 | u8 read_locked = 1, write_locked = 1; |
| 1736 | const u8 *method; | ||
| 1737 | struct opal_lock_unlock *lkul = data; | 1732 | struct opal_lock_unlock *lkul = data; |
| 1738 | int ret; | 1733 | int ret; |
| 1739 | 1734 | ||
| 1740 | clear_opal_cmd(dev); | 1735 | clear_opal_cmd(dev); |
| 1741 | set_comid(dev, dev->comid); | 1736 | set_comid(dev, dev->comid); |
| 1742 | 1737 | ||
| 1743 | method = opalmethod[OPAL_SET]; | ||
| 1744 | if (build_locking_range(lr_buffer, sizeof(lr_buffer), | 1738 | if (build_locking_range(lr_buffer, sizeof(lr_buffer), |
| 1745 | lkul->session.opal_key.lr) < 0) | 1739 | lkul->session.opal_key.lr) < 0) |
| 1746 | return -ERANGE; | 1740 | return -ERANGE; |
| @@ -2133,7 +2127,7 @@ static int opal_add_user_to_lr(struct opal_dev *dev, | |||
| 2133 | pr_err("Locking state was not RO or RW\n"); | 2127 | pr_err("Locking state was not RO or RW\n"); |
| 2134 | return -EINVAL; | 2128 | return -EINVAL; |
| 2135 | } | 2129 | } |
| 2136 | if (lk_unlk->session.who < OPAL_USER1 && | 2130 | if (lk_unlk->session.who < OPAL_USER1 || |
| 2137 | lk_unlk->session.who > OPAL_USER9) { | 2131 | lk_unlk->session.who > OPAL_USER9) { |
| 2138 | pr_err("Authority was not within the range of users: %d\n", | 2132 | pr_err("Authority was not within the range of users: %d\n", |
| 2139 | lk_unlk->session.who); | 2133 | lk_unlk->session.who); |
| @@ -2316,7 +2310,7 @@ static int opal_activate_user(struct opal_dev *dev, | |||
| 2316 | int ret; | 2310 | int ret; |
| 2317 | 2311 | ||
| 2318 | /* We can't activate Admin1 it's active as manufactured */ | 2312 | /* We can't activate Admin1 it's active as manufactured */ |
| 2319 | if (opal_session->who < OPAL_USER1 && | 2313 | if (opal_session->who < OPAL_USER1 || |
| 2320 | opal_session->who > OPAL_USER9) { | 2314 | opal_session->who > OPAL_USER9) { |
| 2321 | pr_err("Who was not a valid user: %d\n", opal_session->who); | 2315 | pr_err("Who was not a valid user: %d\n", opal_session->who); |
| 2322 | return -EINVAL; | 2316 | return -EINVAL; |
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 219b90bc0922..f15900132912 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
| @@ -41,8 +41,10 @@ void acpi_gpe_apply_masked_gpes(void); | |||
| 41 | void acpi_container_init(void); | 41 | void acpi_container_init(void); |
| 42 | void acpi_memory_hotplug_init(void); | 42 | void acpi_memory_hotplug_init(void); |
| 43 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC | 43 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC |
| 44 | void pci_ioapic_remove(struct acpi_pci_root *root); | ||
| 44 | int acpi_ioapic_remove(struct acpi_pci_root *root); | 45 | int acpi_ioapic_remove(struct acpi_pci_root *root); |
| 45 | #else | 46 | #else |
| 47 | static inline void pci_ioapic_remove(struct acpi_pci_root *root) { return; } | ||
| 46 | static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; } | 48 | static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; } |
| 47 | #endif | 49 | #endif |
| 48 | #ifdef CONFIG_ACPI_DOCK | 50 | #ifdef CONFIG_ACPI_DOCK |
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c index 6d7ce6e12aaa..1120dfd625b8 100644 --- a/drivers/acpi/ioapic.c +++ b/drivers/acpi/ioapic.c | |||
| @@ -206,24 +206,34 @@ int acpi_ioapic_add(acpi_handle root_handle) | |||
| 206 | return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV; | 206 | return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV; |
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | int acpi_ioapic_remove(struct acpi_pci_root *root) | 209 | void pci_ioapic_remove(struct acpi_pci_root *root) |
| 210 | { | 210 | { |
| 211 | int retval = 0; | ||
| 212 | struct acpi_pci_ioapic *ioapic, *tmp; | 211 | struct acpi_pci_ioapic *ioapic, *tmp; |
| 213 | 212 | ||
| 214 | mutex_lock(&ioapic_list_lock); | 213 | mutex_lock(&ioapic_list_lock); |
| 215 | list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) { | 214 | list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) { |
| 216 | if (root->device->handle != ioapic->root_handle) | 215 | if (root->device->handle != ioapic->root_handle) |
| 217 | continue; | 216 | continue; |
| 218 | |||
| 219 | if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base)) | ||
| 220 | retval = -EBUSY; | ||
| 221 | |||
| 222 | if (ioapic->pdev) { | 217 | if (ioapic->pdev) { |
| 223 | pci_release_region(ioapic->pdev, 0); | 218 | pci_release_region(ioapic->pdev, 0); |
| 224 | pci_disable_device(ioapic->pdev); | 219 | pci_disable_device(ioapic->pdev); |
| 225 | pci_dev_put(ioapic->pdev); | 220 | pci_dev_put(ioapic->pdev); |
| 226 | } | 221 | } |
| 222 | } | ||
| 223 | mutex_unlock(&ioapic_list_lock); | ||
| 224 | } | ||
| 225 | |||
| 226 | int acpi_ioapic_remove(struct acpi_pci_root *root) | ||
| 227 | { | ||
| 228 | int retval = 0; | ||
| 229 | struct acpi_pci_ioapic *ioapic, *tmp; | ||
| 230 | |||
| 231 | mutex_lock(&ioapic_list_lock); | ||
| 232 | list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) { | ||
| 233 | if (root->device->handle != ioapic->root_handle) | ||
| 234 | continue; | ||
| 235 | if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base)) | ||
| 236 | retval = -EBUSY; | ||
| 227 | if (ioapic->res.flags && ioapic->res.parent) | 237 | if (ioapic->res.flags && ioapic->res.parent) |
| 228 | release_resource(&ioapic->res); | 238 | release_resource(&ioapic->res); |
| 229 | list_del(&ioapic->list); | 239 | list_del(&ioapic->list); |
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index bf601d4df8cf..919be0aa2578 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
| @@ -648,12 +648,12 @@ static void acpi_pci_root_remove(struct acpi_device *device) | |||
| 648 | 648 | ||
| 649 | pci_stop_root_bus(root->bus); | 649 | pci_stop_root_bus(root->bus); |
| 650 | 650 | ||
| 651 | WARN_ON(acpi_ioapic_remove(root)); | 651 | pci_ioapic_remove(root); |
| 652 | |||
| 653 | device_set_run_wake(root->bus->bridge, false); | 652 | device_set_run_wake(root->bus->bridge, false); |
| 654 | pci_acpi_remove_bus_pm_notifier(device); | 653 | pci_acpi_remove_bus_pm_notifier(device); |
| 655 | 654 | ||
| 656 | pci_remove_root_bus(root->bus); | 655 | pci_remove_root_bus(root->bus); |
| 656 | WARN_ON(acpi_ioapic_remove(root)); | ||
| 657 | 657 | ||
| 658 | dmar_device_remove(device->handle); | 658 | dmar_device_remove(device->handle); |
| 659 | 659 | ||
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 10aed84244f5..939641d6e262 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c | |||
| @@ -50,7 +50,7 @@ | |||
| 50 | the slower the port i/o. In some cases, setting | 50 | the slower the port i/o. In some cases, setting |
| 51 | this to zero will speed up the device. (default -1) | 51 | this to zero will speed up the device. (default -1) |
| 52 | 52 | ||
| 53 | major You may use this parameter to overide the | 53 | major You may use this parameter to override the |
| 54 | default major number (46) that this driver | 54 | default major number (46) that this driver |
| 55 | will use. Be sure to change the device | 55 | will use. Be sure to change the device |
| 56 | name as well. | 56 | name as well. |
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 644ba0888bd4..9cfd2e06a649 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c | |||
| @@ -61,7 +61,7 @@ | |||
| 61 | first drive found. | 61 | first drive found. |
| 62 | 62 | ||
| 63 | 63 | ||
| 64 | major You may use this parameter to overide the | 64 | major You may use this parameter to override the |
| 65 | default major number (45) that this driver | 65 | default major number (45) that this driver |
| 66 | will use. Be sure to change the device | 66 | will use. Be sure to change the device |
| 67 | name as well. | 67 | name as well. |
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index ed93e8badf56..14c5d32f5d8b 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c | |||
| @@ -59,7 +59,7 @@ | |||
| 59 | the slower the port i/o. In some cases, setting | 59 | the slower the port i/o. In some cases, setting |
| 60 | this to zero will speed up the device. (default -1) | 60 | this to zero will speed up the device. (default -1) |
| 61 | 61 | ||
| 62 | major You may use this parameter to overide the | 62 | major You may use this parameter to override the |
| 63 | default major number (47) that this driver | 63 | default major number (47) that this driver |
| 64 | will use. Be sure to change the device | 64 | will use. Be sure to change the device |
| 65 | name as well. | 65 | name as well. |
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c index 5db955fe3a94..3b5882bfb736 100644 --- a/drivers/block/paride/pg.c +++ b/drivers/block/paride/pg.c | |||
| @@ -84,7 +84,7 @@ | |||
| 84 | the slower the port i/o. In some cases, setting | 84 | the slower the port i/o. In some cases, setting |
| 85 | this to zero will speed up the device. (default -1) | 85 | this to zero will speed up the device. (default -1) |
| 86 | 86 | ||
| 87 | major You may use this parameter to overide the | 87 | major You may use this parameter to override the |
| 88 | default major number (97) that this driver | 88 | default major number (97) that this driver |
| 89 | will use. Be sure to change the device | 89 | will use. Be sure to change the device |
| 90 | name as well. | 90 | name as well. |
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c index 61fc6824299a..e815312a00ad 100644 --- a/drivers/block/paride/pt.c +++ b/drivers/block/paride/pt.c | |||
| @@ -61,7 +61,7 @@ | |||
| 61 | the slower the port i/o. In some cases, setting | 61 | the slower the port i/o. In some cases, setting |
| 62 | this to zero will speed up the device. (default -1) | 62 | this to zero will speed up the device. (default -1) |
| 63 | 63 | ||
| 64 | major You may use this parameter to overide the | 64 | major You may use this parameter to override the |
| 65 | default major number (96) that this driver | 65 | default major number (96) that this driver |
| 66 | will use. Be sure to change the device | 66 | will use. Be sure to change the device |
| 67 | name as well. | 67 | name as well. |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4d6807723798..517838b65964 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
| @@ -120,10 +120,11 @@ static int atomic_dec_return_safe(atomic_t *v) | |||
| 120 | 120 | ||
| 121 | /* Feature bits */ | 121 | /* Feature bits */ |
| 122 | 122 | ||
| 123 | #define RBD_FEATURE_LAYERING (1<<0) | 123 | #define RBD_FEATURE_LAYERING (1ULL<<0) |
| 124 | #define RBD_FEATURE_STRIPINGV2 (1<<1) | 124 | #define RBD_FEATURE_STRIPINGV2 (1ULL<<1) |
| 125 | #define RBD_FEATURE_EXCLUSIVE_LOCK (1<<2) | 125 | #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2) |
| 126 | #define RBD_FEATURE_DATA_POOL (1<<7) | 126 | #define RBD_FEATURE_DATA_POOL (1ULL<<7) |
| 127 | |||
| 127 | #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ | 128 | #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ |
| 128 | RBD_FEATURE_STRIPINGV2 | \ | 129 | RBD_FEATURE_STRIPINGV2 | \ |
| 129 | RBD_FEATURE_EXCLUSIVE_LOCK | \ | 130 | RBD_FEATURE_EXCLUSIVE_LOCK | \ |
| @@ -499,16 +500,23 @@ static bool rbd_is_lock_owner(struct rbd_device *rbd_dev) | |||
| 499 | return is_lock_owner; | 500 | return is_lock_owner; |
| 500 | } | 501 | } |
| 501 | 502 | ||
| 503 | static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf) | ||
| 504 | { | ||
| 505 | return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED); | ||
| 506 | } | ||
| 507 | |||
| 502 | static BUS_ATTR(add, S_IWUSR, NULL, rbd_add); | 508 | static BUS_ATTR(add, S_IWUSR, NULL, rbd_add); |
| 503 | static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove); | 509 | static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove); |
| 504 | static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major); | 510 | static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major); |
| 505 | static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major); | 511 | static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major); |
| 512 | static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL); | ||
| 506 | 513 | ||
| 507 | static struct attribute *rbd_bus_attrs[] = { | 514 | static struct attribute *rbd_bus_attrs[] = { |
| 508 | &bus_attr_add.attr, | 515 | &bus_attr_add.attr, |
| 509 | &bus_attr_remove.attr, | 516 | &bus_attr_remove.attr, |
| 510 | &bus_attr_add_single_major.attr, | 517 | &bus_attr_add_single_major.attr, |
| 511 | &bus_attr_remove_single_major.attr, | 518 | &bus_attr_remove_single_major.attr, |
| 519 | &bus_attr_supported_features.attr, | ||
| 512 | NULL, | 520 | NULL, |
| 513 | }; | 521 | }; |
| 514 | 522 | ||
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index e27d89a36c34..dceb5edd1e54 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c | |||
| @@ -1189,6 +1189,8 @@ static int zram_add(void) | |||
| 1189 | blk_queue_io_min(zram->disk->queue, PAGE_SIZE); | 1189 | blk_queue_io_min(zram->disk->queue, PAGE_SIZE); |
| 1190 | blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); | 1190 | blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); |
| 1191 | zram->disk->queue->limits.discard_granularity = PAGE_SIZE; | 1191 | zram->disk->queue->limits.discard_granularity = PAGE_SIZE; |
| 1192 | zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE; | ||
| 1193 | zram->disk->queue->limits.chunk_sectors = 0; | ||
| 1192 | blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); | 1194 | blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); |
| 1193 | /* | 1195 | /* |
| 1194 | * zram_bio_discard() will clear all logical blocks if logical block | 1196 | * zram_bio_discard() will clear all logical blocks if logical block |
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c index a5b1eb276c0b..e6d0d271c58c 100644 --- a/drivers/char/nwbutton.c +++ b/drivers/char/nwbutton.c | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | 6 | ||
| 7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
| 8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
| 9 | #include <linux/sched.h> | 9 | #include <linux/sched/signal.h> |
| 10 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
| 11 | #include <linux/time.h> | 11 | #include <linux/time.h> |
| 12 | #include <linux/timer.h> | 12 | #include <linux/timer.h> |
diff --git a/drivers/char/random.c b/drivers/char/random.c index 1ef26403bcc8..0ab024918907 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
| @@ -313,13 +313,6 @@ static int random_read_wakeup_bits = 64; | |||
| 313 | static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; | 313 | static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; |
| 314 | 314 | ||
| 315 | /* | 315 | /* |
| 316 | * The minimum number of seconds between urandom pool reseeding. We | ||
| 317 | * do this to limit the amount of entropy that can be drained from the | ||
| 318 | * input pool even if there are heavy demands on /dev/urandom. | ||
| 319 | */ | ||
| 320 | static int random_min_urandom_seed = 60; | ||
| 321 | |||
| 322 | /* | ||
| 323 | * Originally, we used a primitive polynomial of degree .poolwords | 316 | * Originally, we used a primitive polynomial of degree .poolwords |
| 324 | * over GF(2). The taps for various sizes are defined below. They | 317 | * over GF(2). The taps for various sizes are defined below. They |
| 325 | * were chosen to be evenly spaced except for the last tap, which is 1 | 318 | * were chosen to be evenly spaced except for the last tap, which is 1 |
| @@ -409,7 +402,6 @@ static struct poolinfo { | |||
| 409 | */ | 402 | */ |
| 410 | static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); | 403 | static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); |
| 411 | static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); | 404 | static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); |
| 412 | static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait); | ||
| 413 | static struct fasync_struct *fasync; | 405 | static struct fasync_struct *fasync; |
| 414 | 406 | ||
| 415 | static DEFINE_SPINLOCK(random_ready_list_lock); | 407 | static DEFINE_SPINLOCK(random_ready_list_lock); |
| @@ -467,7 +459,6 @@ struct entropy_store { | |||
| 467 | int entropy_count; | 459 | int entropy_count; |
| 468 | int entropy_total; | 460 | int entropy_total; |
| 469 | unsigned int initialized:1; | 461 | unsigned int initialized:1; |
| 470 | unsigned int limit:1; | ||
| 471 | unsigned int last_data_init:1; | 462 | unsigned int last_data_init:1; |
| 472 | __u8 last_data[EXTRACT_SIZE]; | 463 | __u8 last_data[EXTRACT_SIZE]; |
| 473 | }; | 464 | }; |
| @@ -485,7 +476,6 @@ static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy; | |||
| 485 | static struct entropy_store input_pool = { | 476 | static struct entropy_store input_pool = { |
| 486 | .poolinfo = &poolinfo_table[0], | 477 | .poolinfo = &poolinfo_table[0], |
| 487 | .name = "input", | 478 | .name = "input", |
| 488 | .limit = 1, | ||
| 489 | .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), | 479 | .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), |
| 490 | .pool = input_pool_data | 480 | .pool = input_pool_data |
| 491 | }; | 481 | }; |
| @@ -493,7 +483,6 @@ static struct entropy_store input_pool = { | |||
| 493 | static struct entropy_store blocking_pool = { | 483 | static struct entropy_store blocking_pool = { |
| 494 | .poolinfo = &poolinfo_table[1], | 484 | .poolinfo = &poolinfo_table[1], |
| 495 | .name = "blocking", | 485 | .name = "blocking", |
| 496 | .limit = 1, | ||
| 497 | .pull = &input_pool, | 486 | .pull = &input_pool, |
| 498 | .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock), | 487 | .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock), |
| 499 | .pool = blocking_pool_data, | 488 | .pool = blocking_pool_data, |
| @@ -855,13 +844,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) | |||
| 855 | spin_unlock_irqrestore(&primary_crng.lock, flags); | 844 | spin_unlock_irqrestore(&primary_crng.lock, flags); |
| 856 | } | 845 | } |
| 857 | 846 | ||
| 858 | static inline void maybe_reseed_primary_crng(void) | ||
| 859 | { | ||
| 860 | if (crng_init > 2 && | ||
| 861 | time_after(jiffies, primary_crng.init_time + CRNG_RESEED_INTERVAL)) | ||
| 862 | crng_reseed(&primary_crng, &input_pool); | ||
| 863 | } | ||
| 864 | |||
| 865 | static inline void crng_wait_ready(void) | 847 | static inline void crng_wait_ready(void) |
| 866 | { | 848 | { |
| 867 | wait_event_interruptible(crng_init_wait, crng_ready()); | 849 | wait_event_interruptible(crng_init_wait, crng_ready()); |
| @@ -1220,15 +1202,6 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) | |||
| 1220 | r->entropy_count > r->poolinfo->poolfracbits) | 1202 | r->entropy_count > r->poolinfo->poolfracbits) |
| 1221 | return; | 1203 | return; |
| 1222 | 1204 | ||
| 1223 | if (r->limit == 0 && random_min_urandom_seed) { | ||
| 1224 | unsigned long now = jiffies; | ||
| 1225 | |||
| 1226 | if (time_before(now, | ||
| 1227 | r->last_pulled + random_min_urandom_seed * HZ)) | ||
| 1228 | return; | ||
| 1229 | r->last_pulled = now; | ||
| 1230 | } | ||
| 1231 | |||
| 1232 | _xfer_secondary_pool(r, nbytes); | 1205 | _xfer_secondary_pool(r, nbytes); |
| 1233 | } | 1206 | } |
| 1234 | 1207 | ||
| @@ -1236,8 +1209,6 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes) | |||
| 1236 | { | 1209 | { |
| 1237 | __u32 tmp[OUTPUT_POOL_WORDS]; | 1210 | __u32 tmp[OUTPUT_POOL_WORDS]; |
| 1238 | 1211 | ||
| 1239 | /* For /dev/random's pool, always leave two wakeups' worth */ | ||
| 1240 | int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4; | ||
| 1241 | int bytes = nbytes; | 1212 | int bytes = nbytes; |
| 1242 | 1213 | ||
| 1243 | /* pull at least as much as a wakeup */ | 1214 | /* pull at least as much as a wakeup */ |
| @@ -1248,7 +1219,7 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes) | |||
| 1248 | trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8, | 1219 | trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8, |
| 1249 | ENTROPY_BITS(r), ENTROPY_BITS(r->pull)); | 1220 | ENTROPY_BITS(r), ENTROPY_BITS(r->pull)); |
| 1250 | bytes = extract_entropy(r->pull, tmp, bytes, | 1221 | bytes = extract_entropy(r->pull, tmp, bytes, |
| 1251 | random_read_wakeup_bits / 8, rsvd_bytes); | 1222 | random_read_wakeup_bits / 8, 0); |
| 1252 | mix_pool_bytes(r, tmp, bytes); | 1223 | mix_pool_bytes(r, tmp, bytes); |
| 1253 | credit_entropy_bits(r, bytes*8); | 1224 | credit_entropy_bits(r, bytes*8); |
| 1254 | } | 1225 | } |
| @@ -1276,7 +1247,7 @@ static void push_to_pool(struct work_struct *work) | |||
| 1276 | static size_t account(struct entropy_store *r, size_t nbytes, int min, | 1247 | static size_t account(struct entropy_store *r, size_t nbytes, int min, |
| 1277 | int reserved) | 1248 | int reserved) |
| 1278 | { | 1249 | { |
| 1279 | int entropy_count, orig; | 1250 | int entropy_count, orig, have_bytes; |
| 1280 | size_t ibytes, nfrac; | 1251 | size_t ibytes, nfrac; |
| 1281 | 1252 | ||
| 1282 | BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); | 1253 | BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); |
| @@ -1285,14 +1256,12 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, | |||
| 1285 | retry: | 1256 | retry: |
| 1286 | entropy_count = orig = ACCESS_ONCE(r->entropy_count); | 1257 | entropy_count = orig = ACCESS_ONCE(r->entropy_count); |
| 1287 | ibytes = nbytes; | 1258 | ibytes = nbytes; |
| 1288 | /* If limited, never pull more than available */ | 1259 | /* never pull more than available */ |
| 1289 | if (r->limit) { | 1260 | have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); |
| 1290 | int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); | ||
| 1291 | 1261 | ||
| 1292 | if ((have_bytes -= reserved) < 0) | 1262 | if ((have_bytes -= reserved) < 0) |
| 1293 | have_bytes = 0; | 1263 | have_bytes = 0; |
| 1294 | ibytes = min_t(size_t, ibytes, have_bytes); | 1264 | ibytes = min_t(size_t, ibytes, have_bytes); |
| 1295 | } | ||
| 1296 | if (ibytes < min) | 1265 | if (ibytes < min) |
| 1297 | ibytes = 0; | 1266 | ibytes = 0; |
| 1298 | 1267 | ||
| @@ -1912,6 +1881,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, | |||
| 1912 | static int min_read_thresh = 8, min_write_thresh; | 1881 | static int min_read_thresh = 8, min_write_thresh; |
| 1913 | static int max_read_thresh = OUTPUT_POOL_WORDS * 32; | 1882 | static int max_read_thresh = OUTPUT_POOL_WORDS * 32; |
| 1914 | static int max_write_thresh = INPUT_POOL_WORDS * 32; | 1883 | static int max_write_thresh = INPUT_POOL_WORDS * 32; |
| 1884 | static int random_min_urandom_seed = 60; | ||
| 1915 | static char sysctl_bootid[16]; | 1885 | static char sysctl_bootid[16]; |
| 1916 | 1886 | ||
| 1917 | /* | 1887 | /* |
| @@ -2042,63 +2012,64 @@ struct ctl_table random_table[] = { | |||
| 2042 | }; | 2012 | }; |
| 2043 | #endif /* CONFIG_SYSCTL */ | 2013 | #endif /* CONFIG_SYSCTL */ |
| 2044 | 2014 | ||
| 2045 | static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; | 2015 | struct batched_entropy { |
| 2046 | 2016 | union { | |
| 2047 | int random_int_secret_init(void) | 2017 | u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)]; |
| 2048 | { | 2018 | u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)]; |
| 2049 | get_random_bytes(random_int_secret, sizeof(random_int_secret)); | 2019 | }; |
| 2050 | return 0; | 2020 | unsigned int position; |
| 2051 | } | 2021 | }; |
| 2052 | |||
| 2053 | static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash) | ||
| 2054 | __aligned(sizeof(unsigned long)); | ||
| 2055 | 2022 | ||
| 2056 | /* | 2023 | /* |
| 2057 | * Get a random word for internal kernel use only. Similar to urandom but | 2024 | * Get a random word for internal kernel use only. The quality of the random |
| 2058 | * with the goal of minimal entropy pool depletion. As a result, the random | 2025 | * number is either as good as RDRAND or as good as /dev/urandom, with the |
| 2059 | * value is not cryptographically secure but for several uses the cost of | 2026 | * goal of being quite fast and not depleting entropy. |
| 2060 | * depleting entropy is too high | ||
| 2061 | */ | 2027 | */ |
| 2062 | unsigned int get_random_int(void) | 2028 | static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); |
| 2029 | u64 get_random_u64(void) | ||
| 2063 | { | 2030 | { |
| 2064 | __u32 *hash; | 2031 | u64 ret; |
| 2065 | unsigned int ret; | 2032 | struct batched_entropy *batch; |
| 2066 | 2033 | ||
| 2067 | if (arch_get_random_int(&ret)) | 2034 | #if BITS_PER_LONG == 64 |
| 2035 | if (arch_get_random_long((unsigned long *)&ret)) | ||
| 2068 | return ret; | 2036 | return ret; |
| 2037 | #else | ||
| 2038 | if (arch_get_random_long((unsigned long *)&ret) && | ||
| 2039 | arch_get_random_long((unsigned long *)&ret + 1)) | ||
| 2040 | return ret; | ||
| 2041 | #endif | ||
| 2069 | 2042 | ||
| 2070 | hash = get_cpu_var(get_random_int_hash); | 2043 | batch = &get_cpu_var(batched_entropy_u64); |
| 2071 | 2044 | if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { | |
| 2072 | hash[0] += current->pid + jiffies + random_get_entropy(); | 2045 | extract_crng((u8 *)batch->entropy_u64); |
| 2073 | md5_transform(hash, random_int_secret); | 2046 | batch->position = 0; |
| 2074 | ret = hash[0]; | 2047 | } |
| 2075 | put_cpu_var(get_random_int_hash); | 2048 | ret = batch->entropy_u64[batch->position++]; |
| 2076 | 2049 | put_cpu_var(batched_entropy_u64); | |
| 2077 | return ret; | 2050 | return ret; |
| 2078 | } | 2051 | } |
| 2079 | EXPORT_SYMBOL(get_random_int); | 2052 | EXPORT_SYMBOL(get_random_u64); |
| 2080 | 2053 | ||
| 2081 | /* | 2054 | static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32); |
| 2082 | * Same as get_random_int(), but returns unsigned long. | 2055 | u32 get_random_u32(void) |
| 2083 | */ | ||
| 2084 | unsigned long get_random_long(void) | ||
| 2085 | { | 2056 | { |
| 2086 | __u32 *hash; | 2057 | u32 ret; |
| 2087 | unsigned long ret; | 2058 | struct batched_entropy *batch; |
| 2088 | 2059 | ||
| 2089 | if (arch_get_random_long(&ret)) | 2060 | if (arch_get_random_int(&ret)) |
| 2090 | return ret; | 2061 | return ret; |
| 2091 | 2062 | ||
| 2092 | hash = get_cpu_var(get_random_int_hash); | 2063 | batch = &get_cpu_var(batched_entropy_u32); |
| 2093 | 2064 | if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { | |
| 2094 | hash[0] += current->pid + jiffies + random_get_entropy(); | 2065 | extract_crng((u8 *)batch->entropy_u32); |
| 2095 | md5_transform(hash, random_int_secret); | 2066 | batch->position = 0; |
| 2096 | ret = *(unsigned long *)hash; | 2067 | } |
| 2097 | put_cpu_var(get_random_int_hash); | 2068 | ret = batch->entropy_u32[batch->position++]; |
| 2098 | 2069 | put_cpu_var(batched_entropy_u32); | |
| 2099 | return ret; | 2070 | return ret; |
| 2100 | } | 2071 | } |
| 2101 | EXPORT_SYMBOL(get_random_long); | 2072 | EXPORT_SYMBOL(get_random_u32); |
| 2102 | 2073 | ||
| 2103 | /** | 2074 | /** |
| 2104 | * randomize_page - Generate a random, page aligned address | 2075 | * randomize_page - Generate a random, page aligned address |
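Note on the random.c hunks above: they drop the urandom reseed throttling and the per-pool `limit` flag, and replace the MD5-based get_random_int()/get_random_long() helpers with get_random_u32()/get_random_u64(), which hand out words from a per-CPU batch of ChaCha20 output and fall back to arch_get_random_*() when the CPU provides it. A simplified, single-threaded analogue of the batching scheme, assuming a refill_block() stand-in for extract_crng():

	#include <stdint.h>
	#include <stddef.h>

	#define BATCH_WORDS 8	/* stands in for CHACHA20_BLOCK_SIZE / sizeof(u64) */

	/* Assumed CSPRNG back end; in the driver this role is played by
	 * extract_crng(). */
	extern void refill_block(uint64_t *dst, size_t words);

	static uint64_t batch[BATCH_WORDS];
	static size_t position;	/* index of the next unused word */

	/* Hand out one word per call, refilling only when the batch runs out,
	 * so the expensive generator runs once per block of output rather
	 * than once per call. */
	uint64_t batched_random_u64(void)
	{
		if (position % BATCH_WORDS == 0) {
			refill_block(batch, BATCH_WORDS);
			position = 0;
		}
		return batch[position++];
	}

The in-kernel version keeps one such batch per CPU and brackets the access with get_cpu_var()/put_cpu_var(), so the index check and the read cannot be split across CPUs.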
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index a47543281864..38b9fdf854a4 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -2532,4 +2532,5 @@ static int __init cpufreq_core_init(void) | |||
| 2532 | 2532 | ||
| 2533 | return 0; | 2533 | return 0; |
| 2534 | } | 2534 | } |
| 2535 | module_param(off, int, 0444); | ||
| 2535 | core_initcall(cpufreq_core_init); | 2536 | core_initcall(cpufreq_core_init); |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b1fbaa30ae04..3d37219a0dd7 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -377,6 +377,7 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits) | |||
| 377 | intel_pstate_init_limits(limits); | 377 | intel_pstate_init_limits(limits); |
| 378 | limits->min_perf_pct = 100; | 378 | limits->min_perf_pct = 100; |
| 379 | limits->min_perf = int_ext_tofp(1); | 379 | limits->min_perf = int_ext_tofp(1); |
| 380 | limits->min_sysfs_pct = 100; | ||
| 380 | } | 381 | } |
| 381 | 382 | ||
| 382 | static DEFINE_MUTEX(intel_pstate_driver_lock); | 383 | static DEFINE_MUTEX(intel_pstate_driver_lock); |
| @@ -968,11 +969,20 @@ static int intel_pstate_resume(struct cpufreq_policy *policy) | |||
| 968 | } | 969 | } |
| 969 | 970 | ||
| 970 | static void intel_pstate_update_policies(void) | 971 | static void intel_pstate_update_policies(void) |
| 972 | __releases(&intel_pstate_limits_lock) | ||
| 973 | __acquires(&intel_pstate_limits_lock) | ||
| 971 | { | 974 | { |
| 975 | struct perf_limits *saved_limits = limits; | ||
| 972 | int cpu; | 976 | int cpu; |
| 973 | 977 | ||
| 978 | mutex_unlock(&intel_pstate_limits_lock); | ||
| 979 | |||
| 974 | for_each_possible_cpu(cpu) | 980 | for_each_possible_cpu(cpu) |
| 975 | cpufreq_update_policy(cpu); | 981 | cpufreq_update_policy(cpu); |
| 982 | |||
| 983 | mutex_lock(&intel_pstate_limits_lock); | ||
| 984 | |||
| 985 | limits = saved_limits; | ||
| 976 | } | 986 | } |
| 977 | 987 | ||
| 978 | /************************** debugfs begin ************************/ | 988 | /************************** debugfs begin ************************/ |
| @@ -1180,10 +1190,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, | |||
| 1180 | 1190 | ||
| 1181 | limits->no_turbo = clamp_t(int, input, 0, 1); | 1191 | limits->no_turbo = clamp_t(int, input, 0, 1); |
| 1182 | 1192 | ||
| 1183 | mutex_unlock(&intel_pstate_limits_lock); | ||
| 1184 | |||
| 1185 | intel_pstate_update_policies(); | 1193 | intel_pstate_update_policies(); |
| 1186 | 1194 | ||
| 1195 | mutex_unlock(&intel_pstate_limits_lock); | ||
| 1196 | |||
| 1187 | mutex_unlock(&intel_pstate_driver_lock); | 1197 | mutex_unlock(&intel_pstate_driver_lock); |
| 1188 | 1198 | ||
| 1189 | return count; | 1199 | return count; |
| @@ -1217,10 +1227,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, | |||
| 1217 | limits->max_perf_pct); | 1227 | limits->max_perf_pct); |
| 1218 | limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); | 1228 | limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); |
| 1219 | 1229 | ||
| 1220 | mutex_unlock(&intel_pstate_limits_lock); | ||
| 1221 | |||
| 1222 | intel_pstate_update_policies(); | 1230 | intel_pstate_update_policies(); |
| 1223 | 1231 | ||
| 1232 | mutex_unlock(&intel_pstate_limits_lock); | ||
| 1233 | |||
| 1224 | mutex_unlock(&intel_pstate_driver_lock); | 1234 | mutex_unlock(&intel_pstate_driver_lock); |
| 1225 | 1235 | ||
| 1226 | return count; | 1236 | return count; |
| @@ -1254,10 +1264,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, | |||
| 1254 | limits->min_perf_pct); | 1264 | limits->min_perf_pct); |
| 1255 | limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); | 1265 | limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); |
| 1256 | 1266 | ||
| 1257 | mutex_unlock(&intel_pstate_limits_lock); | ||
| 1258 | |||
| 1259 | intel_pstate_update_policies(); | 1267 | intel_pstate_update_policies(); |
| 1260 | 1268 | ||
| 1269 | mutex_unlock(&intel_pstate_limits_lock); | ||
| 1270 | |||
| 1261 | mutex_unlock(&intel_pstate_driver_lock); | 1271 | mutex_unlock(&intel_pstate_driver_lock); |
| 1262 | 1272 | ||
| 1263 | return count; | 1273 | return count; |
| @@ -1874,13 +1884,11 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) | |||
| 1874 | 1884 | ||
| 1875 | intel_pstate_get_min_max(cpu, &min_perf, &max_perf); | 1885 | intel_pstate_get_min_max(cpu, &min_perf, &max_perf); |
| 1876 | pstate = clamp_t(int, pstate, min_perf, max_perf); | 1886 | pstate = clamp_t(int, pstate, min_perf, max_perf); |
| 1877 | trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); | ||
| 1878 | return pstate; | 1887 | return pstate; |
| 1879 | } | 1888 | } |
| 1880 | 1889 | ||
| 1881 | static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) | 1890 | static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) |
| 1882 | { | 1891 | { |
| 1883 | pstate = intel_pstate_prepare_request(cpu, pstate); | ||
| 1884 | if (pstate == cpu->pstate.current_pstate) | 1892 | if (pstate == cpu->pstate.current_pstate) |
| 1885 | return; | 1893 | return; |
| 1886 | 1894 | ||
| @@ -1900,6 +1908,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) | |||
| 1900 | 1908 | ||
| 1901 | update_turbo_state(); | 1909 | update_turbo_state(); |
| 1902 | 1910 | ||
| 1911 | target_pstate = intel_pstate_prepare_request(cpu, target_pstate); | ||
| 1912 | trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); | ||
| 1903 | intel_pstate_update_pstate(cpu, target_pstate); | 1913 | intel_pstate_update_pstate(cpu, target_pstate); |
| 1904 | 1914 | ||
| 1905 | sample = &cpu->sample; | 1915 | sample = &cpu->sample; |
| @@ -2132,16 +2142,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 2132 | mutex_lock(&intel_pstate_limits_lock); | 2142 | mutex_lock(&intel_pstate_limits_lock); |
| 2133 | 2143 | ||
| 2134 | if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { | 2144 | if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { |
| 2145 | pr_debug("set performance\n"); | ||
| 2135 | if (!perf_limits) { | 2146 | if (!perf_limits) { |
| 2136 | limits = &performance_limits; | 2147 | limits = &performance_limits; |
| 2137 | perf_limits = limits; | 2148 | perf_limits = limits; |
| 2138 | } | 2149 | } |
| 2139 | if (policy->max >= policy->cpuinfo.max_freq && | ||
| 2140 | !limits->no_turbo) { | ||
| 2141 | pr_debug("set performance\n"); | ||
| 2142 | intel_pstate_set_performance_limits(perf_limits); | ||
| 2143 | goto out; | ||
| 2144 | } | ||
| 2145 | } else { | 2150 | } else { |
| 2146 | pr_debug("set powersave\n"); | 2151 | pr_debug("set powersave\n"); |
| 2147 | if (!perf_limits) { | 2152 | if (!perf_limits) { |
| @@ -2152,7 +2157,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 2152 | } | 2157 | } |
| 2153 | 2158 | ||
| 2154 | intel_pstate_update_perf_limits(policy, perf_limits); | 2159 | intel_pstate_update_perf_limits(policy, perf_limits); |
| 2155 | out: | 2160 | |
| 2156 | if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { | 2161 | if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { |
| 2157 | /* | 2162 | /* |
| 2158 | * NOHZ_FULL CPUs need this as the governor callback may not | 2163 | * NOHZ_FULL CPUs need this as the governor callback may not |
| @@ -2198,9 +2203,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy) | |||
| 2198 | unsigned int max_freq, min_freq; | 2203 | unsigned int max_freq, min_freq; |
| 2199 | 2204 | ||
| 2200 | max_freq = policy->cpuinfo.max_freq * | 2205 | max_freq = policy->cpuinfo.max_freq * |
| 2201 | limits->max_sysfs_pct / 100; | 2206 | perf_limits->max_sysfs_pct / 100; |
| 2202 | min_freq = policy->cpuinfo.max_freq * | 2207 | min_freq = policy->cpuinfo.max_freq * |
| 2203 | limits->min_sysfs_pct / 100; | 2208 | perf_limits->min_sysfs_pct / 100; |
| 2204 | cpufreq_verify_within_limits(policy, min_freq, max_freq); | 2209 | cpufreq_verify_within_limits(policy, min_freq, max_freq); |
| 2205 | } | 2210 | } |
| 2206 | 2211 | ||
| @@ -2243,13 +2248,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) | |||
| 2243 | 2248 | ||
| 2244 | cpu = all_cpu_data[policy->cpu]; | 2249 | cpu = all_cpu_data[policy->cpu]; |
| 2245 | 2250 | ||
| 2246 | /* | ||
| 2247 | * We need sane value in the cpu->perf_limits, so inherit from global | ||
| 2248 | * perf_limits limits, which are seeded with values based on the | ||
| 2249 | * CONFIG_CPU_FREQ_DEFAULT_GOV_*, during boot up. | ||
| 2250 | */ | ||
| 2251 | if (per_cpu_limits) | 2251 | if (per_cpu_limits) |
| 2252 | memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits)); | 2252 | intel_pstate_init_limits(cpu->perf_limits); |
| 2253 | 2253 | ||
| 2254 | policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; | 2254 | policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; |
| 2255 | policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; | 2255 | policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; |
| @@ -2301,7 +2301,6 @@ static struct cpufreq_driver intel_pstate = { | |||
| 2301 | static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) | 2301 | static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) |
| 2302 | { | 2302 | { |
| 2303 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | 2303 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
| 2304 | struct perf_limits *perf_limits = limits; | ||
| 2305 | 2304 | ||
| 2306 | update_turbo_state(); | 2305 | update_turbo_state(); |
| 2307 | policy->cpuinfo.max_freq = limits->turbo_disabled ? | 2306 | policy->cpuinfo.max_freq = limits->turbo_disabled ? |
| @@ -2309,15 +2308,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) | |||
| 2309 | 2308 | ||
| 2310 | cpufreq_verify_within_cpu_limits(policy); | 2309 | cpufreq_verify_within_cpu_limits(policy); |
| 2311 | 2310 | ||
| 2312 | if (per_cpu_limits) | ||
| 2313 | perf_limits = cpu->perf_limits; | ||
| 2314 | |||
| 2315 | mutex_lock(&intel_pstate_limits_lock); | ||
| 2316 | |||
| 2317 | intel_pstate_update_perf_limits(policy, perf_limits); | ||
| 2318 | |||
| 2319 | mutex_unlock(&intel_pstate_limits_lock); | ||
| 2320 | |||
| 2321 | return 0; | 2311 | return 0; |
| 2322 | } | 2312 | } |
| 2323 | 2313 | ||
| @@ -2370,6 +2360,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, | |||
| 2370 | wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, | 2360 | wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, |
| 2371 | pstate_funcs.get_val(cpu, target_pstate)); | 2361 | pstate_funcs.get_val(cpu, target_pstate)); |
| 2372 | } | 2362 | } |
| 2363 | freqs.new = target_pstate * cpu->pstate.scaling; | ||
| 2373 | cpufreq_freq_transition_end(policy, &freqs, false); | 2364 | cpufreq_freq_transition_end(policy, &freqs, false); |
| 2374 | 2365 | ||
| 2375 | return 0; | 2366 | return 0; |
| @@ -2383,8 +2374,9 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, | |||
| 2383 | 2374 | ||
| 2384 | target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq); | 2375 | target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq); |
| 2385 | target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); | 2376 | target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); |
| 2377 | target_pstate = intel_pstate_prepare_request(cpu, target_pstate); | ||
| 2386 | intel_pstate_update_pstate(cpu, target_pstate); | 2378 | intel_pstate_update_pstate(cpu, target_pstate); |
| 2387 | return target_freq; | 2379 | return target_pstate * cpu->pstate.scaling; |
| 2388 | } | 2380 | } |
| 2389 | 2381 | ||
| 2390 | static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) | 2382 | static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) |
| @@ -2437,8 +2429,11 @@ static int intel_pstate_register_driver(void) | |||
| 2437 | 2429 | ||
| 2438 | intel_pstate_init_limits(&powersave_limits); | 2430 | intel_pstate_init_limits(&powersave_limits); |
| 2439 | intel_pstate_set_performance_limits(&performance_limits); | 2431 | intel_pstate_set_performance_limits(&performance_limits); |
| 2440 | limits = IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) ? | 2432 | if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) && |
| 2441 | &performance_limits : &powersave_limits; | 2433 | intel_pstate_driver == &intel_pstate) |
| 2434 | limits = &performance_limits; | ||
| 2435 | else | ||
| 2436 | limits = &powersave_limits; | ||
| 2442 | 2437 | ||
| 2443 | ret = cpufreq_register_driver(intel_pstate_driver); | 2438 | ret = cpufreq_register_driver(intel_pstate_driver); |
| 2444 | if (ret) { | 2439 | if (ret) { |
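Note on the intel_pstate hunks above: the calls to intel_pstate_update_policies() now happen with the limits mutex held, and that function drops and re-takes the mutex itself (hence the __releases/__acquires annotations), restoring the saved global limits pointer afterwards. The resulting locking in the sysfs store paths, condensed from the diff and illustrative only, looks like this:

	mutex_lock(&intel_pstate_driver_lock);
	mutex_lock(&intel_pstate_limits_lock);

	limits->no_turbo = clamp_t(int, input, 0, 1);

	/* Drops and re-acquires intel_pstate_limits_lock internally so that
	 * cpufreq_update_policy() can call back into the driver, then
	 * restores the saved limits pointer. */
	intel_pstate_update_policies();

	mutex_unlock(&intel_pstate_limits_lock);
	mutex_unlock(&intel_pstate_driver_lock);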
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index 43a0c8a26ab0..00a16ab601cb 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c | |||
| @@ -82,7 +82,7 @@ void cryp_activity(struct cryp_device_data *device_data, | |||
| 82 | void cryp_flush_inoutfifo(struct cryp_device_data *device_data) | 82 | void cryp_flush_inoutfifo(struct cryp_device_data *device_data) |
| 83 | { | 83 | { |
| 84 | /* | 84 | /* |
| 85 | * We always need to disble the hardware before trying to flush the | 85 | * We always need to disable the hardware before trying to flush the |
| 86 | * FIFO. This is something that isn't written in the design | 86 | * FIFO. This is something that isn't written in the design |
| 87 | * specification, but we have been informed by the hardware designers | 87 | * specification, but we have been informed by the hardware designers |
| 88 | * that this must be done. | 88 | * that this must be done. |
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 349dc3e1e52e..974c5a31a005 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c | |||
| @@ -65,6 +65,7 @@ static bool __init efi_virtmap_init(void) | |||
| 65 | bool systab_found; | 65 | bool systab_found; |
| 66 | 66 | ||
| 67 | efi_mm.pgd = pgd_alloc(&efi_mm); | 67 | efi_mm.pgd = pgd_alloc(&efi_mm); |
| 68 | mm_init_cpumask(&efi_mm); | ||
| 68 | init_new_context(NULL, &efi_mm); | 69 | init_new_context(NULL, &efi_mm); |
| 69 | 70 | ||
| 70 | systab_found = false; | 71 | systab_found = false; |
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index 6def402bf569..5da36e56b36a 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c | |||
| @@ -45,6 +45,8 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) | |||
| 45 | size = sizeof(secboot); | 45 | size = sizeof(secboot); |
| 46 | status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid, | 46 | status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid, |
| 47 | NULL, &size, &secboot); | 47 | NULL, &size, &secboot); |
| 48 | if (status == EFI_NOT_FOUND) | ||
| 49 | return efi_secureboot_mode_disabled; | ||
| 48 | if (status != EFI_SUCCESS) | 50 | if (status != EFI_SUCCESS) |
| 49 | goto out_efi_err; | 51 | goto out_efi_err; |
| 50 | 52 | ||
| @@ -78,7 +80,5 @@ secure_boot_enabled: | |||
| 78 | 80 | ||
| 79 | out_efi_err: | 81 | out_efi_err: |
| 80 | pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n"); | 82 | pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n"); |
| 81 | if (status == EFI_NOT_FOUND) | ||
| 82 | return efi_secureboot_mode_disabled; | ||
| 83 | return efi_secureboot_mode_unknown; | 83 | return efi_secureboot_mode_unknown; |
| 84 | } | 84 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 31375bdde6f1..011800f621c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
| @@ -788,7 +788,7 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) | |||
| 788 | } | 788 | } |
| 789 | } | 789 | } |
| 790 | 790 | ||
| 791 | /* disble sdma engine before programing it */ | 791 | /* disable sdma engine before programing it */ |
| 792 | sdma_v3_0_ctx_switch_enable(adev, false); | 792 | sdma_v3_0_ctx_switch_enable(adev, false); |
| 793 | sdma_v3_0_enable(adev, false); | 793 | sdma_v3_0_enable(adev, false); |
| 794 | 794 | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index af267c35d813..ee5883f59be5 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | |||
| @@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer, | |||
| 147 | struct drm_gem_object *obj = buffer->priv; | 147 | struct drm_gem_object *obj = buffer->priv; |
| 148 | int ret = 0; | 148 | int ret = 0; |
| 149 | 149 | ||
| 150 | if (WARN_ON(!obj->filp)) | ||
| 151 | return -EINVAL; | ||
| 152 | |||
| 153 | ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); | 150 | ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); |
| 154 | if (ret < 0) | 151 | if (ret < 0) |
| 155 | return ret; | 152 | return ret; |
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index b5bfbe50bd87..b0ff304ce3dc 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c | |||
| @@ -32,6 +32,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) | |||
| 32 | { | 32 | { |
| 33 | const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode; | 33 | const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode; |
| 34 | struct rcar_du_device *rcdu = crtc->group->dev; | 34 | struct rcar_du_device *rcdu = crtc->group->dev; |
| 35 | struct vsp1_du_lif_config cfg = { | ||
| 36 | .width = mode->hdisplay, | ||
| 37 | .height = mode->vdisplay, | ||
| 38 | }; | ||
| 35 | struct rcar_du_plane_state state = { | 39 | struct rcar_du_plane_state state = { |
| 36 | .state = { | 40 | .state = { |
| 37 | .crtc = &crtc->crtc, | 41 | .crtc = &crtc->crtc, |
| @@ -66,12 +70,12 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) | |||
| 66 | */ | 70 | */ |
| 67 | crtc->group->need_restart = true; | 71 | crtc->group->need_restart = true; |
| 68 | 72 | ||
| 69 | vsp1_du_setup_lif(crtc->vsp->vsp, mode->hdisplay, mode->vdisplay); | 73 | vsp1_du_setup_lif(crtc->vsp->vsp, &cfg); |
| 70 | } | 74 | } |
| 71 | 75 | ||
| 72 | void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) | 76 | void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) |
| 73 | { | 77 | { |
| 74 | vsp1_du_setup_lif(crtc->vsp->vsp, 0, 0); | 78 | vsp1_du_setup_lif(crtc->vsp->vsp, NULL); |
| 75 | } | 79 | } |
| 76 | 80 | ||
| 77 | void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) | 81 | void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) |
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index f80bf9385e41..d745e8b50fb8 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c | |||
| @@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc) | |||
| 464 | { | 464 | { |
| 465 | struct drm_device *dev = crtc->dev; | 465 | struct drm_device *dev = crtc->dev; |
| 466 | struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); | 466 | struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); |
| 467 | unsigned long flags; | ||
| 467 | 468 | ||
| 468 | WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); | 469 | WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); |
| 469 | mutex_lock(&tilcdc_crtc->enable_lock); | 470 | mutex_lock(&tilcdc_crtc->enable_lock); |
| @@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc) | |||
| 484 | tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG, | 485 | tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG, |
| 485 | LCDC_PALETTE_LOAD_MODE(DATA_ONLY), | 486 | LCDC_PALETTE_LOAD_MODE(DATA_ONLY), |
| 486 | LCDC_PALETTE_LOAD_MODE_MASK); | 487 | LCDC_PALETTE_LOAD_MODE_MASK); |
| 488 | |||
| 489 | /* There is no real chance for a race here as the time stamp | ||
| 490 | * is taken before the raster DMA is started. The spin-lock is | ||
| 491 | * taken to have a memory barrier after taking the time-stamp | ||
| 492 | * and to avoid a context switch between taking the stamp and | ||
| 493 | * enabling the raster. | ||
| 494 | */ | ||
| 495 | spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); | ||
| 496 | tilcdc_crtc->last_vblank = ktime_get(); | ||
| 487 | tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE); | 497 | tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE); |
| 498 | spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); | ||
| 488 | 499 | ||
| 489 | drm_crtc_vblank_on(crtc); | 500 | drm_crtc_vblank_on(crtc); |
| 490 | 501 | ||
| @@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown) | |||
| 539 | } | 550 | } |
| 540 | 551 | ||
| 541 | drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); | 552 | drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); |
| 542 | tilcdc_crtc->last_vblank = 0; | ||
| 543 | 553 | ||
| 544 | tilcdc_crtc->enabled = false; | 554 | tilcdc_crtc->enabled = false; |
| 545 | mutex_unlock(&tilcdc_crtc->enable_lock); | 555 | mutex_unlock(&tilcdc_crtc->enable_lock); |
| @@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc, | |||
| 602 | { | 612 | { |
| 603 | struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); | 613 | struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); |
| 604 | struct drm_device *dev = crtc->dev; | 614 | struct drm_device *dev = crtc->dev; |
| 605 | unsigned long flags; | ||
| 606 | 615 | ||
| 607 | WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); | 616 | WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); |
| 608 | 617 | ||
| @@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc, | |||
| 614 | drm_framebuffer_reference(fb); | 623 | drm_framebuffer_reference(fb); |
| 615 | 624 | ||
| 616 | crtc->primary->fb = fb; | 625 | crtc->primary->fb = fb; |
| 626 | tilcdc_crtc->event = event; | ||
| 617 | 627 | ||
| 618 | spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); | 628 | mutex_lock(&tilcdc_crtc->enable_lock); |
| 619 | 629 | ||
| 620 | if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) { | 630 | if (tilcdc_crtc->enabled) { |
| 631 | unsigned long flags; | ||
| 621 | ktime_t next_vblank; | 632 | ktime_t next_vblank; |
| 622 | s64 tdiff; | 633 | s64 tdiff; |
| 623 | 634 | ||
| 624 | next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, | 635 | spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); |
| 625 | 1000000 / crtc->hwmode.vrefresh); | ||
| 626 | 636 | ||
| 637 | next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, | ||
| 638 | 1000000 / crtc->hwmode.vrefresh); | ||
| 627 | tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); | 639 | tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); |
| 628 | 640 | ||
| 629 | if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) | 641 | if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) |
| 630 | tilcdc_crtc->next_fb = fb; | 642 | tilcdc_crtc->next_fb = fb; |
| 631 | } | 643 | else |
| 632 | 644 | set_scanout(crtc, fb); | |
| 633 | if (tilcdc_crtc->next_fb != fb) | ||
| 634 | set_scanout(crtc, fb); | ||
| 635 | 645 | ||
| 636 | tilcdc_crtc->event = event; | 646 | spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); |
| 647 | } | ||
| 637 | 648 | ||
| 638 | spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); | 649 | mutex_unlock(&tilcdc_crtc->enable_lock); |
| 639 | 650 | ||
| 640 | return 0; | 651 | return 0; |
| 641 | } | 652 | } |
| @@ -1036,5 +1047,5 @@ int tilcdc_crtc_create(struct drm_device *dev) | |||
| 1036 | 1047 | ||
| 1037 | fail: | 1048 | fail: |
| 1038 | tilcdc_crtc_destroy(crtc); | 1049 | tilcdc_crtc_destroy(crtc); |
| 1039 | return -ENOMEM; | 1050 | return ret; |
| 1040 | } | 1051 | } |
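Note on the tilcdc hunks above: the vblank timestamp is now taken and the raster enabled under irq_lock, and tilcdc_crtc_update_fb() makes its flip decision under enable_lock while the CRTC is known to be enabled. A condensed, illustrative restatement of that decision, using the names from the diff:

	/* If the next expected vblank is closer than the safety threshold,
	 * leave the new framebuffer for the vblank IRQ handler to program;
	 * otherwise it is safe to write the scanout registers immediately. */
	next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
				   1000000 / crtc->hwmode.vrefresh);
	tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

	if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
		tilcdc_crtc->next_fb = fb;
	else
		set_scanout(crtc, fb);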
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 81a80c82f1bd..bd0d1988feb2 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c | |||
| @@ -543,7 +543,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel) | |||
| 543 | /* | 543 | /* |
| 544 | * In case a device driver's probe() fails (e.g., | 544 | * In case a device driver's probe() fails (e.g., |
| 545 | * util_probe() -> vmbus_open() returns -ENOMEM) and the device is | 545 | * util_probe() -> vmbus_open() returns -ENOMEM) and the device is |
| 546 | * rescinded later (e.g., we dynamically disble an Integrated Service | 546 | * rescinded later (e.g., we dynamically disable an Integrated Service |
| 547 | * in Hyper-V Manager), the driver's remove() invokes vmbus_close(): | 547 | * in Hyper-V Manager), the driver's remove() invokes vmbus_close(): |
| 548 | * here we should skip most of the below cleanup work. | 548 | * here we should skip most of the below cleanup work. |
| 549 | */ | 549 | */ |
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 0652281662a8..78792b4d6437 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c | |||
| @@ -465,6 +465,7 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, | |||
| 465 | u8 *tmp_buf; | 465 | u8 *tmp_buf; |
| 466 | int len = 0; | 466 | int len = 0; |
| 467 | int xfersz = brcmstb_i2c_get_xfersz(dev); | 467 | int xfersz = brcmstb_i2c_get_xfersz(dev); |
| 468 | u32 cond, cond_per_msg; | ||
| 468 | 469 | ||
| 469 | if (dev->is_suspended) | 470 | if (dev->is_suspended) |
| 470 | return -EBUSY; | 471 | return -EBUSY; |
| @@ -481,10 +482,11 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, | |||
| 481 | pmsg->buf ? pmsg->buf[0] : '0', pmsg->len); | 482 | pmsg->buf ? pmsg->buf[0] : '0', pmsg->len); |
| 482 | 483 | ||
| 483 | if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART)) | 484 | if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART)) |
| 484 | brcmstb_set_i2c_start_stop(dev, ~(COND_START_STOP)); | 485 | cond = ~COND_START_STOP; |
| 485 | else | 486 | else |
| 486 | brcmstb_set_i2c_start_stop(dev, | 487 | cond = COND_RESTART | COND_NOSTOP; |
| 487 | COND_RESTART | COND_NOSTOP); | 488 | |
| 489 | brcmstb_set_i2c_start_stop(dev, cond); | ||
| 488 | 490 | ||
| 489 | /* Send slave address */ | 491 | /* Send slave address */ |
| 490 | if (!(pmsg->flags & I2C_M_NOSTART)) { | 492 | if (!(pmsg->flags & I2C_M_NOSTART)) { |
| @@ -497,13 +499,24 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, | |||
| 497 | } | 499 | } |
| 498 | } | 500 | } |
| 499 | 501 | ||
| 502 | cond_per_msg = cond; | ||
| 503 | |||
| 500 | /* Perform data transfer */ | 504 | /* Perform data transfer */ |
| 501 | while (len) { | 505 | while (len) { |
| 502 | bytes_to_xfer = min(len, xfersz); | 506 | bytes_to_xfer = min(len, xfersz); |
| 503 | 507 | ||
| 504 | if (len <= xfersz && i == (num - 1)) | 508 | if (len <= xfersz) { |
| 505 | brcmstb_set_i2c_start_stop(dev, | 509 | if (i == (num - 1)) |
| 506 | ~(COND_START_STOP)); | 510 | cond_per_msg = cond_per_msg & |
| 511 | ~(COND_RESTART | COND_NOSTOP); | ||
| 512 | else | ||
| 513 | cond_per_msg = cond; | ||
| 514 | } else { | ||
| 515 | cond_per_msg = (cond_per_msg & ~COND_RESTART) | | ||
| 516 | COND_NOSTOP; | ||
| 517 | } | ||
| 518 | |||
| 519 | brcmstb_set_i2c_start_stop(dev, cond_per_msg); | ||
| 507 | 520 | ||
| 508 | rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf, | 521 | rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf, |
| 509 | bytes_to_xfer, pmsg); | 522 | bytes_to_xfer, pmsg); |
| @@ -512,6 +525,8 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, | |||
| 512 | 525 | ||
| 513 | len -= bytes_to_xfer; | 526 | len -= bytes_to_xfer; |
| 514 | tmp_buf += bytes_to_xfer; | 527 | tmp_buf += bytes_to_xfer; |
| 528 | |||
| 529 | cond_per_msg = COND_NOSTART | COND_NOSTOP; | ||
| 515 | } | 530 | } |
| 516 | } | 531 | } |
| 517 | 532 | ||
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index c1db3a5a340f..d9aaf1790e0e 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h | |||
| @@ -88,6 +88,7 @@ struct dw_i2c_dev { | |||
| 88 | void __iomem *base; | 88 | void __iomem *base; |
| 89 | struct completion cmd_complete; | 89 | struct completion cmd_complete; |
| 90 | struct clk *clk; | 90 | struct clk *clk; |
| 91 | struct reset_control *rst; | ||
| 91 | u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev); | 92 | u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev); |
| 92 | struct dw_pci_controller *controller; | 93 | struct dw_pci_controller *controller; |
| 93 | int cmd_err; | 94 | int cmd_err; |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 6ce431323125..79c4b4ea0539 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <linux/pm_runtime.h> | 38 | #include <linux/pm_runtime.h> |
| 39 | #include <linux/property.h> | 39 | #include <linux/property.h> |
| 40 | #include <linux/io.h> | 40 | #include <linux/io.h> |
| 41 | #include <linux/reset.h> | ||
| 41 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
| 42 | #include <linux/acpi.h> | 43 | #include <linux/acpi.h> |
| 43 | #include <linux/platform_data/i2c-designware.h> | 44 | #include <linux/platform_data/i2c-designware.h> |
| @@ -199,6 +200,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) | |||
| 199 | dev->irq = irq; | 200 | dev->irq = irq; |
| 200 | platform_set_drvdata(pdev, dev); | 201 | platform_set_drvdata(pdev, dev); |
| 201 | 202 | ||
| 203 | dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); | ||
| 204 | if (IS_ERR(dev->rst)) { | ||
| 205 | if (PTR_ERR(dev->rst) == -EPROBE_DEFER) | ||
| 206 | return -EPROBE_DEFER; | ||
| 207 | } else { | ||
| 208 | reset_control_deassert(dev->rst); | ||
| 209 | } | ||
| 210 | |||
| 202 | if (pdata) { | 211 | if (pdata) { |
| 203 | dev->clk_freq = pdata->i2c_scl_freq; | 212 | dev->clk_freq = pdata->i2c_scl_freq; |
| 204 | } else { | 213 | } else { |
| @@ -235,12 +244,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) | |||
| 235 | && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { | 244 | && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { |
| 236 | dev_err(&pdev->dev, | 245 | dev_err(&pdev->dev, |
| 237 | "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported"); | 246 | "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported"); |
| 238 | return -EINVAL; | 247 | r = -EINVAL; |
| 248 | goto exit_reset; | ||
| 239 | } | 249 | } |
| 240 | 250 | ||
| 241 | r = i2c_dw_eval_lock_support(dev); | 251 | r = i2c_dw_eval_lock_support(dev); |
| 242 | if (r) | 252 | if (r) |
| 243 | return r; | 253 | goto exit_reset; |
| 244 | 254 | ||
| 245 | dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY; | 255 | dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY; |
| 246 | 256 | ||
| @@ -286,10 +296,18 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) | |||
| 286 | } | 296 | } |
| 287 | 297 | ||
| 288 | r = i2c_dw_probe(dev); | 298 | r = i2c_dw_probe(dev); |
| 289 | if (r && !dev->pm_runtime_disabled) | 299 | if (r) |
| 290 | pm_runtime_disable(&pdev->dev); | 300 | goto exit_probe; |
| 291 | 301 | ||
| 292 | return r; | 302 | return r; |
| 303 | |||
| 304 | exit_probe: | ||
| 305 | if (!dev->pm_runtime_disabled) | ||
| 306 | pm_runtime_disable(&pdev->dev); | ||
| 307 | exit_reset: | ||
| 308 | if (!IS_ERR_OR_NULL(dev->rst)) | ||
| 309 | reset_control_assert(dev->rst); | ||
| 310 | return r; | ||
| 293 | } | 311 | } |
| 294 | 312 | ||
| 295 | static int dw_i2c_plat_remove(struct platform_device *pdev) | 313 | static int dw_i2c_plat_remove(struct platform_device *pdev) |
| @@ -306,6 +324,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev) | |||
| 306 | pm_runtime_put_sync(&pdev->dev); | 324 | pm_runtime_put_sync(&pdev->dev); |
| 307 | if (!dev->pm_runtime_disabled) | 325 | if (!dev->pm_runtime_disabled) |
| 308 | pm_runtime_disable(&pdev->dev); | 326 | pm_runtime_disable(&pdev->dev); |
| 327 | if (!IS_ERR_OR_NULL(dev->rst)) | ||
| 328 | reset_control_assert(dev->rst); | ||
| 309 | 329 | ||
| 310 | return 0; | 330 | return 0; |
| 311 | } | 331 | } |
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index cbd93ce0661f..736a82472101 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c | |||
| @@ -457,7 +457,6 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) | |||
| 457 | 457 | ||
| 458 | int_status = readl(i2c->regs + HSI2C_INT_STATUS); | 458 | int_status = readl(i2c->regs + HSI2C_INT_STATUS); |
| 459 | writel(int_status, i2c->regs + HSI2C_INT_STATUS); | 459 | writel(int_status, i2c->regs + HSI2C_INT_STATUS); |
| 460 | trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); | ||
| 461 | 460 | ||
| 462 | /* handle interrupt related to the transfer status */ | 461 | /* handle interrupt related to the transfer status */ |
| 463 | if (i2c->variant->hw == HSI2C_EXYNOS7) { | 462 | if (i2c->variant->hw == HSI2C_EXYNOS7) { |
| @@ -482,11 +481,13 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) | |||
| 482 | goto stop; | 481 | goto stop; |
| 483 | } | 482 | } |
| 484 | 483 | ||
| 484 | trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); | ||
| 485 | if ((trans_status & HSI2C_MASTER_ST_MASK) == HSI2C_MASTER_ST_LOSE) { | 485 | if ((trans_status & HSI2C_MASTER_ST_MASK) == HSI2C_MASTER_ST_LOSE) { |
| 486 | i2c->state = -EAGAIN; | 486 | i2c->state = -EAGAIN; |
| 487 | goto stop; | 487 | goto stop; |
| 488 | } | 488 | } |
| 489 | } else if (int_status & HSI2C_INT_I2C) { | 489 | } else if (int_status & HSI2C_INT_I2C) { |
| 490 | trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); | ||
| 490 | if (trans_status & HSI2C_NO_DEV_ACK) { | 491 | if (trans_status & HSI2C_NO_DEV_ACK) { |
| 491 | dev_dbg(i2c->dev, "No ACK from device\n"); | 492 | dev_dbg(i2c->dev, "No ACK from device\n"); |
| 492 | i2c->state = -ENXIO; | 493 | i2c->state = -ENXIO; |
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 2aa61bbbd307..73b97c71a484 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c | |||
| @@ -175,7 +175,7 @@ static void meson_i2c_put_data(struct meson_i2c *i2c, char *buf, int len) | |||
| 175 | wdata1 |= *buf++ << ((i - 4) * 8); | 175 | wdata1 |= *buf++ << ((i - 4) * 8); |
| 176 | 176 | ||
| 177 | writel(wdata0, i2c->regs + REG_TOK_WDATA0); | 177 | writel(wdata0, i2c->regs + REG_TOK_WDATA0); |
| 178 | writel(wdata0, i2c->regs + REG_TOK_WDATA1); | 178 | writel(wdata1, i2c->regs + REG_TOK_WDATA1); |
| 179 | 179 | ||
| 180 | dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__, | 180 | dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__, |
| 181 | wdata0, wdata1, len); | 181 | wdata0, wdata1, len); |
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 4a7d9bc2142b..45d61714c81b 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c | |||
| @@ -172,14 +172,6 @@ static const struct i2c_adapter_quirks mt6577_i2c_quirks = { | |||
| 172 | .max_comb_2nd_msg_len = 31, | 172 | .max_comb_2nd_msg_len = 31, |
| 173 | }; | 173 | }; |
| 174 | 174 | ||
| 175 | static const struct i2c_adapter_quirks mt8173_i2c_quirks = { | ||
| 176 | .max_num_msgs = 65535, | ||
| 177 | .max_write_len = 65535, | ||
| 178 | .max_read_len = 65535, | ||
| 179 | .max_comb_1st_msg_len = 65535, | ||
| 180 | .max_comb_2nd_msg_len = 65535, | ||
| 181 | }; | ||
| 182 | |||
| 183 | static const struct mtk_i2c_compatible mt6577_compat = { | 175 | static const struct mtk_i2c_compatible mt6577_compat = { |
| 184 | .quirks = &mt6577_i2c_quirks, | 176 | .quirks = &mt6577_i2c_quirks, |
| 185 | .pmic_i2c = 0, | 177 | .pmic_i2c = 0, |
| @@ -199,7 +191,6 @@ static const struct mtk_i2c_compatible mt6589_compat = { | |||
| 199 | }; | 191 | }; |
| 200 | 192 | ||
| 201 | static const struct mtk_i2c_compatible mt8173_compat = { | 193 | static const struct mtk_i2c_compatible mt8173_compat = { |
| 202 | .quirks = &mt8173_i2c_quirks, | ||
| 203 | .pmic_i2c = 0, | 194 | .pmic_i2c = 0, |
| 204 | .dcm = 1, | 195 | .dcm = 1, |
| 205 | .auto_restart = 1, | 196 | .auto_restart = 1, |
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index 8f11d347b3ec..c811af4c8d81 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c | |||
| @@ -218,8 +218,12 @@ static irqreturn_t riic_tend_isr(int irq, void *data) | |||
| 218 | } | 218 | } |
| 219 | 219 | ||
| 220 | if (riic->is_last || riic->err) { | 220 | if (riic->is_last || riic->err) { |
| 221 | riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER); | 221 | riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER); |
| 222 | writeb(ICCR2_SP, riic->base + RIIC_ICCR2); | 222 | writeb(ICCR2_SP, riic->base + RIIC_ICCR2); |
| 223 | } else { | ||
| 224 | /* Transfer is complete, but do not send STOP */ | ||
| 225 | riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER); | ||
| 226 | complete(&riic->msg_done); | ||
| 223 | } | 227 | } |
| 224 | 228 | ||
| 225 | return IRQ_HANDLED; | 229 | return IRQ_HANDLED; |
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index 83768e85a919..2178266bca79 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c | |||
| @@ -429,6 +429,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc) | |||
| 429 | while (muxc->num_adapters) { | 429 | while (muxc->num_adapters) { |
| 430 | struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters]; | 430 | struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters]; |
| 431 | struct i2c_mux_priv *priv = adap->algo_data; | 431 | struct i2c_mux_priv *priv = adap->algo_data; |
| 432 | struct device_node *np = adap->dev.of_node; | ||
| 432 | 433 | ||
| 433 | muxc->adapter[muxc->num_adapters] = NULL; | 434 | muxc->adapter[muxc->num_adapters] = NULL; |
| 434 | 435 | ||
| @@ -438,6 +439,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc) | |||
| 438 | 439 | ||
| 439 | sysfs_remove_link(&priv->adap.dev.kobj, "mux_device"); | 440 | sysfs_remove_link(&priv->adap.dev.kobj, "mux_device"); |
| 440 | i2c_del_adapter(adap); | 441 | i2c_del_adapter(adap); |
| 442 | of_node_put(np); | ||
| 441 | kfree(priv); | 443 | kfree(priv); |
| 442 | } | 444 | } |
| 443 | } | 445 | } |
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c index 1eef56a89b1f..f96601268f71 100644 --- a/drivers/irqchip/irq-crossbar.c +++ b/drivers/irqchip/irq-crossbar.c | |||
| @@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = { | |||
| 198 | 198 | ||
| 199 | static int __init crossbar_of_init(struct device_node *node) | 199 | static int __init crossbar_of_init(struct device_node *node) |
| 200 | { | 200 | { |
| 201 | int i, size, max = 0, reserved = 0, entry; | 201 | u32 max = 0, entry, reg_size; |
| 202 | int i, size, reserved = 0; | ||
| 202 | const __be32 *irqsr; | 203 | const __be32 *irqsr; |
| 203 | int ret = -ENOMEM; | 204 | int ret = -ENOMEM; |
| 204 | 205 | ||
| @@ -275,9 +276,9 @@ static int __init crossbar_of_init(struct device_node *node) | |||
| 275 | if (!cb->register_offsets) | 276 | if (!cb->register_offsets) |
| 276 | goto err_irq_map; | 277 | goto err_irq_map; |
| 277 | 278 | ||
| 278 | of_property_read_u32(node, "ti,reg-size", &size); | 279 | of_property_read_u32(node, "ti,reg-size", ®_size); |
| 279 | 280 | ||
| 280 | switch (size) { | 281 | switch (reg_size) { |
| 281 | case 1: | 282 | case 1: |
| 282 | cb->write = crossbar_writeb; | 283 | cb->write = crossbar_writeb; |
| 283 | break; | 284 | break; |
| @@ -303,7 +304,7 @@ static int __init crossbar_of_init(struct device_node *node) | |||
| 303 | continue; | 304 | continue; |
| 304 | 305 | ||
| 305 | cb->register_offsets[i] = reserved; | 306 | cb->register_offsets[i] = reserved; |
| 306 | reserved += size; | 307 | reserved += reg_size; |
| 307 | } | 308 | } |
| 308 | 309 | ||
| 309 | of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map); | 310 | of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map); |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 23201004fd7a..f77f840d2b5f 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -1601,6 +1601,14 @@ static void __maybe_unused its_enable_quirk_cavium_23144(void *data) | |||
| 1601 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; | 1601 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; |
| 1602 | } | 1602 | } |
| 1603 | 1603 | ||
| 1604 | static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) | ||
| 1605 | { | ||
| 1606 | struct its_node *its = data; | ||
| 1607 | |||
| 1608 | /* On QDF2400, the size of the ITE is 16Bytes */ | ||
| 1609 | its->ite_size = 16; | ||
| 1610 | } | ||
| 1611 | |||
| 1604 | static const struct gic_quirk its_quirks[] = { | 1612 | static const struct gic_quirk its_quirks[] = { |
| 1605 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 | 1613 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 |
| 1606 | { | 1614 | { |
| @@ -1618,6 +1626,14 @@ static const struct gic_quirk its_quirks[] = { | |||
| 1618 | .init = its_enable_quirk_cavium_23144, | 1626 | .init = its_enable_quirk_cavium_23144, |
| 1619 | }, | 1627 | }, |
| 1620 | #endif | 1628 | #endif |
| 1629 | #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 | ||
| 1630 | { | ||
| 1631 | .desc = "ITS: QDF2400 erratum 0065", | ||
| 1632 | .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ | ||
| 1633 | .mask = 0xffffffff, | ||
| 1634 | .init = its_enable_quirk_qdf2400_e0065, | ||
| 1635 | }, | ||
| 1636 | #endif | ||
| 1621 | { | 1637 | { |
| 1622 | } | 1638 | } |
| 1623 | }; | 1639 | }; |
diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c index 409849165838..f64a36007800 100644 --- a/drivers/isdn/hisax/st5481_b.c +++ b/drivers/isdn/hisax/st5481_b.c | |||
| @@ -239,7 +239,7 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode) | |||
| 239 | } | 239 | } |
| 240 | } | 240 | } |
| 241 | } else { | 241 | } else { |
| 242 | // Disble B channel interrupts | 242 | // Disable B channel interrupts |
| 243 | st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL); | 243 | st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL); |
| 244 | 244 | ||
| 245 | // Disable B channel FIFOs | 245 | // Disable B channel FIFOs |
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index a126919ed102..5d13930f0f22 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h | |||
| @@ -4,7 +4,6 @@ | |||
| 4 | 4 | ||
| 5 | #include <linux/blkdev.h> | 5 | #include <linux/blkdev.h> |
| 6 | #include <linux/errno.h> | 6 | #include <linux/errno.h> |
| 7 | #include <linux/blkdev.h> | ||
| 8 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
| 9 | #include <linux/sched/clock.h> | 8 | #include <linux/sched/clock.h> |
| 10 | #include <linux/llist.h> | 9 | #include <linux/llist.h> |
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h index 7a681d8202c7..4442e478db72 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h +++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h | |||
| @@ -256,8 +256,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner, | |||
| 256 | * | 256 | * |
| 257 | * The actual DAP implementation may be restricted to only one of the modes. | 257 | * The actual DAP implementation may be restricted to only one of the modes. |
| 258 | * A compiler warning or error will be generated if the DAP implementation | 258 | * A compiler warning or error will be generated if the DAP implementation |
| 259 | * overides or cannot handle the mode defined below. | 259 | * overrides or cannot handle the mode defined below. |
| 260 | * | ||
| 261 | */ | 260 | */ |
| 262 | #ifndef DRXDAP_SINGLE_MASTER | 261 | #ifndef DRXDAP_SINGLE_MASTER |
| 263 | #define DRXDAP_SINGLE_MASTER 1 | 262 | #define DRXDAP_SINGLE_MASTER 1 |
| @@ -272,7 +271,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner, | |||
| 272 | * | 271 | * |
| 273 | * This maximum size may be restricted by the actual DAP implementation. | 272 | * This maximum size may be restricted by the actual DAP implementation. |
| 274 | * A compiler warning or error will be generated if the DAP implementation | 273 | * A compiler warning or error will be generated if the DAP implementation |
| 275 | * overides or cannot handle the chunksize defined below. | 274 | * overrides or cannot handle the chunksize defined below. |
| 276 | * | 275 | * |
| 277 | * Beware that the DAP uses DRXDAP_MAX_WCHUNKSIZE to create a temporary data | 276 | * Beware that the DAP uses DRXDAP_MAX_WCHUNKSIZE to create a temporary data |
| 278 | * buffer. Do not undefine or choose too large, unless your system is able to | 277 | * buffer. Do not undefine or choose too large, unless your system is able to |
| @@ -292,8 +291,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner, | |||
| 292 | * | 291 | * |
| 293 | * This maximum size may be restricted by the actual DAP implementation. | 292 | * This maximum size may be restricted by the actual DAP implementation. |
| 294 | * A compiler warning or error will be generated if the DAP implementation | 293 | * A compiler warning or error will be generated if the DAP implementation |
| 295 | * overides or cannot handle the chunksize defined below. | 294 | * overrides or cannot handle the chunksize defined below. |
| 296 | * | ||
| 297 | */ | 295 | */ |
| 298 | #ifndef DRXDAP_MAX_RCHUNKSIZE | 296 | #ifndef DRXDAP_MAX_RCHUNKSIZE |
| 299 | #define DRXDAP_MAX_RCHUNKSIZE 60 | 297 | #define DRXDAP_MAX_RCHUNKSIZE 60 |
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index b4b583f7137a..b4c0f10fc3b0 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c | |||
| @@ -54,12 +54,11 @@ EXPORT_SYMBOL_GPL(vsp1_du_init); | |||
| 54 | /** | 54 | /** |
| 55 | * vsp1_du_setup_lif - Setup the output part of the VSP pipeline | 55 | * vsp1_du_setup_lif - Setup the output part of the VSP pipeline |
| 56 | * @dev: the VSP device | 56 | * @dev: the VSP device |
| 57 | * @width: output frame width in pixels | 57 | * @cfg: the LIF configuration |
| 58 | * @height: output frame height in pixels | ||
| 59 | * | 58 | * |
| 60 | * Configure the output part of VSP DRM pipeline for the given frame @width and | 59 | * Configure the output part of VSP DRM pipeline for the given frame @cfg.width |
| 61 | * @height. This sets up formats on the BRU source pad, the WPF0 sink and source | 60 | * and @cfg.height. This sets up formats on the BRU source pad, the WPF0 sink |
| 62 | * pads, and the LIF sink pad. | 61 | * and source pads, and the LIF sink pad. |
| 63 | * | 62 | * |
| 64 | * As the media bus code on the BRU source pad is conditioned by the | 63 | * As the media bus code on the BRU source pad is conditioned by the |
| 65 | * configuration of the BRU sink 0 pad, we also set up the formats on all BRU | 64 | * configuration of the BRU sink 0 pad, we also set up the formats on all BRU |
| @@ -69,8 +68,7 @@ EXPORT_SYMBOL_GPL(vsp1_du_init); | |||
| 69 | * | 68 | * |
| 70 | * Return 0 on success or a negative error code on failure. | 69 | * Return 0 on success or a negative error code on failure. |
| 71 | */ | 70 | */ |
| 72 | int vsp1_du_setup_lif(struct device *dev, unsigned int width, | 71 | int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg) |
| 73 | unsigned int height) | ||
| 74 | { | 72 | { |
| 75 | struct vsp1_device *vsp1 = dev_get_drvdata(dev); | 73 | struct vsp1_device *vsp1 = dev_get_drvdata(dev); |
| 76 | struct vsp1_pipeline *pipe = &vsp1->drm->pipe; | 74 | struct vsp1_pipeline *pipe = &vsp1->drm->pipe; |
| @@ -79,11 +77,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, | |||
| 79 | unsigned int i; | 77 | unsigned int i; |
| 80 | int ret; | 78 | int ret; |
| 81 | 79 | ||
| 82 | dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n", | 80 | if (!cfg) { |
| 83 | __func__, width, height); | 81 | /* NULL configuration means the CRTC is being disabled, stop |
| 84 | |||
| 85 | if (width == 0 || height == 0) { | ||
| 86 | /* Zero width or height means the CRTC is being disabled, stop | ||
| 87 | * the pipeline and turn the light off. | 82 | * the pipeline and turn the light off. |
| 88 | */ | 83 | */ |
| 89 | ret = vsp1_pipeline_stop(pipe); | 84 | ret = vsp1_pipeline_stop(pipe); |
| @@ -108,6 +103,9 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, | |||
| 108 | return 0; | 103 | return 0; |
| 109 | } | 104 | } |
| 110 | 105 | ||
| 106 | dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n", | ||
| 107 | __func__, cfg->width, cfg->height); | ||
| 108 | |||
| 111 | /* Configure the format at the BRU sinks and propagate it through the | 109 | /* Configure the format at the BRU sinks and propagate it through the |
| 112 | * pipeline. | 110 | * pipeline. |
| 113 | */ | 111 | */ |
| @@ -117,8 +115,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, | |||
| 117 | for (i = 0; i < bru->entity.source_pad; ++i) { | 115 | for (i = 0; i < bru->entity.source_pad; ++i) { |
| 118 | format.pad = i; | 116 | format.pad = i; |
| 119 | 117 | ||
| 120 | format.format.width = width; | 118 | format.format.width = cfg->width; |
| 121 | format.format.height = height; | 119 | format.format.height = cfg->height; |
| 122 | format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; | 120 | format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; |
| 123 | format.format.field = V4L2_FIELD_NONE; | 121 | format.format.field = V4L2_FIELD_NONE; |
| 124 | 122 | ||
| @@ -133,8 +131,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, | |||
| 133 | } | 131 | } |
| 134 | 132 | ||
| 135 | format.pad = bru->entity.source_pad; | 133 | format.pad = bru->entity.source_pad; |
| 136 | format.format.width = width; | 134 | format.format.width = cfg->width; |
| 137 | format.format.height = height; | 135 | format.format.height = cfg->height; |
| 138 | format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; | 136 | format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; |
| 139 | format.format.field = V4L2_FIELD_NONE; | 137 | format.format.field = V4L2_FIELD_NONE; |
| 140 | 138 | ||
| @@ -180,7 +178,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, | |||
| 180 | /* Verify that the format at the output of the pipeline matches the | 178 | /* Verify that the format at the output of the pipeline matches the |
| 181 | * requested frame size and media bus code. | 179 | * requested frame size and media bus code. |
| 182 | */ | 180 | */ |
| 183 | if (format.format.width != width || format.format.height != height || | 181 | if (format.format.width != cfg->width || |
| 182 | format.format.height != cfg->height || | ||
| 184 | format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) { | 183 | format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) { |
| 185 | dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__); | 184 | dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__); |
| 186 | return -EPIPE; | 185 | return -EPIPE; |
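
For callers, the new prototype means passing a vsp1_du_lif_config rather than raw dimensions, with a NULL config disabling the pipeline. A hedged sketch of the calling side; the demo_* wrappers are invented and only the width/height fields of the config are assumed:

#include <linux/device.h>
#include <media/vsp1.h>

/* Enable the LIF output for a given frame size. */
static int demo_enable_lif(struct device *vsp, unsigned int w, unsigned int h)
{
	struct vsp1_du_lif_config cfg = {
		.width	= w,
		.height	= h,
	};

	return vsp1_du_setup_lif(vsp, &cfg);
}

/* A NULL configuration stops the pipeline, as handled in the hunk above. */
static int demo_disable_lif(struct device *vsp)
{
	return vsp1_du_setup_lif(vsp, NULL);
}
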
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index 393dccaabdd0..1688893a65bb 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c | |||
| @@ -436,6 +436,8 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file) | |||
| 436 | return -ERESTARTSYS; | 436 | return -ERESTARTSYS; |
| 437 | 437 | ||
| 438 | ir = irctls[iminor(inode)]; | 438 | ir = irctls[iminor(inode)]; |
| 439 | mutex_unlock(&lirc_dev_lock); | ||
| 440 | |||
| 439 | if (!ir) { | 441 | if (!ir) { |
| 440 | retval = -ENODEV; | 442 | retval = -ENODEV; |
| 441 | goto error; | 443 | goto error; |
| @@ -476,8 +478,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file) | |||
| 476 | } | 478 | } |
| 477 | 479 | ||
| 478 | error: | 480 | error: |
| 479 | mutex_unlock(&lirc_dev_lock); | ||
| 480 | |||
| 481 | nonseekable_open(inode, file); | 481 | nonseekable_open(inode, file); |
| 482 | 482 | ||
| 483 | return retval; | 483 | return retval; |
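
The fix narrows the critical section: lirc_dev_lock now protects only the lookup in the minor-number table, so the rest of the open path and its error exit no longer run with the global mutex held. A minimal sketch of that narrowed-lock pattern, with invented names:

#include <linux/kernel.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_table_lock);
static void *demo_table[32];

static void *demo_lookup(unsigned int minor)
{
	void *entry;

	if (minor >= ARRAY_SIZE(demo_table))
		return NULL;

	mutex_lock(&demo_table_lock);
	entry = demo_table[minor];
	mutex_unlock(&demo_table_lock);	/* drop before any further work */

	return entry;
}
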
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index b109f8246b96..ec4b25bd2ec2 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c | |||
| @@ -176,12 +176,13 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev, | |||
| 176 | { | 176 | { |
| 177 | u8 tolerance, config; | 177 | u8 tolerance, config; |
| 178 | struct nvt_dev *nvt = dev->priv; | 178 | struct nvt_dev *nvt = dev->priv; |
| 179 | unsigned long flags; | ||
| 179 | int i; | 180 | int i; |
| 180 | 181 | ||
| 181 | /* hardcode the tolerance to 10% */ | 182 | /* hardcode the tolerance to 10% */ |
| 182 | tolerance = DIV_ROUND_UP(count, 10); | 183 | tolerance = DIV_ROUND_UP(count, 10); |
| 183 | 184 | ||
| 184 | spin_lock(&nvt->lock); | 185 | spin_lock_irqsave(&nvt->lock, flags); |
| 185 | 186 | ||
| 186 | nvt_clear_cir_wake_fifo(nvt); | 187 | nvt_clear_cir_wake_fifo(nvt); |
| 187 | nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP); | 188 | nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP); |
| @@ -203,7 +204,7 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev, | |||
| 203 | 204 | ||
| 204 | nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON); | 205 | nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON); |
| 205 | 206 | ||
| 206 | spin_unlock(&nvt->lock); | 207 | spin_unlock_irqrestore(&nvt->lock, flags); |
| 207 | } | 208 | } |
| 208 | 209 | ||
| 209 | static ssize_t wakeup_data_show(struct device *dev, | 210 | static ssize_t wakeup_data_show(struct device *dev, |
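
The switch to spin_lock_irqsave() matters because the same lock is taken from the interrupt handler: with interrupts left enabled, an IRQ arriving while the lock is held in process context would deadlock on it. A small stand-alone sketch of the rule (struct demo_dev is illustrative):

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_dev {
	spinlock_t lock;	/* also taken from the IRQ handler */
	u32 wake_count;
};

static void demo_update_wake_count(struct demo_dev *d, u32 count)
{
	unsigned long flags;

	/* Disable local interrupts for the duration of the critical section. */
	spin_lock_irqsave(&d->lock, flags);
	d->wake_count = count;
	spin_unlock_irqrestore(&d->lock, flags);
}
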
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 2424946740e6..d84533699668 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c | |||
| @@ -1663,6 +1663,7 @@ static int rc_setup_rx_device(struct rc_dev *dev) | |||
| 1663 | { | 1663 | { |
| 1664 | int rc; | 1664 | int rc; |
| 1665 | struct rc_map *rc_map; | 1665 | struct rc_map *rc_map; |
| 1666 | u64 rc_type; | ||
| 1666 | 1667 | ||
| 1667 | if (!dev->map_name) | 1668 | if (!dev->map_name) |
| 1668 | return -EINVAL; | 1669 | return -EINVAL; |
| @@ -1677,15 +1678,18 @@ static int rc_setup_rx_device(struct rc_dev *dev) | |||
| 1677 | if (rc) | 1678 | if (rc) |
| 1678 | return rc; | 1679 | return rc; |
| 1679 | 1680 | ||
| 1680 | if (dev->change_protocol) { | 1681 | rc_type = BIT_ULL(rc_map->rc_type); |
| 1681 | u64 rc_type = (1ll << rc_map->rc_type); | ||
| 1682 | 1682 | ||
| 1683 | if (dev->change_protocol) { | ||
| 1683 | rc = dev->change_protocol(dev, &rc_type); | 1684 | rc = dev->change_protocol(dev, &rc_type); |
| 1684 | if (rc < 0) | 1685 | if (rc < 0) |
| 1685 | goto out_table; | 1686 | goto out_table; |
| 1686 | dev->enabled_protocols = rc_type; | 1687 | dev->enabled_protocols = rc_type; |
| 1687 | } | 1688 | } |
| 1688 | 1689 | ||
| 1690 | if (dev->driver_type == RC_DRIVER_IR_RAW) | ||
| 1691 | ir_raw_load_modules(&rc_type); | ||
| 1692 | |||
| 1689 | set_bit(EV_KEY, dev->input_dev->evbit); | 1693 | set_bit(EV_KEY, dev->input_dev->evbit); |
| 1690 | set_bit(EV_REP, dev->input_dev->evbit); | 1694 | set_bit(EV_REP, dev->input_dev->evbit); |
| 1691 | set_bit(EV_MSC, dev->input_dev->evbit); | 1695 | set_bit(EV_MSC, dev->input_dev->evbit); |
| @@ -1777,12 +1781,6 @@ int rc_register_device(struct rc_dev *dev) | |||
| 1777 | dev->input_name ?: "Unspecified device", path ?: "N/A"); | 1781 | dev->input_name ?: "Unspecified device", path ?: "N/A"); |
| 1778 | kfree(path); | 1782 | kfree(path); |
| 1779 | 1783 | ||
| 1780 | if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { | ||
| 1781 | rc = rc_setup_rx_device(dev); | ||
| 1782 | if (rc) | ||
| 1783 | goto out_dev; | ||
| 1784 | } | ||
| 1785 | |||
| 1786 | if (dev->driver_type == RC_DRIVER_IR_RAW || | 1784 | if (dev->driver_type == RC_DRIVER_IR_RAW || |
| 1787 | dev->driver_type == RC_DRIVER_IR_RAW_TX) { | 1785 | dev->driver_type == RC_DRIVER_IR_RAW_TX) { |
| 1788 | if (!raw_init) { | 1786 | if (!raw_init) { |
| @@ -1791,7 +1789,13 @@ int rc_register_device(struct rc_dev *dev) | |||
| 1791 | } | 1789 | } |
| 1792 | rc = ir_raw_event_register(dev); | 1790 | rc = ir_raw_event_register(dev); |
| 1793 | if (rc < 0) | 1791 | if (rc < 0) |
| 1794 | goto out_rx; | 1792 | goto out_dev; |
| 1793 | } | ||
| 1794 | |||
| 1795 | if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { | ||
| 1796 | rc = rc_setup_rx_device(dev); | ||
| 1797 | if (rc) | ||
| 1798 | goto out_raw; | ||
| 1795 | } | 1799 | } |
| 1796 | 1800 | ||
| 1797 | /* Allow the RC sysfs nodes to be accessible */ | 1801 | /* Allow the RC sysfs nodes to be accessible */ |
| @@ -1803,8 +1807,8 @@ int rc_register_device(struct rc_dev *dev) | |||
| 1803 | 1807 | ||
| 1804 | return 0; | 1808 | return 0; |
| 1805 | 1809 | ||
| 1806 | out_rx: | 1810 | out_raw: |
| 1807 | rc_free_rx_device(dev); | 1811 | ir_raw_event_unregister(dev); |
| 1808 | out_dev: | 1812 | out_dev: |
| 1809 | device_del(&dev->dev); | 1813 | device_del(&dev->dev); |
| 1810 | out_unlock: | 1814 | out_unlock: |
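
The reordering registers the raw IR event handler before the RX setup that can depend on it (ir_raw_load_modules() is now called from rc_setup_rx_device()), and the error path unwinds in reverse order via out_raw. A generic, self-contained sketch of that set-up-in-order/tear-down-in-reverse shape; every demo_* name is a placeholder:

#include <linux/errno.h>
#include <linux/types.h>

struct demo_ctx {
	bool raw_registered;
	bool rx_ready;
};

static int demo_register_raw(struct demo_ctx *ctx)
{
	ctx->raw_registered = true;
	return 0;
}

static void demo_unregister_raw(struct demo_ctx *ctx)
{
	ctx->raw_registered = false;
}

static int demo_setup_rx(struct demo_ctx *ctx)
{
	if (!ctx->raw_registered)
		return -EINVAL;		/* RX setup relies on the raw handler */
	ctx->rx_ready = true;
	return 0;
}

static int demo_register(struct demo_ctx *ctx)
{
	int ret;

	ret = demo_register_raw(ctx);
	if (ret)
		return ret;

	ret = demo_setup_rx(ctx);
	if (ret)
		goto out_raw;

	return 0;

out_raw:
	demo_unregister_raw(ctx);	/* undo in reverse order on failure */
	return ret;
}
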
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c index 923fb2299553..41b54e40176c 100644 --- a/drivers/media/rc/serial_ir.c +++ b/drivers/media/rc/serial_ir.c | |||
| @@ -487,10 +487,69 @@ static void serial_ir_timeout(unsigned long arg) | |||
| 487 | ir_raw_event_handle(serial_ir.rcdev); | 487 | ir_raw_event_handle(serial_ir.rcdev); |
| 488 | } | 488 | } |
| 489 | 489 | ||
| 490 | /* Needed by serial_ir_probe() */ | ||
| 491 | static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf, | ||
| 492 | unsigned int count); | ||
| 493 | static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle); | ||
| 494 | static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier); | ||
| 495 | static int serial_ir_open(struct rc_dev *rcdev); | ||
| 496 | static void serial_ir_close(struct rc_dev *rcdev); | ||
| 497 | |||
| 490 | static int serial_ir_probe(struct platform_device *dev) | 498 | static int serial_ir_probe(struct platform_device *dev) |
| 491 | { | 499 | { |
| 500 | struct rc_dev *rcdev; | ||
| 492 | int i, nlow, nhigh, result; | 501 | int i, nlow, nhigh, result; |
| 493 | 502 | ||
| 503 | rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW); | ||
| 504 | if (!rcdev) | ||
| 505 | return -ENOMEM; | ||
| 506 | |||
| 507 | if (hardware[type].send_pulse && hardware[type].send_space) | ||
| 508 | rcdev->tx_ir = serial_ir_tx; | ||
| 509 | if (hardware[type].set_send_carrier) | ||
| 510 | rcdev->s_tx_carrier = serial_ir_tx_carrier; | ||
| 511 | if (hardware[type].set_duty_cycle) | ||
| 512 | rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle; | ||
| 513 | |||
| 514 | switch (type) { | ||
| 515 | case IR_HOMEBREW: | ||
| 516 | rcdev->input_name = "Serial IR type home-brew"; | ||
| 517 | break; | ||
| 518 | case IR_IRDEO: | ||
| 519 | rcdev->input_name = "Serial IR type IRdeo"; | ||
| 520 | break; | ||
| 521 | case IR_IRDEO_REMOTE: | ||
| 522 | rcdev->input_name = "Serial IR type IRdeo remote"; | ||
| 523 | break; | ||
| 524 | case IR_ANIMAX: | ||
| 525 | rcdev->input_name = "Serial IR type AnimaX"; | ||
| 526 | break; | ||
| 527 | case IR_IGOR: | ||
| 528 | rcdev->input_name = "Serial IR type IgorPlug"; | ||
| 529 | break; | ||
| 530 | } | ||
| 531 | |||
| 532 | rcdev->input_phys = KBUILD_MODNAME "/input0"; | ||
| 533 | rcdev->input_id.bustype = BUS_HOST; | ||
| 534 | rcdev->input_id.vendor = 0x0001; | ||
| 535 | rcdev->input_id.product = 0x0001; | ||
| 536 | rcdev->input_id.version = 0x0100; | ||
| 537 | rcdev->open = serial_ir_open; | ||
| 538 | rcdev->close = serial_ir_close; | ||
| 539 | rcdev->dev.parent = &serial_ir.pdev->dev; | ||
| 540 | rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; | ||
| 541 | rcdev->driver_name = KBUILD_MODNAME; | ||
| 542 | rcdev->map_name = RC_MAP_RC6_MCE; | ||
| 543 | rcdev->min_timeout = 1; | ||
| 544 | rcdev->timeout = IR_DEFAULT_TIMEOUT; | ||
| 545 | rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT; | ||
| 546 | rcdev->rx_resolution = 250000; | ||
| 547 | |||
| 548 | serial_ir.rcdev = rcdev; | ||
| 549 | |||
| 550 | setup_timer(&serial_ir.timeout_timer, serial_ir_timeout, | ||
| 551 | (unsigned long)&serial_ir); | ||
| 552 | |||
| 494 | result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler, | 553 | result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler, |
| 495 | share_irq ? IRQF_SHARED : 0, | 554 | share_irq ? IRQF_SHARED : 0, |
| 496 | KBUILD_MODNAME, &hardware); | 555 | KBUILD_MODNAME, &hardware); |
| @@ -516,9 +575,6 @@ static int serial_ir_probe(struct platform_device *dev) | |||
| 516 | return -EBUSY; | 575 | return -EBUSY; |
| 517 | } | 576 | } |
| 518 | 577 | ||
| 519 | setup_timer(&serial_ir.timeout_timer, serial_ir_timeout, | ||
| 520 | (unsigned long)&serial_ir); | ||
| 521 | |||
| 522 | result = hardware_init_port(); | 578 | result = hardware_init_port(); |
| 523 | if (result < 0) | 579 | if (result < 0) |
| 524 | return result; | 580 | return result; |
| @@ -552,7 +608,8 @@ static int serial_ir_probe(struct platform_device *dev) | |||
| 552 | sense ? "low" : "high"); | 608 | sense ? "low" : "high"); |
| 553 | 609 | ||
| 554 | dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io); | 610 | dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io); |
| 555 | return 0; | 611 | |
| 612 | return devm_rc_register_device(&dev->dev, rcdev); | ||
| 556 | } | 613 | } |
| 557 | 614 | ||
| 558 | static int serial_ir_open(struct rc_dev *rcdev) | 615 | static int serial_ir_open(struct rc_dev *rcdev) |
| @@ -723,7 +780,6 @@ static void serial_ir_exit(void) | |||
| 723 | 780 | ||
| 724 | static int __init serial_ir_init_module(void) | 781 | static int __init serial_ir_init_module(void) |
| 725 | { | 782 | { |
| 726 | struct rc_dev *rcdev; | ||
| 727 | int result; | 783 | int result; |
| 728 | 784 | ||
| 729 | switch (type) { | 785 | switch (type) { |
| @@ -754,63 +810,9 @@ static int __init serial_ir_init_module(void) | |||
| 754 | sense = !!sense; | 810 | sense = !!sense; |
| 755 | 811 | ||
| 756 | result = serial_ir_init(); | 812 | result = serial_ir_init(); |
| 757 | if (result) | ||
| 758 | return result; | ||
| 759 | |||
| 760 | rcdev = devm_rc_allocate_device(&serial_ir.pdev->dev, RC_DRIVER_IR_RAW); | ||
| 761 | if (!rcdev) { | ||
| 762 | result = -ENOMEM; | ||
| 763 | goto serial_cleanup; | ||
| 764 | } | ||
| 765 | |||
| 766 | if (hardware[type].send_pulse && hardware[type].send_space) | ||
| 767 | rcdev->tx_ir = serial_ir_tx; | ||
| 768 | if (hardware[type].set_send_carrier) | ||
| 769 | rcdev->s_tx_carrier = serial_ir_tx_carrier; | ||
| 770 | if (hardware[type].set_duty_cycle) | ||
| 771 | rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle; | ||
| 772 | |||
| 773 | switch (type) { | ||
| 774 | case IR_HOMEBREW: | ||
| 775 | rcdev->input_name = "Serial IR type home-brew"; | ||
| 776 | break; | ||
| 777 | case IR_IRDEO: | ||
| 778 | rcdev->input_name = "Serial IR type IRdeo"; | ||
| 779 | break; | ||
| 780 | case IR_IRDEO_REMOTE: | ||
| 781 | rcdev->input_name = "Serial IR type IRdeo remote"; | ||
| 782 | break; | ||
| 783 | case IR_ANIMAX: | ||
| 784 | rcdev->input_name = "Serial IR type AnimaX"; | ||
| 785 | break; | ||
| 786 | case IR_IGOR: | ||
| 787 | rcdev->input_name = "Serial IR type IgorPlug"; | ||
| 788 | break; | ||
| 789 | } | ||
| 790 | |||
| 791 | rcdev->input_phys = KBUILD_MODNAME "/input0"; | ||
| 792 | rcdev->input_id.bustype = BUS_HOST; | ||
| 793 | rcdev->input_id.vendor = 0x0001; | ||
| 794 | rcdev->input_id.product = 0x0001; | ||
| 795 | rcdev->input_id.version = 0x0100; | ||
| 796 | rcdev->open = serial_ir_open; | ||
| 797 | rcdev->close = serial_ir_close; | ||
| 798 | rcdev->dev.parent = &serial_ir.pdev->dev; | ||
| 799 | rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; | ||
| 800 | rcdev->driver_name = KBUILD_MODNAME; | ||
| 801 | rcdev->map_name = RC_MAP_RC6_MCE; | ||
| 802 | rcdev->min_timeout = 1; | ||
| 803 | rcdev->timeout = IR_DEFAULT_TIMEOUT; | ||
| 804 | rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT; | ||
| 805 | rcdev->rx_resolution = 250000; | ||
| 806 | |||
| 807 | serial_ir.rcdev = rcdev; | ||
| 808 | |||
| 809 | result = rc_register_device(rcdev); | ||
| 810 | |||
| 811 | if (!result) | 813 | if (!result) |
| 812 | return 0; | 814 | return 0; |
| 813 | serial_cleanup: | 815 | |
| 814 | serial_ir_exit(); | 816 | serial_ir_exit(); |
| 815 | return result; | 817 | return result; |
| 816 | } | 818 | } |
| @@ -818,7 +820,6 @@ serial_cleanup: | |||
| 818 | static void __exit serial_ir_exit_module(void) | 820 | static void __exit serial_ir_exit_module(void) |
| 819 | { | 821 | { |
| 820 | del_timer_sync(&serial_ir.timeout_timer); | 822 | del_timer_sync(&serial_ir.timeout_timer); |
| 821 | rc_unregister_device(serial_ir.rcdev); | ||
| 822 | serial_ir_exit(); | 823 | serial_ir_exit(); |
| 823 | } | 824 | } |
| 824 | 825 | ||
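
Moving the rc_dev setup into probe() lets the driver lean on the managed helpers: the device is allocated and registered against &dev->dev with devm_rc_allocate_device()/devm_rc_register_device(), so it is released automatically and the exit path no longer needs rc_unregister_device(). A hedged sketch of that probe() shape; the strings are placeholders and the hardware setup is elided:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <media/rc-core.h>

static int demo_ir_probe(struct platform_device *pdev)
{
	struct rc_dev *rcdev;

	/* Freed automatically when the platform device goes away. */
	rcdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
	if (!rcdev)
		return -ENOMEM;

	rcdev->input_name = "Demo serial IR receiver";
	rcdev->driver_name = KBUILD_MODNAME;
	rcdev->map_name = RC_MAP_RC6_MCE;
	rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;

	/* IRQ/port setup would go here, before registration. */

	return devm_rc_register_device(&pdev->dev, rcdev);
}
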
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index 6ca502d834b4..4f42d57f81d9 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c | |||
| @@ -68,6 +68,7 @@ | |||
| 68 | struct dw2102_state { | 68 | struct dw2102_state { |
| 69 | u8 initialized; | 69 | u8 initialized; |
| 70 | u8 last_lock; | 70 | u8 last_lock; |
| 71 | u8 data[MAX_XFER_SIZE + 4]; | ||
| 71 | struct i2c_client *i2c_client_demod; | 72 | struct i2c_client *i2c_client_demod; |
| 72 | struct i2c_client *i2c_client_tuner; | 73 | struct i2c_client *i2c_client_tuner; |
| 73 | 74 | ||
| @@ -661,62 +662,72 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], | |||
| 661 | int num) | 662 | int num) |
| 662 | { | 663 | { |
| 663 | struct dvb_usb_device *d = i2c_get_adapdata(adap); | 664 | struct dvb_usb_device *d = i2c_get_adapdata(adap); |
| 664 | u8 obuf[0x40], ibuf[0x40]; | 665 | struct dw2102_state *state; |
| 665 | 666 | ||
| 666 | if (!d) | 667 | if (!d) |
| 667 | return -ENODEV; | 668 | return -ENODEV; |
| 669 | |||
| 670 | state = d->priv; | ||
| 671 | |||
| 668 | if (mutex_lock_interruptible(&d->i2c_mutex) < 0) | 672 | if (mutex_lock_interruptible(&d->i2c_mutex) < 0) |
| 669 | return -EAGAIN; | 673 | return -EAGAIN; |
| 674 | if (mutex_lock_interruptible(&d->data_mutex) < 0) { | ||
| 675 | mutex_unlock(&d->i2c_mutex); | ||
| 676 | return -EAGAIN; | ||
| 677 | } | ||
| 670 | 678 | ||
| 671 | switch (num) { | 679 | switch (num) { |
| 672 | case 1: | 680 | case 1: |
| 673 | switch (msg[0].addr) { | 681 | switch (msg[0].addr) { |
| 674 | case SU3000_STREAM_CTRL: | 682 | case SU3000_STREAM_CTRL: |
| 675 | obuf[0] = msg[0].buf[0] + 0x36; | 683 | state->data[0] = msg[0].buf[0] + 0x36; |
| 676 | obuf[1] = 3; | 684 | state->data[1] = 3; |
| 677 | obuf[2] = 0; | 685 | state->data[2] = 0; |
| 678 | if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0) | 686 | if (dvb_usb_generic_rw(d, state->data, 3, |
| 687 | state->data, 0, 0) < 0) | ||
| 679 | err("i2c transfer failed."); | 688 | err("i2c transfer failed."); |
| 680 | break; | 689 | break; |
| 681 | case DW2102_RC_QUERY: | 690 | case DW2102_RC_QUERY: |
| 682 | obuf[0] = 0x10; | 691 | state->data[0] = 0x10; |
| 683 | if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0) | 692 | if (dvb_usb_generic_rw(d, state->data, 1, |
| 693 | state->data, 2, 0) < 0) | ||
| 684 | err("i2c transfer failed."); | 694 | err("i2c transfer failed."); |
| 685 | msg[0].buf[1] = ibuf[0]; | 695 | msg[0].buf[1] = state->data[0]; |
| 686 | msg[0].buf[0] = ibuf[1]; | 696 | msg[0].buf[0] = state->data[1]; |
| 687 | break; | 697 | break; |
| 688 | default: | 698 | default: |
| 689 | /* always i2c write*/ | 699 | /* always i2c write*/ |
| 690 | obuf[0] = 0x08; | 700 | state->data[0] = 0x08; |
| 691 | obuf[1] = msg[0].addr; | 701 | state->data[1] = msg[0].addr; |
| 692 | obuf[2] = msg[0].len; | 702 | state->data[2] = msg[0].len; |
| 693 | 703 | ||
| 694 | memcpy(&obuf[3], msg[0].buf, msg[0].len); | 704 | memcpy(&state->data[3], msg[0].buf, msg[0].len); |
| 695 | 705 | ||
| 696 | if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3, | 706 | if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3, |
| 697 | ibuf, 1, 0) < 0) | 707 | state->data, 1, 0) < 0) |
| 698 | err("i2c transfer failed."); | 708 | err("i2c transfer failed."); |
| 699 | 709 | ||
| 700 | } | 710 | } |
| 701 | break; | 711 | break; |
| 702 | case 2: | 712 | case 2: |
| 703 | /* always i2c read */ | 713 | /* always i2c read */ |
| 704 | obuf[0] = 0x09; | 714 | state->data[0] = 0x09; |
| 705 | obuf[1] = msg[0].len; | 715 | state->data[1] = msg[0].len; |
| 706 | obuf[2] = msg[1].len; | 716 | state->data[2] = msg[1].len; |
| 707 | obuf[3] = msg[0].addr; | 717 | state->data[3] = msg[0].addr; |
| 708 | memcpy(&obuf[4], msg[0].buf, msg[0].len); | 718 | memcpy(&state->data[4], msg[0].buf, msg[0].len); |
| 709 | 719 | ||
| 710 | if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4, | 720 | if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4, |
| 711 | ibuf, msg[1].len + 1, 0) < 0) | 721 | state->data, msg[1].len + 1, 0) < 0) |
| 712 | err("i2c transfer failed."); | 722 | err("i2c transfer failed."); |
| 713 | 723 | ||
| 714 | memcpy(msg[1].buf, &ibuf[1], msg[1].len); | 724 | memcpy(msg[1].buf, &state->data[1], msg[1].len); |
| 715 | break; | 725 | break; |
| 716 | default: | 726 | default: |
| 717 | warn("more than 2 i2c messages at a time is not handled yet."); | 727 | warn("more than 2 i2c messages at a time is not handled yet."); |
| 718 | break; | 728 | break; |
| 719 | } | 729 | } |
| 730 | mutex_unlock(&d->data_mutex); | ||
| 720 | mutex_unlock(&d->i2c_mutex); | 731 | mutex_unlock(&d->i2c_mutex); |
| 721 | return num; | 732 | return num; |
| 722 | } | 733 | } |
| @@ -844,17 +855,23 @@ static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) | |||
| 844 | static int su3000_power_ctrl(struct dvb_usb_device *d, int i) | 855 | static int su3000_power_ctrl(struct dvb_usb_device *d, int i) |
| 845 | { | 856 | { |
| 846 | struct dw2102_state *state = (struct dw2102_state *)d->priv; | 857 | struct dw2102_state *state = (struct dw2102_state *)d->priv; |
| 847 | u8 obuf[] = {0xde, 0}; | 858 | int ret = 0; |
| 848 | 859 | ||
| 849 | info("%s: %d, initialized %d", __func__, i, state->initialized); | 860 | info("%s: %d, initialized %d", __func__, i, state->initialized); |
| 850 | 861 | ||
| 851 | if (i && !state->initialized) { | 862 | if (i && !state->initialized) { |
| 863 | mutex_lock(&d->data_mutex); | ||
| 864 | |||
| 865 | state->data[0] = 0xde; | ||
| 866 | state->data[1] = 0; | ||
| 867 | |||
| 852 | state->initialized = 1; | 868 | state->initialized = 1; |
| 853 | /* reset board */ | 869 | /* reset board */ |
| 854 | return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0); | 870 | ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0); |
| 871 | mutex_unlock(&d->data_mutex); | ||
| 855 | } | 872 | } |
| 856 | 873 | ||
| 857 | return 0; | 874 | return ret; |
| 858 | } | 875 | } |
| 859 | 876 | ||
| 860 | static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) | 877 | static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) |
| @@ -1309,49 +1326,57 @@ static int prof_7500_frontend_attach(struct dvb_usb_adapter *d) | |||
| 1309 | return 0; | 1326 | return 0; |
| 1310 | } | 1327 | } |
| 1311 | 1328 | ||
| 1312 | static int su3000_frontend_attach(struct dvb_usb_adapter *d) | 1329 | static int su3000_frontend_attach(struct dvb_usb_adapter *adap) |
| 1313 | { | 1330 | { |
| 1314 | u8 obuf[3] = { 0xe, 0x80, 0 }; | 1331 | struct dvb_usb_device *d = adap->dev; |
| 1315 | u8 ibuf[] = { 0 }; | 1332 | struct dw2102_state *state = d->priv; |
| 1333 | |||
| 1334 | mutex_lock(&d->data_mutex); | ||
| 1335 | |||
| 1336 | state->data[0] = 0xe; | ||
| 1337 | state->data[1] = 0x80; | ||
| 1338 | state->data[2] = 0; | ||
| 1316 | 1339 | ||
| 1317 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1340 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
| 1318 | err("command 0x0e transfer failed."); | 1341 | err("command 0x0e transfer failed."); |
| 1319 | 1342 | ||
| 1320 | obuf[0] = 0xe; | 1343 | state->data[0] = 0xe; |
| 1321 | obuf[1] = 0x02; | 1344 | state->data[1] = 0x02; |
| 1322 | obuf[2] = 1; | 1345 | state->data[2] = 1; |
| 1323 | 1346 | ||
| 1324 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1347 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
| 1325 | err("command 0x0e transfer failed."); | 1348 | err("command 0x0e transfer failed."); |
| 1326 | msleep(300); | 1349 | msleep(300); |
| 1327 | 1350 | ||
| 1328 | obuf[0] = 0xe; | 1351 | state->data[0] = 0xe; |
| 1329 | obuf[1] = 0x83; | 1352 | state->data[1] = 0x83; |
| 1330 | obuf[2] = 0; | 1353 | state->data[2] = 0; |
| 1331 | 1354 | ||
| 1332 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1355 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
| 1333 | err("command 0x0e transfer failed."); | 1356 | err("command 0x0e transfer failed."); |
| 1334 | 1357 | ||
| 1335 | obuf[0] = 0xe; | 1358 | state->data[0] = 0xe; |
| 1336 | obuf[1] = 0x83; | 1359 | state->data[1] = 0x83; |
| 1337 | obuf[2] = 1; | 1360 | state->data[2] = 1; |
| 1338 | 1361 | ||
| 1339 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1362 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
| 1340 | err("command 0x0e transfer failed."); | 1363 | err("command 0x0e transfer failed."); |
| 1341 | 1364 | ||
| 1342 | obuf[0] = 0x51; | 1365 | state->data[0] = 0x51; |
| 1343 | 1366 | ||
| 1344 | if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) | 1367 | if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) |
| 1345 | err("command 0x51 transfer failed."); | 1368 | err("command 0x51 transfer failed."); |
| 1346 | 1369 | ||
| 1347 | d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config, | 1370 | mutex_unlock(&d->data_mutex); |
| 1348 | &d->dev->i2c_adap); | 1371 | |
| 1349 | if (d->fe_adap[0].fe == NULL) | 1372 | adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config, |
| 1373 | &d->i2c_adap); | ||
| 1374 | if (adap->fe_adap[0].fe == NULL) | ||
| 1350 | return -EIO; | 1375 | return -EIO; |
| 1351 | 1376 | ||
| 1352 | if (dvb_attach(ts2020_attach, d->fe_adap[0].fe, | 1377 | if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe, |
| 1353 | &dw2104_ts2020_config, | 1378 | &dw2104_ts2020_config, |
| 1354 | &d->dev->i2c_adap)) { | 1379 | &d->i2c_adap)) { |
| 1355 | info("Attached DS3000/TS2020!"); | 1380 | info("Attached DS3000/TS2020!"); |
| 1356 | return 0; | 1381 | return 0; |
| 1357 | } | 1382 | } |
| @@ -1360,47 +1385,55 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d) | |||
| 1360 | return -EIO; | 1385 | return -EIO; |
| 1361 | } | 1386 | } |
| 1362 | 1387 | ||
| 1363 | static int t220_frontend_attach(struct dvb_usb_adapter *d) | 1388 | static int t220_frontend_attach(struct dvb_usb_adapter *adap) |
| 1364 | { | 1389 | { |
| 1365 | u8 obuf[3] = { 0xe, 0x87, 0 }; | 1390 | struct dvb_usb_device *d = adap->dev; |
| 1366 | u8 ibuf[] = { 0 }; | 1391 | struct dw2102_state *state = d->priv; |
| 1392 | |||
| 1393 | mutex_lock(&d->data_mutex); | ||
| 1367 | 1394 | ||
| 1368 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1395 | state->data[0] = 0xe; |
| 1396 | state->data[1] = 0x87; | ||
| 1397 | state->data[2] = 0x0; | ||
| 1398 | |||
| 1399 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) | ||
| 1369 | err("command 0x0e transfer failed."); | 1400 | err("command 0x0e transfer failed."); |
| 1370 | 1401 | ||
| 1371 | obuf[0] = 0xe; | 1402 | state->data[0] = 0xe; |
| 1372 | obuf[1] = 0x86; | 1403 | state->data[1] = 0x86; |
| 1373 | obuf[2] = 1; | 1404 | state->data[2] = 1; |
| 1374 | 1405 | ||
| 1375 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1406 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
| 1376 | err("command 0x0e transfer failed."); | 1407 | err("command 0x0e transfer failed."); |
| 1377 | 1408 | ||
| 1378 | obuf[0] = 0xe; | 1409 | state->data[0] = 0xe; |
| 1379 | obuf[1] = 0x80; | 1410 | state->data[1] = 0x80; |
| 1380 | obuf[2] = 0; | 1411 | state->data[2] = 0; |
| 1381 | 1412 | ||
| 1382 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1413 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
| 1383 | err("command 0x0e transfer failed."); | 1414 | err("command 0x0e transfer failed."); |
| 1384 | 1415 | ||
| 1385 | msleep(50); | 1416 | msleep(50); |
| 1386 | 1417 | ||
| 1387 | obuf[0] = 0xe; | 1418 | state->data[0] = 0xe; |
| 1388 | obuf[1] = 0x80; | 1419 | state->data[1] = 0x80; |
| 1389 | obuf[2] = 1; | 1420 | state->data[2] = 1; |
| 1390 | 1421 | ||
| 1391 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1422 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
| 1392 | err("command 0x0e transfer failed."); | 1423 | err("command 0x0e transfer failed."); |
| 1393 | 1424 | ||
| 1394 | obuf[0] = 0x51; | 1425 | state->data[0] = 0x51; |
| 1395 | 1426 | ||
| 1396 | if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) | 1427 | if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) |
| 1397 | err("command 0x51 transfer failed."); | 1428 | err("command 0x51 transfer failed."); |
| 1398 | 1429 | ||
| 1399 | d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config, | 1430 | mutex_unlock(&d->data_mutex); |
| 1400 | &d->dev->i2c_adap, NULL); | 1431 | |
| 1401 | if (d->fe_adap[0].fe != NULL) { | 1432 | adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config, |
| 1402 | if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60, | 1433 | &d->i2c_adap, NULL); |
| 1403 | &d->dev->i2c_adap, &tda18271_config)) { | 1434 | if (adap->fe_adap[0].fe != NULL) { |
| 1435 | if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60, | ||
| 1436 | &d->i2c_adap, &tda18271_config)) { | ||
| 1404 | info("Attached TDA18271HD/CXD2820R!"); | 1437 | info("Attached TDA18271HD/CXD2820R!"); |
| 1405 | return 0; | 1438 | return 0; |
| 1406 | } | 1439 | } |
| @@ -1410,23 +1443,30 @@ static int t220_frontend_attach(struct dvb_usb_adapter *d) | |||
| 1410 | return -EIO; | 1443 | return -EIO; |
| 1411 | } | 1444 | } |
| 1412 | 1445 | ||
| 1413 | static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d) | 1446 | static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap) |
| 1414 | { | 1447 | { |
| 1415 | u8 obuf[] = { 0x51 }; | 1448 | struct dvb_usb_device *d = adap->dev; |
| 1416 | u8 ibuf[] = { 0 }; | 1449 | struct dw2102_state *state = d->priv; |
| 1450 | |||
| 1451 | mutex_lock(&d->data_mutex); | ||
| 1417 | 1452 | ||
| 1418 | if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) | 1453 | state->data[0] = 0x51; |
| 1454 | |||
| 1455 | if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) | ||
| 1419 | err("command 0x51 transfer failed."); | 1456 | err("command 0x51 transfer failed."); |
| 1420 | 1457 | ||
| 1421 | d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config, | 1458 | mutex_unlock(&d->data_mutex); |
| 1422 | &d->dev->i2c_adap); | ||
| 1423 | 1459 | ||
| 1424 | if (d->fe_adap[0].fe == NULL) | 1460 | adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach, |
| 1461 | &s421_m88rs2000_config, | ||
| 1462 | &d->i2c_adap); | ||
| 1463 | |||
| 1464 | if (adap->fe_adap[0].fe == NULL) | ||
| 1425 | return -EIO; | 1465 | return -EIO; |
| 1426 | 1466 | ||
| 1427 | if (dvb_attach(ts2020_attach, d->fe_adap[0].fe, | 1467 | if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe, |
| 1428 | &dw2104_ts2020_config, | 1468 | &dw2104_ts2020_config, |
| 1429 | &d->dev->i2c_adap)) { | 1469 | &d->i2c_adap)) { |
| 1430 | info("Attached RS2000/TS2020!"); | 1470 | info("Attached RS2000/TS2020!"); |
| 1431 | return 0; | 1471 | return 0; |
| 1432 | } | 1472 | } |
| @@ -1439,44 +1479,50 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap) | |||
| 1439 | { | 1479 | { |
| 1440 | struct dvb_usb_device *d = adap->dev; | 1480 | struct dvb_usb_device *d = adap->dev; |
| 1441 | struct dw2102_state *state = d->priv; | 1481 | struct dw2102_state *state = d->priv; |
| 1442 | u8 obuf[3] = { 0xe, 0x80, 0 }; | ||
| 1443 | u8 ibuf[] = { 0 }; | ||
| 1444 | struct i2c_adapter *i2c_adapter; | 1482 | struct i2c_adapter *i2c_adapter; |
| 1445 | struct i2c_client *client; | 1483 | struct i2c_client *client; |
| 1446 | struct i2c_board_info board_info; | 1484 | struct i2c_board_info board_info; |
| 1447 | struct m88ds3103_platform_data m88ds3103_pdata = {}; | 1485 | struct m88ds3103_platform_data m88ds3103_pdata = {}; |
| 1448 | struct ts2020_config ts2020_config = {}; | 1486 | struct ts2020_config ts2020_config = {}; |
| 1449 | 1487 | ||
| 1450 | if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) | 1488 | mutex_lock(&d->data_mutex); |
| 1489 | |||
| 1490 | state->data[0] = 0xe; | ||
| 1491 | state->data[1] = 0x80; | ||
| 1492 | state->data[2] = 0x0; | ||
| 1493 | |||
| 1494 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) | ||
| 1451 | err("command 0x0e transfer failed."); | 1495 | err("command 0x0e transfer failed."); |
| 1452 | 1496 | ||
| 1453 | obuf[0] = 0xe; | 1497 | state->data[0] = 0xe; |
| 1454 | obuf[1] = 0x02; | 1498 | state->data[1] = 0x02; |
| 1455 | obuf[2] = 1; | 1499 | state->data[2] = 1; |
| 1456 | 1500 | ||
| 1457 | if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) | 1501 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
| 1458 | err("command 0x0e transfer failed."); | 1502 | err("command 0x0e transfer failed."); |
| 1459 | msleep(300); | 1503 | msleep(300); |
| 1460 | 1504 | ||
| 1461 | obuf[0] = 0xe; | 1505 | state->data[0] = 0xe; |
| 1462 | obuf[1] = 0x83; | 1506 | state->data[1] = 0x83; |
| 1463 | obuf[2] = 0; | 1507 | state->data[2] = 0; |
| 1464 | 1508 | ||
| 1465 | if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) | 1509 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
| 1466 | err("command 0x0e transfer failed."); | 1510 | err("command 0x0e transfer failed."); |
| 1467 | 1511 | ||
| 1468 | obuf[0] = 0xe; | 1512 | state->data[0] = 0xe; |
| 1469 | obuf[1] = 0x83; | 1513 | state->data[1] = 0x83; |
| 1470 | obuf[2] = 1; | 1514 | state->data[2] = 1; |
| 1471 | 1515 | ||
| 1472 | if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) | 1516 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
| 1473 | err("command 0x0e transfer failed."); | 1517 | err("command 0x0e transfer failed."); |
| 1474 | 1518 | ||
| 1475 | obuf[0] = 0x51; | 1519 | state->data[0] = 0x51; |
| 1476 | 1520 | ||
| 1477 | if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0) | 1521 | if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) |
| 1478 | err("command 0x51 transfer failed."); | 1522 | err("command 0x51 transfer failed."); |
| 1479 | 1523 | ||
| 1524 | mutex_unlock(&d->data_mutex); | ||
| 1525 | |||
| 1480 | /* attach demod */ | 1526 | /* attach demod */ |
| 1481 | m88ds3103_pdata.clk = 27000000; | 1527 | m88ds3103_pdata.clk = 27000000; |
| 1482 | m88ds3103_pdata.i2c_wr_max = 33; | 1528 | m88ds3103_pdata.i2c_wr_max = 33; |
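
The common thread in these dw2102 hunks is that buffers handed to dvb_usb_generic_rw() must be DMA-capable, so the on-stack obuf/ibuf arrays are replaced by a scratch buffer embedded in the kmalloc'ed device state and serialized with d->data_mutex. A reduced sketch of that pattern; demo_* names stand in for the driver's state and its USB helper:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_state {
	struct mutex data_mutex;	/* serializes users of the one buffer */
	u8 data[64];			/* lives in kmalloc'ed state: DMA-safe */
};

static int demo_send_cmd(struct demo_state *st, const u8 *cmd, size_t len)
{
	int ret = 0;

	if (len > sizeof(st->data))
		return -EINVAL;

	mutex_lock(&st->data_mutex);
	memcpy(st->data, cmd, len);
	/* A real driver would now pass st->data to its USB transfer helper. */
	mutex_unlock(&st->data_mutex);

	return ret;
}
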
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index 6fb773dbcd0c..93be82fc338a 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c | |||
| @@ -219,15 +219,20 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, | |||
| 219 | int write, unsigned long *paddr, int *pageshift) | 219 | int write, unsigned long *paddr, int *pageshift) |
| 220 | { | 220 | { |
| 221 | pgd_t *pgdp; | 221 | pgd_t *pgdp; |
| 222 | pmd_t *pmdp; | 222 | p4d_t *p4dp; |
| 223 | pud_t *pudp; | 223 | pud_t *pudp; |
| 224 | pmd_t *pmdp; | ||
| 224 | pte_t pte; | 225 | pte_t pte; |
| 225 | 226 | ||
| 226 | pgdp = pgd_offset(vma->vm_mm, vaddr); | 227 | pgdp = pgd_offset(vma->vm_mm, vaddr); |
| 227 | if (unlikely(pgd_none(*pgdp))) | 228 | if (unlikely(pgd_none(*pgdp))) |
| 228 | goto err; | 229 | goto err; |
| 229 | 230 | ||
| 230 | pudp = pud_offset(pgdp, vaddr); | 231 | p4dp = p4d_offset(pgdp, vaddr); |
| 232 | if (unlikely(p4d_none(*p4dp))) | ||
| 233 | goto err; | ||
| 234 | |||
| 235 | pudp = pud_offset(p4dp, vaddr); | ||
| 231 | if (unlikely(pud_none(*pudp))) | 236 | if (unlikely(pud_none(*pudp))) |
| 232 | goto err; | 237 | goto err; |
| 233 | 238 | ||
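
With 5-level page tables the software walk gains a p4d step between pgd and pud, and each level must be checked before descending, exactly as added above. A minimal sketch of such a presence check; unlike the real lookup it ignores huge pages and takes no locks:

#include <linux/mm.h>

static bool demo_vaddr_mapped(struct mm_struct *mm, unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset(mm, vaddr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return false;

	p4d = p4d_offset(pgd, vaddr);		/* new level in the walk */
	if (p4d_none(*p4d))
		return false;

	pud = pud_offset(p4d, vaddr);		/* now takes the p4d entry */
	if (pud_none(*pud))
		return false;

	pmd = pmd_offset(pud, vaddr);
	return !pmd_none(*pmd);
}
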
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 1ae872bfc3ba..747645c74134 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c | |||
| @@ -186,7 +186,7 @@ static inline int write_enable(struct spi_nor *nor) | |||
| 186 | } | 186 | } |
| 187 | 187 | ||
| 188 | /* | 188 | /* |
| 189 | * Send write disble instruction to the chip. | 189 | * Send write disable instruction to the chip. |
| 190 | */ | 190 | */ |
| 191 | static inline int write_disable(struct spi_nor *nor) | 191 | static inline int write_disable(struct spi_nor *nor) |
| 192 | { | 192 | { |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 6d31f92ef2b6..84ac50f92c9c 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h | |||
| @@ -1162,8 +1162,8 @@ struct ob_mac_tso_iocb_rsp { | |||
| 1162 | struct ib_mac_iocb_rsp { | 1162 | struct ib_mac_iocb_rsp { |
| 1163 | u8 opcode; /* 0x20 */ | 1163 | u8 opcode; /* 0x20 */ |
| 1164 | u8 flags1; | 1164 | u8 flags1; |
| 1165 | #define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */ | 1165 | #define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */ |
| 1166 | #define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */ | 1166 | #define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */ |
| 1167 | #define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ | 1167 | #define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ |
| 1168 | #define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ | 1168 | #define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ |
| 1169 | #define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ | 1169 | #define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ |
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c index 993b650ef275..44f774c12fb2 100644 --- a/drivers/pci/dwc/pci-exynos.c +++ b/drivers/pci/dwc/pci-exynos.c | |||
| @@ -132,10 +132,6 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, | |||
| 132 | struct device *dev = pci->dev; | 132 | struct device *dev = pci->dev; |
| 133 | struct resource *res; | 133 | struct resource *res; |
| 134 | 134 | ||
| 135 | /* If using the PHY framework, doesn't need to get other resource */ | ||
| 136 | if (ep->using_phy) | ||
| 137 | return 0; | ||
| 138 | |||
| 139 | ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL); | 135 | ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL); |
| 140 | if (!ep->mem_res) | 136 | if (!ep->mem_res) |
| 141 | return -ENOMEM; | 137 | return -ENOMEM; |
| @@ -145,6 +141,10 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, | |||
| 145 | if (IS_ERR(ep->mem_res->elbi_base)) | 141 | if (IS_ERR(ep->mem_res->elbi_base)) |
| 146 | return PTR_ERR(ep->mem_res->elbi_base); | 142 | return PTR_ERR(ep->mem_res->elbi_base); |
| 147 | 143 | ||
| 144 | /* If using the PHY framework, doesn't need to get other resource */ | ||
| 145 | if (ep->using_phy) | ||
| 146 | return 0; | ||
| 147 | |||
| 148 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 148 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 149 | ep->mem_res->phy_base = devm_ioremap_resource(dev, res); | 149 | ep->mem_res->phy_base = devm_ioremap_resource(dev, res); |
| 150 | if (IS_ERR(ep->mem_res->phy_base)) | 150 | if (IS_ERR(ep->mem_res->phy_base)) |
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 973472c23d89..1dfa10cc566b 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
| @@ -478,7 +478,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link, | |||
| 478 | 478 | ||
| 479 | static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) | 479 | static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) |
| 480 | { | 480 | { |
| 481 | struct pci_dev *child, *parent = link->pdev; | 481 | struct pci_dev *child = link->downstream, *parent = link->pdev; |
| 482 | struct pci_bus *linkbus = parent->subordinate; | 482 | struct pci_bus *linkbus = parent->subordinate; |
| 483 | struct aspm_register_info upreg, dwreg; | 483 | struct aspm_register_info upreg, dwreg; |
| 484 | 484 | ||
| @@ -491,9 +491,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) | |||
| 491 | 491 | ||
| 492 | /* Get upstream/downstream components' register state */ | 492 | /* Get upstream/downstream components' register state */ |
| 493 | pcie_get_aspm_reg(parent, &upreg); | 493 | pcie_get_aspm_reg(parent, &upreg); |
| 494 | child = pci_function_0(linkbus); | ||
| 495 | pcie_get_aspm_reg(child, &dwreg); | 494 | pcie_get_aspm_reg(child, &dwreg); |
| 496 | link->downstream = child; | ||
| 497 | 495 | ||
| 498 | /* | 496 | /* |
| 499 | * If ASPM not supported, don't mess with the clocks and link, | 497 | * If ASPM not supported, don't mess with the clocks and link, |
| @@ -800,6 +798,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) | |||
| 800 | INIT_LIST_HEAD(&link->children); | 798 | INIT_LIST_HEAD(&link->children); |
| 801 | INIT_LIST_HEAD(&link->link); | 799 | INIT_LIST_HEAD(&link->link); |
| 802 | link->pdev = pdev; | 800 | link->pdev = pdev; |
| 801 | link->downstream = pci_function_0(pdev->subordinate); | ||
| 803 | 802 | ||
| 804 | /* | 803 | /* |
| 805 | * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe | 804 | * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index f754453fe754..673683660b5c 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -2174,6 +2174,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd); | |||
| 2174 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd); | 2174 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd); |
| 2175 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, | 2175 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, |
| 2176 | quirk_blacklist_vpd); | 2176 | quirk_blacklist_vpd); |
| 2177 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd); | ||
| 2177 | 2178 | ||
| 2178 | /* | 2179 | /* |
| 2179 | * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the | 2180 | * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the |
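
Entries such as the QLogic one above are added with a single DECLARE_PCI_FIXUP_FINAL() line: the macro registers a callback that the PCI core invokes for matching vendor/device IDs during the final fixup phase. A hedged sketch of declaring such a quirk; the device ID and the quirk body here are invented:

#include <linux/pci.h>

static void demo_quirk_note(struct pci_dev *dev)
{
	/* A real quirk would adjust device state, e.g. block VPD access. */
	dev_info(&dev->dev, "demo fixup applied\n");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x1234, demo_quirk_note);
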
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index f8e9e1c2b2f6..c978be5eb9eb 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c | |||
| @@ -422,6 +422,20 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in | |||
| 422 | return 0; | 422 | return 0; |
| 423 | } | 423 | } |
| 424 | 424 | ||
| 425 | static int msm_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) | ||
| 426 | { | ||
| 427 | struct msm_pinctrl *pctrl = gpiochip_get_data(chip); | ||
| 428 | const struct msm_pingroup *g; | ||
| 429 | u32 val; | ||
| 430 | |||
| 431 | g = &pctrl->soc->groups[offset]; | ||
| 432 | |||
| 433 | val = readl(pctrl->regs + g->ctl_reg); | ||
| 434 | |||
| 435 | /* 0 = output, 1 = input */ | ||
| 436 | return val & BIT(g->oe_bit) ? 0 : 1; | ||
| 437 | } | ||
| 438 | |||
| 425 | static int msm_gpio_get(struct gpio_chip *chip, unsigned offset) | 439 | static int msm_gpio_get(struct gpio_chip *chip, unsigned offset) |
| 426 | { | 440 | { |
| 427 | const struct msm_pingroup *g; | 441 | const struct msm_pingroup *g; |
| @@ -510,6 +524,7 @@ static void msm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) | |||
| 510 | static struct gpio_chip msm_gpio_template = { | 524 | static struct gpio_chip msm_gpio_template = { |
| 511 | .direction_input = msm_gpio_direction_input, | 525 | .direction_input = msm_gpio_direction_input, |
| 512 | .direction_output = msm_gpio_direction_output, | 526 | .direction_output = msm_gpio_direction_output, |
| 527 | .get_direction = msm_gpio_get_direction, | ||
| 513 | .get = msm_gpio_get, | 528 | .get = msm_gpio_get, |
| 514 | .set = msm_gpio_set, | 529 | .set = msm_gpio_set, |
| 515 | .request = gpiochip_generic_request, | 530 | .request = gpiochip_generic_request, |
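
The gpio_chip ->get_direction() convention is the one spelled out in the new callback's comment: return 0 for an output line, 1 for an input line, negative values for errors. A hedged sketch of such a callback with an invented register layout (DEMO_OE_BIT and the 4-byte stride are assumptions):

#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>

#define DEMO_OE_BIT	BIT(9)		/* hypothetical output-enable bit */

struct demo_gpio {
	struct gpio_chip chip;
	void __iomem *regs;
};

static int demo_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct demo_gpio *g = gpiochip_get_data(chip);
	u32 val = readl(g->regs + offset * 4);	/* one control reg per line */

	/* 0 = output, 1 = input */
	return val & DEMO_OE_BIT ? 0 : 1;
}
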
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c index 77a0236ee781..83f8864fa76a 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c | |||
| @@ -390,22 +390,22 @@ static const struct pinctrl_pin_desc uniphier_ld11_pins[] = { | |||
| 390 | UNIPHIER_PINCTRL_PIN(140, "AO1D0", 140, | 390 | UNIPHIER_PINCTRL_PIN(140, "AO1D0", 140, |
| 391 | 140, UNIPHIER_PIN_DRV_1BIT, | 391 | 140, UNIPHIER_PIN_DRV_1BIT, |
| 392 | 140, UNIPHIER_PIN_PULL_DOWN), | 392 | 140, UNIPHIER_PIN_PULL_DOWN), |
| 393 | UNIPHIER_PINCTRL_PIN(141, "TCON0", 141, | 393 | UNIPHIER_PINCTRL_PIN(141, "AO1D1", 141, |
| 394 | 141, UNIPHIER_PIN_DRV_1BIT, | 394 | 141, UNIPHIER_PIN_DRV_1BIT, |
| 395 | 141, UNIPHIER_PIN_PULL_DOWN), | 395 | 141, UNIPHIER_PIN_PULL_DOWN), |
| 396 | UNIPHIER_PINCTRL_PIN(142, "TCON1", 142, | 396 | UNIPHIER_PINCTRL_PIN(142, "AO1D2", 142, |
| 397 | 142, UNIPHIER_PIN_DRV_1BIT, | 397 | 142, UNIPHIER_PIN_DRV_1BIT, |
| 398 | 142, UNIPHIER_PIN_PULL_DOWN), | 398 | 142, UNIPHIER_PIN_PULL_DOWN), |
| 399 | UNIPHIER_PINCTRL_PIN(143, "TCON2", 143, | 399 | UNIPHIER_PINCTRL_PIN(143, "XIRQ9", 143, |
| 400 | 143, UNIPHIER_PIN_DRV_1BIT, | 400 | 143, UNIPHIER_PIN_DRV_1BIT, |
| 401 | 143, UNIPHIER_PIN_PULL_DOWN), | 401 | 143, UNIPHIER_PIN_PULL_DOWN), |
| 402 | UNIPHIER_PINCTRL_PIN(144, "TCON3", 144, | 402 | UNIPHIER_PINCTRL_PIN(144, "XIRQ10", 144, |
| 403 | 144, UNIPHIER_PIN_DRV_1BIT, | 403 | 144, UNIPHIER_PIN_DRV_1BIT, |
| 404 | 144, UNIPHIER_PIN_PULL_DOWN), | 404 | 144, UNIPHIER_PIN_PULL_DOWN), |
| 405 | UNIPHIER_PINCTRL_PIN(145, "TCON4", 145, | 405 | UNIPHIER_PINCTRL_PIN(145, "XIRQ11", 145, |
| 406 | 145, UNIPHIER_PIN_DRV_1BIT, | 406 | 145, UNIPHIER_PIN_DRV_1BIT, |
| 407 | 145, UNIPHIER_PIN_PULL_DOWN), | 407 | 145, UNIPHIER_PIN_PULL_DOWN), |
| 408 | UNIPHIER_PINCTRL_PIN(146, "TCON5", 146, | 408 | UNIPHIER_PINCTRL_PIN(146, "XIRQ13", 146, |
| 409 | 146, UNIPHIER_PIN_DRV_1BIT, | 409 | 146, UNIPHIER_PIN_DRV_1BIT, |
| 410 | 146, UNIPHIER_PIN_PULL_DOWN), | 410 | 146, UNIPHIER_PIN_PULL_DOWN), |
| 411 | UNIPHIER_PINCTRL_PIN(147, "PWMA", 147, | 411 | UNIPHIER_PINCTRL_PIN(147, "PWMA", 147, |
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index 109e2c99e6c1..95d8f25cbcca 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c | |||
| @@ -6278,7 +6278,7 @@ ahd_reset(struct ahd_softc *ahd, int reinit) | |||
| 6278 | * does not disable its parity logic prior to | 6278 | * does not disable its parity logic prior to |
| 6279 | * the start of the reset. This may cause a | 6279 | * the start of the reset. This may cause a |
| 6280 | * parity error to be detected and thus a | 6280 | * parity error to be detected and thus a |
| 6281 | * spurious SERR or PERR assertion. Disble | 6281 | * spurious SERR or PERR assertion. Disable |
| 6282 | * PERR and SERR responses during the CHIPRST. | 6282 | * PERR and SERR responses during the CHIPRST. |
| 6283 | */ | 6283 | */ |
| 6284 | mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN); | 6284 | mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN); |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index c7839f6c35cc..d277e8620e3e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -3075,23 +3075,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) | |||
| 3075 | put_device(&sdkp->dev); | 3075 | put_device(&sdkp->dev); |
| 3076 | } | 3076 | } |
| 3077 | 3077 | ||
| 3078 | struct sd_devt { | ||
| 3079 | int idx; | ||
| 3080 | struct disk_devt disk_devt; | ||
| 3081 | }; | ||
| 3082 | |||
| 3083 | static void sd_devt_release(struct disk_devt *disk_devt) | ||
| 3084 | { | ||
| 3085 | struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt, | ||
| 3086 | disk_devt); | ||
| 3087 | |||
| 3088 | spin_lock(&sd_index_lock); | ||
| 3089 | ida_remove(&sd_index_ida, sd_devt->idx); | ||
| 3090 | spin_unlock(&sd_index_lock); | ||
| 3091 | |||
| 3092 | kfree(sd_devt); | ||
| 3093 | } | ||
| 3094 | |||
| 3095 | /** | 3078 | /** |
| 3096 | * sd_probe - called during driver initialization and whenever a | 3079 | * sd_probe - called during driver initialization and whenever a |
| 3097 | * new scsi device is attached to the system. It is called once | 3080 | * new scsi device is attached to the system. It is called once |
| @@ -3113,7 +3096,6 @@ static void sd_devt_release(struct disk_devt *disk_devt) | |||
| 3113 | static int sd_probe(struct device *dev) | 3096 | static int sd_probe(struct device *dev) |
| 3114 | { | 3097 | { |
| 3115 | struct scsi_device *sdp = to_scsi_device(dev); | 3098 | struct scsi_device *sdp = to_scsi_device(dev); |
| 3116 | struct sd_devt *sd_devt; | ||
| 3117 | struct scsi_disk *sdkp; | 3099 | struct scsi_disk *sdkp; |
| 3118 | struct gendisk *gd; | 3100 | struct gendisk *gd; |
| 3119 | int index; | 3101 | int index; |
| @@ -3139,13 +3121,9 @@ static int sd_probe(struct device *dev) | |||
| 3139 | if (!sdkp) | 3121 | if (!sdkp) |
| 3140 | goto out; | 3122 | goto out; |
| 3141 | 3123 | ||
| 3142 | sd_devt = kzalloc(sizeof(*sd_devt), GFP_KERNEL); | ||
| 3143 | if (!sd_devt) | ||
| 3144 | goto out_free; | ||
| 3145 | |||
| 3146 | gd = alloc_disk(SD_MINORS); | 3124 | gd = alloc_disk(SD_MINORS); |
| 3147 | if (!gd) | 3125 | if (!gd) |
| 3148 | goto out_free_devt; | 3126 | goto out_free; |
| 3149 | 3127 | ||
| 3150 | do { | 3128 | do { |
| 3151 | if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) | 3129 | if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) |
| @@ -3161,11 +3139,6 @@ static int sd_probe(struct device *dev) | |||
| 3161 | goto out_put; | 3139 | goto out_put; |
| 3162 | } | 3140 | } |
| 3163 | 3141 | ||
| 3164 | atomic_set(&sd_devt->disk_devt.count, 1); | ||
| 3165 | sd_devt->disk_devt.release = sd_devt_release; | ||
| 3166 | sd_devt->idx = index; | ||
| 3167 | gd->disk_devt = &sd_devt->disk_devt; | ||
| 3168 | |||
| 3169 | error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); | 3142 | error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); |
| 3170 | if (error) { | 3143 | if (error) { |
| 3171 | sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); | 3144 | sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); |
| @@ -3205,12 +3178,11 @@ static int sd_probe(struct device *dev) | |||
| 3205 | return 0; | 3178 | return 0; |
| 3206 | 3179 | ||
| 3207 | out_free_index: | 3180 | out_free_index: |
| 3208 | put_disk_devt(&sd_devt->disk_devt); | 3181 | spin_lock(&sd_index_lock); |
| 3209 | sd_devt = NULL; | 3182 | ida_remove(&sd_index_ida, index); |
| 3183 | spin_unlock(&sd_index_lock); | ||
| 3210 | out_put: | 3184 | out_put: |
| 3211 | put_disk(gd); | 3185 | put_disk(gd); |
| 3212 | out_free_devt: | ||
| 3213 | kfree(sd_devt); | ||
| 3214 | out_free: | 3186 | out_free: |
| 3215 | kfree(sdkp); | 3187 | kfree(sdkp); |
| 3216 | out: | 3188 | out: |
| @@ -3271,7 +3243,10 @@ static void scsi_disk_release(struct device *dev) | |||
| 3271 | struct scsi_disk *sdkp = to_scsi_disk(dev); | 3243 | struct scsi_disk *sdkp = to_scsi_disk(dev); |
| 3272 | struct gendisk *disk = sdkp->disk; | 3244 | struct gendisk *disk = sdkp->disk; |
| 3273 | 3245 | ||
| 3274 | put_disk_devt(disk->disk_devt); | 3246 | spin_lock(&sd_index_lock); |
| 3247 | ida_remove(&sd_index_ida, sdkp->index); | ||
| 3248 | spin_unlock(&sd_index_lock); | ||
| 3249 | |||
| 3275 | disk->private_data = NULL; | 3250 | disk->private_data = NULL; |
| 3276 | put_disk(disk); | 3251 | put_disk(disk); |
| 3277 | put_device(&sdkp->device->sdev_gendev); | 3252 | put_device(&sdkp->device->sdev_gendev); |
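The sd.c hunks above drop the short-lived sd_devt indirection and go back to freeing the disk index directly: once on the sd_probe() error path and once in scsi_disk_release(). For reference, a minimal sketch of the pre-ida_alloc() allocation idiom the probe path relies on, with a driver-private ida protected by a spinlock; the my_index_* names and wrapper functions are invented for the illustration and are not part of the patch.

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDA(my_index_ida);
static DEFINE_SPINLOCK(my_index_lock);

/* Allocate a small integer index; returns the index or a negative errno. */
static int my_index_get(void)
{
    int index, error;

    do {
        if (!ida_pre_get(&my_index_ida, GFP_KERNEL))
            return -ENOMEM;
        spin_lock(&my_index_lock);
        error = ida_get_new(&my_index_ida, &index);
        spin_unlock(&my_index_lock);
    } while (error == -EAGAIN);

    return error ? error : index;
}

/* Release an index allocated by my_index_get(). */
static void my_index_put(int index)
{
    spin_lock(&my_index_lock);
    ida_remove(&my_index_ida, index);
    spin_unlock(&my_index_lock);
}

The retry loop exists because ida_get_new() may return -EAGAIN once its preallocated node has been consumed, so the idiom loops back to ida_pre_get() before trying again.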
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 7f8cf875157c..65a285631994 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c | |||
| @@ -336,7 +336,6 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget) | |||
| 336 | if (likely((port < TOTAL_NUMBER_OF_PORTS) && | 336 | if (likely((port < TOTAL_NUMBER_OF_PORTS) && |
| 337 | cvm_oct_device[port])) { | 337 | cvm_oct_device[port])) { |
| 338 | struct net_device *dev = cvm_oct_device[port]; | 338 | struct net_device *dev = cvm_oct_device[port]; |
| 339 | struct octeon_ethernet *priv = netdev_priv(dev); | ||
| 340 | 339 | ||
| 341 | /* | 340 | /* |
| 342 | * Only accept packets for devices that are | 341 | * Only accept packets for devices that are |
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig index e61e4ca064a8..74094fff4367 100644 --- a/drivers/staging/vc04_services/Kconfig +++ b/drivers/staging/vc04_services/Kconfig | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | config BCM2835_VCHIQ | 1 | config BCM2835_VCHIQ |
| 2 | tristate "Videocore VCHIQ" | 2 | tristate "Videocore VCHIQ" |
| 3 | depends on HAS_DMA | 3 | depends on HAS_DMA |
| 4 | depends on OF | ||
| 4 | depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE) | 5 | depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE) |
| 5 | default y | 6 | default y |
| 6 | help | 7 | help |
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index 1bacbc3b19a0..e94aea8c0d05 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c | |||
| @@ -114,7 +114,7 @@ | |||
| 114 | #define DEFAULT_TX_BUF_COUNT 3 | 114 | #define DEFAULT_TX_BUF_COUNT 3 |
| 115 | 115 | ||
| 116 | struct n_hdlc_buf { | 116 | struct n_hdlc_buf { |
| 117 | struct n_hdlc_buf *link; | 117 | struct list_head list_item; |
| 118 | int count; | 118 | int count; |
| 119 | char buf[1]; | 119 | char buf[1]; |
| 120 | }; | 120 | }; |
| @@ -122,8 +122,7 @@ struct n_hdlc_buf { | |||
| 122 | #define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe) | 122 | #define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe) |
| 123 | 123 | ||
| 124 | struct n_hdlc_buf_list { | 124 | struct n_hdlc_buf_list { |
| 125 | struct n_hdlc_buf *head; | 125 | struct list_head list; |
| 126 | struct n_hdlc_buf *tail; | ||
| 127 | int count; | 126 | int count; |
| 128 | spinlock_t spinlock; | 127 | spinlock_t spinlock; |
| 129 | }; | 128 | }; |
| @@ -136,7 +135,6 @@ struct n_hdlc_buf_list { | |||
| 136 | * @backup_tty - TTY to use if tty gets closed | 135 | * @backup_tty - TTY to use if tty gets closed |
| 137 | * @tbusy - reentrancy flag for tx wakeup code | 136 | * @tbusy - reentrancy flag for tx wakeup code |
| 138 | * @woke_up - FIXME: describe this field | 137 | * @woke_up - FIXME: describe this field |
| 139 | * @tbuf - currently transmitting tx buffer | ||
| 140 | * @tx_buf_list - list of pending transmit frame buffers | 138 | * @tx_buf_list - list of pending transmit frame buffers |
| 141 | * @rx_buf_list - list of received frame buffers | 139 | * @rx_buf_list - list of received frame buffers |
| 142 | * @tx_free_buf_list - list unused transmit frame buffers | 140 | * @tx_free_buf_list - list unused transmit frame buffers |
| @@ -149,7 +147,6 @@ struct n_hdlc { | |||
| 149 | struct tty_struct *backup_tty; | 147 | struct tty_struct *backup_tty; |
| 150 | int tbusy; | 148 | int tbusy; |
| 151 | int woke_up; | 149 | int woke_up; |
| 152 | struct n_hdlc_buf *tbuf; | ||
| 153 | struct n_hdlc_buf_list tx_buf_list; | 150 | struct n_hdlc_buf_list tx_buf_list; |
| 154 | struct n_hdlc_buf_list rx_buf_list; | 151 | struct n_hdlc_buf_list rx_buf_list; |
| 155 | struct n_hdlc_buf_list tx_free_buf_list; | 152 | struct n_hdlc_buf_list tx_free_buf_list; |
| @@ -159,6 +156,8 @@ struct n_hdlc { | |||
| 159 | /* | 156 | /* |
| 160 | * HDLC buffer list manipulation functions | 157 | * HDLC buffer list manipulation functions |
| 161 | */ | 158 | */ |
| 159 | static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list, | ||
| 160 | struct n_hdlc_buf *buf); | ||
| 162 | static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, | 161 | static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, |
| 163 | struct n_hdlc_buf *buf); | 162 | struct n_hdlc_buf *buf); |
| 164 | static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list); | 163 | static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list); |
| @@ -208,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty) | |||
| 208 | { | 207 | { |
| 209 | struct n_hdlc *n_hdlc = tty2n_hdlc(tty); | 208 | struct n_hdlc *n_hdlc = tty2n_hdlc(tty); |
| 210 | struct n_hdlc_buf *buf; | 209 | struct n_hdlc_buf *buf; |
| 211 | unsigned long flags; | ||
| 212 | 210 | ||
| 213 | while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list))) | 211 | while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list))) |
| 214 | n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf); | 212 | n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf); |
| 215 | spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags); | ||
| 216 | if (n_hdlc->tbuf) { | ||
| 217 | n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf); | ||
| 218 | n_hdlc->tbuf = NULL; | ||
| 219 | } | ||
| 220 | spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); | ||
| 221 | } | 213 | } |
| 222 | 214 | ||
| 223 | static struct tty_ldisc_ops n_hdlc_ldisc = { | 215 | static struct tty_ldisc_ops n_hdlc_ldisc = { |
| @@ -283,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc) | |||
| 283 | } else | 275 | } else |
| 284 | break; | 276 | break; |
| 285 | } | 277 | } |
| 286 | kfree(n_hdlc->tbuf); | ||
| 287 | kfree(n_hdlc); | 278 | kfree(n_hdlc); |
| 288 | 279 | ||
| 289 | } /* end of n_hdlc_release() */ | 280 | } /* end of n_hdlc_release() */ |
| @@ -402,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) | |||
| 402 | n_hdlc->woke_up = 0; | 393 | n_hdlc->woke_up = 0; |
| 403 | spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); | 394 | spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); |
| 404 | 395 | ||
| 405 | /* get current transmit buffer or get new transmit */ | 396 | tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list); |
| 406 | /* buffer from list of pending transmit buffers */ | ||
| 407 | |||
| 408 | tbuf = n_hdlc->tbuf; | ||
| 409 | if (!tbuf) | ||
| 410 | tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list); | ||
| 411 | |||
| 412 | while (tbuf) { | 397 | while (tbuf) { |
| 413 | if (debuglevel >= DEBUG_LEVEL_INFO) | 398 | if (debuglevel >= DEBUG_LEVEL_INFO) |
| 414 | printk("%s(%d)sending frame %p, count=%d\n", | 399 | printk("%s(%d)sending frame %p, count=%d\n", |
| @@ -420,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) | |||
| 420 | 405 | ||
| 421 | /* rollback was possible and has been done */ | 406 | /* rollback was possible and has been done */ |
| 422 | if (actual == -ERESTARTSYS) { | 407 | if (actual == -ERESTARTSYS) { |
| 423 | n_hdlc->tbuf = tbuf; | 408 | n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf); |
| 424 | break; | 409 | break; |
| 425 | } | 410 | } |
| 426 | /* if transmit error, throw frame away by */ | 411 | /* if transmit error, throw frame away by */ |
| @@ -435,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) | |||
| 435 | 420 | ||
| 436 | /* free current transmit buffer */ | 421 | /* free current transmit buffer */ |
| 437 | n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf); | 422 | n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf); |
| 438 | 423 | ||
| 439 | /* this tx buffer is done */ | ||
| 440 | n_hdlc->tbuf = NULL; | ||
| 441 | |||
| 442 | /* wake up sleeping writers */ | 424 | /* wake up sleeping writers */ |
| 443 | wake_up_interruptible(&tty->write_wait); | 425 | wake_up_interruptible(&tty->write_wait); |
| 444 | 426 | ||
| @@ -448,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) | |||
| 448 | if (debuglevel >= DEBUG_LEVEL_INFO) | 430 | if (debuglevel >= DEBUG_LEVEL_INFO) |
| 449 | printk("%s(%d)frame %p pending\n", | 431 | printk("%s(%d)frame %p pending\n", |
| 450 | __FILE__,__LINE__,tbuf); | 432 | __FILE__,__LINE__,tbuf); |
| 451 | 433 | ||
| 452 | /* buffer not accepted by driver */ | 434 | /* |
| 453 | /* set this buffer as pending buffer */ | 435 | * the buffer was not accepted by driver, |
| 454 | n_hdlc->tbuf = tbuf; | 436 | * return it back into tx queue |
| 437 | */ | ||
| 438 | n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf); | ||
| 455 | break; | 439 | break; |
| 456 | } | 440 | } |
| 457 | } | 441 | } |
| @@ -749,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
| 749 | int error = 0; | 733 | int error = 0; |
| 750 | int count; | 734 | int count; |
| 751 | unsigned long flags; | 735 | unsigned long flags; |
| 752 | 736 | struct n_hdlc_buf *buf = NULL; | |
| 737 | |||
| 753 | if (debuglevel >= DEBUG_LEVEL_INFO) | 738 | if (debuglevel >= DEBUG_LEVEL_INFO) |
| 754 | printk("%s(%d)n_hdlc_tty_ioctl() called %d\n", | 739 | printk("%s(%d)n_hdlc_tty_ioctl() called %d\n", |
| 755 | __FILE__,__LINE__,cmd); | 740 | __FILE__,__LINE__,cmd); |
| @@ -763,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
| 763 | /* report count of read data available */ | 748 | /* report count of read data available */ |
| 764 | /* in next available frame (if any) */ | 749 | /* in next available frame (if any) */ |
| 765 | spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags); | 750 | spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags); |
| 766 | if (n_hdlc->rx_buf_list.head) | 751 | buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list, |
| 767 | count = n_hdlc->rx_buf_list.head->count; | 752 | struct n_hdlc_buf, list_item); |
| 753 | if (buf) | ||
| 754 | count = buf->count; | ||
| 768 | else | 755 | else |
| 769 | count = 0; | 756 | count = 0; |
| 770 | spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags); | 757 | spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags); |
| @@ -776,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
| 776 | count = tty_chars_in_buffer(tty); | 763 | count = tty_chars_in_buffer(tty); |
| 777 | /* add size of next output frame in queue */ | 764 | /* add size of next output frame in queue */ |
| 778 | spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags); | 765 | spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags); |
| 779 | if (n_hdlc->tx_buf_list.head) | 766 | buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list, |
| 780 | count += n_hdlc->tx_buf_list.head->count; | 767 | struct n_hdlc_buf, list_item); |
| 768 | if (buf) | ||
| 769 | count += buf->count; | ||
| 781 | spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags); | 770 | spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags); |
| 782 | error = put_user(count, (int __user *)arg); | 771 | error = put_user(count, (int __user *)arg); |
| 783 | break; | 772 | break; |
| @@ -825,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp, | |||
| 825 | poll_wait(filp, &tty->write_wait, wait); | 814 | poll_wait(filp, &tty->write_wait, wait); |
| 826 | 815 | ||
| 827 | /* set bits for operations that won't block */ | 816 | /* set bits for operations that won't block */ |
| 828 | if (n_hdlc->rx_buf_list.head) | 817 | if (!list_empty(&n_hdlc->rx_buf_list.list)) |
| 829 | mask |= POLLIN | POLLRDNORM; /* readable */ | 818 | mask |= POLLIN | POLLRDNORM; /* readable */ |
| 830 | if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) | 819 | if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) |
| 831 | mask |= POLLHUP; | 820 | mask |= POLLHUP; |
| 832 | if (tty_hung_up_p(filp)) | 821 | if (tty_hung_up_p(filp)) |
| 833 | mask |= POLLHUP; | 822 | mask |= POLLHUP; |
| 834 | if (!tty_is_writelocked(tty) && | 823 | if (!tty_is_writelocked(tty) && |
| 835 | n_hdlc->tx_free_buf_list.head) | 824 | !list_empty(&n_hdlc->tx_free_buf_list.list)) |
| 836 | mask |= POLLOUT | POLLWRNORM; /* writable */ | 825 | mask |= POLLOUT | POLLWRNORM; /* writable */ |
| 837 | } | 826 | } |
| 838 | return mask; | 827 | return mask; |
| @@ -856,7 +845,12 @@ static struct n_hdlc *n_hdlc_alloc(void) | |||
| 856 | spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock); | 845 | spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock); |
| 857 | spin_lock_init(&n_hdlc->rx_buf_list.spinlock); | 846 | spin_lock_init(&n_hdlc->rx_buf_list.spinlock); |
| 858 | spin_lock_init(&n_hdlc->tx_buf_list.spinlock); | 847 | spin_lock_init(&n_hdlc->tx_buf_list.spinlock); |
| 859 | 848 | ||
| 849 | INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list); | ||
| 850 | INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list); | ||
| 851 | INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list); | ||
| 852 | INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list); | ||
| 853 | |||
| 860 | /* allocate free rx buffer list */ | 854 | /* allocate free rx buffer list */ |
| 861 | for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) { | 855 | for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) { |
| 862 | buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL); | 856 | buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL); |
| @@ -884,53 +878,65 @@ static struct n_hdlc *n_hdlc_alloc(void) | |||
| 884 | } /* end of n_hdlc_alloc() */ | 878 | } /* end of n_hdlc_alloc() */ |
| 885 | 879 | ||
| 886 | /** | 880 | /** |
| 881 | * n_hdlc_buf_return - put the HDLC buffer after the head of the specified list | ||
| 882 | * @buf_list - pointer to the buffer list | ||
| 883 | * @buf - pointer to the buffer | ||
| 884 | */ | ||
| 885 | static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list, | ||
| 886 | struct n_hdlc_buf *buf) | ||
| 887 | { | ||
| 888 | unsigned long flags; | ||
| 889 | |||
| 890 | spin_lock_irqsave(&buf_list->spinlock, flags); | ||
| 891 | |||
| 892 | list_add(&buf->list_item, &buf_list->list); | ||
| 893 | buf_list->count++; | ||
| 894 | |||
| 895 | spin_unlock_irqrestore(&buf_list->spinlock, flags); | ||
| 896 | } | ||
| 897 | |||
| 898 | /** | ||
| 887 | * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list | 899 | * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list |
| 888 | * @list - pointer to buffer list | 900 | * @buf_list - pointer to buffer list |
| 889 | * @buf - pointer to buffer | 901 | * @buf - pointer to buffer |
| 890 | */ | 902 | */ |
| 891 | static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, | 903 | static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list, |
| 892 | struct n_hdlc_buf *buf) | 904 | struct n_hdlc_buf *buf) |
| 893 | { | 905 | { |
| 894 | unsigned long flags; | 906 | unsigned long flags; |
| 895 | spin_lock_irqsave(&list->spinlock,flags); | 907 | |
| 896 | 908 | spin_lock_irqsave(&buf_list->spinlock, flags); | |
| 897 | buf->link=NULL; | 909 | |
| 898 | if (list->tail) | 910 | list_add_tail(&buf->list_item, &buf_list->list); |
| 899 | list->tail->link = buf; | 911 | buf_list->count++; |
| 900 | else | 912 | |
| 901 | list->head = buf; | 913 | spin_unlock_irqrestore(&buf_list->spinlock, flags); |
| 902 | list->tail = buf; | ||
| 903 | (list->count)++; | ||
| 904 | |||
| 905 | spin_unlock_irqrestore(&list->spinlock,flags); | ||
| 906 | |||
| 907 | } /* end of n_hdlc_buf_put() */ | 914 | } /* end of n_hdlc_buf_put() */ |
| 908 | 915 | ||
| 909 | /** | 916 | /** |
| 910 | * n_hdlc_buf_get - remove and return an HDLC buffer from list | 917 | * n_hdlc_buf_get - remove and return an HDLC buffer from list |
| 911 | * @list - pointer to HDLC buffer list | 918 | * @buf_list - pointer to HDLC buffer list |
| 912 | * | 919 | * |
| 913 | * Remove and return an HDLC buffer from the head of the specified HDLC buffer | 920 | * Remove and return an HDLC buffer from the head of the specified HDLC buffer |
| 914 | * list. | 921 | * list. |
| 915 | * Returns a pointer to HDLC buffer if available, otherwise %NULL. | 922 | * Returns a pointer to HDLC buffer if available, otherwise %NULL. |
| 916 | */ | 923 | */ |
| 917 | static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list) | 924 | static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list) |
| 918 | { | 925 | { |
| 919 | unsigned long flags; | 926 | unsigned long flags; |
| 920 | struct n_hdlc_buf *buf; | 927 | struct n_hdlc_buf *buf; |
| 921 | spin_lock_irqsave(&list->spinlock,flags); | 928 | |
| 922 | 929 | spin_lock_irqsave(&buf_list->spinlock, flags); | |
| 923 | buf = list->head; | 930 | |
| 931 | buf = list_first_entry_or_null(&buf_list->list, | ||
| 932 | struct n_hdlc_buf, list_item); | ||
| 924 | if (buf) { | 933 | if (buf) { |
| 925 | list->head = buf->link; | 934 | list_del(&buf->list_item); |
| 926 | (list->count)--; | 935 | buf_list->count--; |
| 927 | } | 936 | } |
| 928 | if (!list->head) | 937 | |
| 929 | list->tail = NULL; | 938 | spin_unlock_irqrestore(&buf_list->spinlock, flags); |
| 930 | |||
| 931 | spin_unlock_irqrestore(&list->spinlock,flags); | ||
| 932 | return buf; | 939 | return buf; |
| 933 | |||
| 934 | } /* end of n_hdlc_buf_get() */ | 940 | } /* end of n_hdlc_buf_get() */ |
| 935 | 941 | ||
| 936 | static char hdlc_banner[] __initdata = | 942 | static char hdlc_banner[] __initdata = |
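The n_hdlc rework above replaces the open-coded head/tail singly linked buffer lists (and the special tbuf pointer) with struct list_head queues, so a frame the driver did not accept can simply be pushed back onto the head of the tx queue. A condensed sketch of that idiom follows; the frame_buf/frame_* names are illustrative, and unlike the real driver this sketch omits the per-list spinlock and count shown in the hunks above.

#include <linux/list.h>

struct frame_buf {
    struct list_head list_item;    /* links the buffer into a queue */
    int count;
    char data[];
};

/* Append a buffer to the tail of a queue (normal enqueue). */
static void frame_put(struct list_head *queue, struct frame_buf *buf)
{
    list_add_tail(&buf->list_item, queue);
}

/* Push a buffer back onto the head of a queue (e.g. after -ERESTARTSYS). */
static void frame_return(struct list_head *queue, struct frame_buf *buf)
{
    list_add(&buf->list_item, queue);
}

/* Pop the buffer at the head of a queue, or NULL if the queue is empty. */
static struct frame_buf *frame_get(struct list_head *queue)
{
    struct frame_buf *buf;

    buf = list_first_entry_or_null(queue, struct frame_buf, list_item);
    if (buf)
        list_del(&buf->list_item);
    return buf;
}

In the patch, n_hdlc_buf_return() is the list_add() case and n_hdlc_buf_put() the list_add_tail() case.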
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index b4f86c219db1..7a17aedbf902 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c | |||
| @@ -1031,8 +1031,10 @@ static int s3c64xx_serial_startup(struct uart_port *port) | |||
| 1031 | if (ourport->dma) { | 1031 | if (ourport->dma) { |
| 1032 | ret = s3c24xx_serial_request_dma(ourport); | 1032 | ret = s3c24xx_serial_request_dma(ourport); |
| 1033 | if (ret < 0) { | 1033 | if (ret < 0) { |
| 1034 | dev_warn(port->dev, "DMA request failed\n"); | 1034 | dev_warn(port->dev, |
| 1035 | return ret; | 1035 | "DMA request failed, DMA will not be used\n"); |
| 1036 | devm_kfree(port->dev, ourport->dma); | ||
| 1037 | ourport->dma = NULL; | ||
| 1036 | } | 1038 | } |
| 1037 | } | 1039 | } |
| 1038 | 1040 | ||
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 2092e46b1380..f8d0747810e7 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c | |||
| @@ -250,6 +250,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, | |||
| 250 | val = dwc3_omap_read_utmi_ctrl(omap); | 250 | val = dwc3_omap_read_utmi_ctrl(omap); |
| 251 | val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG; | 251 | val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG; |
| 252 | dwc3_omap_write_utmi_ctrl(omap, val); | 252 | dwc3_omap_write_utmi_ctrl(omap, val); |
| 253 | break; | ||
| 253 | 254 | ||
| 254 | case OMAP_DWC3_VBUS_OFF: | 255 | case OMAP_DWC3_VBUS_OFF: |
| 255 | val = dwc3_omap_read_utmi_ctrl(omap); | 256 | val = dwc3_omap_read_utmi_ctrl(omap); |
| @@ -392,7 +393,7 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap) | |||
| 392 | { | 393 | { |
| 393 | u32 reg; | 394 | u32 reg; |
| 394 | struct device_node *node = omap->dev->of_node; | 395 | struct device_node *node = omap->dev->of_node; |
| 395 | int utmi_mode = 0; | 396 | u32 utmi_mode = 0; |
| 396 | 397 | ||
| 397 | reg = dwc3_omap_read_utmi_ctrl(omap); | 398 | reg = dwc3_omap_read_utmi_ctrl(omap); |
| 398 | 399 | ||
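The first dwc3-omap hunk only adds a break, so the mailbox case that sets the IDDIG bit no longer falls through into the OMAP_DWC3_VBUS_OFF case and immediately rewrites the register it just programmed. A self-contained illustration of that fallthrough hazard, using an invented enum and stub handlers rather than the driver's own code:

#include <stdio.h>

enum otg_event { EVENT_ID_GROUND, EVENT_VBUS_OFF };

static void apply_id_ground(void) { printf("program ID ground\n"); }
static void apply_vbus_off(void)  { printf("program VBUS off\n"); }

static void handle_event(enum otg_event ev)
{
    switch (ev) {
    case EVENT_ID_GROUND:
        apply_id_ground();
        break;    /* without this, EVENT_ID_GROUND also runs apply_vbus_off() */
    case EVENT_VBUS_OFF:
        apply_vbus_off();
        break;
    }
}

int main(void)
{
    handle_event(EVENT_ID_GROUND);    /* prints only "program ID ground" */
    return 0;
}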
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 4db97ecae885..0d75158e43fe 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
| @@ -1342,6 +1342,68 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, | |||
| 1342 | if (r == req) { | 1342 | if (r == req) { |
| 1343 | /* wait until it is processed */ | 1343 | /* wait until it is processed */ |
| 1344 | dwc3_stop_active_transfer(dwc, dep->number, true); | 1344 | dwc3_stop_active_transfer(dwc, dep->number, true); |
| 1345 | |||
| 1346 | /* | ||
| 1347 | * If request was already started, this means we had to | ||
| 1348 | * stop the transfer. With that we also need to ignore | ||
| 1349 | * all TRBs used by the request, however TRBs can only | ||
| 1350 | * be modified after completion of END_TRANSFER | ||
| 1351 | * command. So what we do here is that we wait for | ||
| 1352 | * END_TRANSFER completion and only after that, we jump | ||
| 1353 | * over TRBs by clearing HWO and incrementing dequeue | ||
| 1354 | * pointer. | ||
| 1355 | * | ||
| 1356 | * Note that we have 2 possible types of transfers here: | ||
| 1357 | * | ||
| 1358 | * i) Linear buffer request | ||
| 1359 | * ii) SG-list based request | ||
| 1360 | * | ||
| 1361 | * SG-list based requests will have r->num_pending_sgs | ||
| 1362 | * set to a valid number (> 0). Linear requests, | ||
| 1363 | * normally use a single TRB. | ||
| 1364 | * | ||
| 1365 | * For each of these two cases, if r->unaligned flag is | ||
| 1366 | * set, one extra TRB has been used to align transfer | ||
| 1367 | * size to wMaxPacketSize. | ||
| 1368 | * | ||
| 1369 | * All of these cases need to be taken into | ||
| 1370 | * consideration so we don't mess up our TRB ring | ||
| 1371 | * pointers. | ||
| 1372 | */ | ||
| 1373 | wait_event_lock_irq(dep->wait_end_transfer, | ||
| 1374 | !(dep->flags & DWC3_EP_END_TRANSFER_PENDING), | ||
| 1375 | dwc->lock); | ||
| 1376 | |||
| 1377 | if (!r->trb) | ||
| 1378 | goto out1; | ||
| 1379 | |||
| 1380 | if (r->num_pending_sgs) { | ||
| 1381 | struct dwc3_trb *trb; | ||
| 1382 | int i = 0; | ||
| 1383 | |||
| 1384 | for (i = 0; i < r->num_pending_sgs; i++) { | ||
| 1385 | trb = r->trb + i; | ||
| 1386 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | ||
| 1387 | dwc3_ep_inc_deq(dep); | ||
| 1388 | } | ||
| 1389 | |||
| 1390 | if (r->unaligned) { | ||
| 1391 | trb = r->trb + r->num_pending_sgs + 1; | ||
| 1392 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | ||
| 1393 | dwc3_ep_inc_deq(dep); | ||
| 1394 | } | ||
| 1395 | } else { | ||
| 1396 | struct dwc3_trb *trb = r->trb; | ||
| 1397 | |||
| 1398 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | ||
| 1399 | dwc3_ep_inc_deq(dep); | ||
| 1400 | |||
| 1401 | if (r->unaligned) { | ||
| 1402 | trb = r->trb + 1; | ||
| 1403 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | ||
| 1404 | dwc3_ep_inc_deq(dep); | ||
| 1405 | } | ||
| 1406 | } | ||
| 1345 | goto out1; | 1407 | goto out1; |
| 1346 | } | 1408 | } |
| 1347 | dev_err(dwc->dev, "request %p was not queued to %s\n", | 1409 | dev_err(dwc->dev, "request %p was not queued to %s\n", |
| @@ -1352,6 +1414,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, | |||
| 1352 | 1414 | ||
| 1353 | out1: | 1415 | out1: |
| 1354 | /* giveback the request */ | 1416 | /* giveback the request */ |
| 1417 | dep->queued_requests--; | ||
| 1355 | dwc3_gadget_giveback(dep, req, -ECONNRESET); | 1418 | dwc3_gadget_giveback(dep, req, -ECONNRESET); |
| 1356 | 1419 | ||
| 1357 | out0: | 1420 | out0: |
| @@ -2126,12 +2189,12 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
| 2126 | return 1; | 2189 | return 1; |
| 2127 | } | 2190 | } |
| 2128 | 2191 | ||
| 2129 | if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) | ||
| 2130 | return 1; | ||
| 2131 | |||
| 2132 | count = trb->size & DWC3_TRB_SIZE_MASK; | 2192 | count = trb->size & DWC3_TRB_SIZE_MASK; |
| 2133 | req->remaining += count; | 2193 | req->remaining += count; |
| 2134 | 2194 | ||
| 2195 | if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) | ||
| 2196 | return 1; | ||
| 2197 | |||
| 2135 | if (dep->direction) { | 2198 | if (dep->direction) { |
| 2136 | if (count) { | 2199 | if (count) { |
| 2137 | trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); | 2200 | trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); |
| @@ -3228,15 +3291,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc) | |||
| 3228 | 3291 | ||
| 3229 | int dwc3_gadget_suspend(struct dwc3 *dwc) | 3292 | int dwc3_gadget_suspend(struct dwc3 *dwc) |
| 3230 | { | 3293 | { |
| 3231 | int ret; | ||
| 3232 | |||
| 3233 | if (!dwc->gadget_driver) | 3294 | if (!dwc->gadget_driver) |
| 3234 | return 0; | 3295 | return 0; |
| 3235 | 3296 | ||
| 3236 | ret = dwc3_gadget_run_stop(dwc, false, false); | 3297 | dwc3_gadget_run_stop(dwc, false, false); |
| 3237 | if (ret < 0) | ||
| 3238 | return ret; | ||
| 3239 | |||
| 3240 | dwc3_disconnect_gadget(dwc); | 3298 | dwc3_disconnect_gadget(dwc); |
| 3241 | __dwc3_gadget_stop(dwc); | 3299 | __dwc3_gadget_stop(dwc); |
| 3242 | 3300 | ||
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h index 3129bcf74d7d..265e223ab645 100644 --- a/drivers/usb/dwc3/gadget.h +++ b/drivers/usb/dwc3/gadget.h | |||
| @@ -28,23 +28,23 @@ struct dwc3; | |||
| 28 | #define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget)) | 28 | #define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget)) |
| 29 | 29 | ||
| 30 | /* DEPCFG parameter 1 */ | 30 | /* DEPCFG parameter 1 */ |
| 31 | #define DWC3_DEPCFG_INT_NUM(n) ((n) << 0) | 31 | #define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0) |
| 32 | #define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8) | 32 | #define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8) |
| 33 | #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9) | 33 | #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9) |
| 34 | #define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10) | 34 | #define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10) |
| 35 | #define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11) | 35 | #define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11) |
| 36 | #define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13) | 36 | #define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13) |
| 37 | #define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16) | 37 | #define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16) |
| 38 | #define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24) | 38 | #define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24) |
| 39 | #define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25) | 39 | #define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25) |
| 40 | #define DWC3_DEPCFG_BULK_BASED (1 << 30) | 40 | #define DWC3_DEPCFG_BULK_BASED (1 << 30) |
| 41 | #define DWC3_DEPCFG_FIFO_BASED (1 << 31) | 41 | #define DWC3_DEPCFG_FIFO_BASED (1 << 31) |
| 42 | 42 | ||
| 43 | /* DEPCFG parameter 0 */ | 43 | /* DEPCFG parameter 0 */ |
| 44 | #define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1) | 44 | #define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1) |
| 45 | #define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3) | 45 | #define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3) |
| 46 | #define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17) | 46 | #define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17) |
| 47 | #define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22) | 47 | #define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22) |
| 48 | #define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26) | 48 | #define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26) |
| 49 | /* This applies for core versions earlier than 1.94a */ | 49 | /* This applies for core versions earlier than 1.94a */ |
| 50 | #define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31) | 50 | #define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31) |
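The gadget.h change masks each DEPCFG parameter to its field width before shifting, so an oversized value can no longer leak into neighbouring bits of the register word. A small userspace C demonstration of the difference; the macros and the 11-bit-at-offset-3 layout below are invented for the example and only mirror the shape of DWC3_DEPCFG_MAX_PACKET_SIZE():

#include <stdio.h>

/* Unmasked: a too-large value corrupts the bits above the field. */
#define FIELD_RAW(n)    ((n) << 3)
/* Masked: the value is clamped to the 11-bit field width first. */
#define FIELD_SAFE(n)   (((n) & 0x7ff) << 3)

int main(void)
{
    unsigned int bad = 0x1000;    /* needs 13 bits, the field only has 11 */

    printf("raw  = 0x%08x\n", FIELD_RAW(bad));   /* 0x00008000: bit 15 set by accident */
    printf("safe = 0x%08x\n", FIELD_SAFE(bad));  /* 0x00000000: excess bits dropped */
    return 0;
}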
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 78c44979dde3..cbff3b02840d 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c | |||
| @@ -269,6 +269,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item, | |||
| 269 | ret = unregister_gadget(gi); | 269 | ret = unregister_gadget(gi); |
| 270 | if (ret) | 270 | if (ret) |
| 271 | goto err; | 271 | goto err; |
| 272 | kfree(name); | ||
| 272 | } else { | 273 | } else { |
| 273 | if (gi->composite.gadget_driver.udc_name) { | 274 | if (gi->composite.gadget_driver.udc_name) { |
| 274 | ret = -EBUSY; | 275 | ret = -EBUSY; |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index a5b7cd615698..a0085571824d 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
| @@ -1834,11 +1834,14 @@ static int ffs_func_eps_enable(struct ffs_function *func) | |||
| 1834 | spin_lock_irqsave(&func->ffs->eps_lock, flags); | 1834 | spin_lock_irqsave(&func->ffs->eps_lock, flags); |
| 1835 | while(count--) { | 1835 | while(count--) { |
| 1836 | struct usb_endpoint_descriptor *ds; | 1836 | struct usb_endpoint_descriptor *ds; |
| 1837 | struct usb_ss_ep_comp_descriptor *comp_desc = NULL; | ||
| 1838 | int needs_comp_desc = false; | ||
| 1837 | int desc_idx; | 1839 | int desc_idx; |
| 1838 | 1840 | ||
| 1839 | if (ffs->gadget->speed == USB_SPEED_SUPER) | 1841 | if (ffs->gadget->speed == USB_SPEED_SUPER) { |
| 1840 | desc_idx = 2; | 1842 | desc_idx = 2; |
| 1841 | else if (ffs->gadget->speed == USB_SPEED_HIGH) | 1843 | needs_comp_desc = true; |
| 1844 | } else if (ffs->gadget->speed == USB_SPEED_HIGH) | ||
| 1842 | desc_idx = 1; | 1845 | desc_idx = 1; |
| 1843 | else | 1846 | else |
| 1844 | desc_idx = 0; | 1847 | desc_idx = 0; |
| @@ -1855,6 +1858,14 @@ static int ffs_func_eps_enable(struct ffs_function *func) | |||
| 1855 | 1858 | ||
| 1856 | ep->ep->driver_data = ep; | 1859 | ep->ep->driver_data = ep; |
| 1857 | ep->ep->desc = ds; | 1860 | ep->ep->desc = ds; |
| 1861 | |||
| 1862 | comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + | ||
| 1863 | USB_DT_ENDPOINT_SIZE); | ||
| 1864 | ep->ep->maxburst = comp_desc->bMaxBurst + 1; | ||
| 1865 | |||
| 1866 | if (needs_comp_desc) | ||
| 1867 | ep->ep->comp_desc = comp_desc; | ||
| 1868 | |||
| 1858 | ret = usb_ep_enable(ep->ep); | 1869 | ret = usb_ep_enable(ep->ep); |
| 1859 | if (likely(!ret)) { | 1870 | if (likely(!ret)) { |
| 1860 | epfile->ep = ep; | 1871 | epfile->ep = ep; |
| @@ -2253,7 +2264,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, | |||
| 2253 | 2264 | ||
| 2254 | if (len < sizeof(*d) || | 2265 | if (len < sizeof(*d) || |
| 2255 | d->bFirstInterfaceNumber >= ffs->interfaces_count || | 2266 | d->bFirstInterfaceNumber >= ffs->interfaces_count || |
| 2256 | d->Reserved1) | 2267 | !d->Reserved1) |
| 2257 | return -EINVAL; | 2268 | return -EINVAL; |
| 2258 | for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) | 2269 | for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) |
| 2259 | if (d->Reserved2[i]) | 2270 | if (d->Reserved2[i]) |
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index 27ed51b5082f..29b41b5dee04 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c | |||
| @@ -258,13 +258,6 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) | |||
| 258 | memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req)); | 258 | memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req)); |
| 259 | v4l2_event_queue(&uvc->vdev, &v4l2_event); | 259 | v4l2_event_queue(&uvc->vdev, &v4l2_event); |
| 260 | 260 | ||
| 261 | /* Pass additional setup data to userspace */ | ||
| 262 | if (uvc->event_setup_out && uvc->event_length) { | ||
| 263 | uvc->control_req->length = uvc->event_length; | ||
| 264 | return usb_ep_queue(uvc->func.config->cdev->gadget->ep0, | ||
| 265 | uvc->control_req, GFP_ATOMIC); | ||
| 266 | } | ||
| 267 | |||
| 268 | return 0; | 261 | return 0; |
| 269 | } | 262 | } |
| 270 | 263 | ||
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index a2615d64d07c..a2c916869293 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
| @@ -84,8 +84,7 @@ static int ep_open(struct inode *, struct file *); | |||
| 84 | 84 | ||
| 85 | /* /dev/gadget/$CHIP represents ep0 and the whole device */ | 85 | /* /dev/gadget/$CHIP represents ep0 and the whole device */ |
| 86 | enum ep0_state { | 86 | enum ep0_state { |
| 87 | /* DISBLED is the initial state. | 87 | /* DISABLED is the initial state. */ |
| 88 | */ | ||
| 89 | STATE_DEV_DISABLED = 0, | 88 | STATE_DEV_DISABLED = 0, |
| 90 | 89 | ||
| 91 | /* Only one open() of /dev/gadget/$CHIP; only one file tracks | 90 | /* Only one open() of /dev/gadget/$CHIP; only one file tracks |
| @@ -1782,8 +1781,10 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
| 1782 | 1781 | ||
| 1783 | spin_lock_irq (&dev->lock); | 1782 | spin_lock_irq (&dev->lock); |
| 1784 | value = -EINVAL; | 1783 | value = -EINVAL; |
| 1785 | if (dev->buf) | 1784 | if (dev->buf) { |
| 1785 | kfree(kbuf); | ||
| 1786 | goto fail; | 1786 | goto fail; |
| 1787 | } | ||
| 1787 | dev->buf = kbuf; | 1788 | dev->buf = kbuf; |
| 1788 | 1789 | ||
| 1789 | /* full or low speed config */ | 1790 | /* full or low speed config */ |
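The gadgetfs hunk plugs a memory leak: when a configuration buffer is already installed, the freshly copied kbuf must be freed before taking the -EINVAL exit. A generic sketch of that error-path rule follows; struct my_dev, store_config() and the exact locking shape are invented for the illustration (the real dev_config() copies the buffer earlier and under its own conditions).

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct my_dev {
    spinlock_t lock;
    char *buf;
};

static int store_config(struct my_dev *dev, const char __user *ubuf, size_t len)
{
    char *kbuf;

    kbuf = memdup_user(ubuf, len);
    if (IS_ERR(kbuf))
        return PTR_ERR(kbuf);

    spin_lock_irq(&dev->lock);
    if (dev->buf) {                   /* already configured */
        spin_unlock_irq(&dev->lock);
        kfree(kbuf);                  /* don't leak the copy on this exit */
        return -EINVAL;
    }
    dev->buf = kbuf;
    spin_unlock_irq(&dev->lock);
    return 0;
}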
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 11bbce28bc23..2035906b8ced 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c | |||
| @@ -610,7 +610,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) | |||
| 610 | { | 610 | { |
| 611 | struct usba_ep *ep = to_usba_ep(_ep); | 611 | struct usba_ep *ep = to_usba_ep(_ep); |
| 612 | struct usba_udc *udc = ep->udc; | 612 | struct usba_udc *udc = ep->udc; |
| 613 | unsigned long flags, ept_cfg, maxpacket; | 613 | unsigned long flags, maxpacket; |
| 614 | unsigned int nr_trans; | 614 | unsigned int nr_trans; |
| 615 | 615 | ||
| 616 | DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc); | 616 | DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc); |
| @@ -630,7 +630,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) | |||
| 630 | ep->is_in = 0; | 630 | ep->is_in = 0; |
| 631 | 631 | ||
| 632 | DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n", | 632 | DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n", |
| 633 | ep->ep.name, ept_cfg, maxpacket); | 633 | ep->ep.name, ep->ept_cfg, maxpacket); |
| 634 | 634 | ||
| 635 | if (usb_endpoint_dir_in(desc)) { | 635 | if (usb_endpoint_dir_in(desc)) { |
| 636 | ep->is_in = 1; | 636 | ep->is_in = 1; |
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index c60abe3a68f9..8cabc5944d5f 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c | |||
| @@ -1031,6 +1031,8 @@ static int dummy_udc_probe(struct platform_device *pdev) | |||
| 1031 | int rc; | 1031 | int rc; |
| 1032 | 1032 | ||
| 1033 | dum = *((void **)dev_get_platdata(&pdev->dev)); | 1033 | dum = *((void **)dev_get_platdata(&pdev->dev)); |
| 1034 | /* Clear usb_gadget region for new registration to udc-core */ | ||
| 1035 | memzero_explicit(&dum->gadget, sizeof(struct usb_gadget)); | ||
| 1034 | dum->gadget.name = gadget_name; | 1036 | dum->gadget.name = gadget_name; |
| 1035 | dum->gadget.ops = &dummy_ops; | 1037 | dum->gadget.ops = &dummy_ops; |
| 1036 | dum->gadget.max_speed = USB_SPEED_SUPER; | 1038 | dum->gadget.max_speed = USB_SPEED_SUPER; |
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 85504419ab31..3828c2ec8623 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c | |||
| @@ -1146,15 +1146,15 @@ static int scan_dma_completions(struct net2280_ep *ep) | |||
| 1146 | */ | 1146 | */ |
| 1147 | while (!list_empty(&ep->queue)) { | 1147 | while (!list_empty(&ep->queue)) { |
| 1148 | struct net2280_request *req; | 1148 | struct net2280_request *req; |
| 1149 | u32 tmp; | 1149 | u32 req_dma_count; |
| 1150 | 1150 | ||
| 1151 | req = list_entry(ep->queue.next, | 1151 | req = list_entry(ep->queue.next, |
| 1152 | struct net2280_request, queue); | 1152 | struct net2280_request, queue); |
| 1153 | if (!req->valid) | 1153 | if (!req->valid) |
| 1154 | break; | 1154 | break; |
| 1155 | rmb(); | 1155 | rmb(); |
| 1156 | tmp = le32_to_cpup(&req->td->dmacount); | 1156 | req_dma_count = le32_to_cpup(&req->td->dmacount); |
| 1157 | if ((tmp & BIT(VALID_BIT)) != 0) | 1157 | if ((req_dma_count & BIT(VALID_BIT)) != 0) |
| 1158 | break; | 1158 | break; |
| 1159 | 1159 | ||
| 1160 | /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short" | 1160 | /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short" |
| @@ -1163,40 +1163,41 @@ static int scan_dma_completions(struct net2280_ep *ep) | |||
| 1163 | */ | 1163 | */ |
| 1164 | if (unlikely(req->td->dmadesc == 0)) { | 1164 | if (unlikely(req->td->dmadesc == 0)) { |
| 1165 | /* paranoia */ | 1165 | /* paranoia */ |
| 1166 | tmp = readl(&ep->dma->dmacount); | 1166 | u32 const ep_dmacount = readl(&ep->dma->dmacount); |
| 1167 | if (tmp & DMA_BYTE_COUNT_MASK) | 1167 | |
| 1168 | if (ep_dmacount & DMA_BYTE_COUNT_MASK) | ||
| 1168 | break; | 1169 | break; |
| 1169 | /* single transfer mode */ | 1170 | /* single transfer mode */ |
| 1170 | dma_done(ep, req, tmp, 0); | 1171 | dma_done(ep, req, req_dma_count, 0); |
| 1171 | num_completed++; | 1172 | num_completed++; |
| 1172 | break; | 1173 | break; |
| 1173 | } else if (!ep->is_in && | 1174 | } else if (!ep->is_in && |
| 1174 | (req->req.length % ep->ep.maxpacket) && | 1175 | (req->req.length % ep->ep.maxpacket) && |
| 1175 | !(ep->dev->quirks & PLX_PCIE)) { | 1176 | !(ep->dev->quirks & PLX_PCIE)) { |
| 1176 | 1177 | ||
| 1177 | tmp = readl(&ep->regs->ep_stat); | 1178 | u32 const ep_stat = readl(&ep->regs->ep_stat); |
| 1178 | /* AVOID TROUBLE HERE by not issuing short reads from | 1179 | /* AVOID TROUBLE HERE by not issuing short reads from |
| 1179 | * your gadget driver. That helps avoid errata 0121, | 1180 | * your gadget driver. That helps avoid errata 0121, |
| 1180 | * 0122, and 0124; not all cases trigger the warning. | 1181 | * 0122, and 0124; not all cases trigger the warning. |
| 1181 | */ | 1182 | */ |
| 1182 | if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) { | 1183 | if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) { |
| 1183 | ep_warn(ep->dev, "%s lost packet sync!\n", | 1184 | ep_warn(ep->dev, "%s lost packet sync!\n", |
| 1184 | ep->ep.name); | 1185 | ep->ep.name); |
| 1185 | req->req.status = -EOVERFLOW; | 1186 | req->req.status = -EOVERFLOW; |
| 1186 | } else { | 1187 | } else { |
| 1187 | tmp = readl(&ep->regs->ep_avail); | 1188 | u32 const ep_avail = readl(&ep->regs->ep_avail); |
| 1188 | if (tmp) { | 1189 | if (ep_avail) { |
| 1189 | /* fifo gets flushed later */ | 1190 | /* fifo gets flushed later */ |
| 1190 | ep->out_overflow = 1; | 1191 | ep->out_overflow = 1; |
| 1191 | ep_dbg(ep->dev, | 1192 | ep_dbg(ep->dev, |
| 1192 | "%s dma, discard %d len %d\n", | 1193 | "%s dma, discard %d len %d\n", |
| 1193 | ep->ep.name, tmp, | 1194 | ep->ep.name, ep_avail, |
| 1194 | req->req.length); | 1195 | req->req.length); |
| 1195 | req->req.status = -EOVERFLOW; | 1196 | req->req.status = -EOVERFLOW; |
| 1196 | } | 1197 | } |
| 1197 | } | 1198 | } |
| 1198 | } | 1199 | } |
| 1199 | dma_done(ep, req, tmp, 0); | 1200 | dma_done(ep, req, req_dma_count, 0); |
| 1200 | num_completed++; | 1201 | num_completed++; |
| 1201 | } | 1202 | } |
| 1202 | 1203 | ||
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c index e1335ad5bce9..832c4fdbe985 100644 --- a/drivers/usb/gadget/udc/pxa27x_udc.c +++ b/drivers/usb/gadget/udc/pxa27x_udc.c | |||
| @@ -2534,9 +2534,10 @@ static int pxa_udc_remove(struct platform_device *_dev) | |||
| 2534 | usb_del_gadget_udc(&udc->gadget); | 2534 | usb_del_gadget_udc(&udc->gadget); |
| 2535 | pxa_cleanup_debugfs(udc); | 2535 | pxa_cleanup_debugfs(udc); |
| 2536 | 2536 | ||
| 2537 | if (!IS_ERR_OR_NULL(udc->transceiver)) | 2537 | if (!IS_ERR_OR_NULL(udc->transceiver)) { |
| 2538 | usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy); | 2538 | usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy); |
| 2539 | usb_put_phy(udc->transceiver); | 2539 | usb_put_phy(udc->transceiver); |
| 2540 | } | ||
| 2540 | 2541 | ||
| 2541 | udc->transceiver = NULL; | 2542 | udc->transceiver = NULL; |
| 2542 | the_controller = NULL; | 2543 | the_controller = NULL; |
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 414e3c376dbb..5302f988e7e6 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c | |||
| @@ -350,7 +350,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
| 350 | 350 | ||
| 351 | case USB_PORT_FEAT_SUSPEND: | 351 | case USB_PORT_FEAT_SUSPEND: |
| 352 | dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n"); | 352 | dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n"); |
| 353 | if (valid_port(wIndex)) { | 353 | if (valid_port(wIndex) && ohci_at91->sfr_regmap) { |
| 354 | ohci_at91_port_suspend(ohci_at91->sfr_regmap, | 354 | ohci_at91_port_suspend(ohci_at91->sfr_regmap, |
| 355 | 1); | 355 | 1); |
| 356 | return 0; | 356 | return 0; |
| @@ -393,7 +393,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
| 393 | 393 | ||
| 394 | case USB_PORT_FEAT_SUSPEND: | 394 | case USB_PORT_FEAT_SUSPEND: |
| 395 | dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n"); | 395 | dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n"); |
| 396 | if (valid_port(wIndex)) { | 396 | if (valid_port(wIndex) && ohci_at91->sfr_regmap) { |
| 397 | ohci_at91_port_suspend(ohci_at91->sfr_regmap, | 397 | ohci_at91_port_suspend(ohci_at91->sfr_regmap, |
| 398 | 0); | 398 | 0); |
| 399 | return 0; | 399 | return 0; |
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 363d125300ea..2b4a00fa735d 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c | |||
| @@ -109,7 +109,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci) | |||
| 109 | xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK); | 109 | xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK); |
| 110 | 110 | ||
| 111 | /* xhci 1.1 controllers have the HCCPARAMS2 register */ | 111 | /* xhci 1.1 controllers have the HCCPARAMS2 register */ |
| 112 | if (hci_version > 100) { | 112 | if (hci_version > 0x100) { |
| 113 | temp = readl(&xhci->cap_regs->hcc_params2); | 113 | temp = readl(&xhci->cap_regs->hcc_params2); |
| 114 | xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp); | 114 | xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp); |
| 115 | xhci_dbg(xhci, " HC %s Force save context capability", | 115 | xhci_dbg(xhci, " HC %s Force save context capability", |
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 9066ec9e0c2e..67d5dc79b6b5 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c | |||
| @@ -382,7 +382,6 @@ static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk, | |||
| 382 | 382 | ||
| 383 | static int xhci_mtk_setup(struct usb_hcd *hcd); | 383 | static int xhci_mtk_setup(struct usb_hcd *hcd); |
| 384 | static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = { | 384 | static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = { |
| 385 | .extra_priv_size = sizeof(struct xhci_hcd), | ||
| 386 | .reset = xhci_mtk_setup, | 385 | .reset = xhci_mtk_setup, |
| 387 | }; | 386 | }; |
| 388 | 387 | ||
| @@ -678,13 +677,13 @@ static int xhci_mtk_probe(struct platform_device *pdev) | |||
| 678 | goto power_off_phys; | 677 | goto power_off_phys; |
| 679 | } | 678 | } |
| 680 | 679 | ||
| 681 | if (HCC_MAX_PSA(xhci->hcc_params) >= 4) | ||
| 682 | xhci->shared_hcd->can_do_streams = 1; | ||
| 683 | |||
| 684 | ret = usb_add_hcd(hcd, irq, IRQF_SHARED); | 680 | ret = usb_add_hcd(hcd, irq, IRQF_SHARED); |
| 685 | if (ret) | 681 | if (ret) |
| 686 | goto put_usb3_hcd; | 682 | goto put_usb3_hcd; |
| 687 | 683 | ||
| 684 | if (HCC_MAX_PSA(xhci->hcc_params) >= 4) | ||
| 685 | xhci->shared_hcd->can_do_streams = 1; | ||
| 686 | |||
| 688 | ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); | 687 | ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); |
| 689 | if (ret) | 688 | if (ret) |
| 690 | goto dealloc_usb2_hcd; | 689 | goto dealloc_usb2_hcd; |
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 6d33b42ffcf5..bd02a6cd8e2c 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c | |||
| @@ -286,6 +286,8 @@ static int xhci_plat_remove(struct platform_device *dev) | |||
| 286 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 286 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 287 | struct clk *clk = xhci->clk; | 287 | struct clk *clk = xhci->clk; |
| 288 | 288 | ||
| 289 | xhci->xhc_state |= XHCI_STATE_REMOVING; | ||
| 290 | |||
| 289 | usb_remove_hcd(xhci->shared_hcd); | 291 | usb_remove_hcd(xhci->shared_hcd); |
| 290 | usb_phy_shutdown(hcd->usb_phy); | 292 | usb_phy_shutdown(hcd->usb_phy); |
| 291 | 293 | ||
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index a59fafb4b329..74436f8ca538 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c | |||
| @@ -1308,7 +1308,6 @@ static int tegra_xhci_setup(struct usb_hcd *hcd) | |||
| 1308 | } | 1308 | } |
| 1309 | 1309 | ||
| 1310 | static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = { | 1310 | static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = { |
| 1311 | .extra_priv_size = sizeof(struct xhci_hcd), | ||
| 1312 | .reset = tegra_xhci_setup, | 1311 | .reset = tegra_xhci_setup, |
| 1313 | }; | 1312 | }; |
| 1314 | 1313 | ||
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 6d6c46000e56..50aee8b7718b 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
| @@ -868,7 +868,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) | |||
| 868 | 868 | ||
| 869 | spin_lock_irqsave(&xhci->lock, flags); | 869 | spin_lock_irqsave(&xhci->lock, flags); |
| 870 | 870 | ||
| 871 | /* disble usb3 ports Wake bits*/ | 871 | /* disable usb3 ports Wake bits */ |
| 872 | port_index = xhci->num_usb3_ports; | 872 | port_index = xhci->num_usb3_ports; |
| 873 | port_array = xhci->usb3_ports; | 873 | port_array = xhci->usb3_ports; |
| 874 | while (port_index--) { | 874 | while (port_index--) { |
| @@ -879,7 +879,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) | |||
| 879 | writel(t2, port_array[port_index]); | 879 | writel(t2, port_array[port_index]); |
| 880 | } | 880 | } |
| 881 | 881 | ||
| 882 | /* disble usb2 ports Wake bits*/ | 882 | /* disable usb2 ports Wake bits */ |
| 883 | port_index = xhci->num_usb2_ports; | 883 | port_index = xhci->num_usb2_ports; |
| 884 | port_array = xhci->usb2_ports; | 884 | port_array = xhci->usb2_ports; |
| 885 | while (port_index--) { | 885 | while (port_index--) { |
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 095778ff984d..37c63cb39714 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c | |||
| @@ -781,12 +781,6 @@ static int iowarrior_probe(struct usb_interface *interface, | |||
| 781 | iface_desc = interface->cur_altsetting; | 781 | iface_desc = interface->cur_altsetting; |
| 782 | dev->product_id = le16_to_cpu(udev->descriptor.idProduct); | 782 | dev->product_id = le16_to_cpu(udev->descriptor.idProduct); |
| 783 | 783 | ||
| 784 | if (iface_desc->desc.bNumEndpoints < 1) { | ||
| 785 | dev_err(&interface->dev, "Invalid number of endpoints\n"); | ||
| 786 | retval = -EINVAL; | ||
| 787 | goto error; | ||
| 788 | } | ||
| 789 | |||
| 790 | /* set up the endpoint information */ | 784 | /* set up the endpoint information */ |
| 791 | for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { | 785 | for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { |
| 792 | endpoint = &iface_desc->endpoint[i].desc; | 786 | endpoint = &iface_desc->endpoint[i].desc; |
| @@ -797,6 +791,21 @@ static int iowarrior_probe(struct usb_interface *interface, | |||
| 797 | /* this one will match for the IOWarrior56 only */ | 791 | /* this one will match for the IOWarrior56 only */ |
| 798 | dev->int_out_endpoint = endpoint; | 792 | dev->int_out_endpoint = endpoint; |
| 799 | } | 793 | } |
| 794 | |||
| 795 | if (!dev->int_in_endpoint) { | ||
| 796 | dev_err(&interface->dev, "no interrupt-in endpoint found\n"); | ||
| 797 | retval = -ENODEV; | ||
| 798 | goto error; | ||
| 799 | } | ||
| 800 | |||
| 801 | if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) { | ||
| 802 | if (!dev->int_out_endpoint) { | ||
| 803 | dev_err(&interface->dev, "no interrupt-out endpoint found\n"); | ||
| 804 | retval = -ENODEV; | ||
| 805 | goto error; | ||
| 806 | } | ||
| 807 | } | ||
| 808 | |||
| 800 | /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ | 809 | /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ |
| 801 | dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); | 810 | dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); |
| 802 | if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && | 811 | if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && |
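Rather than only checking bNumEndpoints, the iowarrior probe now verifies that the specific endpoints it is about to use exist: an interrupt-in endpoint always, and an interrupt-out endpoint for the IOWarrior56. A hedged sketch of the same validation for a generic driver; the function name and the needs_int_out parameter are illustrative, not the driver's API.

#include <linux/usb.h>

/* Return 0 if the interface exposes the endpoints this driver needs. */
static int my_driver_check_endpoints(struct usb_interface *intf,
                                     bool needs_int_out)
{
    struct usb_host_interface *alt = intf->cur_altsetting;
    struct usb_endpoint_descriptor *epd;
    bool have_int_in = false, have_int_out = false;
    int i;

    for (i = 0; i < alt->desc.bNumEndpoints; i++) {
        epd = &alt->endpoint[i].desc;
        if (usb_endpoint_is_int_in(epd))
            have_int_in = true;
        else if (usb_endpoint_is_int_out(epd))
            have_int_out = true;
    }

    if (!have_int_in)
        return -ENODEV;
    if (needs_int_out && !have_int_out)
        return -ENODEV;
    return 0;
}

Failing with -ENODEV at this point prevents the later dereference of dev->int_in_endpoint (visible in the hunk above) when a broken or malicious device offers a descriptor without the expected endpoints.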
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index 4e18600dc9b4..91f66d68bcb7 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c | |||
| @@ -375,18 +375,24 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, | |||
| 375 | if (of_get_property(np, "dynamic-power-switching", NULL)) | 375 | if (of_get_property(np, "dynamic-power-switching", NULL)) |
| 376 | hub->conf_data2 |= BIT(7); | 376 | hub->conf_data2 |= BIT(7); |
| 377 | 377 | ||
| 378 | if (of_get_property(np, "oc-delay-100us", NULL)) { | 378 | if (!of_property_read_u32(np, "oc-delay-us", property_u32)) { |
| 379 | hub->conf_data2 &= ~BIT(5); | 379 | if (*property_u32 == 100) { |
| 380 | hub->conf_data2 &= ~BIT(4); | 380 | /* 100 us*/ |
| 381 | } else if (of_get_property(np, "oc-delay-4ms", NULL)) { | 381 | hub->conf_data2 &= ~BIT(5); |
| 382 | hub->conf_data2 &= ~BIT(5); | 382 | hub->conf_data2 &= ~BIT(4); |
| 383 | hub->conf_data2 |= BIT(4); | 383 | } else if (*property_u32 == 4000) { |
| 384 | } else if (of_get_property(np, "oc-delay-8ms", NULL)) { | 384 | /* 4 ms */ |
| 385 | hub->conf_data2 |= BIT(5); | 385 | hub->conf_data2 &= ~BIT(5); |
| 386 | hub->conf_data2 &= ~BIT(4); | 386 | hub->conf_data2 |= BIT(4); |
| 387 | } else if (of_get_property(np, "oc-delay-16ms", NULL)) { | 387 | } else if (*property_u32 == 16000) { |
| 388 | hub->conf_data2 |= BIT(5); | 388 | /* 16 ms */ |
| 389 | hub->conf_data2 |= BIT(4); | 389 | hub->conf_data2 |= BIT(5); |
| 390 | hub->conf_data2 |= BIT(4); | ||
| 391 | } else { | ||
| 392 | /* 8 ms (DEFAULT) */ | ||
| 393 | hub->conf_data2 |= BIT(5); | ||
| 394 | hub->conf_data2 &= ~BIT(4); | ||
| 395 | } | ||
| 390 | } | 396 | } |
| 391 | 397 | ||
| 392 | if (of_get_property(np, "compound-device", NULL)) | 398 | if (of_get_property(np, "compound-device", NULL)) |
| @@ -432,30 +438,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, | |||
| 432 | } | 438 | } |
| 433 | } | 439 | } |
| 434 | 440 | ||
| 435 | hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF; | ||
| 436 | if (!of_property_read_u32(np, "max-sp-power", property_u32)) | ||
| 437 | hub->max_power_sp = min_t(u8, be32_to_cpu(*property_u32) / 2, | ||
| 438 | 250); | ||
| 439 | |||
| 440 | hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS; | ||
| 441 | if (!of_property_read_u32(np, "max-bp-power", property_u32)) | ||
| 442 | hub->max_power_bp = min_t(u8, be32_to_cpu(*property_u32) / 2, | ||
| 443 | 250); | ||
| 444 | |||
| 445 | hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF; | ||
| 446 | if (!of_property_read_u32(np, "max-sp-current", property_u32)) | ||
| 447 | hub->max_current_sp = min_t(u8, be32_to_cpu(*property_u32) / 2, | ||
| 448 | 250); | ||
| 449 | |||
| 450 | hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS; | ||
| 451 | if (!of_property_read_u32(np, "max-bp-current", property_u32)) | ||
| 452 | hub->max_current_bp = min_t(u8, be32_to_cpu(*property_u32) / 2, | ||
| 453 | 250); | ||
| 454 | |||
| 455 | hub->power_on_time = USB251XB_DEF_POWER_ON_TIME; | 441 | hub->power_on_time = USB251XB_DEF_POWER_ON_TIME; |
| 456 | if (!of_property_read_u32(np, "power-on-time", property_u32)) | 442 | if (!of_property_read_u32(np, "power-on-time-ms", property_u32)) |
| 457 | hub->power_on_time = min_t(u8, be32_to_cpu(*property_u32) / 2, | 443 | hub->power_on_time = min_t(u8, *property_u32 / 2, 255); |
| 458 | 255); | ||
| 459 | 444 | ||
| 460 | if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1)) | 445 | if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1)) |
| 461 | hub->lang_id = USB251XB_DEF_LANGUAGE_ID; | 446 | hub->lang_id = USB251XB_DEF_LANGUAGE_ID; |
| @@ -492,6 +477,10 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, | |||
| 492 | /* The following parameters are currently not exposed to devicetree, but | 477 | /* The following parameters are currently not exposed to devicetree, but |
| 493 | * may be as soon as needed. | 478 | * may be as soon as needed. |
| 494 | */ | 479 | */ |
| 480 | hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF; | ||
| 481 | hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS; | ||
| 482 | hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF; | ||
| 483 | hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS; | ||
| 495 | hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE; | 484 | hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE; |
| 496 | hub->boost_up = USB251XB_DEF_BOOST_UP; | 485 | hub->boost_up = USB251XB_DEF_BOOST_UP; |
| 497 | hub->boost_x = USB251XB_DEF_BOOST_X; | 486 | hub->boost_x = USB251XB_DEF_BOOST_X; |
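Besides switching to the oc-delay-us and power-on-time-ms properties, the usb251xb hunks drop the spurious be32_to_cpu() calls: of_property_read_u32() already returns the value in CPU byte order, so converting again corrupts it on little-endian systems. A short sketch of reading such an optional delay with a default; only the power-on-time-ms property name comes from the hunk above, the other names are made up.

#include <linux/of.h>
#include <linux/kernel.h>

#define MY_DEF_POWER_ON_TIME_MS 100

static u8 my_read_power_on_time(struct device_node *np)
{
    u32 ms = MY_DEF_POWER_ON_TIME_MS;

    /* The value arrives in CPU endianness; no be32_to_cpu() needed.
     * On a missing property, ms keeps its default. */
    of_property_read_u32(np, "power-on-time-ms", &ms);

    /* The register stores the delay in 2 ms units, capped at 255. */
    return min_t(u32, ms / 2, 255);
}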
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c index db68156568e6..b3b33cf7ddf6 100644 --- a/drivers/usb/phy/phy-isp1301.c +++ b/drivers/usb/phy/phy-isp1301.c | |||
| @@ -33,6 +33,12 @@ static const struct i2c_device_id isp1301_id[] = { | |||
| 33 | }; | 33 | }; |
| 34 | MODULE_DEVICE_TABLE(i2c, isp1301_id); | 34 | MODULE_DEVICE_TABLE(i2c, isp1301_id); |
| 35 | 35 | ||
| 36 | static const struct of_device_id isp1301_of_match[] = { | ||
| 37 | {.compatible = "nxp,isp1301" }, | ||
| 38 | { }, | ||
| 39 | }; | ||
| 40 | MODULE_DEVICE_TABLE(of, isp1301_of_match); | ||
| 41 | |||
| 36 | static struct i2c_client *isp1301_i2c_client; | 42 | static struct i2c_client *isp1301_i2c_client; |
| 37 | 43 | ||
| 38 | static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear) | 44 | static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear) |
| @@ -130,6 +136,7 @@ static int isp1301_remove(struct i2c_client *client) | |||
| 130 | static struct i2c_driver isp1301_driver = { | 136 | static struct i2c_driver isp1301_driver = { |
| 131 | .driver = { | 137 | .driver = { |
| 132 | .name = DRV_NAME, | 138 | .name = DRV_NAME, |
| 139 | .of_match_table = of_match_ptr(isp1301_of_match), | ||
| 133 | }, | 140 | }, |
| 134 | .probe = isp1301_probe, | 141 | .probe = isp1301_probe, |
| 135 | .remove = isp1301_remove, | 142 | .remove = isp1301_remove, |
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index ab78111e0968..6537d3ca2797 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c | |||
| @@ -1500,7 +1500,7 @@ static int digi_read_oob_callback(struct urb *urb) | |||
| 1500 | return -1; | 1500 | return -1; |
| 1501 | 1501 | ||
| 1502 | /* handle each oob command */ | 1502 | /* handle each oob command */ |
| 1503 | for (i = 0; i < urb->actual_length - 4; i += 4) { | 1503 | for (i = 0; i < urb->actual_length - 3; i += 4) { |
| 1504 | opcode = buf[i]; | 1504 | opcode = buf[i]; |
| 1505 | line = buf[i + 1]; | 1505 | line = buf[i + 1]; |
| 1506 | status = buf[i + 2]; | 1506 | status = buf[i + 2]; |
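The digi_acceleport fix adjusts the loop bound so a record is parsed only when all four of its bytes (opcode, line, status and the value byte) lie within urb->actual_length: with four-byte records the last valid start index is actual_length - 4, i.e. the condition must be i < actual_length - 3, not - 4. A generic sketch of an equivalent way to write such a bound, which also sidesteps unsigned underflow when the buffer is shorter than one record; the function below is illustrative, not the driver's.

#include <stddef.h>

#define RECORD_SIZE 4    /* opcode, line, status, value */

/*
 * Parse fixed-size records from a buffer. Writing the bound as
 * "i + RECORD_SIZE <= len" keeps the intent explicit and stays safe
 * for unsigned len even when len < RECORD_SIZE.
 */
static void parse_records(const unsigned char *buf, size_t len)
{
    size_t i;

    for (i = 0; i + RECORD_SIZE <= len; i += RECORD_SIZE) {
        unsigned char opcode = buf[i];
        unsigned char line   = buf[i + 1];
        unsigned char status = buf[i + 2];
        unsigned char value  = buf[i + 3];

        /* ... handle one record ... */
        (void)opcode; (void)line; (void)status; (void)value;
    }
}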
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index ceaeebaa6f90..a76b95d32157 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c | |||
| @@ -1674,6 +1674,12 @@ static void edge_interrupt_callback(struct urb *urb) | |||
| 1674 | function = TIUMP_GET_FUNC_FROM_CODE(data[0]); | 1674 | function = TIUMP_GET_FUNC_FROM_CODE(data[0]); |
| 1675 | dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__, | 1675 | dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__, |
| 1676 | port_number, function, data[1]); | 1676 | port_number, function, data[1]); |
| 1677 | |||
| 1678 | if (port_number >= edge_serial->serial->num_ports) { | ||
| 1679 | dev_err(dev, "bad port number %d\n", port_number); | ||
| 1680 | goto exit; | ||
| 1681 | } | ||
| 1682 | |||
| 1677 | port = edge_serial->serial->port[port_number]; | 1683 | port = edge_serial->serial->port[port_number]; |
| 1678 | edge_port = usb_get_serial_port_data(port); | 1684 | edge_port = usb_get_serial_port_data(port); |
| 1679 | if (!edge_port) { | 1685 | if (!edge_port) { |
| @@ -1755,7 +1761,7 @@ static void edge_bulk_in_callback(struct urb *urb) | |||
| 1755 | 1761 | ||
| 1756 | port_number = edge_port->port->port_number; | 1762 | port_number = edge_port->port->port_number; |
| 1757 | 1763 | ||
| 1758 | if (edge_port->lsr_event) { | 1764 | if (urb->actual_length > 0 && edge_port->lsr_event) { |
| 1759 | edge_port->lsr_event = 0; | 1765 | edge_port->lsr_event = 0; |
| 1760 | dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n", | 1766 | dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n", |
| 1761 | __func__, port_number, edge_port->lsr_mask, *data); | 1767 | __func__, port_number, edge_port->lsr_mask, *data); |
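
The io_ti hunks harden two URB completion paths: the interrupt handler now range-checks the port number it parsed from the packet before indexing edge_serial->serial->port[], and the bulk-in handler only dereferences *data for an LSR event when the URB actually carried data. A compact sketch of the same validate-before-use pattern; the function and parameter names below are stand-ins, not driver code:

    #include <errno.h>

    /* Validate both the index and the length that arrived off the wire
     * before touching any driver state. */
    static int handle_event(const unsigned char *data, unsigned int actual_length,
                            unsigned int port_number, unsigned int num_ports)
    {
            if (port_number >= num_ports)
                    return -EINVAL;   /* would index port[] out of bounds */
            if (actual_length < 1)
                    return -EINVAL;   /* would read past an empty buffer */
            return data[0];           /* safe: hand back the event code */
    }
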
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c index a180b17d2432..dd706953b466 100644 --- a/drivers/usb/serial/omninet.c +++ b/drivers/usb/serial/omninet.c | |||
| @@ -31,7 +31,6 @@ | |||
| 31 | #define BT_IGNITIONPRO_ID 0x2000 | 31 | #define BT_IGNITIONPRO_ID 0x2000 |
| 32 | 32 | ||
| 33 | /* function prototypes */ | 33 | /* function prototypes */ |
| 34 | static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port); | ||
| 35 | static void omninet_process_read_urb(struct urb *urb); | 34 | static void omninet_process_read_urb(struct urb *urb); |
| 36 | static void omninet_write_bulk_callback(struct urb *urb); | 35 | static void omninet_write_bulk_callback(struct urb *urb); |
| 37 | static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, | 36 | static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, |
| @@ -60,7 +59,6 @@ static struct usb_serial_driver zyxel_omninet_device = { | |||
| 60 | .attach = omninet_attach, | 59 | .attach = omninet_attach, |
| 61 | .port_probe = omninet_port_probe, | 60 | .port_probe = omninet_port_probe, |
| 62 | .port_remove = omninet_port_remove, | 61 | .port_remove = omninet_port_remove, |
| 63 | .open = omninet_open, | ||
| 64 | .write = omninet_write, | 62 | .write = omninet_write, |
| 65 | .write_room = omninet_write_room, | 63 | .write_room = omninet_write_room, |
| 66 | .write_bulk_callback = omninet_write_bulk_callback, | 64 | .write_bulk_callback = omninet_write_bulk_callback, |
| @@ -140,17 +138,6 @@ static int omninet_port_remove(struct usb_serial_port *port) | |||
| 140 | return 0; | 138 | return 0; |
| 141 | } | 139 | } |
| 142 | 140 | ||
| 143 | static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port) | ||
| 144 | { | ||
| 145 | struct usb_serial *serial = port->serial; | ||
| 146 | struct usb_serial_port *wport; | ||
| 147 | |||
| 148 | wport = serial->port[1]; | ||
| 149 | tty_port_tty_set(&wport->port, tty); | ||
| 150 | |||
| 151 | return usb_serial_generic_open(tty, port); | ||
| 152 | } | ||
| 153 | |||
| 154 | #define OMNINET_HEADERLEN 4 | 141 | #define OMNINET_HEADERLEN 4 |
| 155 | #define OMNINET_BULKOUTSIZE 64 | 142 | #define OMNINET_BULKOUTSIZE 64 |
| 156 | #define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN) | 143 | #define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN) |
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c index 93c6c9b08daa..8a069aa154ed 100644 --- a/drivers/usb/serial/safe_serial.c +++ b/drivers/usb/serial/safe_serial.c | |||
| @@ -200,6 +200,11 @@ static void safe_process_read_urb(struct urb *urb) | |||
| 200 | if (!safe) | 200 | if (!safe) |
| 201 | goto out; | 201 | goto out; |
| 202 | 202 | ||
| 203 | if (length < 2) { | ||
| 204 | dev_err(&port->dev, "malformed packet\n"); | ||
| 205 | return; | ||
| 206 | } | ||
| 207 | |||
| 203 | fcs = fcs_compute10(data, length, CRC10_INITFCS); | 208 | fcs = fcs_compute10(data, length, CRC10_INITFCS); |
| 204 | if (fcs) { | 209 | if (fcs) { |
| 205 | dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs); | 210 | dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs); |
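
The safe_serial guard rejects runt packets before any CRC work is done: the "safe" encapsulation keeps its framing information, including the CRC10, in the last two bytes, so a buffer shorter than that cannot be validated, and the unsigned arithmetic used on URB lengths offers no safety net once the guard is missing. The exact downstream calculation is not reproduced here; the snippet only shows how an unsigned length of 1 wraps instead of going negative:

    #include <stdio.h>

    int main(void)
    {
            unsigned int length = 1;    /* a runt packet */

            /* Index math such as data[length - 2] wraps to a huge value. */
            printf("length - 2 = %u\n", length - 2);    /* 4294967295 */
            return 0;
    }
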
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 16cc18369111..9129f6cb8230 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
| @@ -2071,6 +2071,20 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110, | |||
| 2071 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 2071 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
| 2072 | US_FL_IGNORE_RESIDUE ), | 2072 | US_FL_IGNORE_RESIDUE ), |
| 2073 | 2073 | ||
| 2074 | /* | ||
| 2075 | * Reported by Tobias Jakobi <tjakobi@math.uni-bielefeld.de> | ||
| 2076 | * The INIC-3619 bridge is used in the StarTech SLSODDU33B | ||
| 2077 | * SATA-USB enclosure for slimline optical drives. | ||
| 2078 | * | ||
| 2079 | * The quirk enables MakeMKV to properly exchange keys with | ||
| 2080 | * an installed BD drive. | ||
| 2081 | */ | ||
| 2082 | UNUSUAL_DEV( 0x13fd, 0x3609, 0x0209, 0x0209, | ||
| 2083 | "Initio Corporation", | ||
| 2084 | "INIC-3619", | ||
| 2085 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
| 2086 | US_FL_IGNORE_RESIDUE ), | ||
| 2087 | |||
| 2074 | /* Reported by Qinglin Ye <yestyle@gmail.com> */ | 2088 | /* Reported by Qinglin Ye <yestyle@gmail.com> */ |
| 2075 | UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100, | 2089 | UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100, |
| 2076 | "Kingston", | 2090 | "Kingston", |
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index f8afc6dcc29f..e8cef1ad0fe3 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c | |||
| @@ -681,3 +681,50 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask) | |||
| 681 | return 0; | 681 | return 0; |
| 682 | } | 682 | } |
| 683 | EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask); | 683 | EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask); |
| 684 | |||
| 685 | /* | ||
| 686 | * Create userspace mapping for the DMA-coherent memory. | ||
| 687 | * This function should be called with the pages from the current domain only, | ||
| 688 | * passing pages mapped from other domains would lead to memory corruption. | ||
| 689 | */ | ||
| 690 | int | ||
| 691 | xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma, | ||
| 692 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | ||
| 693 | unsigned long attrs) | ||
| 694 | { | ||
| 695 | #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) | ||
| 696 | if (__generic_dma_ops(dev)->mmap) | ||
| 697 | return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr, | ||
| 698 | dma_addr, size, attrs); | ||
| 699 | #endif | ||
| 700 | return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); | ||
| 701 | } | ||
| 702 | EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap); | ||
| 703 | |||
| 704 | /* | ||
| 705 | * This function should be called with the pages from the current domain only, | ||
| 706 | * passing pages mapped from other domains would lead to memory corruption. | ||
| 707 | */ | ||
| 708 | int | ||
| 709 | xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
| 710 | void *cpu_addr, dma_addr_t handle, size_t size, | ||
| 711 | unsigned long attrs) | ||
| 712 | { | ||
| 713 | #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) | ||
| 714 | if (__generic_dma_ops(dev)->get_sgtable) { | ||
| 715 | #if 0 | ||
| 716 | /* | ||
| 717 | * This check verifies that the page belongs to the current domain and | ||
| 718 | * is not one mapped from another domain. | ||
| 719 | * This check is for debug only, and should not go to production build | ||
| 720 | */ | ||
| 721 | unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle)); | ||
| 722 | BUG_ON (!page_is_ram(bfn)); | ||
| 723 | #endif | ||
| 724 | return __generic_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr, | ||
| 725 | handle, size, attrs); | ||
| 726 | } | ||
| 727 | #endif | ||
| 728 | return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size); | ||
| 729 | } | ||
| 730 | EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable); | ||
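
The two new Xen swiotlb exports only become reachable once they are plugged into a DMA ops table, which happens outside this hunk. A minimal sketch of that wiring, assuming the prototypes are made visible through include/xen/swiotlb-xen.h and using the struct dma_map_ops field names of this kernel generation; the table is illustrative and elides the existing alloc/map/unmap callbacks rather than reproducing the real xen_swiotlb_dma_ops definition:

    #include <linux/dma-mapping.h>
    #include <xen/swiotlb-xen.h>

    static const struct dma_map_ops xen_swiotlb_dma_ops_sketch = {
            /* ...coherent alloc/free and map/unmap callbacks elided... */
            .mmap           = xen_swiotlb_dma_mmap,
            .get_sgtable    = xen_swiotlb_get_sgtable,
    };

With such an ops table in place, dma_mmap_attrs() and dma_get_sgtable_attrs() on a device using it land in the new helpers, which defer to the generic ARM implementations where available and fall back to the common code otherwise.
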
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 4d343eed08f5..1f4733b80c87 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c | |||
| @@ -55,7 +55,6 @@ | |||
| 55 | #include <linux/string.h> | 55 | #include <linux/string.h> |
| 56 | #include <linux/slab.h> | 56 | #include <linux/slab.h> |
| 57 | #include <linux/miscdevice.h> | 57 | #include <linux/miscdevice.h> |
| 58 | #include <linux/init.h> | ||
| 59 | 58 | ||
| 60 | #include <xen/xenbus.h> | 59 | #include <xen/xenbus.h> |
| 61 | #include <xen/xen.h> | 60 | #include <xen/xen.h> |
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 338d2f73eb29..a2c05f2ada6d 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
| @@ -1359,6 +1359,16 @@ out: | |||
| 1359 | return 0; | 1359 | return 0; |
| 1360 | } | 1360 | } |
| 1361 | 1361 | ||
| 1362 | static void fat_dummy_inode_init(struct inode *inode) | ||
| 1363 | { | ||
| 1364 | /* Initialize this dummy inode to work as no-op. */ | ||
| 1365 | MSDOS_I(inode)->mmu_private = 0; | ||
| 1366 | MSDOS_I(inode)->i_start = 0; | ||
| 1367 | MSDOS_I(inode)->i_logstart = 0; | ||
| 1368 | MSDOS_I(inode)->i_attrs = 0; | ||
| 1369 | MSDOS_I(inode)->i_pos = 0; | ||
| 1370 | } | ||
| 1371 | |||
| 1362 | static int fat_read_root(struct inode *inode) | 1372 | static int fat_read_root(struct inode *inode) |
| 1363 | { | 1373 | { |
| 1364 | struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); | 1374 | struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); |
| @@ -1803,12 +1813,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, | |||
| 1803 | fat_inode = new_inode(sb); | 1813 | fat_inode = new_inode(sb); |
| 1804 | if (!fat_inode) | 1814 | if (!fat_inode) |
| 1805 | goto out_fail; | 1815 | goto out_fail; |
| 1806 | MSDOS_I(fat_inode)->i_pos = 0; | 1816 | fat_dummy_inode_init(fat_inode); |
| 1807 | sbi->fat_inode = fat_inode; | 1817 | sbi->fat_inode = fat_inode; |
| 1808 | 1818 | ||
| 1809 | fsinfo_inode = new_inode(sb); | 1819 | fsinfo_inode = new_inode(sb); |
| 1810 | if (!fsinfo_inode) | 1820 | if (!fsinfo_inode) |
| 1811 | goto out_fail; | 1821 | goto out_fail; |
| 1822 | fat_dummy_inode_init(fsinfo_inode); | ||
| 1812 | fsinfo_inode->i_ino = MSDOS_FSINFO_INO; | 1823 | fsinfo_inode->i_ino = MSDOS_FSINFO_INO; |
| 1813 | sbi->fsinfo_inode = fsinfo_inode; | 1824 | sbi->fsinfo_inode = fsinfo_inode; |
| 1814 | insert_inode_hash(fsinfo_inode); | 1825 | insert_inode_hash(fsinfo_inode); |
diff --git a/fs/iomap.c b/fs/iomap.c index 3ca1a8e44135..141c3cd55a8b 100644 --- a/fs/iomap.c +++ b/fs/iomap.c | |||
| @@ -846,7 +846,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
| 846 | struct address_space *mapping = iocb->ki_filp->f_mapping; | 846 | struct address_space *mapping = iocb->ki_filp->f_mapping; |
| 847 | struct inode *inode = file_inode(iocb->ki_filp); | 847 | struct inode *inode = file_inode(iocb->ki_filp); |
| 848 | size_t count = iov_iter_count(iter); | 848 | size_t count = iov_iter_count(iter); |
| 849 | loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0; | 849 | loff_t pos = iocb->ki_pos, start = pos; |
| 850 | loff_t end = iocb->ki_pos + count - 1, ret = 0; | ||
| 850 | unsigned int flags = IOMAP_DIRECT; | 851 | unsigned int flags = IOMAP_DIRECT; |
| 851 | struct blk_plug plug; | 852 | struct blk_plug plug; |
| 852 | struct iomap_dio *dio; | 853 | struct iomap_dio *dio; |
| @@ -887,12 +888,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
| 887 | } | 888 | } |
| 888 | 889 | ||
| 889 | if (mapping->nrpages) { | 890 | if (mapping->nrpages) { |
| 890 | ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end); | 891 | ret = filemap_write_and_wait_range(mapping, start, end); |
| 891 | if (ret) | 892 | if (ret) |
| 892 | goto out_free_dio; | 893 | goto out_free_dio; |
| 893 | 894 | ||
| 894 | ret = invalidate_inode_pages2_range(mapping, | 895 | ret = invalidate_inode_pages2_range(mapping, |
| 895 | iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT); | 896 | start >> PAGE_SHIFT, end >> PAGE_SHIFT); |
| 896 | WARN_ON_ONCE(ret); | 897 | WARN_ON_ONCE(ret); |
| 897 | ret = 0; | 898 | ret = 0; |
| 898 | } | 899 | } |
| @@ -941,6 +942,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
| 941 | __set_current_state(TASK_RUNNING); | 942 | __set_current_state(TASK_RUNNING); |
| 942 | } | 943 | } |
| 943 | 944 | ||
| 945 | ret = iomap_dio_complete(dio); | ||
| 946 | |||
| 944 | /* | 947 | /* |
| 945 | * Try again to invalidate clean pages which might have been cached by | 948 | * Try again to invalidate clean pages which might have been cached by |
| 946 | * non-direct readahead, or faulted in by get_user_pages() if the source | 949 | * non-direct readahead, or faulted in by get_user_pages() if the source |
| @@ -949,12 +952,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
| 949 | * this invalidation fails, tough, the write still worked... | 952 | * this invalidation fails, tough, the write still worked... |
| 950 | */ | 953 | */ |
| 951 | if (iov_iter_rw(iter) == WRITE && mapping->nrpages) { | 954 | if (iov_iter_rw(iter) == WRITE && mapping->nrpages) { |
| 952 | ret = invalidate_inode_pages2_range(mapping, | 955 | int err = invalidate_inode_pages2_range(mapping, |
| 953 | iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT); | 956 | start >> PAGE_SHIFT, end >> PAGE_SHIFT); |
| 954 | WARN_ON_ONCE(ret); | 957 | WARN_ON_ONCE(err); |
| 955 | } | 958 | } |
| 956 | 959 | ||
| 957 | return iomap_dio_complete(dio); | 960 | return ret; |
| 958 | 961 | ||
| 959 | out_free_dio: | 962 | out_free_dio: |
| 960 | kfree(dio); | 963 | kfree(dio); |
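
Two things change at the tail of iomap_dio_rw(): the dio is now completed before the second page-cache invalidation pass, and the result of that best-effort invalidation is kept in a local err so it can be warned about without overwriting the byte count (or error) returned by iomap_dio_complete(). A standalone sketch of that return-value discipline, with stub functions standing in for the iomap internals:

    #include <stdio.h>

    /* Stand-ins for iomap_dio_complete() and
     * invalidate_inode_pages2_range(); not kernel APIs. */
    static long do_io(void)                  { return 4096; /* bytes done */ }
    static int  best_effort_invalidate(void) { return -16;  /* say, -EBUSY */ }

    static long finish_write(void)
    {
            long ret = do_io();                 /* the primary result */
            int err = best_effort_invalidate(); /* failure is tolerable */

            if (err)
                    fprintf(stderr, "invalidate failed: %d (ignored)\n", err);
            return ret;                         /* caller still sees 4096 */
    }

    int main(void)
    {
            printf("returned %ld\n", finish_write());
            return 0;
    }
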
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index 1953986ee6bc..6e610a205e15 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c | |||
| @@ -12,7 +12,6 @@ | |||
| 12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
| 13 | #include <linux/cred.h> | 13 | #include <linux/cred.h> |
| 14 | #include <linux/xattr.h> | 14 | #include <linux/xattr.h> |
| 15 | #include <linux/sched/signal.h> | ||
| 16 | #include "overlayfs.h" | 15 | #include "overlayfs.h" |
| 17 | #include "ovl_entry.h" | 16 | #include "ovl_entry.h" |
| 18 | 17 | ||
diff --git a/fs/timerfd.c b/fs/timerfd.c index 384fa759a563..c543cdb5f8ed 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c | |||
| @@ -400,9 +400,9 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) | |||
| 400 | clockid != CLOCK_BOOTTIME_ALARM)) | 400 | clockid != CLOCK_BOOTTIME_ALARM)) |
| 401 | return -EINVAL; | 401 | return -EINVAL; |
| 402 | 402 | ||
| 403 | if (!capable(CAP_WAKE_ALARM) && | 403 | if ((clockid == CLOCK_REALTIME_ALARM || |
| 404 | (clockid == CLOCK_REALTIME_ALARM || | 404 | clockid == CLOCK_BOOTTIME_ALARM) && |
| 405 | clockid == CLOCK_BOOTTIME_ALARM)) | 405 | !capable(CAP_WAKE_ALARM)) |
| 406 | return -EPERM; | 406 | return -EPERM; |
| 407 | 407 | ||
| 408 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | 408 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
| @@ -449,7 +449,7 @@ static int do_timerfd_settime(int ufd, int flags, | |||
| 449 | return ret; | 449 | return ret; |
| 450 | ctx = f.file->private_data; | 450 | ctx = f.file->private_data; |
| 451 | 451 | ||
| 452 | if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) { | 452 | if (isalarm(ctx) && !capable(CAP_WAKE_ALARM)) { |
| 453 | fdput(f); | 453 | fdput(f); |
| 454 | return -EPERM; | 454 | return -EPERM; |
| 455 | } | 455 | } |
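
Both timerfd hunks reorder the permission test so that capable(CAP_WAKE_ALARM) is evaluated only when one of the alarm clocks is actually requested; capable() is not a pure predicate (on success it can mark the task PF_SUPERPRIV and it feeds the audit subsystem), so the cheap clockid comparison should short-circuit it away for ordinary clocks. A small userspace demonstration of that short-circuit, with a counting stub in place of capable() and arbitrary ids standing in for the alarm clocks:

    #include <stdbool.h>
    #include <stdio.h>

    static int capable_calls;

    /* Stub for the kernel's capable(); just counts invocations. */
    static bool capable_stub(void)
    {
            capable_calls++;
            return true;
    }

    /* New ordering: clockid tests first, privileged check last. */
    static int check(int clockid, int alarm_a, int alarm_b)
    {
            if ((clockid == alarm_a || clockid == alarm_b) && !capable_stub())
                    return -1;          /* -EPERM in the real code */
            return 0;
    }

    int main(void)
    {
            check(0, 8, 9);     /* a plain clock against two alarm ids */
            printf("capable() called %d time(s)\n", capable_calls); /* 0 */
            return 0;
    }
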
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 973607df579d..1d227b0fcf49 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
| @@ -138,8 +138,6 @@ out: | |||
| 138 | * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd | 138 | * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd |
| 139 | * context. | 139 | * context. |
| 140 | * @ctx: [in] Pointer to the userfaultfd context. | 140 | * @ctx: [in] Pointer to the userfaultfd context. |
| 141 | * | ||
| 142 | * Returns: In case of success, returns not zero. | ||
| 143 | */ | 141 | */ |
| 144 | static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx) | 142 | static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx) |
| 145 | { | 143 | { |
| @@ -267,6 +265,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, | |||
| 267 | { | 265 | { |
| 268 | struct mm_struct *mm = ctx->mm; | 266 | struct mm_struct *mm = ctx->mm; |
| 269 | pgd_t *pgd; | 267 | pgd_t *pgd; |
| 268 | p4d_t *p4d; | ||
| 270 | pud_t *pud; | 269 | pud_t *pud; |
| 271 | pmd_t *pmd, _pmd; | 270 | pmd_t *pmd, _pmd; |
| 272 | pte_t *pte; | 271 | pte_t *pte; |
| @@ -277,7 +276,10 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, | |||
| 277 | pgd = pgd_offset(mm, address); | 276 | pgd = pgd_offset(mm, address); |
| 278 | if (!pgd_present(*pgd)) | 277 | if (!pgd_present(*pgd)) |
| 279 | goto out; | 278 | goto out; |
| 280 | pud = pud_offset(pgd, address); | 279 | p4d = p4d_offset(pgd, address); |
| 280 | if (!p4d_present(*p4d)) | ||
| 281 | goto out; | ||
| 282 | pud = pud_offset(p4d, address); | ||
| 281 | if (!pud_present(*pud)) | 283 | if (!pud_present(*pud)) |
| 282 | goto out; | 284 | goto out; |
| 283 | pmd = pmd_offset(pud, address); | 285 | pmd = pmd_offset(pud, address); |
| @@ -490,7 +492,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
| 490 | * in such case. | 492 | * in such case. |
| 491 | */ | 493 | */ |
| 492 | down_read(&mm->mmap_sem); | 494 | down_read(&mm->mmap_sem); |
| 493 | ret = 0; | 495 | ret = VM_FAULT_NOPAGE; |
| 494 | } | 496 | } |
| 495 | } | 497 | } |
| 496 | 498 | ||
| @@ -527,10 +529,11 @@ out: | |||
| 527 | return ret; | 529 | return ret; |
| 528 | } | 530 | } |
| 529 | 531 | ||
| 530 | static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, | 532 | static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, |
| 531 | struct userfaultfd_wait_queue *ewq) | 533 | struct userfaultfd_wait_queue *ewq) |
| 532 | { | 534 | { |
| 533 | int ret = 0; | 535 | if (WARN_ON_ONCE(current->flags & PF_EXITING)) |
| 536 | goto out; | ||
| 534 | 537 | ||
| 535 | ewq->ctx = ctx; | 538 | ewq->ctx = ctx; |
| 536 | init_waitqueue_entry(&ewq->wq, current); | 539 | init_waitqueue_entry(&ewq->wq, current); |
| @@ -547,8 +550,16 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, | |||
| 547 | break; | 550 | break; |
| 548 | if (ACCESS_ONCE(ctx->released) || | 551 | if (ACCESS_ONCE(ctx->released) || |
| 549 | fatal_signal_pending(current)) { | 552 | fatal_signal_pending(current)) { |
| 550 | ret = -1; | ||
| 551 | __remove_wait_queue(&ctx->event_wqh, &ewq->wq); | 553 | __remove_wait_queue(&ctx->event_wqh, &ewq->wq); |
| 554 | if (ewq->msg.event == UFFD_EVENT_FORK) { | ||
| 555 | struct userfaultfd_ctx *new; | ||
| 556 | |||
| 557 | new = (struct userfaultfd_ctx *) | ||
| 558 | (unsigned long) | ||
| 559 | ewq->msg.arg.reserved.reserved1; | ||
| 560 | |||
| 561 | userfaultfd_ctx_put(new); | ||
| 562 | } | ||
| 552 | break; | 563 | break; |
| 553 | } | 564 | } |
| 554 | 565 | ||
| @@ -566,9 +577,8 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, | |||
| 566 | * ctx may go away after this if the userfault pseudo fd is | 577 | * ctx may go away after this if the userfault pseudo fd is |
| 567 | * already released. | 578 | * already released. |
| 568 | */ | 579 | */ |
| 569 | 580 | out: | |
| 570 | userfaultfd_ctx_put(ctx); | 581 | userfaultfd_ctx_put(ctx); |
| 571 | return ret; | ||
| 572 | } | 582 | } |
| 573 | 583 | ||
| 574 | static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx, | 584 | static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx, |
| @@ -626,7 +636,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) | |||
| 626 | return 0; | 636 | return 0; |
| 627 | } | 637 | } |
| 628 | 638 | ||
| 629 | static int dup_fctx(struct userfaultfd_fork_ctx *fctx) | 639 | static void dup_fctx(struct userfaultfd_fork_ctx *fctx) |
| 630 | { | 640 | { |
| 631 | struct userfaultfd_ctx *ctx = fctx->orig; | 641 | struct userfaultfd_ctx *ctx = fctx->orig; |
| 632 | struct userfaultfd_wait_queue ewq; | 642 | struct userfaultfd_wait_queue ewq; |
| @@ -636,17 +646,15 @@ static int dup_fctx(struct userfaultfd_fork_ctx *fctx) | |||
| 636 | ewq.msg.event = UFFD_EVENT_FORK; | 646 | ewq.msg.event = UFFD_EVENT_FORK; |
| 637 | ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new; | 647 | ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new; |
| 638 | 648 | ||
| 639 | return userfaultfd_event_wait_completion(ctx, &ewq); | 649 | userfaultfd_event_wait_completion(ctx, &ewq); |
| 640 | } | 650 | } |
| 641 | 651 | ||
| 642 | void dup_userfaultfd_complete(struct list_head *fcs) | 652 | void dup_userfaultfd_complete(struct list_head *fcs) |
| 643 | { | 653 | { |
| 644 | int ret = 0; | ||
| 645 | struct userfaultfd_fork_ctx *fctx, *n; | 654 | struct userfaultfd_fork_ctx *fctx, *n; |
| 646 | 655 | ||
| 647 | list_for_each_entry_safe(fctx, n, fcs, list) { | 656 | list_for_each_entry_safe(fctx, n, fcs, list) { |
| 648 | if (!ret) | 657 | dup_fctx(fctx); |
| 649 | ret = dup_fctx(fctx); | ||
| 650 | list_del(&fctx->list); | 658 | list_del(&fctx->list); |
| 651 | kfree(fctx); | 659 | kfree(fctx); |
| 652 | } | 660 | } |
| @@ -689,8 +697,7 @@ void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx, | |||
| 689 | userfaultfd_event_wait_completion(ctx, &ewq); | 697 | userfaultfd_event_wait_completion(ctx, &ewq); |
| 690 | } | 698 | } |
| 691 | 699 | ||
| 692 | void userfaultfd_remove(struct vm_area_struct *vma, | 700 | bool userfaultfd_remove(struct vm_area_struct *vma, |
| 693 | struct vm_area_struct **prev, | ||
| 694 | unsigned long start, unsigned long end) | 701 | unsigned long start, unsigned long end) |
| 695 | { | 702 | { |
| 696 | struct mm_struct *mm = vma->vm_mm; | 703 | struct mm_struct *mm = vma->vm_mm; |
| @@ -699,13 +706,11 @@ void userfaultfd_remove(struct vm_area_struct *vma, | |||
| 699 | 706 | ||
| 700 | ctx = vma->vm_userfaultfd_ctx.ctx; | 707 | ctx = vma->vm_userfaultfd_ctx.ctx; |
| 701 | if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE)) | 708 | if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE)) |
| 702 | return; | 709 | return true; |
| 703 | 710 | ||
| 704 | userfaultfd_ctx_get(ctx); | 711 | userfaultfd_ctx_get(ctx); |
| 705 | up_read(&mm->mmap_sem); | 712 | up_read(&mm->mmap_sem); |
| 706 | 713 | ||
| 707 | *prev = NULL; /* We wait for ACK w/o the mmap semaphore */ | ||
| 708 | |||
| 709 | msg_init(&ewq.msg); | 714 | msg_init(&ewq.msg); |
| 710 | 715 | ||
| 711 | ewq.msg.event = UFFD_EVENT_REMOVE; | 716 | ewq.msg.event = UFFD_EVENT_REMOVE; |
| @@ -714,7 +719,7 @@ void userfaultfd_remove(struct vm_area_struct *vma, | |||
| 714 | 719 | ||
| 715 | userfaultfd_event_wait_completion(ctx, &ewq); | 720 | userfaultfd_event_wait_completion(ctx, &ewq); |
| 716 | 721 | ||
| 717 | down_read(&mm->mmap_sem); | 722 | return false; |
| 718 | } | 723 | } |
| 719 | 724 | ||
| 720 | static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps, | 725 | static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps, |
| @@ -775,34 +780,6 @@ void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf) | |||
| 775 | } | 780 | } |
| 776 | } | 781 | } |
| 777 | 782 | ||
| 778 | void userfaultfd_exit(struct mm_struct *mm) | ||
| 779 | { | ||
| 780 | struct vm_area_struct *vma = mm->mmap; | ||
| 781 | |||
| 782 | /* | ||
| 783 | * We can do the vma walk without locking because the caller | ||
| 784 | * (exit_mm) knows it now has exclusive access | ||
| 785 | */ | ||
| 786 | while (vma) { | ||
| 787 | struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; | ||
| 788 | |||
| 789 | if (ctx && (ctx->features & UFFD_FEATURE_EVENT_EXIT)) { | ||
| 790 | struct userfaultfd_wait_queue ewq; | ||
| 791 | |||
| 792 | userfaultfd_ctx_get(ctx); | ||
| 793 | |||
| 794 | msg_init(&ewq.msg); | ||
| 795 | ewq.msg.event = UFFD_EVENT_EXIT; | ||
| 796 | |||
| 797 | userfaultfd_event_wait_completion(ctx, &ewq); | ||
| 798 | |||
| 799 | ctx->features &= ~UFFD_FEATURE_EVENT_EXIT; | ||
| 800 | } | ||
| 801 | |||
| 802 | vma = vma->vm_next; | ||
| 803 | } | ||
| 804 | } | ||
| 805 | |||
| 806 | static int userfaultfd_release(struct inode *inode, struct file *file) | 783 | static int userfaultfd_release(struct inode *inode, struct file *file) |
| 807 | { | 784 | { |
| 808 | struct userfaultfd_ctx *ctx = file->private_data; | 785 | struct userfaultfd_ctx *ctx = file->private_data; |
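
The userfaultfd_must_wait() hunk above is one instance of the tree-wide five-level page-table conversion that the asm-generic headers later in this diff support: every software walk gains a p4d step between pgd and pud. A hedged skeleton of the full walk as it looks after the conversion (kernel context, linux/mm.h assumed; on architectures where the extra level is folded, p4d_offset() simply hands back the pgd, so the added step costs nothing):

    static pte_t *walk_example(struct mm_struct *mm, unsigned long address)
    {
            pgd_t *pgd = pgd_offset(mm, address);
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            if (!pgd_present(*pgd))
                    return NULL;
            p4d = p4d_offset(pgd, address);
            if (!p4d_present(*p4d))
                    return NULL;
            pud = pud_offset(p4d, address);
            if (!pud_present(*pud))
                    return NULL;
            pmd = pmd_offset(pud, address);
            if (!pmd_present(*pmd))
                    return NULL;
            return pte_offset_map(pmd, address);  /* caller must pte_unmap() */
    }
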
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c index 2dfdc62f795e..70a5b55e0870 100644 --- a/fs/xfs/kmem.c +++ b/fs/xfs/kmem.c | |||
| @@ -25,24 +25,6 @@ | |||
| 25 | #include "kmem.h" | 25 | #include "kmem.h" |
| 26 | #include "xfs_message.h" | 26 | #include "xfs_message.h" |
| 27 | 27 | ||
| 28 | /* | ||
| 29 | * Greedy allocation. May fail and may return vmalloced memory. | ||
| 30 | */ | ||
| 31 | void * | ||
| 32 | kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize) | ||
| 33 | { | ||
| 34 | void *ptr; | ||
| 35 | size_t kmsize = maxsize; | ||
| 36 | |||
| 37 | while (!(ptr = vzalloc(kmsize))) { | ||
| 38 | if ((kmsize >>= 1) <= minsize) | ||
| 39 | kmsize = minsize; | ||
| 40 | } | ||
| 41 | if (ptr) | ||
| 42 | *size = kmsize; | ||
| 43 | return ptr; | ||
| 44 | } | ||
| 45 | |||
| 46 | void * | 28 | void * |
| 47 | kmem_alloc(size_t size, xfs_km_flags_t flags) | 29 | kmem_alloc(size_t size, xfs_km_flags_t flags) |
| 48 | { | 30 | { |
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h index 689f746224e7..f0fc84fcaac2 100644 --- a/fs/xfs/kmem.h +++ b/fs/xfs/kmem.h | |||
| @@ -69,8 +69,6 @@ static inline void kmem_free(const void *ptr) | |||
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | 71 | ||
| 72 | extern void *kmem_zalloc_greedy(size_t *, size_t, size_t); | ||
| 73 | |||
| 74 | static inline void * | 72 | static inline void * |
| 75 | kmem_zalloc(size_t size, xfs_km_flags_t flags) | 73 | kmem_zalloc(size_t size, xfs_km_flags_t flags) |
| 76 | { | 74 | { |
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index a9c66d47757a..9bd104f32908 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c | |||
| @@ -763,8 +763,8 @@ xfs_bmap_extents_to_btree( | |||
| 763 | args.type = XFS_ALLOCTYPE_START_BNO; | 763 | args.type = XFS_ALLOCTYPE_START_BNO; |
| 764 | args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino); | 764 | args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino); |
| 765 | } else if (dfops->dop_low) { | 765 | } else if (dfops->dop_low) { |
| 766 | try_another_ag: | ||
| 767 | args.type = XFS_ALLOCTYPE_START_BNO; | 766 | args.type = XFS_ALLOCTYPE_START_BNO; |
| 767 | try_another_ag: | ||
| 768 | args.fsbno = *firstblock; | 768 | args.fsbno = *firstblock; |
| 769 | } else { | 769 | } else { |
| 770 | args.type = XFS_ALLOCTYPE_NEAR_BNO; | 770 | args.type = XFS_ALLOCTYPE_NEAR_BNO; |
| @@ -790,13 +790,17 @@ try_another_ag: | |||
| 790 | if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) && | 790 | if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) && |
| 791 | args.fsbno == NULLFSBLOCK && | 791 | args.fsbno == NULLFSBLOCK && |
| 792 | args.type == XFS_ALLOCTYPE_NEAR_BNO) { | 792 | args.type == XFS_ALLOCTYPE_NEAR_BNO) { |
| 793 | dfops->dop_low = true; | 793 | args.type = XFS_ALLOCTYPE_FIRST_AG; |
| 794 | goto try_another_ag; | 794 | goto try_another_ag; |
| 795 | } | 795 | } |
| 796 | if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { | ||
| 797 | xfs_iroot_realloc(ip, -1, whichfork); | ||
| 798 | xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); | ||
| 799 | return -ENOSPC; | ||
| 800 | } | ||
| 796 | /* | 801 | /* |
| 797 | * Allocation can't fail, the space was reserved. | 802 | * Allocation can't fail, the space was reserved. |
| 798 | */ | 803 | */ |
| 799 | ASSERT(args.fsbno != NULLFSBLOCK); | ||
| 800 | ASSERT(*firstblock == NULLFSBLOCK || | 804 | ASSERT(*firstblock == NULLFSBLOCK || |
| 801 | args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock)); | 805 | args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock)); |
| 802 | *firstblock = cur->bc_private.b.firstblock = args.fsbno; | 806 | *firstblock = cur->bc_private.b.firstblock = args.fsbno; |
| @@ -4150,6 +4154,19 @@ xfs_bmapi_read( | |||
| 4150 | return 0; | 4154 | return 0; |
| 4151 | } | 4155 | } |
| 4152 | 4156 | ||
| 4157 | /* | ||
| 4158 | * Add a delayed allocation extent to an inode. Blocks are reserved from the | ||
| 4159 | * global pool and the extent inserted into the inode in-core extent tree. | ||
| 4160 | * | ||
| 4161 | * On entry, got refers to the first extent beyond the offset of the extent to | ||
| 4162 | * allocate or eof is specified if no such extent exists. On return, got refers | ||
| 4163 | * to the extent record that was inserted to the inode fork. | ||
| 4164 | * | ||
| 4165 | * Note that the allocated extent may have been merged with contiguous extents | ||
| 4166 | * during insertion into the inode fork. Thus, got does not reflect the current | ||
| 4167 | * state of the inode fork on return. If necessary, the caller can use lastx to | ||
| 4168 | * look up the updated record in the inode fork. | ||
| 4169 | */ | ||
| 4153 | int | 4170 | int |
| 4154 | xfs_bmapi_reserve_delalloc( | 4171 | xfs_bmapi_reserve_delalloc( |
| 4155 | struct xfs_inode *ip, | 4172 | struct xfs_inode *ip, |
| @@ -4236,13 +4253,8 @@ xfs_bmapi_reserve_delalloc( | |||
| 4236 | got->br_startblock = nullstartblock(indlen); | 4253 | got->br_startblock = nullstartblock(indlen); |
| 4237 | got->br_blockcount = alen; | 4254 | got->br_blockcount = alen; |
| 4238 | got->br_state = XFS_EXT_NORM; | 4255 | got->br_state = XFS_EXT_NORM; |
| 4239 | xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got); | ||
| 4240 | 4256 | ||
| 4241 | /* | 4257 | xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got); |
| 4242 | * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay | ||
| 4243 | * might have merged it into one of the neighbouring ones. | ||
| 4244 | */ | ||
| 4245 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got); | ||
| 4246 | 4258 | ||
| 4247 | /* | 4259 | /* |
| 4248 | * Tag the inode if blocks were preallocated. Note that COW fork | 4260 | * Tag the inode if blocks were preallocated. Note that COW fork |
| @@ -4254,10 +4266,6 @@ xfs_bmapi_reserve_delalloc( | |||
| 4254 | if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) | 4266 | if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) |
| 4255 | xfs_inode_set_cowblocks_tag(ip); | 4267 | xfs_inode_set_cowblocks_tag(ip); |
| 4256 | 4268 | ||
| 4257 | ASSERT(got->br_startoff <= aoff); | ||
| 4258 | ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen); | ||
| 4259 | ASSERT(isnullstartblock(got->br_startblock)); | ||
| 4260 | ASSERT(got->br_state == XFS_EXT_NORM); | ||
| 4261 | return 0; | 4269 | return 0; |
| 4262 | 4270 | ||
| 4263 | out_unreserve_blocks: | 4271 | out_unreserve_blocks: |
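
The xfs_bmap_extents_to_btree() change turns a silent retry into an explicit fallback ladder: if the near-block allocation comes back empty on a reflink filesystem, retry with XFS_ALLOCTYPE_FIRST_AG (scan from the first AG), and if even that finds nothing, undo the in-core conversion and return -ENOSPC instead of tripping an assert. A condensed sketch of that ladder; the real function also chooses among several initial allocation targets and unwinds the btree root on failure, all of which is elided here:

    static int alloc_btree_block_sketch(struct xfs_mount *mp,
                                        struct xfs_alloc_arg *args,
                                        xfs_fsblock_t *firstblock)
    {
            int error;

            args->fsbno = *firstblock;
            args->type = XFS_ALLOCTYPE_NEAR_BNO;     /* first try: stay close */
    retry:
            error = xfs_alloc_vextent(args);
            if (error)
                    return error;
            if (xfs_sb_version_hasreflink(&mp->m_sb) &&
                args->fsbno == NULLFSBLOCK &&
                args->type == XFS_ALLOCTYPE_NEAR_BNO) {
                    args->type = XFS_ALLOCTYPE_FIRST_AG; /* then: any AG */
                    args->fsbno = *firstblock;
                    goto retry;
            }
            if (WARN_ON_ONCE(args->fsbno == NULLFSBLOCK))
                    return -ENOSPC;     /* caller unwinds its in-core state */
            *firstblock = args->fsbno;
            return 0;
    }
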
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c index f93072b58a58..fd55db479385 100644 --- a/fs/xfs/libxfs/xfs_bmap_btree.c +++ b/fs/xfs/libxfs/xfs_bmap_btree.c | |||
| @@ -447,8 +447,8 @@ xfs_bmbt_alloc_block( | |||
| 447 | 447 | ||
| 448 | if (args.fsbno == NULLFSBLOCK) { | 448 | if (args.fsbno == NULLFSBLOCK) { |
| 449 | args.fsbno = be64_to_cpu(start->l); | 449 | args.fsbno = be64_to_cpu(start->l); |
| 450 | try_another_ag: | ||
| 451 | args.type = XFS_ALLOCTYPE_START_BNO; | 450 | args.type = XFS_ALLOCTYPE_START_BNO; |
| 451 | try_another_ag: | ||
| 452 | /* | 452 | /* |
| 453 | * Make sure there is sufficient room left in the AG to | 453 | * Make sure there is sufficient room left in the AG to |
| 454 | * complete a full tree split for an extent insert. If | 454 | * complete a full tree split for an extent insert. If |
| @@ -488,8 +488,8 @@ try_another_ag: | |||
| 488 | if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) && | 488 | if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) && |
| 489 | args.fsbno == NULLFSBLOCK && | 489 | args.fsbno == NULLFSBLOCK && |
| 490 | args.type == XFS_ALLOCTYPE_NEAR_BNO) { | 490 | args.type == XFS_ALLOCTYPE_NEAR_BNO) { |
| 491 | cur->bc_private.b.dfops->dop_low = true; | ||
| 492 | args.fsbno = cur->bc_private.b.firstblock; | 491 | args.fsbno = cur->bc_private.b.firstblock; |
| 492 | args.type = XFS_ALLOCTYPE_FIRST_AG; | ||
| 493 | goto try_another_ag; | 493 | goto try_another_ag; |
| 494 | } | 494 | } |
| 495 | 495 | ||
| @@ -506,7 +506,7 @@ try_another_ag: | |||
| 506 | goto error0; | 506 | goto error0; |
| 507 | cur->bc_private.b.dfops->dop_low = true; | 507 | cur->bc_private.b.dfops->dop_low = true; |
| 508 | } | 508 | } |
| 509 | if (args.fsbno == NULLFSBLOCK) { | 509 | if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { |
| 510 | XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); | 510 | XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); |
| 511 | *stat = 0; | 511 | *stat = 0; |
| 512 | return 0; | 512 | return 0; |
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index bf65a9ea8642..61494295d92f 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
| @@ -274,54 +274,49 @@ xfs_end_io( | |||
| 274 | struct xfs_ioend *ioend = | 274 | struct xfs_ioend *ioend = |
| 275 | container_of(work, struct xfs_ioend, io_work); | 275 | container_of(work, struct xfs_ioend, io_work); |
| 276 | struct xfs_inode *ip = XFS_I(ioend->io_inode); | 276 | struct xfs_inode *ip = XFS_I(ioend->io_inode); |
| 277 | xfs_off_t offset = ioend->io_offset; | ||
| 278 | size_t size = ioend->io_size; | ||
| 277 | int error = ioend->io_bio->bi_error; | 279 | int error = ioend->io_bio->bi_error; |
| 278 | 280 | ||
| 279 | /* | 281 | /* |
| 280 | * Set an error if the mount has shut down and proceed with end I/O | 282 | * Just clean up the in-memory structures if the fs has been shut down. |
| 281 | * processing so it can perform whatever cleanups are necessary. | ||
| 282 | */ | 283 | */ |
| 283 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | 284 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { |
| 284 | error = -EIO; | 285 | error = -EIO; |
| 286 | goto done; | ||
| 287 | } | ||
| 285 | 288 | ||
| 286 | /* | 289 | /* |
| 287 | * For a CoW extent, we need to move the mapping from the CoW fork | 290 | * Clean up any COW blocks on an I/O error. |
| 288 | * to the data fork. If instead an error happened, just dump the | ||
| 289 | * new blocks. | ||
| 290 | */ | 291 | */ |
| 291 | if (ioend->io_type == XFS_IO_COW) { | 292 | if (unlikely(error)) { |
| 292 | if (error) | 293 | switch (ioend->io_type) { |
| 293 | goto done; | 294 | case XFS_IO_COW: |
| 294 | if (ioend->io_bio->bi_error) { | 295 | xfs_reflink_cancel_cow_range(ip, offset, size, true); |
| 295 | error = xfs_reflink_cancel_cow_range(ip, | 296 | break; |
| 296 | ioend->io_offset, ioend->io_size); | ||
| 297 | goto done; | ||
| 298 | } | 297 | } |
| 299 | error = xfs_reflink_end_cow(ip, ioend->io_offset, | 298 | |
| 300 | ioend->io_size); | 299 | goto done; |
| 301 | if (error) | ||
| 302 | goto done; | ||
| 303 | } | 300 | } |
| 304 | 301 | ||
| 305 | /* | 302 | /* |
| 306 | * For unwritten extents we need to issue transactions to convert a | 303 | * Success: commit the COW or unwritten blocks if needed. |
| 307 | * range to normal written extens after the data I/O has finished. | ||
| 308 | * Detecting and handling completion IO errors is done individually | ||
| 309 | * for each case as different cleanup operations need to be performed | ||
| 310 | * on error. | ||
| 311 | */ | 304 | */ |
| 312 | if (ioend->io_type == XFS_IO_UNWRITTEN) { | 305 | switch (ioend->io_type) { |
| 313 | if (error) | 306 | case XFS_IO_COW: |
| 314 | goto done; | 307 | error = xfs_reflink_end_cow(ip, offset, size); |
| 315 | error = xfs_iomap_write_unwritten(ip, ioend->io_offset, | 308 | break; |
| 316 | ioend->io_size); | 309 | case XFS_IO_UNWRITTEN: |
| 317 | } else if (ioend->io_append_trans) { | 310 | error = xfs_iomap_write_unwritten(ip, offset, size); |
| 318 | error = xfs_setfilesize_ioend(ioend, error); | 311 | break; |
| 319 | } else { | 312 | default: |
| 320 | ASSERT(!xfs_ioend_is_append(ioend) || | 313 | ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans); |
| 321 | ioend->io_type == XFS_IO_COW); | 314 | break; |
| 322 | } | 315 | } |
| 323 | 316 | ||
| 324 | done: | 317 | done: |
| 318 | if (ioend->io_append_trans) | ||
| 319 | error = xfs_setfilesize_ioend(ioend, error); | ||
| 325 | xfs_destroy_ioend(ioend, error); | 320 | xfs_destroy_ioend(ioend, error); |
| 326 | } | 321 | } |
| 327 | 322 | ||
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 7234b9748c36..3531f8f72fa5 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c | |||
| @@ -1608,7 +1608,7 @@ xfs_inode_free_cowblocks( | |||
| 1608 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | 1608 | xfs_ilock(ip, XFS_IOLOCK_EXCL); |
| 1609 | xfs_ilock(ip, XFS_MMAPLOCK_EXCL); | 1609 | xfs_ilock(ip, XFS_MMAPLOCK_EXCL); |
| 1610 | 1610 | ||
| 1611 | ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF); | 1611 | ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false); |
| 1612 | 1612 | ||
| 1613 | xfs_iunlock(ip, XFS_MMAPLOCK_EXCL); | 1613 | xfs_iunlock(ip, XFS_MMAPLOCK_EXCL); |
| 1614 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | 1614 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index edfa6a55b064..7eaf1ef74e3c 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
| @@ -1615,7 +1615,7 @@ xfs_itruncate_extents( | |||
| 1615 | 1615 | ||
| 1616 | /* Remove all pending CoW reservations. */ | 1616 | /* Remove all pending CoW reservations. */ |
| 1617 | error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block, | 1617 | error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block, |
| 1618 | last_block); | 1618 | last_block, true); |
| 1619 | if (error) | 1619 | if (error) |
| 1620 | goto out; | 1620 | goto out; |
| 1621 | 1621 | ||
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 41662fb14e87..288ee5b840d7 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
| @@ -630,6 +630,11 @@ retry: | |||
| 630 | goto out_unlock; | 630 | goto out_unlock; |
| 631 | } | 631 | } |
| 632 | 632 | ||
| 633 | /* | ||
| 634 | * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch | ||
| 635 | * them out if the write happens to fail. | ||
| 636 | */ | ||
| 637 | iomap->flags = IOMAP_F_NEW; | ||
| 633 | trace_xfs_iomap_alloc(ip, offset, count, 0, &got); | 638 | trace_xfs_iomap_alloc(ip, offset, count, 0, &got); |
| 634 | done: | 639 | done: |
| 635 | if (isnullstartblock(got.br_startblock)) | 640 | if (isnullstartblock(got.br_startblock)) |
| @@ -1071,16 +1076,22 @@ xfs_file_iomap_end_delalloc( | |||
| 1071 | struct xfs_inode *ip, | 1076 | struct xfs_inode *ip, |
| 1072 | loff_t offset, | 1077 | loff_t offset, |
| 1073 | loff_t length, | 1078 | loff_t length, |
| 1074 | ssize_t written) | 1079 | ssize_t written, |
| 1080 | struct iomap *iomap) | ||
| 1075 | { | 1081 | { |
| 1076 | struct xfs_mount *mp = ip->i_mount; | 1082 | struct xfs_mount *mp = ip->i_mount; |
| 1077 | xfs_fileoff_t start_fsb; | 1083 | xfs_fileoff_t start_fsb; |
| 1078 | xfs_fileoff_t end_fsb; | 1084 | xfs_fileoff_t end_fsb; |
| 1079 | int error = 0; | 1085 | int error = 0; |
| 1080 | 1086 | ||
| 1081 | /* behave as if the write failed if drop writes is enabled */ | 1087 | /* |
| 1082 | if (xfs_mp_drop_writes(mp)) | 1088 | * Behave as if the write failed if drop writes is enabled. Set the NEW |
| 1089 | * flag to force delalloc cleanup. | ||
| 1090 | */ | ||
| 1091 | if (xfs_mp_drop_writes(mp)) { | ||
| 1092 | iomap->flags |= IOMAP_F_NEW; | ||
| 1083 | written = 0; | 1093 | written = 0; |
| 1094 | } | ||
| 1084 | 1095 | ||
| 1085 | /* | 1096 | /* |
| 1086 | * start_fsb refers to the first unused block after a short write. If | 1097 | * start_fsb refers to the first unused block after a short write. If |
| @@ -1094,14 +1105,14 @@ xfs_file_iomap_end_delalloc( | |||
| 1094 | end_fsb = XFS_B_TO_FSB(mp, offset + length); | 1105 | end_fsb = XFS_B_TO_FSB(mp, offset + length); |
| 1095 | 1106 | ||
| 1096 | /* | 1107 | /* |
| 1097 | * Trim back delalloc blocks if we didn't manage to write the whole | 1108 | * Trim delalloc blocks if they were allocated by this write and we |
| 1098 | * range reserved. | 1109 | * didn't manage to write the whole range. |
| 1099 | * | 1110 | * |
| 1100 | * We don't need to care about racing delalloc as we hold i_mutex | 1111 | * We don't need to care about racing delalloc as we hold i_mutex |
| 1101 | * across the reserve/allocate/unreserve calls. If there are delalloc | 1112 | * across the reserve/allocate/unreserve calls. If there are delalloc |
| 1102 | * blocks in the range, they are ours. | 1113 | * blocks in the range, they are ours. |
| 1103 | */ | 1114 | */ |
| 1104 | if (start_fsb < end_fsb) { | 1115 | if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) { |
| 1105 | truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb), | 1116 | truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb), |
| 1106 | XFS_FSB_TO_B(mp, end_fsb) - 1); | 1117 | XFS_FSB_TO_B(mp, end_fsb) - 1); |
| 1107 | 1118 | ||
| @@ -1131,7 +1142,7 @@ xfs_file_iomap_end( | |||
| 1131 | { | 1142 | { |
| 1132 | if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC) | 1143 | if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC) |
| 1133 | return xfs_file_iomap_end_delalloc(XFS_I(inode), offset, | 1144 | return xfs_file_iomap_end_delalloc(XFS_I(inode), offset, |
| 1134 | length, written); | 1145 | length, written, iomap); |
| 1135 | return 0; | 1146 | return 0; |
| 1136 | } | 1147 | } |
| 1137 | 1148 | ||
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 66e881790c17..2a6d9b1558e0 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c | |||
| @@ -361,7 +361,6 @@ xfs_bulkstat( | |||
| 361 | xfs_agino_t agino; /* inode # in allocation group */ | 361 | xfs_agino_t agino; /* inode # in allocation group */ |
| 362 | xfs_agnumber_t agno; /* allocation group number */ | 362 | xfs_agnumber_t agno; /* allocation group number */ |
| 363 | xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ | 363 | xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ |
| 364 | size_t irbsize; /* size of irec buffer in bytes */ | ||
| 365 | xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */ | 364 | xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */ |
| 366 | int nirbuf; /* size of irbuf */ | 365 | int nirbuf; /* size of irbuf */ |
| 367 | int ubcount; /* size of user's buffer */ | 366 | int ubcount; /* size of user's buffer */ |
| @@ -388,11 +387,10 @@ xfs_bulkstat( | |||
| 388 | *ubcountp = 0; | 387 | *ubcountp = 0; |
| 389 | *done = 0; | 388 | *done = 0; |
| 390 | 389 | ||
| 391 | irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4); | 390 | irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP); |
| 392 | if (!irbuf) | 391 | if (!irbuf) |
| 393 | return -ENOMEM; | 392 | return -ENOMEM; |
| 394 | 393 | nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf); | |
| 395 | nirbuf = irbsize / sizeof(*irbuf); | ||
| 396 | 394 | ||
| 397 | /* | 395 | /* |
| 398 | * Loop over the allocation groups, starting from the last | 396 | * Loop over the allocation groups, starting from the last |
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 450bde68bb75..688ebff1f663 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
| @@ -513,8 +513,7 @@ STATIC void | |||
| 513 | xfs_set_inoalignment(xfs_mount_t *mp) | 513 | xfs_set_inoalignment(xfs_mount_t *mp) |
| 514 | { | 514 | { |
| 515 | if (xfs_sb_version_hasalign(&mp->m_sb) && | 515 | if (xfs_sb_version_hasalign(&mp->m_sb) && |
| 516 | mp->m_sb.sb_inoalignmt >= | 516 | mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp)) |
| 517 | XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) | ||
| 518 | mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1; | 517 | mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1; |
| 519 | else | 518 | else |
| 520 | mp->m_inoalign_mask = 0; | 519 | mp->m_inoalign_mask = 0; |
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index da6d08fb359c..4a84c5ea266d 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c | |||
| @@ -548,14 +548,18 @@ xfs_reflink_trim_irec_to_next_cow( | |||
| 548 | } | 548 | } |
| 549 | 549 | ||
| 550 | /* | 550 | /* |
| 551 | * Cancel all pending CoW reservations for some block range of an inode. | 551 | * Cancel CoW reservations for some block range of an inode. |
| 552 | * | ||
| 553 | * If cancel_real is true this function cancels all COW fork extents for the | ||
| 554 | * inode; if cancel_real is false, real extents are not cleared. | ||
| 552 | */ | 555 | */ |
| 553 | int | 556 | int |
| 554 | xfs_reflink_cancel_cow_blocks( | 557 | xfs_reflink_cancel_cow_blocks( |
| 555 | struct xfs_inode *ip, | 558 | struct xfs_inode *ip, |
| 556 | struct xfs_trans **tpp, | 559 | struct xfs_trans **tpp, |
| 557 | xfs_fileoff_t offset_fsb, | 560 | xfs_fileoff_t offset_fsb, |
| 558 | xfs_fileoff_t end_fsb) | 561 | xfs_fileoff_t end_fsb, |
| 562 | bool cancel_real) | ||
| 559 | { | 563 | { |
| 560 | struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); | 564 | struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); |
| 561 | struct xfs_bmbt_irec got, del; | 565 | struct xfs_bmbt_irec got, del; |
| @@ -579,7 +583,7 @@ xfs_reflink_cancel_cow_blocks( | |||
| 579 | &idx, &got, &del); | 583 | &idx, &got, &del); |
| 580 | if (error) | 584 | if (error) |
| 581 | break; | 585 | break; |
| 582 | } else { | 586 | } else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) { |
| 583 | xfs_trans_ijoin(*tpp, ip, 0); | 587 | xfs_trans_ijoin(*tpp, ip, 0); |
| 584 | xfs_defer_init(&dfops, &firstfsb); | 588 | xfs_defer_init(&dfops, &firstfsb); |
| 585 | 589 | ||
| @@ -621,13 +625,17 @@ xfs_reflink_cancel_cow_blocks( | |||
| 621 | } | 625 | } |
| 622 | 626 | ||
| 623 | /* | 627 | /* |
| 624 | * Cancel all pending CoW reservations for some byte range of an inode. | 628 | * Cancel CoW reservations for some byte range of an inode. |
| 629 | * | ||
| 630 | * If cancel_real is true this function cancels all COW fork extents for the | ||
| 631 | * inode; if cancel_real is false, real extents are not cleared. | ||
| 625 | */ | 632 | */ |
| 626 | int | 633 | int |
| 627 | xfs_reflink_cancel_cow_range( | 634 | xfs_reflink_cancel_cow_range( |
| 628 | struct xfs_inode *ip, | 635 | struct xfs_inode *ip, |
| 629 | xfs_off_t offset, | 636 | xfs_off_t offset, |
| 630 | xfs_off_t count) | 637 | xfs_off_t count, |
| 638 | bool cancel_real) | ||
| 631 | { | 639 | { |
| 632 | struct xfs_trans *tp; | 640 | struct xfs_trans *tp; |
| 633 | xfs_fileoff_t offset_fsb; | 641 | xfs_fileoff_t offset_fsb; |
| @@ -653,7 +661,8 @@ xfs_reflink_cancel_cow_range( | |||
| 653 | xfs_trans_ijoin(tp, ip, 0); | 661 | xfs_trans_ijoin(tp, ip, 0); |
| 654 | 662 | ||
| 655 | /* Scrape out the old CoW reservations */ | 663 | /* Scrape out the old CoW reservations */ |
| 656 | error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb); | 664 | error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb, |
| 665 | cancel_real); | ||
| 657 | if (error) | 666 | if (error) |
| 658 | goto out_cancel; | 667 | goto out_cancel; |
| 659 | 668 | ||
| @@ -1450,7 +1459,7 @@ next: | |||
| 1450 | * We didn't find any shared blocks so turn off the reflink flag. | 1459 | * We didn't find any shared blocks so turn off the reflink flag. |
| 1451 | * First, get rid of any leftover CoW mappings. | 1460 | * First, get rid of any leftover CoW mappings. |
| 1452 | */ | 1461 | */ |
| 1453 | error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF); | 1462 | error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true); |
| 1454 | if (error) | 1463 | if (error) |
| 1455 | return error; | 1464 | return error; |
| 1456 | 1465 | ||
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h index 33ac9b8db683..d29a7967f029 100644 --- a/fs/xfs/xfs_reflink.h +++ b/fs/xfs/xfs_reflink.h | |||
| @@ -39,9 +39,9 @@ extern void xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip, | |||
| 39 | 39 | ||
| 40 | extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip, | 40 | extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip, |
| 41 | struct xfs_trans **tpp, xfs_fileoff_t offset_fsb, | 41 | struct xfs_trans **tpp, xfs_fileoff_t offset_fsb, |
| 42 | xfs_fileoff_t end_fsb); | 42 | xfs_fileoff_t end_fsb, bool cancel_real); |
| 43 | extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset, | 43 | extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset, |
| 44 | xfs_off_t count); | 44 | xfs_off_t count, bool cancel_real); |
| 45 | extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset, | 45 | extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset, |
| 46 | xfs_off_t count); | 46 | xfs_off_t count); |
| 47 | extern int xfs_reflink_recover_cow(struct xfs_mount *mp); | 47 | extern int xfs_reflink_recover_cow(struct xfs_mount *mp); |
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 890862f2447c..685c042a120f 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c | |||
| @@ -953,7 +953,7 @@ xfs_fs_destroy_inode( | |||
| 953 | XFS_STATS_INC(ip->i_mount, vn_remove); | 953 | XFS_STATS_INC(ip->i_mount, vn_remove); |
| 954 | 954 | ||
| 955 | if (xfs_is_reflink_inode(ip)) { | 955 | if (xfs_is_reflink_inode(ip)) { |
| 956 | error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF); | 956 | error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true); |
| 957 | if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) | 957 | if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) |
| 958 | xfs_warn(ip->i_mount, | 958 | xfs_warn(ip->i_mount, |
| 959 | "Error %d while evicting CoW blocks for inode %llu.", | 959 | "Error %d while evicting CoW blocks for inode %llu.", |
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h index 5bdab6bffd23..928fd66b1271 100644 --- a/include/asm-generic/4level-fixup.h +++ b/include/asm-generic/4level-fixup.h | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \ | 15 | ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \ |
| 16 | NULL: pmd_offset(pud, address)) | 16 | NULL: pmd_offset(pud, address)) |
| 17 | 17 | ||
| 18 | #define pud_alloc(mm, pgd, address) (pgd) | ||
| 19 | #define pud_offset(pgd, start) (pgd) | 18 | #define pud_offset(pgd, start) (pgd) |
| 20 | #define pud_none(pud) 0 | 19 | #define pud_none(pud) 0 |
| 21 | #define pud_bad(pud) 0 | 20 | #define pud_bad(pud) 0 |
| @@ -35,4 +34,6 @@ | |||
| 35 | #undef pud_addr_end | 34 | #undef pud_addr_end |
| 36 | #define pud_addr_end(addr, end) (end) | 35 | #define pud_addr_end(addr, end) (end) |
| 37 | 36 | ||
| 37 | #include <asm-generic/5level-fixup.h> | ||
| 38 | |||
| 38 | #endif | 39 | #endif |
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h new file mode 100644 index 000000000000..b5ca82dc4175 --- /dev/null +++ b/include/asm-generic/5level-fixup.h | |||
| @@ -0,0 +1,41 @@ | |||
| 1 | #ifndef _5LEVEL_FIXUP_H | ||
| 2 | #define _5LEVEL_FIXUP_H | ||
| 3 | |||
| 4 | #define __ARCH_HAS_5LEVEL_HACK | ||
| 5 | #define __PAGETABLE_P4D_FOLDED | ||
| 6 | |||
| 7 | #define P4D_SHIFT PGDIR_SHIFT | ||
| 8 | #define P4D_SIZE PGDIR_SIZE | ||
| 9 | #define P4D_MASK PGDIR_MASK | ||
| 10 | #define PTRS_PER_P4D 1 | ||
| 11 | |||
| 12 | #define p4d_t pgd_t | ||
| 13 | |||
| 14 | #define pud_alloc(mm, p4d, address) \ | ||
| 15 | ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \ | ||
| 16 | NULL : pud_offset(p4d, address)) | ||
| 17 | |||
| 18 | #define p4d_alloc(mm, pgd, address) (pgd) | ||
| 19 | #define p4d_offset(pgd, start) (pgd) | ||
| 20 | #define p4d_none(p4d) 0 | ||
| 21 | #define p4d_bad(p4d) 0 | ||
| 22 | #define p4d_present(p4d) 1 | ||
| 23 | #define p4d_ERROR(p4d) do { } while (0) | ||
| 24 | #define p4d_clear(p4d) pgd_clear(p4d) | ||
| 25 | #define p4d_val(p4d) pgd_val(p4d) | ||
| 26 | #define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud) | ||
| 27 | #define p4d_page(p4d) pgd_page(p4d) | ||
| 28 | #define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d) | ||
| 29 | |||
| 30 | #define __p4d(x) __pgd(x) | ||
| 31 | #define set_p4d(p4dp, p4d) set_pgd(p4dp, p4d) | ||
| 32 | |||
| 33 | #undef p4d_free_tlb | ||
| 34 | #define p4d_free_tlb(tlb, x, addr) do { } while (0) | ||
| 35 | #define p4d_free(mm, x) do { } while (0) | ||
| 36 | #define __p4d_free_tlb(tlb, x, addr) do { } while (0) | ||
| 37 | |||
| 38 | #undef p4d_addr_end | ||
| 39 | #define p4d_addr_end(addr, end) (end) | ||
| 40 | |||
| 41 | #endif | ||
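
The new 5level-fixup.h exists so that generic code written for five page-table levels still builds on architectures that have not been converted: p4d_t aliases pgd_t, PTRS_PER_P4D is 1, the p4d predicates become constants, and p4d_addr_end() collapses to the range end. In practice the canonical walker idiom below then iterates the p4d level exactly once per pgd entry on a fixed-up architecture, at no extra cost (kernel context assumed; the helper name is illustrative):

    static void walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
    {
            p4d_t *p4d = p4d_offset(pgd, addr);
            unsigned long next;

            do {
                    next = p4d_addr_end(addr, end); /* fixed up: just 'end' */
                    if (p4d_none(*p4d))
                            continue;
                    /* descend: pud_offset(p4d, addr), then pmd, then pte */
            } while (p4d++, addr = next, addr != end);
    }
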
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h new file mode 100644 index 000000000000..752fb7511750 --- /dev/null +++ b/include/asm-generic/pgtable-nop4d-hack.h | |||
| @@ -0,0 +1,62 @@ | |||
| 1 | #ifndef _PGTABLE_NOP4D_HACK_H | ||
| 2 | #define _PGTABLE_NOP4D_HACK_H | ||
| 3 | |||
| 4 | #ifndef __ASSEMBLY__ | ||
| 5 | #include <asm-generic/5level-fixup.h> | ||
| 6 | |||
| 7 | #define __PAGETABLE_PUD_FOLDED | ||
| 8 | |||
| 9 | /* | ||
| 10 | * Having the pud type consist of a pgd gets the size right, and allows | ||
| 11 | * us to conceptually access the pgd entry that this pud is folded into | ||
| 12 | * without casting. | ||
| 13 | */ | ||
| 14 | typedef struct { pgd_t pgd; } pud_t; | ||
| 15 | |||
| 16 | #define PUD_SHIFT PGDIR_SHIFT | ||
| 17 | #define PTRS_PER_PUD 1 | ||
| 18 | #define PUD_SIZE (1UL << PUD_SHIFT) | ||
| 19 | #define PUD_MASK (~(PUD_SIZE-1)) | ||
| 20 | |||
| 21 | /* | ||
| 22 | * The "pgd_xxx()" functions here are trivial for a folded two-level | ||
| 23 | * setup: the pud is never bad, and a pud always exists (as it's folded | ||
| 24 | * into the pgd entry) | ||
| 25 | */ | ||
| 26 | static inline int pgd_none(pgd_t pgd) { return 0; } | ||
| 27 | static inline int pgd_bad(pgd_t pgd) { return 0; } | ||
| 28 | static inline int pgd_present(pgd_t pgd) { return 1; } | ||
| 29 | static inline void pgd_clear(pgd_t *pgd) { } | ||
| 30 | #define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) | ||
| 31 | |||
| 32 | #define pgd_populate(mm, pgd, pud) do { } while (0) | ||
| 33 | /* | ||
| 34 | * (puds are folded into pgds so this doesn't get actually called, | ||
| 35 | * but the define is needed for a generic inline function.) | ||
| 36 | */ | ||
| 37 | #define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) | ||
| 38 | |||
| 39 | static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) | ||
| 40 | { | ||
| 41 | return (pud_t *)pgd; | ||
| 42 | } | ||
| 43 | |||
| 44 | #define pud_val(x) (pgd_val((x).pgd)) | ||
| 45 | #define __pud(x) ((pud_t) { __pgd(x) }) | ||
| 46 | |||
| 47 | #define pgd_page(pgd) (pud_page((pud_t){ pgd })) | ||
| 48 | #define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) | ||
| 49 | |||
| 50 | /* | ||
| 51 | * allocating and freeing a pud is trivial: the 1-entry pud is | ||
| 52 | * inside the pgd, so has no extra memory associated with it. | ||
| 53 | */ | ||
| 54 | #define pud_alloc_one(mm, address) NULL | ||
| 55 | #define pud_free(mm, x) do { } while (0) | ||
| 56 | #define __pud_free_tlb(tlb, x, a) do { } while (0) | ||
| 57 | |||
| 58 | #undef pud_addr_end | ||
| 59 | #define pud_addr_end(addr, end) (end) | ||
| 60 | |||
| 61 | #endif /* __ASSEMBLY__ */ | ||
| 62 | #endif /* _PGTABLE_NOP4D_HACK_H */ | ||
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h new file mode 100644 index 000000000000..de364ecb8df6 --- /dev/null +++ b/include/asm-generic/pgtable-nop4d.h | |||
| @@ -0,0 +1,56 @@ | |||
| 1 | #ifndef _PGTABLE_NOP4D_H | ||
| 2 | #define _PGTABLE_NOP4D_H | ||
| 3 | |||
| 4 | #ifndef __ASSEMBLY__ | ||
| 5 | |||
| 6 | #define __PAGETABLE_P4D_FOLDED | ||
| 7 | |||
| 8 | typedef struct { pgd_t pgd; } p4d_t; | ||
| 9 | |||
| 10 | #define P4D_SHIFT PGDIR_SHIFT | ||
| 11 | #define PTRS_PER_P4D 1 | ||
| 12 | #define P4D_SIZE (1UL << P4D_SHIFT) | ||
| 13 | #define P4D_MASK (~(P4D_SIZE-1)) | ||
| 14 | |||
| 15 | /* | ||
| 16 | * The "pgd_xxx()" functions here are trivial for a folded two-level | ||
| 17 | * setup: the p4d is never bad, and a p4d always exists (as it's folded | ||
| 18 | * into the pgd entry) | ||
| 19 | */ | ||
| 20 | static inline int pgd_none(pgd_t pgd) { return 0; } | ||
| 21 | static inline int pgd_bad(pgd_t pgd) { return 0; } | ||
| 22 | static inline int pgd_present(pgd_t pgd) { return 1; } | ||
| 23 | static inline void pgd_clear(pgd_t *pgd) { } | ||
| 24 | #define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd)) | ||
| 25 | |||
| 26 | #define pgd_populate(mm, pgd, p4d) do { } while (0) | ||
| 27 | /* | ||
| 28 | * (p4ds are folded into pgds so this doesn't get actually called, | ||
| 29 | * but the define is needed for a generic inline function.) | ||
| 30 | */ | ||
| 31 | #define set_pgd(pgdptr, pgdval) set_p4d((p4d_t *)(pgdptr), (p4d_t) { pgdval }) | ||
| 32 | |||
| 33 | static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) | ||
| 34 | { | ||
| 35 | return (p4d_t *)pgd; | ||
| 36 | } | ||
| 37 | |||
| 38 | #define p4d_val(x) (pgd_val((x).pgd)) | ||
| 39 | #define __p4d(x) ((p4d_t) { __pgd(x) }) | ||
| 40 | |||
| 41 | #define pgd_page(pgd) (p4d_page((p4d_t){ pgd })) | ||
| 42 | #define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd })) | ||
| 43 | |||
| 44 | /* | ||
| 45 | * allocating and freeing a p4d is trivial: the 1-entry p4d is | ||
| 46 | * inside the pgd, so has no extra memory associated with it. | ||
| 47 | */ | ||
| 48 | #define p4d_alloc_one(mm, address) NULL | ||
| 49 | #define p4d_free(mm, x) do { } while (0) | ||
| 50 | #define __p4d_free_tlb(tlb, x, a) do { } while (0) | ||
| 51 | |||
| 52 | #undef p4d_addr_end | ||
| 53 | #define p4d_addr_end(addr, end) (end) | ||
| 54 | |||
| 55 | #endif /* __ASSEMBLY__ */ | ||
| 56 | #endif /* _PGTABLE_NOP4D_H */ | ||
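[annotation] With pgtable-nop4d.h, architectures with four or fewer real levels fold the new p4d into the pgd: pgd_none()/pgd_bad() become constants, p4d_offset() is a cast, and PTRS_PER_P4D is 1, so generic mm code can be written against a five-level layout everywhere. A hedged sketch of the full descent as generic code sees it after this series (illustrative helper, not part of the patch):

/* Five-level descent; on a folded arch the p4d step compiles
 * down to a pointer cast and the pgd checks are constant-folded.
 */
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none_or_clear_bad(pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;
	return pte_offset_map(pmd, addr);
}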
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h index 810431d8351b..c2b9b96d6268 100644 --- a/include/asm-generic/pgtable-nopud.h +++ b/include/asm-generic/pgtable-nopud.h | |||
| @@ -3,52 +3,57 @@ | |||
| 3 | 3 | ||
| 4 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
| 5 | 5 | ||
| 6 | #ifdef __ARCH_USE_5LEVEL_HACK | ||
| 7 | #include <asm-generic/pgtable-nop4d-hack.h> | ||
| 8 | #else | ||
| 9 | #include <asm-generic/pgtable-nop4d.h> | ||
| 10 | |||
| 6 | #define __PAGETABLE_PUD_FOLDED | 11 | #define __PAGETABLE_PUD_FOLDED |
| 7 | 12 | ||
| 8 | /* | 13 | /* |
| 9 | * Having the pud type consist of a pgd gets the size right, and allows | 14 | * Having the pud type consist of a p4d gets the size right, and allows |
| 10 | * us to conceptually access the pgd entry that this pud is folded into | 15 | * us to conceptually access the p4d entry that this pud is folded into |
| 11 | * without casting. | 16 | * without casting. |
| 12 | */ | 17 | */ |
| 13 | typedef struct { pgd_t pgd; } pud_t; | 18 | typedef struct { p4d_t p4d; } pud_t; |
| 14 | 19 | ||
| 15 | #define PUD_SHIFT PGDIR_SHIFT | 20 | #define PUD_SHIFT P4D_SHIFT |
| 16 | #define PTRS_PER_PUD 1 | 21 | #define PTRS_PER_PUD 1 |
| 17 | #define PUD_SIZE (1UL << PUD_SHIFT) | 22 | #define PUD_SIZE (1UL << PUD_SHIFT) |
| 18 | #define PUD_MASK (~(PUD_SIZE-1)) | 23 | #define PUD_MASK (~(PUD_SIZE-1)) |
| 19 | 24 | ||
| 20 | /* | 25 | /* |
| 21 | * The "pgd_xxx()" functions here are trivial for a folded two-level | 26 | * The "p4d_xxx()" functions here are trivial for a folded two-level |
| 22 | * setup: the pud is never bad, and a pud always exists (as it's folded | 27 | * setup: the pud is never bad, and a pud always exists (as it's folded |
| 23 | * into the pgd entry) | 28 | * into the p4d entry) |
| 24 | */ | 29 | */ |
| 25 | static inline int pgd_none(pgd_t pgd) { return 0; } | 30 | static inline int p4d_none(p4d_t p4d) { return 0; } |
| 26 | static inline int pgd_bad(pgd_t pgd) { return 0; } | 31 | static inline int p4d_bad(p4d_t p4d) { return 0; } |
| 27 | static inline int pgd_present(pgd_t pgd) { return 1; } | 32 | static inline int p4d_present(p4d_t p4d) { return 1; } |
| 28 | static inline void pgd_clear(pgd_t *pgd) { } | 33 | static inline void p4d_clear(p4d_t *p4d) { } |
| 29 | #define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) | 34 | #define pud_ERROR(pud) (p4d_ERROR((pud).p4d)) |
| 30 | 35 | ||
| 31 | #define pgd_populate(mm, pgd, pud) do { } while (0) | 36 | #define p4d_populate(mm, p4d, pud) do { } while (0) |
| 32 | /* | 37 | /* |
| 33 | * (puds are folded into pgds so this doesn't get actually called, | 38 | * (puds are folded into p4ds so this doesn't get actually called, |
| 34 | * but the define is needed for a generic inline function.) | 39 | * but the define is needed for a generic inline function.) |
| 35 | */ | 40 | */ |
| 36 | #define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) | 41 | #define set_p4d(p4dptr, p4dval) set_pud((pud_t *)(p4dptr), (pud_t) { p4dval }) |
| 37 | 42 | ||
| 38 | static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address) | 43 | static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) |
| 39 | { | 44 | { |
| 40 | return (pud_t *)pgd; | 45 | return (pud_t *)p4d; |
| 41 | } | 46 | } |
| 42 | 47 | ||
| 43 | #define pud_val(x) (pgd_val((x).pgd)) | 48 | #define pud_val(x) (p4d_val((x).p4d)) |
| 44 | #define __pud(x) ((pud_t) { __pgd(x) } ) | 49 | #define __pud(x) ((pud_t) { __p4d(x) }) |
| 45 | 50 | ||
| 46 | #define pgd_page(pgd) (pud_page((pud_t){ pgd })) | 51 | #define p4d_page(p4d) (pud_page((pud_t){ p4d })) |
| 47 | #define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) | 52 | #define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d })) |
| 48 | 53 | ||
| 49 | /* | 54 | /* |
| 50 | * allocating and freeing a pud is trivial: the 1-entry pud is | 55 | * allocating and freeing a pud is trivial: the 1-entry pud is |
| 51 | * inside the pgd, so has no extra memory associated with it. | 56 | * inside the p4d, so has no extra memory associated with it. |
| 52 | */ | 57 | */ |
| 53 | #define pud_alloc_one(mm, address) NULL | 58 | #define pud_alloc_one(mm, address) NULL |
| 54 | #define pud_free(mm, x) do { } while (0) | 59 | #define pud_free(mm, x) do { } while (0) |
| @@ -58,4 +63,5 @@ static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address) | |||
| 58 | #define pud_addr_end(addr, end) (end) | 63 | #define pud_addr_end(addr, end) (end) |
| 59 | 64 | ||
| 60 | #endif /* __ASSEMBLY__ */ | 65 | #endif /* __ASSEMBLY__ */ |
| 66 | #endif /* !__ARCH_USE_5LEVEL_HACK */ | ||
| 61 | #endif /* _PGTABLE_NOPUD_H */ | 67 | #endif /* _PGTABLE_NOPUD_H */ |
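[annotation] The nopud header is rebuilt on top of nop4d: the one-entry pud now nests inside a p4d rather than a pgd, so every pud accessor changes its argument type. Callers that used to call pud_offset(pgd, addr) must now go through p4d_offset() first; a hedged before/after sketch:

/* Before this series (4-level API):
 *	pud = pud_offset(pgd, addr);
 *
 * After (5-level API); on archs without a real p4d the extra
 * step is folded away at compile time:
 */
p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud = pud_offset(p4d, addr);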
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index f4ca23b158b3..1fad160f35de 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
| @@ -10,9 +10,9 @@ | |||
| 10 | #include <linux/bug.h> | 10 | #include <linux/bug.h> |
| 11 | #include <linux/errno.h> | 11 | #include <linux/errno.h> |
| 12 | 12 | ||
| 13 | #if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \ | 13 | #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \ |
| 14 | CONFIG_PGTABLE_LEVELS | 14 | defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS |
| 15 | #error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED | 15 | #error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED |
| 16 | #endif | 16 | #endif |
| 17 | 17 | ||
| 18 | /* | 18 | /* |
| @@ -424,6 +424,13 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) | |||
| 424 | (__boundary - 1 < (end) - 1)? __boundary: (end); \ | 424 | (__boundary - 1 < (end) - 1)? __boundary: (end); \ |
| 425 | }) | 425 | }) |
| 426 | 426 | ||
| 427 | #ifndef p4d_addr_end | ||
| 428 | #define p4d_addr_end(addr, end) \ | ||
| 429 | ({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \ | ||
| 430 | (__boundary - 1 < (end) - 1)? __boundary: (end); \ | ||
| 431 | }) | ||
| 432 | #endif | ||
| 433 | |||
| 427 | #ifndef pud_addr_end | 434 | #ifndef pud_addr_end |
| 428 | #define pud_addr_end(addr, end) \ | 435 | #define pud_addr_end(addr, end) \ |
| 429 | ({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ | 436 | ({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ |
| @@ -444,6 +451,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) | |||
| 444 | * Do the tests inline, but report and clear the bad entry in mm/memory.c. | 451 | * Do the tests inline, but report and clear the bad entry in mm/memory.c. |
| 445 | */ | 452 | */ |
| 446 | void pgd_clear_bad(pgd_t *); | 453 | void pgd_clear_bad(pgd_t *); |
| 454 | void p4d_clear_bad(p4d_t *); | ||
| 447 | void pud_clear_bad(pud_t *); | 455 | void pud_clear_bad(pud_t *); |
| 448 | void pmd_clear_bad(pmd_t *); | 456 | void pmd_clear_bad(pmd_t *); |
| 449 | 457 | ||
| @@ -458,6 +466,17 @@ static inline int pgd_none_or_clear_bad(pgd_t *pgd) | |||
| 458 | return 0; | 466 | return 0; |
| 459 | } | 467 | } |
| 460 | 468 | ||
| 469 | static inline int p4d_none_or_clear_bad(p4d_t *p4d) | ||
| 470 | { | ||
| 471 | if (p4d_none(*p4d)) | ||
| 472 | return 1; | ||
| 473 | if (unlikely(p4d_bad(*p4d))) { | ||
| 474 | p4d_clear_bad(p4d); | ||
| 475 | return 1; | ||
| 476 | } | ||
| 477 | return 0; | ||
| 478 | } | ||
| 479 | |||
| 461 | static inline int pud_none_or_clear_bad(pud_t *pud) | 480 | static inline int pud_none_or_clear_bad(pud_t *pud) |
| 462 | { | 481 | { |
| 463 | if (pud_none(*pud)) | 482 | if (pud_none(*pud)) |
| @@ -844,11 +863,30 @@ static inline int pmd_protnone(pmd_t pmd) | |||
| 844 | #endif /* CONFIG_MMU */ | 863 | #endif /* CONFIG_MMU */ |
| 845 | 864 | ||
| 846 | #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP | 865 | #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP |
| 866 | |||
| 867 | #ifndef __PAGETABLE_P4D_FOLDED | ||
| 868 | int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot); | ||
| 869 | int p4d_clear_huge(p4d_t *p4d); | ||
| 870 | #else | ||
| 871 | static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) | ||
| 872 | { | ||
| 873 | return 0; | ||
| 874 | } | ||
| 875 | static inline int p4d_clear_huge(p4d_t *p4d) | ||
| 876 | { | ||
| 877 | return 0; | ||
| 878 | } | ||
| 879 | #endif /* !__PAGETABLE_P4D_FOLDED */ | ||
| 880 | |||
| 847 | int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); | 881 | int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); |
| 848 | int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); | 882 | int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); |
| 849 | int pud_clear_huge(pud_t *pud); | 883 | int pud_clear_huge(pud_t *pud); |
| 850 | int pmd_clear_huge(pmd_t *pmd); | 884 | int pmd_clear_huge(pmd_t *pmd); |
| 851 | #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ | 885 | #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ |
| 886 | static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) | ||
| 887 | { | ||
| 888 | return 0; | ||
| 889 | } | ||
| 852 | static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) | 890 | static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) |
| 853 | { | 891 | { |
| 854 | return 0; | 892 | return 0; |
| @@ -857,6 +895,10 @@ static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) | |||
| 857 | { | 895 | { |
| 858 | return 0; | 896 | return 0; |
| 859 | } | 897 | } |
| 898 | static inline int p4d_clear_huge(p4d_t *p4d) | ||
| 899 | { | ||
| 900 | return 0; | ||
| 901 | } | ||
| 860 | static inline int pud_clear_huge(pud_t *pud) | 902 | static inline int pud_clear_huge(pud_t *pud) |
| 861 | { | 903 | { |
| 862 | return 0; | 904 | return 0; |
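[annotation] The generic helpers added here (p4d_addr_end(), p4d_clear_bad(), p4d_none_or_clear_bad(), plus the huge-vmap stubs) mirror the existing pud/pmd ones so range walkers can gain a p4d loop without arch-specific code. A hedged sketch of the loop shape such walkers use, modelled on the existing pud-level loops in mm/ rather than copied from this patch (walk_pud_range() is a hypothetical next-level helper):

static void walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);	/* clamp to a P4D_SIZE boundary */
		if (p4d_none_or_clear_bad(p4d))
			continue;
		walk_pud_range(p4d, addr, next);	/* hypothetical lower-level walker */
	} while (p4d++, addr = next, addr != end);
}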
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 4329bc6ef04b..8afa4335e5b2 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h | |||
| @@ -270,6 +270,12 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, | |||
| 270 | __pte_free_tlb(tlb, ptep, address); \ | 270 | __pte_free_tlb(tlb, ptep, address); \ |
| 271 | } while (0) | 271 | } while (0) |
| 272 | 272 | ||
| 273 | #define pmd_free_tlb(tlb, pmdp, address) \ | ||
| 274 | do { \ | ||
| 275 | __tlb_adjust_range(tlb, address, PAGE_SIZE); \ | ||
| 276 | __pmd_free_tlb(tlb, pmdp, address); \ | ||
| 277 | } while (0) | ||
| 278 | |||
| 273 | #ifndef __ARCH_HAS_4LEVEL_HACK | 279 | #ifndef __ARCH_HAS_4LEVEL_HACK |
| 274 | #define pud_free_tlb(tlb, pudp, address) \ | 280 | #define pud_free_tlb(tlb, pudp, address) \ |
| 275 | do { \ | 281 | do { \ |
| @@ -278,11 +284,13 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, | |||
| 278 | } while (0) | 284 | } while (0) |
| 279 | #endif | 285 | #endif |
| 280 | 286 | ||
| 281 | #define pmd_free_tlb(tlb, pmdp, address) \ | 287 | #ifndef __ARCH_HAS_5LEVEL_HACK |
| 288 | #define p4d_free_tlb(tlb, pudp, address) \ | ||
| 282 | do { \ | 289 | do { \ |
| 283 | __tlb_adjust_range(tlb, address, PAGE_SIZE); \ | 290 | __tlb_adjust_range(tlb, address, PAGE_SIZE); \ |
| 284 | __pmd_free_tlb(tlb, pmdp, address); \ | 291 | __p4d_free_tlb(tlb, pudp, address); \ |
| 285 | } while (0) | 292 | } while (0) |
| 293 | #endif | ||
| 286 | 294 | ||
| 287 | #define tlb_migrate_finish(mm) do {} while (0) | 295 | #define tlb_migrate_finish(mm) do {} while (0) |
| 288 | 296 | ||
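[annotation] In asm-generic/tlb.h, pmd_free_tlb() moves out of the __ARCH_HAS_4LEVEL_HACK guard and p4d_free_tlb() is added under a new __ARCH_HAS_5LEVEL_HACK guard, so page-table teardown gains a p4d step. A hedged sketch of where it slots into the teardown path (shaped after mm/memory.c's free_*_range() helpers, simplified and not verbatim):

static void free_p4d_level(struct mmu_gather *tlb, pgd_t *pgd,
			   unsigned long addr)
{
	p4d_t *p4d = p4d_offset(pgd, addr);

	/* ... free the pud/pmd/pte levels below this p4d ... */
	p4d_free_tlb(tlb, p4d, addr);	/* expands to a no-op when the p4d is folded */
}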
diff --git a/include/dt-bindings/sound/cs42l42.h b/include/dt-bindings/sound/cs42l42.h index 399a123aed58..db69d84ed7d1 100644 --- a/include/dt-bindings/sound/cs42l42.h +++ b/include/dt-bindings/sound/cs42l42.h | |||
| @@ -20,7 +20,7 @@ | |||
| 20 | #define CS42L42_HPOUT_LOAD_1NF 0 | 20 | #define CS42L42_HPOUT_LOAD_1NF 0 |
| 21 | #define CS42L42_HPOUT_LOAD_10NF 1 | 21 | #define CS42L42_HPOUT_LOAD_10NF 1 |
| 22 | 22 | ||
| 23 | /* HPOUT Clamp to GND Overide */ | 23 | /* HPOUT Clamp to GND Override */ |
| 24 | #define CS42L42_HPOUT_CLAMP_EN 0 | 24 | #define CS42L42_HPOUT_CLAMP_EN 0 |
| 25 | #define CS42L42_HPOUT_CLAMP_DIS 1 | 25 | #define CS42L42_HPOUT_CLAMP_DIS 1 |
| 26 | 26 | ||
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 796016e63c1d..5a7da607ca04 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -435,7 +435,6 @@ struct request_queue { | |||
| 435 | struct delayed_work delay_work; | 435 | struct delayed_work delay_work; |
| 436 | 436 | ||
| 437 | struct backing_dev_info *backing_dev_info; | 437 | struct backing_dev_info *backing_dev_info; |
| 438 | struct disk_devt *disk_devt; | ||
| 439 | 438 | ||
| 440 | /* | 439 | /* |
| 441 | * The queue owner gets to use this for whatever they like. | 440 | * The queue owner gets to use this for whatever they like. |
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 1816c5e26581..88cd5dc8e238 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h | |||
| @@ -48,6 +48,7 @@ struct ceph_options { | |||
| 48 | unsigned long mount_timeout; /* jiffies */ | 48 | unsigned long mount_timeout; /* jiffies */ |
| 49 | unsigned long osd_idle_ttl; /* jiffies */ | 49 | unsigned long osd_idle_ttl; /* jiffies */ |
| 50 | unsigned long osd_keepalive_timeout; /* jiffies */ | 50 | unsigned long osd_keepalive_timeout; /* jiffies */ |
| 51 | unsigned long osd_request_timeout; /* jiffies */ | ||
| 51 | 52 | ||
| 52 | /* | 53 | /* |
| 53 | * any type that can't be simply compared or doesn't need need | 54 | * any type that can't be simply compared or doesn't need need |
| @@ -68,6 +69,7 @@ struct ceph_options { | |||
| 68 | #define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) | 69 | #define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) |
| 69 | #define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) | 70 | #define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) |
| 70 | #define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) | 71 | #define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) |
| 72 | #define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */ | ||
| 71 | 73 | ||
| 72 | #define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000) | 74 | #define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000) |
| 73 | #define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000) | 75 | #define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000) |
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 2ea0c282f3dc..c125b5d9e13c 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h | |||
| @@ -189,6 +189,7 @@ struct ceph_osd_request { | |||
| 189 | 189 | ||
| 190 | /* internal */ | 190 | /* internal */ |
| 191 | unsigned long r_stamp; /* jiffies, send or check time */ | 191 | unsigned long r_stamp; /* jiffies, send or check time */ |
| 192 | unsigned long r_start_stamp; /* jiffies */ | ||
| 192 | int r_attempts; | 193 | int r_attempts; |
| 193 | struct ceph_eversion r_replay_version; /* aka reassert_version */ | 194 | struct ceph_eversion r_replay_version; /* aka reassert_version */ |
| 194 | u32 r_last_force_resend; | 195 | u32 r_last_force_resend; |
diff --git a/include/linux/fs.h b/include/linux/fs.h index aad3fd0ff5f8..7251f7bb45e8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -2678,7 +2678,7 @@ static const char * const kernel_read_file_str[] = { | |||
| 2678 | 2678 | ||
| 2679 | static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id) | 2679 | static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id) |
| 2680 | { | 2680 | { |
| 2681 | if (id < 0 || id >= READING_MAX_ID) | 2681 | if ((unsigned)id >= READING_MAX_ID) |
| 2682 | return kernel_read_file_str[READING_UNKNOWN]; | 2682 | return kernel_read_file_str[READING_UNKNOWN]; |
| 2683 | 2683 | ||
| 2684 | return kernel_read_file_str[id]; | 2684 | return kernel_read_file_str[id]; |
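[annotation] The range check in kernel_read_file_id_str() is tightened: casting the enum to unsigned folds the `id < 0` and `id >= READING_MAX_ID` tests into a single comparison and avoids a tautological-compare warning when the enum's underlying type is already unsigned. The idiom in isolation (plain userspace C, hypothetical names, for illustration only):

#include <stdio.h>

enum example_id { ID_UNKNOWN, ID_A, ID_B, ID_MAX };

static const char *id_str(enum example_id id)
{
	/* one unsigned compare rejects both negative and too-large values */
	if ((unsigned int)id >= ID_MAX)
		return "unknown";
	return (const char *[]){ "unknown", "a", "b" }[id];
}

int main(void)
{
	printf("%s %s\n", id_str(ID_A), id_str((enum example_id)-1));
	return 0;
}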
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index a999d281a2f1..76f39754e7b0 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
| @@ -167,13 +167,6 @@ struct blk_integrity { | |||
| 167 | }; | 167 | }; |
| 168 | 168 | ||
| 169 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 169 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
| 170 | struct disk_devt { | ||
| 171 | atomic_t count; | ||
| 172 | void (*release)(struct disk_devt *disk_devt); | ||
| 173 | }; | ||
| 174 | |||
| 175 | void put_disk_devt(struct disk_devt *disk_devt); | ||
| 176 | void get_disk_devt(struct disk_devt *disk_devt); | ||
| 177 | 170 | ||
| 178 | struct gendisk { | 171 | struct gendisk { |
| 179 | /* major, first_minor and minors are input parameters only, | 172 | /* major, first_minor and minors are input parameters only, |
| @@ -183,7 +176,6 @@ struct gendisk { | |||
| 183 | int first_minor; | 176 | int first_minor; |
| 184 | int minors; /* maximum number of minors, =1 for | 177 | int minors; /* maximum number of minors, =1 for |
| 185 | * disks that can't be partitioned. */ | 178 | * disks that can't be partitioned. */ |
| 186 | struct disk_devt *disk_devt; | ||
| 187 | 179 | ||
| 188 | char disk_name[DISK_NAME_LEN]; /* name of major driver */ | 180 | char disk_name[DISK_NAME_LEN]; /* name of major driver */ |
| 189 | char *(*devnode)(struct gendisk *gd, umode_t *mode); | 181 | char *(*devnode)(struct gendisk *gd, umode_t *mode); |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 503099d8aada..b857fc8cc2ec 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
| @@ -122,7 +122,7 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, | |||
| 122 | struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, | 122 | struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, |
| 123 | pud_t *pud, int flags); | 123 | pud_t *pud, int flags); |
| 124 | int pmd_huge(pmd_t pmd); | 124 | int pmd_huge(pmd_t pmd); |
| 125 | int pud_huge(pud_t pmd); | 125 | int pud_huge(pud_t pud); |
| 126 | unsigned long hugetlb_change_protection(struct vm_area_struct *vma, | 126 | unsigned long hugetlb_change_protection(struct vm_area_struct *vma, |
| 127 | unsigned long address, unsigned long end, pgprot_t newprot); | 127 | unsigned long address, unsigned long end, pgprot_t newprot); |
| 128 | 128 | ||
| @@ -197,6 +197,9 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb, | |||
| 197 | #ifndef pgd_huge | 197 | #ifndef pgd_huge |
| 198 | #define pgd_huge(x) 0 | 198 | #define pgd_huge(x) 0 |
| 199 | #endif | 199 | #endif |
| 200 | #ifndef p4d_huge | ||
| 201 | #define p4d_huge(x) 0 | ||
| 202 | #endif | ||
| 200 | 203 | ||
| 201 | #ifndef pgd_write | 204 | #ifndef pgd_write |
| 202 | static inline int pgd_write(pgd_t pgd) | 205 | static inline int pgd_write(pgd_t pgd) |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 672cfef72fc8..97cbca19430d 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
| @@ -373,6 +373,8 @@ | |||
| 373 | #define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) | 373 | #define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) |
| 374 | #define ICC_IGRPEN1_EL1_SHIFT 0 | 374 | #define ICC_IGRPEN1_EL1_SHIFT 0 |
| 375 | #define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) | 375 | #define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) |
| 376 | #define ICC_SRE_EL1_DIB (1U << 2) | ||
| 377 | #define ICC_SRE_EL1_DFB (1U << 1) | ||
| 376 | #define ICC_SRE_EL1_SRE (1U << 0) | 378 | #define ICC_SRE_EL1_SRE (1U << 0) |
| 377 | 379 | ||
| 378 | /* | 380 | /* |
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 188eced6813e..9f3616085423 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
| @@ -524,6 +524,10 @@ static inline struct irq_domain *irq_find_matching_fwnode( | |||
| 524 | { | 524 | { |
| 525 | return NULL; | 525 | return NULL; |
| 526 | } | 526 | } |
| 527 | static inline bool irq_domain_check_msi_remap(void) | ||
| 528 | { | ||
| 529 | return false; | ||
| 530 | } | ||
| 527 | #endif /* !CONFIG_IRQ_DOMAIN */ | 531 | #endif /* !CONFIG_IRQ_DOMAIN */ |
| 528 | 532 | ||
| 529 | #endif /* _LINUX_IRQDOMAIN_H */ | 533 | #endif /* _LINUX_IRQDOMAIN_H */ |
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 8e06d758ee48..2afd74b9d844 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
| @@ -90,6 +90,13 @@ extern bool static_key_initialized; | |||
| 90 | struct static_key { | 90 | struct static_key { |
| 91 | atomic_t enabled; | 91 | atomic_t enabled; |
| 92 | /* | 92 | /* |
| 93 | * Note: | ||
| 94 | * To make anonymous unions work with old compilers, the static | ||
| 95 | * initialization of them requires brackets. This creates a dependency | ||
| 96 | * on the order of the struct with the initializers. If any fields | ||
| 97 | * are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need | ||
| 98 | * to be modified. | ||
| 99 | * | ||
| 93 | * bit 0 => 1 if key is initially true | 100 | * bit 0 => 1 if key is initially true |
| 94 | * 0 if initially false | 101 | * 0 if initially false |
| 95 | * bit 1 => 1 if points to struct static_key_mod | 102 | * bit 1 => 1 if points to struct static_key_mod |
| @@ -166,10 +173,10 @@ extern void static_key_disable(struct static_key *key); | |||
| 166 | */ | 173 | */ |
| 167 | #define STATIC_KEY_INIT_TRUE \ | 174 | #define STATIC_KEY_INIT_TRUE \ |
| 168 | { .enabled = { 1 }, \ | 175 | { .enabled = { 1 }, \ |
| 169 | .entries = (void *)JUMP_TYPE_TRUE } | 176 | { .entries = (void *)JUMP_TYPE_TRUE } } |
| 170 | #define STATIC_KEY_INIT_FALSE \ | 177 | #define STATIC_KEY_INIT_FALSE \ |
| 171 | { .enabled = { 0 }, \ | 178 | { .enabled = { 0 }, \ |
| 172 | .entries = (void *)JUMP_TYPE_FALSE } | 179 | { .entries = (void *)JUMP_TYPE_FALSE } } |
| 173 | 180 | ||
| 174 | #else /* !HAVE_JUMP_LABEL */ | 181 | #else /* !HAVE_JUMP_LABEL */ |
| 175 | 182 | ||
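[annotation] The comment and the extra braces added to STATIC_KEY_INIT_TRUE/FALSE deal with a C quirk: with an anonymous union member, older compilers only accept a brace-nested initializer if the union's braces are written out, and positional initialization then depends on the order of the struct's members. A stand-alone illustration of the brace requirement (hypothetical struct, not the kernel's):

struct key_like {
	int enabled;
	union {
		void *entries;
		struct key_like_mod *next;
	};
};

/* Extra braces around the anonymous-union member keep old GCC happy;
 * reordering or adding fields would force the initializer to change,
 * which is exactly what the new comment warns about.
 */
static struct key_like k = { 1, { (void *)0x1 } };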
diff --git a/include/linux/kasan.h b/include/linux/kasan.h index ceb3fe78a0d3..1c823bef4c15 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h | |||
| @@ -18,6 +18,7 @@ extern unsigned char kasan_zero_page[PAGE_SIZE]; | |||
| 18 | extern pte_t kasan_zero_pte[PTRS_PER_PTE]; | 18 | extern pte_t kasan_zero_pte[PTRS_PER_PTE]; |
| 19 | extern pmd_t kasan_zero_pmd[PTRS_PER_PMD]; | 19 | extern pmd_t kasan_zero_pmd[PTRS_PER_PMD]; |
| 20 | extern pud_t kasan_zero_pud[PTRS_PER_PUD]; | 20 | extern pud_t kasan_zero_pud[PTRS_PER_PUD]; |
| 21 | extern p4d_t kasan_zero_p4d[PTRS_PER_P4D]; | ||
| 21 | 22 | ||
| 22 | void kasan_populate_zero_shadow(const void *shadow_start, | 23 | void kasan_populate_zero_shadow(const void *shadow_start, |
| 23 | const void *shadow_end); | 24 | const void *shadow_end); |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 0d65dd72c0f4..5f01c88f0800 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -1560,14 +1560,24 @@ static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, | |||
| 1560 | return ptep; | 1560 | return ptep; |
| 1561 | } | 1561 | } |
| 1562 | 1562 | ||
| 1563 | #ifdef __PAGETABLE_P4D_FOLDED | ||
| 1564 | static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, | ||
| 1565 | unsigned long address) | ||
| 1566 | { | ||
| 1567 | return 0; | ||
| 1568 | } | ||
| 1569 | #else | ||
| 1570 | int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); | ||
| 1571 | #endif | ||
| 1572 | |||
| 1563 | #ifdef __PAGETABLE_PUD_FOLDED | 1573 | #ifdef __PAGETABLE_PUD_FOLDED |
| 1564 | static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, | 1574 | static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, |
| 1565 | unsigned long address) | 1575 | unsigned long address) |
| 1566 | { | 1576 | { |
| 1567 | return 0; | 1577 | return 0; |
| 1568 | } | 1578 | } |
| 1569 | #else | 1579 | #else |
| 1570 | int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); | 1580 | int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address); |
| 1571 | #endif | 1581 | #endif |
| 1572 | 1582 | ||
| 1573 | #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU) | 1583 | #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU) |
| @@ -1619,11 +1629,22 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); | |||
| 1619 | * Remove it when 4level-fixup.h has been removed. | 1629 | * Remove it when 4level-fixup.h has been removed. |
| 1620 | */ | 1630 | */ |
| 1621 | #if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK) | 1631 | #if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK) |
| 1622 | static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) | 1632 | |
| 1633 | #ifndef __ARCH_HAS_5LEVEL_HACK | ||
| 1634 | static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, | ||
| 1635 | unsigned long address) | ||
| 1636 | { | ||
| 1637 | return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? | ||
| 1638 | NULL : p4d_offset(pgd, address); | ||
| 1639 | } | ||
| 1640 | |||
| 1641 | static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, | ||
| 1642 | unsigned long address) | ||
| 1623 | { | 1643 | { |
| 1624 | return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))? | 1644 | return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? |
| 1625 | NULL: pud_offset(pgd, address); | 1645 | NULL : pud_offset(p4d, address); |
| 1626 | } | 1646 | } |
| 1647 | #endif /* !__ARCH_HAS_5LEVEL_HACK */ | ||
| 1627 | 1648 | ||
| 1628 | static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) | 1649 | static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) |
| 1629 | { | 1650 | { |
| @@ -2385,7 +2406,8 @@ void sparse_mem_maps_populate_node(struct page **map_map, | |||
| 2385 | 2406 | ||
| 2386 | struct page *sparse_mem_map_populate(unsigned long pnum, int nid); | 2407 | struct page *sparse_mem_map_populate(unsigned long pnum, int nid); |
| 2387 | pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); | 2408 | pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); |
| 2388 | pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); | 2409 | p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); |
| 2410 | pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); | ||
| 2389 | pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); | 2411 | pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); |
| 2390 | pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); | 2412 | pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); |
| 2391 | void *vmemmap_alloc_block(unsigned long size, int node); | 2413 | void *vmemmap_alloc_block(unsigned long size, int node); |
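[annotation] With __p4d_alloc() and the inline p4d_alloc() wrapper in place, allocation paths grow one more rung: fault and populate paths allocate (or short-circuit through) the p4d before the pud. A hedged sketch of how a caller strings the chain together, mirroring the shape of __handle_mm_fault() in simplified form:

/* Illustrative allocation descent; each *_alloc() returns the entry
 * for 'addr', allocating that level on demand, or NULL on -ENOMEM.
 * On folded configurations p4d_alloc() is effectively free.
 */
static pmd_t *alloc_to_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
	pud_t *pud;

	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	return pmd_alloc(mm, pud, addr);
}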
diff --git a/include/linux/purgatory.h b/include/linux/purgatory.h new file mode 100644 index 000000000000..d60d4e278609 --- /dev/null +++ b/include/linux/purgatory.h | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | #ifndef _LINUX_PURGATORY_H | ||
| 2 | #define _LINUX_PURGATORY_H | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | #include <crypto/sha.h> | ||
| 6 | #include <uapi/linux/kexec.h> | ||
| 7 | |||
| 8 | struct kexec_sha_region { | ||
| 9 | unsigned long start; | ||
| 10 | unsigned long len; | ||
| 11 | }; | ||
| 12 | |||
| 13 | /* | ||
| 14 | * These forward declarations serve two purposes: | ||
| 15 | * | ||
| 16 | * 1) Make sparse happy when checking arch/purgatory | ||
| 17 | * 2) Document that these are required to be global so the symbol | ||
| 18 | * lookup in kexec works | ||
| 19 | */ | ||
| 20 | extern struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX]; | ||
| 21 | extern u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE]; | ||
| 22 | |||
| 23 | #endif | ||
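[annotation] The new header gives the purgatory blob and kernel/kexec_file.c a shared, prefixed name for the two symbols that get patched in at load time; kexec_purgatory_get_set_symbol() (used further down in this series) looks them up by string, so the names must stay global and in sync. A rough sketch of how the purgatory side is expected to consume them, inferred from the declarations above and not taken from this patch (sha256_*() and struct sha256_state stand in for purgatory's local SHA-256 code):

struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] = {};
u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] = {};

static int verify_sha256_digest(void)
{
	u8 digest[SHA256_DIGEST_SIZE];
	struct sha256_state ss;
	int i;

	sha256_init(&ss);
	for (i = 0; i < KEXEC_SEGMENT_MAX; i++)
		if (purgatory_sha_regions[i].len)
			sha256_update(&ss, (void *)purgatory_sha_regions[i].start,
				      purgatory_sha_regions[i].len);
	sha256_final(&ss, digest);

	/* reject the loaded segments if they were modified after load */
	return memcmp(digest, purgatory_sha256_digest, sizeof(digest)) ? -1 : 0;
}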
diff --git a/include/linux/random.h b/include/linux/random.h index 7bd2403e4fef..ed5c3838780d 100644 --- a/include/linux/random.h +++ b/include/linux/random.h | |||
| @@ -37,14 +37,26 @@ extern void get_random_bytes(void *buf, int nbytes); | |||
| 37 | extern int add_random_ready_callback(struct random_ready_callback *rdy); | 37 | extern int add_random_ready_callback(struct random_ready_callback *rdy); |
| 38 | extern void del_random_ready_callback(struct random_ready_callback *rdy); | 38 | extern void del_random_ready_callback(struct random_ready_callback *rdy); |
| 39 | extern void get_random_bytes_arch(void *buf, int nbytes); | 39 | extern void get_random_bytes_arch(void *buf, int nbytes); |
| 40 | extern int random_int_secret_init(void); | ||
| 41 | 40 | ||
| 42 | #ifndef MODULE | 41 | #ifndef MODULE |
| 43 | extern const struct file_operations random_fops, urandom_fops; | 42 | extern const struct file_operations random_fops, urandom_fops; |
| 44 | #endif | 43 | #endif |
| 45 | 44 | ||
| 46 | unsigned int get_random_int(void); | 45 | u32 get_random_u32(void); |
| 47 | unsigned long get_random_long(void); | 46 | u64 get_random_u64(void); |
| 47 | static inline unsigned int get_random_int(void) | ||
| 48 | { | ||
| 49 | return get_random_u32(); | ||
| 50 | } | ||
| 51 | static inline unsigned long get_random_long(void) | ||
| 52 | { | ||
| 53 | #if BITS_PER_LONG == 64 | ||
| 54 | return get_random_u64(); | ||
| 55 | #else | ||
| 56 | return get_random_u32(); | ||
| 57 | #endif | ||
| 58 | } | ||
| 59 | |||
| 48 | unsigned long randomize_page(unsigned long start, unsigned long range); | 60 | unsigned long randomize_page(unsigned long start, unsigned long range); |
| 49 | 61 | ||
| 50 | u32 prandom_u32(void); | 62 | u32 prandom_u32(void); |
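[annotation] get_random_int() and get_random_long() become thin inline wrappers over the new batched get_random_u32()/get_random_u64(), and random_int_secret_init() disappears (its only call site in init/main.c is removed further down). Existing callers keep working unchanged; a trivial hedged usage sketch:

/* Usage is unchanged for callers; only the backing source differs.
 * get_random_long() picks the 64-bit batch only when longs are
 * 64-bit, so 32-bit kernels don't draw entropy they can't use.
 */
static unsigned long random_offset(void)
{
	return get_random_long() & (PAGE_SIZE - 1);
}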
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index ad3e5158e586..c9f795e9a2ee 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h | |||
| @@ -65,7 +65,7 @@ struct regulator_state { | |||
| 65 | int uV; /* suspend voltage */ | 65 | int uV; /* suspend voltage */ |
| 66 | unsigned int mode; /* suspend regulator operating mode */ | 66 | unsigned int mode; /* suspend regulator operating mode */ |
| 67 | int enabled; /* is regulator enabled in this suspend state */ | 67 | int enabled; /* is regulator enabled in this suspend state */ |
| 68 | int disabled; /* is the regulator disbled in this suspend state */ | 68 | int disabled; /* is the regulator disabled in this suspend state */ |
| 69 | }; | 69 | }; |
| 70 | 70 | ||
| 71 | /** | 71 | /** |
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index be765234c0a2..32354b4b4b2b 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h | |||
| @@ -72,7 +72,7 @@ struct ucounts { | |||
| 72 | struct hlist_node node; | 72 | struct hlist_node node; |
| 73 | struct user_namespace *ns; | 73 | struct user_namespace *ns; |
| 74 | kuid_t uid; | 74 | kuid_t uid; |
| 75 | atomic_t count; | 75 | int count; |
| 76 | atomic_t ucount[UCOUNT_COUNTS]; | 76 | atomic_t ucount[UCOUNT_COUNTS]; |
| 77 | }; | 77 | }; |
| 78 | 78 | ||
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 0468548acebf..48a3483dccb1 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h | |||
| @@ -61,8 +61,7 @@ extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *, | |||
| 61 | unsigned long from, unsigned long to, | 61 | unsigned long from, unsigned long to, |
| 62 | unsigned long len); | 62 | unsigned long len); |
| 63 | 63 | ||
| 64 | extern void userfaultfd_remove(struct vm_area_struct *vma, | 64 | extern bool userfaultfd_remove(struct vm_area_struct *vma, |
| 65 | struct vm_area_struct **prev, | ||
| 66 | unsigned long start, | 65 | unsigned long start, |
| 67 | unsigned long end); | 66 | unsigned long end); |
| 68 | 67 | ||
| @@ -72,8 +71,6 @@ extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, | |||
| 72 | extern void userfaultfd_unmap_complete(struct mm_struct *mm, | 71 | extern void userfaultfd_unmap_complete(struct mm_struct *mm, |
| 73 | struct list_head *uf); | 72 | struct list_head *uf); |
| 74 | 73 | ||
| 75 | extern void userfaultfd_exit(struct mm_struct *mm); | ||
| 76 | |||
| 77 | #else /* CONFIG_USERFAULTFD */ | 74 | #else /* CONFIG_USERFAULTFD */ |
| 78 | 75 | ||
| 79 | /* mm helpers */ | 76 | /* mm helpers */ |
| @@ -120,11 +117,11 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx, | |||
| 120 | { | 117 | { |
| 121 | } | 118 | } |
| 122 | 119 | ||
| 123 | static inline void userfaultfd_remove(struct vm_area_struct *vma, | 120 | static inline bool userfaultfd_remove(struct vm_area_struct *vma, |
| 124 | struct vm_area_struct **prev, | ||
| 125 | unsigned long start, | 121 | unsigned long start, |
| 126 | unsigned long end) | 122 | unsigned long end) |
| 127 | { | 123 | { |
| 124 | return true; | ||
| 128 | } | 125 | } |
| 129 | 126 | ||
| 130 | static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, | 127 | static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, |
| @@ -139,10 +136,6 @@ static inline void userfaultfd_unmap_complete(struct mm_struct *mm, | |||
| 139 | { | 136 | { |
| 140 | } | 137 | } |
| 141 | 138 | ||
| 142 | static inline void userfaultfd_exit(struct mm_struct *mm) | ||
| 143 | { | ||
| 144 | } | ||
| 145 | |||
| 146 | #endif /* CONFIG_USERFAULTFD */ | 139 | #endif /* CONFIG_USERFAULTFD */ |
| 147 | 140 | ||
| 148 | #endif /* _LINUX_USERFAULTFD_K_H */ | 141 | #endif /* _LINUX_USERFAULTFD_K_H */ |
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 6aa1b6cb5828..a80b7b59cf33 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h | |||
| @@ -79,6 +79,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
| 79 | THP_SPLIT_PAGE_FAILED, | 79 | THP_SPLIT_PAGE_FAILED, |
| 80 | THP_DEFERRED_SPLIT_PAGE, | 80 | THP_DEFERRED_SPLIT_PAGE, |
| 81 | THP_SPLIT_PMD, | 81 | THP_SPLIT_PMD, |
| 82 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD | ||
| 83 | THP_SPLIT_PUD, | ||
| 84 | #endif | ||
| 82 | THP_ZERO_PAGE_ALLOC, | 85 | THP_ZERO_PAGE_ALLOC, |
| 83 | THP_ZERO_PAGE_ALLOC_FAILED, | 86 | THP_ZERO_PAGE_ALLOC_FAILED, |
| 84 | #endif | 87 | #endif |
diff --git a/include/linux/wait.h b/include/linux/wait.h index aacb1282d19a..db076ca7f11d 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -620,30 +620,19 @@ do { \ | |||
| 620 | __ret; \ | 620 | __ret; \ |
| 621 | }) | 621 | }) |
| 622 | 622 | ||
| 623 | extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *); | ||
| 624 | extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *); | ||
| 623 | 625 | ||
| 624 | #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \ | 626 | #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \ |
| 625 | ({ \ | 627 | ({ \ |
| 626 | int __ret = 0; \ | 628 | int __ret; \ |
| 627 | DEFINE_WAIT(__wait); \ | 629 | DEFINE_WAIT(__wait); \ |
| 628 | if (exclusive) \ | 630 | if (exclusive) \ |
| 629 | __wait.flags |= WQ_FLAG_EXCLUSIVE; \ | 631 | __wait.flags |= WQ_FLAG_EXCLUSIVE; \ |
| 630 | do { \ | 632 | do { \ |
| 631 | if (likely(list_empty(&__wait.task_list))) \ | 633 | __ret = fn(&(wq), &__wait); \ |
| 632 | __add_wait_queue_tail(&(wq), &__wait); \ | 634 | if (__ret) \ |
| 633 | set_current_state(TASK_INTERRUPTIBLE); \ | ||
| 634 | if (signal_pending(current)) { \ | ||
| 635 | __ret = -ERESTARTSYS; \ | ||
| 636 | break; \ | 635 | break; \ |
| 637 | } \ | ||
| 638 | if (irq) \ | ||
| 639 | spin_unlock_irq(&(wq).lock); \ | ||
| 640 | else \ | ||
| 641 | spin_unlock(&(wq).lock); \ | ||
| 642 | schedule(); \ | ||
| 643 | if (irq) \ | ||
| 644 | spin_lock_irq(&(wq).lock); \ | ||
| 645 | else \ | ||
| 646 | spin_lock(&(wq).lock); \ | ||
| 647 | } while (!(condition)); \ | 636 | } while (!(condition)); \ |
| 648 | __remove_wait_queue(&(wq), &__wait); \ | 637 | __remove_wait_queue(&(wq), &__wait); \ |
| 649 | __set_current_state(TASK_RUNNING); \ | 638 | __set_current_state(TASK_RUNNING); \ |
| @@ -676,7 +665,7 @@ do { \ | |||
| 676 | */ | 665 | */ |
| 677 | #define wait_event_interruptible_locked(wq, condition) \ | 666 | #define wait_event_interruptible_locked(wq, condition) \ |
| 678 | ((condition) \ | 667 | ((condition) \ |
| 679 | ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0)) | 668 | ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr)) |
| 680 | 669 | ||
| 681 | /** | 670 | /** |
| 682 | * wait_event_interruptible_locked_irq - sleep until a condition gets true | 671 | * wait_event_interruptible_locked_irq - sleep until a condition gets true |
| @@ -703,7 +692,7 @@ do { \ | |||
| 703 | */ | 692 | */ |
| 704 | #define wait_event_interruptible_locked_irq(wq, condition) \ | 693 | #define wait_event_interruptible_locked_irq(wq, condition) \ |
| 705 | ((condition) \ | 694 | ((condition) \ |
| 706 | ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1)) | 695 | ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq)) |
| 707 | 696 | ||
| 708 | /** | 697 | /** |
| 709 | * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true | 698 | * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true |
| @@ -734,7 +723,7 @@ do { \ | |||
| 734 | */ | 723 | */ |
| 735 | #define wait_event_interruptible_exclusive_locked(wq, condition) \ | 724 | #define wait_event_interruptible_exclusive_locked(wq, condition) \ |
| 736 | ((condition) \ | 725 | ((condition) \ |
| 737 | ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0)) | 726 | ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr)) |
| 738 | 727 | ||
| 739 | /** | 728 | /** |
| 740 | * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true | 729 | * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true |
| @@ -765,7 +754,7 @@ do { \ | |||
| 765 | */ | 754 | */ |
| 766 | #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ | 755 | #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ |
| 767 | ((condition) \ | 756 | ((condition) \ |
| 768 | ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1)) | 757 | ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq)) |
| 769 | 758 | ||
| 770 | 759 | ||
| 771 | #define __wait_event_killable(wq, condition) \ | 760 | #define __wait_event_killable(wq, condition) \ |
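[annotation] The locked wait_event variants stop open-coding the add-to-queue / check-signal / unlock / schedule / relock sequence and delegate it to do_wait_intr() and do_wait_intr_irq(), which are expected to be defined elsewhere in this series (likely kernel/sched/wait.c). The caller contract is unchanged: the waitqueue lock must already be held and is still held on return. A hedged usage sketch with hypothetical names (wq is a wait_queue_head_t; data_ready and consume_data() are placeholders):

int err;

/* caller holds wq.lock, as the _locked_irq variant requires */
spin_lock_irq(&wq.lock);
err = wait_event_interruptible_locked_irq(wq, data_ready);
/* 0 on success, -ERESTARTSYS if interrupted by a signal */
if (!err)
	consume_data();
spin_unlock_irq(&wq.lock);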
diff --git a/include/media/vsp1.h b/include/media/vsp1.h index 458b400373d4..38aac554dbba 100644 --- a/include/media/vsp1.h +++ b/include/media/vsp1.h | |||
| @@ -20,8 +20,17 @@ struct device; | |||
| 20 | 20 | ||
| 21 | int vsp1_du_init(struct device *dev); | 21 | int vsp1_du_init(struct device *dev); |
| 22 | 22 | ||
| 23 | int vsp1_du_setup_lif(struct device *dev, unsigned int width, | 23 | /** |
| 24 | unsigned int height); | 24 | * struct vsp1_du_lif_config - VSP LIF configuration |
| 25 | * @width: output frame width | ||
| 26 | * @height: output frame height | ||
| 27 | */ | ||
| 28 | struct vsp1_du_lif_config { | ||
| 29 | unsigned int width; | ||
| 30 | unsigned int height; | ||
| 31 | }; | ||
| 32 | |||
| 33 | int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg); | ||
| 25 | 34 | ||
| 26 | struct vsp1_du_atomic_config { | 35 | struct vsp1_du_atomic_config { |
| 27 | u32 pixelformat; | 36 | u32 pixelformat; |
diff --git a/include/net/irda/timer.h b/include/net/irda/timer.h index cb2615ccf761..d784f242cf7b 100644 --- a/include/net/irda/timer.h +++ b/include/net/irda/timer.h | |||
| @@ -59,7 +59,7 @@ struct lap_cb; | |||
| 59 | * Slot timer must never exceed 85 ms, and must always be at least 25 ms, | 59 | * Slot timer must never exceed 85 ms, and must always be at least 25 ms, |
| 60 | * suggested to 75-85 msec by IrDA lite. This doesn't work with a lot of | 60 | * suggested to 75-85 msec by IrDA lite. This doesn't work with a lot of |
| 61 | * devices, and other stackes uses a lot more, so it's best we do it as well | 61 | * devices, and other stackes uses a lot more, so it's best we do it as well |
| 62 | * (Note : this is the default value and sysctl overides it - Jean II) | 62 | * (Note : this is the default value and sysctl overrides it - Jean II) |
| 63 | */ | 63 | */ |
| 64 | #define SLOT_TIMEOUT (90*HZ/1000) | 64 | #define SLOT_TIMEOUT (90*HZ/1000) |
| 65 | 65 | ||
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h index 14e49c798135..b35533b94277 100644 --- a/include/trace/events/syscalls.h +++ b/include/trace/events/syscalls.h | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | #undef TRACE_SYSTEM | 1 | #undef TRACE_SYSTEM |
| 2 | #define TRACE_SYSTEM raw_syscalls | 2 | #define TRACE_SYSTEM raw_syscalls |
| 3 | #undef TRACE_INCLUDE_FILE | ||
| 3 | #define TRACE_INCLUDE_FILE syscalls | 4 | #define TRACE_INCLUDE_FILE syscalls |
| 4 | 5 | ||
| 5 | #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) | 6 | #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) |
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h index 407cb55df6ac..7fb97863c945 100644 --- a/include/uapi/drm/omap_drm.h +++ b/include/uapi/drm/omap_drm.h | |||
| @@ -33,8 +33,8 @@ extern "C" { | |||
| 33 | #define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */ | 33 | #define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */ |
| 34 | 34 | ||
| 35 | struct drm_omap_param { | 35 | struct drm_omap_param { |
| 36 | uint64_t param; /* in */ | 36 | __u64 param; /* in */ |
| 37 | uint64_t value; /* in (set_param), out (get_param) */ | 37 | __u64 value; /* in (set_param), out (get_param) */ |
| 38 | }; | 38 | }; |
| 39 | 39 | ||
| 40 | #define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */ | 40 | #define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */ |
| @@ -53,18 +53,18 @@ struct drm_omap_param { | |||
| 53 | #define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32) | 53 | #define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32) |
| 54 | 54 | ||
| 55 | union omap_gem_size { | 55 | union omap_gem_size { |
| 56 | uint32_t bytes; /* (for non-tiled formats) */ | 56 | __u32 bytes; /* (for non-tiled formats) */ |
| 57 | struct { | 57 | struct { |
| 58 | uint16_t width; | 58 | __u16 width; |
| 59 | uint16_t height; | 59 | __u16 height; |
| 60 | } tiled; /* (for tiled formats) */ | 60 | } tiled; /* (for tiled formats) */ |
| 61 | }; | 61 | }; |
| 62 | 62 | ||
| 63 | struct drm_omap_gem_new { | 63 | struct drm_omap_gem_new { |
| 64 | union omap_gem_size size; /* in */ | 64 | union omap_gem_size size; /* in */ |
| 65 | uint32_t flags; /* in */ | 65 | __u32 flags; /* in */ |
| 66 | uint32_t handle; /* out */ | 66 | __u32 handle; /* out */ |
| 67 | uint32_t __pad; | 67 | __u32 __pad; |
| 68 | }; | 68 | }; |
| 69 | 69 | ||
| 70 | /* mask of operations: */ | 70 | /* mask of operations: */ |
| @@ -74,33 +74,33 @@ enum omap_gem_op { | |||
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | struct drm_omap_gem_cpu_prep { | 76 | struct drm_omap_gem_cpu_prep { |
| 77 | uint32_t handle; /* buffer handle (in) */ | 77 | __u32 handle; /* buffer handle (in) */ |
| 78 | uint32_t op; /* mask of omap_gem_op (in) */ | 78 | __u32 op; /* mask of omap_gem_op (in) */ |
| 79 | }; | 79 | }; |
| 80 | 80 | ||
| 81 | struct drm_omap_gem_cpu_fini { | 81 | struct drm_omap_gem_cpu_fini { |
| 82 | uint32_t handle; /* buffer handle (in) */ | 82 | __u32 handle; /* buffer handle (in) */ |
| 83 | uint32_t op; /* mask of omap_gem_op (in) */ | 83 | __u32 op; /* mask of omap_gem_op (in) */ |
| 84 | /* TODO maybe here we pass down info about what regions are touched | 84 | /* TODO maybe here we pass down info about what regions are touched |
| 85 | * by sw so we can be clever about cache ops? For now a placeholder, | 85 | * by sw so we can be clever about cache ops? For now a placeholder, |
| 86 | * set to zero and we just do full buffer flush.. | 86 | * set to zero and we just do full buffer flush.. |
| 87 | */ | 87 | */ |
| 88 | uint32_t nregions; | 88 | __u32 nregions; |
| 89 | uint32_t __pad; | 89 | __u32 __pad; |
| 90 | }; | 90 | }; |
| 91 | 91 | ||
| 92 | struct drm_omap_gem_info { | 92 | struct drm_omap_gem_info { |
| 93 | uint32_t handle; /* buffer handle (in) */ | 93 | __u32 handle; /* buffer handle (in) */ |
| 94 | uint32_t pad; | 94 | __u32 pad; |
| 95 | uint64_t offset; /* mmap offset (out) */ | 95 | __u64 offset; /* mmap offset (out) */ |
| 96 | /* note: in case of tiled buffers, the user virtual size can be | 96 | /* note: in case of tiled buffers, the user virtual size can be |
| 97 | * different from the physical size (ie. how many pages are needed | 97 | * different from the physical size (ie. how many pages are needed |
| 98 | * to back the object) which is returned in DRM_IOCTL_GEM_OPEN.. | 98 | * to back the object) which is returned in DRM_IOCTL_GEM_OPEN.. |
| 99 | * This size here is the one that should be used if you want to | 99 | * This size here is the one that should be used if you want to |
| 100 | * mmap() the buffer: | 100 | * mmap() the buffer: |
| 101 | */ | 101 | */ |
| 102 | uint32_t size; /* virtual size for mmap'ing (out) */ | 102 | __u32 size; /* virtual size for mmap'ing (out) */ |
| 103 | uint32_t __pad; | 103 | __u32 __pad; |
| 104 | }; | 104 | }; |
| 105 | 105 | ||
| 106 | #define DRM_OMAP_GET_PARAM 0x00 | 106 | #define DRM_OMAP_GET_PARAM 0x00 |
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index c055947c5c98..3b059530dac9 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h | |||
| @@ -18,8 +18,7 @@ | |||
| 18 | * means the userland is reading). | 18 | * means the userland is reading). |
| 19 | */ | 19 | */ |
| 20 | #define UFFD_API ((__u64)0xAA) | 20 | #define UFFD_API ((__u64)0xAA) |
| 21 | #define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_EXIT | \ | 21 | #define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK | \ |
| 22 | UFFD_FEATURE_EVENT_FORK | \ | ||
| 23 | UFFD_FEATURE_EVENT_REMAP | \ | 22 | UFFD_FEATURE_EVENT_REMAP | \ |
| 24 | UFFD_FEATURE_EVENT_REMOVE | \ | 23 | UFFD_FEATURE_EVENT_REMOVE | \ |
| 25 | UFFD_FEATURE_EVENT_UNMAP | \ | 24 | UFFD_FEATURE_EVENT_UNMAP | \ |
| @@ -113,7 +112,6 @@ struct uffd_msg { | |||
| 113 | #define UFFD_EVENT_REMAP 0x14 | 112 | #define UFFD_EVENT_REMAP 0x14 |
| 114 | #define UFFD_EVENT_REMOVE 0x15 | 113 | #define UFFD_EVENT_REMOVE 0x15 |
| 115 | #define UFFD_EVENT_UNMAP 0x16 | 114 | #define UFFD_EVENT_UNMAP 0x16 |
| 116 | #define UFFD_EVENT_EXIT 0x17 | ||
| 117 | 115 | ||
| 118 | /* flags for UFFD_EVENT_PAGEFAULT */ | 116 | /* flags for UFFD_EVENT_PAGEFAULT */ |
| 119 | #define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */ | 117 | #define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */ |
| @@ -163,7 +161,6 @@ struct uffdio_api { | |||
| 163 | #define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4) | 161 | #define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4) |
| 164 | #define UFFD_FEATURE_MISSING_SHMEM (1<<5) | 162 | #define UFFD_FEATURE_MISSING_SHMEM (1<<5) |
| 165 | #define UFFD_FEATURE_EVENT_UNMAP (1<<6) | 163 | #define UFFD_FEATURE_EVENT_UNMAP (1<<6) |
| 166 | #define UFFD_FEATURE_EVENT_EXIT (1<<7) | ||
| 167 | __u64 features; | 164 | __u64 features; |
| 168 | 165 | ||
| 169 | __u64 ioctls; | 166 | __u64 ioctls; |
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h index a0083be5d529..1f6d78f044b6 100644 --- a/include/xen/swiotlb-xen.h +++ b/include/xen/swiotlb-xen.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #define __LINUX_SWIOTLB_XEN_H | 2 | #define __LINUX_SWIOTLB_XEN_H |
| 3 | 3 | ||
| 4 | #include <linux/dma-direction.h> | 4 | #include <linux/dma-direction.h> |
| 5 | #include <linux/scatterlist.h> | ||
| 5 | #include <linux/swiotlb.h> | 6 | #include <linux/swiotlb.h> |
| 6 | 7 | ||
| 7 | extern int xen_swiotlb_init(int verbose, bool early); | 8 | extern int xen_swiotlb_init(int verbose, bool early); |
| @@ -55,4 +56,14 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask); | |||
| 55 | 56 | ||
| 56 | extern int | 57 | extern int |
| 57 | xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask); | 58 | xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask); |
| 59 | |||
| 60 | extern int | ||
| 61 | xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma, | ||
| 62 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | ||
| 63 | unsigned long attrs); | ||
| 64 | |||
| 65 | extern int | ||
| 66 | xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
| 67 | void *cpu_addr, dma_addr_t handle, size_t size, | ||
| 68 | unsigned long attrs); | ||
| 58 | #endif /* __LINUX_SWIOTLB_XEN_H */ | 69 | #endif /* __LINUX_SWIOTLB_XEN_H */ |
diff --git a/init/main.c b/init/main.c index eae2f15657c6..f9c9d9948203 100644 --- a/init/main.c +++ b/init/main.c | |||
| @@ -882,7 +882,6 @@ static void __init do_basic_setup(void) | |||
| 882 | do_ctors(); | 882 | do_ctors(); |
| 883 | usermodehelper_enable(); | 883 | usermodehelper_enable(); |
| 884 | do_initcalls(); | 884 | do_initcalls(); |
| 885 | random_int_secret_init(); | ||
| 886 | } | 885 | } |
| 887 | 886 | ||
| 888 | static void __init do_pre_smp_initcalls(void) | 887 | static void __init do_pre_smp_initcalls(void) |
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 0125589c7428..48851327a15e 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c | |||
| @@ -2669,7 +2669,7 @@ static bool css_visible(struct cgroup_subsys_state *css) | |||
| 2669 | * | 2669 | * |
| 2670 | * Returns 0 on success, -errno on failure. On failure, csses which have | 2670 | * Returns 0 on success, -errno on failure. On failure, csses which have |
| 2671 | * been processed already aren't cleaned up. The caller is responsible for | 2671 | * been processed already aren't cleaned up. The caller is responsible for |
| 2672 | * cleaning up with cgroup_apply_control_disble(). | 2672 | * cleaning up with cgroup_apply_control_disable(). |
| 2673 | */ | 2673 | */ |
| 2674 | static int cgroup_apply_control_enable(struct cgroup *cgrp) | 2674 | static int cgroup_apply_control_enable(struct cgroup *cgrp) |
| 2675 | { | 2675 | { |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 6f41548f2e32..a17ed56c8ce1 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -998,7 +998,7 @@ list_update_cgroup_event(struct perf_event *event, | |||
| 998 | */ | 998 | */ |
| 999 | #define PERF_CPU_HRTIMER (1000 / HZ) | 999 | #define PERF_CPU_HRTIMER (1000 / HZ) |
| 1000 | /* | 1000 | /* |
| 1001 | * function must be called with interrupts disbled | 1001 | * function must be called with interrupts disabled |
| 1002 | */ | 1002 | */ |
| 1003 | static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr) | 1003 | static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr) |
| 1004 | { | 1004 | { |
diff --git a/kernel/exit.c b/kernel/exit.c index e126ebf2400c..516acdb0e0ec 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
| @@ -554,7 +554,6 @@ static void exit_mm(void) | |||
| 554 | enter_lazy_tlb(mm, current); | 554 | enter_lazy_tlb(mm, current); |
| 555 | task_unlock(current); | 555 | task_unlock(current); |
| 556 | mm_update_next_owner(mm); | 556 | mm_update_next_owner(mm); |
| 557 | userfaultfd_exit(mm); | ||
| 558 | mmput(mm); | 557 | mmput(mm); |
| 559 | if (test_thread_flag(TIF_MEMDIE)) | 558 | if (test_thread_flag(TIF_MEMDIE)) |
| 560 | exit_oom_victim(); | 559 | exit_oom_victim(); |
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index b56a558e406d..b118735fea9d 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c | |||
| @@ -614,13 +614,13 @@ static int kexec_calculate_store_digests(struct kimage *image) | |||
| 614 | ret = crypto_shash_final(desc, digest); | 614 | ret = crypto_shash_final(desc, digest); |
| 615 | if (ret) | 615 | if (ret) |
| 616 | goto out_free_digest; | 616 | goto out_free_digest; |
| 617 | ret = kexec_purgatory_get_set_symbol(image, "sha_regions", | 617 | ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions", |
| 618 | sha_regions, sha_region_sz, 0); | 618 | sha_regions, sha_region_sz, 0); |
| 619 | if (ret) | 619 | if (ret) |
| 620 | goto out_free_digest; | 620 | goto out_free_digest; |
| 621 | 621 | ||
| 622 | ret = kexec_purgatory_get_set_symbol(image, "sha256_digest", | 622 | ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest", |
| 623 | digest, SHA256_DIGEST_SIZE, 0); | 623 | digest, SHA256_DIGEST_SIZE, 0); |
| 624 | if (ret) | 624 | if (ret) |
| 625 | goto out_free_digest; | 625 | goto out_free_digest; |
| 626 | } | 626 | } |
diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h index 4cef7e4706b0..799a8a452187 100644 --- a/kernel/kexec_internal.h +++ b/kernel/kexec_internal.h | |||
| @@ -15,11 +15,7 @@ int kimage_is_destination_range(struct kimage *image, | |||
| 15 | extern struct mutex kexec_mutex; | 15 | extern struct mutex kexec_mutex; |
| 16 | 16 | ||
| 17 | #ifdef CONFIG_KEXEC_FILE | 17 | #ifdef CONFIG_KEXEC_FILE |
| 18 | struct kexec_sha_region { | 18 | #include <linux/purgatory.h> |
| 19 | unsigned long start; | ||
| 20 | unsigned long len; | ||
| 21 | }; | ||
| 22 | |||
| 23 | void kimage_file_post_load_cleanup(struct kimage *image); | 19 | void kimage_file_post_load_cleanup(struct kimage *image); |
| 24 | #else /* CONFIG_KEXEC_FILE */ | 20 | #else /* CONFIG_KEXEC_FILE */ |
| 25 | static inline void kimage_file_post_load_cleanup(struct kimage *image) { } | 21 | static inline void kimage_file_post_load_cleanup(struct kimage *image) { } |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 12e38c213b70..a95e5d1f4a9c 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
| @@ -3262,10 +3262,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 3262 | if (depth) { | 3262 | if (depth) { |
| 3263 | hlock = curr->held_locks + depth - 1; | 3263 | hlock = curr->held_locks + depth - 1; |
| 3264 | if (hlock->class_idx == class_idx && nest_lock) { | 3264 | if (hlock->class_idx == class_idx && nest_lock) { |
| 3265 | if (hlock->references) | 3265 | if (hlock->references) { |
| 3266 | /* | ||
| 3267 | * Check: unsigned int references:12, overflow. | ||
| 3268 | */ | ||
| 3269 | if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1)) | ||
| 3270 | return 0; | ||
| 3271 | |||
| 3266 | hlock->references++; | 3272 | hlock->references++; |
| 3267 | else | 3273 | } else { |
| 3268 | hlock->references = 2; | 3274 | hlock->references = 2; |
| 3275 | } | ||
| 3269 | 3276 | ||
| 3270 | return 1; | 3277 | return 1; |
| 3271 | } | 3278 | } |
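[annotation] hlock->references is a 12-bit bitfield, so a caller that re-acquires the same nest_lock'd class roughly 4096 times would silently wrap it; the new DEBUG_LOCKS_WARN_ON turns that wrap into a loud failure instead (and the ww_mutex stress count in the next file drops to 4095 to stay under the limit). The overflow the check guards against, shown in isolation as plain C:

#include <stdio.h>

struct held_lock_like { unsigned int references:12; };

int main(void)
{
	struct held_lock_like h = { .references = 4095 };	/* (1 << 12) - 1 */

	h.references++;			/* wraps to 0: the bug lockdep now warns about */
	printf("%u\n", h.references);	/* prints 0 */
	return 0;
}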
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c index da6c9a34f62f..6b7abb334ca6 100644 --- a/kernel/locking/test-ww_mutex.c +++ b/kernel/locking/test-ww_mutex.c | |||
| @@ -50,7 +50,7 @@ static void test_mutex_work(struct work_struct *work) | |||
| 50 | 50 | ||
| 51 | if (mtx->flags & TEST_MTX_TRY) { | 51 | if (mtx->flags & TEST_MTX_TRY) { |
| 52 | while (!ww_mutex_trylock(&mtx->mutex)) | 52 | while (!ww_mutex_trylock(&mtx->mutex)) |
| 53 | cpu_relax(); | 53 | cond_resched(); |
| 54 | } else { | 54 | } else { |
| 55 | ww_mutex_lock(&mtx->mutex, NULL); | 55 | ww_mutex_lock(&mtx->mutex, NULL); |
| 56 | } | 56 | } |
| @@ -88,7 +88,7 @@ static int __test_mutex(unsigned int flags) | |||
| 88 | ret = -EINVAL; | 88 | ret = -EINVAL; |
| 89 | break; | 89 | break; |
| 90 | } | 90 | } |
| 91 | cpu_relax(); | 91 | cond_resched(); |
| 92 | } while (time_before(jiffies, timeout)); | 92 | } while (time_before(jiffies, timeout)); |
| 93 | } else { | 93 | } else { |
| 94 | ret = wait_for_completion_timeout(&mtx.done, TIMEOUT); | 94 | ret = wait_for_completion_timeout(&mtx.done, TIMEOUT); |
| @@ -627,7 +627,7 @@ static int __init test_ww_mutex_init(void) | |||
| 627 | if (ret) | 627 | if (ret) |
| 628 | return ret; | 628 | return ret; |
| 629 | 629 | ||
| 630 | ret = stress(4096, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL); | 630 | ret = stress(4095, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL); |
| 631 | if (ret) | 631 | if (ret) |
| 632 | return ret; | 632 | return ret; |
| 633 | 633 | ||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 956383844116..3b31fc05a0f1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -3287,10 +3287,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) | |||
| 3287 | struct task_struct *p; | 3287 | struct task_struct *p; |
| 3288 | 3288 | ||
| 3289 | /* | 3289 | /* |
| 3290 | * Optimization: we know that if all tasks are in | 3290 | * Optimization: we know that if all tasks are in the fair class we can |
| 3291 | * the fair class we can call that function directly: | 3291 | * call that function directly, but only if the @prev task wasn't of a |
| 3292 | * higher scheduling class, because otherwise those lose the | ||
| 3293 | * opportunity to pull in more work from other CPUs. | ||
| 3292 | */ | 3294 | */ |
| 3293 | if (likely(rq->nr_running == rq->cfs.h_nr_running)) { | 3295 | if (likely((prev->sched_class == &idle_sched_class || |
| 3296 | prev->sched_class == &fair_sched_class) && | ||
| 3297 | rq->nr_running == rq->cfs.h_nr_running)) { | ||
| 3298 | |||
| 3294 | p = fair_sched_class.pick_next_task(rq, prev, rf); | 3299 | p = fair_sched_class.pick_next_task(rq, prev, rf); |
| 3295 | if (unlikely(p == RETRY_TASK)) | 3300 | if (unlikely(p == RETRY_TASK)) |
| 3296 | goto again; | 3301 | goto again; |
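A toy model of the tightened fast-path condition above: the CFS-only shortcut is taken only when the previous task ran in the fair or idle class and every runnable task is a CFS task. The enum and helper below are invented for illustration, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative ordering of scheduling classes, highest priority first. */
enum toy_class { TOY_STOP, TOY_DL, TOY_RT, TOY_FAIR, TOY_IDLE };

/*
 * Model of the patched condition: the CFS-only fast path is allowed only when
 * the previous task came from the fair or idle class, so that a higher-class
 * (RT/DL) task going to sleep still runs the full class iteration and keeps
 * the chance to pull in work from other CPUs.
 */
static bool can_use_fair_fast_path(enum toy_class prev_class,
				   unsigned int nr_running,
				   unsigned int cfs_nr_running)
{
	return (prev_class == TOY_FAIR || prev_class == TOY_IDLE) &&
	       nr_running == cfs_nr_running;
}

int main(void)
{
	/* RT task sleeping: even with only CFS tasks left, take the slow path. */
	printf("%d\n", can_use_fair_fast_path(TOY_RT, 3, 3));   /* 0 */
	/* Fair task with only CFS tasks runnable: the fast path is safe. */
	printf("%d\n", can_use_fair_fast_path(TOY_FAIR, 3, 3)); /* 1 */
	return 0;
}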
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 8f8de3d4d6b7..cd7cd489f739 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c | |||
| @@ -36,6 +36,7 @@ struct sugov_policy { | |||
| 36 | u64 last_freq_update_time; | 36 | u64 last_freq_update_time; |
| 37 | s64 freq_update_delay_ns; | 37 | s64 freq_update_delay_ns; |
| 38 | unsigned int next_freq; | 38 | unsigned int next_freq; |
| 39 | unsigned int cached_raw_freq; | ||
| 39 | 40 | ||
| 40 | /* The next fields are only needed if fast switch cannot be used. */ | 41 | /* The next fields are only needed if fast switch cannot be used. */ |
| 41 | struct irq_work irq_work; | 42 | struct irq_work irq_work; |
| @@ -52,7 +53,6 @@ struct sugov_cpu { | |||
| 52 | struct update_util_data update_util; | 53 | struct update_util_data update_util; |
| 53 | struct sugov_policy *sg_policy; | 54 | struct sugov_policy *sg_policy; |
| 54 | 55 | ||
| 55 | unsigned int cached_raw_freq; | ||
| 56 | unsigned long iowait_boost; | 56 | unsigned long iowait_boost; |
| 57 | unsigned long iowait_boost_max; | 57 | unsigned long iowait_boost_max; |
| 58 | u64 last_update; | 58 | u64 last_update; |
| @@ -116,7 +116,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time, | |||
| 116 | 116 | ||
| 117 | /** | 117 | /** |
| 118 | * get_next_freq - Compute a new frequency for a given cpufreq policy. | 118 | * get_next_freq - Compute a new frequency for a given cpufreq policy. |
| 119 | * @sg_cpu: schedutil cpu object to compute the new frequency for. | 119 | * @sg_policy: schedutil policy object to compute the new frequency for. |
| 120 | * @util: Current CPU utilization. | 120 | * @util: Current CPU utilization. |
| 121 | * @max: CPU capacity. | 121 | * @max: CPU capacity. |
| 122 | * | 122 | * |
| @@ -136,19 +136,18 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time, | |||
| 136 | * next_freq (as calculated above) is returned, subject to policy min/max and | 136 | * next_freq (as calculated above) is returned, subject to policy min/max and |
| 137 | * cpufreq driver limitations. | 137 | * cpufreq driver limitations. |
| 138 | */ | 138 | */ |
| 139 | static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util, | 139 | static unsigned int get_next_freq(struct sugov_policy *sg_policy, |
| 140 | unsigned long max) | 140 | unsigned long util, unsigned long max) |
| 141 | { | 141 | { |
| 142 | struct sugov_policy *sg_policy = sg_cpu->sg_policy; | ||
| 143 | struct cpufreq_policy *policy = sg_policy->policy; | 142 | struct cpufreq_policy *policy = sg_policy->policy; |
| 144 | unsigned int freq = arch_scale_freq_invariant() ? | 143 | unsigned int freq = arch_scale_freq_invariant() ? |
| 145 | policy->cpuinfo.max_freq : policy->cur; | 144 | policy->cpuinfo.max_freq : policy->cur; |
| 146 | 145 | ||
| 147 | freq = (freq + (freq >> 2)) * util / max; | 146 | freq = (freq + (freq >> 2)) * util / max; |
| 148 | 147 | ||
| 149 | if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX) | 148 | if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX) |
| 150 | return sg_policy->next_freq; | 149 | return sg_policy->next_freq; |
| 151 | sg_cpu->cached_raw_freq = freq; | 150 | sg_policy->cached_raw_freq = freq; |
| 152 | return cpufreq_driver_resolve_freq(policy, freq); | 151 | return cpufreq_driver_resolve_freq(policy, freq); |
| 153 | } | 152 | } |
| 154 | 153 | ||
| @@ -213,7 +212,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, | |||
| 213 | } else { | 212 | } else { |
| 214 | sugov_get_util(&util, &max); | 213 | sugov_get_util(&util, &max); |
| 215 | sugov_iowait_boost(sg_cpu, &util, &max); | 214 | sugov_iowait_boost(sg_cpu, &util, &max); |
| 216 | next_f = get_next_freq(sg_cpu, util, max); | 215 | next_f = get_next_freq(sg_policy, util, max); |
| 217 | } | 216 | } |
| 218 | sugov_update_commit(sg_policy, time, next_f); | 217 | sugov_update_commit(sg_policy, time, next_f); |
| 219 | } | 218 | } |
| @@ -267,7 +266,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, | |||
| 267 | sugov_iowait_boost(j_sg_cpu, &util, &max); | 266 | sugov_iowait_boost(j_sg_cpu, &util, &max); |
| 268 | } | 267 | } |
| 269 | 268 | ||
| 270 | return get_next_freq(sg_cpu, util, max); | 269 | return get_next_freq(sg_policy, util, max); |
| 271 | } | 270 | } |
| 272 | 271 | ||
| 273 | static void sugov_update_shared(struct update_util_data *hook, u64 time, | 272 | static void sugov_update_shared(struct update_util_data *hook, u64 time, |
| @@ -580,6 +579,7 @@ static int sugov_start(struct cpufreq_policy *policy) | |||
| 580 | sg_policy->next_freq = UINT_MAX; | 579 | sg_policy->next_freq = UINT_MAX; |
| 581 | sg_policy->work_in_progress = false; | 580 | sg_policy->work_in_progress = false; |
| 582 | sg_policy->need_freq_update = false; | 581 | sg_policy->need_freq_update = false; |
| 582 | sg_policy->cached_raw_freq = 0; | ||
| 583 | 583 | ||
| 584 | for_each_cpu(cpu, policy->cpus) { | 584 | for_each_cpu(cpu, policy->cpus) { |
| 585 | struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); | 585 | struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); |
| @@ -590,7 +590,6 @@ static int sugov_start(struct cpufreq_policy *policy) | |||
| 590 | sg_cpu->max = 0; | 590 | sg_cpu->max = 0; |
| 591 | sg_cpu->flags = SCHED_CPUFREQ_RT; | 591 | sg_cpu->flags = SCHED_CPUFREQ_RT; |
| 592 | sg_cpu->last_update = 0; | 592 | sg_cpu->last_update = 0; |
| 593 | sg_cpu->cached_raw_freq = 0; | ||
| 594 | sg_cpu->iowait_boost = 0; | 593 | sg_cpu->iowait_boost = 0; |
| 595 | sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; | 594 | sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; |
| 596 | cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, | 595 | cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, |
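The schedutil hunks above move cached_raw_freq from the per-CPU data into the shared sugov_policy; the frequency formula itself is unchanged. A small userspace sketch of that formula follows, with made-up numbers (a hypothetical 2 GHz policy), just to show the shape of the request get_next_freq() computes.

#include <stdio.h>

/*
 * The governor's raw frequency request: next_freq = 1.25 * base * util / max,
 * where base is cpuinfo.max_freq on frequency-invariant systems.  The numbers
 * used below are illustrative only.
 */
static unsigned int toy_next_freq(unsigned int base_khz,
				  unsigned long util, unsigned long max)
{
	return (base_khz + (base_khz >> 2)) * util / max;
}

int main(void)
{
	unsigned int max_freq = 2000000;	/* 2 GHz, in kHz */

	/* ~50% utilization asks for ~1.25 GHz; ~80% asks for nearly the 2 GHz max. */
	printf("util=512/1024 -> %u kHz\n", toy_next_freq(max_freq, 512, 1024));
	printf("util=819/1024 -> %u kHz\n", toy_next_freq(max_freq, 819, 1024));
	return 0;
}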
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3e88b35ac157..dea138964b91 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
| @@ -5799,7 +5799,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t | |||
| 5799 | * Due to large variance we need a large fuzz factor; hackbench in | 5799 | * Due to large variance we need a large fuzz factor; hackbench in |
| 5800 | * particular is sensitive here. | 5800 | * particular is sensitive here. |
| 5801 | */ | 5801 | */ |
| 5802 | if ((avg_idle / 512) < avg_cost) | 5802 | if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost) |
| 5803 | return -1; | 5803 | return -1; |
| 5804 | 5804 | ||
| 5805 | time = local_clock(); | 5805 | time = local_clock(); |
diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 69631fa46c2f..1b3c8189b286 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h | |||
| @@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true) | |||
| 51 | */ | 51 | */ |
| 52 | SCHED_FEAT(TTWU_QUEUE, true) | 52 | SCHED_FEAT(TTWU_QUEUE, true) |
| 53 | 53 | ||
| 54 | /* | ||
| 55 | * When doing wakeups, attempt to limit superfluous scans of the LLC domain. | ||
| 56 | */ | ||
| 57 | SCHED_FEAT(SIS_AVG_CPU, false) | ||
| 58 | |||
| 54 | #ifdef HAVE_RT_PUSH_IPI | 59 | #ifdef HAVE_RT_PUSH_IPI |
| 55 | /* | 60 | /* |
| 56 | * In order to avoid a thundering herd attack of CPUs that are | 61 | * In order to avoid a thundering herd attack of CPUs that are |
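The fair.c hunk above gates the existing avg_idle/512 bail-out behind the new, default-off SIS_AVG_CPU feature declared here. A toy model of that heuristic, with arbitrary time units; the names are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/*
 * Model of the select_idle_cpu() bail-out that SIS_AVG_CPU now gates:
 * skip scanning the LLC domain when the CPU's average idle time (scaled
 * down by 512 as a fuzz factor) is smaller than the average scan cost.
 */
static bool skip_idle_scan(bool sis_avg_cpu, unsigned long long avg_idle,
			   unsigned long long avg_cost)
{
	return sis_avg_cpu && (avg_idle / 512) < avg_cost;
}

int main(void)
{
	/* Feature off (the new default): never skip the scan on this basis. */
	printf("%d\n", skip_idle_scan(false, 1000, 100));	/* 0 */
	/* Feature on and idle time too short to amortize the scan: skip it. */
	printf("%d\n", skip_idle_scan(true, 1000, 100));	/* 1 */
	return 0;
}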
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 4d2ea6f25568..b8c84c6dee64 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c | |||
| @@ -242,6 +242,45 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state) | |||
| 242 | } | 242 | } |
| 243 | EXPORT_SYMBOL(prepare_to_wait_event); | 243 | EXPORT_SYMBOL(prepare_to_wait_event); |
| 244 | 244 | ||
| 245 | /* | ||
| 246 | * Note! These two wait functions are entered with the | ||
| 247 | * wait-queue lock held (and interrupts off in the _irq | ||
| 248 | * case), so there is no race with testing the wakeup | ||
| 249 | * condition in the caller before they add the wait | ||
| 250 | * entry to the wake queue. | ||
| 251 | */ | ||
| 252 | int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait) | ||
| 253 | { | ||
| 254 | if (likely(list_empty(&wait->task_list))) | ||
| 255 | __add_wait_queue_tail(wq, wait); | ||
| 256 | |||
| 257 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 258 | if (signal_pending(current)) | ||
| 259 | return -ERESTARTSYS; | ||
| 260 | |||
| 261 | spin_unlock(&wq->lock); | ||
| 262 | schedule(); | ||
| 263 | spin_lock(&wq->lock); | ||
| 264 | return 0; | ||
| 265 | } | ||
| 266 | EXPORT_SYMBOL(do_wait_intr); | ||
| 267 | |||
| 268 | int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait) | ||
| 269 | { | ||
| 270 | if (likely(list_empty(&wait->task_list))) | ||
| 271 | __add_wait_queue_tail(wq, wait); | ||
| 272 | |||
| 273 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 274 | if (signal_pending(current)) | ||
| 275 | return -ERESTARTSYS; | ||
| 276 | |||
| 277 | spin_unlock_irq(&wq->lock); | ||
| 278 | schedule(); | ||
| 279 | spin_lock_irq(&wq->lock); | ||
| 280 | return 0; | ||
| 281 | } | ||
| 282 | EXPORT_SYMBOL(do_wait_intr_irq); | ||
| 283 | |||
| 245 | /** | 284 | /** |
| 246 | * finish_wait - clean up after waiting in a queue | 285 | * finish_wait - clean up after waiting in a queue |
| 247 | * @q: waitqueue waited on | 286 | * @q: waitqueue waited on |
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 7906b3f0c41a..497719127bf9 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c | |||
| @@ -125,7 +125,7 @@ int register_refined_jiffies(long cycles_per_second) | |||
| 125 | shift_hz += cycles_per_tick/2; | 125 | shift_hz += cycles_per_tick/2; |
| 126 | do_div(shift_hz, cycles_per_tick); | 126 | do_div(shift_hz, cycles_per_tick); |
| 127 | /* Calculate nsec_per_tick using shift_hz */ | 127 | /* Calculate nsec_per_tick using shift_hz */ |
| 128 | nsec_per_tick = (u64)TICK_NSEC << 8; | 128 | nsec_per_tick = (u64)NSEC_PER_SEC << 8; |
| 129 | nsec_per_tick += (u32)shift_hz/2; | 129 | nsec_per_tick += (u32)shift_hz/2; |
| 130 | do_div(nsec_per_tick, (u32)shift_hz); | 130 | do_div(nsec_per_tick, (u32)shift_hz); |
| 131 | 131 | ||
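A userspace re-run of the corrected register_refined_jiffies() arithmetic, using made-up clock numbers (a 1 MHz reference and HZ=250), to show that scaling NSEC_PER_SEC rather than TICK_NSEC by 2^8 yields the expected nanoseconds per tick.

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned long long cycles_per_second = 1000000;	/* e.g. a 1 MHz PIT-like clock */
	unsigned long long hz = 250;			/* illustrative CONFIG_HZ */

	unsigned long long cycles_per_tick = cycles_per_second / hz;

	/* shift_hz holds HZ << 8, rounded: ~64000 here. */
	unsigned long long shift_hz = (cycles_per_second << 8) + cycles_per_tick / 2;
	shift_hz /= cycles_per_tick;

	/* Corrected: scale a full second, not one tick, by 2^8 before dividing. */
	unsigned long long nsec_per_tick = (NSEC_PER_SEC << 8) + shift_hz / 2;
	nsec_per_tick /= shift_hz;

	/* Prints 4000000 ns, i.e. 1/250 s, as expected for HZ=250. */
	printf("shift_hz=%llu nsec_per_tick=%llu\n", shift_hz, nsec_per_tick);
	return 0;
}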
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index d5038005eb5d..d4a06e714645 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
| @@ -429,7 +429,7 @@ config BLK_DEV_IO_TRACE | |||
| 429 | 429 | ||
| 430 | If unsure, say N. | 430 | If unsure, say N. |
| 431 | 431 | ||
| 432 | config KPROBE_EVENT | 432 | config KPROBE_EVENTS |
| 433 | depends on KPROBES | 433 | depends on KPROBES |
| 434 | depends on HAVE_REGS_AND_STACK_ACCESS_API | 434 | depends on HAVE_REGS_AND_STACK_ACCESS_API |
| 435 | bool "Enable kprobes-based dynamic events" | 435 | bool "Enable kprobes-based dynamic events" |
| @@ -447,7 +447,7 @@ config KPROBE_EVENT | |||
| 447 | This option is also required by perf-probe subcommand of perf tools. | 447 | This option is also required by perf-probe subcommand of perf tools. |
| 448 | If you want to use perf tools, this option is strongly recommended. | 448 | If you want to use perf tools, this option is strongly recommended. |
| 449 | 449 | ||
| 450 | config UPROBE_EVENT | 450 | config UPROBE_EVENTS |
| 451 | bool "Enable uprobes-based dynamic events" | 451 | bool "Enable uprobes-based dynamic events" |
| 452 | depends on ARCH_SUPPORTS_UPROBES | 452 | depends on ARCH_SUPPORTS_UPROBES |
| 453 | depends on MMU | 453 | depends on MMU |
| @@ -466,7 +466,7 @@ config UPROBE_EVENT | |||
| 466 | 466 | ||
| 467 | config BPF_EVENTS | 467 | config BPF_EVENTS |
| 468 | depends on BPF_SYSCALL | 468 | depends on BPF_SYSCALL |
| 469 | depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS | 469 | depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS |
| 470 | bool | 470 | bool |
| 471 | default y | 471 | default y |
| 472 | help | 472 | help |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index e57980845549..90f2701d92a7 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
| @@ -57,7 +57,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o | |||
| 57 | obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o | 57 | obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o |
| 58 | obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o | 58 | obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o |
| 59 | obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o | 59 | obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o |
| 60 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o | 60 | obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe.o |
| 61 | obj-$(CONFIG_TRACEPOINTS) += power-traces.o | 61 | obj-$(CONFIG_TRACEPOINTS) += power-traces.o |
| 62 | ifeq ($(CONFIG_PM),y) | 62 | ifeq ($(CONFIG_PM),y) |
| 63 | obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o | 63 | obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o |
| @@ -66,7 +66,7 @@ ifeq ($(CONFIG_TRACING),y) | |||
| 66 | obj-$(CONFIG_KGDB_KDB) += trace_kdb.o | 66 | obj-$(CONFIG_KGDB_KDB) += trace_kdb.o |
| 67 | endif | 67 | endif |
| 68 | obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o | 68 | obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o |
| 69 | obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o | 69 | obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o |
| 70 | 70 | ||
| 71 | obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o | 71 | obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o |
| 72 | 72 | ||
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0d1597c9ee30..b9691ee8f6c1 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -4416,16 +4416,24 @@ static int __init set_graph_notrace_function(char *str) | |||
| 4416 | } | 4416 | } |
| 4417 | __setup("ftrace_graph_notrace=", set_graph_notrace_function); | 4417 | __setup("ftrace_graph_notrace=", set_graph_notrace_function); |
| 4418 | 4418 | ||
| 4419 | static int __init set_graph_max_depth_function(char *str) | ||
| 4420 | { | ||
| 4421 | if (!str) | ||
| 4422 | return 0; | ||
| 4423 | fgraph_max_depth = simple_strtoul(str, NULL, 0); | ||
| 4424 | return 1; | ||
| 4425 | } | ||
| 4426 | __setup("ftrace_graph_max_depth=", set_graph_max_depth_function); | ||
| 4427 | |||
| 4419 | static void __init set_ftrace_early_graph(char *buf, int enable) | 4428 | static void __init set_ftrace_early_graph(char *buf, int enable) |
| 4420 | { | 4429 | { |
| 4421 | int ret; | 4430 | int ret; |
| 4422 | char *func; | 4431 | char *func; |
| 4423 | struct ftrace_hash *hash; | 4432 | struct ftrace_hash *hash; |
| 4424 | 4433 | ||
| 4425 | if (enable) | 4434 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
| 4426 | hash = ftrace_graph_hash; | 4435 | if (WARN_ON(!hash)) |
| 4427 | else | 4436 | return; |
| 4428 | hash = ftrace_graph_notrace_hash; | ||
| 4429 | 4437 | ||
| 4430 | while (buf) { | 4438 | while (buf) { |
| 4431 | func = strsep(&buf, ","); | 4439 | func = strsep(&buf, ","); |
| @@ -4435,6 +4443,11 @@ static void __init set_ftrace_early_graph(char *buf, int enable) | |||
| 4435 | printk(KERN_DEBUG "ftrace: function %s not " | 4443 | printk(KERN_DEBUG "ftrace: function %s not " |
| 4436 | "traceable\n", func); | 4444 | "traceable\n", func); |
| 4437 | } | 4445 | } |
| 4446 | |||
| 4447 | if (enable) | ||
| 4448 | ftrace_graph_hash = hash; | ||
| 4449 | else | ||
| 4450 | ftrace_graph_notrace_hash = hash; | ||
| 4438 | } | 4451 | } |
| 4439 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 4452 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 4440 | 4453 | ||
| @@ -5488,7 +5501,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, | |||
| 5488 | * Normally the mcount trampoline will call the ops->func, but there | 5501 | * Normally the mcount trampoline will call the ops->func, but there |
| 5489 | * are times that it should not. For example, if the ops does not | 5502 | * are times that it should not. For example, if the ops does not |
| 5490 | * have its own recursion protection, then it should call the | 5503 | * have its own recursion protection, then it should call the |
| 5491 | * ftrace_ops_recurs_func() instead. | 5504 | * ftrace_ops_assist_func() instead. |
| 5492 | * | 5505 | * |
| 5493 | * Returns the function that the trampoline should call for @ops. | 5506 | * Returns the function that the trampoline should call for @ops. |
| 5494 | */ | 5507 | */ |
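A hedged userspace sketch of the two early-boot parsers touched in this file's diff: the new ftrace_graph_max_depth= handler (a plain strtoul-style parse) and the comma-separated list walk that set_ftrace_early_graph() performs. The boot string and function names below are invented for illustration.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[] = "func_a,func_b,func_c";	/* illustrative boot string */
	char *rest = buf, *func;
	unsigned long max_depth;

	/* simple_strtoul(str, NULL, 0) equivalent: base 0 auto-detects 0x/0 prefixes. */
	max_depth = strtoul("16", NULL, 0);
	printf("max depth: %lu\n", max_depth);

	/* strsep() splits the list in place, mirroring the kernel loop. */
	while ((func = strsep(&rest, ",")) != NULL)
		printf("would add '%s' to the graph hash\n", func);

	return 0;
}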
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 707445ceb7ef..f35109514a01 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -4341,22 +4341,22 @@ static const char readme_msg[] = | |||
| 4341 | "\t\t\t traces\n" | 4341 | "\t\t\t traces\n" |
| 4342 | #endif | 4342 | #endif |
| 4343 | #endif /* CONFIG_STACK_TRACER */ | 4343 | #endif /* CONFIG_STACK_TRACER */ |
| 4344 | #ifdef CONFIG_KPROBE_EVENT | 4344 | #ifdef CONFIG_KPROBE_EVENTS |
| 4345 | " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" | 4345 | " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" |
| 4346 | "\t\t\t Write into this file to define/undefine new trace events.\n" | 4346 | "\t\t\t Write into this file to define/undefine new trace events.\n" |
| 4347 | #endif | 4347 | #endif |
| 4348 | #ifdef CONFIG_UPROBE_EVENT | 4348 | #ifdef CONFIG_UPROBE_EVENTS |
| 4349 | " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" | 4349 | " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" |
| 4350 | "\t\t\t Write into this file to define/undefine new trace events.\n" | 4350 | "\t\t\t Write into this file to define/undefine new trace events.\n" |
| 4351 | #endif | 4351 | #endif |
| 4352 | #if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT) | 4352 | #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) |
| 4353 | "\t accepts: event-definitions (one definition per line)\n" | 4353 | "\t accepts: event-definitions (one definition per line)\n" |
| 4354 | "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n" | 4354 | "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n" |
| 4355 | "\t -:[<group>/]<event>\n" | 4355 | "\t -:[<group>/]<event>\n" |
| 4356 | #ifdef CONFIG_KPROBE_EVENT | 4356 | #ifdef CONFIG_KPROBE_EVENTS |
| 4357 | "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" | 4357 | "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" |
| 4358 | #endif | 4358 | #endif |
| 4359 | #ifdef CONFIG_UPROBE_EVENT | 4359 | #ifdef CONFIG_UPROBE_EVENTS |
| 4360 | "\t place: <path>:<offset>\n" | 4360 | "\t place: <path>:<offset>\n" |
| 4361 | #endif | 4361 | #endif |
| 4362 | "\t args: <name>=fetcharg[:type]\n" | 4362 | "\t args: <name>=fetcharg[:type]\n" |
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 0c0ae54d44c6..903273c93e61 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h | |||
| @@ -248,7 +248,7 @@ ASSIGN_FETCH_FUNC(file_offset, ftype), \ | |||
| 248 | #define FETCH_TYPE_STRING 0 | 248 | #define FETCH_TYPE_STRING 0 |
| 249 | #define FETCH_TYPE_STRSIZE 1 | 249 | #define FETCH_TYPE_STRSIZE 1 |
| 250 | 250 | ||
| 251 | #ifdef CONFIG_KPROBE_EVENT | 251 | #ifdef CONFIG_KPROBE_EVENTS |
| 252 | struct symbol_cache; | 252 | struct symbol_cache; |
| 253 | unsigned long update_symbol_cache(struct symbol_cache *sc); | 253 | unsigned long update_symbol_cache(struct symbol_cache *sc); |
| 254 | void free_symbol_cache(struct symbol_cache *sc); | 254 | void free_symbol_cache(struct symbol_cache *sc); |
| @@ -278,7 +278,7 @@ alloc_symbol_cache(const char *sym, long offset) | |||
| 278 | { | 278 | { |
| 279 | return NULL; | 279 | return NULL; |
| 280 | } | 280 | } |
| 281 | #endif /* CONFIG_KPROBE_EVENT */ | 281 | #endif /* CONFIG_KPROBE_EVENTS */ |
| 282 | 282 | ||
| 283 | struct probe_arg { | 283 | struct probe_arg { |
| 284 | struct fetch_param fetch; | 284 | struct fetch_param fetch; |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 1d68b5b7ad41..5fb1f2c87e6b 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -65,7 +65,7 @@ void stack_trace_print(void) | |||
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | /* | 67 | /* |
| 68 | * When arch-specific code overides this function, the following | 68 | * When arch-specific code overrides this function, the following |
| 69 | * data should be filled up, assuming stack_trace_max_lock is held to | 69 | * data should be filled up, assuming stack_trace_max_lock is held to |
| 70 | * prevent concurrent updates. | 70 | * prevent concurrent updates. |
| 71 | * stack_trace_index[] | 71 | * stack_trace_index[] |
diff --git a/kernel/ucount.c b/kernel/ucount.c index 62630a40ab3a..b4eeee03934f 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c | |||
| @@ -144,7 +144,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) | |||
| 144 | 144 | ||
| 145 | new->ns = ns; | 145 | new->ns = ns; |
| 146 | new->uid = uid; | 146 | new->uid = uid; |
| 147 | atomic_set(&new->count, 0); | 147 | new->count = 0; |
| 148 | 148 | ||
| 149 | spin_lock_irq(&ucounts_lock); | 149 | spin_lock_irq(&ucounts_lock); |
| 150 | ucounts = find_ucounts(ns, uid, hashent); | 150 | ucounts = find_ucounts(ns, uid, hashent); |
| @@ -155,8 +155,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) | |||
| 155 | ucounts = new; | 155 | ucounts = new; |
| 156 | } | 156 | } |
| 157 | } | 157 | } |
| 158 | if (!atomic_add_unless(&ucounts->count, 1, INT_MAX)) | 158 | if (ucounts->count == INT_MAX) |
| 159 | ucounts = NULL; | 159 | ucounts = NULL; |
| 160 | else | ||
| 161 | ucounts->count += 1; | ||
| 160 | spin_unlock_irq(&ucounts_lock); | 162 | spin_unlock_irq(&ucounts_lock); |
| 161 | return ucounts; | 163 | return ucounts; |
| 162 | } | 164 | } |
| @@ -165,13 +167,15 @@ static void put_ucounts(struct ucounts *ucounts) | |||
| 165 | { | 167 | { |
| 166 | unsigned long flags; | 168 | unsigned long flags; |
| 167 | 169 | ||
| 168 | if (atomic_dec_and_test(&ucounts->count)) { | 170 | spin_lock_irqsave(&ucounts_lock, flags); |
| 169 | spin_lock_irqsave(&ucounts_lock, flags); | 171 | ucounts->count -= 1; |
| 172 | if (!ucounts->count) | ||
| 170 | hlist_del_init(&ucounts->node); | 173 | hlist_del_init(&ucounts->node); |
| 171 | spin_unlock_irqrestore(&ucounts_lock, flags); | 174 | else |
| 175 | ucounts = NULL; | ||
| 176 | spin_unlock_irqrestore(&ucounts_lock, flags); | ||
| 172 | 177 | ||
| 173 | kfree(ucounts); | 178 | kfree(ucounts); |
| 174 | } | ||
| 175 | } | 179 | } |
| 176 | 180 | ||
| 177 | static inline bool atomic_inc_below(atomic_t *v, int u) | 181 | static inline bool atomic_inc_below(atomic_t *v, int u) |
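A toy pthread model of the new ucount scheme above: ucounts->count becomes a plain integer protected by the same lock as the hash, increments refuse to pass INT_MAX, and the final put unhashes and frees under that lock. Names and sizes are illustrative only.

#include <limits.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct ucounts with a plain, lock-protected count. */
struct toy_ucounts {
	int count;
	int hashed;	/* models hlist membership */
};

static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;

/* get: refuse to go past INT_MAX instead of wrapping, all under the lock. */
static struct toy_ucounts *toy_get(struct toy_ucounts *uc)
{
	pthread_mutex_lock(&toy_lock);
	if (uc->count == INT_MAX)
		uc = NULL;
	else
		uc->count += 1;
	pthread_mutex_unlock(&toy_lock);
	return uc;
}

/* put: drop one reference; the last put unhashes and frees the object. */
static void toy_put(struct toy_ucounts *uc)
{
	pthread_mutex_lock(&toy_lock);
	uc->count -= 1;
	if (uc->count)
		uc = NULL;
	else
		uc->hashed = 0;
	pthread_mutex_unlock(&toy_lock);

	free(uc);	/* free(NULL) is a no-op, like kfree(NULL) in the kernel */
}

int main(void)
{
	struct toy_ucounts *uc = calloc(1, sizeof(*uc));

	if (!uc)
		return 1;
	uc->count = 1;
	uc->hashed = 1;
	toy_get(uc);	/* count: 2 */
	toy_put(uc);	/* count: 1, still hashed */
	toy_put(uc);	/* count: 0, unhashed and freed */
	return 0;
}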
diff --git a/lib/ioremap.c b/lib/ioremap.c index a3e14ce92a56..4bb30206b942 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <asm/pgtable.h> | 14 | #include <asm/pgtable.h> |
| 15 | 15 | ||
| 16 | #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP | 16 | #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP |
| 17 | static int __read_mostly ioremap_p4d_capable; | ||
| 17 | static int __read_mostly ioremap_pud_capable; | 18 | static int __read_mostly ioremap_pud_capable; |
| 18 | static int __read_mostly ioremap_pmd_capable; | 19 | static int __read_mostly ioremap_pmd_capable; |
| 19 | static int __read_mostly ioremap_huge_disabled; | 20 | static int __read_mostly ioremap_huge_disabled; |
| @@ -35,6 +36,11 @@ void __init ioremap_huge_init(void) | |||
| 35 | } | 36 | } |
| 36 | } | 37 | } |
| 37 | 38 | ||
| 39 | static inline int ioremap_p4d_enabled(void) | ||
| 40 | { | ||
| 41 | return ioremap_p4d_capable; | ||
| 42 | } | ||
| 43 | |||
| 38 | static inline int ioremap_pud_enabled(void) | 44 | static inline int ioremap_pud_enabled(void) |
| 39 | { | 45 | { |
| 40 | return ioremap_pud_capable; | 46 | return ioremap_pud_capable; |
| @@ -46,6 +52,7 @@ static inline int ioremap_pmd_enabled(void) | |||
| 46 | } | 52 | } |
| 47 | 53 | ||
| 48 | #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ | 54 | #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ |
| 55 | static inline int ioremap_p4d_enabled(void) { return 0; } | ||
| 49 | static inline int ioremap_pud_enabled(void) { return 0; } | 56 | static inline int ioremap_pud_enabled(void) { return 0; } |
| 50 | static inline int ioremap_pmd_enabled(void) { return 0; } | 57 | static inline int ioremap_pmd_enabled(void) { return 0; } |
| 51 | #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ | 58 | #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ |
| @@ -94,14 +101,14 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, | |||
| 94 | return 0; | 101 | return 0; |
| 95 | } | 102 | } |
| 96 | 103 | ||
| 97 | static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, | 104 | static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr, |
| 98 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) | 105 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) |
| 99 | { | 106 | { |
| 100 | pud_t *pud; | 107 | pud_t *pud; |
| 101 | unsigned long next; | 108 | unsigned long next; |
| 102 | 109 | ||
| 103 | phys_addr -= addr; | 110 | phys_addr -= addr; |
| 104 | pud = pud_alloc(&init_mm, pgd, addr); | 111 | pud = pud_alloc(&init_mm, p4d, addr); |
| 105 | if (!pud) | 112 | if (!pud) |
| 106 | return -ENOMEM; | 113 | return -ENOMEM; |
| 107 | do { | 114 | do { |
| @@ -120,6 +127,32 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, | |||
| 120 | return 0; | 127 | return 0; |
| 121 | } | 128 | } |
| 122 | 129 | ||
| 130 | static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr, | ||
| 131 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) | ||
| 132 | { | ||
| 133 | p4d_t *p4d; | ||
| 134 | unsigned long next; | ||
| 135 | |||
| 136 | phys_addr -= addr; | ||
| 137 | p4d = p4d_alloc(&init_mm, pgd, addr); | ||
| 138 | if (!p4d) | ||
| 139 | return -ENOMEM; | ||
| 140 | do { | ||
| 141 | next = p4d_addr_end(addr, end); | ||
| 142 | |||
| 143 | if (ioremap_p4d_enabled() && | ||
| 144 | ((next - addr) == P4D_SIZE) && | ||
| 145 | IS_ALIGNED(phys_addr + addr, P4D_SIZE)) { | ||
| 146 | if (p4d_set_huge(p4d, phys_addr + addr, prot)) | ||
| 147 | continue; | ||
| 148 | } | ||
| 149 | |||
| 150 | if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot)) | ||
| 151 | return -ENOMEM; | ||
| 152 | } while (p4d++, addr = next, addr != end); | ||
| 153 | return 0; | ||
| 154 | } | ||
| 155 | |||
| 123 | int ioremap_page_range(unsigned long addr, | 156 | int ioremap_page_range(unsigned long addr, |
| 124 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) | 157 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) |
| 125 | { | 158 | { |
| @@ -135,7 +168,7 @@ int ioremap_page_range(unsigned long addr, | |||
| 135 | pgd = pgd_offset_k(addr); | 168 | pgd = pgd_offset_k(addr); |
| 136 | do { | 169 | do { |
| 137 | next = pgd_addr_end(addr, end); | 170 | next = pgd_addr_end(addr, end); |
| 138 | err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot); | 171 | err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot); |
| 139 | if (err) | 172 | if (err) |
| 140 | break; | 173 | break; |
| 141 | } while (pgd++, addr = next, addr != end); | 174 | } while (pgd++, addr = next, addr != end); |
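The new ioremap_p4d_range() repeats the pattern of the lower levels: use one huge entry only when the chunk spans exactly one level-sized block and the physical address is aligned to it, otherwise descend. A toy model of that eligibility check follows; the 1 GiB block size is arbitrary, chosen only for readability.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_LEVEL_SIZE (1ULL << 30)	/* pretend 1 GiB entries at this level */

/*
 * Model of the per-level test in ioremap_*_range(): a huge entry may be used
 * only if the capability bit is set, [addr, next) spans exactly one
 * level-sized block, and the physical address is aligned to that block.
 */
static bool can_map_huge(bool level_capable, uint64_t addr, uint64_t next,
			 uint64_t phys_addr)
{
	return level_capable &&
	       (next - addr) == TOY_LEVEL_SIZE &&
	       (phys_addr + addr) % TOY_LEVEL_SIZE == 0;
}

int main(void)
{
	/* Aligned, full-sized chunk: eligible for a single huge entry. */
	printf("%d\n", can_map_huge(true, 0, TOY_LEVEL_SIZE, 0));	  /* 1 */
	/* Misaligned physical address: must descend to the next level instead. */
	printf("%d\n", can_map_huge(true, 0, TOY_LEVEL_SIZE, 0x200000)); /* 0 */
	return 0;
}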
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 5ed506d648c4..691a9ad48497 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -2129,8 +2129,8 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) | |||
| 2129 | struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp); | 2129 | struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp); |
| 2130 | if (!bitmap) | 2130 | if (!bitmap) |
| 2131 | return 0; | 2131 | return 0; |
| 2132 | bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap); | 2132 | if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap)) |
| 2133 | kfree(bitmap); | 2133 | kfree(bitmap); |
| 2134 | } | 2134 | } |
| 2135 | 2135 | ||
| 2136 | return 1; | 2136 | return 1; |
diff --git a/lib/refcount.c b/lib/refcount.c index 1d33366189d1..aa09ad3c30b0 100644 --- a/lib/refcount.c +++ b/lib/refcount.c | |||
| @@ -58,7 +58,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r) | |||
| 58 | val = old; | 58 | val = old; |
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); | 61 | WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); |
| 62 | 62 | ||
| 63 | return true; | 63 | return true; |
| 64 | } | 64 | } |
| @@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(refcount_add_not_zero); | |||
| 66 | 66 | ||
| 67 | void refcount_add(unsigned int i, refcount_t *r) | 67 | void refcount_add(unsigned int i, refcount_t *r) |
| 68 | { | 68 | { |
| 69 | WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n"); | 69 | WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n"); |
| 70 | } | 70 | } |
| 71 | EXPORT_SYMBOL_GPL(refcount_add); | 71 | EXPORT_SYMBOL_GPL(refcount_add); |
| 72 | 72 | ||
| @@ -97,7 +97,7 @@ bool refcount_inc_not_zero(refcount_t *r) | |||
| 97 | val = old; | 97 | val = old; |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); | 100 | WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); |
| 101 | 101 | ||
| 102 | return true; | 102 | return true; |
| 103 | } | 103 | } |
| @@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(refcount_inc_not_zero); | |||
| 111 | */ | 111 | */ |
| 112 | void refcount_inc(refcount_t *r) | 112 | void refcount_inc(refcount_t *r) |
| 113 | { | 113 | { |
| 114 | WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); | 114 | WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); |
| 115 | } | 115 | } |
| 116 | EXPORT_SYMBOL_GPL(refcount_inc); | 116 | EXPORT_SYMBOL_GPL(refcount_inc); |
| 117 | 117 | ||
| @@ -125,7 +125,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r) | |||
| 125 | 125 | ||
| 126 | new = val - i; | 126 | new = val - i; |
| 127 | if (new > val) { | 127 | if (new > val) { |
| 128 | WARN(new > val, "refcount_t: underflow; use-after-free.\n"); | 128 | WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n"); |
| 129 | return false; | 129 | return false; |
| 130 | } | 130 | } |
| 131 | 131 | ||
| @@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(refcount_dec_and_test); | |||
| 164 | 164 | ||
| 165 | void refcount_dec(refcount_t *r) | 165 | void refcount_dec(refcount_t *r) |
| 166 | { | 166 | { |
| 167 | WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); | 167 | WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); |
| 168 | } | 168 | } |
| 169 | EXPORT_SYMBOL_GPL(refcount_dec); | 169 | EXPORT_SYMBOL_GPL(refcount_dec); |
| 170 | 170 | ||
| @@ -204,7 +204,7 @@ bool refcount_dec_not_one(refcount_t *r) | |||
| 204 | 204 | ||
| 205 | new = val - 1; | 205 | new = val - 1; |
| 206 | if (new > val) { | 206 | if (new > val) { |
| 207 | WARN(new > val, "refcount_t: underflow; use-after-free.\n"); | 207 | WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n"); |
| 208 | return true; | 208 | return true; |
| 209 | } | 209 | } |
| 210 | 210 | ||
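The WARN -> WARN_ONCE changes above only throttle the diagnostics; the saturation behaviour is unchanged. Below is a toy C11-atomics model of refcount_inc_not_zero()'s observable semantics (never resurrect zero, pin at UINT_MAX once saturated); it is a sketch, not the kernel implementation.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint toy_ref = 1;

/*
 * Model: refuse to increment a zero count, and once the counter would wrap
 * keep it pinned at UINT_MAX, warning only once (mirroring WARN_ONCE).
 */
static bool toy_inc_not_zero(atomic_uint *r)
{
	static bool warned;
	unsigned int old = atomic_load(r);
	unsigned int new;

	do {
		if (!old)
			return false;		/* object already dead */
		new = old + 1;
		if (new < old)			/* wrapped: saturate instead */
			new = UINT_MAX;
	} while (!atomic_compare_exchange_weak(r, &old, new));

	if (new == UINT_MAX && !warned) {
		warned = true;
		fprintf(stderr, "toy refcount saturated; leaking the object\n");
	}
	return true;
}

int main(void)
{
	printf("inc ok: %d, value: %u\n", toy_inc_not_zero(&toy_ref),
	       atomic_load(&toy_ref));

	atomic_store(&toy_ref, UINT_MAX);	/* force the saturation path */
	printf("inc ok: %d, value: %u\n", toy_inc_not_zero(&toy_ref),
	       atomic_load(&toy_ref));
	return 0;
}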
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 6d861d090e9f..c6f2a37028c2 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
| @@ -683,33 +683,26 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) | |||
| 683 | static void cgwb_bdi_destroy(struct backing_dev_info *bdi) | 683 | static void cgwb_bdi_destroy(struct backing_dev_info *bdi) |
| 684 | { | 684 | { |
| 685 | struct radix_tree_iter iter; | 685 | struct radix_tree_iter iter; |
| 686 | struct rb_node *rbn; | ||
| 687 | void **slot; | 686 | void **slot; |
| 688 | 687 | ||
| 689 | WARN_ON(test_bit(WB_registered, &bdi->wb.state)); | 688 | WARN_ON(test_bit(WB_registered, &bdi->wb.state)); |
| 690 | 689 | ||
| 691 | spin_lock_irq(&cgwb_lock); | 690 | spin_lock_irq(&cgwb_lock); |
| 692 | |||
| 693 | radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) | 691 | radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) |
| 694 | cgwb_kill(*slot); | 692 | cgwb_kill(*slot); |
| 695 | |||
| 696 | while ((rbn = rb_first(&bdi->cgwb_congested_tree))) { | ||
| 697 | struct bdi_writeback_congested *congested = | ||
| 698 | rb_entry(rbn, struct bdi_writeback_congested, rb_node); | ||
| 699 | |||
| 700 | rb_erase(rbn, &bdi->cgwb_congested_tree); | ||
| 701 | congested->bdi = NULL; /* mark @congested unlinked */ | ||
| 702 | } | ||
| 703 | |||
| 704 | spin_unlock_irq(&cgwb_lock); | 693 | spin_unlock_irq(&cgwb_lock); |
| 705 | 694 | ||
| 706 | /* | 695 | /* |
| 707 | * All cgwb's and their congested states must be shut down and | 696 | * All cgwb's must be shut down and released before returning. Drain |
| 708 | * released before returning. Drain the usage counter to wait for | 697 | * the usage counter to wait for all cgwb's ever created on @bdi. |
| 709 | * all cgwb's and cgwb_congested's ever created on @bdi. | ||
| 710 | */ | 698 | */ |
| 711 | atomic_dec(&bdi->usage_cnt); | 699 | atomic_dec(&bdi->usage_cnt); |
| 712 | wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt)); | 700 | wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt)); |
| 701 | /* | ||
| 702 | * Grab back our reference so that we hold it when @bdi gets | ||
| 703 | * re-registered. | ||
| 704 | */ | ||
| 705 | atomic_inc(&bdi->usage_cnt); | ||
| 713 | } | 706 | } |
| 714 | 707 | ||
| 715 | /** | 708 | /** |
| @@ -749,6 +742,21 @@ void wb_blkcg_offline(struct blkcg *blkcg) | |||
| 749 | spin_unlock_irq(&cgwb_lock); | 742 | spin_unlock_irq(&cgwb_lock); |
| 750 | } | 743 | } |
| 751 | 744 | ||
| 745 | static void cgwb_bdi_exit(struct backing_dev_info *bdi) | ||
| 746 | { | ||
| 747 | struct rb_node *rbn; | ||
| 748 | |||
| 749 | spin_lock_irq(&cgwb_lock); | ||
| 750 | while ((rbn = rb_first(&bdi->cgwb_congested_tree))) { | ||
| 751 | struct bdi_writeback_congested *congested = | ||
| 752 | rb_entry(rbn, struct bdi_writeback_congested, rb_node); | ||
| 753 | |||
| 754 | rb_erase(rbn, &bdi->cgwb_congested_tree); | ||
| 755 | congested->bdi = NULL; /* mark @congested unlinked */ | ||
| 756 | } | ||
| 757 | spin_unlock_irq(&cgwb_lock); | ||
| 758 | } | ||
| 759 | |||
| 752 | #else /* CONFIG_CGROUP_WRITEBACK */ | 760 | #else /* CONFIG_CGROUP_WRITEBACK */ |
| 753 | 761 | ||
| 754 | static int cgwb_bdi_init(struct backing_dev_info *bdi) | 762 | static int cgwb_bdi_init(struct backing_dev_info *bdi) |
| @@ -769,7 +777,9 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) | |||
| 769 | return 0; | 777 | return 0; |
| 770 | } | 778 | } |
| 771 | 779 | ||
| 772 | static void cgwb_bdi_destroy(struct backing_dev_info *bdi) | 780 | static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { } |
| 781 | |||
| 782 | static void cgwb_bdi_exit(struct backing_dev_info *bdi) | ||
| 773 | { | 783 | { |
| 774 | wb_congested_put(bdi->wb_congested); | 784 | wb_congested_put(bdi->wb_congested); |
| 775 | } | 785 | } |
| @@ -857,6 +867,8 @@ int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner) | |||
| 857 | MINOR(owner->devt)); | 867 | MINOR(owner->devt)); |
| 858 | if (rc) | 868 | if (rc) |
| 859 | return rc; | 869 | return rc; |
| 870 | /* Leaking owner reference... */ | ||
| 871 | WARN_ON(bdi->owner); | ||
| 860 | bdi->owner = owner; | 872 | bdi->owner = owner; |
| 861 | get_device(owner); | 873 | get_device(owner); |
| 862 | return 0; | 874 | return 0; |
| @@ -898,6 +910,7 @@ static void bdi_exit(struct backing_dev_info *bdi) | |||
| 898 | { | 910 | { |
| 899 | WARN_ON_ONCE(bdi->dev); | 911 | WARN_ON_ONCE(bdi->dev); |
| 900 | wb_exit(&bdi->wb); | 912 | wb_exit(&bdi->wb); |
| 913 | cgwb_bdi_exit(bdi); | ||
| 901 | } | 914 | } |
| 902 | 915 | ||
| 903 | static void release_bdi(struct kref *ref) | 916 | static void release_bdi(struct kref *ref) |
| @@ -226,6 +226,7 @@ struct page *follow_page_mask(struct vm_area_struct *vma, | |||
| 226 | unsigned int *page_mask) | 226 | unsigned int *page_mask) |
| 227 | { | 227 | { |
| 228 | pgd_t *pgd; | 228 | pgd_t *pgd; |
| 229 | p4d_t *p4d; | ||
| 229 | pud_t *pud; | 230 | pud_t *pud; |
| 230 | pmd_t *pmd; | 231 | pmd_t *pmd; |
| 231 | spinlock_t *ptl; | 232 | spinlock_t *ptl; |
| @@ -243,8 +244,13 @@ struct page *follow_page_mask(struct vm_area_struct *vma, | |||
| 243 | pgd = pgd_offset(mm, address); | 244 | pgd = pgd_offset(mm, address); |
| 244 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | 245 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) |
| 245 | return no_page_table(vma, flags); | 246 | return no_page_table(vma, flags); |
| 246 | 247 | p4d = p4d_offset(pgd, address); | |
| 247 | pud = pud_offset(pgd, address); | 248 | if (p4d_none(*p4d)) |
| 249 | return no_page_table(vma, flags); | ||
| 250 | BUILD_BUG_ON(p4d_huge(*p4d)); | ||
| 251 | if (unlikely(p4d_bad(*p4d))) | ||
| 252 | return no_page_table(vma, flags); | ||
| 253 | pud = pud_offset(p4d, address); | ||
| 248 | if (pud_none(*pud)) | 254 | if (pud_none(*pud)) |
| 249 | return no_page_table(vma, flags); | 255 | return no_page_table(vma, flags); |
| 250 | if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { | 256 | if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { |
| @@ -325,6 +331,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, | |||
| 325 | struct page **page) | 331 | struct page **page) |
| 326 | { | 332 | { |
| 327 | pgd_t *pgd; | 333 | pgd_t *pgd; |
| 334 | p4d_t *p4d; | ||
| 328 | pud_t *pud; | 335 | pud_t *pud; |
| 329 | pmd_t *pmd; | 336 | pmd_t *pmd; |
| 330 | pte_t *pte; | 337 | pte_t *pte; |
| @@ -338,7 +345,9 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, | |||
| 338 | else | 345 | else |
| 339 | pgd = pgd_offset_gate(mm, address); | 346 | pgd = pgd_offset_gate(mm, address); |
| 340 | BUG_ON(pgd_none(*pgd)); | 347 | BUG_ON(pgd_none(*pgd)); |
| 341 | pud = pud_offset(pgd, address); | 348 | p4d = p4d_offset(pgd, address); |
| 349 | BUG_ON(p4d_none(*p4d)); | ||
| 350 | pud = pud_offset(p4d, address); | ||
| 342 | BUG_ON(pud_none(*pud)); | 351 | BUG_ON(pud_none(*pud)); |
| 343 | pmd = pmd_offset(pud, address); | 352 | pmd = pmd_offset(pud, address); |
| 344 | if (pmd_none(*pmd)) | 353 | if (pmd_none(*pmd)) |
| @@ -1400,13 +1409,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | |||
| 1400 | return 1; | 1409 | return 1; |
| 1401 | } | 1410 | } |
| 1402 | 1411 | ||
| 1403 | static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, | 1412 | static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, |
| 1404 | int write, struct page **pages, int *nr) | 1413 | int write, struct page **pages, int *nr) |
| 1405 | { | 1414 | { |
| 1406 | unsigned long next; | 1415 | unsigned long next; |
| 1407 | pud_t *pudp; | 1416 | pud_t *pudp; |
| 1408 | 1417 | ||
| 1409 | pudp = pud_offset(&pgd, addr); | 1418 | pudp = pud_offset(&p4d, addr); |
| 1410 | do { | 1419 | do { |
| 1411 | pud_t pud = READ_ONCE(*pudp); | 1420 | pud_t pud = READ_ONCE(*pudp); |
| 1412 | 1421 | ||
| @@ -1428,6 +1437,31 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, | |||
| 1428 | return 1; | 1437 | return 1; |
| 1429 | } | 1438 | } |
| 1430 | 1439 | ||
| 1440 | static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, | ||
| 1441 | int write, struct page **pages, int *nr) | ||
| 1442 | { | ||
| 1443 | unsigned long next; | ||
| 1444 | p4d_t *p4dp; | ||
| 1445 | |||
| 1446 | p4dp = p4d_offset(&pgd, addr); | ||
| 1447 | do { | ||
| 1448 | p4d_t p4d = READ_ONCE(*p4dp); | ||
| 1449 | |||
| 1450 | next = p4d_addr_end(addr, end); | ||
| 1451 | if (p4d_none(p4d)) | ||
| 1452 | return 0; | ||
| 1453 | BUILD_BUG_ON(p4d_huge(p4d)); | ||
| 1454 | if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { | ||
| 1455 | if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, | ||
| 1456 | P4D_SHIFT, next, write, pages, nr)) | ||
| 1457 | return 0; | ||
| 1458 | } else if (!gup_p4d_range(p4d, addr, next, write, pages, nr)) | ||
| 1459 | return 0; | ||
| 1460 | } while (p4dp++, addr = next, addr != end); | ||
| 1461 | |||
| 1462 | return 1; | ||
| 1463 | } | ||
| 1464 | |||
| 1431 | /* | 1465 | /* |
| 1432 | * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to | 1466 | * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to |
| 1433 | * the regular GUP. It will only return non-negative values. | 1467 | * the regular GUP. It will only return non-negative values. |
| @@ -1478,7 +1512,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, | |||
| 1478 | if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, | 1512 | if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, |
| 1479 | PGDIR_SHIFT, next, write, pages, &nr)) | 1513 | PGDIR_SHIFT, next, write, pages, &nr)) |
| 1480 | break; | 1514 | break; |
| 1481 | } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) | 1515 | } else if (!gup_p4d_range(pgd, addr, next, write, pages, &nr)) |
| 1482 | break; | 1516 | break; |
| 1483 | } while (pgdp++, addr = next, addr != end); | 1517 | } while (pgdp++, addr = next, addr != end); |
| 1484 | local_irq_restore(flags); | 1518 | local_irq_restore(flags); |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d36b2af4d1bf..1ebc93e179f3 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -1828,7 +1828,7 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, | |||
| 1828 | VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); | 1828 | VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); |
| 1829 | VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); | 1829 | VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); |
| 1830 | 1830 | ||
| 1831 | count_vm_event(THP_SPLIT_PMD); | 1831 | count_vm_event(THP_SPLIT_PUD); |
| 1832 | 1832 | ||
| 1833 | pudp_huge_clear_flush_notify(vma, haddr, pud); | 1833 | pudp_huge_clear_flush_notify(vma, haddr, pud); |
| 1834 | } | 1834 | } |
| @@ -2048,6 +2048,7 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, | |||
| 2048 | bool freeze, struct page *page) | 2048 | bool freeze, struct page *page) |
| 2049 | { | 2049 | { |
| 2050 | pgd_t *pgd; | 2050 | pgd_t *pgd; |
| 2051 | p4d_t *p4d; | ||
| 2051 | pud_t *pud; | 2052 | pud_t *pud; |
| 2052 | pmd_t *pmd; | 2053 | pmd_t *pmd; |
| 2053 | 2054 | ||
| @@ -2055,7 +2056,11 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, | |||
| 2055 | if (!pgd_present(*pgd)) | 2056 | if (!pgd_present(*pgd)) |
| 2056 | return; | 2057 | return; |
| 2057 | 2058 | ||
| 2058 | pud = pud_offset(pgd, address); | 2059 | p4d = p4d_offset(pgd, address); |
| 2060 | if (!p4d_present(*p4d)) | ||
| 2061 | return; | ||
| 2062 | |||
| 2063 | pud = pud_offset(p4d, address); | ||
| 2059 | if (!pud_present(*pud)) | 2064 | if (!pud_present(*pud)) |
| 2060 | return; | 2065 | return; |
| 2061 | 2066 | ||
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a7aa811b7d14..3d0aab9ee80d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
| @@ -4555,7 +4555,8 @@ out: | |||
| 4555 | int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) | 4555 | int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) |
| 4556 | { | 4556 | { |
| 4557 | pgd_t *pgd = pgd_offset(mm, *addr); | 4557 | pgd_t *pgd = pgd_offset(mm, *addr); |
| 4558 | pud_t *pud = pud_offset(pgd, *addr); | 4558 | p4d_t *p4d = p4d_offset(pgd, *addr); |
| 4559 | pud_t *pud = pud_offset(p4d, *addr); | ||
| 4559 | 4560 | ||
| 4560 | BUG_ON(page_count(virt_to_page(ptep)) == 0); | 4561 | BUG_ON(page_count(virt_to_page(ptep)) == 0); |
| 4561 | if (page_count(virt_to_page(ptep)) == 1) | 4562 | if (page_count(virt_to_page(ptep)) == 1) |
| @@ -4586,11 +4587,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, | |||
| 4586 | unsigned long addr, unsigned long sz) | 4587 | unsigned long addr, unsigned long sz) |
| 4587 | { | 4588 | { |
| 4588 | pgd_t *pgd; | 4589 | pgd_t *pgd; |
| 4590 | p4d_t *p4d; | ||
| 4589 | pud_t *pud; | 4591 | pud_t *pud; |
| 4590 | pte_t *pte = NULL; | 4592 | pte_t *pte = NULL; |
| 4591 | 4593 | ||
| 4592 | pgd = pgd_offset(mm, addr); | 4594 | pgd = pgd_offset(mm, addr); |
| 4593 | pud = pud_alloc(mm, pgd, addr); | 4595 | p4d = p4d_offset(pgd, addr); |
| 4596 | pud = pud_alloc(mm, p4d, addr); | ||
| 4594 | if (pud) { | 4597 | if (pud) { |
| 4595 | if (sz == PUD_SIZE) { | 4598 | if (sz == PUD_SIZE) { |
| 4596 | pte = (pte_t *)pud; | 4599 | pte = (pte_t *)pud; |
| @@ -4610,18 +4613,22 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, | |||
| 4610 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | 4613 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) |
| 4611 | { | 4614 | { |
| 4612 | pgd_t *pgd; | 4615 | pgd_t *pgd; |
| 4616 | p4d_t *p4d; | ||
| 4613 | pud_t *pud; | 4617 | pud_t *pud; |
| 4614 | pmd_t *pmd = NULL; | 4618 | pmd_t *pmd; |
| 4615 | 4619 | ||
| 4616 | pgd = pgd_offset(mm, addr); | 4620 | pgd = pgd_offset(mm, addr); |
| 4617 | if (pgd_present(*pgd)) { | 4621 | if (!pgd_present(*pgd)) |
| 4618 | pud = pud_offset(pgd, addr); | 4622 | return NULL; |
| 4619 | if (pud_present(*pud)) { | 4623 | p4d = p4d_offset(pgd, addr); |
| 4620 | if (pud_huge(*pud)) | 4624 | if (!p4d_present(*p4d)) |
| 4621 | return (pte_t *)pud; | 4625 | return NULL; |
| 4622 | pmd = pmd_offset(pud, addr); | 4626 | pud = pud_offset(p4d, addr); |
| 4623 | } | 4627 | if (!pud_present(*pud)) |
| 4624 | } | 4628 | return NULL; |
| 4629 | if (pud_huge(*pud)) | ||
| 4630 | return (pte_t *)pud; | ||
| 4631 | pmd = pmd_offset(pud, addr); | ||
| 4625 | return (pte_t *) pmd; | 4632 | return (pte_t *) pmd; |
| 4626 | } | 4633 | } |
| 4627 | 4634 | ||
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c index 31238dad85fb..b96a5f773d88 100644 --- a/mm/kasan/kasan_init.c +++ b/mm/kasan/kasan_init.c | |||
| @@ -30,6 +30,9 @@ | |||
| 30 | */ | 30 | */ |
| 31 | unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss; | 31 | unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss; |
| 32 | 32 | ||
| 33 | #if CONFIG_PGTABLE_LEVELS > 4 | ||
| 34 | p4d_t kasan_zero_p4d[PTRS_PER_P4D] __page_aligned_bss; | ||
| 35 | #endif | ||
| 33 | #if CONFIG_PGTABLE_LEVELS > 3 | 36 | #if CONFIG_PGTABLE_LEVELS > 3 |
| 34 | pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss; | 37 | pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss; |
| 35 | #endif | 38 | #endif |
| @@ -82,10 +85,10 @@ static void __init zero_pmd_populate(pud_t *pud, unsigned long addr, | |||
| 82 | } while (pmd++, addr = next, addr != end); | 85 | } while (pmd++, addr = next, addr != end); |
| 83 | } | 86 | } |
| 84 | 87 | ||
| 85 | static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr, | 88 | static void __init zero_pud_populate(p4d_t *p4d, unsigned long addr, |
| 86 | unsigned long end) | 89 | unsigned long end) |
| 87 | { | 90 | { |
| 88 | pud_t *pud = pud_offset(pgd, addr); | 91 | pud_t *pud = pud_offset(p4d, addr); |
| 89 | unsigned long next; | 92 | unsigned long next; |
| 90 | 93 | ||
| 91 | do { | 94 | do { |
| @@ -107,6 +110,23 @@ static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr, | |||
| 107 | } while (pud++, addr = next, addr != end); | 110 | } while (pud++, addr = next, addr != end); |
| 108 | } | 111 | } |
| 109 | 112 | ||
| 113 | static void __init zero_p4d_populate(pgd_t *pgd, unsigned long addr, | ||
| 114 | unsigned long end) | ||
| 115 | { | ||
| 116 | p4d_t *p4d = p4d_offset(pgd, addr); | ||
| 117 | unsigned long next; | ||
| 118 | |||
| 119 | do { | ||
| 120 | next = p4d_addr_end(addr, end); | ||
| 121 | |||
| 122 | if (p4d_none(*p4d)) { | ||
| 123 | p4d_populate(&init_mm, p4d, | ||
| 124 | early_alloc(PAGE_SIZE, NUMA_NO_NODE)); | ||
| 125 | } | ||
| 126 | zero_pud_populate(p4d, addr, next); | ||
| 127 | } while (p4d++, addr = next, addr != end); | ||
| 128 | } | ||
| 129 | |||
| 110 | /** | 130 | /** |
| 111 | * kasan_populate_zero_shadow - populate shadow memory region with | 131 | * kasan_populate_zero_shadow - populate shadow memory region with |
| 112 | * kasan_zero_page | 132 | * kasan_zero_page |
| @@ -125,6 +145,7 @@ void __init kasan_populate_zero_shadow(const void *shadow_start, | |||
| 125 | next = pgd_addr_end(addr, end); | 145 | next = pgd_addr_end(addr, end); |
| 126 | 146 | ||
| 127 | if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) { | 147 | if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) { |
| 148 | p4d_t *p4d; | ||
| 128 | pud_t *pud; | 149 | pud_t *pud; |
| 129 | pmd_t *pmd; | 150 | pmd_t *pmd; |
| 130 | 151 | ||
| @@ -135,9 +156,22 @@ void __init kasan_populate_zero_shadow(const void *shadow_start, | |||
| 135 | * 3,2 - level page tables where we don't have | 156 | * 3,2 - level page tables where we don't have |
| 136 | * puds,pmds, so pgd_populate(), pud_populate() | 157 | * puds,pmds, so pgd_populate(), pud_populate() |
| 137 | * are no-ops. | 158 | * are no-ops. |
| 159 | * | ||
| 160 | * The ifndef is required to avoid build breakage. | ||
| 161 | * | ||
| 162 | * With 5level-fixup.h, pgd_populate() is not a nop and | ||
| 163 | * we reference kasan_zero_p4d. It's not defined | ||
| 164 | * unless 5-level paging is enabled. | ||
| 165 | * | ||
| 166 | * The ifndef can be dropped once all KASAN-enabled | ||
| 167 | * architectures switch to pgtable-nop4d.h. | ||
| 138 | */ | 168 | */ |
| 139 | pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_pud)); | 169 | #ifndef __ARCH_HAS_5LEVEL_HACK |
| 140 | pud = pud_offset(pgd, addr); | 170 | pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_p4d)); |
| 171 | #endif | ||
| 172 | p4d = p4d_offset(pgd, addr); | ||
| 173 | p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud)); | ||
| 174 | pud = pud_offset(p4d, addr); | ||
| 141 | pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd)); | 175 | pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd)); |
| 142 | pmd = pmd_offset(pud, addr); | 176 | pmd = pmd_offset(pud, addr); |
| 143 | pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte)); | 177 | pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte)); |
| @@ -148,6 +182,6 @@ void __init kasan_populate_zero_shadow(const void *shadow_start, | |||
| 148 | pgd_populate(&init_mm, pgd, | 182 | pgd_populate(&init_mm, pgd, |
| 149 | early_alloc(PAGE_SIZE, NUMA_NO_NODE)); | 183 | early_alloc(PAGE_SIZE, NUMA_NO_NODE)); |
| 150 | } | 184 | } |
| 151 | zero_pud_populate(pgd, addr, next); | 185 | zero_p4d_populate(pgd, addr, next); |
| 152 | } while (pgd++, addr = next, addr != end); | 186 | } while (pgd++, addr = next, addr != end); |
| 153 | } | 187 | } |
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index 6f1ed1630873..3a8ddf8baf7d 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/printk.h> | 25 | #include <linux/printk.h> |
| 26 | #include <linux/shrinker.h> | 26 | #include <linux/shrinker.h> |
| 27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
| 28 | #include <linux/srcu.h> | ||
| 28 | #include <linux/string.h> | 29 | #include <linux/string.h> |
| 29 | #include <linux/types.h> | 30 | #include <linux/types.h> |
| 30 | 31 | ||
| @@ -103,6 +104,7 @@ static int quarantine_tail; | |||
| 103 | /* Total size of all objects in global_quarantine across all batches. */ | 104 | /* Total size of all objects in global_quarantine across all batches. */ |
| 104 | static unsigned long quarantine_size; | 105 | static unsigned long quarantine_size; |
| 105 | static DEFINE_SPINLOCK(quarantine_lock); | 106 | static DEFINE_SPINLOCK(quarantine_lock); |
| 107 | DEFINE_STATIC_SRCU(remove_cache_srcu); | ||
| 106 | 108 | ||
| 107 | /* Maximum size of the global queue. */ | 109 | /* Maximum size of the global queue. */ |
| 108 | static unsigned long quarantine_max_size; | 110 | static unsigned long quarantine_max_size; |
| @@ -173,17 +175,22 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) | |||
| 173 | struct qlist_head *q; | 175 | struct qlist_head *q; |
| 174 | struct qlist_head temp = QLIST_INIT; | 176 | struct qlist_head temp = QLIST_INIT; |
| 175 | 177 | ||
| 178 | /* | ||
| 179 | * Note: irq must be disabled until after we move the batch to the | ||
| 180 | * global quarantine. Otherwise quarantine_remove_cache() can miss | ||
| 181 | * some objects belonging to the cache if they are in our local temp | ||
| 182 | * list. quarantine_remove_cache() executes on_each_cpu() at the | ||
| 183 | * beginning which ensures that it either sees the objects in per-cpu | ||
| 184 | * lists or in the global quarantine. | ||
| 185 | */ | ||
| 176 | local_irq_save(flags); | 186 | local_irq_save(flags); |
| 177 | 187 | ||
| 178 | q = this_cpu_ptr(&cpu_quarantine); | 188 | q = this_cpu_ptr(&cpu_quarantine); |
| 179 | qlist_put(q, &info->quarantine_link, cache->size); | 189 | qlist_put(q, &info->quarantine_link, cache->size); |
| 180 | if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) | 190 | if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) { |
| 181 | qlist_move_all(q, &temp); | 191 | qlist_move_all(q, &temp); |
| 182 | 192 | ||
| 183 | local_irq_restore(flags); | 193 | spin_lock(&quarantine_lock); |
| 184 | |||
| 185 | if (unlikely(!qlist_empty(&temp))) { | ||
| 186 | spin_lock_irqsave(&quarantine_lock, flags); | ||
| 187 | WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes); | 194 | WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes); |
| 188 | qlist_move_all(&temp, &global_quarantine[quarantine_tail]); | 195 | qlist_move_all(&temp, &global_quarantine[quarantine_tail]); |
| 189 | if (global_quarantine[quarantine_tail].bytes >= | 196 | if (global_quarantine[quarantine_tail].bytes >= |
| @@ -196,20 +203,33 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) | |||
| 196 | if (new_tail != quarantine_head) | 203 | if (new_tail != quarantine_head) |
| 197 | quarantine_tail = new_tail; | 204 | quarantine_tail = new_tail; |
| 198 | } | 205 | } |
| 199 | spin_unlock_irqrestore(&quarantine_lock, flags); | 206 | spin_unlock(&quarantine_lock); |
| 200 | } | 207 | } |
| 208 | |||
| 209 | local_irq_restore(flags); | ||
| 201 | } | 210 | } |
| 202 | 211 | ||
| 203 | void quarantine_reduce(void) | 212 | void quarantine_reduce(void) |
| 204 | { | 213 | { |
| 205 | size_t total_size, new_quarantine_size, percpu_quarantines; | 214 | size_t total_size, new_quarantine_size, percpu_quarantines; |
| 206 | unsigned long flags; | 215 | unsigned long flags; |
| 216 | int srcu_idx; | ||
| 207 | struct qlist_head to_free = QLIST_INIT; | 217 | struct qlist_head to_free = QLIST_INIT; |
| 208 | 218 | ||
| 209 | if (likely(READ_ONCE(quarantine_size) <= | 219 | if (likely(READ_ONCE(quarantine_size) <= |
| 210 | READ_ONCE(quarantine_max_size))) | 220 | READ_ONCE(quarantine_max_size))) |
| 211 | return; | 221 | return; |
| 212 | 222 | ||
| 223 | /* | ||
| 224 | * srcu critical section ensures that quarantine_remove_cache() | ||
| 225 | * will not miss objects belonging to the cache while they are in our | ||
| 226 | * local to_free list. srcu is chosen because (1) it gives us a private | ||
| 227 | * grace period domain that does not interfere with anything else, | ||
| 228 | * and (2) it allows synchronize_srcu() to return without waiting | ||
| 229 | * if there are no pending read critical sections (which is the | ||
| 230 | * expected case). | ||
| 231 | */ | ||
| 232 | srcu_idx = srcu_read_lock(&remove_cache_srcu); | ||
| 213 | spin_lock_irqsave(&quarantine_lock, flags); | 233 | spin_lock_irqsave(&quarantine_lock, flags); |
| 214 | 234 | ||
| 215 | /* | 235 | /* |
| @@ -237,6 +257,7 @@ void quarantine_reduce(void) | |||
| 237 | spin_unlock_irqrestore(&quarantine_lock, flags); | 257 | spin_unlock_irqrestore(&quarantine_lock, flags); |
| 238 | 258 | ||
| 239 | qlist_free_all(&to_free, NULL); | 259 | qlist_free_all(&to_free, NULL); |
| 260 | srcu_read_unlock(&remove_cache_srcu, srcu_idx); | ||
| 240 | } | 261 | } |
| 241 | 262 | ||
| 242 | static void qlist_move_cache(struct qlist_head *from, | 263 | static void qlist_move_cache(struct qlist_head *from, |
| @@ -280,12 +301,28 @@ void quarantine_remove_cache(struct kmem_cache *cache) | |||
| 280 | unsigned long flags, i; | 301 | unsigned long flags, i; |
| 281 | struct qlist_head to_free = QLIST_INIT; | 302 | struct qlist_head to_free = QLIST_INIT; |
| 282 | 303 | ||
| 304 | /* | ||
| 305 | * Must be careful to not miss any objects that are being moved from | ||
| 306 | * per-cpu list to the global quarantine in quarantine_put(), | ||
| 307 | * nor objects being freed in quarantine_reduce(). on_each_cpu() | ||
| 308 | * achieves the first goal, while synchronize_srcu() achieves the | ||
| 309 | * second. | ||
| 310 | */ | ||
| 283 | on_each_cpu(per_cpu_remove_cache, cache, 1); | 311 | on_each_cpu(per_cpu_remove_cache, cache, 1); |
| 284 | 312 | ||
| 285 | spin_lock_irqsave(&quarantine_lock, flags); | 313 | spin_lock_irqsave(&quarantine_lock, flags); |
| 286 | for (i = 0; i < QUARANTINE_BATCHES; i++) | 314 | for (i = 0; i < QUARANTINE_BATCHES; i++) { |
| 315 | if (qlist_empty(&global_quarantine[i])) | ||
| 316 | continue; | ||
| 287 | qlist_move_cache(&global_quarantine[i], &to_free, cache); | 317 | qlist_move_cache(&global_quarantine[i], &to_free, cache); |
| 318 | /* Scanning the whole quarantine can take a while. */ | ||
| 319 | spin_unlock_irqrestore(&quarantine_lock, flags); | ||
| 320 | cond_resched(); | ||
| 321 | spin_lock_irqsave(&quarantine_lock, flags); | ||
| 322 | } | ||
| 288 | spin_unlock_irqrestore(&quarantine_lock, flags); | 323 | spin_unlock_irqrestore(&quarantine_lock, flags); |
| 289 | 324 | ||
| 290 | qlist_free_all(&to_free, cache); | 325 | qlist_free_all(&to_free, cache); |
| 326 | |||
| 327 | synchronize_srcu(&remove_cache_srcu); | ||
| 291 | } | 328 | } |
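The three comments added above all describe one locking shape: freed objects are staged in a per-CPU batch with interrupts off, spliced into the global quarantine under quarantine_lock, and only freed once the lock (and the SRCU read section) has been dropped. The following is a minimal userspace analogue of that "splice under a short critical section, free outside it" pattern, not kernel code; every name in it (qnode, global_q, q_lock, quarantine_put_batch, quarantine_drain) is hypothetical.

#include <pthread.h>
#include <stdlib.h>

struct qnode { struct qnode *next; void *obj; };

static struct qnode *global_q;                  /* global quarantine list */
static unsigned long global_bytes;
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

/* Splice a locally staged batch into the global quarantine. The tail walk
 * happens before taking the lock so the critical section stays short. */
static void quarantine_put_batch(struct qnode *batch, unsigned long bytes)
{
        struct qnode *tail = batch;

        if (!batch)
                return;
        while (tail->next)
                tail = tail->next;

        pthread_mutex_lock(&q_lock);
        tail->next = global_q;
        global_q = batch;
        global_bytes += bytes;
        pthread_mutex_unlock(&q_lock);
}

/* Detach everything while holding the lock, then free with the lock dropped,
 * mirroring how quarantine_reduce() frees its to_free list outside the lock. */
static void quarantine_drain(void)
{
        struct qnode *to_free;

        pthread_mutex_lock(&q_lock);
        to_free = global_q;
        global_q = NULL;
        global_bytes = 0;
        pthread_mutex_unlock(&q_lock);

        while (to_free) {
                struct qnode *next = to_free->next;

                free(to_free->obj);
                free(to_free);
                to_free = next;
        }
}

In the kernel version the SRCU read section plays the role the mutex cannot: it lets quarantine_remove_cache() wait (via synchronize_srcu()) for any batch that has already been detached but not yet freed.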
diff --git a/mm/madvise.c b/mm/madvise.c index dc5927c812d3..7a2abf0127ae 100644 --- a/mm/madvise.c +++ b/mm/madvise.c | |||
| @@ -513,7 +513,43 @@ static long madvise_dontneed(struct vm_area_struct *vma, | |||
| 513 | if (!can_madv_dontneed_vma(vma)) | 513 | if (!can_madv_dontneed_vma(vma)) |
| 514 | return -EINVAL; | 514 | return -EINVAL; |
| 515 | 515 | ||
| 516 | userfaultfd_remove(vma, prev, start, end); | 516 | if (!userfaultfd_remove(vma, start, end)) { |
| 517 | *prev = NULL; /* mmap_sem has been dropped, prev is stale */ | ||
| 518 | |||
| 519 | down_read(¤t->mm->mmap_sem); | ||
| 520 | vma = find_vma(current->mm, start); | ||
| 521 | if (!vma) | ||
| 522 | return -ENOMEM; | ||
| 523 | if (start < vma->vm_start) { | ||
| 524 | /* | ||
| 525 | * This "vma" under revalidation is the one | ||
| 526 | * with the lowest vma->vm_start where start | ||
| 527 | * is also < vma->vm_end. If start < | ||
| 528 | * vma->vm_start it means a hole materialized | ||
| 529 | * in the user address space within the | ||
| 530 | * virtual range passed to MADV_DONTNEED. | ||
| 531 | */ | ||
| 532 | return -ENOMEM; | ||
| 533 | } | ||
| 534 | if (!can_madv_dontneed_vma(vma)) | ||
| 535 | return -EINVAL; | ||
| 536 | if (end > vma->vm_end) { | ||
| 537 | /* | ||
| 538 | * Don't fail if end > vma->vm_end. If the old | ||
| 539 | * vma was split while the mmap_sem was | ||
| 540 | * released, the concurrent operation does | ||
| 541 | * not leave MADV_DONTNEED with an undefined | ||
| 542 | * result. There may be an | ||
| 543 | * adjacent next vma that we'll walk | ||
| 544 | * next. userfaultfd_remove() will generate an | ||
| 545 | * UFFD_EVENT_REMOVE repetition on the | ||
| 546 | * end-vma->vm_end range, but the manager can | ||
| 547 | * handle a repetition fine. | ||
| 548 | */ | ||
| 549 | end = vma->vm_end; | ||
| 550 | } | ||
| 551 | VM_WARN_ON(start >= end); | ||
| 552 | } | ||
| 517 | zap_page_range(vma, start, end - start); | 553 | zap_page_range(vma, start, end - start); |
| 518 | return 0; | 554 | return 0; |
| 519 | } | 555 | } |
| @@ -554,8 +590,10 @@ static long madvise_remove(struct vm_area_struct *vma, | |||
| 554 | * mmap_sem. | 590 | * mmap_sem. |
| 555 | */ | 591 | */ |
| 556 | get_file(f); | 592 | get_file(f); |
| 557 | userfaultfd_remove(vma, prev, start, end); | 593 | if (userfaultfd_remove(vma, start, end)) { |
| 558 | up_read(¤t->mm->mmap_sem); | 594 | /* mmap_sem was not released by userfaultfd_remove() */ |
| 595 | up_read(¤t->mm->mmap_sem); | ||
| 596 | } | ||
| 559 | error = vfs_fallocate(f, | 597 | error = vfs_fallocate(f, |
| 560 | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, | 598 | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, |
| 561 | offset, end - start); | 599 | offset, end - start); |
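For readers following the madvise() revalidation above, the user-visible contract being preserved is simple: on a private anonymous mapping, MADV_DONTNEED drops the pages in the range (via zap_page_range()) and later reads observe zero-filled memory. A small standalone program demonstrating that documented behaviour:

#include <assert.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096 * 4;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        assert(p != MAP_FAILED);
        memset(p, 0xaa, len);                    /* fault in and dirty the pages */
        assert(madvise(p, len, MADV_DONTNEED) == 0);
        assert(p[0] == 0 && p[len - 1] == 0);    /* zero-fill after DONTNEED */
        munmap(p, len);
        return 0;
}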
diff --git a/mm/memblock.c b/mm/memblock.c index b64b47803e52..696f06d17c4e 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
| @@ -1118,7 +1118,10 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn, | |||
| 1118 | } | 1118 | } |
| 1119 | } while (left < right); | 1119 | } while (left < right); |
| 1120 | 1120 | ||
| 1121 | return min(PHYS_PFN(type->regions[right].base), max_pfn); | 1121 | if (right == type->cnt) |
| 1122 | return max_pfn; | ||
| 1123 | else | ||
| 1124 | return min(PHYS_PFN(type->regions[right].base), max_pfn); | ||
| 1122 | } | 1125 | } |
| 1123 | 1126 | ||
| 1124 | /** | 1127 | /** |
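The memblock change above covers the case where the binary search settles one slot past the last region; dereferencing type->regions[right] there would read past the array, so the function now falls back to max_pfn. Below is a self-contained sketch of that boundary handling using hypothetical names (struct region, next_valid_pfn), not the real memblock types:

#include <assert.h>
#include <stddef.h>

struct region { unsigned long base_pfn; };

/* Lower-bound search for the first region whose base lies above pfn,
 * clamping to max_pfn when the search runs off the end of the array. */
static unsigned long next_valid_pfn(const struct region *regions, size_t cnt,
                                    unsigned long pfn, unsigned long max_pfn)
{
        size_t left = 0, right = cnt;

        while (left < right) {
                size_t mid = left + (right - left) / 2;

                if (regions[mid].base_pfn <= pfn)
                        left = mid + 1;
                else
                        right = mid;
        }

        if (right == cnt)                        /* ran past the last region */
                return max_pfn;
        return regions[right].base_pfn < max_pfn ?
               regions[right].base_pfn : max_pfn;
}

int main(void)
{
        const struct region regs[] = { { 16 }, { 64 } };

        assert(next_valid_pfn(regs, 2, 10, 1000) == 16);    /* next region base */
        assert(next_valid_pfn(regs, 2, 100, 1000) == 1000); /* past last region */
        return 0;
}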
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c52ec893e241..2bd7541d7c11 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -466,6 +466,8 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) | |||
| 466 | struct mem_cgroup_tree_per_node *mctz; | 466 | struct mem_cgroup_tree_per_node *mctz; |
| 467 | 467 | ||
| 468 | mctz = soft_limit_tree_from_page(page); | 468 | mctz = soft_limit_tree_from_page(page); |
| 469 | if (!mctz) | ||
| 470 | return; | ||
| 469 | /* | 471 | /* |
| 470 | * Necessary to update all ancestors when hierarchy is used. | 472 | * Necessary to update all ancestors when hierarchy is used. |
| 471 | * because their event counter is not touched. | 473 | * because their event counter is not touched. |
| @@ -503,7 +505,8 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) | |||
| 503 | for_each_node(nid) { | 505 | for_each_node(nid) { |
| 504 | mz = mem_cgroup_nodeinfo(memcg, nid); | 506 | mz = mem_cgroup_nodeinfo(memcg, nid); |
| 505 | mctz = soft_limit_tree_node(nid); | 507 | mctz = soft_limit_tree_node(nid); |
| 506 | mem_cgroup_remove_exceeded(mz, mctz); | 508 | if (mctz) |
| 509 | mem_cgroup_remove_exceeded(mz, mctz); | ||
| 507 | } | 510 | } |
| 508 | } | 511 | } |
| 509 | 512 | ||
| @@ -2558,7 +2561,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, | |||
| 2558 | * is empty. Do it lockless to prevent lock bouncing. Races | 2561 | * is empty. Do it lockless to prevent lock bouncing. Races |
| 2559 | * are acceptable as soft limit is best effort anyway. | 2562 | * are acceptable as soft limit is best effort anyway. |
| 2560 | */ | 2563 | */ |
| 2561 | if (RB_EMPTY_ROOT(&mctz->rb_root)) | 2564 | if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) |
| 2562 | return 0; | 2565 | return 0; |
| 2563 | 2566 | ||
| 2564 | /* | 2567 | /* |
| @@ -4135,17 +4138,22 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) | |||
| 4135 | kfree(memcg->nodeinfo[node]); | 4138 | kfree(memcg->nodeinfo[node]); |
| 4136 | } | 4139 | } |
| 4137 | 4140 | ||
| 4138 | static void mem_cgroup_free(struct mem_cgroup *memcg) | 4141 | static void __mem_cgroup_free(struct mem_cgroup *memcg) |
| 4139 | { | 4142 | { |
| 4140 | int node; | 4143 | int node; |
| 4141 | 4144 | ||
| 4142 | memcg_wb_domain_exit(memcg); | ||
| 4143 | for_each_node(node) | 4145 | for_each_node(node) |
| 4144 | free_mem_cgroup_per_node_info(memcg, node); | 4146 | free_mem_cgroup_per_node_info(memcg, node); |
| 4145 | free_percpu(memcg->stat); | 4147 | free_percpu(memcg->stat); |
| 4146 | kfree(memcg); | 4148 | kfree(memcg); |
| 4147 | } | 4149 | } |
| 4148 | 4150 | ||
| 4151 | static void mem_cgroup_free(struct mem_cgroup *memcg) | ||
| 4152 | { | ||
| 4153 | memcg_wb_domain_exit(memcg); | ||
| 4154 | __mem_cgroup_free(memcg); | ||
| 4155 | } | ||
| 4156 | |||
| 4149 | static struct mem_cgroup *mem_cgroup_alloc(void) | 4157 | static struct mem_cgroup *mem_cgroup_alloc(void) |
| 4150 | { | 4158 | { |
| 4151 | struct mem_cgroup *memcg; | 4159 | struct mem_cgroup *memcg; |
| @@ -4196,7 +4204,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) | |||
| 4196 | fail: | 4204 | fail: |
| 4197 | if (memcg->id.id > 0) | 4205 | if (memcg->id.id > 0) |
| 4198 | idr_remove(&mem_cgroup_idr, memcg->id.id); | 4206 | idr_remove(&mem_cgroup_idr, memcg->id.id); |
| 4199 | mem_cgroup_free(memcg); | 4207 | __mem_cgroup_free(memcg); |
| 4200 | return NULL; | 4208 | return NULL; |
| 4201 | } | 4209 | } |
| 4202 | 4210 | ||
diff --git a/mm/memory.c b/mm/memory.c index a97a4cec2e1f..235ba51b2fbf 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -445,7 +445,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, | |||
| 445 | mm_dec_nr_pmds(tlb->mm); | 445 | mm_dec_nr_pmds(tlb->mm); |
| 446 | } | 446 | } |
| 447 | 447 | ||
| 448 | static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, | 448 | static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, |
| 449 | unsigned long addr, unsigned long end, | 449 | unsigned long addr, unsigned long end, |
| 450 | unsigned long floor, unsigned long ceiling) | 450 | unsigned long floor, unsigned long ceiling) |
| 451 | { | 451 | { |
| @@ -454,7 +454,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, | |||
| 454 | unsigned long start; | 454 | unsigned long start; |
| 455 | 455 | ||
| 456 | start = addr; | 456 | start = addr; |
| 457 | pud = pud_offset(pgd, addr); | 457 | pud = pud_offset(p4d, addr); |
| 458 | do { | 458 | do { |
| 459 | next = pud_addr_end(addr, end); | 459 | next = pud_addr_end(addr, end); |
| 460 | if (pud_none_or_clear_bad(pud)) | 460 | if (pud_none_or_clear_bad(pud)) |
| @@ -462,6 +462,39 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, | |||
| 462 | free_pmd_range(tlb, pud, addr, next, floor, ceiling); | 462 | free_pmd_range(tlb, pud, addr, next, floor, ceiling); |
| 463 | } while (pud++, addr = next, addr != end); | 463 | } while (pud++, addr = next, addr != end); |
| 464 | 464 | ||
| 465 | start &= P4D_MASK; | ||
| 466 | if (start < floor) | ||
| 467 | return; | ||
| 468 | if (ceiling) { | ||
| 469 | ceiling &= P4D_MASK; | ||
| 470 | if (!ceiling) | ||
| 471 | return; | ||
| 472 | } | ||
| 473 | if (end - 1 > ceiling - 1) | ||
| 474 | return; | ||
| 475 | |||
| 476 | pud = pud_offset(p4d, start); | ||
| 477 | p4d_clear(p4d); | ||
| 478 | pud_free_tlb(tlb, pud, start); | ||
| 479 | } | ||
| 480 | |||
| 481 | static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd, | ||
| 482 | unsigned long addr, unsigned long end, | ||
| 483 | unsigned long floor, unsigned long ceiling) | ||
| 484 | { | ||
| 485 | p4d_t *p4d; | ||
| 486 | unsigned long next; | ||
| 487 | unsigned long start; | ||
| 488 | |||
| 489 | start = addr; | ||
| 490 | p4d = p4d_offset(pgd, addr); | ||
| 491 | do { | ||
| 492 | next = p4d_addr_end(addr, end); | ||
| 493 | if (p4d_none_or_clear_bad(p4d)) | ||
| 494 | continue; | ||
| 495 | free_pud_range(tlb, p4d, addr, next, floor, ceiling); | ||
| 496 | } while (p4d++, addr = next, addr != end); | ||
| 497 | |||
| 465 | start &= PGDIR_MASK; | 498 | start &= PGDIR_MASK; |
| 466 | if (start < floor) | 499 | if (start < floor) |
| 467 | return; | 500 | return; |
| @@ -473,9 +506,9 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, | |||
| 473 | if (end - 1 > ceiling - 1) | 506 | if (end - 1 > ceiling - 1) |
| 474 | return; | 507 | return; |
| 475 | 508 | ||
| 476 | pud = pud_offset(pgd, start); | 509 | p4d = p4d_offset(pgd, start); |
| 477 | pgd_clear(pgd); | 510 | pgd_clear(pgd); |
| 478 | pud_free_tlb(tlb, pud, start); | 511 | p4d_free_tlb(tlb, p4d, start); |
| 479 | } | 512 | } |
| 480 | 513 | ||
| 481 | /* | 514 | /* |
| @@ -539,7 +572,7 @@ void free_pgd_range(struct mmu_gather *tlb, | |||
| 539 | next = pgd_addr_end(addr, end); | 572 | next = pgd_addr_end(addr, end); |
| 540 | if (pgd_none_or_clear_bad(pgd)) | 573 | if (pgd_none_or_clear_bad(pgd)) |
| 541 | continue; | 574 | continue; |
| 542 | free_pud_range(tlb, pgd, addr, next, floor, ceiling); | 575 | free_p4d_range(tlb, pgd, addr, next, floor, ceiling); |
| 543 | } while (pgd++, addr = next, addr != end); | 576 | } while (pgd++, addr = next, addr != end); |
| 544 | } | 577 | } |
| 545 | 578 | ||
| @@ -658,7 +691,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, | |||
| 658 | pte_t pte, struct page *page) | 691 | pte_t pte, struct page *page) |
| 659 | { | 692 | { |
| 660 | pgd_t *pgd = pgd_offset(vma->vm_mm, addr); | 693 | pgd_t *pgd = pgd_offset(vma->vm_mm, addr); |
| 661 | pud_t *pud = pud_offset(pgd, addr); | 694 | p4d_t *p4d = p4d_offset(pgd, addr); |
| 695 | pud_t *pud = pud_offset(p4d, addr); | ||
| 662 | pmd_t *pmd = pmd_offset(pud, addr); | 696 | pmd_t *pmd = pmd_offset(pud, addr); |
| 663 | struct address_space *mapping; | 697 | struct address_space *mapping; |
| 664 | pgoff_t index; | 698 | pgoff_t index; |
| @@ -1023,16 +1057,16 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src | |||
| 1023 | } | 1057 | } |
| 1024 | 1058 | ||
| 1025 | static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, | 1059 | static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
| 1026 | pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, | 1060 | p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma, |
| 1027 | unsigned long addr, unsigned long end) | 1061 | unsigned long addr, unsigned long end) |
| 1028 | { | 1062 | { |
| 1029 | pud_t *src_pud, *dst_pud; | 1063 | pud_t *src_pud, *dst_pud; |
| 1030 | unsigned long next; | 1064 | unsigned long next; |
| 1031 | 1065 | ||
| 1032 | dst_pud = pud_alloc(dst_mm, dst_pgd, addr); | 1066 | dst_pud = pud_alloc(dst_mm, dst_p4d, addr); |
| 1033 | if (!dst_pud) | 1067 | if (!dst_pud) |
| 1034 | return -ENOMEM; | 1068 | return -ENOMEM; |
| 1035 | src_pud = pud_offset(src_pgd, addr); | 1069 | src_pud = pud_offset(src_p4d, addr); |
| 1036 | do { | 1070 | do { |
| 1037 | next = pud_addr_end(addr, end); | 1071 | next = pud_addr_end(addr, end); |
| 1038 | if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) { | 1072 | if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) { |
| @@ -1056,6 +1090,28 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src | |||
| 1056 | return 0; | 1090 | return 0; |
| 1057 | } | 1091 | } |
| 1058 | 1092 | ||
| 1093 | static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, | ||
| 1094 | pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, | ||
| 1095 | unsigned long addr, unsigned long end) | ||
| 1096 | { | ||
| 1097 | p4d_t *src_p4d, *dst_p4d; | ||
| 1098 | unsigned long next; | ||
| 1099 | |||
| 1100 | dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr); | ||
| 1101 | if (!dst_p4d) | ||
| 1102 | return -ENOMEM; | ||
| 1103 | src_p4d = p4d_offset(src_pgd, addr); | ||
| 1104 | do { | ||
| 1105 | next = p4d_addr_end(addr, end); | ||
| 1106 | if (p4d_none_or_clear_bad(src_p4d)) | ||
| 1107 | continue; | ||
| 1108 | if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d, | ||
| 1109 | vma, addr, next)) | ||
| 1110 | return -ENOMEM; | ||
| 1111 | } while (dst_p4d++, src_p4d++, addr = next, addr != end); | ||
| 1112 | return 0; | ||
| 1113 | } | ||
| 1114 | |||
| 1059 | int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, | 1115 | int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
| 1060 | struct vm_area_struct *vma) | 1116 | struct vm_area_struct *vma) |
| 1061 | { | 1117 | { |
| @@ -1111,7 +1167,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
| 1111 | next = pgd_addr_end(addr, end); | 1167 | next = pgd_addr_end(addr, end); |
| 1112 | if (pgd_none_or_clear_bad(src_pgd)) | 1168 | if (pgd_none_or_clear_bad(src_pgd)) |
| 1113 | continue; | 1169 | continue; |
| 1114 | if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd, | 1170 | if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd, |
| 1115 | vma, addr, next))) { | 1171 | vma, addr, next))) { |
| 1116 | ret = -ENOMEM; | 1172 | ret = -ENOMEM; |
| 1117 | break; | 1173 | break; |
| @@ -1267,14 +1323,14 @@ next: | |||
| 1267 | } | 1323 | } |
| 1268 | 1324 | ||
| 1269 | static inline unsigned long zap_pud_range(struct mmu_gather *tlb, | 1325 | static inline unsigned long zap_pud_range(struct mmu_gather *tlb, |
| 1270 | struct vm_area_struct *vma, pgd_t *pgd, | 1326 | struct vm_area_struct *vma, p4d_t *p4d, |
| 1271 | unsigned long addr, unsigned long end, | 1327 | unsigned long addr, unsigned long end, |
| 1272 | struct zap_details *details) | 1328 | struct zap_details *details) |
| 1273 | { | 1329 | { |
| 1274 | pud_t *pud; | 1330 | pud_t *pud; |
| 1275 | unsigned long next; | 1331 | unsigned long next; |
| 1276 | 1332 | ||
| 1277 | pud = pud_offset(pgd, addr); | 1333 | pud = pud_offset(p4d, addr); |
| 1278 | do { | 1334 | do { |
| 1279 | next = pud_addr_end(addr, end); | 1335 | next = pud_addr_end(addr, end); |
| 1280 | if (pud_trans_huge(*pud) || pud_devmap(*pud)) { | 1336 | if (pud_trans_huge(*pud) || pud_devmap(*pud)) { |
| @@ -1295,6 +1351,25 @@ next: | |||
| 1295 | return addr; | 1351 | return addr; |
| 1296 | } | 1352 | } |
| 1297 | 1353 | ||
| 1354 | static inline unsigned long zap_p4d_range(struct mmu_gather *tlb, | ||
| 1355 | struct vm_area_struct *vma, pgd_t *pgd, | ||
| 1356 | unsigned long addr, unsigned long end, | ||
| 1357 | struct zap_details *details) | ||
| 1358 | { | ||
| 1359 | p4d_t *p4d; | ||
| 1360 | unsigned long next; | ||
| 1361 | |||
| 1362 | p4d = p4d_offset(pgd, addr); | ||
| 1363 | do { | ||
| 1364 | next = p4d_addr_end(addr, end); | ||
| 1365 | if (p4d_none_or_clear_bad(p4d)) | ||
| 1366 | continue; | ||
| 1367 | next = zap_pud_range(tlb, vma, p4d, addr, next, details); | ||
| 1368 | } while (p4d++, addr = next, addr != end); | ||
| 1369 | |||
| 1370 | return addr; | ||
| 1371 | } | ||
| 1372 | |||
| 1298 | void unmap_page_range(struct mmu_gather *tlb, | 1373 | void unmap_page_range(struct mmu_gather *tlb, |
| 1299 | struct vm_area_struct *vma, | 1374 | struct vm_area_struct *vma, |
| 1300 | unsigned long addr, unsigned long end, | 1375 | unsigned long addr, unsigned long end, |
| @@ -1310,7 +1385,7 @@ void unmap_page_range(struct mmu_gather *tlb, | |||
| 1310 | next = pgd_addr_end(addr, end); | 1385 | next = pgd_addr_end(addr, end); |
| 1311 | if (pgd_none_or_clear_bad(pgd)) | 1386 | if (pgd_none_or_clear_bad(pgd)) |
| 1312 | continue; | 1387 | continue; |
| 1313 | next = zap_pud_range(tlb, vma, pgd, addr, next, details); | 1388 | next = zap_p4d_range(tlb, vma, pgd, addr, next, details); |
| 1314 | } while (pgd++, addr = next, addr != end); | 1389 | } while (pgd++, addr = next, addr != end); |
| 1315 | tlb_end_vma(tlb, vma); | 1390 | tlb_end_vma(tlb, vma); |
| 1316 | } | 1391 | } |
| @@ -1465,16 +1540,24 @@ EXPORT_SYMBOL_GPL(zap_vma_ptes); | |||
| 1465 | pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, | 1540 | pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, |
| 1466 | spinlock_t **ptl) | 1541 | spinlock_t **ptl) |
| 1467 | { | 1542 | { |
| 1468 | pgd_t *pgd = pgd_offset(mm, addr); | 1543 | pgd_t *pgd; |
| 1469 | pud_t *pud = pud_alloc(mm, pgd, addr); | 1544 | p4d_t *p4d; |
| 1470 | if (pud) { | 1545 | pud_t *pud; |
| 1471 | pmd_t *pmd = pmd_alloc(mm, pud, addr); | 1546 | pmd_t *pmd; |
| 1472 | if (pmd) { | 1547 | |
| 1473 | VM_BUG_ON(pmd_trans_huge(*pmd)); | 1548 | pgd = pgd_offset(mm, addr); |
| 1474 | return pte_alloc_map_lock(mm, pmd, addr, ptl); | 1549 | p4d = p4d_alloc(mm, pgd, addr); |
| 1475 | } | 1550 | if (!p4d) |
| 1476 | } | 1551 | return NULL; |
| 1477 | return NULL; | 1552 | pud = pud_alloc(mm, p4d, addr); |
| 1553 | if (!pud) | ||
| 1554 | return NULL; | ||
| 1555 | pmd = pmd_alloc(mm, pud, addr); | ||
| 1556 | if (!pmd) | ||
| 1557 | return NULL; | ||
| 1558 | |||
| 1559 | VM_BUG_ON(pmd_trans_huge(*pmd)); | ||
| 1560 | return pte_alloc_map_lock(mm, pmd, addr, ptl); | ||
| 1478 | } | 1561 | } |
| 1479 | 1562 | ||
| 1480 | /* | 1563 | /* |
| @@ -1740,7 +1823,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, | |||
| 1740 | return 0; | 1823 | return 0; |
| 1741 | } | 1824 | } |
| 1742 | 1825 | ||
| 1743 | static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, | 1826 | static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, |
| 1744 | unsigned long addr, unsigned long end, | 1827 | unsigned long addr, unsigned long end, |
| 1745 | unsigned long pfn, pgprot_t prot) | 1828 | unsigned long pfn, pgprot_t prot) |
| 1746 | { | 1829 | { |
| @@ -1748,7 +1831,7 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, | |||
| 1748 | unsigned long next; | 1831 | unsigned long next; |
| 1749 | 1832 | ||
| 1750 | pfn -= addr >> PAGE_SHIFT; | 1833 | pfn -= addr >> PAGE_SHIFT; |
| 1751 | pud = pud_alloc(mm, pgd, addr); | 1834 | pud = pud_alloc(mm, p4d, addr); |
| 1752 | if (!pud) | 1835 | if (!pud) |
| 1753 | return -ENOMEM; | 1836 | return -ENOMEM; |
| 1754 | do { | 1837 | do { |
| @@ -1760,6 +1843,26 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, | |||
| 1760 | return 0; | 1843 | return 0; |
| 1761 | } | 1844 | } |
| 1762 | 1845 | ||
| 1846 | static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, | ||
| 1847 | unsigned long addr, unsigned long end, | ||
| 1848 | unsigned long pfn, pgprot_t prot) | ||
| 1849 | { | ||
| 1850 | p4d_t *p4d; | ||
| 1851 | unsigned long next; | ||
| 1852 | |||
| 1853 | pfn -= addr >> PAGE_SHIFT; | ||
| 1854 | p4d = p4d_alloc(mm, pgd, addr); | ||
| 1855 | if (!p4d) | ||
| 1856 | return -ENOMEM; | ||
| 1857 | do { | ||
| 1858 | next = p4d_addr_end(addr, end); | ||
| 1859 | if (remap_pud_range(mm, p4d, addr, next, | ||
| 1860 | pfn + (addr >> PAGE_SHIFT), prot)) | ||
| 1861 | return -ENOMEM; | ||
| 1862 | } while (p4d++, addr = next, addr != end); | ||
| 1863 | return 0; | ||
| 1864 | } | ||
| 1865 | |||
| 1763 | /** | 1866 | /** |
| 1764 | * remap_pfn_range - remap kernel memory to userspace | 1867 | * remap_pfn_range - remap kernel memory to userspace |
| 1765 | * @vma: user vma to map to | 1868 | * @vma: user vma to map to |
| @@ -1816,7 +1919,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, | |||
| 1816 | flush_cache_range(vma, addr, end); | 1919 | flush_cache_range(vma, addr, end); |
| 1817 | do { | 1920 | do { |
| 1818 | next = pgd_addr_end(addr, end); | 1921 | next = pgd_addr_end(addr, end); |
| 1819 | err = remap_pud_range(mm, pgd, addr, next, | 1922 | err = remap_p4d_range(mm, pgd, addr, next, |
| 1820 | pfn + (addr >> PAGE_SHIFT), prot); | 1923 | pfn + (addr >> PAGE_SHIFT), prot); |
| 1821 | if (err) | 1924 | if (err) |
| 1822 | break; | 1925 | break; |
| @@ -1932,7 +2035,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, | |||
| 1932 | return err; | 2035 | return err; |
| 1933 | } | 2036 | } |
| 1934 | 2037 | ||
| 1935 | static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, | 2038 | static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, |
| 1936 | unsigned long addr, unsigned long end, | 2039 | unsigned long addr, unsigned long end, |
| 1937 | pte_fn_t fn, void *data) | 2040 | pte_fn_t fn, void *data) |
| 1938 | { | 2041 | { |
| @@ -1940,7 +2043,7 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, | |||
| 1940 | unsigned long next; | 2043 | unsigned long next; |
| 1941 | int err; | 2044 | int err; |
| 1942 | 2045 | ||
| 1943 | pud = pud_alloc(mm, pgd, addr); | 2046 | pud = pud_alloc(mm, p4d, addr); |
| 1944 | if (!pud) | 2047 | if (!pud) |
| 1945 | return -ENOMEM; | 2048 | return -ENOMEM; |
| 1946 | do { | 2049 | do { |
| @@ -1952,6 +2055,26 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, | |||
| 1952 | return err; | 2055 | return err; |
| 1953 | } | 2056 | } |
| 1954 | 2057 | ||
| 2058 | static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, | ||
| 2059 | unsigned long addr, unsigned long end, | ||
| 2060 | pte_fn_t fn, void *data) | ||
| 2061 | { | ||
| 2062 | p4d_t *p4d; | ||
| 2063 | unsigned long next; | ||
| 2064 | int err; | ||
| 2065 | |||
| 2066 | p4d = p4d_alloc(mm, pgd, addr); | ||
| 2067 | if (!p4d) | ||
| 2068 | return -ENOMEM; | ||
| 2069 | do { | ||
| 2070 | next = p4d_addr_end(addr, end); | ||
| 2071 | err = apply_to_pud_range(mm, p4d, addr, next, fn, data); | ||
| 2072 | if (err) | ||
| 2073 | break; | ||
| 2074 | } while (p4d++, addr = next, addr != end); | ||
| 2075 | return err; | ||
| 2076 | } | ||
| 2077 | |||
| 1955 | /* | 2078 | /* |
| 1956 | * Scan a region of virtual memory, filling in page tables as necessary | 2079 | * Scan a region of virtual memory, filling in page tables as necessary |
| 1957 | * and calling a provided function on each leaf page table. | 2080 | * and calling a provided function on each leaf page table. |
| @@ -1970,7 +2093,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr, | |||
| 1970 | pgd = pgd_offset(mm, addr); | 2093 | pgd = pgd_offset(mm, addr); |
| 1971 | do { | 2094 | do { |
| 1972 | next = pgd_addr_end(addr, end); | 2095 | next = pgd_addr_end(addr, end); |
| 1973 | err = apply_to_pud_range(mm, pgd, addr, next, fn, data); | 2096 | err = apply_to_p4d_range(mm, pgd, addr, next, fn, data); |
| 1974 | if (err) | 2097 | if (err) |
| 1975 | break; | 2098 | break; |
| 1976 | } while (pgd++, addr = next, addr != end); | 2099 | } while (pgd++, addr = next, addr != end); |
| @@ -3653,11 +3776,15 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, | |||
| 3653 | }; | 3776 | }; |
| 3654 | struct mm_struct *mm = vma->vm_mm; | 3777 | struct mm_struct *mm = vma->vm_mm; |
| 3655 | pgd_t *pgd; | 3778 | pgd_t *pgd; |
| 3779 | p4d_t *p4d; | ||
| 3656 | int ret; | 3780 | int ret; |
| 3657 | 3781 | ||
| 3658 | pgd = pgd_offset(mm, address); | 3782 | pgd = pgd_offset(mm, address); |
| 3783 | p4d = p4d_alloc(mm, pgd, address); | ||
| 3784 | if (!p4d) | ||
| 3785 | return VM_FAULT_OOM; | ||
| 3659 | 3786 | ||
| 3660 | vmf.pud = pud_alloc(mm, pgd, address); | 3787 | vmf.pud = pud_alloc(mm, p4d, address); |
| 3661 | if (!vmf.pud) | 3788 | if (!vmf.pud) |
| 3662 | return VM_FAULT_OOM; | 3789 | return VM_FAULT_OOM; |
| 3663 | if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) { | 3790 | if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) { |
| @@ -3779,12 +3906,35 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, | |||
| 3779 | } | 3906 | } |
| 3780 | EXPORT_SYMBOL_GPL(handle_mm_fault); | 3907 | EXPORT_SYMBOL_GPL(handle_mm_fault); |
| 3781 | 3908 | ||
| 3909 | #ifndef __PAGETABLE_P4D_FOLDED | ||
| 3910 | /* | ||
| 3911 | * Allocate p4d page table. | ||
| 3912 | * We've already handled the fast-path in-line. | ||
| 3913 | */ | ||
| 3914 | int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) | ||
| 3915 | { | ||
| 3916 | p4d_t *new = p4d_alloc_one(mm, address); | ||
| 3917 | if (!new) | ||
| 3918 | return -ENOMEM; | ||
| 3919 | |||
| 3920 | smp_wmb(); /* See comment in __pte_alloc */ | ||
| 3921 | |||
| 3922 | spin_lock(&mm->page_table_lock); | ||
| 3923 | if (pgd_present(*pgd)) /* Another has populated it */ | ||
| 3924 | p4d_free(mm, new); | ||
| 3925 | else | ||
| 3926 | pgd_populate(mm, pgd, new); | ||
| 3927 | spin_unlock(&mm->page_table_lock); | ||
| 3928 | return 0; | ||
| 3929 | } | ||
| 3930 | #endif /* __PAGETABLE_P4D_FOLDED */ | ||
| 3931 | |||
| 3782 | #ifndef __PAGETABLE_PUD_FOLDED | 3932 | #ifndef __PAGETABLE_PUD_FOLDED |
| 3783 | /* | 3933 | /* |
| 3784 | * Allocate page upper directory. | 3934 | * Allocate page upper directory. |
| 3785 | * We've already handled the fast-path in-line. | 3935 | * We've already handled the fast-path in-line. |
| 3786 | */ | 3936 | */ |
| 3787 | int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) | 3937 | int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) |
| 3788 | { | 3938 | { |
| 3789 | pud_t *new = pud_alloc_one(mm, address); | 3939 | pud_t *new = pud_alloc_one(mm, address); |
| 3790 | if (!new) | 3940 | if (!new) |
| @@ -3793,10 +3943,17 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) | |||
| 3793 | smp_wmb(); /* See comment in __pte_alloc */ | 3943 | smp_wmb(); /* See comment in __pte_alloc */ |
| 3794 | 3944 | ||
| 3795 | spin_lock(&mm->page_table_lock); | 3945 | spin_lock(&mm->page_table_lock); |
| 3796 | if (pgd_present(*pgd)) /* Another has populated it */ | 3946 | #ifndef __ARCH_HAS_5LEVEL_HACK |
| 3947 | if (p4d_present(*p4d)) /* Another has populated it */ | ||
| 3797 | pud_free(mm, new); | 3948 | pud_free(mm, new); |
| 3798 | else | 3949 | else |
| 3799 | pgd_populate(mm, pgd, new); | 3950 | p4d_populate(mm, p4d, new); |
| 3951 | #else | ||
| 3952 | if (pgd_present(*p4d)) /* Another has populated it */ | ||
| 3953 | pud_free(mm, new); | ||
| 3954 | else | ||
| 3955 | pgd_populate(mm, p4d, new); | ||
| 3956 | #endif /* __ARCH_HAS_5LEVEL_HACK */ | ||
| 3800 | spin_unlock(&mm->page_table_lock); | 3957 | spin_unlock(&mm->page_table_lock); |
| 3801 | return 0; | 3958 | return 0; |
| 3802 | } | 3959 | } |
| @@ -3839,6 +3996,7 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, | |||
| 3839 | pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) | 3996 | pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) |
| 3840 | { | 3997 | { |
| 3841 | pgd_t *pgd; | 3998 | pgd_t *pgd; |
| 3999 | p4d_t *p4d; | ||
| 3842 | pud_t *pud; | 4000 | pud_t *pud; |
| 3843 | pmd_t *pmd; | 4001 | pmd_t *pmd; |
| 3844 | pte_t *ptep; | 4002 | pte_t *ptep; |
| @@ -3847,7 +4005,11 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, | |||
| 3847 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | 4005 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) |
| 3848 | goto out; | 4006 | goto out; |
| 3849 | 4007 | ||
| 3850 | pud = pud_offset(pgd, address); | 4008 | p4d = p4d_offset(pgd, address); |
| 4009 | if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) | ||
| 4010 | goto out; | ||
| 4011 | |||
| 4012 | pud = pud_offset(p4d, address); | ||
| 3851 | if (pud_none(*pud) || unlikely(pud_bad(*pud))) | 4013 | if (pud_none(*pud) || unlikely(pud_bad(*pud))) |
| 3852 | goto out; | 4014 | goto out; |
| 3853 | 4015 | ||
diff --git a/mm/mlock.c b/mm/mlock.c index 1050511f8b2b..0dd9ca18e19e 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
| @@ -380,6 +380,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec, | |||
| 380 | pte = get_locked_pte(vma->vm_mm, start, &ptl); | 380 | pte = get_locked_pte(vma->vm_mm, start, &ptl); |
| 381 | /* Make sure we do not cross the page table boundary */ | 381 | /* Make sure we do not cross the page table boundary */ |
| 382 | end = pgd_addr_end(start, end); | 382 | end = pgd_addr_end(start, end); |
| 383 | end = p4d_addr_end(start, end); | ||
| 383 | end = pud_addr_end(start, end); | 384 | end = pud_addr_end(start, end); |
| 384 | end = pmd_addr_end(start, end); | 385 | end = pmd_addr_end(start, end); |
| 385 | 386 | ||
| @@ -442,7 +443,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, | |||
| 442 | 443 | ||
| 443 | while (start < end) { | 444 | while (start < end) { |
| 444 | struct page *page; | 445 | struct page *page; |
| 445 | unsigned int page_mask; | 446 | unsigned int page_mask = 0; |
| 446 | unsigned long page_increm; | 447 | unsigned long page_increm; |
| 447 | struct pagevec pvec; | 448 | struct pagevec pvec; |
| 448 | struct zone *zone; | 449 | struct zone *zone; |
| @@ -456,8 +457,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, | |||
| 456 | * suits munlock very well (and if somehow an abnormal page | 457 | * suits munlock very well (and if somehow an abnormal page |
| 457 | * has sneaked into the range, we won't oops here: great). | 458 | * has sneaked into the range, we won't oops here: great). |
| 458 | */ | 459 | */ |
| 459 | page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP, | 460 | page = follow_page(vma, start, FOLL_GET | FOLL_DUMP); |
| 460 | &page_mask); | ||
| 461 | 461 | ||
| 462 | if (page && !IS_ERR(page)) { | 462 | if (page && !IS_ERR(page)) { |
| 463 | if (PageTransTail(page)) { | 463 | if (PageTransTail(page)) { |
| @@ -468,8 +468,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, | |||
| 468 | /* | 468 | /* |
| 469 | * Any THP page found by follow_page_mask() may | 469 | * Any THP page found by follow_page_mask() may |
| 470 | * have gotten split before reaching | 470 | * have gotten split before reaching |
| 471 | * munlock_vma_page(), so we need to recompute | 471 | * munlock_vma_page(), so we need to compute |
| 472 | * the page_mask here. | 472 | * the page_mask here instead. |
| 473 | */ | 473 | */ |
| 474 | page_mask = munlock_vma_page(page); | 474 | page_mask = munlock_vma_page(page); |
| 475 | unlock_page(page); | 475 | unlock_page(page); |
diff --git a/mm/mprotect.c b/mm/mprotect.c index 848e946b08e5..8edd0d576254 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c | |||
| @@ -193,14 +193,14 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, | |||
| 193 | } | 193 | } |
| 194 | 194 | ||
| 195 | static inline unsigned long change_pud_range(struct vm_area_struct *vma, | 195 | static inline unsigned long change_pud_range(struct vm_area_struct *vma, |
| 196 | pgd_t *pgd, unsigned long addr, unsigned long end, | 196 | p4d_t *p4d, unsigned long addr, unsigned long end, |
| 197 | pgprot_t newprot, int dirty_accountable, int prot_numa) | 197 | pgprot_t newprot, int dirty_accountable, int prot_numa) |
| 198 | { | 198 | { |
| 199 | pud_t *pud; | 199 | pud_t *pud; |
| 200 | unsigned long next; | 200 | unsigned long next; |
| 201 | unsigned long pages = 0; | 201 | unsigned long pages = 0; |
| 202 | 202 | ||
| 203 | pud = pud_offset(pgd, addr); | 203 | pud = pud_offset(p4d, addr); |
| 204 | do { | 204 | do { |
| 205 | next = pud_addr_end(addr, end); | 205 | next = pud_addr_end(addr, end); |
| 206 | if (pud_none_or_clear_bad(pud)) | 206 | if (pud_none_or_clear_bad(pud)) |
| @@ -212,6 +212,26 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma, | |||
| 212 | return pages; | 212 | return pages; |
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | static inline unsigned long change_p4d_range(struct vm_area_struct *vma, | ||
| 216 | pgd_t *pgd, unsigned long addr, unsigned long end, | ||
| 217 | pgprot_t newprot, int dirty_accountable, int prot_numa) | ||
| 218 | { | ||
| 219 | p4d_t *p4d; | ||
| 220 | unsigned long next; | ||
| 221 | unsigned long pages = 0; | ||
| 222 | |||
| 223 | p4d = p4d_offset(pgd, addr); | ||
| 224 | do { | ||
| 225 | next = p4d_addr_end(addr, end); | ||
| 226 | if (p4d_none_or_clear_bad(p4d)) | ||
| 227 | continue; | ||
| 228 | pages += change_pud_range(vma, p4d, addr, next, newprot, | ||
| 229 | dirty_accountable, prot_numa); | ||
| 230 | } while (p4d++, addr = next, addr != end); | ||
| 231 | |||
| 232 | return pages; | ||
| 233 | } | ||
| 234 | |||
| 215 | static unsigned long change_protection_range(struct vm_area_struct *vma, | 235 | static unsigned long change_protection_range(struct vm_area_struct *vma, |
| 216 | unsigned long addr, unsigned long end, pgprot_t newprot, | 236 | unsigned long addr, unsigned long end, pgprot_t newprot, |
| 217 | int dirty_accountable, int prot_numa) | 237 | int dirty_accountable, int prot_numa) |
| @@ -230,7 +250,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma, | |||
| 230 | next = pgd_addr_end(addr, end); | 250 | next = pgd_addr_end(addr, end); |
| 231 | if (pgd_none_or_clear_bad(pgd)) | 251 | if (pgd_none_or_clear_bad(pgd)) |
| 232 | continue; | 252 | continue; |
| 233 | pages += change_pud_range(vma, pgd, addr, next, newprot, | 253 | pages += change_p4d_range(vma, pgd, addr, next, newprot, |
| 234 | dirty_accountable, prot_numa); | 254 | dirty_accountable, prot_numa); |
| 235 | } while (pgd++, addr = next, addr != end); | 255 | } while (pgd++, addr = next, addr != end); |
| 236 | 256 | ||
diff --git a/mm/mremap.c b/mm/mremap.c index 8233b0105c82..cd8a1b199ef9 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) | 32 | static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) |
| 33 | { | 33 | { |
| 34 | pgd_t *pgd; | 34 | pgd_t *pgd; |
| 35 | p4d_t *p4d; | ||
| 35 | pud_t *pud; | 36 | pud_t *pud; |
| 36 | pmd_t *pmd; | 37 | pmd_t *pmd; |
| 37 | 38 | ||
| @@ -39,7 +40,11 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) | |||
| 39 | if (pgd_none_or_clear_bad(pgd)) | 40 | if (pgd_none_or_clear_bad(pgd)) |
| 40 | return NULL; | 41 | return NULL; |
| 41 | 42 | ||
| 42 | pud = pud_offset(pgd, addr); | 43 | p4d = p4d_offset(pgd, addr); |
| 44 | if (p4d_none_or_clear_bad(p4d)) | ||
| 45 | return NULL; | ||
| 46 | |||
| 47 | pud = pud_offset(p4d, addr); | ||
| 43 | if (pud_none_or_clear_bad(pud)) | 48 | if (pud_none_or_clear_bad(pud)) |
| 44 | return NULL; | 49 | return NULL; |
| 45 | 50 | ||
| @@ -54,11 +59,15 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 54 | unsigned long addr) | 59 | unsigned long addr) |
| 55 | { | 60 | { |
| 56 | pgd_t *pgd; | 61 | pgd_t *pgd; |
| 62 | p4d_t *p4d; | ||
| 57 | pud_t *pud; | 63 | pud_t *pud; |
| 58 | pmd_t *pmd; | 64 | pmd_t *pmd; |
| 59 | 65 | ||
| 60 | pgd = pgd_offset(mm, addr); | 66 | pgd = pgd_offset(mm, addr); |
| 61 | pud = pud_alloc(mm, pgd, addr); | 67 | p4d = p4d_alloc(mm, pgd, addr); |
| 68 | if (!p4d) | ||
| 69 | return NULL; | ||
| 70 | pud = pud_alloc(mm, p4d, addr); | ||
| 62 | if (!pud) | 71 | if (!pud) |
| 63 | return NULL; | 72 | return NULL; |
| 64 | 73 | ||
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index eaa64d2ffdc5..6cbde310abed 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -873,7 +873,8 @@ done_merging: | |||
| 873 | higher_page = page + (combined_pfn - pfn); | 873 | higher_page = page + (combined_pfn - pfn); |
| 874 | buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1); | 874 | buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1); |
| 875 | higher_buddy = higher_page + (buddy_pfn - combined_pfn); | 875 | higher_buddy = higher_page + (buddy_pfn - combined_pfn); |
| 876 | if (page_is_buddy(higher_page, higher_buddy, order + 1)) { | 876 | if (pfn_valid_within(buddy_pfn) && |
| 877 | page_is_buddy(higher_page, higher_buddy, order + 1)) { | ||
| 877 | list_add_tail(&page->lru, | 878 | list_add_tail(&page->lru, |
| 878 | &zone->free_area[order].free_list[migratetype]); | 879 | &zone->free_area[order].free_list[migratetype]); |
| 879 | goto out; | 880 | goto out; |
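The page_alloc change above only adds a pfn_valid_within() check before touching the candidate higher-order buddy. The buddy is found by flipping a single bit of the pfn, which is why the candidate can land on a pfn with no backing memmap on configurations where zones contain holes. A tiny standalone illustration of that buddy arithmetic (the XOR rule used by __find_buddy_pfn()):

#include <assert.h>
#include <stdio.h>

/* The buddy of the block starting at pfn, at a given order, differs from it
 * only in bit 'order'; the merged block starts at the lower of the pair. */
static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
        return pfn ^ (1UL << order);
}

int main(void)
{
        assert(find_buddy_pfn(0x100, 1) == 0x102);  /* order-1 pair */
        assert(find_buddy_pfn(0x104, 2) == 0x100);  /* order-2 pair */

        unsigned long combined = 0x104 & ~(1UL << 2);
        printf("combined pfn: 0x%lx\n", combined);  /* 0x100 */
        return 0;
}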
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index a23001a22c15..c4c9def8ffea 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c | |||
| @@ -104,6 +104,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) | |||
| 104 | struct mm_struct *mm = pvmw->vma->vm_mm; | 104 | struct mm_struct *mm = pvmw->vma->vm_mm; |
| 105 | struct page *page = pvmw->page; | 105 | struct page *page = pvmw->page; |
| 106 | pgd_t *pgd; | 106 | pgd_t *pgd; |
| 107 | p4d_t *p4d; | ||
| 107 | pud_t *pud; | 108 | pud_t *pud; |
| 108 | 109 | ||
| 109 | /* The only possible pmd mapping has been handled on last iteration */ | 110 | /* The only possible pmd mapping has been handled on last iteration */ |
| @@ -133,7 +134,10 @@ restart: | |||
| 133 | pgd = pgd_offset(mm, pvmw->address); | 134 | pgd = pgd_offset(mm, pvmw->address); |
| 134 | if (!pgd_present(*pgd)) | 135 | if (!pgd_present(*pgd)) |
| 135 | return false; | 136 | return false; |
| 136 | pud = pud_offset(pgd, pvmw->address); | 137 | p4d = p4d_offset(pgd, pvmw->address); |
| 138 | if (!p4d_present(*p4d)) | ||
| 139 | return false; | ||
| 140 | pud = pud_offset(p4d, pvmw->address); | ||
| 137 | if (!pud_present(*pud)) | 141 | if (!pud_present(*pud)) |
| 138 | return false; | 142 | return false; |
| 139 | pvmw->pmd = pmd_offset(pud, pvmw->address); | 143 | pvmw->pmd = pmd_offset(pud, pvmw->address); |
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 03761577ae86..60f7856e508f 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c | |||
| @@ -69,14 +69,14 @@ again: | |||
| 69 | return err; | 69 | return err; |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, | 72 | static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, |
| 73 | struct mm_walk *walk) | 73 | struct mm_walk *walk) |
| 74 | { | 74 | { |
| 75 | pud_t *pud; | 75 | pud_t *pud; |
| 76 | unsigned long next; | 76 | unsigned long next; |
| 77 | int err = 0; | 77 | int err = 0; |
| 78 | 78 | ||
| 79 | pud = pud_offset(pgd, addr); | 79 | pud = pud_offset(p4d, addr); |
| 80 | do { | 80 | do { |
| 81 | again: | 81 | again: |
| 82 | next = pud_addr_end(addr, end); | 82 | next = pud_addr_end(addr, end); |
| @@ -113,6 +113,32 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, | |||
| 113 | return err; | 113 | return err; |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, | ||
| 117 | struct mm_walk *walk) | ||
| 118 | { | ||
| 119 | p4d_t *p4d; | ||
| 120 | unsigned long next; | ||
| 121 | int err = 0; | ||
| 122 | |||
| 123 | p4d = p4d_offset(pgd, addr); | ||
| 124 | do { | ||
| 125 | next = p4d_addr_end(addr, end); | ||
| 126 | if (p4d_none_or_clear_bad(p4d)) { | ||
| 127 | if (walk->pte_hole) | ||
| 128 | err = walk->pte_hole(addr, next, walk); | ||
| 129 | if (err) | ||
| 130 | break; | ||
| 131 | continue; | ||
| 132 | } | ||
| 133 | if (walk->pmd_entry || walk->pte_entry) | ||
| 134 | err = walk_pud_range(p4d, addr, next, walk); | ||
| 135 | if (err) | ||
| 136 | break; | ||
| 137 | } while (p4d++, addr = next, addr != end); | ||
| 138 | |||
| 139 | return err; | ||
| 140 | } | ||
| 141 | |||
| 116 | static int walk_pgd_range(unsigned long addr, unsigned long end, | 142 | static int walk_pgd_range(unsigned long addr, unsigned long end, |
| 117 | struct mm_walk *walk) | 143 | struct mm_walk *walk) |
| 118 | { | 144 | { |
| @@ -131,7 +157,7 @@ static int walk_pgd_range(unsigned long addr, unsigned long end, | |||
| 131 | continue; | 157 | continue; |
| 132 | } | 158 | } |
| 133 | if (walk->pmd_entry || walk->pte_entry) | 159 | if (walk->pmd_entry || walk->pte_entry) |
| 134 | err = walk_pud_range(pgd, addr, next, walk); | 160 | err = walk_p4d_range(pgd, addr, next, walk); |
| 135 | if (err) | 161 | if (err) |
| 136 | break; | 162 | break; |
| 137 | } while (pgd++, addr = next, addr != end); | 163 | } while (pgd++, addr = next, addr != end); |
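Most of the mm/ churn in this series follows the same shape as walk_p4d_range() above: each walker gains one more loop level between pgd and pud, iterating with p4d_offset()/p4d_addr_end() and skipping entries that are none or bad. As a plain-C illustration of that nesting, here is a two-level radix-table walk rather than real page tables; all names (top_table, leaf_table, walk_top) are made up for the sketch.

#include <stddef.h>
#include <stdio.h>

#define TOP_ENTRIES  4
#define LEAF_ENTRIES 4

struct leaf_table { int present[LEAF_ENTRIES]; int value[LEAF_ENTRIES]; };
struct top_table  { struct leaf_table *entry[TOP_ENTRIES]; };

/* Inner level: analogous to walk_pud_range() iterating within one p4d entry. */
static void walk_leaf(const struct leaf_table *lt, void (*fn)(int idx, int val))
{
        for (size_t i = 0; i < LEAF_ENTRIES; i++)
                if (lt->present[i])              /* skip "none" entries */
                        fn((int)i, lt->value[i]);
}

/* Outer level: analogous to walk_p4d_range() skipping empty upper entries
 * and descending one level for the rest. */
static void walk_top(const struct top_table *tt, void (*fn)(int idx, int val))
{
        for (size_t i = 0; i < TOP_ENTRIES; i++)
                if (tt->entry[i])
                        walk_leaf(tt->entry[i], fn);
}

static void print_entry(int idx, int val)
{
        printf("leaf %d -> %d\n", idx, val);
}

int main(void)
{
        struct leaf_table lt = { .present = { 1, 0, 1, 0 }, .value = { 10, 0, 30, 0 } };
        struct top_table tt = { .entry = { &lt, NULL, NULL, NULL } };

        walk_top(&tt, print_entry);
        return 0;
}

On architectures where the p4d level is folded, p4d_offset() simply returns the pgd entry cast to a p4d, so the extra loop in the real walkers executes exactly once and costs nothing.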
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index 4ed5908c65b0..c99d9512a45b 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c | |||
| @@ -22,6 +22,12 @@ void pgd_clear_bad(pgd_t *pgd) | |||
| 22 | pgd_clear(pgd); | 22 | pgd_clear(pgd); |
| 23 | } | 23 | } |
| 24 | 24 | ||
| 25 | void p4d_clear_bad(p4d_t *p4d) | ||
| 26 | { | ||
| 27 | p4d_ERROR(*p4d); | ||
| 28 | p4d_clear(p4d); | ||
| 29 | } | ||
| 30 | |||
| 25 | void pud_clear_bad(pud_t *pud) | 31 | void pud_clear_bad(pud_t *pud) |
| 26 | { | 32 | { |
| 27 | pud_ERROR(*pud); | 33 | pud_ERROR(*pud); |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
| @@ -684,6 +684,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) | |||
| 684 | pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) | 684 | pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) |
| 685 | { | 685 | { |
| 686 | pgd_t *pgd; | 686 | pgd_t *pgd; |
| 687 | p4d_t *p4d; | ||
| 687 | pud_t *pud; | 688 | pud_t *pud; |
| 688 | pmd_t *pmd = NULL; | 689 | pmd_t *pmd = NULL; |
| 689 | pmd_t pmde; | 690 | pmd_t pmde; |
| @@ -692,7 +693,11 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) | |||
| 692 | if (!pgd_present(*pgd)) | 693 | if (!pgd_present(*pgd)) |
| 693 | goto out; | 694 | goto out; |
| 694 | 695 | ||
| 695 | pud = pud_offset(pgd, address); | 696 | p4d = p4d_offset(pgd, address); |
| 697 | if (!p4d_present(*p4d)) | ||
| 698 | goto out; | ||
| 699 | |||
| 700 | pud = pud_offset(p4d, address); | ||
| 696 | if (!pud_present(*pud)) | 701 | if (!pud_present(*pud)) |
| 697 | goto out; | 702 | goto out; |
| 698 | 703 | ||
| @@ -1316,12 +1321,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
| 1316 | } | 1321 | } |
| 1317 | 1322 | ||
| 1318 | while (page_vma_mapped_walk(&pvmw)) { | 1323 | while (page_vma_mapped_walk(&pvmw)) { |
| 1319 | subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); | ||
| 1320 | address = pvmw.address; | ||
| 1321 | |||
| 1322 | /* Unexpected PMD-mapped THP? */ | ||
| 1323 | VM_BUG_ON_PAGE(!pvmw.pte, page); | ||
| 1324 | |||
| 1325 | /* | 1324 | /* |
| 1326 | * If the page is mlock()d, we cannot swap it out. | 1325 | * If the page is mlock()d, we cannot swap it out. |
| 1327 | * If it's recently referenced (perhaps page_referenced | 1326 | * If it's recently referenced (perhaps page_referenced |
| @@ -1345,6 +1344,13 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
| 1345 | continue; | 1344 | continue; |
| 1346 | } | 1345 | } |
| 1347 | 1346 | ||
| 1347 | /* Unexpected PMD-mapped THP? */ | ||
| 1348 | VM_BUG_ON_PAGE(!pvmw.pte, page); | ||
| 1349 | |||
| 1350 | subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); | ||
| 1351 | address = pvmw.address; | ||
| 1352 | |||
| 1353 | |||
| 1348 | if (!(flags & TTU_IGNORE_ACCESS)) { | 1354 | if (!(flags & TTU_IGNORE_ACCESS)) { |
| 1349 | if (ptep_clear_flush_young_notify(vma, address, | 1355 | if (ptep_clear_flush_young_notify(vma, address, |
| 1350 | pvmw.pte)) { | 1356 | pvmw.pte)) { |
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 574c67b663fe..a56c3989f773 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c | |||
| @@ -196,9 +196,9 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) | |||
| 196 | return pmd; | 196 | return pmd; |
| 197 | } | 197 | } |
| 198 | 198 | ||
| 199 | pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node) | 199 | pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node) |
| 200 | { | 200 | { |
| 201 | pud_t *pud = pud_offset(pgd, addr); | 201 | pud_t *pud = pud_offset(p4d, addr); |
| 202 | if (pud_none(*pud)) { | 202 | if (pud_none(*pud)) { |
| 203 | void *p = vmemmap_alloc_block(PAGE_SIZE, node); | 203 | void *p = vmemmap_alloc_block(PAGE_SIZE, node); |
| 204 | if (!p) | 204 | if (!p) |
| @@ -208,6 +208,18 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node) | |||
| 208 | return pud; | 208 | return pud; |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node) | ||
| 212 | { | ||
| 213 | p4d_t *p4d = p4d_offset(pgd, addr); | ||
| 214 | if (p4d_none(*p4d)) { | ||
| 215 | void *p = vmemmap_alloc_block(PAGE_SIZE, node); | ||
| 216 | if (!p) | ||
| 217 | return NULL; | ||
| 218 | p4d_populate(&init_mm, p4d, p); | ||
| 219 | } | ||
| 220 | return p4d; | ||
| 221 | } | ||
| 222 | |||
| 211 | pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node) | 223 | pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node) |
| 212 | { | 224 | { |
| 213 | pgd_t *pgd = pgd_offset_k(addr); | 225 | pgd_t *pgd = pgd_offset_k(addr); |
| @@ -225,6 +237,7 @@ int __meminit vmemmap_populate_basepages(unsigned long start, | |||
| 225 | { | 237 | { |
| 226 | unsigned long addr = start; | 238 | unsigned long addr = start; |
| 227 | pgd_t *pgd; | 239 | pgd_t *pgd; |
| 240 | p4d_t *p4d; | ||
| 228 | pud_t *pud; | 241 | pud_t *pud; |
| 229 | pmd_t *pmd; | 242 | pmd_t *pmd; |
| 230 | pte_t *pte; | 243 | pte_t *pte; |
| @@ -233,7 +246,10 @@ int __meminit vmemmap_populate_basepages(unsigned long start, | |||
| 233 | pgd = vmemmap_pgd_populate(addr, node); | 246 | pgd = vmemmap_pgd_populate(addr, node); |
| 234 | if (!pgd) | 247 | if (!pgd) |
| 235 | return -ENOMEM; | 248 | return -ENOMEM; |
| 236 | pud = vmemmap_pud_populate(pgd, addr, node); | 249 | p4d = vmemmap_p4d_populate(pgd, addr, node); |
| 250 | if (!p4d) | ||
| 251 | return -ENOMEM; | ||
| 252 | pud = vmemmap_pud_populate(p4d, addr, node); | ||
| 237 | if (!pud) | 253 | if (!pud) |
| 238 | return -ENOMEM; | 254 | return -ENOMEM; |
| 239 | pmd = vmemmap_pmd_populate(pud, addr, node); | 255 | pmd = vmemmap_pmd_populate(pud, addr, node); |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 521ef9b6064f..178130880b90 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
| @@ -1517,7 +1517,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, | |||
| 1517 | return 0; | 1517 | return 0; |
| 1518 | } | 1518 | } |
| 1519 | 1519 | ||
| 1520 | static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd, | 1520 | static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d, |
| 1521 | unsigned long addr, unsigned long end, | 1521 | unsigned long addr, unsigned long end, |
| 1522 | swp_entry_t entry, struct page *page) | 1522 | swp_entry_t entry, struct page *page) |
| 1523 | { | 1523 | { |
| @@ -1525,7 +1525,7 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd, | |||
| 1525 | unsigned long next; | 1525 | unsigned long next; |
| 1526 | int ret; | 1526 | int ret; |
| 1527 | 1527 | ||
| 1528 | pud = pud_offset(pgd, addr); | 1528 | pud = pud_offset(p4d, addr); |
| 1529 | do { | 1529 | do { |
| 1530 | next = pud_addr_end(addr, end); | 1530 | next = pud_addr_end(addr, end); |
| 1531 | if (pud_none_or_clear_bad(pud)) | 1531 | if (pud_none_or_clear_bad(pud)) |
| @@ -1537,6 +1537,26 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd, | |||
| 1537 | return 0; | 1537 | return 0; |
| 1538 | } | 1538 | } |
| 1539 | 1539 | ||
| 1540 | static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd, | ||
| 1541 | unsigned long addr, unsigned long end, | ||
| 1542 | swp_entry_t entry, struct page *page) | ||
| 1543 | { | ||
| 1544 | p4d_t *p4d; | ||
| 1545 | unsigned long next; | ||
| 1546 | int ret; | ||
| 1547 | |||
| 1548 | p4d = p4d_offset(pgd, addr); | ||
| 1549 | do { | ||
| 1550 | next = p4d_addr_end(addr, end); | ||
| 1551 | if (p4d_none_or_clear_bad(p4d)) | ||
| 1552 | continue; | ||
| 1553 | ret = unuse_pud_range(vma, p4d, addr, next, entry, page); | ||
| 1554 | if (ret) | ||
| 1555 | return ret; | ||
| 1556 | } while (p4d++, addr = next, addr != end); | ||
| 1557 | return 0; | ||
| 1558 | } | ||
| 1559 | |||
| 1540 | static int unuse_vma(struct vm_area_struct *vma, | 1560 | static int unuse_vma(struct vm_area_struct *vma, |
| 1541 | swp_entry_t entry, struct page *page) | 1561 | swp_entry_t entry, struct page *page) |
| 1542 | { | 1562 | { |
| @@ -1560,7 +1580,7 @@ static int unuse_vma(struct vm_area_struct *vma, | |||
| 1560 | next = pgd_addr_end(addr, end); | 1580 | next = pgd_addr_end(addr, end); |
| 1561 | if (pgd_none_or_clear_bad(pgd)) | 1581 | if (pgd_none_or_clear_bad(pgd)) |
| 1562 | continue; | 1582 | continue; |
| 1563 | ret = unuse_pud_range(vma, pgd, addr, next, entry, page); | 1583 | ret = unuse_p4d_range(vma, pgd, addr, next, entry, page); |
| 1564 | if (ret) | 1584 | if (ret) |
| 1565 | return ret; | 1585 | return ret; |
| 1566 | } while (pgd++, addr = next, addr != end); | 1586 | } while (pgd++, addr = next, addr != end); |
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 479e631d43c2..8bcb501bce60 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c | |||
| @@ -128,19 +128,22 @@ out_unlock: | |||
| 128 | static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address) | 128 | static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address) |
| 129 | { | 129 | { |
| 130 | pgd_t *pgd; | 130 | pgd_t *pgd; |
| 131 | p4d_t *p4d; | ||
| 131 | pud_t *pud; | 132 | pud_t *pud; |
| 132 | pmd_t *pmd = NULL; | ||
| 133 | 133 | ||
| 134 | pgd = pgd_offset(mm, address); | 134 | pgd = pgd_offset(mm, address); |
| 135 | pud = pud_alloc(mm, pgd, address); | 135 | p4d = p4d_alloc(mm, pgd, address); |
| 136 | if (pud) | 136 | if (!p4d) |
| 137 | /* | 137 | return NULL; |
| 138 | * Note that we didn't run this because the pmd was | 138 | pud = pud_alloc(mm, p4d, address); |
| 139 | * missing, the *pmd may be already established and in | 139 | if (!pud) |
| 140 | * turn it may also be a trans_huge_pmd. | 140 | return NULL; |
| 141 | */ | 141 | /* |
| 142 | pmd = pmd_alloc(mm, pud, address); | 142 | * Note that we didn't run this because the pmd was |
| 143 | return pmd; | 143 | * missing, the *pmd may be already established and in |
| 144 | * turn it may also be a trans_huge_pmd. | ||
| 145 | */ | ||
| 146 | return pmd_alloc(mm, pud, address); | ||
| 144 | } | 147 | } |
| 145 | 148 | ||
| 146 | #ifdef CONFIG_HUGETLB_PAGE | 149 | #ifdef CONFIG_HUGETLB_PAGE |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index b4024d688f38..0dd80222b20b 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
| @@ -86,12 +86,12 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) | |||
| 86 | } while (pmd++, addr = next, addr != end); | 86 | } while (pmd++, addr = next, addr != end); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end) | 89 | static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end) |
| 90 | { | 90 | { |
| 91 | pud_t *pud; | 91 | pud_t *pud; |
| 92 | unsigned long next; | 92 | unsigned long next; |
| 93 | 93 | ||
| 94 | pud = pud_offset(pgd, addr); | 94 | pud = pud_offset(p4d, addr); |
| 95 | do { | 95 | do { |
| 96 | next = pud_addr_end(addr, end); | 96 | next = pud_addr_end(addr, end); |
| 97 | if (pud_clear_huge(pud)) | 97 | if (pud_clear_huge(pud)) |
| @@ -102,6 +102,22 @@ static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end) | |||
| 102 | } while (pud++, addr = next, addr != end); | 102 | } while (pud++, addr = next, addr != end); |
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end) | ||
| 106 | { | ||
| 107 | p4d_t *p4d; | ||
| 108 | unsigned long next; | ||
| 109 | |||
| 110 | p4d = p4d_offset(pgd, addr); | ||
| 111 | do { | ||
| 112 | next = p4d_addr_end(addr, end); | ||
| 113 | if (p4d_clear_huge(p4d)) | ||
| 114 | continue; | ||
| 115 | if (p4d_none_or_clear_bad(p4d)) | ||
| 116 | continue; | ||
| 117 | vunmap_pud_range(p4d, addr, next); | ||
| 118 | } while (p4d++, addr = next, addr != end); | ||
| 119 | } | ||
| 120 | |||
| 105 | static void vunmap_page_range(unsigned long addr, unsigned long end) | 121 | static void vunmap_page_range(unsigned long addr, unsigned long end) |
| 106 | { | 122 | { |
| 107 | pgd_t *pgd; | 123 | pgd_t *pgd; |
| @@ -113,7 +129,7 @@ static void vunmap_page_range(unsigned long addr, unsigned long end) | |||
| 113 | next = pgd_addr_end(addr, end); | 129 | next = pgd_addr_end(addr, end); |
| 114 | if (pgd_none_or_clear_bad(pgd)) | 130 | if (pgd_none_or_clear_bad(pgd)) |
| 115 | continue; | 131 | continue; |
| 116 | vunmap_pud_range(pgd, addr, next); | 132 | vunmap_p4d_range(pgd, addr, next); |
| 117 | } while (pgd++, addr = next, addr != end); | 133 | } while (pgd++, addr = next, addr != end); |
| 118 | } | 134 | } |
| 119 | 135 | ||
| @@ -160,13 +176,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, | |||
| 160 | return 0; | 176 | return 0; |
| 161 | } | 177 | } |
| 162 | 178 | ||
| 163 | static int vmap_pud_range(pgd_t *pgd, unsigned long addr, | 179 | static int vmap_pud_range(p4d_t *p4d, unsigned long addr, |
| 164 | unsigned long end, pgprot_t prot, struct page **pages, int *nr) | 180 | unsigned long end, pgprot_t prot, struct page **pages, int *nr) |
| 165 | { | 181 | { |
| 166 | pud_t *pud; | 182 | pud_t *pud; |
| 167 | unsigned long next; | 183 | unsigned long next; |
| 168 | 184 | ||
| 169 | pud = pud_alloc(&init_mm, pgd, addr); | 185 | pud = pud_alloc(&init_mm, p4d, addr); |
| 170 | if (!pud) | 186 | if (!pud) |
| 171 | return -ENOMEM; | 187 | return -ENOMEM; |
| 172 | do { | 188 | do { |
| @@ -177,6 +193,23 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr, | |||
| 177 | return 0; | 193 | return 0; |
| 178 | } | 194 | } |
| 179 | 195 | ||
| 196 | static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, | ||
| 197 | unsigned long end, pgprot_t prot, struct page **pages, int *nr) | ||
| 198 | { | ||
| 199 | p4d_t *p4d; | ||
| 200 | unsigned long next; | ||
| 201 | |||
| 202 | p4d = p4d_alloc(&init_mm, pgd, addr); | ||
| 203 | if (!p4d) | ||
| 204 | return -ENOMEM; | ||
| 205 | do { | ||
| 206 | next = p4d_addr_end(addr, end); | ||
| 207 | if (vmap_pud_range(p4d, addr, next, prot, pages, nr)) | ||
| 208 | return -ENOMEM; | ||
| 209 | } while (p4d++, addr = next, addr != end); | ||
| 210 | return 0; | ||
| 211 | } | ||
| 212 | |||
| 180 | /* | 213 | /* |
| 181 | * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and | 214 | * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and |
| 182 | * will have pfns corresponding to the "pages" array. | 215 | * will have pfns corresponding to the "pages" array. |
| @@ -196,7 +229,7 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end, | |||
| 196 | pgd = pgd_offset_k(addr); | 229 | pgd = pgd_offset_k(addr); |
| 197 | do { | 230 | do { |
| 198 | next = pgd_addr_end(addr, end); | 231 | next = pgd_addr_end(addr, end); |
| 199 | err = vmap_pud_range(pgd, addr, next, prot, pages, &nr); | 232 | err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr); |
| 200 | if (err) | 233 | if (err) |
| 201 | return err; | 234 | return err; |
| 202 | } while (pgd++, addr = next, addr != end); | 235 | } while (pgd++, addr = next, addr != end); |
| @@ -237,6 +270,10 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) | |||
| 237 | unsigned long addr = (unsigned long) vmalloc_addr; | 270 | unsigned long addr = (unsigned long) vmalloc_addr; |
| 238 | struct page *page = NULL; | 271 | struct page *page = NULL; |
| 239 | pgd_t *pgd = pgd_offset_k(addr); | 272 | pgd_t *pgd = pgd_offset_k(addr); |
| 273 | p4d_t *p4d; | ||
| 274 | pud_t *pud; | ||
| 275 | pmd_t *pmd; | ||
| 276 | pte_t *ptep, pte; | ||
| 240 | 277 | ||
| 241 | /* | 278 | /* |
| 242 | * XXX we might need to change this if we add VIRTUAL_BUG_ON for | 279 | * XXX we might need to change this if we add VIRTUAL_BUG_ON for |
| @@ -244,21 +281,23 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) | |||
| 244 | */ | 281 | */ |
| 245 | VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); | 282 | VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); |
| 246 | 283 | ||
| 247 | if (!pgd_none(*pgd)) { | 284 | if (pgd_none(*pgd)) |
| 248 | pud_t *pud = pud_offset(pgd, addr); | 285 | return NULL; |
| 249 | if (!pud_none(*pud)) { | 286 | p4d = p4d_offset(pgd, addr); |
| 250 | pmd_t *pmd = pmd_offset(pud, addr); | 287 | if (p4d_none(*p4d)) |
| 251 | if (!pmd_none(*pmd)) { | 288 | return NULL; |
| 252 | pte_t *ptep, pte; | 289 | pud = pud_offset(p4d, addr); |
| 253 | 290 | if (pud_none(*pud)) | |
| 254 | ptep = pte_offset_map(pmd, addr); | 291 | return NULL; |
| 255 | pte = *ptep; | 292 | pmd = pmd_offset(pud, addr); |
| 256 | if (pte_present(pte)) | 293 | if (pmd_none(*pmd)) |
| 257 | page = pte_page(pte); | 294 | return NULL; |
| 258 | pte_unmap(ptep); | 295 | |
| 259 | } | 296 | ptep = pte_offset_map(pmd, addr); |
| 260 | } | 297 | pte = *ptep; |
| 261 | } | 298 | if (pte_present(pte)) |
| 299 | page = pte_page(pte); | ||
| 300 | pte_unmap(ptep); | ||
| 262 | return page; | 301 | return page; |
| 263 | } | 302 | } |
| 264 | EXPORT_SYMBOL(vmalloc_to_page); | 303 | EXPORT_SYMBOL(vmalloc_to_page); |
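For reference, the five-level walk adopted by the mm/vmalloc.c changes above (pgd -> p4d -> pud -> pmd -> pte) can be written out without the diff noise. This is a minimal sketch, not part of the patch; walk_kernel_addr() is a hypothetical helper name, and it assumes kernel-internal headers and an already-populated kernel mapping.

/* Sketch only: descend an existing kernel mapping level by level,
 * in the same order vmalloc_to_page() uses after the p4d conversion. */
#include <linux/mm.h>

static struct page *walk_kernel_addr(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	struct page *page = NULL;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);
	return page;
}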
diff --git a/mm/vmstat.c b/mm/vmstat.c index 69f9aff39a2e..b1947f0cbee2 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
| @@ -1065,6 +1065,9 @@ const char * const vmstat_text[] = { | |||
| 1065 | "thp_split_page_failed", | 1065 | "thp_split_page_failed", |
| 1066 | "thp_deferred_split_page", | 1066 | "thp_deferred_split_page", |
| 1067 | "thp_split_pmd", | 1067 | "thp_split_pmd", |
| 1068 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD | ||
| 1069 | "thp_split_pud", | ||
| 1070 | #endif | ||
| 1068 | "thp_zero_page_alloc", | 1071 | "thp_zero_page_alloc", |
| 1069 | "thp_zero_page_alloc_failed", | 1072 | "thp_zero_page_alloc_failed", |
| 1070 | #endif | 1073 | #endif |
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 464e88599b9d..108533859a53 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c | |||
| @@ -230,6 +230,7 @@ enum { | |||
| 230 | Opt_osdkeepalivetimeout, | 230 | Opt_osdkeepalivetimeout, |
| 231 | Opt_mount_timeout, | 231 | Opt_mount_timeout, |
| 232 | Opt_osd_idle_ttl, | 232 | Opt_osd_idle_ttl, |
| 233 | Opt_osd_request_timeout, | ||
| 233 | Opt_last_int, | 234 | Opt_last_int, |
| 234 | /* int args above */ | 235 | /* int args above */ |
| 235 | Opt_fsid, | 236 | Opt_fsid, |
| @@ -256,6 +257,7 @@ static match_table_t opt_tokens = { | |||
| 256 | {Opt_osdkeepalivetimeout, "osdkeepalive=%d"}, | 257 | {Opt_osdkeepalivetimeout, "osdkeepalive=%d"}, |
| 257 | {Opt_mount_timeout, "mount_timeout=%d"}, | 258 | {Opt_mount_timeout, "mount_timeout=%d"}, |
| 258 | {Opt_osd_idle_ttl, "osd_idle_ttl=%d"}, | 259 | {Opt_osd_idle_ttl, "osd_idle_ttl=%d"}, |
| 260 | {Opt_osd_request_timeout, "osd_request_timeout=%d"}, | ||
| 259 | /* int args above */ | 261 | /* int args above */ |
| 260 | {Opt_fsid, "fsid=%s"}, | 262 | {Opt_fsid, "fsid=%s"}, |
| 261 | {Opt_name, "name=%s"}, | 263 | {Opt_name, "name=%s"}, |
| @@ -361,6 +363,7 @@ ceph_parse_options(char *options, const char *dev_name, | |||
| 361 | opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; | 363 | opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; |
| 362 | opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; | 364 | opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; |
| 363 | opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; | 365 | opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; |
| 366 | opt->osd_request_timeout = CEPH_OSD_REQUEST_TIMEOUT_DEFAULT; | ||
| 364 | 367 | ||
| 365 | /* get mon ip(s) */ | 368 | /* get mon ip(s) */ |
| 366 | /* ip1[:port1][,ip2[:port2]...] */ | 369 | /* ip1[:port1][,ip2[:port2]...] */ |
| @@ -473,6 +476,15 @@ ceph_parse_options(char *options, const char *dev_name, | |||
| 473 | } | 476 | } |
| 474 | opt->mount_timeout = msecs_to_jiffies(intval * 1000); | 477 | opt->mount_timeout = msecs_to_jiffies(intval * 1000); |
| 475 | break; | 478 | break; |
| 479 | case Opt_osd_request_timeout: | ||
| 480 | /* 0 is "wait forever" (i.e. infinite timeout) */ | ||
| 481 | if (intval < 0 || intval > INT_MAX / 1000) { | ||
| 482 | pr_err("osd_request_timeout out of range\n"); | ||
| 483 | err = -EINVAL; | ||
| 484 | goto out; | ||
| 485 | } | ||
| 486 | opt->osd_request_timeout = msecs_to_jiffies(intval * 1000); | ||
| 487 | break; | ||
| 476 | 488 | ||
| 477 | case Opt_share: | 489 | case Opt_share: |
| 478 | opt->flags &= ~CEPH_OPT_NOSHARE; | 490 | opt->flags &= ~CEPH_OPT_NOSHARE; |
| @@ -557,6 +569,9 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client) | |||
| 557 | if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT) | 569 | if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT) |
| 558 | seq_printf(m, "osdkeepalivetimeout=%d,", | 570 | seq_printf(m, "osdkeepalivetimeout=%d,", |
| 559 | jiffies_to_msecs(opt->osd_keepalive_timeout) / 1000); | 571 | jiffies_to_msecs(opt->osd_keepalive_timeout) / 1000); |
| 572 | if (opt->osd_request_timeout != CEPH_OSD_REQUEST_TIMEOUT_DEFAULT) | ||
| 573 | seq_printf(m, "osd_request_timeout=%d,", | ||
| 574 | jiffies_to_msecs(opt->osd_request_timeout) / 1000); | ||
| 560 | 575 | ||
| 561 | /* drop redundant comma */ | 576 | /* drop redundant comma */ |
| 562 | if (m->count != pos) | 577 | if (m->count != pos) |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index b65bbf9f45eb..e15ea9e4c495 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
| @@ -1709,6 +1709,8 @@ static void account_request(struct ceph_osd_request *req) | |||
| 1709 | 1709 | ||
| 1710 | req->r_flags |= CEPH_OSD_FLAG_ONDISK; | 1710 | req->r_flags |= CEPH_OSD_FLAG_ONDISK; |
| 1711 | atomic_inc(&req->r_osdc->num_requests); | 1711 | atomic_inc(&req->r_osdc->num_requests); |
| 1712 | |||
| 1713 | req->r_start_stamp = jiffies; | ||
| 1712 | } | 1714 | } |
| 1713 | 1715 | ||
| 1714 | static void submit_request(struct ceph_osd_request *req, bool wrlocked) | 1716 | static void submit_request(struct ceph_osd_request *req, bool wrlocked) |
| @@ -1789,6 +1791,14 @@ static void cancel_request(struct ceph_osd_request *req) | |||
| 1789 | ceph_osdc_put_request(req); | 1791 | ceph_osdc_put_request(req); |
| 1790 | } | 1792 | } |
| 1791 | 1793 | ||
| 1794 | static void abort_request(struct ceph_osd_request *req, int err) | ||
| 1795 | { | ||
| 1796 | dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); | ||
| 1797 | |||
| 1798 | cancel_map_check(req); | ||
| 1799 | complete_request(req, err); | ||
| 1800 | } | ||
| 1801 | |||
| 1792 | static void check_pool_dne(struct ceph_osd_request *req) | 1802 | static void check_pool_dne(struct ceph_osd_request *req) |
| 1793 | { | 1803 | { |
| 1794 | struct ceph_osd_client *osdc = req->r_osdc; | 1804 | struct ceph_osd_client *osdc = req->r_osdc; |
| @@ -2487,6 +2497,7 @@ static void handle_timeout(struct work_struct *work) | |||
| 2487 | container_of(work, struct ceph_osd_client, timeout_work.work); | 2497 | container_of(work, struct ceph_osd_client, timeout_work.work); |
| 2488 | struct ceph_options *opts = osdc->client->options; | 2498 | struct ceph_options *opts = osdc->client->options; |
| 2489 | unsigned long cutoff = jiffies - opts->osd_keepalive_timeout; | 2499 | unsigned long cutoff = jiffies - opts->osd_keepalive_timeout; |
| 2500 | unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout; | ||
| 2490 | LIST_HEAD(slow_osds); | 2501 | LIST_HEAD(slow_osds); |
| 2491 | struct rb_node *n, *p; | 2502 | struct rb_node *n, *p; |
| 2492 | 2503 | ||
| @@ -2502,15 +2513,23 @@ static void handle_timeout(struct work_struct *work) | |||
| 2502 | struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); | 2513 | struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); |
| 2503 | bool found = false; | 2514 | bool found = false; |
| 2504 | 2515 | ||
| 2505 | for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) { | 2516 | for (p = rb_first(&osd->o_requests); p; ) { |
| 2506 | struct ceph_osd_request *req = | 2517 | struct ceph_osd_request *req = |
| 2507 | rb_entry(p, struct ceph_osd_request, r_node); | 2518 | rb_entry(p, struct ceph_osd_request, r_node); |
| 2508 | 2519 | ||
| 2520 | p = rb_next(p); /* abort_request() */ | ||
| 2521 | |||
| 2509 | if (time_before(req->r_stamp, cutoff)) { | 2522 | if (time_before(req->r_stamp, cutoff)) { |
| 2510 | dout(" req %p tid %llu on osd%d is laggy\n", | 2523 | dout(" req %p tid %llu on osd%d is laggy\n", |
| 2511 | req, req->r_tid, osd->o_osd); | 2524 | req, req->r_tid, osd->o_osd); |
| 2512 | found = true; | 2525 | found = true; |
| 2513 | } | 2526 | } |
| 2527 | if (opts->osd_request_timeout && | ||
| 2528 | time_before(req->r_start_stamp, expiry_cutoff)) { | ||
| 2529 | pr_err_ratelimited("tid %llu on osd%d timeout\n", | ||
| 2530 | req->r_tid, osd->o_osd); | ||
| 2531 | abort_request(req, -ETIMEDOUT); | ||
| 2532 | } | ||
| 2514 | } | 2533 | } |
| 2515 | for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { | 2534 | for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { |
| 2516 | struct ceph_osd_linger_request *lreq = | 2535 | struct ceph_osd_linger_request *lreq = |
| @@ -2530,6 +2549,21 @@ static void handle_timeout(struct work_struct *work) | |||
| 2530 | list_move_tail(&osd->o_keepalive_item, &slow_osds); | 2549 | list_move_tail(&osd->o_keepalive_item, &slow_osds); |
| 2531 | } | 2550 | } |
| 2532 | 2551 | ||
| 2552 | if (opts->osd_request_timeout) { | ||
| 2553 | for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) { | ||
| 2554 | struct ceph_osd_request *req = | ||
| 2555 | rb_entry(p, struct ceph_osd_request, r_node); | ||
| 2556 | |||
| 2557 | p = rb_next(p); /* abort_request() */ | ||
| 2558 | |||
| 2559 | if (time_before(req->r_start_stamp, expiry_cutoff)) { | ||
| 2560 | pr_err_ratelimited("tid %llu on osd%d timeout\n", | ||
| 2561 | req->r_tid, osdc->homeless_osd.o_osd); | ||
| 2562 | abort_request(req, -ETIMEDOUT); | ||
| 2563 | } | ||
| 2564 | } | ||
| 2565 | } | ||
| 2566 | |||
| 2533 | if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds)) | 2567 | if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds)) |
| 2534 | maybe_request_map(osdc); | 2568 | maybe_request_map(osdc); |
| 2535 | 2569 | ||
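The handle_timeout() changes above hinge on a single jiffies comparison: a request whose r_start_stamp (recorded in account_request()) falls before "jiffies - osd_request_timeout" is aborted with -ETIMEDOUT, and a timeout of 0 disables the check. Below is a minimal sketch of that predicate, with a hypothetical helper name; it is not part of the patch and assumes <linux/jiffies.h>.

#include <linux/jiffies.h>
#include <linux/types.h>

/* Sketch only: true when a request started long enough ago that the
 * configured osd_request_timeout (in jiffies) has expired; 0 means
 * "wait forever" and never expires. */
static bool osd_req_expired(unsigned long start_stamp,
			    unsigned long osd_request_timeout)
{
	unsigned long expiry_cutoff = jiffies - osd_request_timeout;

	return osd_request_timeout &&
	       time_before(start_stamp, expiry_cutoff);
}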
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 6824c0ec8373..ffe9e904d4d1 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c | |||
| @@ -390,9 +390,8 @@ static struct crush_map *crush_decode(void *pbyval, void *end) | |||
| 390 | dout("crush decode tunable chooseleaf_stable = %d\n", | 390 | dout("crush decode tunable chooseleaf_stable = %d\n", |
| 391 | c->chooseleaf_stable); | 391 | c->chooseleaf_stable); |
| 392 | 392 | ||
| 393 | crush_finalize(c); | ||
| 394 | |||
| 395 | done: | 393 | done: |
| 394 | crush_finalize(c); | ||
| 396 | dout("crush_decode success\n"); | 395 | dout("crush_decode success\n"); |
| 397 | return c; | 396 | return c; |
| 398 | 397 | ||
| @@ -1380,7 +1379,6 @@ static int decode_new_up_state_weight(void **p, void *end, | |||
| 1380 | if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && | 1379 | if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && |
| 1381 | (xorstate & CEPH_OSD_EXISTS)) { | 1380 | (xorstate & CEPH_OSD_EXISTS)) { |
| 1382 | pr_info("osd%d does not exist\n", osd); | 1381 | pr_info("osd%d does not exist\n", osd); |
| 1383 | map->osd_weight[osd] = CEPH_OSD_IN; | ||
| 1384 | ret = set_primary_affinity(map, osd, | 1382 | ret = set_primary_affinity(map, osd, |
| 1385 | CEPH_OSD_DEFAULT_PRIMARY_AFFINITY); | 1383 | CEPH_OSD_DEFAULT_PRIMARY_AFFINITY); |
| 1386 | if (ret) | 1384 | if (ret) |
diff --git a/scripts/gcc-plugins/sancov_plugin.c b/scripts/gcc-plugins/sancov_plugin.c index 9b0b5cbc5b89..0f98634c20a0 100644 --- a/scripts/gcc-plugins/sancov_plugin.c +++ b/scripts/gcc-plugins/sancov_plugin.c | |||
| @@ -133,7 +133,7 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gc | |||
| 133 | #if BUILDING_GCC_VERSION < 6000 | 133 | #if BUILDING_GCC_VERSION < 6000 |
| 134 | register_callback(plugin_name, PLUGIN_START_UNIT, &sancov_start_unit, NULL); | 134 | register_callback(plugin_name, PLUGIN_START_UNIT, &sancov_start_unit, NULL); |
| 135 | register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_sancov); | 135 | register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_sancov); |
| 136 | register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &sancov_plugin_pass_info); | 136 | register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &sancov_pass_info); |
| 137 | #endif | 137 | #endif |
| 138 | 138 | ||
| 139 | return 0; | 139 | return 0; |
diff --git a/scripts/module-common.lds b/scripts/module-common.lds index cf7e52e4781b..9b6e246a45d0 100644 --- a/scripts/module-common.lds +++ b/scripts/module-common.lds | |||
| @@ -22,4 +22,6 @@ SECTIONS { | |||
| 22 | 22 | ||
| 23 | . = ALIGN(8); | 23 | . = ALIGN(8); |
| 24 | .init_array 0 : { *(SORT(.init_array.*)) *(.init_array) } | 24 | .init_array 0 : { *(SORT(.init_array.*)) *(.init_array) } |
| 25 | |||
| 26 | __jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) } | ||
| 25 | } | 27 | } |
diff --git a/scripts/spelling.txt b/scripts/spelling.txt index 0458b037c8a1..0545f5a8cabe 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt | |||
| @@ -372,6 +372,8 @@ disassocation||disassociation | |||
| 372 | disapear||disappear | 372 | disapear||disappear |
| 373 | disapeared||disappeared | 373 | disapeared||disappeared |
| 374 | disappared||disappeared | 374 | disappared||disappeared |
| 375 | disble||disable | ||
| 376 | disbled||disabled | ||
| 375 | disconnet||disconnect | 377 | disconnet||disconnect |
| 376 | discontinous||discontinuous | 378 | discontinous||discontinuous |
| 377 | dispertion||dispersion | 379 | dispertion||dispersion |
| @@ -732,6 +734,7 @@ oustanding||outstanding | |||
| 732 | overaall||overall | 734 | overaall||overall |
| 733 | overhread||overhead | 735 | overhread||overhead |
| 734 | overlaping||overlapping | 736 | overlaping||overlapping |
| 737 | overide||override | ||
| 735 | overrided||overridden | 738 | overrided||overridden |
| 736 | overriden||overridden | 739 | overriden||overridden |
| 737 | overun||overrun | 740 | overun||overrun |
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c index ec1067a679da..08b1399d1da2 100644 --- a/sound/soc/amd/acp-pcm-dma.c +++ b/sound/soc/amd/acp-pcm-dma.c | |||
| @@ -89,7 +89,7 @@ static void acp_reg_write(u32 val, void __iomem *acp_mmio, u32 reg) | |||
| 89 | writel(val, acp_mmio + (reg * 4)); | 89 | writel(val, acp_mmio + (reg * 4)); |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | /* Configure a given dma channel parameters - enable/disble, | 92 | /* Configure a given dma channel parameters - enable/disable, |
| 93 | * number of descriptors, priority | 93 | * number of descriptors, priority |
| 94 | */ | 94 | */ |
| 95 | static void config_acp_dma_channel(void __iomem *acp_mmio, u8 ch_num, | 95 | static void config_acp_dma_channel(void __iomem *acp_mmio, u8 ch_num, |
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 11c8d9bc762e..5d19fdf80292 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c | |||
| @@ -1387,7 +1387,7 @@ static bool pci_data_iowrite(u16 port, u32 mask, u32 val) | |||
| 1387 | /* Allow writing to any other BAR, or expansion ROM */ | 1387 | /* Allow writing to any other BAR, or expansion ROM */ |
| 1388 | iowrite(portoff, val, mask, &d->config_words[reg]); | 1388 | iowrite(portoff, val, mask, &d->config_words[reg]); |
| 1389 | return true; | 1389 | return true; |
| 1390 | /* We let them overide latency timer and cacheline size */ | 1390 | /* We let them override latency timer and cacheline size */ |
| 1391 | } else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) { | 1391 | } else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) { |
| 1392 | /* Only let them change the first two fields. */ | 1392 | /* Only let them change the first two fields. */ |
| 1393 | if (mask == 0xFFFFFFFF) | 1393 | if (mask == 0xFFFFFFFF) |
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index e2efddf10231..1f5300e56b44 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile | |||
| @@ -132,7 +132,7 @@ else | |||
| 132 | Q = @ | 132 | Q = @ |
| 133 | endif | 133 | endif |
| 134 | 134 | ||
| 135 | # Disable command line variables (CFLAGS) overide from top | 135 | # Disable command line variables (CFLAGS) override from top |
| 136 | # level Makefile (perf), otherwise build Makefile will get | 136 | # level Makefile (perf), otherwise build Makefile will get |
| 137 | # the same command line setup. | 137 | # the same command line setup. |
| 138 | MAKEOVERRIDES= | 138 | MAKEOVERRIDES= |
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile index 47076b15eebe..9b8555ea3459 100644 --- a/tools/lib/traceevent/Makefile +++ b/tools/lib/traceevent/Makefile | |||
| @@ -135,7 +135,7 @@ else | |||
| 135 | Q = @ | 135 | Q = @ |
| 136 | endif | 136 | endif |
| 137 | 137 | ||
| 138 | # Disable command line variables (CFLAGS) overide from top | 138 | # Disable command line variables (CFLAGS) override from top |
| 139 | # level Makefile (perf), otherwise build Makefile will get | 139 | # level Makefile (perf), otherwise build Makefile will get |
| 140 | # the same command line setup. | 140 | # the same command line setup. |
| 141 | MAKEOVERRIDES= | 141 | MAKEOVERRIDES= |
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h index 66342804161c..0c03538df74c 100644 --- a/tools/lib/traceevent/event-parse.h +++ b/tools/lib/traceevent/event-parse.h | |||
| @@ -140,7 +140,7 @@ struct pevent_plugin_option { | |||
| 140 | * struct pevent_plugin_option PEVENT_PLUGIN_OPTIONS[] = { | 140 | * struct pevent_plugin_option PEVENT_PLUGIN_OPTIONS[] = { |
| 141 | * { | 141 | * { |
| 142 | * .name = "option-name", | 142 | * .name = "option-name", |
| 143 | * .plugin_alias = "overide-file-name", (optional) | 143 | * .plugin_alias = "override-file-name", (optional) |
| 144 | * .description = "description of option to show users", | 144 | * .description = "description of option to show users", |
| 145 | * }, | 145 | * }, |
| 146 | * { | 146 | * { |
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index 4cfdbb5b6967..066086dd59a8 100644 --- a/tools/objtool/builtin-check.c +++ b/tools/objtool/builtin-check.c | |||
| @@ -805,11 +805,20 @@ static struct rela *find_switch_table(struct objtool_file *file, | |||
| 805 | insn->jump_dest->offset > orig_insn->offset)) | 805 | insn->jump_dest->offset > orig_insn->offset)) |
| 806 | break; | 806 | break; |
| 807 | 807 | ||
| 808 | /* look for a relocation which references .rodata */ | ||
| 808 | text_rela = find_rela_by_dest_range(insn->sec, insn->offset, | 809 | text_rela = find_rela_by_dest_range(insn->sec, insn->offset, |
| 809 | insn->len); | 810 | insn->len); |
| 810 | if (text_rela && text_rela->sym == file->rodata->sym) | 811 | if (!text_rela || text_rela->sym != file->rodata->sym) |
| 811 | return find_rela_by_dest(file->rodata, | 812 | continue; |
| 812 | text_rela->addend); | 813 | |
| 814 | /* | ||
| 815 | * Make sure the .rodata address isn't associated with a | ||
| 816 | * symbol. gcc jump tables are anonymous data. | ||
| 817 | */ | ||
| 818 | if (find_symbol_containing(file->rodata, text_rela->addend)) | ||
| 819 | continue; | ||
| 820 | |||
| 821 | return find_rela_by_dest(file->rodata, text_rela->addend); | ||
| 813 | } | 822 | } |
| 814 | 823 | ||
| 815 | return NULL; | 824 | return NULL; |
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 0d7983ac63ef..d897702ce742 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c | |||
| @@ -85,6 +85,18 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset) | |||
| 85 | return NULL; | 85 | return NULL; |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | struct symbol *find_symbol_containing(struct section *sec, unsigned long offset) | ||
| 89 | { | ||
| 90 | struct symbol *sym; | ||
| 91 | |||
| 92 | list_for_each_entry(sym, &sec->symbol_list, list) | ||
| 93 | if (sym->type != STT_SECTION && | ||
| 94 | offset >= sym->offset && offset < sym->offset + sym->len) | ||
| 95 | return sym; | ||
| 96 | |||
| 97 | return NULL; | ||
| 98 | } | ||
| 99 | |||
| 88 | struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, | 100 | struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, |
| 89 | unsigned int len) | 101 | unsigned int len) |
| 90 | { | 102 | { |
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index aa1ff6596684..731973e1a3f5 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h | |||
| @@ -79,6 +79,7 @@ struct elf { | |||
| 79 | struct elf *elf_open(const char *name); | 79 | struct elf *elf_open(const char *name); |
| 80 | struct section *find_section_by_name(struct elf *elf, const char *name); | 80 | struct section *find_section_by_name(struct elf *elf, const char *name); |
| 81 | struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset); | 81 | struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset); |
| 82 | struct symbol *find_symbol_containing(struct section *sec, unsigned long offset); | ||
| 82 | struct rela *find_rela_by_dest(struct section *sec, unsigned long offset); | 83 | struct rela *find_rela_by_dest(struct section *sec, unsigned long offset); |
| 83 | struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, | 84 | struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, |
| 84 | unsigned int len); | 85 | unsigned int len); |
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c index 7913363bde5c..4f3c758d875d 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c | |||
| @@ -31,7 +31,7 @@ | |||
| 31 | #error Instruction buffer size too small | 31 | #error Instruction buffer size too small |
| 32 | #endif | 32 | #endif |
| 33 | 33 | ||
| 34 | /* Based on branch_type() from perf_event_intel_lbr.c */ | 34 | /* Based on branch_type() from arch/x86/events/intel/lbr.c */ |
| 35 | static void intel_pt_insn_decoder(struct insn *insn, | 35 | static void intel_pt_insn_decoder(struct insn *insn, |
| 36 | struct intel_pt_insn *intel_pt_insn) | 36 | struct intel_pt_insn *intel_pt_insn) |
| 37 | { | 37 | { |
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 6e4eb2fc2d1e..0c8b61f8398e 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl | |||
| @@ -1880,6 +1880,7 @@ sub get_grub_index { | |||
| 1880 | sub wait_for_input | 1880 | sub wait_for_input |
| 1881 | { | 1881 | { |
| 1882 | my ($fp, $time) = @_; | 1882 | my ($fp, $time) = @_; |
| 1883 | my $start_time; | ||
| 1883 | my $rin; | 1884 | my $rin; |
| 1884 | my $rout; | 1885 | my $rout; |
| 1885 | my $nr; | 1886 | my $nr; |
| @@ -1895,17 +1896,22 @@ sub wait_for_input | |||
| 1895 | vec($rin, fileno($fp), 1) = 1; | 1896 | vec($rin, fileno($fp), 1) = 1; |
| 1896 | vec($rin, fileno(\*STDIN), 1) = 1; | 1897 | vec($rin, fileno(\*STDIN), 1) = 1; |
| 1897 | 1898 | ||
| 1899 | $start_time = time; | ||
| 1900 | |||
| 1898 | while (1) { | 1901 | while (1) { |
| 1899 | $nr = select($rout=$rin, undef, undef, $time); | 1902 | $nr = select($rout=$rin, undef, undef, $time); |
| 1900 | 1903 | ||
| 1901 | if ($nr <= 0) { | 1904 | last if ($nr <= 0); |
| 1902 | return undef; | ||
| 1903 | } | ||
| 1904 | 1905 | ||
| 1905 | # copy data from stdin to the console | 1906 | # copy data from stdin to the console |
| 1906 | if (vec($rout, fileno(\*STDIN), 1) == 1) { | 1907 | if (vec($rout, fileno(\*STDIN), 1) == 1) { |
| 1907 | sysread(\*STDIN, $buf, 1000); | 1908 | $nr = sysread(\*STDIN, $buf, 1000); |
| 1908 | syswrite($fp, $buf, 1000); | 1909 | syswrite($fp, $buf, $nr) if ($nr > 0); |
| 1910 | } | ||
| 1911 | |||
| 1912 | # The timeout is based on time waiting for the fp data | ||
| 1913 | if (vec($rout, fileno($fp), 1) != 1) { | ||
| 1914 | last if (defined($time) && (time - $start_time > $time)); | ||
| 1909 | next; | 1915 | next; |
| 1910 | } | 1916 | } |
| 1911 | 1917 | ||
| @@ -1917,12 +1923,11 @@ sub wait_for_input | |||
| 1917 | last if ($ch eq "\n"); | 1923 | last if ($ch eq "\n"); |
| 1918 | } | 1924 | } |
| 1919 | 1925 | ||
| 1920 | if (!length($line)) { | 1926 | last if (!length($line)); |
| 1921 | return undef; | ||
| 1922 | } | ||
| 1923 | 1927 | ||
| 1924 | return $line; | 1928 | return $line; |
| 1925 | } | 1929 | } |
| 1930 | return undef; | ||
| 1926 | } | 1931 | } |
| 1927 | 1932 | ||
| 1928 | sub reboot_to { | 1933 | sub reboot_to { |
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index f11315bedefc..6a9480c03cbd 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | 1 | ||
| 2 | CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address | 2 | CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address |
| 3 | LDFLAGS += -lpthread -lurcu | 3 | LDFLAGS += -fsanitize=address |
| 4 | LDLIBS+= -lpthread -lurcu | ||
| 4 | TARGETS = main idr-test multiorder | 5 | TARGETS = main idr-test multiorder |
| 5 | CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o | 6 | CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o |
| 6 | OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ | 7 | OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ |
| @@ -10,23 +11,25 @@ ifndef SHIFT | |||
| 10 | SHIFT=3 | 11 | SHIFT=3 |
| 11 | endif | 12 | endif |
| 12 | 13 | ||
| 14 | ifeq ($(BUILD), 32) | ||
| 15 | CFLAGS += -m32 | ||
| 16 | LDFLAGS += -m32 | ||
| 17 | endif | ||
| 18 | |||
| 13 | targets: mapshift $(TARGETS) | 19 | targets: mapshift $(TARGETS) |
| 14 | 20 | ||
| 15 | main: $(OFILES) | 21 | main: $(OFILES) |
| 16 | $(CC) $(CFLAGS) $(LDFLAGS) $^ -o main | ||
| 17 | 22 | ||
| 18 | idr-test: idr-test.o $(CORE_OFILES) | 23 | idr-test: idr-test.o $(CORE_OFILES) |
| 19 | $(CC) $(CFLAGS) $(LDFLAGS) $^ -o idr-test | ||
| 20 | 24 | ||
| 21 | multiorder: multiorder.o $(CORE_OFILES) | 25 | multiorder: multiorder.o $(CORE_OFILES) |
| 22 | $(CC) $(CFLAGS) $(LDFLAGS) $^ -o multiorder | ||
| 23 | 26 | ||
| 24 | clean: | 27 | clean: |
| 25 | $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h | 28 | $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h |
| 26 | 29 | ||
| 27 | vpath %.c ../../lib | 30 | vpath %.c ../../lib |
| 28 | 31 | ||
| 29 | $(OFILES): *.h */*.h generated/map-shift.h \ | 32 | $(OFILES): Makefile *.h */*.h generated/map-shift.h \ |
| 30 | ../../include/linux/*.h \ | 33 | ../../include/linux/*.h \ |
| 31 | ../../include/asm/*.h \ | 34 | ../../include/asm/*.h \ |
| 32 | ../../../include/linux/radix-tree.h \ | 35 | ../../../include/linux/radix-tree.h \ |
| @@ -41,7 +44,7 @@ idr.c: ../../../lib/idr.c | |||
| 41 | .PHONY: mapshift | 44 | .PHONY: mapshift |
| 42 | 45 | ||
| 43 | mapshift: | 46 | mapshift: |
| 44 | @if ! grep -qw $(SHIFT) generated/map-shift.h; then \ | 47 | @if ! grep -qws $(SHIFT) generated/map-shift.h; then \ |
| 45 | echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \ | 48 | echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \ |
| 46 | generated/map-shift.h; \ | 49 | generated/map-shift.h; \ |
| 47 | fi | 50 | fi |
diff --git a/tools/testing/radix-tree/benchmark.c b/tools/testing/radix-tree/benchmark.c index 9b09ddfe462f..99c40f3ed133 100644 --- a/tools/testing/radix-tree/benchmark.c +++ b/tools/testing/radix-tree/benchmark.c | |||
| @@ -17,6 +17,9 @@ | |||
| 17 | #include <time.h> | 17 | #include <time.h> |
| 18 | #include "test.h" | 18 | #include "test.h" |
| 19 | 19 | ||
| 20 | #define for_each_index(i, base, order) \ | ||
| 21 | for (i = base; i < base + (1 << order); i++) | ||
| 22 | |||
| 20 | #define NSEC_PER_SEC 1000000000L | 23 | #define NSEC_PER_SEC 1000000000L |
| 21 | 24 | ||
| 22 | static long long benchmark_iter(struct radix_tree_root *root, bool tagged) | 25 | static long long benchmark_iter(struct radix_tree_root *root, bool tagged) |
| @@ -57,27 +60,176 @@ again: | |||
| 57 | return nsec; | 60 | return nsec; |
| 58 | } | 61 | } |
| 59 | 62 | ||
| 63 | static void benchmark_insert(struct radix_tree_root *root, | ||
| 64 | unsigned long size, unsigned long step, int order) | ||
| 65 | { | ||
| 66 | struct timespec start, finish; | ||
| 67 | unsigned long index; | ||
| 68 | long long nsec; | ||
| 69 | |||
| 70 | clock_gettime(CLOCK_MONOTONIC, &start); | ||
| 71 | |||
| 72 | for (index = 0 ; index < size ; index += step) | ||
| 73 | item_insert_order(root, index, order); | ||
| 74 | |||
| 75 | clock_gettime(CLOCK_MONOTONIC, &finish); | ||
| 76 | |||
| 77 | nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC + | ||
| 78 | (finish.tv_nsec - start.tv_nsec); | ||
| 79 | |||
| 80 | printv(2, "Size: %8ld, step: %8ld, order: %d, insertion: %15lld ns\n", | ||
| 81 | size, step, order, nsec); | ||
| 82 | } | ||
| 83 | |||
| 84 | static void benchmark_tagging(struct radix_tree_root *root, | ||
| 85 | unsigned long size, unsigned long step, int order) | ||
| 86 | { | ||
| 87 | struct timespec start, finish; | ||
| 88 | unsigned long index; | ||
| 89 | long long nsec; | ||
| 90 | |||
| 91 | clock_gettime(CLOCK_MONOTONIC, &start); | ||
| 92 | |||
| 93 | for (index = 0 ; index < size ; index += step) | ||
| 94 | radix_tree_tag_set(root, index, 0); | ||
| 95 | |||
| 96 | clock_gettime(CLOCK_MONOTONIC, &finish); | ||
| 97 | |||
| 98 | nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC + | ||
| 99 | (finish.tv_nsec - start.tv_nsec); | ||
| 100 | |||
| 101 | printv(2, "Size: %8ld, step: %8ld, order: %d, tagging: %17lld ns\n", | ||
| 102 | size, step, order, nsec); | ||
| 103 | } | ||
| 104 | |||
| 105 | static void benchmark_delete(struct radix_tree_root *root, | ||
| 106 | unsigned long size, unsigned long step, int order) | ||
| 107 | { | ||
| 108 | struct timespec start, finish; | ||
| 109 | unsigned long index, i; | ||
| 110 | long long nsec; | ||
| 111 | |||
| 112 | clock_gettime(CLOCK_MONOTONIC, &start); | ||
| 113 | |||
| 114 | for (index = 0 ; index < size ; index += step) | ||
| 115 | for_each_index(i, index, order) | ||
| 116 | item_delete(root, i); | ||
| 117 | |||
| 118 | clock_gettime(CLOCK_MONOTONIC, &finish); | ||
| 119 | |||
| 120 | nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC + | ||
| 121 | (finish.tv_nsec - start.tv_nsec); | ||
| 122 | |||
| 123 | printv(2, "Size: %8ld, step: %8ld, order: %d, deletion: %16lld ns\n", | ||
| 124 | size, step, order, nsec); | ||
| 125 | } | ||
| 126 | |||
| 60 | static void benchmark_size(unsigned long size, unsigned long step, int order) | 127 | static void benchmark_size(unsigned long size, unsigned long step, int order) |
| 61 | { | 128 | { |
| 62 | RADIX_TREE(tree, GFP_KERNEL); | 129 | RADIX_TREE(tree, GFP_KERNEL); |
| 63 | long long normal, tagged; | 130 | long long normal, tagged; |
| 64 | unsigned long index; | ||
| 65 | 131 | ||
| 66 | for (index = 0 ; index < size ; index += step) { | 132 | benchmark_insert(&tree, size, step, order); |
| 67 | item_insert_order(&tree, index, order); | 133 | benchmark_tagging(&tree, size, step, order); |
| 68 | radix_tree_tag_set(&tree, index, 0); | ||
| 69 | } | ||
| 70 | 134 | ||
| 71 | tagged = benchmark_iter(&tree, true); | 135 | tagged = benchmark_iter(&tree, true); |
| 72 | normal = benchmark_iter(&tree, false); | 136 | normal = benchmark_iter(&tree, false); |
| 73 | 137 | ||
| 74 | printv(2, "Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n", | 138 | printv(2, "Size: %8ld, step: %8ld, order: %d, tagged iteration: %8lld ns\n", |
| 75 | size, step, order, tagged, normal); | 139 | size, step, order, tagged); |
| 140 | printv(2, "Size: %8ld, step: %8ld, order: %d, normal iteration: %8lld ns\n", | ||
| 141 | size, step, order, normal); | ||
| 142 | |||
| 143 | benchmark_delete(&tree, size, step, order); | ||
| 76 | 144 | ||
| 77 | item_kill_tree(&tree); | 145 | item_kill_tree(&tree); |
| 78 | rcu_barrier(); | 146 | rcu_barrier(); |
| 79 | } | 147 | } |
| 80 | 148 | ||
| 149 | static long long __benchmark_split(unsigned long index, | ||
| 150 | int old_order, int new_order) | ||
| 151 | { | ||
| 152 | struct timespec start, finish; | ||
| 153 | long long nsec; | ||
| 154 | RADIX_TREE(tree, GFP_ATOMIC); | ||
| 155 | |||
| 156 | item_insert_order(&tree, index, old_order); | ||
| 157 | |||
| 158 | clock_gettime(CLOCK_MONOTONIC, &start); | ||
| 159 | radix_tree_split(&tree, index, new_order); | ||
| 160 | clock_gettime(CLOCK_MONOTONIC, &finish); | ||
| 161 | nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC + | ||
| 162 | (finish.tv_nsec - start.tv_nsec); | ||
| 163 | |||
| 164 | item_kill_tree(&tree); | ||
| 165 | |||
| 166 | return nsec; | ||
| 167 | |||
| 168 | } | ||
| 169 | |||
| 170 | static void benchmark_split(unsigned long size, unsigned long step) | ||
| 171 | { | ||
| 172 | int i, j, idx; | ||
| 173 | long long nsec = 0; | ||
| 174 | |||
| 175 | |||
| 176 | for (idx = 0; idx < size; idx += step) { | ||
| 177 | for (i = 3; i < 11; i++) { | ||
| 178 | for (j = 0; j < i; j++) { | ||
| 179 | nsec += __benchmark_split(idx, i, j); | ||
| 180 | } | ||
| 181 | } | ||
| 182 | } | ||
| 183 | |||
| 184 | printv(2, "Size %8ld, step %8ld, split time %10lld ns\n", | ||
| 185 | size, step, nsec); | ||
| 186 | |||
| 187 | } | ||
| 188 | |||
| 189 | static long long __benchmark_join(unsigned long index, | ||
| 190 | unsigned order1, unsigned order2) | ||
| 191 | { | ||
| 192 | unsigned long loc; | ||
| 193 | struct timespec start, finish; | ||
| 194 | long long nsec; | ||
| 195 | void *item, *item2 = item_create(index + 1, order1); | ||
| 196 | RADIX_TREE(tree, GFP_KERNEL); | ||
| 197 | |||
| 198 | item_insert_order(&tree, index, order2); | ||
| 199 | item = radix_tree_lookup(&tree, index); | ||
| 200 | |||
| 201 | clock_gettime(CLOCK_MONOTONIC, &start); | ||
| 202 | radix_tree_join(&tree, index + 1, order1, item2); | ||
| 203 | clock_gettime(CLOCK_MONOTONIC, &finish); | ||
| 204 | nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC + | ||
| 205 | (finish.tv_nsec - start.tv_nsec); | ||
| 206 | |||
| 207 | loc = find_item(&tree, item); | ||
| 208 | if (loc == -1) | ||
| 209 | free(item); | ||
| 210 | |||
| 211 | item_kill_tree(&tree); | ||
| 212 | |||
| 213 | return nsec; | ||
| 214 | } | ||
| 215 | |||
| 216 | static void benchmark_join(unsigned long step) | ||
| 217 | { | ||
| 218 | int i, j, idx; | ||
| 219 | long long nsec = 0; | ||
| 220 | |||
| 221 | for (idx = 0; idx < 1 << 10; idx += step) { | ||
| 222 | for (i = 1; i < 15; i++) { | ||
| 223 | for (j = 0; j < i; j++) { | ||
| 224 | nsec += __benchmark_join(idx, i, j); | ||
| 225 | } | ||
| 226 | } | ||
| 227 | } | ||
| 228 | |||
| 229 | printv(2, "Size %8d, step %8ld, join time %10lld ns\n", | ||
| 230 | 1 << 10, step, nsec); | ||
| 231 | } | ||
| 232 | |||
| 81 | void benchmark(void) | 233 | void benchmark(void) |
| 82 | { | 234 | { |
| 83 | unsigned long size[] = {1 << 10, 1 << 20, 0}; | 235 | unsigned long size[] = {1 << 10, 1 << 20, 0}; |
| @@ -95,4 +247,11 @@ void benchmark(void) | |||
| 95 | for (c = 0; size[c]; c++) | 247 | for (c = 0; size[c]; c++) |
| 96 | for (s = 0; step[s]; s++) | 248 | for (s = 0; step[s]; s++) |
| 97 | benchmark_size(size[c], step[s] << 9, 9); | 249 | benchmark_size(size[c], step[s] << 9, 9); |
| 250 | |||
| 251 | for (c = 0; size[c]; c++) | ||
| 252 | for (s = 0; step[s]; s++) | ||
| 253 | benchmark_split(size[c], step[s]); | ||
| 254 | |||
| 255 | for (s = 0; step[s]; s++) | ||
| 256 | benchmark_join(step[s]); | ||
| 98 | } | 257 | } |
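The benchmark_insert/tagging/delete/split/join helpers above all share the same CLOCK_MONOTONIC delta idiom. For reference, that computation on its own, with a hypothetical helper name and not part of the patch:

#include <time.h>

/* Sketch only: nanoseconds elapsed between two CLOCK_MONOTONIC samples,
 * as computed inline by the benchmark_* helpers. */
static long long elapsed_ns(const struct timespec *start,
			    const struct timespec *finish)
{
	return (finish->tv_sec - start->tv_sec) * 1000000000LL +
	       (finish->tv_nsec - start->tv_nsec);
}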
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index a26098c6123d..30cd0b296f1a 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c | |||
| @@ -153,6 +153,30 @@ void idr_nowait_test(void) | |||
| 153 | idr_destroy(&idr); | 153 | idr_destroy(&idr); |
| 154 | } | 154 | } |
| 155 | 155 | ||
| 156 | void idr_get_next_test(void) | ||
| 157 | { | ||
| 158 | unsigned long i; | ||
| 159 | int nextid; | ||
| 160 | DEFINE_IDR(idr); | ||
| 161 | |||
| 162 | int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0}; | ||
| 163 | |||
| 164 | for(i = 0; indices[i]; i++) { | ||
| 165 | struct item *item = item_create(indices[i], 0); | ||
| 166 | assert(idr_alloc(&idr, item, indices[i], indices[i+1], | ||
| 167 | GFP_KERNEL) == indices[i]); | ||
| 168 | } | ||
| 169 | |||
| 170 | for(i = 0, nextid = 0; indices[i]; i++) { | ||
| 171 | idr_get_next(&idr, &nextid); | ||
| 172 | assert(nextid == indices[i]); | ||
| 173 | nextid++; | ||
| 174 | } | ||
| 175 | |||
| 176 | idr_for_each(&idr, item_idr_free, &idr); | ||
| 177 | idr_destroy(&idr); | ||
| 178 | } | ||
| 179 | |||
| 156 | void idr_checks(void) | 180 | void idr_checks(void) |
| 157 | { | 181 | { |
| 158 | unsigned long i; | 182 | unsigned long i; |
| @@ -202,6 +226,7 @@ void idr_checks(void) | |||
| 202 | idr_alloc_test(); | 226 | idr_alloc_test(); |
| 203 | idr_null_test(); | 227 | idr_null_test(); |
| 204 | idr_nowait_test(); | 228 | idr_nowait_test(); |
| 229 | idr_get_next_test(); | ||
| 205 | } | 230 | } |
| 206 | 231 | ||
| 207 | /* | 232 | /* |
| @@ -338,7 +363,7 @@ void ida_check_random(void) | |||
| 338 | { | 363 | { |
| 339 | DEFINE_IDA(ida); | 364 | DEFINE_IDA(ida); |
| 340 | DECLARE_BITMAP(bitmap, 2048); | 365 | DECLARE_BITMAP(bitmap, 2048); |
| 341 | int id; | 366 | int id, err; |
| 342 | unsigned int i; | 367 | unsigned int i; |
| 343 | time_t s = time(NULL); | 368 | time_t s = time(NULL); |
| 344 | 369 | ||
| @@ -352,8 +377,11 @@ void ida_check_random(void) | |||
| 352 | ida_remove(&ida, bit); | 377 | ida_remove(&ida, bit); |
| 353 | } else { | 378 | } else { |
| 354 | __set_bit(bit, bitmap); | 379 | __set_bit(bit, bitmap); |
| 355 | ida_pre_get(&ida, GFP_KERNEL); | 380 | do { |
| 356 | assert(!ida_get_new_above(&ida, bit, &id)); | 381 | ida_pre_get(&ida, GFP_KERNEL); |
| 382 | err = ida_get_new_above(&ida, bit, &id); | ||
| 383 | } while (err == -ENOMEM); | ||
| 384 | assert(!err); | ||
| 357 | assert(id == bit); | 385 | assert(id == bit); |
| 358 | } | 386 | } |
| 359 | } | 387 | } |
| @@ -362,6 +390,24 @@ void ida_check_random(void) | |||
| 362 | goto repeat; | 390 | goto repeat; |
| 363 | } | 391 | } |
| 364 | 392 | ||
| 393 | void ida_simple_get_remove_test(void) | ||
| 394 | { | ||
| 395 | DEFINE_IDA(ida); | ||
| 396 | unsigned long i; | ||
| 397 | |||
| 398 | for (i = 0; i < 10000; i++) { | ||
| 399 | assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i); | ||
| 400 | } | ||
| 401 | assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0); | ||
| 402 | |||
| 403 | for (i = 0; i < 10000; i++) { | ||
| 404 | ida_simple_remove(&ida, i); | ||
| 405 | } | ||
| 406 | assert(ida_is_empty(&ida)); | ||
| 407 | |||
| 408 | ida_destroy(&ida); | ||
| 409 | } | ||
| 410 | |||
| 365 | void ida_checks(void) | 411 | void ida_checks(void) |
| 366 | { | 412 | { |
| 367 | DEFINE_IDA(ida); | 413 | DEFINE_IDA(ida); |
| @@ -428,15 +474,41 @@ void ida_checks(void) | |||
| 428 | ida_check_max(); | 474 | ida_check_max(); |
| 429 | ida_check_conv(); | 475 | ida_check_conv(); |
| 430 | ida_check_random(); | 476 | ida_check_random(); |
| 477 | ida_simple_get_remove_test(); | ||
| 431 | 478 | ||
| 432 | radix_tree_cpu_dead(1); | 479 | radix_tree_cpu_dead(1); |
| 433 | } | 480 | } |
| 434 | 481 | ||
| 482 | static void *ida_random_fn(void *arg) | ||
| 483 | { | ||
| 484 | rcu_register_thread(); | ||
| 485 | ida_check_random(); | ||
| 486 | rcu_unregister_thread(); | ||
| 487 | return NULL; | ||
| 488 | } | ||
| 489 | |||
| 490 | void ida_thread_tests(void) | ||
| 491 | { | ||
| 492 | pthread_t threads[10]; | ||
| 493 | int i; | ||
| 494 | |||
| 495 | for (i = 0; i < ARRAY_SIZE(threads); i++) | ||
| 496 | if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) { | ||
| 497 | perror("creating ida thread"); | ||
| 498 | exit(1); | ||
| 499 | } | ||
| 500 | |||
| 501 | while (i--) | ||
| 502 | pthread_join(threads[i], NULL); | ||
| 503 | } | ||
| 504 | |||
| 435 | int __weak main(void) | 505 | int __weak main(void) |
| 436 | { | 506 | { |
| 437 | radix_tree_init(); | 507 | radix_tree_init(); |
| 438 | idr_checks(); | 508 | idr_checks(); |
| 439 | ida_checks(); | 509 | ida_checks(); |
| 510 | ida_thread_tests(); | ||
| 511 | radix_tree_cpu_dead(1); | ||
| 440 | rcu_barrier(); | 512 | rcu_barrier(); |
| 441 | if (nr_allocated) | 513 | if (nr_allocated) |
| 442 | printf("nr_allocated = %d\n", nr_allocated); | 514 | printf("nr_allocated = %d\n", nr_allocated); |
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index b829127d5670..bc9a78449572 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c | |||
| @@ -368,6 +368,7 @@ int main(int argc, char **argv) | |||
| 368 | iteration_test(0, 10 + 90 * long_run); | 368 | iteration_test(0, 10 + 90 * long_run); |
| 369 | iteration_test(7, 10 + 90 * long_run); | 369 | iteration_test(7, 10 + 90 * long_run); |
| 370 | single_thread_tests(long_run); | 370 | single_thread_tests(long_run); |
| 371 | ida_thread_tests(); | ||
| 371 | 372 | ||
| 372 | /* Free any remaining preallocated nodes */ | 373 | /* Free any remaining preallocated nodes */ |
| 373 | radix_tree_cpu_dead(0); | 374 | radix_tree_cpu_dead(0); |
diff --git a/tools/testing/radix-tree/tag_check.c b/tools/testing/radix-tree/tag_check.c index d4ff00989245..36dcf7d6945d 100644 --- a/tools/testing/radix-tree/tag_check.c +++ b/tools/testing/radix-tree/tag_check.c | |||
| @@ -330,6 +330,34 @@ static void single_check(void) | |||
| 330 | item_kill_tree(&tree); | 330 | item_kill_tree(&tree); |
| 331 | } | 331 | } |
| 332 | 332 | ||
| 333 | void radix_tree_clear_tags_test(void) | ||
| 334 | { | ||
| 335 | unsigned long index; | ||
| 336 | struct radix_tree_node *node; | ||
| 337 | struct radix_tree_iter iter; | ||
| 338 | void **slot; | ||
| 339 | |||
| 340 | RADIX_TREE(tree, GFP_KERNEL); | ||
| 341 | |||
| 342 | item_insert(&tree, 0); | ||
| 343 | item_tag_set(&tree, 0, 0); | ||
| 344 | __radix_tree_lookup(&tree, 0, &node, &slot); | ||
| 345 | radix_tree_clear_tags(&tree, node, slot); | ||
| 346 | assert(item_tag_get(&tree, 0, 0) == 0); | ||
| 347 | |||
| 348 | for (index = 0; index < 1000; index++) { | ||
| 349 | item_insert(&tree, index); | ||
| 350 | item_tag_set(&tree, index, 0); | ||
| 351 | } | ||
| 352 | |||
| 353 | radix_tree_for_each_slot(slot, &tree, &iter, 0) { | ||
| 354 | radix_tree_clear_tags(&tree, iter.node, slot); | ||
| 355 | assert(item_tag_get(&tree, iter.index, 0) == 0); | ||
| 356 | } | ||
| 357 | |||
| 358 | item_kill_tree(&tree); | ||
| 359 | } | ||
| 360 | |||
| 333 | void tag_check(void) | 361 | void tag_check(void) |
| 334 | { | 362 | { |
| 335 | single_check(); | 363 | single_check(); |
| @@ -347,4 +375,5 @@ void tag_check(void) | |||
| 347 | thrash_tags(); | 375 | thrash_tags(); |
| 348 | rcu_barrier(); | 376 | rcu_barrier(); |
| 349 | printv(2, "after thrash_tags: %d allocated\n", nr_allocated); | 377 | printv(2, "after thrash_tags: %d allocated\n", nr_allocated); |
| 378 | radix_tree_clear_tags_test(); | ||
| 350 | } | 379 | } |
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index b30e11d9d271..0f8220cc6166 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h | |||
| @@ -36,6 +36,7 @@ void iteration_test(unsigned order, unsigned duration); | |||
| 36 | void benchmark(void); | 36 | void benchmark(void); |
| 37 | void idr_checks(void); | 37 | void idr_checks(void); |
| 38 | void ida_checks(void); | 38 | void ida_checks(void); |
| 39 | void ida_thread_tests(void); | ||
| 39 | 40 | ||
| 40 | struct item * | 41 | struct item * |
| 41 | item_tag_set(struct radix_tree_root *root, unsigned long index, int tag); | 42 | item_tag_set(struct radix_tree_root *root, unsigned long index, int tag); |
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c index 248a820048df..66d31de60b9a 100644 --- a/tools/testing/selftests/powerpc/harness.c +++ b/tools/testing/selftests/powerpc/harness.c | |||
| @@ -114,9 +114,11 @@ int test_harness(int (test_function)(void), char *name) | |||
| 114 | 114 | ||
| 115 | rc = run_test(test_function, name); | 115 | rc = run_test(test_function, name); |
| 116 | 116 | ||
| 117 | if (rc == MAGIC_SKIP_RETURN_VALUE) | 117 | if (rc == MAGIC_SKIP_RETURN_VALUE) { |
| 118 | test_skip(name); | 118 | test_skip(name); |
| 119 | else | 119 | /* so that skipped test is not marked as failed */ |
| 120 | rc = 0; | ||
| 121 | } else | ||
| 120 | test_finish(name, rc); | 122 | test_finish(name, rc); |
| 121 | 123 | ||
| 122 | return rc; | 124 | return rc; |
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 4cff7e7ddcc4..41642ba5e318 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile | |||
| @@ -1,5 +1,9 @@ | |||
| 1 | # Makefile for vm selftests | 1 | # Makefile for vm selftests |
| 2 | 2 | ||
| 3 | ifndef OUTPUT | ||
| 4 | OUTPUT := $(shell pwd) | ||
| 5 | endif | ||
| 6 | |||
| 3 | CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS) | 7 | CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS) |
| 4 | LDLIBS = -lrt | 8 | LDLIBS = -lrt |
| 5 | TEST_GEN_FILES = compaction_test | 9 | TEST_GEN_FILES = compaction_test |
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c index 5b2b4b3c634c..b4967d875236 100644 --- a/tools/testing/selftests/x86/fsgsbase.c +++ b/tools/testing/selftests/x86/fsgsbase.c | |||
| @@ -245,7 +245,7 @@ void do_unexpected_base(void) | |||
| 245 | long ret; | 245 | long ret; |
| 246 | asm volatile ("int $0x80" | 246 | asm volatile ("int $0x80" |
| 247 | : "=a" (ret) : "a" (243), "b" (low_desc) | 247 | : "=a" (ret) : "a" (243), "b" (low_desc) |
| 248 | : "flags"); | 248 | : "r8", "r9", "r10", "r11"); |
| 249 | memcpy(&desc, low_desc, sizeof(desc)); | 249 | memcpy(&desc, low_desc, sizeof(desc)); |
| 250 | munmap(low_desc, sizeof(desc)); | 250 | munmap(low_desc, sizeof(desc)); |
| 251 | 251 | ||
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c index 4af47079cf04..f6121612e769 100644 --- a/tools/testing/selftests/x86/ldt_gdt.c +++ b/tools/testing/selftests/x86/ldt_gdt.c | |||
| @@ -45,6 +45,12 @@ | |||
| 45 | #define AR_DB (1 << 22) | 45 | #define AR_DB (1 << 22) |
| 46 | #define AR_G (1 << 23) | 46 | #define AR_G (1 << 23) |
| 47 | 47 | ||
| 48 | #ifdef __x86_64__ | ||
| 49 | # define INT80_CLOBBERS "r8", "r9", "r10", "r11" | ||
| 50 | #else | ||
| 51 | # define INT80_CLOBBERS | ||
| 52 | #endif | ||
| 53 | |||
| 48 | static int nerrs; | 54 | static int nerrs; |
| 49 | 55 | ||
| 50 | /* Points to an array of 1024 ints, each holding its own index. */ | 56 | /* Points to an array of 1024 ints, each holding its own index. */ |
| @@ -588,7 +594,7 @@ static int invoke_set_thread_area(void) | |||
| 588 | asm volatile ("int $0x80" | 594 | asm volatile ("int $0x80" |
| 589 | : "=a" (ret), "+m" (low_user_desc) : | 595 | : "=a" (ret), "+m" (low_user_desc) : |
| 590 | "a" (243), "b" (low_user_desc) | 596 | "a" (243), "b" (low_user_desc) |
| 591 | : "flags"); | 597 | : INT80_CLOBBERS); |
| 592 | return ret; | 598 | return ret; |
| 593 | } | 599 | } |
| 594 | 600 | ||
| @@ -657,7 +663,7 @@ static void test_gdt_invalidation(void) | |||
| 657 | "+a" (eax) | 663 | "+a" (eax) |
| 658 | : "m" (low_user_desc_clear), | 664 | : "m" (low_user_desc_clear), |
| 659 | [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) | 665 | [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) |
| 660 | : "flags"); | 666 | : INT80_CLOBBERS); |
| 661 | 667 | ||
| 662 | if (sel != 0) { | 668 | if (sel != 0) { |
| 663 | result = "FAIL"; | 669 | result = "FAIL"; |
| @@ -688,7 +694,7 @@ static void test_gdt_invalidation(void) | |||
| 688 | "+a" (eax) | 694 | "+a" (eax) |
| 689 | : "m" (low_user_desc_clear), | 695 | : "m" (low_user_desc_clear), |
| 690 | [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) | 696 | [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) |
| 691 | : "flags"); | 697 | : INT80_CLOBBERS); |
| 692 | 698 | ||
| 693 | if (sel != 0) { | 699 | if (sel != 0) { |
| 694 | result = "FAIL"; | 700 | result = "FAIL"; |
| @@ -721,7 +727,7 @@ static void test_gdt_invalidation(void) | |||
| 721 | "+a" (eax) | 727 | "+a" (eax) |
| 722 | : "m" (low_user_desc_clear), | 728 | : "m" (low_user_desc_clear), |
| 723 | [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) | 729 | [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) |
| 724 | : "flags"); | 730 | : INT80_CLOBBERS); |
| 725 | 731 | ||
| 726 | #ifdef __x86_64__ | 732 | #ifdef __x86_64__ |
| 727 | syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base); | 733 | syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base); |
| @@ -774,7 +780,7 @@ static void test_gdt_invalidation(void) | |||
| 774 | "+a" (eax) | 780 | "+a" (eax) |
| 775 | : "m" (low_user_desc_clear), | 781 | : "m" (low_user_desc_clear), |
| 776 | [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) | 782 | [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) |
| 777 | : "flags"); | 783 | : INT80_CLOBBERS); |
| 778 | 784 | ||
| 779 | #ifdef __x86_64__ | 785 | #ifdef __x86_64__ |
| 780 | syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base); | 786 | syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base); |
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c index b037ce9cf116..eaea92439708 100644 --- a/tools/testing/selftests/x86/ptrace_syscall.c +++ b/tools/testing/selftests/x86/ptrace_syscall.c | |||
| @@ -58,7 +58,8 @@ static void do_full_int80(struct syscall_args32 *args) | |||
| 58 | asm volatile ("int $0x80" | 58 | asm volatile ("int $0x80" |
| 59 | : "+a" (args->nr), | 59 | : "+a" (args->nr), |
| 60 | "+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2), | 60 | "+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2), |
| 61 | "+S" (args->arg3), "+D" (args->arg4), "+r" (bp)); | 61 | "+S" (args->arg3), "+D" (args->arg4), "+r" (bp) |
| 62 | : : "r8", "r9", "r10", "r11"); | ||
| 62 | args->arg5 = bp; | 63 | args->arg5 = bp; |
| 63 | #else | 64 | #else |
| 64 | sys32_helper(args, int80_and_ret); | 65 | sys32_helper(args, int80_and_ret); |
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c index 50c26358e8b7..a48da95c18fd 100644 --- a/tools/testing/selftests/x86/single_step_syscall.c +++ b/tools/testing/selftests/x86/single_step_syscall.c | |||
| @@ -56,9 +56,11 @@ static volatile sig_atomic_t sig_traps; | |||
| 56 | #ifdef __x86_64__ | 56 | #ifdef __x86_64__ |
| 57 | # define REG_IP REG_RIP | 57 | # define REG_IP REG_RIP |
| 58 | # define WIDTH "q" | 58 | # define WIDTH "q" |
| 59 | # define INT80_CLOBBERS "r8", "r9", "r10", "r11" | ||
| 59 | #else | 60 | #else |
| 60 | # define REG_IP REG_EIP | 61 | # define REG_IP REG_EIP |
| 61 | # define WIDTH "l" | 62 | # define WIDTH "l" |
| 63 | # define INT80_CLOBBERS | ||
| 62 | #endif | 64 | #endif |
| 63 | 65 | ||
| 64 | static unsigned long get_eflags(void) | 66 | static unsigned long get_eflags(void) |
| @@ -140,7 +142,8 @@ int main() | |||
| 140 | 142 | ||
| 141 | printf("[RUN]\tSet TF and check int80\n"); | 143 | printf("[RUN]\tSet TF and check int80\n"); |
| 142 | set_eflags(get_eflags() | X86_EFLAGS_TF); | 144 | set_eflags(get_eflags() | X86_EFLAGS_TF); |
| 143 | asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)); | 145 | asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid) |
| 146 | : INT80_CLOBBERS); | ||
| 144 | check_result(); | 147 | check_result(); |
| 145 | 148 | ||
| 146 | /* | 149 | /* |
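The selftest changes above replace the "flags" clobber with r8-r11 on 64-bit builds, since the 32-bit int $0x80 entry is not guaranteed to preserve those registers when invoked from 64-bit code. A standalone userspace sketch of the same convention, not part of the patch and assuming an x86-64 Linux host:

#include <stdio.h>

int main(void)
{
#ifdef __x86_64__
	long ret;

	/* 20 is __NR_getpid in the 32-bit syscall table. */
	asm volatile ("int $0x80"
		      : "=a" (ret)
		      : "a" (20)
		      : "r8", "r9", "r10", "r11");
	printf("getpid via int $0x80: %ld\n", ret);
#endif
	return 0;
}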
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 571b64a01c50..8d1da1af4b09 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c | |||
| @@ -360,29 +360,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) | |||
| 360 | return ret; | 360 | return ret; |
| 361 | } | 361 | } |
| 362 | 362 | ||
| 363 | static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu, | ||
| 364 | struct vgic_its *its, | ||
| 365 | gpa_t addr, unsigned int len) | ||
| 366 | { | ||
| 367 | u32 reg = 0; | ||
| 368 | |||
| 369 | mutex_lock(&its->cmd_lock); | ||
| 370 | if (its->creadr == its->cwriter) | ||
| 371 | reg |= GITS_CTLR_QUIESCENT; | ||
| 372 | if (its->enabled) | ||
| 373 | reg |= GITS_CTLR_ENABLE; | ||
| 374 | mutex_unlock(&its->cmd_lock); | ||
| 375 | |||
| 376 | return reg; | ||
| 377 | } | ||
| 378 | |||
| 379 | static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its, | ||
| 380 | gpa_t addr, unsigned int len, | ||
| 381 | unsigned long val) | ||
| 382 | { | ||
| 383 | its->enabled = !!(val & GITS_CTLR_ENABLE); | ||
| 384 | } | ||
| 385 | |||
| 386 | static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm, | 363 | static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm, |
| 387 | struct vgic_its *its, | 364 | struct vgic_its *its, |
| 388 | gpa_t addr, unsigned int len) | 365 | gpa_t addr, unsigned int len) |
| @@ -1161,33 +1138,16 @@ static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its, | |||
| 1161 | #define ITS_CMD_SIZE 32 | 1138 | #define ITS_CMD_SIZE 32 |
| 1162 | #define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5)) | 1139 | #define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5)) |
| 1163 | 1140 | ||
| 1164 | /* | 1141 | /* Must be called with the cmd_lock held. */ |
| 1165 | * By writing to CWRITER the guest announces new commands to be processed. | 1142 | static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its) |
| 1166 | * To avoid any races in the first place, we take the its_cmd lock, which | ||
| 1167 | * protects our ring buffer variables, so that there is only one user | ||
| 1168 | * per ITS handling commands at a given time. | ||
| 1169 | */ | ||
| 1170 | static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its, | ||
| 1171 | gpa_t addr, unsigned int len, | ||
| 1172 | unsigned long val) | ||
| 1173 | { | 1143 | { |
| 1174 | gpa_t cbaser; | 1144 | gpa_t cbaser; |
| 1175 | u64 cmd_buf[4]; | 1145 | u64 cmd_buf[4]; |
| 1176 | u32 reg; | ||
| 1177 | 1146 | ||
| 1178 | if (!its) | 1147 | /* Commands are only processed when the ITS is enabled. */ |
| 1179 | return; | 1148 | if (!its->enabled) |
| 1180 | |||
| 1181 | mutex_lock(&its->cmd_lock); | ||
| 1182 | |||
| 1183 | reg = update_64bit_reg(its->cwriter, addr & 7, len, val); | ||
| 1184 | reg = ITS_CMD_OFFSET(reg); | ||
| 1185 | if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) { | ||
| 1186 | mutex_unlock(&its->cmd_lock); | ||
| 1187 | return; | 1149 | return; |
| 1188 | } | ||
| 1189 | 1150 | ||
| 1190 | its->cwriter = reg; | ||
| 1191 | cbaser = CBASER_ADDRESS(its->cbaser); | 1151 | cbaser = CBASER_ADDRESS(its->cbaser); |
| 1192 | 1152 | ||
| 1193 | while (its->cwriter != its->creadr) { | 1153 | while (its->cwriter != its->creadr) { |
| @@ -1207,6 +1167,34 @@ static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its, | |||
| 1207 | if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser)) | 1167 | if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser)) |
| 1208 | its->creadr = 0; | 1168 | its->creadr = 0; |
| 1209 | } | 1169 | } |
| 1170 | } | ||
| 1171 | |||
| 1172 | /* | ||
| 1173 | * By writing to CWRITER the guest announces new commands to be processed. | ||
| 1174 | * To avoid any races in the first place, we take the its_cmd lock, which | ||
| 1175 | * protects our ring buffer variables, so that there is only one user | ||
| 1176 | * per ITS handling commands at a given time. | ||
| 1177 | */ | ||
| 1178 | static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its, | ||
| 1179 | gpa_t addr, unsigned int len, | ||
| 1180 | unsigned long val) | ||
| 1181 | { | ||
| 1182 | u64 reg; | ||
| 1183 | |||
| 1184 | if (!its) | ||
| 1185 | return; | ||
| 1186 | |||
| 1187 | mutex_lock(&its->cmd_lock); | ||
| 1188 | |||
| 1189 | reg = update_64bit_reg(its->cwriter, addr & 7, len, val); | ||
| 1190 | reg = ITS_CMD_OFFSET(reg); | ||
| 1191 | if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) { | ||
| 1192 | mutex_unlock(&its->cmd_lock); | ||
| 1193 | return; | ||
| 1194 | } | ||
| 1195 | its->cwriter = reg; | ||
| 1196 | |||
| 1197 | vgic_its_process_commands(kvm, its); | ||
| 1210 | 1198 | ||
| 1211 | mutex_unlock(&its->cmd_lock); | 1199 | mutex_unlock(&its->cmd_lock); |
| 1212 | } | 1200 | } |
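Net effect of this hunk: the CWRITER handler now only validates and stores the new write pointer under cmd_lock, and the extracted vgic_its_process_commands() does the draining, bailing out while the ITS is disabled. A simplified sketch of the consumer loop with the split hunk pieces put back together; the read-and-dispatch step is paraphrased (the real code reads the 32-byte command from guest memory at CBASER + CREADR and handles it):

    /* Sketch: drain the command queue, wrapping CREADR at the end of the
     * guest's command buffer.  Caller holds its->cmd_lock. */
    static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
    {
            gpa_t cbaser;
            u64 cmd_buf[4];                 /* one 32-byte ITS command */

            if (!its->enabled)              /* commands wait until enable */
                    return;

            cbaser = CBASER_ADDRESS(its->cbaser);

            while (its->cwriter != its->creadr) {
                    /* ... read the command at cbaser + its->creadr into
                     * cmd_buf and dispatch it ... */

                    its->creadr += ITS_CMD_SIZE;
                    if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
                            its->creadr = 0;        /* wrap around */
            }
    }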
| @@ -1287,6 +1275,39 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm, | |||
| 1287 | *regptr = reg; | 1275 | *regptr = reg; |
| 1288 | } | 1276 | } |
| 1289 | 1277 | ||
| 1278 | static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu, | ||
| 1279 | struct vgic_its *its, | ||
| 1280 | gpa_t addr, unsigned int len) | ||
| 1281 | { | ||
| 1282 | u32 reg = 0; | ||
| 1283 | |||
| 1284 | mutex_lock(&its->cmd_lock); | ||
| 1285 | if (its->creadr == its->cwriter) | ||
| 1286 | reg |= GITS_CTLR_QUIESCENT; | ||
| 1287 | if (its->enabled) | ||
| 1288 | reg |= GITS_CTLR_ENABLE; | ||
| 1289 | mutex_unlock(&its->cmd_lock); | ||
| 1290 | |||
| 1291 | return reg; | ||
| 1292 | } | ||
| 1293 | |||
| 1294 | static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its, | ||
| 1295 | gpa_t addr, unsigned int len, | ||
| 1296 | unsigned long val) | ||
| 1297 | { | ||
| 1298 | mutex_lock(&its->cmd_lock); | ||
| 1299 | |||
| 1300 | its->enabled = !!(val & GITS_CTLR_ENABLE); | ||
| 1301 | |||
| 1302 | /* | ||
| 1303 | * Try to process any pending commands. This function bails out early | ||
| 1304 | * if the ITS is disabled or no commands have been queued. | ||
| 1305 | */ | ||
| 1306 | vgic_its_process_commands(kvm, its); | ||
| 1307 | |||
| 1308 | mutex_unlock(&its->cmd_lock); | ||
| 1309 | } | ||
| 1310 | |||
| 1290 | #define REGISTER_ITS_DESC(off, rd, wr, length, acc) \ | 1311 | #define REGISTER_ITS_DESC(off, rd, wr, length, acc) \ |
| 1291 | { \ | 1312 | { \ |
| 1292 | .reg_offset = off, \ | 1313 | .reg_offset = off, \ |
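The relocated handlers still have to be referenced from the ITS register table built with REGISTER_ITS_DESC(), of which only the macro header is visible above. Presumed shape of the GITS_CTLR entry, not shown in this diff, so treat the table name and access flag as assumptions:

    static struct vgic_register_region its_registers[] = {
            REGISTER_ITS_DESC(GITS_CTLR,
                    vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
                    VGIC_ACCESS_32bit),
            /* ... GITS_IIDR, GITS_TYPER, GITS_CBASER, GITS_CWRITER, ... */
    };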
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 3654b4c835ef..2a5db1352722 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c | |||
| @@ -180,21 +180,37 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, | |||
| 180 | static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, | 180 | static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, |
| 181 | bool new_active_state) | 181 | bool new_active_state) |
| 182 | { | 182 | { |
| 183 | struct kvm_vcpu *requester_vcpu; | ||
| 183 | spin_lock(&irq->irq_lock); | 184 | spin_lock(&irq->irq_lock); |
| 185 | |||
| 186 | /* | ||
| 187 | * The vcpu parameter here can mean multiple things depending on how | ||
| 188 | * this function is called; when handling a trap from the kernel it | ||
| 189 | * depends on the GIC version, and these functions are also called as | ||
| 190 | * part of save/restore from userspace. | ||
| 191 | * | ||
| 192 | * Therefore, we have to figure out the requester in a reliable way. | ||
| 193 | * | ||
| 194 | * When accessing VGIC state from user space, the requester_vcpu is | ||
| 195 | * NULL, which is fine, because we guarantee that no VCPUs are running | ||
| 196 | * when accessing VGIC state from user space so irq->vcpu->cpu is | ||
| 197 | * always -1. | ||
| 198 | */ | ||
| 199 | requester_vcpu = kvm_arm_get_running_vcpu(); | ||
| 200 | |||
| 184 | /* | 201 | /* |
| 185 | * If this virtual IRQ was written into a list register, we | 202 | * If this virtual IRQ was written into a list register, we |
| 186 | * have to make sure the CPU that runs the VCPU thread has | 203 | * have to make sure the CPU that runs the VCPU thread has |
| 187 | * synced back LR state to the struct vgic_irq. We can only | 204 | * synced back the LR state to the struct vgic_irq. |
| 188 | * know this for sure, when either this irq is not assigned to | ||
| 189 | * anyone's AP list anymore, or the VCPU thread is not | ||
| 190 | * running on any CPUs. | ||
| 191 | * | 205 | * |
| 192 | * In the opposite case, we know the VCPU thread may be on its | 206 | * As long as the conditions below are true, we know the VCPU thread |
| 193 | * way back from the guest and still has to sync back this | 207 | * may be on its way back from the guest (we kicked the VCPU thread in |
| 194 | * IRQ, so we release and re-acquire the spin_lock to let the | 208 | * vgic_change_active_prepare) and still has to sync back this IRQ, |
| 195 | * other thread sync back the IRQ. | 209 | * so we release and re-acquire the spin_lock to let the other thread |
| 210 | * sync back the IRQ. | ||
| 196 | */ | 211 | */ |
| 197 | while (irq->vcpu && /* IRQ may have state in an LR somewhere */ | 212 | while (irq->vcpu && /* IRQ may have state in an LR somewhere */ |
| 213 | irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */ | ||
| 198 | irq->vcpu->cpu != -1) /* VCPU thread is running */ | 214 | irq->vcpu->cpu != -1) /* VCPU thread is running */ |
| 199 | cond_resched_lock(&irq->irq_lock); | 215 | cond_resched_lock(&irq->irq_lock); |
| 200 | 216 | ||
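The point of requester_vcpu: when the active-state write comes from the VCPU thread itself (a trapped GIC access), that thread is by definition no longer running in the guest, so waiting for it to sync its list registers would mean waiting on ourselves. A hypothetical helper spelling out the loop condition above (not part of the kernel source):

    /* Hypothetical predicate: keep waiting only while some *other* VCPU
     * thread may still hold this IRQ's state in a list register. */
    static bool irq_may_be_in_lr(struct vgic_irq *irq,
                                 struct kvm_vcpu *requester_vcpu)
    {
            return irq->vcpu &&                     /* IRQ on a VCPU's AP list */
                   irq->vcpu != requester_vcpu &&   /* ...not the requester's  */
                   irq->vcpu->cpu != -1;            /* ...and it is running    */
    }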
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index edc6ee2dc852..be0f4c3e0142 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
| @@ -229,10 +229,13 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu) | |||
| 229 | /* | 229 | /* |
| 230 | * If we are emulating a GICv3, we do it in a non-GICv2-compatible | 230 | * If we are emulating a GICv3, we do it in a non-GICv2-compatible |
| 231 | * way, so we force SRE to 1 to demonstrate this to the guest. | 231 | * way, so we force SRE to 1 to demonstrate this to the guest. |
| 232 | * Also, we don't support any form of IRQ/FIQ bypass. | ||
| 232 | * This goes with the spec allowing the value to be RAO/WI. | 233 | * This goes with the spec allowing the value to be RAO/WI. |
| 233 | */ | 234 | */ |
| 234 | if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { | 235 | if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { |
| 235 | vgic_v3->vgic_sre = ICC_SRE_EL1_SRE; | 236 | vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB | |
| 237 | ICC_SRE_EL1_DFB | | ||
| 238 | ICC_SRE_EL1_SRE); | ||
| 236 | vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE; | 239 | vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE; |
| 237 | } else { | 240 | } else { |
| 238 | vgic_v3->vgic_sre = 0; | 241 | vgic_v3->vgic_sre = 0; |
